repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
fw1121/ete | examples/phylogenies/link_sequences_to_phylogenies.py | 4 | 3112 | from ete2 import PhyloTree
fasta_txt = """
>seqA
MAEIPDETIQQFMALT---HNIAVQYLSEFGDLNEALNSYYASQTDDIKDRREEAH
>seqB
MAEIPDATIQQFMALTNVSHNIAVQY--EFGDLNEALNSYYAYQTDDQKDRREEAH
>seqC
MAEIPDATIQ---ALTNVSHNIAVQYLSEFGDLNEALNSYYASQTDDQPDRREEAH
>seqD
MAEAPDETIQQFMALTNVSHNIAVQYLSEFGDLNEAL--------------REEAH
"""
iphylip_txt = """
4 76
seqA MAEIPDETIQ QFMALT---H NIAVQYLSEF GDLNEALNSY YASQTDDIKD RREEAHQFMA
seqB MAEIPDATIQ QFMALTNVSH NIAVQY--EF GDLNEALNSY YAYQTDDQKD RREEAHQFMA
seqC MAEIPDATIQ ---ALTNVSH NIAVQYLSEF GDLNEALNSY YASQTDDQPD RREEAHQFMA
seqD MAEAPDETIQ QFMALTNVSH NIAVQYLSEF GDLNEAL--- ---------- -REEAHQ---
LTNVSHQFMA LTNVSH
LTNVSH---- ------
LTNVSH---- ------
-------FMA LTNVSH
"""
# Load a tree and link it to an alignment. As usual, 'alignment' can
# be the path to a file or data in text format.
t = PhyloTree("(((seqA,seqB),seqC),seqD);", alignment=fasta_txt, alg_format="fasta")
#We can now access the sequence of every leaf node
print "These are the nodes and its sequences:"
for leaf in t.iter_leaves():
print leaf.name, leaf.sequence
#seqD MAEAPDETIQQFMALTNVSHNIAVQYLSEFGDLNEAL--------------REEAH
#seqC MAEIPDATIQ---ALTNVSHNIAVQYLSEFGDLNEALNSYYASQTDDQPDRREEAH
#seqA MAEIPDETIQQFMALT---HNIAVQYLSEFGDLNEALNSYYASQTDDIKDRREEAH
#seqB MAEIPDATIQQFMALTNVSHNIAVQY--EFGDLNEALNSYYAYQTDDQKDRREEAH
#
# The associated alignment can be changed at any time
t.link_to_alignment(alignment=iphylip_txt, alg_format="iphylip")
# Let's check that sequences have changed
print "These are the nodes and its re-linked sequences:"
for leaf in t.iter_leaves():
print leaf.name, leaf.sequence
#seqD MAEAPDETIQQFMALTNVSHNIAVQYLSEFGDLNEAL--------------REEAHQ----------FMALTNVSH
#seqC MAEIPDATIQ---ALTNVSHNIAVQYLSEFGDLNEALNSYYASQTDDQPDRREEAHQFMALTNVSH----------
#seqA MAEIPDETIQQFMALT---HNIAVQYLSEFGDLNEALNSYYASQTDDIKDRREEAHQFMALTNVSHQFMALTNVSH
#seqB MAEIPDATIQQFMALTNVSHNIAVQY--EFGDLNEALNSYYAYQTDDQKDRREEAHQFMALTNVSH----------
#
# The sequence attribute is considered as node feature, so you can
# even include sequences in your extended newick format!
print t.write(features=["sequence"], format=9)
#
#
# (((seqA[&&NHX:sequence=MAEIPDETIQQFMALT---HNIAVQYLSEFGDLNEALNSYYASQTDDIKDRREEAHQF
# MALTNVSHQFMALTNVSH],seqB[&&NHX:sequence=MAEIPDATIQQFMALTNVSHNIAVQY--EFGDLNEALNSY
# YAYQTDDQKDRREEAHQFMALTNVSH----------]),seqC[&&NHX:sequence=MAEIPDATIQ---ALTNVSHNIA
# VQYLSEFGDLNEALNSYYASQTDDQPDRREEAHQFMALTNVSH----------]),seqD[&&NHX:sequence=MAEAPD
# ETIQQFMALTNVSHNIAVQYLSEFGDLNEAL--------------REEAHQ----------FMALTNVSH]);
#
# And yes, you can save this newick text and reload it into a PhyloTree instance.
sametree = PhyloTree(t.write(features=["sequence"]))
print "Recovered tree with sequence features:"
print sametree
#
# /-seqA
# /--------|
# /--------| \-seqB
# | |
#---------| \-seqC
# |
# \-seqD
#
print "seqA sequence:", (t&"seqA").sequence
# MAEIPDETIQQFMALT---HNIAVQYLSEFGDLNEALNSYYASQTDDIKDRREEAHQFMALTNVSHQFMALTNVSH
| gpl-3.0 |
jhaux/tensorflow | tensorflow/python/platform/flags.py | 85 | 4773 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of the flags interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse as _argparse
from tensorflow.python.util.all_util import remove_undocumented
_global_parser = _argparse.ArgumentParser()


# pylint: disable=invalid-name
class _FlagValues(object):
  """Global container and accessor for flags and their values."""

  def __init__(self):
    # Write through __dict__ directly so our own __setattr__ (which may
    # trigger a parse) is bypassed during construction.
    self.__dict__['__flags'] = {}
    self.__dict__['__parsed'] = False

  def _parse_flags(self, args=None):
    """Parses known flags from ``args`` (or sys.argv) into the store.

    Returns the list of arguments argparse could not recognize.
    """
    parsed, leftover = _global_parser.parse_known_args(args=args)
    self.__dict__['__flags'].update(vars(parsed))
    self.__dict__['__parsed'] = True
    return leftover

  def __getattr__(self, name):
    """Retrieves the 'value' attribute of the flag --name."""
    if not self.__dict__['__parsed']:
      self._parse_flags()
    try:
      return self.__dict__['__flags'][name]
    except KeyError:
      raise AttributeError(name)

  def __setattr__(self, name, value):
    """Sets the 'value' attribute of the flag --name."""
    if not self.__dict__['__parsed']:
      self._parse_flags()
    self.__dict__['__flags'][name] = value
def _define_helper(flag_name, default_value, docstring, flagtype):
  """Registers 'flag_name' with 'default_value' and 'docstring'.

  Values passed on the command line are coerced with ``flagtype``.
  """
  _global_parser.add_argument(
      '--' + flag_name,
      type=flagtype,
      default=default_value,
      help=docstring)
# Provides the global object that can be used to access flags.
# Attribute access triggers a lazy parse of sys.argv (see _FlagValues).
FLAGS = _FlagValues()
def DEFINE_string(flag_name, default_value, docstring):
  """Registers a flag whose value is parsed as a string.

  Args:
    flag_name: Name of the flag, without the leading dashes.
    default_value: String value used when the flag is absent from argv.
    docstring: Help text shown for the flag.
  """
  _define_helper(flag_name, default_value, docstring, flagtype=str)


def DEFINE_integer(flag_name, default_value, docstring):
  """Registers a flag whose value is parsed as an int.

  Args:
    flag_name: Name of the flag, without the leading dashes.
    default_value: Integer value used when the flag is absent from argv.
    docstring: Help text shown for the flag.
  """
  _define_helper(flag_name, default_value, docstring, flagtype=int)
def DEFINE_boolean(flag_name, default_value, docstring):
  """Registers a flag whose value is parsed as a boolean.

  Args:
    flag_name: Name of the flag, without the leading dashes.
    default_value: Boolean value used when the flag is absent from argv.
    docstring: Help text shown for the flag.
  """

  # Custom converter so that '--flag=True' (and not only bare '--flag')
  # behaves as expected; plain bool('False') would be truthy.
  def str2bool(v):
    return v.lower() in ('true', 't', '1')

  _global_parser.add_argument(
      '--' + flag_name,
      nargs='?',
      const=True,
      default=default_value,
      type=str2bool,
      help=docstring)

  # Add negated version, stay consistent with argparse with regard to
  # dashes in flag names.
  _global_parser.add_argument(
      '--no' + flag_name,
      action='store_false',
      dest=flag_name.replace('-', '_'))


# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean  # pylint: disable=invalid-name
def DEFINE_float(flag_name, default_value, docstring):
  """Registers a flag whose value is parsed as a float.

  Args:
    flag_name: Name of the flag, without the leading dashes.
    default_value: Float value used when the flag is absent from argv.
    docstring: Help text shown for the flag.
  """
  _define_helper(flag_name, default_value, docstring, flagtype=float)


_allowed_symbols = [
    # We rely on gflags documentation.
    'DEFINE_bool',
    'DEFINE_boolean',
    'DEFINE_float',
    'DEFINE_integer',
    'DEFINE_string',
    'FLAGS',
]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
orangeduck/PyAutoC | Python27/Lib/distutils/extension.py | 250 | 10904 | """distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
    """Just a collection of attributes that describes an extension
    module and everything needed to build it (hopefully in a portable
    way, but there are hooks that let you be as unportable as you need).

    Instance attributes:
      name : string
        the full name of the extension, including any packages -- ie.
        *not* a filename or pathname, but Python dotted name
      sources : [string]
        list of source filenames, relative to the distribution root
        (where the setup script lives), in Unix form (slash-separated)
        for portability.  Source files may be C, C++, SWIG (.i),
        platform-specific resource files, or whatever else is recognized
        by the "build_ext" command as source for a Python extension.
      include_dirs : [string]
        list of directories to search for C/C++ header files (in Unix
        form for portability)
      define_macros : [(name : string, value : string|None)]
        list of macros to define; each macro is defined using a 2-tuple,
        where 'value' is either the string to define it to or None to
        define it without a particular value (equivalent of "#define
        FOO" in source or -DFOO on Unix C compiler command line)
      undef_macros : [string]
        list of macros to undefine explicitly
      library_dirs : [string]
        list of directories to search for C/C++ libraries at link time
      libraries : [string]
        list of library names (not filenames or paths) to link against
      runtime_library_dirs : [string]
        list of directories to search for C/C++ libraries at run time
        (for shared extensions, this is when the extension is loaded)
      extra_objects : [string]
        list of extra files to link with (eg. object files not implied
        by 'sources', static library that must be explicitly specified,
        binary resource files, etc.)
      extra_compile_args : [string]
        any extra platform- and compiler-specific information to use
        when compiling the source files in 'sources'.  For platforms and
        compilers where "command line" makes sense, this is typically a
        list of command-line arguments, but for other platforms it could
        be anything.
      extra_link_args : [string]
        any extra platform- and compiler-specific information to use
        when linking object files together to create the extension (or
        to create a new static Python interpreter).  Similar
        interpretation as for 'extra_compile_args'.
      export_symbols : [string]
        list of symbols to be exported from a shared extension.  Not
        used on all platforms, and not generally necessary for Python
        extensions, which typically export exactly one symbol: "init" +
        extension_name.
      swig_opts : [string]
        any extra options to pass to SWIG if a source file has the .i
        extension.
      depends : [string]
        list of files that the extension depends on
      language : string
        extension language (i.e. "c", "c++", "objc"). Will be detected
        from the source extensions if not provided.
    """

    # When adding arguments to this constructor, be sure to update
    # setup_keywords in core.py.
    def __init__ (self, name, sources,
                  include_dirs=None,
                  define_macros=None,
                  undef_macros=None,
                  library_dirs=None,
                  libraries=None,
                  runtime_library_dirs=None,
                  extra_objects=None,
                  extra_compile_args=None,
                  extra_link_args=None,
                  export_symbols=None,
                  swig_opts = None,
                  depends=None,
                  language=None,
                  **kw                      # To catch unknown keywords
                 ):
        # Python 2 type checks: StringType/ListType come from the
        # module-level 'from types import *'.
        assert type(name) is StringType, "'name' must be a string"
        assert (type(sources) is ListType and
                map(type, sources) == [StringType]*len(sources)), \
                "'sources' must be a list of strings"

        self.name = name
        self.sources = sources
        # 'or []' gives each instance its own fresh list, avoiding the
        # shared-mutable-default pitfall.
        self.include_dirs = include_dirs or []
        self.define_macros = define_macros or []
        self.undef_macros = undef_macros or []
        self.library_dirs = library_dirs or []
        self.libraries = libraries or []
        self.runtime_library_dirs = runtime_library_dirs or []
        self.extra_objects = extra_objects or []
        self.extra_compile_args = extra_compile_args or []
        self.extra_link_args = extra_link_args or []
        self.export_symbols = export_symbols or []
        self.swig_opts = swig_opts or []
        self.depends = depends or []
        self.language = language

        # If there are unknown keyword options, warn about them
        if len(kw):
            L = kw.keys() ; L.sort()
            L = map(repr, L)
            msg = "Unknown Extension options: " + string.join(L, ', ')
            if warnings is not None:
                warnings.warn(msg)
            else:
                sys.stderr.write(msg + '\n')

# class Extension
def read_setup_file (filename):
    """Parse a makesetup-style Setup file.

    Returns a list of Extension instances, one per module line in the
    file.  "VAR = VALUE" assignments are expanded into the module lines
    before the lines are tokenized.
    """
    from distutils.sysconfig import \
         parse_makefile, expand_makefile_vars, _variable_rx
    from distutils.text_file import TextFile
    from distutils.util import split_quoted

    # First pass over the file to gather "VAR = VALUE" assignments.
    vars = parse_makefile(filename)

    # Second pass to gobble up the real content: lines of the form
    #   <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
    file = TextFile(filename,
                    strip_comments=1, skip_blanks=1, join_lines=1,
                    lstrip_ws=1, rstrip_ws=1)
    try:
        extensions = []

        while 1:
            line = file.readline()
            if line is None:                # eof
                break
            if _variable_rx.match(line):    # VAR=VALUE, handled in first pass
                continue

            if line[0] == line[-1] == "*":
                file.warn("'%s' lines not handled yet" % line)
                continue

            line = expand_makefile_vars(line, vars)
            words = split_quoted(line)

            # NB. this parses a slightly different syntax than the old
            # makesetup script: here, there must be exactly one extension per
            # line, and it must be the first word of the line.
            module = words[0]
            ext = Extension(module, [])
            append_next_word = None

            for word in words[1:]:
                if append_next_word is not None:
                    append_next_word.append(word)
                    append_next_word = None
                    continue

                suffix = os.path.splitext(word)[1]
                switch = word[0:2] ; value = word[2:]

                if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
                    # hmm, should we do something about C vs. C++ sources?
                    # or leave it up to the CCompiler implementation to
                    # worry about?
                    ext.sources.append(word)
                elif switch == "-I":
                    ext.include_dirs.append(value)
                elif switch == "-D":
                    equals = string.find(value, "=")
                    if equals == -1:        # bare "-DFOO" -- no value
                        ext.define_macros.append((value, None))
                    else:                   # "-DFOO=blah"
                        # BUGFIX: was value[equals+2:], which silently
                        # dropped the first character of the macro value.
                        ext.define_macros.append((value[0:equals],
                                                  value[equals+1:]))
                elif switch == "-U":
                    ext.undef_macros.append(value)
                elif switch == "-C":        # only here 'cause makesetup has it!
                    ext.extra_compile_args.append(word)
                elif switch == "-l":
                    ext.libraries.append(value)
                elif switch == "-L":
                    ext.library_dirs.append(value)
                elif switch == "-R":
                    ext.runtime_library_dirs.append(value)
                elif word == "-rpath":
                    append_next_word = ext.runtime_library_dirs
                elif word == "-Xlinker":
                    append_next_word = ext.extra_link_args
                elif word == "-Xcompiler":
                    append_next_word = ext.extra_compile_args
                elif switch == "-u":
                    ext.extra_link_args.append(word)
                    if not value:
                        append_next_word = ext.extra_link_args
                # NOTE: a second, identical set of "-Xcompiler"/"-u"
                # branches used to follow here; they were unreachable
                # dead code and have been removed.
                elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
                    # NB. a really faithful emulation of makesetup would
                    # append a .o file to extra_objects only if it
                    # had a slash in it; otherwise, it would s/.o/.c/
                    # and append it to sources.  Hmmmm.
                    ext.extra_objects.append(word)
                else:
                    file.warn("unrecognized argument '%s'" % word)

            extensions.append(ext)
    finally:
        file.close()

    return extensions

# read_setup_file ()
| bsd-2-clause |
spoqa/geofront-cli | geofrontcli/cli.py | 1 | 15567 | """:mod:`geofrontcli.cli` --- CLI main
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
import argparse
import logging
import os
import os.path
import subprocess
import sys
import webbrowser
from dirspec.basedir import load_config_paths, save_config_path
from iterfzf import iterfzf
from logging_spinner import SpinnerHandler, UserWaitingFilter
from six.moves import input
from .client import (REMOTE_PATTERN, Client, ExpiredTokenIdError,
NoTokenIdError, ProtocolVersionError, RemoteError,
TokenIdError, UnfinishedAuthenticationError)
from .key import PublicKey
from .version import VERSION
# XDG config resource name and the filename that stores the server URL.
CONFIG_RESOURCE = 'geofront-cli'
SERVER_CONFIG_FILENAME = 'server'
# Command used to locate executables on PATH ('where' on Windows).
WHICH_CMD = 'where' if sys.platform == 'win32' else 'which'

# Best-effort autodetection of the local ssh/scp clients; None when absent
# (then the corresponding CLI option becomes required).
SSH_PROGRAM = None
try:
    SSH_PROGRAM = subprocess.check_output([WHICH_CMD, 'ssh']).strip() or None
except subprocess.CalledProcessError:
    pass

SCP_PROGRAM = None
try:
    SCP_PROGRAM = subprocess.check_output([WHICH_CMD, 'scp']).strip() or None
except subprocess.CalledProcessError:
    pass

parser = argparse.ArgumentParser(description='Geofront client utility')
parser.add_argument(
    '-S', '--ssh',
    default=SSH_PROGRAM,
    required=not SSH_PROGRAM,
    help='ssh client to use' + (' [%(default)s]' if SSH_PROGRAM else '')
)
parser.add_argument('-d', '--debug', action='store_true', help='debug mode')
parser.add_argument('-v', '--version', action='version',
                    version='%(prog)s ' + VERSION)
# Container all subcommands register themselves into (see subparser()).
subparsers = parser.add_subparsers()
def get_server_url():
    """Return the configured Geofront server URL.

    Scans every XDG config directory for the server file and returns
    the first URL found; exits the program with a hint when the URL
    has not been configured yet.
    """
    for base in load_config_paths(CONFIG_RESOURCE):
        candidate = os.path.join(base.decode(), SERVER_CONFIG_FILENAME)
        if not os.path.isfile(candidate):
            continue
        with open(candidate) as f:
            return f.read().strip()
    parser.exit('Geofront server URL is not configured yet.\n'
                'Try `{0} start` command.'.format(parser.prog))


def get_client():
    """Create a Client bound to the configured Geofront server."""
    return Client(get_server_url())
def subparser(function):
    """Register *function* as a CLI subcommand and return its parser.

    The original callable is kept on the parser as the ``call``
    attribute so other commands can invoke it directly.
    """
    command = subparsers.add_parser(function.__name__,
                                    description=function.__doc__)
    command.set_defaults(function=function)
    command.call = function
    return command
@subparser
def start(args):
    """Set up the Geofront server URL."""
    # Refuse to clobber an existing configuration unless --force is given.
    for path in load_config_paths(CONFIG_RESOURCE):
        path = os.path.join(path.decode(), SERVER_CONFIG_FILENAME)
        if os.path.isfile(path):
            message = 'Geofront server URL is already configured: ' + path
            if args.force:
                print(message + '; overwriting...', file=sys.stderr)
            else:
                parser.exit(message)
    # Prompt until a syntactically valid (and preferably https) URL is given.
    while True:
        server_url = input('Geofront server URL: ')
        if not server_url.startswith(('https://', 'http://')):
            print(server_url, 'is not a valid url.')
            continue
        elif not server_url.startswith('https://'):
            cont = input('It is not a secure URL. '
                         'https:// is preferred over http://. '
                         'Continue (y/N)? ')
            if cont.strip().lower() != 'y':
                continue
        break
    server_config_filename = os.path.join(
        save_config_path(CONFIG_RESOURCE).decode(),
        SERVER_CONFIG_FILENAME
    )
    with open(server_config_filename, 'w') as f:
        print(server_url, file=f)
    # Immediately continue with authentication against the new server.
    authenticate.call(args)

start.add_argument('-f', '--force',
                   action='store_true',
                   help='overwrite the server url configuration')
@subparser
def authenticate(args):
    """Authenticate to Geofront server."""
    client = get_client()
    # Repeat the browser round-trip until the server reports the
    # authentication as finished.
    while True:
        with client.authenticate() as url:
            if args.open_browser:
                print('Continue to authenticate in your web browser...')
                webbrowser.open(url)
            else:
                print('Continue to authenticate in your web browser:')
                print(url)
            input('Press return to continue')
        try:
            client.identity
        except UnfinishedAuthenticationError as e:
            print(str(e))
        else:
            break
    # Offer to register the user's default public key if it is not yet known
    # to the server.
    home = os.path.expanduser('~')
    ssh_dir = os.path.join(home, '.ssh')
    if os.path.isdir(ssh_dir):
        for name in 'id_rsa.pub', 'id_dsa.pub':
            pubkey_path = os.path.join(ssh_dir, name)
            if os.path.isfile(pubkey_path):
                with open(pubkey_path) as f:
                    public_key = PublicKey.parse_line(f.read())
                break
        else:
            # for-else: no public key file was found
            public_key = None
        if public_key and public_key.fingerprint not in client.public_keys:
            print('You have a public key ({0}), and it is not registered '
                  'to the Geofront server ({1}).'.format(pubkey_path,
                                                         client.server_url))
            while True:
                register = input('Would you register the public key to '
                                 'the Geofront server (Y/n)? ').strip()
                if register.lower() in ('', 'y', 'n'):
                    break
                print('{0!r} is an invalid answer.'.format(register))
            if register.lower() != 'n':
                try:
                    client.public_keys[public_key.fingerprint] = public_key
                except ValueError as e:
                    print(e, file=sys.stderr)
                    if args.debug:
                        raise
@subparser
def keys(args):
    """List registered public keys."""
    client = get_client()
    for fingerprint, key in client.public_keys.items():
        # args.fingerprint defaults to True; -v flips it to show full keys.
        print(fingerprint if args.fingerprint else key)

keys.add_argument(
    '-v', '--verbose',
    dest='fingerprint',
    action='store_false',
    help='print public keys with OpenSSH authorized_keys format instead of '
         'fingerprints'
)
@subparser
def masterkey(args):
    """Show the current master key."""
    master_key = get_client().master_key
    # args.fingerprint defaults to True; -v flips it to show the full key.
    print(master_key.fingerprint if args.fingerprint else master_key)

masterkey.add_argument(
    '-v', '--verbose',
    dest='fingerprint',
    action='store_false',
    help='print the master key with OpenSSH authorized_keys format instead of '
         'its fingerprint'
)
def align_remote_list(remotes):
    """Yield ``'alias address'`` lines with aliases padded to equal width.

    Lines come out in alias-sorted order, and the default SSH port
    suffix (``':22'``) is stripped from addresses for brevity.
    """
    width = max(len(alias) for alias in remotes) if remotes else 0
    for alias, address in sorted(remotes.items()):
        if address.endswith(':22'):
            address = address[:-3]
        yield '{0:{1}} {2}'.format(alias, width, address)
@subparser
def remotes(args):
    """List available remotes."""
    remote_map = get_client().remotes
    # args.alias defaults to True; -v flips it to include addresses.
    if args.alias:
        for alias in sorted(remote_map):
            print(alias)
    else:
        for line in align_remote_list(remote_map):
            print(line)

remotes.add_argument(
    '-v', '--verbose',
    dest='alias',
    action='store_false',
    help='print remote aliases with their actual addresses, not only aliases'
)
@subparser
def authorize(args, alias=None):
    """Temporarily authorize you to access the given remote.
    A made authorization keeps alive in a minute, and then will be expired.
    """
    client = get_client()
    while True:
        try:
            remote = client.authorize(alias or args.remote)
        except RemoteError as e:
            print(e, file=sys.stderr)
            if args.debug:
                raise
            # NOTE(review): without --debug this loops back and retries the
            # same remote immediately; a persistent RemoteError would spin
            # forever — confirm whether an exit/break is intended here.
        except TokenIdError:
            # Token missing/expired: re-authenticate, then retry the loop.
            print('Authentication required.', file=sys.stderr)
            authenticate.call(args)
        else:
            break
    return remote

authorize.add_argument(
    'remote',
    help='the remote alias to authorize you to access'
)
def get_ssh_options(remote):
    """Translate *remote* into a list of :program:`ssh` arguments.

    ``'user@host'`` becomes ``['-l', 'user', 'host']``; a port suffix is
    supported as well, e.g. ``'host:22'`` becomes ``['-p', '22', 'host']``.

    Raises :exc:`ValueError` when *remote* does not match the expected
    ``[user@]host[:port]`` format.
    """
    match = REMOTE_PATTERN.match(remote)
    if match is None:
        raise ValueError('invalid remote format: ' + str(remote))
    options = []
    user = match.group('user')
    if user:
        options += ['-l', user]
    port = match.group('port')
    if port:
        options += ['-p', port]
    options.append(match.group('host'))
    return options
@subparser
def colonize(args):
    """Make the given remote to allow the current master key.
    It is equivalent to ``geofront-cli masterkey -v > /tmp/master_id_rsa &&
    ssh-copy-id -i /tmp/master_id_rsa REMOTE``.
    """
    client = get_client()
    # Resolve the alias to an address; fall back to the literal argument.
    remote = client.remotes.get(args.remote, args.remote)
    try:
        options = get_ssh_options(remote)
    except ValueError as e:
        colonize.error(str(e))
    cmd = [args.ssh]
    if args.identity_file:
        cmd.extend(['-i', args.identity_file])
    cmd.extend(options)
    # Append the master key to the remote's authorized_keys.
    # NOTE(review): '&>' is a bash-ism; on a remote whose login shell is
    # plain POSIX sh this redirection may not behave as intended — verify.
    # NOTE(review): repr() is used for shell quoting of the key; confirm it
    # is safe for all key material the server can produce.
    cmd.extend([
        'mkdir', '~/.ssh', '&>', '/dev/null', '||', 'true', ';',
        'echo', repr(str(client.master_key)),
        '>>', '~/.ssh/authorized_keys'
    ])
    subprocess.call(cmd)

colonize.add_argument(
    '-i',
    dest='identity_file',
    help='identity file to use. it will be forwarded to the same option '
         'of the ssh program if used'
)
colonize.add_argument('remote', help='the remote alias to colonize')
@subparser
def ssh(args, alias=None):
    """SSH to the remote through Geofront's temporary authorization."""
    remote = authorize.call(args, alias=alias)
    try:
        options = get_ssh_options(remote)
    except ValueError as e:
        ssh.error(str(e))
    if args.jump_host:
        # BUGFIX: was 'ProxyJump=={}' — the doubled equals sign produced an
        # invalid OpenSSH option value (jump host named '=host').
        options.extend(['-o', 'ProxyJump={}'.format(args.jump_host)])
    subprocess.call([args.ssh] + options)

ssh.add_argument('remote', help='the remote alias to ssh')
def parse_scp_path(path, args):
    """Split a ``remote:path`` argument into ``(remote, path)``.

    Local paths (no colon) yield ``(None, path)``; otherwise the remote
    alias is authorized first and its resolved address is returned.
    """
    head, sep, tail = path.partition(':')
    if not sep:
        return None, path
    return authorize.call(args, alias=head), tail
@subparser
def scp(args):
    """Copy files from/to the remote through Geofront's temporary
    authorization (a thin wrapper around the scp program).
    """
    options = []
    src_remote, src_path = parse_scp_path(args.source, args)
    dst_remote, dst_path = parse_scp_path(args.destination, args)
    # Exactly one endpoint must be remote.
    if src_remote and dst_remote:
        scp.error('source and destination cannot be both '
                  'remote paths at a time')
    elif not (src_remote or dst_remote):
        scp.error('one of source and destination has to be a remote path')
    if args.ssh:
        options.extend(['-S', args.ssh])
    if args.recursive:
        options.append('-r')
    if args.jump_host:
        # BUGFIX: was 'ProxyJump=={}' — the doubled equals sign produced an
        # invalid OpenSSH option value (jump host named '=host').
        options.extend(['-o', 'ProxyJump={}'.format(args.jump_host)])
    remote = src_remote or dst_remote
    remote_match = REMOTE_PATTERN.match(remote)
    if not remote_match:
        raise ValueError('invalid remote format: ' + str(remote))
    port = remote_match.group('port')
    if port:
        # scp uses -P (capital), unlike ssh's -p.
        options.extend(['-P', port])
    host = remote_match.group('host')
    user = remote_match.group('user')
    if user:
        host = user + '@' + host
    if src_remote:
        options.append(host + ':' + src_path)
    else:
        options.append(src_path)
    if dst_remote:
        options.append(host + ':' + dst_path)
    else:
        options.append(dst_path)
    subprocess.call([args.scp] + options)

scp.add_argument(
    '--scp',
    default=SCP_PROGRAM,
    required=not SCP_PROGRAM,
    help='scp client to use' + (' [%(default)s]' if SCP_PROGRAM else '')
)
scp.add_argument(
    '-r', '-R', '--recursive',
    action='store_true',
    help='recursively copy entire directories'
)
scp.add_argument('source', help='the source path to copy')
scp.add_argument('destination', help='the destination path')
@subparser
def go(args):
    """Select a remote and SSH to it at once (in interactive way)."""
    remote_map = get_client().remotes
    # Let the user fuzzy-pick an 'alias address' line; None means aborted.
    chosen = iterfzf(align_remote_list(remote_map))
    if chosen is not None:
        ssh.call(args, alias=chosen.split()[0])
# Options shared by every subcommand that may trigger authentication or an
# ssh connection.
for p in authenticate, authorize, start, ssh, scp, go:
    p.add_argument(
        '-O', '--no-open-browser',
        dest='open_browser',
        action='store_false',
        help='do not open the authentication web page using browser. '
             'instead print the url to open'
    )
    p.add_argument(
        '-J', '--jump-host',
        default=None,
        help='Proxy jump host to use'
    )
def fix_mac_codesign():
    """If the running Python interpreter isn't properly signed on macOS
    it's unable to get/set password using keyring from Keychain.

    In such case, we need to sign the interpreter first, then respawn
    the process so the signature takes effect.

    https://github.com/jaraco/keyring/issues/219

    """
    logger = logging.getLogger(__name__ + '.fix_mac_codesign')
    p = subprocess.Popen(['codesign', '-dvvvvv', sys.executable],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()

    def prepend_lines(c, text):
        # Prefix every line of *text* with *c* for readable multi-line logs.
        if not isinstance(text, str):
            text = text.decode()
        return ''.join(c + l for l in text.splitlines(True))

    logger.debug('codesign -dvvvvv %s:\n%s\n%s',
                 sys.executable,
                 prepend_lines('| ', stdout),
                 prepend_lines('> ', stderr))
    # codesign reports signature details on stderr.
    if b'\nSignature=' in stderr:
        logger.debug('%s: already signed', sys.executable)
        return
    logger.info('%s: not signed yet; try signing...', sys.executable)
    p = subprocess.Popen(['codesign', '-f', '-s', '-', sys.executable],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # BUGFIX: the original reaped this child with os.waitpid() without ever
    # reading its PIPEs (a potential deadlock if they fill), and then logged
    # the *previous* command's captured stdout/stderr.  communicate() both
    # reaps the child and captures its actual output.  (A no-op
    # 'global fix_mac_codesign' declaration was also removed.)
    stdout, stderr = p.communicate()
    logger.debug('%s: signed\n%s\n%s',
                 sys.executable,
                 prepend_lines('| ', stdout),
                 prepend_lines('> ', stderr))
    logger.debug('respawn the equivalent process...')
    raise SystemExit(subprocess.call(sys.argv))
def main(args=None):
    """CLI entry point: parse arguments, set up logging, dispatch command."""
    args = parser.parse_args(args)
    log_handler = logging.StreamHandler(sys.stdout)
    log_handler.addFilter(UserWaitingFilter())
    spinner_handler = SpinnerHandler(sys.stdout)
    local = logging.getLogger('geofrontcli')
    if args.debug:
        # Debug: surface everything through the root logger as well.
        root = logging.getLogger()
        root.setLevel(logging.INFO)
        root.addHandler(log_handler)
        local.setLevel(logging.DEBUG)
    else:
        local.setLevel(logging.INFO)
        # NOTE(review): handler placement reconstructed from a dump with
        # stripped indentation; confirm whether these two handlers belong
        # inside the non-debug branch or always apply.
        local.addHandler(log_handler)
        local.addHandler(spinner_handler)
    if sys.platform == 'darwin':
        fix_mac_codesign()
    # Subcommands register themselves via set_defaults(function=...); with
    # no subcommand given, 'function' is absent.
    if getattr(args, 'function', None):
        try:
            args.function(args)
        except NoTokenIdError:
            parser.exit('Not authenticated yet.\n'
                        'Try `{0} authenticate` command.'.format(parser.prog))
        except ExpiredTokenIdError:
            parser.exit('Authentication renewal required.\n'
                        'Try `{0} authenticate` command.'.format(parser.prog))
        except ProtocolVersionError as e:
            parser.exit('geofront-cli seems incompatible with the server.\n'
                        'Try `pip install --upgrade geofront-cli` command.\n'
                        'The server version is {0}.'.format(e.server_version))
    else:
        parser.print_usage()


def main_go():
    """Console-script entry point that runs the `go` subcommand directly."""
    parser.prog = 'geofront-cli'
    main(['go'])
| gpl-3.0 |
jakespringer/angr_ctf | solutions/17_angr_arbitrary_jump/scaffold17.py | 3 | 5173 | import angr
import claripy
import sys
def main(argv):
path_to_binary = argv[1]
project = angr.Project(path_to_binary)
initial_state = ???
# An under-constrained (unconstrained) state occurs when there are too many
# possible branches from a single instruction. This occurs, among other ways,
# when the instruction pointer (on x86, eip) is completely symbolic, meaning
# that user input can control the address of code the computer executes.
# For example, imagine the following pseudo assembly:
#
# mov user_input, eax
# jmp eax
#
# The value of what the user entered dictates the next instruction. This
# is an unconstrained state. It wouldn't usually make sense for the execution
# engine to continue. (Where should the program jump to if eax could be
# anything?) Normally, when Angr encounters an unconstrained state, it throws
# it out. In our case, we want to exploit the unconstrained state to jump to
# a location of our choosing. We will get to how to disable Angr's default
# behavior later. For now, test if a state is vulnerable by checking if we
# can set the instruction pointer to the address of print_good in the binary.
# (!)
def check_vulnerable(state):
# Reimplement me!
return False
# The save_unconstrained=True parameter specifies to Angr to not throw out
# unconstrained states. Instead, it will move them to the list called
# 'simulation.unconstrained'.
simulation = project.factory.simgr(initial_state, save_unconstrained=True)
# Explore will not work for us, since the method specified with the 'find'
# parameter will not be called on an unconstrained state. Instead, we want to
# explore the binary ourselves.
# To get started, construct an exit condition to know when we've found a
# solution. We will later be able to move states from the unconstrained list
# to the simulation.found list. Alternatively, you can create a boolean value
# that serves the same purpose.
def has_found_solution():
return len(simulation.found) > 0
# Check if there are still unconstrained states left to check. Once we
# determine a given unconstrained state is not exploitable, we can throw it
# out. Use the simulation.unconstrained list.
# (!)
def has_unconstrained_to_check():
# Reimplement me!
pass
# The list simulation.active is a list of all states that can be explored
# further.
# (!)
def has_active():
    # States in simulation.active can still be stepped forward; exploration
    # should continue while any exist. (The original stub returned None.)
    return len(simulation.active) > 0
while (has_active() or has_unconstrained_to_check()) and (not has_found_solution()):
    # Classify every unconstrained state produced so far: it is either our
    # exploit (symbolic eip we can aim at print_good) or it is useless.
    # Iterate over a copy because simulation.move mutates the stash.
    for unconstrained_state in list(simulation.unconstrained):
        if check_vulnerable(unconstrained_state):
            # Keep the exploitable state: park it in the 'found' stash so
            # has_found_solution() terminates the outer loop.
            def should_move(state):
                return state is unconstrained_state
            simulation.move('unconstrained', 'found', filter_func=should_move)
        else:
            # Not exploitable; discard it into a throw-away stash so
            # has_unconstrained_to_check() stops reporting it.
            def should_move(state):
                return state is unconstrained_state
            simulation.move('unconstrained', 'not_needed', filter_func=should_move)
    # Advance the simulation.
    simulation.step()
if simulation.found:
solution_state = simulation.found[0]
# Ensure that every printed byte is within the acceptable ASCII range (A..Z)
for byte in solution_state.posix.files[sys.stdin.fileno()].all_bytes().chop(bits=8):
solution_state.add_constraints(byte >= ???, byte <= ???)
# Constrain the instruction pointer to target the print_good function and
# then solve for the user input (recall that this is
# 'solution_state.posix.dumps(sys.stdin.fileno())')
# (!)
...
solution = ???
print solution
else:
raise Exception('Could not find the solution')
if __name__ == '__main__':
    # Script entry point; main() is defined earlier in this file.
    main(sys.argv)
| gpl-3.0 |
harterj/moose | framework/scripts/build_coverage.py | 12 | 15165 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os, sys, argparse, string, subprocess, time, re
from argparse import RawTextHelpFormatter
def buildCMD(options):
    """Assemble and run the lcov/genhtml/rsync command pipeline for the
    selected --mode (initialize | generate | combine | sync), then run the
    post-processing step and the optional coverage verification.

    Each entry appended to tmp_cmd is one argv list handed to runCMD()
    (or only printed when --dryrun is active).
    """
    tmp_cmd = []
    # Store any additional directories supplied in a temp list
    # (one '--directory <cwd>/<app>/src' pair per extra application).
    if options.mode != 'combine' and options.mode != 'sync':
        tmp_additional_directories = str(' '.join(['--directory ' + os.getcwd() + '/' + app + '/src' for app in options.application[1:]])).split()
    # Initialize Mode (run initialize mode before run_tests, or application)
    if options.mode == 'initialize':
        if len(options.application) > 1:
            tmp_cmd.append([options.lcov_command[0],
                            '--gcov-tool', options.cov_tool,
                            '--capture',
                            '--initial',
                            '--directory', os.getcwd() + '/' + options.application[0],
                            '--output-file', os.getcwd() + '/initialize.info'
                            ])
            # First (and only) command so far, so index 0 is the capture cmd.
            tmp_cmd[0].extend(tmp_additional_directories)
        else:
            tmp_cmd.append([options.lcov_command[0],
                            '--gcov-tool', options.cov_tool,
                            '--capture',
                            '--initial',
                            '--directory', os.getcwd() + '/' + options.application[0],
                            '--output-file', os.getcwd() + '/initialize.info'
                            ])
    # Generate Mode (run generate mode only after initialize mode, run_tests/application has ran)
    if options.mode == 'generate':
        if len(options.application) > 1:
            tmp_cmd.append([options.lcov_command[0],
                            '--gcov-tool', options.cov_tool,
                            '--directory', os.getcwd() + '/' + options.application[0],
                            '--capture',
                            '--ignore-errors', 'gcov,source',
                            '--output-file', os.getcwd() + '/covered.info'
                            ])
            tmp_cmd[0].extend(tmp_additional_directories)
        else:
            tmp_cmd.append([options.lcov_command[0],
                            '--gcov-tool', options.cov_tool,
                            '--directory', os.getcwd() + '/' + options.application[0],
                            '--capture',
                            '--ignore-errors', 'gcov,source',
                            '--output-file', os.getcwd() + '/covered.info'
                            ])
        # Build lcov combine command (baseline + covered -> combined)
        tmp_cmd.append([options.lcov_command[0],
                        '--gcov-tool', options.cov_tool,
                        '--add-tracefile', os.getcwd() + '/initialize.info',
                        '--add-tracefile', os.getcwd() + '/covered.info',
                        '--output-file', os.getcwd() + '/combined.info' ])
        # Build lcov filter command: keep only this application's own
        # src/include/header_symlinks entries in the final tracefile.
        tmp_cmd.append([options.lcov_command[0],
                        '--gcov-tool', options.cov_tool,
                        '--extract', os.getcwd() + '/combined.info', '*' + options.application[0] + '/src*',
                        '--extract', os.getcwd() + '/combined.info', '*' + options.application[0] + '/include*',
                        '--extract', os.getcwd() + '/combined.info', '*' + options.application[0] + '/build/header_symlinks*',
                        '--output-file', options.outfile ])
        # Build genhtml command if --generate-html was used
        if options.generate_html:
            tmp_cmd.append([options.genhtml_command[0], options.outfile,
                            '--title', options.title + ' Test Coverage',
                            '--num-spaces', '2',
                            '--legend',
                            '--no-branch-coverage',
                            '--output-directory', options.html_location])
        # Clean up old tracefiles if asked
        if options.cleanup:
            tmp_cmd.append(['rm', '-f', os.getcwd() + '/initialize.info', os.getcwd() + '/covered.info', os.getcwd() + '/combined.info'])
    # Combine Mode
    if options.mode == 'combine':
        # Build lcov tracefile command by extending the lcov argv in place.
        for single_tracefile in options.add_tracefile:
            options.lcov_command.extend(['--add-tracefile', single_tracefile])
        options.lcov_command.extend(['-o', options.outfile ])
        tmp_cmd.append(options.lcov_command)
        # Build genhtml command if --generate-html was used
        if options.generate_html:
            tmp_cmd.append([options.genhtml_command[0], options.outfile,
                            '--title', options.title + ' Test Coverage',
                            '--num-spaces', '2',
                            '--legend',
                            '--no-branch-coverage',
                            '--output-directory', options.html_location])
    # Sync Mode: push the generated HTML tree to the remote location.
    if options.mode == 'sync':
        tmp_cmd.append([options.rsync_command[0],
                        '-ro', options.html_location, options.sync_location ])
    # Run all built commands. This is where the magic happens
    for single_command in tmp_cmd:
        if options.debug:
            print('\n\nDry Run:', str(options.mode) + ' Mode\n', ' '.join(single_command))
        else:
            runCMD(single_command)
    # Run post-process stuff (HTML timestamp, tracefile cleanup)
    postProcess(options)
    # Run verify coverage (if enabled); exits the process with 0 or 1.
    if options.verify_coverage:
        verifyCoverage(options)
def verifyCoverage(options):
    """Run `lcov --summary` on the final tracefile and exit the process:
    status 0 when line coverage meets options.coverage_percent, else 1.

    Parses the first "lines ...: NN.N%" figure from lcov's combined
    stdout/stderr output.
    """
    summary_command = subprocess.Popen([options.lcov_command[0],
                                        '--gcov-tool',
                                        options.cov_tool,
                                        '--summary',
                                        options.outfile],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
    stdout, stderr = summary_command.communicate()
    summary_output = ' '.join([stdout.decode(), stderr.decode()])
    # Fix: escape the decimal point. The original pattern r'lines.*: (\d+.\d+)'
    # let '.' match any character between the two digit groups.
    coverage = float(re.findall(r'lines.*: (\d+\.\d+)', summary_output)[0])
    print(summary_output, '\n\nCode Coverage:', str(coverage))
    if coverage >= options.coverage_percent:
        sys.exit(0)
    else:
        sys.exit(1)
def postProcess(options):
    """Run post-build steps: optionally timestamp the generated HTML page
    and delete the intermediate tracefiles (except in sync mode)."""
    # Add exact time at the bottom of generated HTML coverage
    if options.generate_html and options.addtime:
        addBetterDate(options)
    # Clean up. Fix: the original used "options.mode is not 'sync'", an
    # identity comparison against a str literal that only worked through
    # CPython interning (and raises SyntaxWarning on Python >= 3.8).
    if options.cleanup and options.mode != 'sync':
        cleanUp(options)
def cleanUp(options):
    """Delete the intermediate tracefiles left behind by the current mode.

    Missing or unremovable files are ignored. In --dryrun (debug) mode the
    deletions are only printed.
    """
    if options.mode == 'combine':
        for tracefile in options.add_tracefile:
            if options.debug:
                print('\n\nDry Run: deleting file', tracefile)
            else:
                try:
                    os.remove(tracefile)
                except OSError:
                    # Fix: was a bare 'except:' which also swallowed
                    # KeyboardInterrupt/SystemExit; only ignore OS errors.
                    pass
        if options.debug:
            print('\n\nDry Run: deleting file', options.outfile)
        else:
            try:
                os.remove(options.outfile)
            except OSError:
                pass
    if options.mode == 'generate':
        if options.debug:
            print('\n\nDry Run: deleting file', options.outfile)
        else:
            try:
                os.remove(options.outfile)
            except OSError:
                pass
def addBetterDate(options):
    """Append a human-readable timestamp to the generated HTML index page
    (or, in --dryrun mode, only print the shell command that would do so)."""
    timestamp = time.ctime(time.time())
    if options.debug:
        print('\n\nDry Run: appending timestamp to generated HTML content\n', "echo '" + str(timestamp) + "'", '>>', str(options.html_location) + '/index.html')
    else:
        # Context manager guarantees the handle is closed even on error.
        with open(options.html_location + '/index.html', 'a') as index_page:
            index_page.write(timestamp)
def runCMD(cmd_opts):
    """Run *cmd_opts* as a subprocess and return its decoded stdout.

    On a non-zero exit status, print the captured stderr and terminate the
    whole script with exit code 1.
    """
    child = subprocess.Popen(cmd_opts,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    out_bytes, err_bytes = child.communicate()
    if child.poll() == 0:
        return out_bytes.decode()
    print('Error:', err_bytes.decode())
    sys.exit(1)
def exit_error(error_list):
    """Print every collected error message and terminate with status 1."""
    print('\n\tThere were errors running build_coverage:\n')
    for message in error_list:
        print(message, '\n')
    sys.exit(1)
def _find(myfile, matchFunc=os.path.isfile):
paths = os.getenv('PATH').split(os.pathsep)
for eachpth in paths:
candidate = os.path.join(eachpth, myfile)
if matchFunc(candidate):
return candidate
def find(myfile):
    # Public wrapper around _find() using the default file-existence match;
    # returns the full path of *myfile* on $PATH, or None when absent.
    return _find(myfile)
def _verifyOptions(options):
    """Validate the parsed command-line options for the selected mode,
    resolve the required external binaries (lcov, genhtml, rsync) and
    collect every usage error; exits via exit_error() when any were found,
    otherwise returns *options* unchanged."""
    error_list = []
    # TODO
    # print ignored commands as warnings?
    # warn_list = []
    # Discover and set the necessary binaries we need
    # find('app_name') returns None if not found
    options.lcov_command = [find('lcov')]
    options.genhtml_command = [find('genhtml')]
    options.rsync_command = [find('rsync')]
    # generate and combine mode parsing options
    if options.mode == 'generate' or options.mode == 'combine':
        if options.generate_html and options.title is None:
            error_list.append('when generating HTML content, you must specify a title page with --title')
        if options.generate_html and options.html_location is None:
            error_list.append('when generating HTML content, you must specify a save location with --html-location')
        if options.html_location is not None and os.path.exists(options.html_location):
            if options.overwrite is False:
                # Typo fix: was "safty measures"
                error_list.append('html location specified already exists. Exiting for safety measures...')
        if options.outfile is not None and os.path.exists(options.outfile):
            if options.overwrite is False:
                error_list.append('output file specified already exists. Exiting for safety measures...')
        if options.outfile is None:
            # Typo fix: was "specifiy"
            error_list.append('you must specify an output file: --outfile')
    # initialize specific parsing options
    if options.mode == 'initialize':
        if options.application is None:
            error_list.append('initialize mode requires a list of applications to zero counters: --application <list of directories>')
    # generate specific parsing options
    if options.mode == 'generate':
        if options.application is None:
            error_list.append('generate mode requires a list of applications to test: --application <list of directories>')
    # combine specific parsing options
    if options.mode == 'combine':
        if options.add_tracefile is None:
            error_list.append('combine mode requires a list of tracefiles to combine: --add-tracefile <list of tracefiles>')
        if options.outfile is None:
            error_list.append('you must specify an output file: --outfile')
        if options.outfile is not None and os.path.exists(options.outfile):
            if options.overwrite is False:
                error_list.append('output file specified already exists. Exiting for safety measures...')
    # sync mode parsing options
    if options.mode == 'sync':
        if options.sync_location is None:
            error_list.append('sync mode requires --sync-location to be set')
        if options.html_location is None:
            error_list.append('sync mode requires --html-location to be set')
        if options.generate_html:
            error_list.append('sync mode does not permit generating HTML content')
    # Did we find all the binaries we need?
    # Fix: use identity comparison with None instead of '== None'.
    if options.lcov_command[0] is None:
        error_list.append('lcov command not found.')
    if options.genhtml_command[0] is None:
        error_list.append('genhtml command not found.')
    if options.rsync_command[0] is None:
        error_list.append('rsync command not found.')
    # Runs if our list of errors is greater than zero
    if len(error_list) > 0:
        exit_error(error_list)
    # everything appears correct
    return options
def _parseARGs(args=None):
    """Build the argparse CLI for build_coverage, parse *args* (or
    sys.argv when None) and return the validated options object from
    _verifyOptions()."""
    parser = argparse.ArgumentParser(description='Build code coverage with the option of combining and or transferring HTML \ngenerated content to a specified location (using rsync)', formatter_class=RawTextHelpFormatter)
    # Boolean flags (all default to False).
    parser.add_argument('--generate-html', dest='generate_html', action='store_const', const=True, default=False, help='Generate HTML output. Requires --html-location\n ')
    parser.add_argument('--addtime', dest='addtime', action='store_const', const=True, default=False, help='Add timestamp to code coverage index page\n ')
    parser.add_argument('--overwrite', dest='overwrite', action='store_const', const=True, default=False, help='Ignore files already present\n ')
    parser.add_argument('--cleanup', dest='cleanup', action='store_const', const=True, default=False, help='Delete tracefiles after generating HTML content\n ')
    parser.add_argument('--dryrun', dest='debug', action='store_const', const=True, default=False, help='Do nothing except print what would happen\n ')
    parser.add_argument('--verify-coverage', dest='verify_coverage', action='store_const', const=True, default=False, help='Verify if coverage succeeds 80 percent. \nstdout True|False, as well as exits with 0|1\n ')
    # Value options consumed by buildCMD/postProcess/verifyCoverage.
    parser.add_argument('--title', help='Title name of code coverage generated HTML \noutput\n ')
    parser.add_argument('--outfile', help='Output tracefile\n ')
    parser.add_argument('--mode', choices=['generate','initialize','combine','sync'], help='Choose an operational mode:\n\nINITIALIZE: Initialize code coverage for each\nspecified --application\n\nGENERATE: Generate code coverage for each\nspecified --application\n\nCOMBINE: Combines tracefiles generated by \ndifferent runs of generate mode\n\nSYNC: Optionally rsync the data to a specified\nlocation. Requires --html-location and\n--sync-location\n ')
    parser.add_argument('--application', metavar='directories', nargs='+', help='A list of Application/s to cover (path to \ndirectories). Used in conjunction with\ngenerate mode.\n ')
    parser.add_argument('--add-tracefile', metavar='tracefiles', nargs='+', help='A list of tracefiles to use in conjunction\nwith combine mode.\n ')
    parser.add_argument('--html-location', help='Location of HTML generated content. Used in\nconjunction with --generate-html or --sync-location\n ')
    parser.add_argument('--sync-location', help='location to rsync the data to:\nuserid@server:/some/location\n ')
    parser.add_argument('--coverage-percentage', dest='coverage_percent', type=float, default=80.0, help='If specified, this is the percentage coverage has to pass\n ')
    parser.add_argument('--cov-tool', metavar='coverage_tool', default="gcov", help='Which coverage tool to use (gcov default)\n ')
    options = parser.parse_args(args)
    # Validation may call exit_error() and terminate the process.
    return _verifyOptions(options)
if __name__ == '__main__':
    # Script entry point: parse/validate the CLI options, then build and
    # run the command pipeline.
    options = _parseARGs()
    buildCMD(options)
| lgpl-2.1 |
SentyQ/python_training | conftest.py | 1 | 2466 | import pytest
from fixture.application import Application
import json
import os.path
import importlib
import jsonpickle
from fixture.db import DbFixture
# Module-level caches shared across tests: the Application fixture instance
# (recreated when invalid) and the parsed target configuration (loaded once).
fixture = None
target = None
@pytest.fixture
def check_ui(request):
    # Expose the --check_ui command line flag (bool) to tests as a fixture.
    return request.config.getoption("--check_ui")
def load_config(file):
    # Load the JSON configuration named by *file* (resolved relative to this
    # conftest's directory) and cache it in the module-level 'target' global
    # so repeated calls do not re-read the file.
    global target
    if target is None:
        config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
        with open(config_file) as f:
            target = json.load(f)
    return target
@pytest.fixture(scope="session")
def db(request):
    # Session-wide database fixture configured from the "db" section of the
    # --target JSON file; torn down via the finalizer at session end.
    db_config = load_config(request.config.getoption("--target"))["db"]
    dbfixture = DbFixture(host=db_config['host'],name=db_config['name'],
                          user=db_config['user'],password=db_config['password'])
    def fin():
        dbfixture.destroy()
    request.addfinalizer(fin)
    return dbfixture
@pytest.fixture
def app(request):
    # Per-test application fixture. Reuses the cached module-level instance
    # when it is still valid; otherwise starts a fresh browser session and
    # logs in with the credentials from the "web" section of --target.
    global fixture
    browser = request.config.getoption("--browser")
    web_config = load_config(request.config.getoption("--target"))['web']
    if fixture is None or not fixture.is_valid():
        fixture = Application(browser=browser, base_url=web_config['baseUrl'])
    fixture.session.ensure_login(username=web_config['username'], password=web_config['password'])
    return fixture
@pytest.fixture(scope="session", autouse = True)
def stop(request):
    # Autouse session finalizer: log out and destroy the shared application
    # fixture once the whole test session finishes.
    def fin():
        fixture.session.ensure_logout()
        fixture.destroy()
    request.addfinalizer(fin)
    return fixture
def pytest_addoption(parser):
    """Register the command-line options consumed by the fixtures above."""
    option_specs = (
        ("--browser", {"action": "store", "default": "firefox"}),
        ("--target", {"action": "store", "default": "target.json"}),
        ("--check_ui", {"action": "store_true"}),
    )
    for name, kwargs in option_specs:
        parser.addoption(name, **kwargs)
def pytest_generate_tests(metafunc):
    # Data-driven parametrization: any test argument named "data_<module>"
    # is filled from data/<module>.py (its 'testdata' attribute), and any
    # "json_<name>" argument from data/<name>.json; test ids are the str()
    # of each record.
    for fixture in metafunc.fixturenames:
        if fixture.startswith("data_"):
            testdata = load_from_module(fixture[5:])
            metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
        elif fixture.startswith("json_"):
            testdata = load_from_json(fixture[5:])
            metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
    # Import data.<module> and return its 'testdata' attribute.
    return importlib.import_module("data.%s" % module).testdata
def load_from_json(file):
    # Decode data/<file>.json (written with jsonpickle, so project objects
    # are reconstructed, not plain dicts) relative to this conftest's dir.
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           "data/%s.json" % file)) as f:
        return jsonpickle.decode(f.read())
| apache-2.0 |
McNetic/couchpotato-ger | library/hachoir_parser/parser_list.py | 85 | 7819 | import re
import types
from hachoir_core.error import error
from hachoir_core.i18n import _
from hachoir_parser import Parser, HachoirParser
import sys
### Parser list ################################################################
class ParserList(object):
    """Registry of hachoir parser classes, indexed by their tags.

    Parsers are validated on add(); self.bytag maps a tag name (e.g. "id",
    "category") to {tag value: [parser classes]}. Python 2 code (uses
    `unicode`, `iteritems`, `print >>out`).
    """
    VALID_CATEGORY = ("archive", "audio", "container", "file_system",
        "game", "image", "misc", "program", "video")
    ID_REGEX = re.compile("^[a-z0-9][a-z0-9_]{2,}$")

    def __init__(self):
        # Flat list of registered parser classes plus the tag indexes.
        self.parser_list = []
        self.bytag = { "id": {}, "category": {} }

    def translate(self, name, value):
        """Validate one (tag name, tag value) pair.

        Returns True to ignore the tag, an error string on failure, or a
        normalized (name, tuple_of_values) pair to index.
        """
        if name in ("magic",):
            return True
        elif name == "min_size":
            return - value < 0 or "Invalid minimum size (min_size)"
        elif name == "description":
            return isinstance(value, (str, unicode)) and bool(value) or "Invalid description"
        elif name == "category":
            if value not in self.VALID_CATEGORY:
                return "Invalid category: %r" % value
        elif name == "id":
            if type(value) is not str or not self.ID_REGEX.match(value):
                return "Invalid identifier: %r" % value
            parser = self.bytag[name].get(value)
            if parser:
                return "Duplicate parser id: %s already used by %s" % \
                    (value, parser[0].__name__)
        # TODO: lists should be forbidden
        if isinstance(value, list):
            value = tuple(value)
        elif not isinstance(value, tuple):
            # Wrap a scalar value into a 1-tuple.
            value = value,
        return name, value

    def validParser(self, parser, tags):
        """Check the mandatory/typed tags of *parser*; return an error
        message string, or "" when the parser is acceptable."""
        if "id" not in tags:
            return "No identifier"
        if "description" not in tags:
            return "No description"
        # TODO: Allow simple strings for file_ext/mime ?
        # (see also HachoirParser.createFilenameSuffix)
        file_ext = tags.get("file_ext", ())
        if not isinstance(file_ext, (tuple, list)):
            return "File extension is not a tuple or list"
        mimes = tags.get("mime", ())
        if not isinstance(mimes, tuple):
            return "MIME type is not a tuple"
        for mime in mimes:
            if not isinstance(mime, unicode):
                return "MIME type %r is not an unicode string" % mime
        return ""

    def add(self, parser):
        """Validate *parser* and register it in parser_list and the tag
        indexes; invalid parsers are skipped with an error() log."""
        tags = parser.getParserTags()
        err = self.validParser(parser, tags)
        if err:
            error("Skip parser %s: %s" % (parser.__name__, err))
            return
        _tags = []
        for tag in tags.iteritems():
            tag = self.translate(*tag)
            if isinstance(tag, tuple):
                _tags.append(tag)
            elif tag is not True:
                # translate() returned an error string: reject the parser.
                error("[%s] %s" % (parser.__name__, tag))
                return
        self.parser_list.append(parser)
        for name, values in _tags:
            byname = self.bytag.setdefault(name,{})
            for value in values:
                byname.setdefault(value,[]).append(parser)

    def __iter__(self):
        # Iterate over the registered parser classes.
        return iter(self.parser_list)

    def print_(self, title=None, out=None, verbose=False, format="one-line"):
        """Display a list of parser with its title
         * out: output file
         * title : title of the list to display
         * format: "rest", "trac", "file-ext", "mime" or "one_line" (default)

        NOTE(review): the default format is "one-line" (hyphen) but every
        comparison below tests "one_line" (underscore), so the default
        actually falls through to the plain "[category]" layout -- confirm
        which spelling is intended.
        """
        if out is None:
            out = sys.stdout
        if format in ("file-ext", "mime"):
            # Create file extension set
            extensions = set()
            for parser in self:
                file_ext = parser.getParserTags().get(format, ())
                file_ext = list(file_ext)
                try:
                    file_ext.remove("")
                except ValueError:
                    pass
                extensions |= set(file_ext)
            # Remove empty extension
            extensions -= set(('',))
            # Convert to list and sort by ASCII order
            extensions = list(extensions)
            extensions.sort()
            # Print list
            text = ", ".join( str(item) for item in extensions )
            if format == "file-ext":
                print >>out, "File extensions: %s." % text
                print >>out
                print >>out, "Total: %s file extensions." % len(extensions)
            else:
                print >>out, "MIME types: %s." % text
                print >>out
                print >>out, "Total: %s MIME types." % len(extensions)
            return
        if format == "trac":
            print >>out, "== List of parsers =="
            print >>out
            print >>out, "Total: %s parsers" % len(self.parser_list)
            print >>out
        elif format == "one_line":
            if title:
                print >>out, title
            else:
                print >>out, _("List of Hachoir parsers.")
            print >>out
        # Create parser list sorted by module
        bycategory = self.bytag["category"]
        for category in sorted(bycategory.iterkeys()):
            if format == "one_line":
                parser_list = [ parser.PARSER_TAGS["id"] for parser in bycategory[category] ]
                parser_list.sort()
                print >>out, "- %s: %s" % (category.title(), ", ".join(parser_list))
            else:
                if format == "rest":
                    print >>out, category.replace("_", " ").title()
                    print >>out, "-" * len(category)
                    print >>out
                elif format == "trac":
                    print >>out, "=== %s ===" % category.replace("_", " ").title()
                    print >>out
                else:
                    print >>out, "[%s]" % category
                parser_list = sorted(bycategory[category],
                    key=lambda parser: parser.PARSER_TAGS["id"])
                if format == "rest":
                    for parser in parser_list:
                        tags = parser.getParserTags()
                        print >>out, "* %s: %s" % (tags["id"], tags["description"])
                elif format == "trac":
                    for parser in parser_list:
                        tags = parser.getParserTags()
                        desc = tags["description"]
                        # Escape CamelCase words so trac does not link them.
                        desc = re.sub(r"([A-Z][a-z]+[A-Z][^ ]+)", r"!\1", desc)
                        print >>out, " * %s: %s" % (tags["id"], desc)
                else:
                    for parser in parser_list:
                        parser.print_(out, verbose)
                print >>out
        if format != "trac":
            print >>out, "Total: %s parsers" % len(self.parser_list)
class HachoirParserList(ParserList):
    """Singleton ParserList auto-populated from the hachoir_parser package."""
    # Cached singleton instance (see getInstance()).
    _instance = None

    @classmethod
    def getInstance(cls):
        # Lazily create and cache the single shared instance.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        ParserList.__init__(self)
        self._load()

    def _load(self):
        """
        Load all parsers from "hachoir.parser" module.
        Return the list of loaded parsers.
        """
        # Parser list is already loaded?
        if self.parser_list:
            return self.parser_list
        todo = []
        module = __import__("hachoir_parser")
        # First collect every submodule, then scan each for parser classes.
        for attrname in dir(module):
            attr = getattr(module, attrname)
            if isinstance(attr, types.ModuleType):
                todo.append(attr)
        for module in todo:
            for name in dir(module):
                attr = getattr(module, name)
                if isinstance(attr, type) \
                and issubclass(attr, HachoirParser) \
                and attr not in (Parser, HachoirParser):
                    self.add(attr)
        assert 1 <= len(self.parser_list)
        return self.parser_list
| gpl-3.0 |
vallsv/pyqtgraph | pyqtgraph/tests/test_srttransform3d.py | 51 | 1339 | import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_almost_equal
# Shared 3-D sample points. NOTE(review): not referenced by testMatrix in
# this excerpt -- presumably used by other tests in the full file; confirm.
testPoints = np.array([
    [0, 0, 0],
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [-1, -1, 0],
    [0, -1, -1]])
def testMatrix():
    """
    SRTTransform3D => Transform3D => SRTTransform3D

    Round-trips a scale/rotate/translate transform through a plain 4x4
    Transform3D and checks the decomposition recovers the original
    components.
    """
    tr = pg.SRTTransform3D()
    tr.setRotate(45, (0, 0, 1))
    tr.setScale(0.2, 0.4, 1)
    tr.setTranslate(10, 20, 40)
    # The setters must be readable back verbatim.
    assert tr.getRotation() == (45, QtGui.QVector3D(0, 0, 1))
    assert tr.getScale() == QtGui.QVector3D(0.2, 0.4, 1)
    assert tr.getTranslation() == QtGui.QVector3D(10, 20, 40)

    tr2 = pg.Transform3D(tr)
    assert np.all(tr.matrix() == tr2.matrix())

    # This is the most important test:
    # The transition from Transform3D to SRTTransform3D is a tricky one.
    tr3 = pg.SRTTransform3D(tr2)
    assert_array_almost_equal(tr.matrix(), tr3.matrix())
    assert_almost_equal(tr3.getRotation()[0], tr.getRotation()[0])
    assert_array_almost_equal(tr3.getRotation()[1], tr.getRotation()[1])
    assert_array_almost_equal(tr3.getScale(), tr.getScale())
    assert_array_almost_equal(tr3.getTranslation(), tr.getTranslation())
| mit |
kevin-hannegan/vps-droplet | website/lib/python2.7/site-packages/jinja2/visitor.py | 1401 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
    """Generic AST walker.

    For every node the walker looks up a ``visit_<ClassName>`` method on
    itself and calls it; the handler's return value is passed back to the
    caller of :meth:`visit`.  When no handler exists, :meth:`generic_visit`
    descends into the node's children instead.  Override
    :meth:`get_visitor` to change how handlers are resolved.
    """

    def get_visitor(self, node):
        """Return the visitor method for *node*, or `None` when the node
        has no dedicated handler (the generic visit is used then)."""
        return getattr(self, 'visit_' + type(node).__name__, None)

    def visit(self, node, *args, **kwargs):
        """Dispatch *node* to its handler, falling back to generic_visit."""
        handler = self.get_visitor(node)
        if handler is None:
            return self.generic_visit(node, *args, **kwargs)
        return handler(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        """Default behavior: visit every direct child of *node*."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node.  If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value.  The return value may be the original node in which case no
    replacement takes place.
    """

    def generic_visit(self, node, *args, **kwargs):
        # Rewrite each field of *node* in place, using visitor return
        # values; child lists are rebuilt, single children are replaced
        # (or the attribute deleted when the visitor returns None).
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, Node):
                        value = self.visit(value, *args, **kwargs)
                        if value is None:
                            # Dropped node: omit it from the new list.
                            continue
                        elif not isinstance(value, Node):
                            # A visitor may expand one node into several.
                            new_values.extend(value)
                            continue
                    new_values.append(value)
                # Mutate the existing list object in place.
                old_value[:] = new_values
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        rv = self.visit(node, *args, **kwargs)
        if not isinstance(rv, list):
            rv = [rv]
        return rv
| mit |
rtoma/Diamond | src/collectors/flume/flume.py | 63 | 3401 | # coding=utf-8
"""
Collect statistics from Flume
#### Dependencies
* urllib2
* json or simplejson
"""
import urllib2
import diamond.collector
try:
import simplejson as json
except ImportError:
import json
class FlumeCollector(diamond.collector.Collector):
    """Diamond collector that scrapes Flume's JSON metrics endpoint and
    publishes counters/gauges per component.  Python 2 code
    (`urllib2`, `except ..., e`, `iteritems`)."""
    # items to collect, keyed by Flume component type
    _metrics_collect = {
        'CHANNEL': [
            'ChannelFillPercentage',
            'EventPutAttemptCount',
            'EventPutSuccessCount',
            'EventTakeAttemptCount',
            'EventTakeSuccessCount'
        ],
        'SINK': [
            'BatchCompleteCount',
            'BatchEmptyCount',
            'BatchUnderflowCount',
            'ConnectionClosedCount',
            'ConnectionCreatedCount',
            'ConnectionFailedCount',
            'EventDrainAttemptCount',
            'EventDrainSuccessCount'
        ],
        'SOURCE': [
            'AppendAcceptedCount',
            'AppendBatchAcceptedCount',
            'AppendBatchReceivedCount',
            'AppendReceivedCount',
            'EventAcceptedCount',
            'EventReceivedCount',
            'OpenConnectionCount'
        ]
    }

    def get_default_config_help(self):
        # Help strings for the extra configuration keys below.
        config_help = super(FlumeCollector, self).get_default_config_help()
        config_help.update({
            'req_host': 'Hostname',
            'req_port': 'Port',
            'req_path': 'Path',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        default_config = super(FlumeCollector, self).get_default_config()
        default_config['path'] = 'flume'
        default_config['req_host'] = 'localhost'
        default_config['req_port'] = 41414
        default_config['req_path'] = '/metrics'
        return default_config

    def collect(self):
        # Fetch and decode http://<host>:<port><path>; any failure is
        # logged and aborts this collection cycle.
        url = 'http://{0}:{1}{2}'.format(
            self.config['req_host'],
            self.config['req_port'],
            self.config['req_path']
        )
        try:
            resp = urllib2.urlopen(url)
            try:
                j = json.loads(resp.read())
                resp.close()
            except Exception, e:
                resp.close()
                self.log.error('Cannot load json data: %s', e)
                return None
        except urllib2.URLError, e:
            self.log.error('Failed to open url: %s', e)
            return None
        except Exception, e:
            self.log.error('Unknown error opening url: %s', e)
            return None
        # One top-level entry per Flume component: name -> metrics dict
        # (its 'Type' selects which metric names to read).
        for comp in j.iteritems():
            comp_name = comp[0]
            comp_items = comp[1]
            comp_type = comp_items['Type']
            for item in self._metrics_collect[comp_type]:
                if item.endswith('Count'):
                    # Strip the 'Count' suffix and publish as a counter.
                    metric_name = '{0}.{1}'.format(comp_name, item[:-5])
                    metric_value = int(comp_items[item])
                    self.publish_counter(metric_name, metric_value)
                elif item.endswith('Percentage'):
                    metric_name = '{0}.{1}'.format(comp_name, item)
                    metric_value = float(comp_items[item])
                    self.publish_gauge(metric_name, metric_value)
                else:
                    # NOTE(review): this branch drops the component prefix
                    # (metric_name = item, no comp_name) -- looks
                    # inconsistent with the branches above; confirm.
                    metric_name = item
                    metric_value = int(comp_items[item])
                    self.publish_gauge(metric_name, metric_value)
| mit |
anntzer/scipy | scipy/interpolate/tests/test_rbf.py | 18 | 6546 | # Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal)
from numpy import linspace, sin, cos, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
    # Check that the Rbf function interpolates through the nodes (1D)
    nodes = linspace(0, 10, 9)
    values = sin(nodes)
    interpolator = Rbf(nodes, values, function=function)
    interpolated = interpolator(nodes)
    assert_array_almost_equal(values, interpolated)
    # Scalar evaluation at a node must also reproduce the node value.
    assert_almost_equal(interpolator(float(nodes[0])), values[0])
def check_rbf2d_interpolation(function):
    # Check that the Rbf function interpolates through the nodes (2D).
    # NOTE(review): unseeded random nodes; values are complex because of
    # the 1j term in z.
    x = random.rand(50,1)*4-2
    y = random.rand(50,1)*4-2
    z = x*exp(-x**2-1j*y**2)
    rbf = Rbf(x, y, z, epsilon=2, function=function)
    zi = rbf(x, y)
    # Rbf flattens its output; restore the (50, 1) node shape to compare.
    zi.shape = x.shape
    assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
    # Check that the Rbf function interpolates through the nodes (3D).
    # NOTE(review): unseeded random nodes, like the 2-D variant above.
    x = random.rand(50, 1)*4 - 2
    y = random.rand(50, 1)*4 - 2
    z = random.rand(50, 1)*4 - 2
    d = x*exp(-x**2 - y**2)
    rbf = Rbf(x, y, z, d, epsilon=2, function=function)
    di = rbf(x, y, z)
    # Restore the (50, 1) node shape before comparing.
    di.shape = x.shape
    assert_array_almost_equal(di, d)
def test_rbf_interpolation():
    # Run the 1-D/2-D/3-D interpolation checks for every basis function.
    for function in FUNCTIONS:
        check_rbf1d_interpolation(function)
        check_rbf2d_interpolation(function)
        check_rbf3d_interpolation(function)
def check_2drbf1d_interpolation(function):
    # Check that the 2-D Rbf function interpolates through the nodes (1D)
    # ("2-D" = two target components per node, via mode='N-D').
    x = linspace(0, 10, 9)
    y0 = sin(x)
    y1 = cos(x)
    y = np.vstack([y0, y1]).T
    rbf = Rbf(x, y, function=function, mode='N-D')
    yi = rbf(x)
    assert_array_almost_equal(y, yi)
    assert_almost_equal(rbf(float(x[0])), y[0])
def check_2drbf2d_interpolation(function):
    # Check that the 2-D Rbf function interpolates through the nodes (2D).
    # NOTE(review): unseeded random nodes; complex-valued targets.
    x = random.rand(50, ) * 4 - 2
    y = random.rand(50, ) * 4 - 2
    z0 = x * exp(-x ** 2 - 1j * y ** 2)
    z1 = y * exp(-y ** 2 - 1j * x ** 2)
    z = np.vstack([z0, z1]).T
    rbf = Rbf(x, y, z, epsilon=2, function=function, mode='N-D')
    zi = rbf(x, y)
    # Restore the (50, 2) target shape before comparing.
    zi.shape = z.shape
    assert_array_almost_equal(z, zi)
def check_2drbf3d_interpolation(function):
    # Check that the 2-D Rbf function interpolates through the nodes (3D).
    # NOTE(review): unseeded random nodes, like the other N-D checks.
    x = random.rand(50, ) * 4 - 2
    y = random.rand(50, ) * 4 - 2
    z = random.rand(50, ) * 4 - 2
    d0 = x * exp(-x ** 2 - y ** 2)
    d1 = y * exp(-y ** 2 - x ** 2)
    d = np.vstack([d0, d1]).T
    rbf = Rbf(x, y, z, d, epsilon=2, function=function, mode='N-D')
    di = rbf(x, y, z)
    # Restore the (50, 2) target shape before comparing.
    di.shape = d.shape
    assert_array_almost_equal(di, d)
def test_2drbf_interpolation():
    # Run every mode='N-D' interpolation check for every basis function.
    for function in FUNCTIONS:
        check_2drbf1d_interpolation(function)
        check_2drbf2d_interpolation(function)
        check_2drbf3d_interpolation(function)
def check_rbf1d_regularity(function, atol):
    """Rbf should stay close to the generating function between the nodes."""
    nodes = linspace(0, 10, 9)
    rbf = Rbf(nodes, sin(nodes), function=function)
    # Evaluate on a much finer grid than the one used to fit.
    fine = linspace(0, 10, 100)
    approx = rbf(fine)
    exact = sin(fine)
    assert_(allclose(approx, exact, atol=atol),
            "abs-diff: %f" % abs(approx - exact).max())
def test_rbf_regularity():
    """Run the regularity check with a per-basis absolute tolerance."""
    default_tol = 1e-2
    tolerances = {
        'multiquadric': 0.1,
        'inverse multiquadric': 0.15,
        'gaussian': 0.15,
        'cubic': 0.15,
        'quintic': 0.1,
        'thin-plate': 0.1,
        'linear': 0.2,
    }
    for basis in FUNCTIONS:
        check_rbf1d_regularity(basis, tolerances.get(basis, default_tol))
def check_2drbf1d_regularity(function, atol):
    """Two-output Rbf should track (sin, cos) well away from the nodes."""
    nodes = linspace(0, 10, 9)
    samples = np.stack((sin(nodes), cos(nodes)), axis=-1)
    rbf = Rbf(nodes, samples, function=function, mode='N-D')
    # Evaluate on a much finer grid than the one used to fit.
    fine = linspace(0, 10, 100)
    approx = rbf(fine)
    exact = np.stack((sin(fine), cos(fine)), axis=-1)
    assert_(allclose(approx, exact, atol=atol),
            "abs-diff: %f" % abs(approx - exact).max())
def test_2drbf_regularity():
    """Run the two-output regularity check with per-basis tolerances."""
    default_tol = 1e-2
    tolerances = {
        'multiquadric': 0.1,
        'inverse multiquadric': 0.15,
        'gaussian': 0.15,
        'cubic': 0.15,
        'quintic': 0.1,
        'thin-plate': 0.15,
        'linear': 0.2,
    }
    for basis in FUNCTIONS:
        check_2drbf1d_regularity(basis, tolerances.get(basis, default_tol))
def check_rbf1d_stability(function):
    """Default-epsilon Rbf must not overshoot noisy data (regression gh-4523)."""
    # Fixed seed makes the noisy sample deterministic.
    np.random.seed(1234)
    x = np.linspace(0, 10, 50)
    noisy = x + 4.0 * np.random.randn(len(x))

    rbf = Rbf(x, noisy, function=function)
    dense = np.linspace(0, 10, 1000)
    interpolated = rbf(dense)

    # Relative to the noise already present, the deviation from the
    # linear trend may grow by at most 10%.
    ratio = np.abs(interpolated - dense).max() / np.abs(noisy - x).max()
    assert_(ratio < 1.1)
def test_rbf_stability():
    """Run the overshoot/stability check for every radial basis function."""
    for basis in FUNCTIONS:
        check_rbf1d_stability(basis)
def test_default_construction():
    """Rbf() with no basis argument works (defaults; regression ticket #1228)."""
    nodes = linspace(0, 10, 9)
    values = sin(nodes)
    rbf = Rbf(nodes, values)
    assert_array_almost_equal(values, rbf(nodes))
def test_function_is_callable():
    """A user-supplied callable may serve as the basis function."""
    def identity(r):
        # the linear basis phi(r) = r, written as a plain function
        return r

    nodes = linspace(0, 10, 9)
    values = sin(nodes)
    rbf = Rbf(nodes, values, function=identity)
    assert_array_almost_equal(values, rbf(nodes))
def test_two_arg_function_is_callable():
    """A two-argument callable receives the Rbf instance as first argument."""
    def _shifted(self, r):
        # 'self' is the Rbf object, so instance attributes are available
        return self.epsilon + r

    nodes = linspace(0, 10, 9)
    values = sin(nodes)
    rbf = Rbf(nodes, values, function=_shifted)
    assert_array_almost_equal(values, rbf(nodes))
def test_rbf_epsilon_none():
    """epsilon=None must be accepted and derived from the data."""
    nodes = linspace(0, 10, 9)
    Rbf(nodes, sin(nodes), epsilon=None)
def test_rbf_epsilon_none_collinear():
    """Collinear points along one axis must not produce epsilon == 0."""
    # y is constant, so the bounding box is degenerate in that dimension.
    rbf = Rbf([1, 2, 3], [4, 4, 4], [5, 6, 7], epsilon=None)
    assert_(rbf.epsilon > 0)
| bsd-3-clause |
dakcarto/QGIS | python/plugins/db_manager/db_plugins/data_model.py | 7 | 11370 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import Qt, QTime, QRegExp, QAbstractTableModel
from PyQt4.QtGui import QFont, QStandardItemModel, QStandardItem, QApplication
from .plugin import DbError
class BaseTableModel(QAbstractTableModel):
    """Read-only Qt table model backed by a list of row sequences.

    ``self.resdata`` holds the rows and ``self._header`` the column titles.
    Subclasses may override ``getData`` to fetch rows lazily.
    """

    def __init__(self, header=None, data=None, parent=None):
        QAbstractTableModel.__init__(self, parent)
        self._header = header if header else []
        self.resdata = data if data else []

    def headerToString(self, sep=u"\t"):
        # Join the column titles into a single line (used for copy/export).
        header = self._header
        return sep.join(header)

    def rowToString(self, row, sep=u"\t"):
        # Serialize one row; the trailing separator is stripped at the end.
        text = u""
        for col in range(self.columnCount()):
            text += u"%s" % self.getData(row, col) + sep
        return text[:-1]

    def getData(self, row, col):
        return self.resdata[row][col]

    def columnNames(self):
        # Return a copy so callers cannot mutate the model's header.
        return list(self._header)

    def rowCount(self, parent=None):
        return len(self.resdata)

    def columnCount(self, parent=None):
        return len(self._header)

    def data(self, index, role):
        # Only display text and font are provided (NULLs drawn in italic).
        if role != Qt.DisplayRole and role != Qt.FontRole:
            return None

        val = self.getData(index.row(), index.column())

        if role == Qt.FontRole:  # draw NULL in italic
            if val is not None:
                return None
            f = QFont()
            f.setItalic(True)
            return f

        if val is None:
            return "NULL"
        elif isinstance(val, buffer):
            # hide binary data
            return None
        elif isinstance(val, (str, unicode)) and len(val) > 300:
            # too much data to display, elide the string
            val = val[:300]
        try:
            return unicode(val)  # convert to unicode
        except UnicodeDecodeError:
            return unicode(val, 'utf-8', 'replace')  # convert from utf8 and replace errors (if any)

    def headerData(self, section, orientation, role):
        if role != Qt.DisplayRole:
            return None

        if orientation == Qt.Vertical:
            # header for a row
            return section + 1
        else:
            # header for a column
            return self._header[section]
class TableDataModel(BaseTableModel):
    """Table model that pages rows from a database table on demand.

    Keeps a window of ``fetchedCount`` rows cached in ``resdata``;
    ``getData`` re-centres the window when the requested row falls
    outside it. ``fetchMoreData`` is the subclass hook doing the fetch.
    """

    def __init__(self, table, parent=None):
        self.db = table.database().connector
        self.table = table
        fieldNames = map(lambda x: x.name, table.fields())
        BaseTableModel.__init__(self, fieldNames, None, parent)

        # get table fields
        self.fields = []
        for fld in table.fields():
            self.fields.append(self._sanitizeTableField(fld))

        self.fetchedCount = 201
        self.fetchedFrom = -self.fetchedCount - 1  # so the first call to getData will exec fetchMoreData(0)

    def _sanitizeTableField(self, field):
        """ quote column names to avoid some problems (e.g. columns with upper case) """
        return self.db.quoteId(field)

    def getData(self, row, col):
        if row < self.fetchedFrom or row >= self.fetchedFrom + self.fetchedCount:
            # requested row is outside the cached window: re-centre the
            # window around it, clamping to [0, rowCount).
            margin = self.fetchedCount / 2
            start = self.rowCount() - margin if row + margin >= self.rowCount() else row - margin
            if start < 0:
                start = 0
            self.fetchMoreData(start)
        return self.resdata[row - self.fetchedFrom][col]

    def fetchMoreData(self, row_start):
        # Subclass hook: load rows starting at row_start and update
        # self.resdata / self.fetchedFrom accordingly.
        pass

    def rowCount(self, index=None):
        # case for tables with no columns ... any reason to use them? :-)
        return self.table.rowCount if self.table.rowCount is not None and self.columnCount(index) > 0 else 0
class SqlResultModel(BaseTableModel):
    """Model holding the full result set of a user-supplied SQL query.

    The query runs in the constructor; elapsed time and affected-row
    count are recorded for display in the SQL window.
    """

    def __init__(self, db, sql, parent=None):
        self.db = db.connector

        # time the query execution
        t = QTime()
        t.start()
        c = self.db._execute(None, unicode(sql))
        self._secs = t.elapsed() / 1000.0
        del t

        self._affectedRows = 0
        data = []
        header = self.db._get_cursor_columns(c)
        if header is None:
            header = []

        try:
            if len(header) > 0:
                data = self.db._fetchall(c)
            self._affectedRows = c.rowcount
        except DbError:
            # nothing to fetch!
            data = []
            header = []

        BaseTableModel.__init__(self, header, data, parent)

        # commit before closing the cursor to make sure that the changes are stored
        self.db._commit()
        c.close()
        del c

    def secs(self):
        # elapsed execution time in seconds
        return self._secs

    def affectedRows(self):
        # number of rows affected by the statement (cursor.rowcount)
        return self._affectedRows
class SimpleTableModel(QStandardItemModel):
    """Optionally editable item model with a fixed textual header.

    Subclasses map rows back to domain objects via ``getObject``.
    """

    def __init__(self, header, editable=False, parent=None):
        self.header = header
        self.editable = editable
        QStandardItemModel.__init__(self, 0, len(self.header), parent)

    def rowFromData(self, data):
        # Build one row of QStandardItems, honouring the editable flag.
        row = []
        for c in data:
            item = QStandardItem(unicode(c))
            item.setFlags((item.flags() | Qt.ItemIsEditable) if self.editable else (item.flags() & ~Qt.ItemIsEditable))
            row.append(item)
        return row

    def headerData(self, section, orientation, role):
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return self.header[section]
        return None

    def _getNewObject(self):
        # Subclass hook: return a fresh (empty) domain object.
        pass

    def getObject(self, row):
        return self._getNewObject()

    def getObjectIter(self):
        # Yield one domain object per model row.
        for row in range(self.rowCount()):
            yield self.getObject(row)
class TableFieldsModel(SimpleTableModel):
    """Model for a table's fields: name, type, nullability and default.

    The original TableField object travels with the row in Qt.UserRole
    so that ``getObject`` can reconstruct it after edits.
    """

    def __init__(self, parent, editable=False):
        SimpleTableModel.__init__(self, ['Name', 'Type', 'Null', 'Default'], editable, parent)

    def headerData(self, section, orientation, role):
        if orientation == Qt.Vertical and role == Qt.DisplayRole:
            # number the rows starting from 1
            return section + 1
        return SimpleTableModel.headerData(self, section, orientation, role)

    def flags(self, index):
        flags = SimpleTableModel.flags(self, index)
        if index.column() == 2 and flags & Qt.ItemIsEditable:  # set Null column as checkable instead of editable
            flags = flags & ~Qt.ItemIsEditable | Qt.ItemIsUserCheckable
        return flags

    def append(self, fld):
        data = [fld.name, fld.type2String(), not fld.notNull, fld.default2String()]
        self.appendRow(self.rowFromData(data))
        row = self.rowCount() - 1
        self.setData(self.index(row, 0), fld, Qt.UserRole)
        self.setData(self.index(row, 1), fld.primaryKey, Qt.UserRole)
        # the Null column is shown as a checkbox, not as text
        self.setData(self.index(row, 2), None, Qt.DisplayRole)
        self.setData(self.index(row, 2), Qt.Unchecked if fld.notNull else Qt.Checked, Qt.CheckStateRole)

    def _getNewObject(self):
        from .plugin import TableField

        return TableField(None)

    def getObject(self, row):
        # Rebuild a TableField from the row, parsing "type(modifier)" strings.
        val = self.data(self.index(row, 0), Qt.UserRole)
        fld = val if val is not None else self._getNewObject()
        fld.name = self.data(self.index(row, 0)) or ""
        typestr = self.data(self.index(row, 1)) or ""
        regex = QRegExp("([^\(]+)\(([^\)]+)\)")
        startpos = regex.indexIn(typestr)
        if startpos >= 0:
            fld.dataType = regex.cap(1).strip()
            fld.modifier = regex.cap(2).strip()
        else:
            fld.modifier = None
            fld.dataType = typestr

        # the "Null" checkbox is checked when the field is nullable
        fld.notNull = self.data(self.index(row, 2), Qt.CheckStateRole) == Qt.Unchecked
        fld.primaryKey = self.data(self.index(row, 1), Qt.UserRole)
        return fld

    def getFields(self):
        flds = []
        for fld in self.getObjectIter():
            flds.append(fld)
        return flds
class TableConstraintsModel(SimpleTableModel):
    """Model for a table's constraints: name, type and involved columns."""

    def __init__(self, parent, editable=False):
        SimpleTableModel.__init__(self, [QApplication.translate("DBManagerPlugin", 'Name'),
                                         QApplication.translate("DBManagerPlugin", 'Type'),
                                         QApplication.translate("DBManagerPlugin", 'Column(s)')], editable, parent)

    def append(self, constr):
        # Display row; the TableConstraint object rides along in Qt.UserRole.
        field_names = map(lambda k_v: unicode(k_v[1].name), constr.fields().iteritems())
        data = [constr.name, constr.type2String(), u", ".join(field_names)]
        self.appendRow(self.rowFromData(data))
        row = self.rowCount() - 1
        self.setData(self.index(row, 0), constr, Qt.UserRole)
        self.setData(self.index(row, 1), constr.type, Qt.UserRole)
        self.setData(self.index(row, 2), constr.columns, Qt.UserRole)

    def _getNewObject(self):
        from .plugin import TableConstraint

        return TableConstraint(None)

    def getObject(self, row):
        # Rebuild a TableConstraint from the row data stored in UserRole.
        constr = self.data(self.index(row, 0), Qt.UserRole)
        if not constr:
            constr = self._getNewObject()
        constr.name = self.data(self.index(row, 0)) or ""
        constr.type = self.data(self.index(row, 1), Qt.UserRole)
        constr.columns = self.data(self.index(row, 2), Qt.UserRole)
        return constr

    def getConstraints(self):
        constrs = []
        for constr in self.getObjectIter():
            constrs.append(constr)
        return constrs
class TableIndexesModel(SimpleTableModel):
    """Model for a table's indexes: name and indexed columns."""

    def __init__(self, parent, editable=False):
        SimpleTableModel.__init__(self, [QApplication.translate("DBManagerPlugin", 'Name'),
                                         QApplication.translate("DBManagerPlugin", 'Column(s)')], editable, parent)

    def append(self, idx):
        # Display row; the TableIndex object rides along in Qt.UserRole.
        field_names = map(lambda k_v1: unicode(k_v1[1].name), idx.fields().iteritems())
        data = [idx.name, u", ".join(field_names)]
        self.appendRow(self.rowFromData(data))
        row = self.rowCount() - 1
        self.setData(self.index(row, 0), idx, Qt.UserRole)
        self.setData(self.index(row, 1), idx.columns, Qt.UserRole)

    def _getNewObject(self):
        from .plugin import TableIndex

        return TableIndex(None)

    def getObject(self, row):
        # Rebuild a TableIndex from the row data stored in UserRole.
        idx = self.data(self.index(row, 0), Qt.UserRole)
        if not idx:
            idx = self._getNewObject()
        idx.name = self.data(self.index(row, 0))
        idx.columns = self.data(self.index(row, 1), Qt.UserRole)
        return idx

    def getIndexes(self):
        idxs = []
        for idx in self.getObjectIter():
            idxs.append(idx)
        return idxs
| gpl-2.0 |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/ez_setup.py | 103 | 15757 | #!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import sys
import time
import fnmatch
import tempfile
import tarfile
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
    import subprocess

    def _python_cmd(*args):
        """Run the current interpreter with *args*; True on exit code 0."""
        args = (sys.executable,) + args
        return subprocess.call(args) == 0

except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        """Fallback for Python 2.3 (no subprocess module): os.spawnl."""
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
# Version of distribute fetched by default and the mirror it comes from.
DEFAULT_VERSION = "0.6.14"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Version string advertised by the fake setuptools egg-info written below.
SETUPTOOLS_FAKED_VERSION = "0.6c11"

# Minimal PKG-INFO body used to masquerade as an installed setuptools.
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball):
    """Unpack *tarball* into a temp dir and run 'setup.py install' there."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install'):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
    finally:
        # always restore the caller's working directory
        os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* and build the distribute egg into *to_dir*.

    Raises IOError if the expected egg file does not exist afterwards.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()

        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)

        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)

    finally:
        # always restore the caller's working directory
        os.chdir(old_wd)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Download (if needed), build and activate the distribute egg."""
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    # make the freshly built egg importable and remember it for bootstrap
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure distribute >= *version* is importable, downloading if needed.

    Falls back to a download when pkg_resources is missing, belongs to
    plain setuptools (no '_distribute' marker) or is too old. With
    no_fake=False a fake setuptools egg-info is also written to *to_dir*.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            if not hasattr(pkg_resources, '_distribute'):
                # plain setuptools is installed; optionally fake it, then
                # force the download path below
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>="+version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                # too late to swap the module out from under the caller
                sys.stderr.write(
                    "The required version of distribute (>=%s) is not available,\n"
                    "and can't be installed while this script is running. Please\n"
                    "install a more recent version first, using\n"
                    "'easy_install -U distribute'."
                    "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    try:
        # Python 3 location first, Python 2 fallback
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _no_sandbox(function):
    """Decorator disabling the setuptools DirectorySandbox around *function*.

    Temporarily replaces DirectorySandbox._violation with a no-op so the
    wrapped function may write outside the sandboxed directory; the
    original handler is restored afterwards.
    """
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            if not hasattr(DirectorySandbox, '_old'):
                def violation(*args):
                    pass
                DirectorySandbox._old = DirectorySandbox._violation
                DirectorySandbox._violation = violation
                patched = True
            else:
                # another wrapper is already active; don't patch twice
                patched = False
        except ImportError:
            patched = False

        try:
            return function(*args, **kw)
        finally:
            if patched:
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old

    return __no_sandbox
def _patch_file(path, content):
    """Will backup the file then patch it"""
    existing_content = open(path).read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # keep the original around with a timestamped '.OLD' suffix
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True

# run unsandboxed so the patch may be written outside the build dir
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s into %s', path, new_name)
os.rename(path, new_name)
return new_name
def _remove_flat_installation(placeholder):
    """Neutralize a flat (non-egg) setuptools installation in *placeholder*.

    Replaces the setuptools egg-info with the fake PKG-INFO and renames the
    'setuptools', 'pkg_resources.py' and 'site.py' entries out of the way.
    Returns True on success, False when nothing was (or could be) done.
    """
    if not os.path.isdir(placeholder):
        # message typo fixed ('Unkown' -> 'Unknown')
        log.warn('Unknown installation at %s', placeholder)
        return False
    found = False
    for file in os.listdir(placeholder):
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        # return False explicitly for consistency (was a bare 'return')
        return False

    log.warn('Removing elements out of the way...')
    pkg_info = os.path.join(placeholder, file)
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)

    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True

_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
    """Post-install hook: plant the fake setuptools metadata next to it."""
    log.warn('After install bootstrap.')
    # pure-python install target of the distribution being installed
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
    """Write a fake setuptools egg-info and .pth file into *placeholder*.

    Makes pkg_resources believe setuptools SETUPTOOLS_FAKED_VERSION is
    installed, so distribute can satisfy the dependency. No-op if the
    directory is missing or the metadata already exists.
    """
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
        (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return

    log.warn('Creating %s', pkg_info)
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()

    # the .pth file makes the fake egg-info importable at startup
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()

_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
def _patch_egg_dir(path):
    """Replace an egg directory with one containing only the fake PKG-INFO.

    Returns False when the directory is already patched, True otherwise.
    """
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    # move the real egg aside and build a minimal replacement
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True

_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
    """Pre-install hook: neutralize any plain setuptools installation."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install')+1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
top_dir = arg.split('root=')[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index+1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
    """Locate an installed plain setuptools and replace it with fake metadata.

    Scans the pkg_resources working set; when a setuptools distribution is
    found inside the current install target it is patched (flat or egg
    layout) and the process is relaunched so the change takes effect.
    """
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
                                  replacement=False))
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))

    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)

    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return

    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
                _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patched done.')
    _relaunch()
def _relaunch():
    """Re-exec the current setup.py run so the patched metadata is seen."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip marker to avoid a relaunch bug
    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    Backport of TarFile.extractall for Python versions lacking it;
    *self* is the TarFile instance.
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories.
    if sys.version_info < (2, 4):
        # Python 2.3 has no 'key'/'reverse' sort arguments
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)

    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall.

    *argv* is accepted for backward compatibility but unused;
    *version* selects the distribute release to download.
    """
    # forward the requested version instead of silently ignoring it
    # (the original always downloaded DEFAULT_VERSION)
    tarball = download_setuptools(version=version)
    _install(tarball)


if __name__ == '__main__':
    main(sys.argv[1:])
| mit |
samfpetersen/gnuradio | gr-utils/python/utils/pyqt_plot.py | 91 | 14619 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pyqt_plot.ui'
#
# Created: Tue Oct 6 10:39:58 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
    """Build the widget tree of the plot window onto *MainWindow*.

    Auto-generated by pyuic4 from pyqt_plot.ui — regenerate rather than
    hand-edit; manual changes will be lost.
    NOTE(review): Qwt5 is referenced below but only QtCore/QtGui are
    imported in this file's header — confirm the real file also imports
    PyQt4.Qwt5.
    """
    # --- central widget and top-level layout -------------------------
    MainWindow.setObjectName("MainWindow")
    MainWindow.resize(927, 696)
    self.centralwidget = QtGui.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    self.gridLayout = QtGui.QGridLayout(self.centralwidget)
    self.gridLayout.setObjectName("gridLayout")
    self.plotHBar = QtGui.QScrollBar(self.centralwidget)
    self.plotHBar.setOrientation(QtCore.Qt.Horizontal)
    self.plotHBar.setObjectName("plotHBar")
    self.gridLayout.addWidget(self.plotHBar, 2, 0, 1, 2)
    self.tabGroup = QtGui.QTabWidget(self.centralwidget)
    self.tabGroup.setObjectName("tabGroup")
    # --- "Time Domain" tab -------------------------------------------
    self.timeTab = QtGui.QWidget()
    self.timeTab.setObjectName("timeTab")
    self.horizontalLayout = QtGui.QHBoxLayout(self.timeTab)
    self.horizontalLayout.setObjectName("horizontalLayout")
    self.timePlot = Qwt5.QwtPlot(self.timeTab)
    self.timePlot.setObjectName("timePlot")
    self.horizontalLayout.addWidget(self.timePlot)
    self.tabGroup.addTab(self.timeTab, "")
    # --- "Frequency Domain" tab (PSD) --------------------------------
    self.freqTab = QtGui.QWidget()
    self.freqTab.setObjectName("freqTab")
    self.horizontalLayout_2 = QtGui.QHBoxLayout(self.freqTab)
    self.horizontalLayout_2.setObjectName("horizontalLayout_2")
    self.fftPropBox = QtGui.QGroupBox(self.freqTab)
    self.fftPropBox.setMinimumSize(QtCore.QSize(160, 0))
    self.fftPropBox.setObjectName("fftPropBox")
    self.formLayout_4 = QtGui.QFormLayout(self.fftPropBox)
    self.formLayout_4.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
    self.formLayout_4.setObjectName("formLayout_4")
    self.psdFFTComboBox = QtGui.QComboBox(self.fftPropBox)
    self.psdFFTComboBox.setMinimumSize(QtCore.QSize(96, 0))
    self.psdFFTComboBox.setMaximumSize(QtCore.QSize(96, 16777215))
    self.psdFFTComboBox.setObjectName("psdFFTComboBox")
    self.formLayout_4.setWidget(0, QtGui.QFormLayout.FieldRole, self.psdFFTComboBox)
    self.psdFFTSizeLabel = QtGui.QLabel(self.fftPropBox)
    self.psdFFTSizeLabel.setObjectName("psdFFTSizeLabel")
    self.formLayout_4.setWidget(0, QtGui.QFormLayout.LabelRole, self.psdFFTSizeLabel)
    self.horizontalLayout_2.addWidget(self.fftPropBox)
    self.freqPlot = Qwt5.QwtPlot(self.freqTab)
    self.freqPlot.setObjectName("freqPlot")
    self.horizontalLayout_2.addWidget(self.freqPlot)
    self.tabGroup.addTab(self.freqTab, "")
    # --- "Spectrogram" tab -------------------------------------------
    self.specTab = QtGui.QWidget()
    self.specTab.setObjectName("specTab")
    self.horizontalLayout_3 = QtGui.QHBoxLayout(self.specTab)
    self.horizontalLayout_3.setObjectName("horizontalLayout_3")
    self.groupBox = QtGui.QGroupBox(self.specTab)
    self.groupBox.setObjectName("groupBox")
    self.formLayout_3 = QtGui.QFormLayout(self.groupBox)
    self.formLayout_3.setObjectName("formLayout_3")
    self.specFFTLabel = QtGui.QLabel(self.groupBox)
    self.specFFTLabel.setObjectName("specFFTLabel")
    self.formLayout_3.setWidget(1, QtGui.QFormLayout.LabelRole, self.specFFTLabel)
    self.specFFTComboBox = QtGui.QComboBox(self.groupBox)
    self.specFFTComboBox.setMinimumSize(QtCore.QSize(96, 0))
    self.specFFTComboBox.setMaximumSize(QtCore.QSize(96, 16777215))
    self.specFFTComboBox.setObjectName("specFFTComboBox")
    self.formLayout_3.setWidget(1, QtGui.QFormLayout.FieldRole, self.specFFTComboBox)
    self.horizontalLayout_3.addWidget(self.groupBox)
    self.specPlot = Qwt5.QwtPlot(self.specTab)
    self.specPlot.setObjectName("specPlot")
    self.horizontalLayout_3.addWidget(self.specPlot)
    self.tabGroup.addTab(self.specTab, "")
    self.gridLayout.addWidget(self.tabGroup, 1, 0, 1, 1)
    # --- "File Position" box (sample/time ranges) --------------------
    self.filePosBox = QtGui.QGroupBox(self.centralwidget)
    self.filePosBox.setMinimumSize(QtCore.QSize(0, 120))
    self.filePosBox.setObjectName("filePosBox")
    self.formLayoutWidget_2 = QtGui.QWidget(self.filePosBox)
    self.formLayoutWidget_2.setGeometry(QtCore.QRect(0, 20, 160, 92))
    self.formLayoutWidget_2.setObjectName("formLayoutWidget_2")
    self.filePosLayout = QtGui.QFormLayout(self.formLayoutWidget_2)
    self.filePosLayout.setObjectName("filePosLayout")
    self.filePosStartLabel = QtGui.QLabel(self.formLayoutWidget_2)
    self.filePosStartLabel.setObjectName("filePosStartLabel")
    self.filePosLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.filePosStartLabel)
    self.filePosStartLineEdit = QtGui.QLineEdit(self.formLayoutWidget_2)
    self.filePosStartLineEdit.setObjectName("filePosStartLineEdit")
    self.filePosLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.filePosStartLineEdit)
    self.filePosStopLabel = QtGui.QLabel(self.formLayoutWidget_2)
    self.filePosStopLabel.setObjectName("filePosStopLabel")
    self.filePosLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.filePosStopLabel)
    self.filePosStopLineEdit = QtGui.QLineEdit(self.formLayoutWidget_2)
    self.filePosStopLineEdit.setObjectName("filePosStopLineEdit")
    self.filePosLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.filePosStopLineEdit)
    self.filePosLengthLabel = QtGui.QLabel(self.formLayoutWidget_2)
    self.filePosLengthLabel.setObjectName("filePosLengthLabel")
    self.filePosLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.filePosLengthLabel)
    self.filePosLengthLineEdit = QtGui.QLineEdit(self.formLayoutWidget_2)
    self.filePosLengthLineEdit.setObjectName("filePosLengthLineEdit")
    self.filePosLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.filePosLengthLineEdit)
    self.formLayoutWidget_4 = QtGui.QWidget(self.filePosBox)
    self.formLayoutWidget_4.setGeometry(QtCore.QRect(180, 20, 231, 92))
    self.formLayoutWidget_4.setObjectName("formLayoutWidget_4")
    self.fileTimeLayout = QtGui.QFormLayout(self.formLayoutWidget_4)
    self.fileTimeLayout.setObjectName("fileTimeLayout")
    self.fileTimeStartLabel = QtGui.QLabel(self.formLayoutWidget_4)
    self.fileTimeStartLabel.setObjectName("fileTimeStartLabel")
    self.fileTimeLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.fileTimeStartLabel)
    self.fileTimeStartLineEdit = QtGui.QLineEdit(self.formLayoutWidget_4)
    self.fileTimeStartLineEdit.setObjectName("fileTimeStartLineEdit")
    self.fileTimeLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.fileTimeStartLineEdit)
    self.fileTimeStopLabel = QtGui.QLabel(self.formLayoutWidget_4)
    self.fileTimeStopLabel.setObjectName("fileTimeStopLabel")
    self.fileTimeLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.fileTimeStopLabel)
    self.fileTimeStopLineEdit = QtGui.QLineEdit(self.formLayoutWidget_4)
    self.fileTimeStopLineEdit.setObjectName("fileTimeStopLineEdit")
    self.fileTimeLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.fileTimeStopLineEdit)
    self.fileTimeLengthLabel = QtGui.QLabel(self.formLayoutWidget_4)
    self.fileTimeLengthLabel.setObjectName("fileTimeLengthLabel")
    self.fileTimeLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.fileTimeLengthLabel)
    self.fileTimeLengthLineEdit = QtGui.QLineEdit(self.formLayoutWidget_4)
    self.fileTimeLengthLineEdit.setObjectName("fileTimeLengthLineEdit")
    self.fileTimeLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.fileTimeLengthLineEdit)
    # --- "System Properties" box (sample rate) -----------------------
    self.sysGroupBox = QtGui.QGroupBox(self.filePosBox)
    self.sysGroupBox.setGeometry(QtCore.QRect(530, 0, 200, 120))
    self.sysGroupBox.setMinimumSize(QtCore.QSize(200, 0))
    self.sysGroupBox.setObjectName("sysGroupBox")
    self.formLayoutWidget_3 = QtGui.QWidget(self.sysGroupBox)
    self.formLayoutWidget_3.setGeometry(QtCore.QRect(0, 20, 191, 91))
    self.formLayoutWidget_3.setObjectName("formLayoutWidget_3")
    self.formLayout_2 = QtGui.QFormLayout(self.formLayoutWidget_3)
    self.formLayout_2.setObjectName("formLayout_2")
    self.sampleRateLabel = QtGui.QLabel(self.formLayoutWidget_3)
    self.sampleRateLabel.setObjectName("sampleRateLabel")
    self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.sampleRateLabel)
    self.sampleRateLineEdit = QtGui.QLineEdit(self.formLayoutWidget_3)
    self.sampleRateLineEdit.setMinimumSize(QtCore.QSize(0, 0))
    self.sampleRateLineEdit.setObjectName("sampleRateLineEdit")
    self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.sampleRateLineEdit)
    # --- "Display" box (color selection) -----------------------------
    self.displayGroupBox = QtGui.QGroupBox(self.filePosBox)
    self.displayGroupBox.setGeometry(QtCore.QRect(730, 0, 170, 120))
    self.displayGroupBox.setMinimumSize(QtCore.QSize(170, 0))
    self.displayGroupBox.setObjectName("displayGroupBox")
    self.verticalLayoutWidget = QtGui.QWidget(self.displayGroupBox)
    self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 20, 160, 91))
    self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
    self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
    self.verticalLayout.setObjectName("verticalLayout")
    self.colorComboBox = QtGui.QComboBox(self.verticalLayoutWidget)
    self.colorComboBox.setObjectName("colorComboBox")
    self.verticalLayout.addWidget(self.colorComboBox)
    spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
    self.verticalLayout.addItem(spacerItem)
    self.gridLayout.addWidget(self.filePosBox, 3, 0, 1, 1)
    MainWindow.setCentralWidget(self.centralwidget)
    # --- menu bar, status bar and actions ----------------------------
    self.menubar = QtGui.QMenuBar(MainWindow)
    self.menubar.setGeometry(QtCore.QRect(0, 0, 927, 25))
    self.menubar.setObjectName("menubar")
    self.menu_File = QtGui.QMenu(self.menubar)
    self.menu_File.setObjectName("menu_File")
    MainWindow.setMenuBar(self.menubar)
    self.statusbar = QtGui.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.action_open = QtGui.QAction(MainWindow)
    self.action_open.setObjectName("action_open")
    self.action_exit = QtGui.QAction(MainWindow)
    self.action_exit.setObjectName("action_exit")
    self.action_reload = QtGui.QAction(MainWindow)
    self.action_reload.setObjectName("action_reload")
    self.menu_File.addAction(self.action_open)
    self.menu_File.addAction(self.action_reload)
    self.menu_File.addAction(self.action_exit)
    self.menubar.addAction(self.menu_File.menuAction())
    # --- translations and signal wiring ------------------------------
    self.retranslateUi(MainWindow)
    self.tabGroup.setCurrentIndex(0)
    QtCore.QObject.connect(self.action_exit, QtCore.SIGNAL("activated()"), MainWindow.close)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    # (Re)applies every translatable UI string; presumably generated by pyuic4
    # from a Qt Designer .ui file — if so, do not edit by hand, regenerate
    # from the .ui source instead.
    MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
    # Tab titles for the three plot views.
    self.tabGroup.setTabText(self.tabGroup.indexOf(self.timeTab), QtGui.QApplication.translate("MainWindow", "Time Domain", None, QtGui.QApplication.UnicodeUTF8))
    self.fftPropBox.setTitle(QtGui.QApplication.translate("MainWindow", "FFT Properties", None, QtGui.QApplication.UnicodeUTF8))
    self.psdFFTSizeLabel.setText(QtGui.QApplication.translate("MainWindow", "FFT Size", None, QtGui.QApplication.UnicodeUTF8))
    self.tabGroup.setTabText(self.tabGroup.indexOf(self.freqTab), QtGui.QApplication.translate("MainWindow", "Frequency Domain", None, QtGui.QApplication.UnicodeUTF8))
    self.groupBox.setTitle(QtGui.QApplication.translate("MainWindow", "Spectrogram Properties", None, QtGui.QApplication.UnicodeUTF8))
    self.specFFTLabel.setText(QtGui.QApplication.translate("MainWindow", "FFT Size", None, QtGui.QApplication.UnicodeUTF8))
    self.tabGroup.setTabText(self.tabGroup.indexOf(self.specTab), QtGui.QApplication.translate("MainWindow", "Spectrogram", None, QtGui.QApplication.UnicodeUTF8))
    # File-position group: sample counts and their time equivalents.
    self.filePosBox.setTitle(QtGui.QApplication.translate("MainWindow", "File Position", None, QtGui.QApplication.UnicodeUTF8))
    self.filePosStartLabel.setText(QtGui.QApplication.translate("MainWindow", "Start", None, QtGui.QApplication.UnicodeUTF8))
    self.filePosStopLabel.setText(QtGui.QApplication.translate("MainWindow", "Stop", None, QtGui.QApplication.UnicodeUTF8))
    self.filePosLengthLabel.setText(QtGui.QApplication.translate("MainWindow", "Length", None, QtGui.QApplication.UnicodeUTF8))
    self.fileTimeStartLabel.setText(QtGui.QApplication.translate("MainWindow", "time start (sec)", None, QtGui.QApplication.UnicodeUTF8))
    self.fileTimeStopLabel.setText(QtGui.QApplication.translate("MainWindow", "time stop (sec)", None, QtGui.QApplication.UnicodeUTF8))
    self.fileTimeLengthLabel.setText(QtGui.QApplication.translate("MainWindow", "time length (sec)", None, QtGui.QApplication.UnicodeUTF8))
    self.sysGroupBox.setTitle(QtGui.QApplication.translate("MainWindow", "System Properties", None, QtGui.QApplication.UnicodeUTF8))
    self.sampleRateLabel.setText(QtGui.QApplication.translate("MainWindow", "Sample Rate", None, QtGui.QApplication.UnicodeUTF8))
    self.displayGroupBox.setTitle(QtGui.QApplication.translate("MainWindow", "Display Properties", None, QtGui.QApplication.UnicodeUTF8))
    # Menu bar and actions (with keyboard shortcuts).
    self.menu_File.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
    self.action_open.setText(QtGui.QApplication.translate("MainWindow", "&Open", None, QtGui.QApplication.UnicodeUTF8))
    self.action_open.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+O", None, QtGui.QApplication.UnicodeUTF8))
    self.action_exit.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
    self.action_reload.setText(QtGui.QApplication.translate("MainWindow", "&Reload", None, QtGui.QApplication.UnicodeUTF8))
    self.action_reload.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+R", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import Qwt5
| gpl-3.0 |
fenglu-g/incubator-airflow | airflow/utils/log/logging_mixin.py | 6 | 5510 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys
import warnings
import six
from builtins import object
from contextlib import contextmanager
from logging import Handler, StreamHandler
class LoggingMixin(object):
    """Mixin that equips a class with a lazily created, class-named logger.

    The logger is obtained as a child of the root logger, named
    ``<module>.<ClassName>``, and cached on the instance after first access.
    """

    def __init__(self, context=None):
        self._set_context(context)

    # ``logger`` predates ``log``; it is kept only for backwards
    # compatibility and will be removed in Airflow 2.0.
    @property
    def logger(self):
        qualified_name = '{}.{}'.format(
            self.__class__.__module__, self.__class__.__name__)
        warnings.warn(
            'Initializing logger for {} using logger(), which will '
            'be replaced by .log in Airflow 2.0'.format(qualified_name),
            DeprecationWarning
        )
        return self.log

    @property
    def log(self):
        # Create and cache the logger on first access.
        if not hasattr(self, '_log'):
            qualified_name = '{}.{}'.format(
                self.__class__.__module__, self.__class__.__name__)
            self._log = logging.root.getChild(qualified_name)
        return self._log

    def _set_context(self, context):
        if context is None:
            return
        set_context(self.log, context)
# TODO: Formally inherit from io.IOBase
class StreamLogWriter(object):
    """File-like object that redirects writes (e.g. stdout/stderr) to a logger.

    Text is buffered until a newline is seen, at which point the accumulated
    line is emitted as a single log record at the configured level.
    """

    # Some libraries probe ``stream.encoding``; expose a falsy placeholder
    # since text is forwarded to the logging framework, not encoded here.
    # (Previously this assignment preceded the class docstring, which turned
    # the docstring into a dead string expression.)
    encoding = False

    def __init__(self, logger, level):
        """
        :param logger: the logger that write() forwards complete lines to
        :param level: the log level to emit records at, e.g. ``logging.INFO``
        """
        self.logger = logger
        self.level = level
        self._buffer = ''

    @property
    def closed(self):
        """
        Returns False to indicate that the stream is not closed (as it will be
        open for the duration of Airflow's lifecycle).

        For compatibility with the io.IOBase interface.
        """
        return False

    def write(self, message):
        """
        Do whatever it takes to actually log the specified logging record

        :param message: message to log
        """
        # Accumulate unconditionally; only a trailing newline flushes a record.
        # (The original duplicated the append in both branches of an if/else.)
        self._buffer += message
        if message.endswith("\n"):
            self.logger.log(self.level, self._buffer.rstrip())
            self._buffer = ''

    def flush(self):
        """
        Ensure all logging output has been flushed
        """
        if self._buffer:
            self.logger.log(self.level, self._buffer)
            self._buffer = ''

    def isatty(self):
        """
        Returns False to indicate the fd is not connected to a tty(-like) device.
        For compatibility reasons.
        """
        return False
class RedirectStdHandler(StreamHandler):
    """
    A StreamHandler that resolves sys.stdout/sys.stderr at emit time: it
    always writes to whatever stream is *currently* installed, instead of
    the object sys.stdout/sys.stderr pointed at when the handler was built.
    """

    def __init__(self, stream):
        if not isinstance(stream, six.string_types):
            raise Exception("Cannot use file like objects. Use 'stdout' or 'stderr'"
                            " as a str and without 'ext://'.")
        # Default to stderr unless 'stdout' was explicitly requested.
        self._use_stderr = 'stdout' not in stream
        # Skip StreamHandler.__init__, which would pin self.stream to a
        # concrete object; our `stream` property resolves it dynamically.
        Handler.__init__(self)

    @property
    def stream(self):
        return sys.stderr if self._use_stderr else sys.stdout
@contextmanager
def redirect_stdout(logger, level):
    """Temporarily route sys.stdout into *logger* at *level*."""
    sys.stdout = StreamLogWriter(logger, level)
    try:
        yield
    finally:
        # Always restore the interpreter's original stdout.
        sys.stdout = sys.__stdout__
@contextmanager
def redirect_stderr(logger, level):
    """Temporarily route sys.stderr into *logger* at *level*."""
    sys.stderr = StreamLogWriter(logger, level)
    try:
        yield
    finally:
        # Always restore the interpreter's original stderr.
        sys.stderr = sys.__stderr__
def set_context(logger, value):
    """
    Walk *logger* and its ancestors, handing *value* to every handler that
    accepts a context.

    :param logger: logger at which to start the walk
    :param value: context value passed to each handler's ``set_context``
    """
    current = logger
    while current:
        for handler in current.handlers:
            try:
                handler.set_context(value)
            except AttributeError:
                # Not every handler takes a context; those simply define no
                # set_context and are skipped.
                pass
        # Continue up the hierarchy only while propagation is enabled.
        current = current.parent if current.propagate is True else None
| apache-2.0 |
ygravrand/dpxdt | dpxdt/server/__init__.py | 6 | 1711 | #!/usr/bin/env python
# Copyright 2013 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for the API server."""
import datetime
import logging
import os
# Local libraries
from flask import Flask, url_for
from flask.ext.cache import Cache
from flask.ext.login import LoginManager
from flask.ext.mail import Mail
from flask.ext.sqlalchemy import SQLAlchemy
import jinja2
# Local modules required for app setup
import config
# Application object plus its extensions (DB, auth, cache, mail).
app = Flask(__name__)
app.config.from_object(config)
# A deployment can override the checked-in config with a settings file
# pointed at by this environment variable.
if 'YOURAPPLICATION_SETTINGS' in os.environ:
    app.config.from_envvar('YOURAPPLICATION_SETTINGS')

db = SQLAlchemy(
    app,
    # Don't expire model instances on commit. Let functions continue to
    # quickly read properties from their last known-good state.
    session_options=dict(expire_on_commit=False))

login = LoginManager(app)
login.login_view = 'login_view'
login.refresh_view = 'login_view'

cache = Cache(app)

mail = Mail(app)

# Modules with handlers to register with the app.
# NOTE(review): these imports sit at the bottom on purpose — presumably the
# handler modules import `app`/`db` defined above, so moving them to the top
# would create a circular import. Verify before reordering.
from dpxdt.server import api
from dpxdt.server import auth
from dpxdt.server import emails
from dpxdt.server import frontend
from dpxdt.server import work_queue
from dpxdt.server import work_queue_handlers
| apache-2.0 |
initNirvana/Easyphotos | env/lib/python3.4/site-packages/IPython/nbformat/v3/__init__.py | 18 | 2328 | """The main API for the v3 notebook format.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
__all__ = ['NotebookNode', 'new_code_cell', 'new_text_cell', 'new_notebook',
'new_output', 'new_worksheet', 'new_metadata', 'new_author',
'new_heading_cell', 'nbformat', 'nbformat_minor', 'nbformat_schema',
'reads_json', 'writes_json', 'read_json', 'write_json',
'to_notebook_json', 'reads_py', 'writes_py', 'read_py', 'write_py',
'to_notebook_py', 'downgrade', 'upgrade', 'parse_filename'
]
import os
from .nbbase import (
NotebookNode,
new_code_cell, new_text_cell, new_notebook, new_output, new_worksheet,
new_metadata, new_author, new_heading_cell, nbformat, nbformat_minor,
nbformat_schema
)
from .nbjson import reads as reads_json, writes as writes_json
from .nbjson import reads as read_json, writes as write_json
from .nbjson import to_notebook as to_notebook_json
from .nbpy import reads as reads_py, writes as writes_py
from .nbpy import reads as read_py, writes as write_py
from .nbpy import to_notebook as to_notebook_py
from .convert import downgrade, upgrade
def parse_filename(fname):
    """Parse a notebook filename into its filename, name and format.

    The format is decided by the extension:

    * ``notebook.ipynb`` -> (``notebook.ipynb``, ``notebook``, ``json``)
    * ``notebook.json``  -> (``notebook.json``, ``notebook``, ``json``)
    * ``notebook.py``    -> (``notebook.py``, ``notebook``, ``py``)
    * ``notebook``       -> (``notebook.ipynb``, ``notebook``, ``json``)

    Parameters
    ----------
    fname : unicode
        The notebook filename, with or without one of the recognized
        extensions (.ipynb, .json, .py); .ipynb is assumed otherwise.

    Returns
    -------
    (fname, name, format) : (unicode, unicode, unicode)
        The filename, notebook name and format.
    """
    formats_by_ext = {u'.ipynb': u'json', u'.json': u'json', u'.py': u'py'}
    basename, ext = os.path.splitext(fname)
    if ext in formats_by_ext:
        return fname, basename, formats_by_ext[ext]
    # Unrecognized extension: keep the whole argument as the name and fall
    # back to the JSON notebook format.
    return fname + u'.ipynb', fname, u'json'
| mit |
wschwa/Mr-Orange-Sick-Beard | lib/requests/packages/chardet2/escprober.py | 52 | 3094 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .escsm import HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel, ISO2022KRSMModel
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
class EscCharSetProber(CharSetProber):
    """Prober for charsets that announce themselves with escape sequences
    (HZ-GB-2312, ISO-2022-CN, ISO-2022-JP, ISO-2022-KR).

    Runs one coding state machine per candidate encoding over the input and
    declares the first machine that reaches its "its me" state the winner.
    Naming (``_mFoo``) follows the upstream Mozilla C++ code this is a port
    of; keep the structure aligned with upstream.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # One coding state machine per candidate escape-based encoding.
        self._mCodingSM = [
            CodingStateMachine(HZSMModel),
            CodingStateMachine(ISO2022CNSMModel),
            CodingStateMachine(ISO2022JPSMModel),
            CodingStateMachine(ISO2022KRSMModel)
        ]
        self.reset()

    def reset(self):
        # Reactivate every state machine and forget any previous verdict.
        CharSetProber.reset(self)
        for codingSM in self._mCodingSM:
            if not codingSM: continue
            codingSM.active = True
            codingSM.reset()
        # Number of machines still in the running.
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        # Name of the winning charset, or None while undecided.
        return self._mDetectedCharset

    def get_confidence(self):
        # Escape sequences are essentially unambiguous, so once a machine
        # has matched the confidence is near-certain.
        if self._mDetectedCharset:
            return 0.99
        else:
            return 0.00

    def feed(self, aBuf):
        # Feed each byte to every still-active machine. A machine that hits
        # an error state is eliminated; the first to reach "its me" wins.
        for c in aBuf:
            # PY3K: aBuf is a byte array, so c is an int, not a byte
            for codingSM in self._mCodingSM:
                if not codingSM: continue
                if not codingSM.active: continue
                codingState = codingSM.next_state(c)
                if codingState == constants.eError:
                    # This encoding cannot produce that byte here; drop it.
                    codingSM.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        # Every candidate ruled out: definitely not us.
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif codingState == constants.eItsMe:
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = codingSM.get_coding_state_machine()
                    return self.get_state()
        return self.get_state()
| gpl-3.0 |
LiquidSmooth-Devices/android_kernel_moto_shamu | tools/perf/util/setup.py | 2079 | 1438 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect extension build output into the directories the perf Makefile
    # passes in via the environment (see the module-level build_lib/build_tmp
    # globals, read from PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same redirected build directory (module-level
    # build_lib global, taken from the PYTHON_EXTBUILD_LIB env var below).
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
# Extend with whatever CFLAGS the kernel build exported.
cflags += getenv('CFLAGS', '').split()

# Build locations and pre-built libraries handed in by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')

# Extension sources are listed one path per line in util/python-ext-sources;
# blank lines and lines starting with '#' are skipped.
# NOTE: file() is Python-2-only, consistent with the #!/usr/bin/python2
# shebang of this script.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 extra_objects = [libtraceevent, liblk],
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
taigaio/taiga-back | taiga/projects/notifications/validators.py | 1 | 1824 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from taiga.base.exceptions import ValidationError
class WatchersValidator:
    """Serializer mixin that checks requested watchers against the project."""

    def validate_watchers(self, attrs, source):
        candidate_ids = attrs.get(source, [])

        # Figure out which project this validation applies to: an incoming
        # "project" attribute on create, the bound object's project on update.
        if self.object is None and "project" in attrs:
            project = attrs["project"]
        elif self.object:
            project = self.object.project
        else:
            project = None

        # With no project available we pass through untouched: another
        # validator is responsible for flagging the empty project field.
        if not project:
            return attrs

        # Every requested watcher must already be a project member or an
        # existing watcher of the project.
        member_ids = project.members.values_list("id", flat=True)
        existing_watcher_ids = project.get_watchers().values_list("id", flat=True)
        allowed_ids = set(member_ids) | set(existing_watcher_ids)
        unknown_ids = set(candidate_ids) - allowed_ids
        if unknown_ids:
            raise ValidationError(_("Watchers contains invalid users"))
        return attrs
| agpl-3.0 |
negronjl/kolla | tools/validate-json.py | 14 | 1155 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import sys
def parse_args():
    """Build and apply the command-line parser: any number of input paths."""
    parser = argparse.ArgumentParser()
    parser.add_argument('input', nargs='*')
    return parser.parse_args()
def main():
    """Validate each input file as JSON; exit non-zero if any file fails."""
    options = parse_args()
    logging.basicConfig()
    exit_code = 0
    for path in options.input:
        with open(path) as handle:
            try:
                json.load(handle)
            except ValueError as error:
                exit_code = 1
                logging.error('%s failed validation: %s',
                              path, error)
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
| apache-2.0 |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/distributions/python/ops/mvn_full_covariance.py | 13 | 7544 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution class initialized with a full covariance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import mvn_tril
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.util import deprecation
__all__ = [
"MultivariateNormalFullCovariance",
]
class MultivariateNormalFullCovariance(mvn_tril.MultivariateNormalTriL):
  """The multivariate normal distribution on `R^k`.

  The Multivariate Normal distribution is defined over `R^k` and parameterized
  by a (batch of) length-`k` `loc` vector (aka "mu") and a (batch of) `k x k`
  `covariance_matrix` matrices that are the covariance.
  This is different than the other multivariate normals, which are parameterized
  by a matrix more akin to the standard deviation.

  #### Mathematical Details

  The probability density function (pdf) is, with `@` as matrix multiplication,

  ```none
  pdf(x; loc, covariance_matrix) = exp(-0.5 y) / Z,
  y = (x - loc)^T @ inv(covariance_matrix) @ (x - loc)
  Z = (2 pi)**(0.5 k) |det(covariance_matrix)|**(0.5).
  ```

  where:

  * `loc` is a vector in `R^k`,
  * `covariance_matrix` is an `R^{k x k}` symmetric positive definite matrix,
  * `Z` denotes the normalization constant.

  Additional leading dimensions (if any) in `loc` and `covariance_matrix` allow
  for batch dimensions.

  The MultivariateNormal distribution is a member of the [location-scale
  family](https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed e.g. as,

  ```none
  X ~ MultivariateNormal(loc=0, scale=1) # Identity scale, zero shift.
  scale = Cholesky(covariance_matrix)
  Y = scale @ X + loc
  ```

  #### Examples

  ```python
  tfd = tf.contrib.distributions

  # Initialize a single 3-variate Gaussian.
  mu = [1., 2, 3]
  cov = [[ 0.36, 0.12, 0.06],
         [ 0.12, 0.29, -0.13],
         [ 0.06, -0.13, 0.26]]
  mvn = tfd.MultivariateNormalFullCovariance(
      loc=mu,
      covariance_matrix=cov)

  mvn.mean().eval()
  # ==> [1., 2, 3]

  # Covariance agrees with covariance_matrix.
  mvn.covariance().eval()
  # ==> [[ 0.36, 0.12, 0.06],
  #      [ 0.12, 0.29, -0.13],
  #      [ 0.06, -0.13, 0.26]]

  # Compute the pdf of an observation in `R^3` ; return a scalar.
  mvn.prob([-1., 0, 1]).eval()  # shape: []

  # Initialize a 2-batch of 3-variate Gaussians.
  mu = [[1., 2, 3],
        [11, 22, 33]]  # shape: [2, 3]
  covariance_matrix = ...  # shape: [2, 3, 3], symmetric, positive definite.
  mvn = tfd.MultivariateNormalFullCovariance(
      loc=mu,
      covariance=covariance_matrix)

  # Compute the pdf of two `R^3` observations; return a length-2 vector.
  x = [[-0.9, 0, 0.1],
       [-10, 0, 9]]  # shape: [2, 3]
  mvn.prob(x).eval()  # shape: [2]
  ```

  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               loc=None,
               covariance_matrix=None,
               validate_args=False,
               allow_nan_stats=True,
               name="MultivariateNormalFullCovariance"):
    """Construct Multivariate Normal distribution on `R^k`.

    The `batch_shape` is the broadcast shape between `loc` and
    `covariance_matrix` arguments.

    The `event_shape` is given by last dimension of the matrix implied by
    `covariance_matrix`. The last dimension of `loc` (if provided) must
    broadcast with this.

    A non-batch `covariance_matrix` matrix is a `k x k` symmetric positive
    definite matrix. In other words it is (real) symmetric with all eigenvalues
    strictly positive.

    Additional leading dimensions (if any) will index batches.

    Args:
      loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
        implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
        `b >= 0` and `k` is the event size.
      covariance_matrix: Floating-point, symmetric positive definite `Tensor` of
        same `dtype` as `loc`. The strict upper triangle of `covariance_matrix`
        is ignored, so if `covariance_matrix` is not symmetric no error will be
        raised (unless `validate_args is True`). `covariance_matrix` has shape
        `[B1, ..., Bb, k, k]` where `b >= 0` and `k` is the event size.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: if neither `loc` nor `covariance_matrix` are specified.
    """
    # Captured before any locals are introduced so that only the constructor
    # arguments are recorded.
    parameters = dict(locals())
    # Convert the covariance_matrix up to a scale_tril and call MVNTriL.
    with ops.name_scope(name) as name:
      with ops.name_scope("init", values=[loc, covariance_matrix]):
        if covariance_matrix is None:
          scale_tril = None
        else:
          covariance_matrix = ops.convert_to_tensor(
              covariance_matrix, name="covariance_matrix")
          if validate_args:
            # Symmetry is only enforced under validate_args; otherwise the
            # strict upper triangle is silently ignored by cholesky().
            covariance_matrix = control_flow_ops.with_dependencies([
                check_ops.assert_near(
                    covariance_matrix,
                    array_ops.matrix_transpose(covariance_matrix),
                    message="Matrix was not symmetric")], covariance_matrix)
          # No need to validate that covariance_matrix is non-singular.
          # LinearOperatorLowerTriangular has an assert_non_singular method that
          # is called by the Bijector.
          # However, cholesky() ignores the upper triangular part, so we do need
          # to separately assert symmetric.
          scale_tril = linalg_ops.cholesky(covariance_matrix)
      super(MultivariateNormalFullCovariance, self).__init__(
          loc=loc,
          scale_tril=scale_tril,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
| apache-2.0 |
iurilarosa/thesis | codici/Archiviati/Notebook Completi/Hough64HWI.py | 1 | 11654 | import tensorflow as tf
import numpy
import scipy.io
from tensorflow.python.client import timeline
import time
sessione = tf.Session()

# ---- LOAD DATA ----
tFft = 8192
inputFreq = 187
# Above 128 Hz the shorter FFT length is used.
if inputFreq > 128:
    tFft = 4096
print(tFft)
#tFft = 4096
tObs = 9  # months
tObs = tObs*30*24*60*60  # months -> seconds
nPunti = 2
cands = 100
# Input/output .mat paths: sky grid, ecliptic patch, peakmap data, results.
percorsoDati = ("dati/dati9mesi%dHWI.mat"% inputFreq)
percorsoQuad = ("quadHWI%d.mat"% inputFreq)
percorsoPatch = ("quadHWI%dEcl.mat"% inputFreq)
percorsoOut = ("/home/protoss/Documenti/TESI/HWITest/mio%d.mat" % inputFreq)
#percorsoQuad = ("quad%dLIL.mat" % tFft)
#percorsoPatch = ("quad%dEclNew.mat" % tFft)
# Load data files.
quadrato = scipy.io.loadmat(percorsoQuad)['quad'].astype(numpy.float64)
patch = scipy.io.loadmat(percorsoPatch)['quadratoEclNew'].astype(numpy.float64)
print(patch)
struttura = scipy.io.loadmat(percorsoDati)['job_pack_0']
tempi = struttura['peaks'][0,0][0]#.astype(numpy.float32)
frequenze = struttura['peaks'][0,0][1]#.astype(numpy.float32)
pesi = (struttura['peaks'][0,0][4]+1)#.astype(numpy.float32)
#print(tempi.size,frequenze.size)
# NB: the peaks array holds 0-times
#                           1-frequencies
#                           2-weights
# Various headers
securbelt = 4000
#securbelt = 4000*3
# Frequency grid
stepFreq = 1/tFft
enhancement = 10
stepFreqRaffinato = stepFreq/enhancement
# Times
# Epoch defined as the median of the times of the whole run
# WARNING: to be redefined using the span of the data actually taken.
epoca = (57722+57990)/2
# Spindowns
spindownMin = -1e-9
spindownMax = 1e-10
if inputFreq > 128:
    securbelt = 4000*2
    spindownMin = -9e-9
    spindownMax = -8e-9
stepSpindown = stepFreq/tObs
nstepSpindown = numpy.round((spindownMax-spindownMin)/stepSpindown).astype(numpy.int64)
spindowns = numpy.arange(0, nstepSpindown)
spindowns = numpy.multiply(spindowns,stepSpindown)
spindowns = numpy.add(spindowns, spindownMin)
# THE SPINDOWN STEP IS DEFINED BY THE PEAKMAP FREQUENCY STEP (in Hz)
# OVER THE OBSERVATION TIME (in s): STEPFREQ/TOBS! FROM THIS WE CHOOSE
# WHICH MAXIMUM AND MINIMUM SPINDOWN TO KEEP.
# AS MAXIMUM VALUE KEEP AROUND 1*-10^-9,
# AS MAXIMUM ALSO ABOUT 1*+10^-10.
# TAKE ANY TARGET SOURCE INTO ACCOUNT WHEN CHOOSING THE SPINDOWN RANGE.
# For the Doppler correction
veloc = struttura['basic_info'][0,0]['velpos'][0,0][0:3,:].astype(numpy.float64)
nTempi = struttura['basic_info'][0,0]['ntim'][0,0][0,0]
primoTempo = struttura['basic_info'][0,0]['tim0'][0,0][0,0]
indices = struttura['basic_info'][0,0]['index'][0,0][0]
# SPLIT DOPPLER CORRECTION AND HOUGH TRANSFORM
# ---- FUNCTION DEFINITIONS ----
#TODO: study the feasibility of running everything in 32-bit
# Doppler correction for each sky point.
def doppcorr(i):
    """Doppler-correct the peakmap frequencies for sky position *i*.

    Projects the detector velocity at each peak's time onto the sky
    direction, shifts every peak frequency accordingly, and expresses the
    corrected frequencies as (fractional) bin positions on the refined
    frequency grid.

    Reads module-level globals: quadrato, indices, frequenze, veloc,
    nTempi, stepFreq, stepFreqRaffinato, enhancement.

    :param i: index of the sky-grid point in ``quadrato``
    :return: (freqIniz, freqCorr, freqFinal) — origin frequency of the
             refined grid, corrected frequencies, refined-grid bin positions
    """
    skyPoint = quadrato[i]
    # MATLAB indices are 1-based; shift to 0-based slice boundaries.
    indicesOpt = indices - 1
    inizi = indicesOpt[:-1]
    fini = indicesOpt[1:]
    # Expand the per-time velocity vectors to one column per peak.
    velocitas = numpy.zeros((3, frequenze.size))
    # FIX: loop variable renamed from `i`, which shadowed the sky-point
    # parameter (it happened to be safe only because the parameter was
    # consumed before the loop).
    for t in numpy.arange(0, nTempi - 1):
        velocitas[:, inizi[t]:fini[t] + 1] = veloc[:, t:t + 1]
    # Radial velocity projection v.n for every peak, then f' = f / (1 + v.n).
    velPerPosIndex = numpy.dot(skyPoint, velocitas)
    divisoreIndex = 1 + velPerPosIndex
    freqCorr = frequenze / divisoreIndex
    # Header of the refined frequency grid.
    freqMin = numpy.amin(freqCorr)
    freqIniz = freqMin - stepFreq/2 - stepFreqRaffinato
    freqFinal = freqCorr - freqIniz
    freqFinal = (freqFinal/stepFreqRaffinato) - round(enhancement/2 + 0.001)
    return freqIniz, freqCorr, freqFinal
def noncorr():
    """Map the raw (non Doppler-corrected) peak frequencies onto the refined
    frequency grid and hand them to TensorFlow as a float64 constant.

    Reads module-level globals: frequenze, stepFreq, stepFreqRaffinato,
    enhancement.
    """
    lowestFreq = numpy.amin(frequenze)
    gridOrigin = lowestFreq - stepFreq/2 - stepFreqRaffinato
    binPositions = (frequenze - gridOrigin)/stepFreqRaffinato - round(enhancement/2 + 0.001)
    return tf.constant(binPositions, dtype=tf.float64)
# Hough transform for one sky point (over all spindown steps).
def inDaHough(i, freqHM):
    # Builds the (spindown x frequency) Hough map for one sky position.
    # NOTE(review): relies on module-level tensors/values — spindownsTF,
    # tempiHM, pesiHM, nRows, nColumns, securbelt, enhancement — defined
    # elsewhere in the script; parameter `i` is not used in this body.
    # Earlier bincount-based implementation, kept for reference:
    #def houghizza(stepIesimo):
        #sdTimed = tf.multiply(spindownsTF[stepIesimo], tempiHM, name = "Tdotpert")
        ##sdTimed = tf.cast(sdTimed, dtype=tf.float32)
        #appoggio = tf.round(freqHM-sdTimed+securbelt/2, name = "appoggioperindici")
        #appoggio = tf.cast(appoggio, dtype=tf.int32)
        #valori = tf.bincount(appoggio,weights=pesiTF)
        #zeriDopo = tf.zeros([nColumns - tf.size(valori)], dtype=tf.float64)
        #riga = tf.concat([valori,zeriDopo],0, name = "rigadihough")
        #return riga
    def houghizza(stepIesimo):
        # Shift each peak's frequency bin by spindown*time, then build the
        # weighted histogram over frequency bins (one Hough-map row).
        sdTimed = tf.multiply(spindownsTF[stepIesimo], tempiHM, name = "Tdotpert")
        #sdTimed = tf.cast(sdTimed, dtype=tf.float32)
        appoggio = tf.round(freqHM-sdTimed+securbelt/2, name = "appoggioperindici")
        appoggio = tf.cast(appoggio, dtype=tf.int32)
        valorisx = tf.unsorted_segment_sum(pesiHM, appoggio, nColumns)
        valorisx = tf.cast(valorisx, dtype=tf.float32)
        return valorisx
    # One differential-Hough row per spindown step.
    houghDiff = tf.map_fn(houghizza, tf.range(0, nRows), dtype=tf.float32, parallel_iterations=8)
    def sliceInt():
        # Final integration (old slicing version, without convolution).
        semiLarghezza = tf.round(enhancement/2+0.001)
        semiLarghezza = tf.cast(semiLarghezza, tf.int64)
        houghInt = houghDiff[:,enhancement:nColumns]-houghDiff[:,0:nColumns - enhancement]
        houghInt = tf.concat([houghDiff[:,0:enhancement],houghInt],1)
        return houghInt
    hough = sliceInt()
    # Cumulative sum along frequency completes the integrated Hough map.
    houghinal = tf.cumsum(hough, axis = 1)
    return houghinal
def manchurian_candidates(numCand, freqIniz, image, coord):
    """Pick up to two candidate peaks per frequency band from a Hough map.

    NOTE(review): leading indentation of this function was lost during
    extraction; the block structure below is reconstructed.  In particular
    the secondary-peak search is taken to be inside the primary-peak
    acceptance branch -- confirm against the original source.

    Relies on module-level globals: ``enhancement``, ``securbelt``,
    ``stepFreqRaffinato``, ``stepSpindown``, ``struttura``, ``spindowns``,
    ``inputFreq``.

    Parameters:
        numCand: number of frequency bands scanned (max primary candidates).
        freqIniz: start frequency of the (Doppler-corrected) map.
        image: 2-D Hough map; rows index spindown steps, columns frequency bins.
        coord: sky-patch record; indices 0-4 are read into the candidate rows.

    Returns:
        A 9 x (2*numCand) array; each filled column holds
        [frequency, coord[0], coord[1], spindown, amplitude,
         critical ratio, coord[2]/2, |coord[3]-coord[4]|/4, rank (1 or 2)].
        NOTE(review): ``counter`` is pre-incremented, so column 0 is never
        filled and, if every band yielded two candidates, the final write
        would land one column past the array -- inherited from the MATLAB
        original; confirm before changing.
    """
    minDistance = enhancement*4
    candidati = numpy.zeros((9,numCand*2))
    primaFreq = freqIniz-(securbelt/2)*stepFreqRaffinato
    # Frequency range of interest from the input structure
    # (analogous to the "cut GD2" function of the MATLAB pipeline).
    freqIniziale = struttura['basic_info'][0,0]['frin'][0,0][0,0]
    freqFinale = struttura['basic_info'][0,0]['frfi'][0,0][0,0]
    start = time.time()
    indexIniziale = ((freqIniziale-primaFreq)/stepFreqRaffinato).astype(numpy.int64)
    indexFinale = ((freqFinale-primaFreq)/stepFreqRaffinato+1).astype(numpy.int64)
    imageCand = image[:,indexIniziale:indexFinale].astype(numpy.int64)
    size = numpy.shape(imageCand)[1]
    freqniu = numpy.arange(0,size)*stepFreqRaffinato+freqIniziale
    # Per-column maxima and the spindown row achieving each maximum.
    maxPerColumn = numpy.amax(imageCand, axis = 0)
    rigaMax = numpy.argmax(imageCand, axis = 0)
    # Band edges: numCand (approximately) equal-width bands over the columns.
    stepFrequenzaNiu = maxPerColumn.size/numCand
    indiciFreq = numpy.arange(0,maxPerColumn.size,stepFrequenzaNiu)
    indiciFreq = numpy.append(indiciFreq, maxPerColumn.size)
    indiciFreq = numpy.round(indiciFreq).astype(numpy.int64)
    print(indiciFreq)

    def statistics(ndArray):
        # Robust location/scale: median and MAD-based sigma estimate.
        mediana = numpy.median(ndArray)
        sigmana = numpy.median(numpy.absolute(ndArray-mediana))/0.6745
        return mediana, sigmana

    stats = statistics(imageCand)
    medianaTot = stats[0]
    sigmanaTot = stats[1]

    # Per-band statistics are computed over a window of ~3 bands,
    # clamped at the first and last bands.
    iniziali = numpy.concatenate(([indiciFreq[0]],indiciFreq[0:numCand-2],[indiciFreq[indiciFreq.size-3]]),0)
    finali = numpy.concatenate(([indiciFreq[2]],indiciFreq[3:numCand+1],[indiciFreq[indiciFreq.size-1]]),0)

    def statsPerCand(i):
        # Median/sigma of the column maxima in the window of band i.
        stat = statistics(maxPerColumn[iniziali[i]:finali[i]])
        return stat

    statPerCand = numpy.array(list(map(statsPerCand, numpy.arange(numCand))))
    medianaPerCand = statPerCand[:,0]
    sigmanaPerCand = statPerCand[:,1]

    # Debug dump for cross-checking against the MATLAB pipeline.
    # NOTE(review): hard-coded absolute path; parameterize before reuse.
    percorsoRob = ("/home/protoss/Documenti/TESI/HWITest/mioRob%d.mat" % inputFreq)
    scipy.io.savemat(percorsoRob,{"stat": statPerCand,
                                  "maxs": maxPerColumn,
                                  "imax": rigaMax,
                                  "imageCand": imageCand})

    # Only consider bands with a positive median (non-degenerate statistics).
    filtro = numpy.where(medianaPerCand > 0)[0]
    counter = 0
    for i in filtro:
        inizio = indiciFreq[i]
        fine = indiciFreq[i+1]
        porzioneMaxPerColumn = maxPerColumn[inizio:fine]
        localMax = numpy.amax(porzioneMaxPerColumn)
        localInd = numpy.argmax(porzioneMaxPerColumn)
        if i == 1:
            # Debug output for the second band only.
            print(inizio, fine)
            print(porzioneMaxPerColumn, porzioneMaxPerColumn.size)
            print(localMax, localInd)
            print(medianaPerCand[i])
            print(medianaTot/2)
        # Accept the primary peak if it beats both the local median and
        # half the global median.
        if localMax > medianaPerCand[i] and localMax > medianaTot/2:
            counter = counter + 1
            index = indiciFreq[i] + localInd
            riga = rigaMax[index]
            candidati[0,counter] = freqniu[index]
            candidati[1,counter] = coord[0]
            candidati[2,counter] = coord[1]
            candidati[3,counter] = spindowns[riga]
            candidati[4,counter] = localMax
            candidati[5,counter] = (localMax-medianaPerCand[i])/sigmanaPerCand[i]
            candidati[6,counter] = coord[2]/2
            candidati[7,counter] = numpy.abs(coord[3]-coord[4])/4
            candidati[8,counter] = 1
            # Blank a window around the primary peak (this writes through a
            # view into maxPerColumn, as in the original), then look for a
            # well-separated secondary peak in the same band.
            limite1 = numpy.amax([localInd-minDistance,1]).astype(numpy.int64)
            limite2 = numpy.amin([localInd+minDistance,porzioneMaxPerColumn.size]).astype(numpy.int64)
            porzioneMaxPerColumn[limite1:limite2] = 0
            secondLocMax = numpy.amax(porzioneMaxPerColumn)
            secondLocInd = numpy.argmax(porzioneMaxPerColumn)
            if numpy.absolute(secondLocInd-localInd) > 2 * minDistance and secondLocMax > medianaPerCand[i]:
                counter = counter + 1
                index = indiciFreq[i] + secondLocInd
                riga = rigaMax[index]
                candidati[0,counter] = freqniu[index]
                candidati[1,counter] = coord[0]
                candidati[2,counter] = coord[1]
                candidati[3,counter] = spindowns[riga]
                candidati[4,counter] = secondLocMax
                candidati[5,counter] = (secondLocMax-medianaPerCand[i])/sigmanaPerCand[i]
                candidati[6,counter] = coord[2]/2
                candidati[7,counter] = numpy.abs(coord[3]-coord[4])/4
                candidati[8,counter] = 2
    # Snap candidate spindowns to the spindown grid.
    candidati[3,:]=numpy.round(candidati[3,:] / stepSpindown) * stepSpindown
    return candidati
# NOTE(review): leading indentation was lost when this file was extracted;
# the extent of the "for punto" loop body below (at least through the timing
# print, possibly through the savemat call) must be re-indented against the
# original source before this can run.
# From here on TensorFlow is used.
# Define all the constants needed by the Hough stage.
tempiTF = tf.constant(tempi,dtype=tf.float64)
pesiTF = tf.constant(pesi,dtype=tf.float64)
spindownsTF = tf.constant(spindowns, dtype=tf.float64)
# Times relative to the reference epoch, converted from days to units of
# the refined frequency step.
tempiHM = tempiTF-epoca
tempiHM = ((tempiHM)*3600*24/stepFreqRaffinato)
tempiHM = tf.cast(tempiHM, tf.float64)
# Flatten the weights to a 1-D tensor.
pesiHM = tf.reshape(pesiTF,(1,tf.size(pesiTF)))
pesiHM = pesiHM[0]
nRows = tf.constant(nstepSpindown, dtype=tf.int64)
# Problem: the number of frequency steps changes with the Doppler
# correction, because the min/max frequencies can vary while the step is
# kept the same.  Two possible approaches: either cut everything to 96000
# bins, or pad with zeros before/after, using the Doppler correction to
# find the absolute max and absolute min frequencies.
#freqTF = noncorr()
# Loop over the sky patches.
# NOTE(review): arange(0, nPunti-1) iterates over nPunti-1 points and skips
# the last one -- confirm this is intended.
start = time.time()
for punto in numpy.arange(0,nPunti-1):
freqInCorr,freqCorr, freqPerHough = doppcorr(punto)
# Number of frequency bins for this patch: Doppler-corrected band plus
# safety belt.
nstepsFreq = numpy.ceil(securbelt+(numpy.amax(freqCorr)-numpy.amin(freqCorr) + stepFreq + 2*stepFreqRaffinato)/stepFreqRaffinato)
#print(nstepsFreq)
nColumns = tf.cast(nstepsFreq, dtype=tf.int32)
freqTF = tf.constant(freqPerHough, dtype = tf.float64)
# Build the Hough map graph, evaluate it, then extract candidates.
houghmap = inDaHough(punto,freqTF)
hough = sessione.run(houghmap)
candidati = manchurian_candidates(cands, freqInCorr, hough, patch[punto])
# Keep only the candidate columns that were actually filled.
nonzeri = numpy.nonzero(candidati[0])
finalCand = candidati[:,nonzeri]
stop = time.time()
print(stop-start)
#print(freqCorr)
# Persist the corrected frequencies, the map and the candidates.
scipy.io.savemat(percorsoOut,{"freqCorr": freqCorr,
"hough": hough,
"candidati": finalCand
})
# NOTE(review): mid-file import; matplotlib is only needed for the plot below.
from matplotlib import pyplot
pyplot.figure(figsize=(10, 8))
##posxTick = numpy.arange(5551, 89454, round((89454-5551)/10))
#posxTick = numpy.arange(2330, 210300, round((210300-2330)/50))
##labelxTick = numpy.arange(0,1.1,0.1)+inputFreq
#labelxTick = numpy.arange(0,5.1,0.1)+inputFreq
#pyplot.xticks(posxTick,labelxTick)
#posyTick = numpy.arange(11)*nstepSpindown/10
#labelyTick = numpy.arange(spindownMin, spindownMax, stepSpindown*nstepSpindown/10)
#pyplot.yticks(posyTick,labelyTick)
# Display the last computed Hough map.
a = pyplot.imshow(hough, aspect = 400)
pyplot.colorbar(shrink = 1,aspect = 10)
pyplot.show() | gpl-3.0 |
matiasinsaurralde/tyk-buildpack | tyk/coprocess/bindings/python/coprocess_session_state_pb2.py | 3 | 24781 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: coprocess_session_state.proto
import sys
# Py2/Py3 byte-literal shim: on Python 2 string literals are already bytes,
# on Python 3 they are encoded with latin-1 (a 1:1 byte mapping) before
# being handed to the descriptor machinery.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

# Default symbol database, used to register the generated descriptors and
# message classes in this module.
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='coprocess_session_state.proto',
package='coprocess',
syntax='proto3',
serialized_pb=_b('\n\x1d\x63oprocess_session_state.proto\x12\tcoprocess\"*\n\nAccessSpec\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x0f\n\x07methods\x18\x02 \x03(\t\"s\n\x10\x41\x63\x63\x65ssDefinition\x12\x10\n\x08\x61pi_name\x18\x01 \x01(\t\x12\x0e\n\x06\x61pi_id\x18\x02 \x01(\t\x12\x10\n\x08versions\x18\x03 \x03(\t\x12+\n\x0c\x61llowed_urls\x18\x04 \x03(\x0b\x32\x15.coprocess.AccessSpec\"/\n\rBasicAuthData\x12\x10\n\x08password\x18\x01 \x01(\t\x12\x0c\n\x04hash\x18\x02 \x01(\t\"\x19\n\x07JWTData\x12\x0e\n\x06secret\x18\x01 \x01(\t\"!\n\x07Monitor\x12\x16\n\x0etrigger_limits\x18\x01 \x03(\x01\"\xf8\x06\n\x0cSessionState\x12\x12\n\nlast_check\x18\x01 \x01(\x03\x12\x11\n\tallowance\x18\x02 \x01(\x01\x12\x0c\n\x04rate\x18\x03 \x01(\x01\x12\x0b\n\x03per\x18\x04 \x01(\x01\x12\x0f\n\x07\x65xpires\x18\x05 \x01(\x03\x12\x11\n\tquota_max\x18\x06 \x01(\x03\x12\x14\n\x0cquota_renews\x18\x07 \x01(\x03\x12\x17\n\x0fquota_remaining\x18\x08 \x01(\x03\x12\x1a\n\x12quota_renewal_rate\x18\t \x01(\x03\x12@\n\raccess_rights\x18\n \x03(\x0b\x32).coprocess.SessionState.AccessRightsEntry\x12\x0e\n\x06org_id\x18\x0b \x01(\t\x12\x17\n\x0foauth_client_id\x18\x0c \x01(\t\x12:\n\noauth_keys\x18\r \x03(\x0b\x32&.coprocess.SessionState.OauthKeysEntry\x12\x31\n\x0f\x62\x61sic_auth_data\x18\x0e \x01(\x0b\x32\x18.coprocess.BasicAuthData\x12$\n\x08jwt_data\x18\x0f \x01(\x0b\x32\x12.coprocess.JWTData\x12\x14\n\x0chmac_enabled\x18\x10 \x01(\x08\x12\x13\n\x0bhmac_secret\x18\x11 \x01(\t\x12\x13\n\x0bis_inactive\x18\x12 \x01(\x08\x12\x17\n\x0f\x61pply_policy_id\x18\x13 \x01(\t\x12\x14\n\x0c\x64\x61ta_expires\x18\x14 \x01(\x03\x12#\n\x07monitor\x18\x15 \x01(\x0b\x32\x12.coprocess.Monitor\x12!\n\x19\x65nable_detailed_recording\x18\x16 \x01(\x08\x12\x10\n\x08metadata\x18\x17 \x01(\t\x12\x0c\n\x04tags\x18\x18 \x03(\t\x12\r\n\x05\x61lias\x18\x19 \x01(\t\x12\x14\n\x0clast_updated\x18\x1a \x01(\t\x12\x1d\n\x15id_extractor_deadline\x18\x1b \x01(\x03\x12\x18\n\x10session_lifetime\x18\x1c 
\x01(\x03\x1aP\n\x11\x41\x63\x63\x65ssRightsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.coprocess.AccessDefinition:\x02\x38\x01\x1a\x30\n\x0eOauthKeysEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ACCESSSPEC = _descriptor.Descriptor(
name='AccessSpec',
full_name='coprocess.AccessSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='coprocess.AccessSpec.url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='methods', full_name='coprocess.AccessSpec.methods', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=86,
)
_ACCESSDEFINITION = _descriptor.Descriptor(
name='AccessDefinition',
full_name='coprocess.AccessDefinition',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='api_name', full_name='coprocess.AccessDefinition.api_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='api_id', full_name='coprocess.AccessDefinition.api_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='versions', full_name='coprocess.AccessDefinition.versions', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allowed_urls', full_name='coprocess.AccessDefinition.allowed_urls', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=88,
serialized_end=203,
)
_BASICAUTHDATA = _descriptor.Descriptor(
name='BasicAuthData',
full_name='coprocess.BasicAuthData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='password', full_name='coprocess.BasicAuthData.password', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hash', full_name='coprocess.BasicAuthData.hash', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=205,
serialized_end=252,
)
_JWTDATA = _descriptor.Descriptor(
name='JWTData',
full_name='coprocess.JWTData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secret', full_name='coprocess.JWTData.secret', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=254,
serialized_end=279,
)
_MONITOR = _descriptor.Descriptor(
name='Monitor',
full_name='coprocess.Monitor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trigger_limits', full_name='coprocess.Monitor.trigger_limits', index=0,
number=1, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=281,
serialized_end=314,
)
_SESSIONSTATE_ACCESSRIGHTSENTRY = _descriptor.Descriptor(
name='AccessRightsEntry',
full_name='coprocess.SessionState.AccessRightsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='coprocess.SessionState.AccessRightsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='coprocess.SessionState.AccessRightsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1075,
serialized_end=1155,
)
_SESSIONSTATE_OAUTHKEYSENTRY = _descriptor.Descriptor(
name='OauthKeysEntry',
full_name='coprocess.SessionState.OauthKeysEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='coprocess.SessionState.OauthKeysEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='coprocess.SessionState.OauthKeysEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1157,
serialized_end=1205,
)
_SESSIONSTATE = _descriptor.Descriptor(
name='SessionState',
full_name='coprocess.SessionState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='last_check', full_name='coprocess.SessionState.last_check', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allowance', full_name='coprocess.SessionState.allowance', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rate', full_name='coprocess.SessionState.rate', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='per', full_name='coprocess.SessionState.per', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='expires', full_name='coprocess.SessionState.expires', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quota_max', full_name='coprocess.SessionState.quota_max', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quota_renews', full_name='coprocess.SessionState.quota_renews', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quota_remaining', full_name='coprocess.SessionState.quota_remaining', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='quota_renewal_rate', full_name='coprocess.SessionState.quota_renewal_rate', index=8,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='access_rights', full_name='coprocess.SessionState.access_rights', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='org_id', full_name='coprocess.SessionState.org_id', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oauth_client_id', full_name='coprocess.SessionState.oauth_client_id', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='oauth_keys', full_name='coprocess.SessionState.oauth_keys', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='basic_auth_data', full_name='coprocess.SessionState.basic_auth_data', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='jwt_data', full_name='coprocess.SessionState.jwt_data', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hmac_enabled', full_name='coprocess.SessionState.hmac_enabled', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hmac_secret', full_name='coprocess.SessionState.hmac_secret', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_inactive', full_name='coprocess.SessionState.is_inactive', index=17,
number=18, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='apply_policy_id', full_name='coprocess.SessionState.apply_policy_id', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='data_expires', full_name='coprocess.SessionState.data_expires', index=19,
number=20, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='monitor', full_name='coprocess.SessionState.monitor', index=20,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enable_detailed_recording', full_name='coprocess.SessionState.enable_detailed_recording', index=21,
number=22, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='metadata', full_name='coprocess.SessionState.metadata', index=22,
number=23, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tags', full_name='coprocess.SessionState.tags', index=23,
number=24, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='alias', full_name='coprocess.SessionState.alias', index=24,
number=25, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='last_updated', full_name='coprocess.SessionState.last_updated', index=25,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='id_extractor_deadline', full_name='coprocess.SessionState.id_extractor_deadline', index=26,
number=27, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_lifetime', full_name='coprocess.SessionState.session_lifetime', index=27,
number=28, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_SESSIONSTATE_ACCESSRIGHTSENTRY, _SESSIONSTATE_OAUTHKEYSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=317,
serialized_end=1205,
)
# Resolve cross-references between the generated descriptors: point
# message-typed fields at their message descriptors, attach the map-entry
# helper messages to their containing SessionState type, and register every
# top-level message type on the file descriptor.
_ACCESSDEFINITION.fields_by_name['allowed_urls'].message_type = _ACCESSSPEC
_SESSIONSTATE_ACCESSRIGHTSENTRY.fields_by_name['value'].message_type = _ACCESSDEFINITION
_SESSIONSTATE_ACCESSRIGHTSENTRY.containing_type = _SESSIONSTATE
_SESSIONSTATE_OAUTHKEYSENTRY.containing_type = _SESSIONSTATE
_SESSIONSTATE.fields_by_name['access_rights'].message_type = _SESSIONSTATE_ACCESSRIGHTSENTRY
_SESSIONSTATE.fields_by_name['oauth_keys'].message_type = _SESSIONSTATE_OAUTHKEYSENTRY
_SESSIONSTATE.fields_by_name['basic_auth_data'].message_type = _BASICAUTHDATA
_SESSIONSTATE.fields_by_name['jwt_data'].message_type = _JWTDATA
_SESSIONSTATE.fields_by_name['monitor'].message_type = _MONITOR
DESCRIPTOR.message_types_by_name['AccessSpec'] = _ACCESSSPEC
DESCRIPTOR.message_types_by_name['AccessDefinition'] = _ACCESSDEFINITION
DESCRIPTOR.message_types_by_name['BasicAuthData'] = _BASICAUTHDATA
DESCRIPTOR.message_types_by_name['JWTData'] = _JWTDATA
DESCRIPTOR.message_types_by_name['Monitor'] = _MONITOR
DESCRIPTOR.message_types_by_name['SessionState'] = _SESSIONSTATE
# Concrete message classes, generated from the descriptors above via the
# reflection metaclass and registered with the symbol database.
AccessSpec = _reflection.GeneratedProtocolMessageType('AccessSpec', (_message.Message,), dict(
  DESCRIPTOR = _ACCESSSPEC,
  __module__ = 'coprocess_session_state_pb2'
  # @@protoc_insertion_point(class_scope:coprocess.AccessSpec)
  ))
_sym_db.RegisterMessage(AccessSpec)

AccessDefinition = _reflection.GeneratedProtocolMessageType('AccessDefinition', (_message.Message,), dict(
  DESCRIPTOR = _ACCESSDEFINITION,
  __module__ = 'coprocess_session_state_pb2'
  # @@protoc_insertion_point(class_scope:coprocess.AccessDefinition)
  ))
_sym_db.RegisterMessage(AccessDefinition)

BasicAuthData = _reflection.GeneratedProtocolMessageType('BasicAuthData', (_message.Message,), dict(
  DESCRIPTOR = _BASICAUTHDATA,
  __module__ = 'coprocess_session_state_pb2'
  # @@protoc_insertion_point(class_scope:coprocess.BasicAuthData)
  ))
_sym_db.RegisterMessage(BasicAuthData)

JWTData = _reflection.GeneratedProtocolMessageType('JWTData', (_message.Message,), dict(
  DESCRIPTOR = _JWTDATA,
  __module__ = 'coprocess_session_state_pb2'
  # @@protoc_insertion_point(class_scope:coprocess.JWTData)
  ))
_sym_db.RegisterMessage(JWTData)

Monitor = _reflection.GeneratedProtocolMessageType('Monitor', (_message.Message,), dict(
  DESCRIPTOR = _MONITOR,
  __module__ = 'coprocess_session_state_pb2'
  # @@protoc_insertion_point(class_scope:coprocess.Monitor)
  ))
_sym_db.RegisterMessage(Monitor)

# SessionState carries its two map-entry helper messages as nested types.
SessionState = _reflection.GeneratedProtocolMessageType('SessionState', (_message.Message,), dict(
  AccessRightsEntry = _reflection.GeneratedProtocolMessageType('AccessRightsEntry', (_message.Message,), dict(
    DESCRIPTOR = _SESSIONSTATE_ACCESSRIGHTSENTRY,
    __module__ = 'coprocess_session_state_pb2'
    # @@protoc_insertion_point(class_scope:coprocess.SessionState.AccessRightsEntry)
    ))
  ,
  OauthKeysEntry = _reflection.GeneratedProtocolMessageType('OauthKeysEntry', (_message.Message,), dict(
    DESCRIPTOR = _SESSIONSTATE_OAUTHKEYSENTRY,
    __module__ = 'coprocess_session_state_pb2'
    # @@protoc_insertion_point(class_scope:coprocess.SessionState.OauthKeysEntry)
    ))
  ,
  DESCRIPTOR = _SESSIONSTATE,
  __module__ = 'coprocess_session_state_pb2'
  # @@protoc_insertion_point(class_scope:coprocess.SessionState)
  ))
_sym_db.RegisterMessage(SessionState)
_sym_db.RegisterMessage(SessionState.AccessRightsEntry)
_sym_db.RegisterMessage(SessionState.OauthKeysEntry)

# Mark the map-entry helper messages with the map_entry option
# (serialized MessageOptions '8\001' == map_entry=true).
_SESSIONSTATE_ACCESSRIGHTSENTRY.has_options = True
_SESSIONSTATE_ACCESSRIGHTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_SESSIONSTATE_OAUTHKEYSENTRY.has_options = True
_SESSIONSTATE_OAUTHKEYSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
ESS-LLP/erpnext-medical | erpnext/hr/doctype/expense_claim/expense_claim.py | 3 | 11728 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import get_fullname, flt, cstr
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
from erpnext.accounts.party import get_party_account
from erpnext.accounts.general_ledger import make_gl_entries
from erpnext.accounts.doctype.sales_invoice.sales_invoice import get_bank_cash_account
from erpnext.controllers.accounts_controller import AccountsController
from frappe.utils.csvutils import getlink
# Validation error for an invalid expense approver (raisers live outside
# this chunk of the file).
class InvalidExpenseApproverError(frappe.ValidationError): pass
# Validation error for an identity mismatch of the expense approver
# (raisers live outside this chunk of the file).
class ExpenseApproverIdentityError(frappe.ValidationError): pass
class ExpenseClaim(AccountsController):
def onload(self):
    """Expose the 'make payment via journal entry' accounts setting to the client."""
    setting = frappe.db.get_single_value(
        'Accounts Settings', 'make_payment_via_journal_entry')
    self.get("__onload").make_payment_via_journal_entry = setting
def validate(self):
    """Run all validations and set derived fields before save.

    NOTE(review): validate_advances() runs before calculate_total_amount(),
    so it compares against the incoming (client-supplied)
    total_claimed_amount rather than the freshly computed one -- confirm
    this ordering is intended.
    """
    self.validate_advances()
    self.validate_sanctioned_amount()
    self.calculate_total_amount()
    set_employee_name(self)
    self.set_expense_account(validate=True)
    self.set_payable_account()
    self.set_cost_center()
    self.set_status()
    # Derive the project from the task when only a task was supplied.
    if self.task and not self.project:
        self.project = frappe.db.get_value("Task", self.task, "project")
def set_status(self):
    """Derive the workflow status from docstatus, approval and payment state."""
    # Base status from docstatus (0/1/2 -> Draft/Submitted/Cancelled).
    self.status = {
        "0": "Draft",
        "1": "Submitted",
        "2": "Cancelled"
    }[cstr(self.docstatus or 0)]

    # Amount already covered by direct reimbursement plus advances.
    paid_amount = flt(self.total_amount_reimbursed) + flt(self.total_advance_amount)
    precision = self.precision("total_sanctioned_amount")
    # Submitted + approved claims are refined to Paid/Unpaid: Paid when
    # flagged is_paid or when reimbursement + advances fully cover the
    # sanctioned total (compared at field precision).
    if (self.is_paid or (flt(self.total_sanctioned_amount) > 0
            and flt(self.total_sanctioned_amount, precision) == flt(paid_amount, precision))) \
        and self.docstatus == 1 and self.approval_status == 'Approved':
        self.status = "Paid"
    elif flt(self.total_sanctioned_amount) > 0 and self.docstatus == 1 and self.approval_status == 'Approved':
        self.status = "Unpaid"
    elif self.docstatus == 1 and self.approval_status == 'Rejected':
        self.status = 'Rejected'
def set_payable_account(self):
    """Default the payable account from the company, unless one is already set or the claim is paid."""
    if self.payable_account or self.is_paid:
        return
    self.payable_account = frappe.get_cached_value(
        'Company', self.company, 'default_expense_claim_payable_account')
def set_cost_center(self):
    """Default the cost center from the company when none is given."""
    if self.cost_center:
        return
    self.cost_center = frappe.get_cached_value('Company', self.company, 'cost_center')
def on_submit(self):
    """Post GL entries and propagate linked amounts on submission."""
    if self.approval_status=="Draft":
        frappe.throw(_("""Approval Status must be 'Approved' or 'Rejected'"""))

    self.update_task_and_project()
    self.make_gl_entries()

    # When paid directly on submit, sync the reimbursed amount first so the
    # status computed below reflects the payment.
    if self.is_paid:
        update_reimbursed_amount(self)

    self.set_status()
    self.update_claimed_amount_in_employee_advance()
def on_cancel(self):
    """Reverse GL entries and recompute linked amounts on cancellation."""
    self.update_task_and_project()
    # Only reverse postings if a payable account was involved.
    if self.payable_account:
        self.make_gl_entries(cancel=True)

    if self.is_paid:
        update_reimbursed_amount(self)

    self.set_status()
    self.update_claimed_amount_in_employee_advance()
def update_claimed_amount_in_employee_advance(self):
    """Refresh the claimed amount on every Employee Advance linked to this claim."""
    for advance_row in self.get("advances"):
        advance_doc = frappe.get_doc("Employee Advance", advance_row.employee_advance)
        advance_doc.update_claimed_amount()
def update_task_and_project(self):
    """Propagate this claim's totals to the linked Task, or else the linked Project."""
    if self.task:
        self.update_task()
        return
    if self.project:
        project = frappe.get_doc("Project", self.project)
        project.update_project()
def make_gl_entries(self, cancel = False):
    """Post this claim's GL entries, or reverse them when cancel is True.

    Posting is skipped entirely when nothing was sanctioned.
    """
    if flt(self.total_sanctioned_amount) <= 0:
        return
    # Note: this resolves to the module-level make_gl_entries helper.
    make_gl_entries(self.get_gl_entries(), cancel)
def get_gl_entries(self):
    """Build and return the GL entry dicts for this expense claim.

    Posting pattern:
      * credit the employee payable account for sanctioned minus advances,
      * debit each expense account for its sanctioned amount,
      * credit each advance account for its allocated amount,
      * when paid immediately, additionally credit bank/cash and debit the
        payable back to clear it.
    """
    gl_entry = []
    self.validate_account_details()

    # Net amount still owed to the employee after allocating advances.
    payable_amount = flt(self.total_sanctioned_amount) - flt(self.total_advance_amount)

    # payable entry
    if payable_amount:
        gl_entry.append(
            self.get_gl_dict({
                "account": self.payable_account,
                "credit": payable_amount,
                "credit_in_account_currency": payable_amount,
                "against": ",".join([d.default_account for d in self.expenses]),
                "party_type": "Employee",
                "party": self.employee,
                "against_voucher_type": self.doctype,
                "against_voucher": self.name
            })
        )

    # expense entries
    for data in self.expenses:
        gl_entry.append(
            self.get_gl_dict({
                "account": data.default_account,
                "debit": data.sanctioned_amount,
                "debit_in_account_currency": data.sanctioned_amount,
                "against": self.employee,
                "cost_center": self.cost_center
            })
        )

    # advance allocation entries
    for data in self.advances:
        gl_entry.append(
            self.get_gl_dict({
                "account": data.advance_account,
                "credit": data.allocated_amount,
                "credit_in_account_currency": data.allocated_amount,
                "against": ",".join([d.default_account for d in self.expenses]),
                "party_type": "Employee",
                "party": self.employee,
                "against_voucher_type": self.doctype,
                "against_voucher": self.name
            })
        )

    if self.is_paid and payable_amount:
        # payment entry: money leaves bank/cash and clears the payable.
        payment_account = get_bank_cash_account(self.mode_of_payment, self.company).get("account")
        gl_entry.append(
            self.get_gl_dict({
                "account": payment_account,
                "credit": payable_amount,
                "credit_in_account_currency": payable_amount,
                "against": self.employee
            })
        )

        gl_entry.append(
            self.get_gl_dict({
                "account": self.payable_account,
                "party_type": "Employee",
                "party": self.employee,
                "against": payment_account,
                "debit": payable_amount,
                "debit_in_account_currency": payable_amount,
                "against_voucher": self.name,
                "against_voucher_type": self.doctype,
            })
        )

    return gl_entry
def validate_account_details(self):
    """Fail fast if mandatory accounting fields are missing.

    Requires a cost center and a payable account; when the claim is
    flagged as paid, a mode of payment must be chosen as well.

    :raises frappe.ValidationError: via ``frappe.throw`` on any missing field.
    """
    if not self.cost_center:
        frappe.throw(_("Cost center is required to book an expense claim"))
    if not self.payable_account:
        frappe.throw(_("Please set default payable account for the company {0}").format(getlink("Company", self.company)))
    # Flattened the nested ifs; also dropped the stray `.format(self.employee)`
    # that was chained onto a message with no placeholder (a no-op copy-paste
    # leftover that only obscured the error message's intent).
    if self.is_paid and not self.mode_of_payment:
        frappe.throw(_("Mode of payment is required to make a payment"))
def calculate_total_amount(self):
    """Recompute total claimed and sanctioned amounts from the expense rows.

    A rejected claim zeroes every row's sanctioned amount before summing.
    """
    claimed = 0
    sanctioned = 0
    rejected = self.approval_status == 'Rejected'
    for row in self.get('expenses'):
        if rejected:
            row.sanctioned_amount = 0.0
        claimed += flt(row.claim_amount)
        sanctioned += flt(row.sanctioned_amount)
    self.total_claimed_amount = claimed
    self.total_sanctioned_amount = sanctioned
def update_task(self):
    """Refresh the linked Task's total-expense-claim figure and persist it."""
    task = frappe.get_doc("Task", self.task)
    task.update_total_expense_claim()
    task.save()
def validate_advances(self):
    """Refresh and validate the employee-advance allocations on this claim.

    For each advance row: pulls current figures from the linked Employee
    Advance, recomputes its unclaimed balance, and rejects allocations
    exceeding that balance.  Finally recomputes ``total_advance_amount``
    and checks it never exceeds the claimed (or, if set, sanctioned) total.
    """
    self.total_advance_amount = 0
    for d in self.get("advances"):
        ref_doc = frappe.db.get_value("Employee Advance", d.employee_advance,
            ["posting_date", "paid_amount", "claimed_amount", "advance_account"], as_dict=1)
        d.posting_date = ref_doc.posting_date
        d.advance_account = ref_doc.advance_account
        d.advance_paid = ref_doc.paid_amount
        # unclaimed = what was paid out minus what other claims already consumed
        d.unclaimed_amount = flt(ref_doc.paid_amount) - flt(ref_doc.claimed_amount)

        if d.allocated_amount and flt(d.allocated_amount) > flt(d.unclaimed_amount):
            frappe.throw(_("Row {0}# Allocated amount {1} cannot be greater than unclaimed amount {2}")
                .format(d.idx, d.allocated_amount, d.unclaimed_amount))

        self.total_advance_amount += flt(d.allocated_amount)

    if self.total_advance_amount:
        # Compare at the field's configured precision to avoid float noise.
        precision = self.precision("total_advance_amount")
        if flt(self.total_advance_amount, precision) > flt(self.total_claimed_amount, precision):
            frappe.throw(_("Total advance amount cannot be greater than total claimed amount"))

        if self.total_sanctioned_amount \
            and flt(self.total_advance_amount, precision) > flt(self.total_sanctioned_amount, precision):
            frappe.throw(_("Total advance amount cannot be greater than total sanctioned amount"))
def validate_sanctioned_amount(self):
    """Ensure no expense row sanctions more than what was claimed."""
    offender = next(
        (row for row in self.get('expenses')
         if flt(row.sanctioned_amount) > flt(row.claim_amount)),
        None)
    if offender is not None:
        frappe.throw(_("Sanctioned Amount cannot be greater than Claim Amount in Row {0}.").format(offender.idx))
def set_expense_account(self, validate=False):
    """Fill each expense row's default account from its Expense Claim Type.

    With ``validate=True`` rows that already carry an account are left
    alone; otherwise every row is (re)populated.
    """
    for expense in self.expenses:
        if not expense.default_account or not validate:
            expense.default_account = get_expense_claim_account(expense.expense_type, self.company)["account"]
def update_reimbursed_amount(doc):
    """Recompute how much of *doc* (an Expense Claim) has been reimbursed.

    Sums the debit side of GL entries booked against this claim for the
    employee, then persists the amount and the derived status directly
    via ``db.set_value`` (no full document save, so no validations rerun).
    """
    amt = frappe.db.sql("""select ifnull(sum(debit_in_account_currency), 0) as amt
        from `tabGL Entry` where against_voucher_type = 'Expense Claim' and against_voucher = %s
        and party = %s """, (doc.name, doc.employee) ,as_dict=1)[0].amt

    doc.total_amount_reimbursed = amt
    frappe.db.set_value("Expense Claim", doc.name , "total_amount_reimbursed", amt)

    doc.set_status()
    frappe.db.set_value("Expense Claim", doc.name , "status", doc.status)
@frappe.whitelist()
def make_bank_entry(dt, dn):
    """Build (but do not save) a Journal Entry paying out an Expense Claim.

    :param dt: source doctype (an Expense Claim doctype name).
    :param dn: name of the claim being paid.
    :return: the drafted Journal Entry as a dict, for the client to open.
    """
    from erpnext.accounts.doctype.journal_entry.journal_entry import get_default_bank_cash_account

    expense_claim = frappe.get_doc(dt, dn)
    # Prefer the company's default bank account; fall back to cash.
    default_bank_cash_account = get_default_bank_cash_account(expense_claim.company, "Bank")
    if not default_bank_cash_account:
        default_bank_cash_account = get_default_bank_cash_account(expense_claim.company, "Cash")

    # Outstanding = sanctioned - already reimbursed - advances applied.
    payable_amount = flt(expense_claim.total_sanctioned_amount) \
        - flt(expense_claim.total_amount_reimbursed) - flt(expense_claim.total_advance_amount)

    je = frappe.new_doc("Journal Entry")
    je.voucher_type = 'Bank Entry'
    je.company = expense_claim.company
    je.remark = 'Payment against Expense Claim: ' + dn  # stray trailing ';' removed

    # Debit the payable account to clear the employee's outstanding claim.
    je.append("accounts", {
        "account": expense_claim.payable_account,
        "debit_in_account_currency": payable_amount,
        "reference_type": "Expense Claim",
        "party_type": "Employee",
        "party": expense_claim.employee,
        "reference_name": expense_claim.name
    })

    # Credit the bank/cash account the payment is drawn from.
    je.append("accounts", {
        "account": default_bank_cash_account.account,
        "credit_in_account_currency": payable_amount,
        "reference_type": "Expense Claim",
        "reference_name": expense_claim.name,
        "balance": default_bank_cash_account.balance,
        "account_currency": default_bank_cash_account.account_currency,
        "account_type": default_bank_cash_account.account_type
    })

    return je.as_dict()
@frappe.whitelist()
def get_expense_claim_account(expense_claim_type, company):
    """Look up the default expense account configured for an Expense Claim Type."""
    filters = {"parent": expense_claim_type, "company": company}
    account = frappe.db.get_value("Expense Claim Account", filters, "default_account")
    if not account:
        message = _("Please set default account in Expense Claim Type {0}").format(expense_claim_type)
        frappe.throw(message)
    return {"account": account}
@frappe.whitelist()
def get_advances(employee, advance_id=None):
    """Return Employee Advance records available for allocation.

    Without ``advance_id``: every submitted advance of *employee* that
    still has a paid balance exceeding what was claimed.  With it: just
    that one advance.

    Hardened: values are now passed as query parameters instead of being
    interpolated (with manual quoting around ``frappe.db.escape``) into
    the SQL string, removing a brittle injection surface.

    :return: list of dicts with name/posting_date/amount/account fields.
    """
    if not advance_id:
        condition = 'docstatus=1 and employee=%(employee)s and paid_amount > 0 and paid_amount > claimed_amount'
        values = {'employee': employee}
    else:
        condition = 'name=%(advance_id)s'
        values = {'advance_id': advance_id}

    return frappe.db.sql("""
        select
            name, posting_date, paid_amount, claimed_amount, advance_account
        from
            `tabEmployee Advance`
        where {0}
    """.format(condition), values, as_dict=1)
@frappe.whitelist()
def get_expense_claim(
    employee_name, company, employee_advance_name, posting_date, paid_amount, claimed_amount):
    """Draft (do not save) an Expense Claim consuming an Employee Advance.

    Payable account and cost center default from the Company record; the
    claim is flagged paid when the advance was actually paid out.  The
    advance row allocates the full remaining (paid - claimed) balance.

    :return: unsaved Expense Claim document
    """
    default_payable_account = frappe.get_cached_value('Company', company, "default_payable_account")
    default_cost_center = frappe.get_cached_value('Company', company, 'cost_center')

    expense_claim = frappe.new_doc('Expense Claim')
    expense_claim.company = company
    expense_claim.employee = employee_name
    expense_claim.payable_account = default_payable_account
    expense_claim.cost_center = default_cost_center
    expense_claim.is_paid = 1 if flt(paid_amount) else 0
    expense_claim.append(
        'advances',
        {
            'employee_advance': employee_advance_name,
            'posting_date': posting_date,
            'advance_paid': flt(paid_amount),
            'unclaimed_amount': flt(paid_amount) - flt(claimed_amount),
            'allocated_amount': flt(paid_amount) - flt(claimed_amount)
        }
    )

    return expense_claim
| gpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/next_hop_parameters_py3.py | 9 | 2266 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NextHopParameters(Model):
    """Parameters that define the source and destination endpoint.

    All required parameters must be populated in order to send to Azure.

    :param target_resource_id: Required. The resource identifier of the target
     resource against which the action is to be performed.
    :type target_resource_id: str
    :param source_ip_address: Required. The source IP address.
    :type source_ip_address: str
    :param destination_ip_address: Required. The destination IP address.
    :type destination_ip_address: str
    :param target_nic_resource_id: The NIC ID. (If VM has multiple NICs and IP
     forwarding is enabled on any of the nics, then this parameter must be
     specified. Otherwise optional).
    :type target_nic_resource_id: str
    """

    # Fields msrest requires to be non-None before serialization.
    _validation = {
        'target_resource_id': {'required': True},
        'source_ip_address': {'required': True},
        'destination_ip_address': {'required': True},
    }

    # Python attribute name -> JSON wire name/type for (de)serialization.
    _attribute_map = {
        'target_resource_id': {'key': 'targetResourceId', 'type': 'str'},
        'source_ip_address': {'key': 'sourceIPAddress', 'type': 'str'},
        'destination_ip_address': {'key': 'destinationIPAddress', 'type': 'str'},
        'target_nic_resource_id': {'key': 'targetNicResourceId', 'type': 'str'},
    }

    def __init__(self, *, target_resource_id: str, source_ip_address: str, destination_ip_address: str, target_nic_resource_id: str=None, **kwargs) -> None:
        super(NextHopParameters, self).__init__(**kwargs)
        self.target_resource_id = target_resource_id
        self.source_ip_address = source_ip_address
        self.destination_ip_address = destination_ip_address
        self.target_nic_resource_id = target_nic_resource_id
| mit |
emilio/servo | tests/wpt/web-platform-tests/webdriver/tests/support/sync.py | 16 | 5414 | import collections
import sys
import time
from webdriver import error
# Default polling ceiling (seconds) and probe frequency (seconds) for Poll.
DEFAULT_TIMEOUT = 5
DEFAULT_INTERVAL = 0.1
class Poll(object):
    """
    An explicit conditional utility primitive for polling until a
    condition evaluates to something truthy.

    A `Poll` instance defines the maximum amount of time to wait
    for a condition, as well as the frequency with which to check
    the condition.  Furthermore, the user may configure the wait
    to ignore specific types of exceptions whilst waiting, such as
    `error.NoSuchElementException` when searching for an element
    on the page.
    """

    def __init__(self,
                 session,
                 timeout=DEFAULT_TIMEOUT,
                 interval=DEFAULT_INTERVAL,
                 raises=error.TimeoutException,
                 message=None,
                 ignored_exceptions=None,
                 clock=time):
        """
        Configure the poller to have a custom timeout, interval,
        and list of ignored exceptions.  Optionally a different time
        implementation than the one provided by the standard library
        (`time`) can also be provided.

        Sample usage::

            # Wait 30 seconds for window to open,
            # checking for its presence once every 5 seconds.
            from support.sync import Poll
            wait = Poll(session, timeout=30, interval=5,
                ignored_exceptions=error.NoSuchWindowException)
            window = wait.until(lambda s: s.switch_to_window(42))

        :param session: The input value to be provided to conditions,
            usually a `webdriver.Session` instance.

        :param timeout: How long to wait for the evaluated condition
            to become true.

        :param interval: How often the condition should be evaluated.
            In reality the interval may be greater as the cost of
            evaluating the condition function. If that is not the case the
            interval for the next condition function call is shortened to
            keep the original interval sequence as best as possible.

        :param raises: Optional exception to raise when poll elapses.
            If not used, an `error.TimeoutException` is raised.
            If it is `None`, no exception is raised on the poll elapsing.

        :param message: An optional message to include in `raises`'s
            message if the `until` condition times out.

        :param ignored_exceptions: Ignore specific types of exceptions
            whilst waiting for the condition.  Any exceptions not
            whitelisted will be allowed to propagate, terminating the
            wait.

        :param clock: Allows overriding the use of the runtime's
            default time library.  See `sync.SystemClock` for
            implementation details.
        """
        self.session = session
        self.timeout = timeout
        self.interval = interval
        self.exc_cls = raises
        self.exc_msg = message
        self.clock = clock

        exceptions = []
        if ignored_exceptions is not None:
            # `collections.Iterable` was removed in Python 3.10; import the
            # ABC from `collections.abc`, falling back for Python 2.
            try:
                from collections.abc import Iterable
            except ImportError:  # Python 2
                from collections import Iterable
            if isinstance(ignored_exceptions, Iterable):
                exceptions.extend(iter(ignored_exceptions))
            else:
                exceptions.append(ignored_exceptions)
        self.exceptions = tuple(set(exceptions))

    def until(self, condition):
        """
        This will repeatedly evaluate `condition` in anticipation
        for a truthy return value, or the timeout to expire.

        A condition that returns `None` or does not evaluate to
        true will fully elapse its timeout before raising, unless
        the `raises` keyword argument is `None`, in which case the
        condition's return value is propagated unconditionally.

        If an exception is raised in `condition` and it's not ignored,
        this function will raise immediately.  If the exception is
        ignored it will be swallowed and polling will resume until
        either the condition meets the return requirements or the
        timeout duration is reached.

        :param condition: A callable function whose return value will
            be returned by this function.
        """
        rv = None
        last_exc = None
        start = self.clock.time()
        end = start + self.timeout

        while not self.clock.time() >= end:
            try:
                # target time for the next probe (renamed from `next`,
                # which shadowed the builtin)
                next_check = self.clock.time() + self.interval
                rv = condition(self.session)
            except (KeyboardInterrupt, SystemExit):
                raise
            except self.exceptions:
                last_exc = sys.exc_info()

            # re-adjust the interval depending on how long
            # the callback took to evaluate the condition
            interval_new = max(next_check - self.clock.time(), 0)

            if not rv:
                self.clock.sleep(interval_new)
                continue

            # rv is truthy here (and hence not None); the original had an
            # unreachable trailing sleep after this return, now removed.
            return rv

        if self.exc_cls is not None:
            elapsed = round((self.clock.time() - start), 1)
            message = ""
            if self.exc_msg is not None:
                message = " with message: {}".format(self.exc_msg)
            raise self.exc_cls(
                "Timed out after {} seconds{}".format(elapsed, message),
                cause=last_exc)
        else:
            return rv
| mpl-2.0 |
h0nIg/ansible-modules-extras | cloud/centurylink/clc_aa_policy.py | 43 | 10985 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
DOCUMENTATION = '''
module: clc_aa_policy
short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
description:
- An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
version_added: "2.0"
options:
name:
description:
- The name of the Anti Affinity Policy.
required: True
location:
description:
- Datacenter in which the policy lives/should live.
required: True
state:
description:
- Whether to create or delete the policy.
required: False
default: present
choices: ['present','absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
---
- name: Create AA Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create an Anti Affinity Policy
clc_aa_policy:
name: 'Hammer Time'
location: 'UK3'
state: present
register: policy
- name: debug
debug: var=policy
---
- name: Delete AA Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete an Anti Affinity Policy
clc_aa_policy:
name: 'Hammer Time'
location: 'UK3'
state: absent
register: policy
- name: debug
debug: var=policy
'''
RETURN = '''
changed:
description: A flag indicating if any change was made or not
returned: success
type: boolean
sample: True
policy:
description: The anti affinity policy information
returned: success
type: dict
sample:
{
"id":"1a28dd0988984d87b9cd61fa8da15424",
"name":"test_aa_policy",
"location":"UC1",
"links":[
{
"rel":"self",
"href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
"verbs":[
"GET",
"DELETE",
"PUT"
]
},
{
"rel":"location",
"href":"/v2/datacenters/wfad/UC1",
"id":"uc1",
"name":"UC1 - US West (Santa Clara)"
}
]
}
'''
__version__ = '${version}'
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
class ClcAntiAffinityPolicy:
    """Ansible helper that ensures a CenturyLink Cloud anti-affinity
    policy is present or absent in a given datacenter.

    Wraps the clc-sdk; credentials are read from environment variables
    (see module notes).  Supports Ansible check mode.
    """

    clc = clc_sdk      # class-level handle to the clc-sdk module
    module = None      # bound AnsibleModule instance (set in __init__)

    def __init__(self, module):
        """
        Construct module, verifying runtime dependencies (clc-sdk and
        requests >= 2.5.0) and tagging outbound requests with a
        module-identifying user agent.
        """
        self.module = module
        self.policy_dict = {}

        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        self._set_user_agent(self.clc)

    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        argument_spec = dict(
            name=dict(required=True),
            location=dict(required=True),
            # NOTE(review): 'wait' is accepted but never read in this class —
            # confirm whether it should gate on async policy operations.
            wait=dict(default=True),
            state=dict(default='present', choices=['present', 'absent']),
        )
        return argument_spec

    # Module Behavior Goodness
    def process_request(self):
        """
        Process the request - Main Code Path
        :return: Returns with either an exit_json or fail_json
        """
        p = self.module.params

        self._set_clc_credentials_from_env()
        self.policy_dict = self._get_policies_for_datacenter(p)

        if p['state'] == "absent":
            changed, policy = self._ensure_policy_is_absent(p)
        else:
            changed, policy = self._ensure_policy_is_present(p)

        # Normalize the SDK policy object into a plain dict for JSON output.
        if hasattr(policy, 'data'):
            policy = policy.data
        elif hasattr(policy, '__dict__'):
            policy = policy.__dict__

        self.module.exit_json(changed=changed, policy=policy)

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables.
        Prefers an API token + account alias; falls back to username/password.
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)

        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url

        if v2_api_token and clc_alias:
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")

    def _get_policies_for_datacenter(self, p):
        """
        Get the Policies for a datacenter by calling the CLC API.
        :param p: module params (uses p['location'])
        :return: dict mapping policy name -> SDK policy object
        """
        response = {}

        policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])

        for policy in policies:
            response[policy.name] = policy
        return response

    def _create_policy(self, p):
        """
        Create an Anti Affinity Policy using the CLC API.
        :param p: module params (uses p['name'] and p['location'])
        :return: response dictionary from the CLC API.
        """
        try:
            return self.clc.v2.AntiAffinity.Create(
                name=p['name'],
                location=p['location'])
        except CLCException, ex:
            self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
                p['name'], ex.response_text
            ))

    def _delete_policy(self, p):
        """
        Delete an Anti Affinity Policy using the CLC API.
        :param p: module params (uses p['name'])
        :return: none
        """
        try:
            policy = self.policy_dict[p['name']]
            policy.Delete()
        except CLCException, ex:
            self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
                p['name'], ex.response_text
            ))

    def _policy_exists(self, policy_name):
        """
        Check to see if an Anti Affinity Policy exists
        :param policy_name: name of the policy
        :return: the SDK policy object if it exists, otherwise False
        """
        if policy_name in self.policy_dict:
            return self.policy_dict.get(policy_name)

        return False

    def _ensure_policy_is_absent(self, p):
        """
        Makes sure that a policy is absent; honors Ansible check mode.
        :param p: dictionary of policy name
        :return: tuple of if a deletion occurred and the name of the policy that was deleted
        """
        changed = False
        if self._policy_exists(policy_name=p['name']):
            changed = True
            if not self.module.check_mode:
                self._delete_policy(p)
        return changed, None

    def _ensure_policy_is_present(self, p):
        """
        Ensures that a policy is present; honors Ansible check mode.
        :param p: dictionary of a policy name
        :return: tuple of if an addition occurred and the policy that was added
        """
        changed = False
        policy = self._policy_exists(policy_name=p['name'])
        if not policy:
            changed = True
            policy = None
            if not self.module.check_mode:
                policy = self._create_policy(p)
        return changed, policy

    @staticmethod
    def _set_user_agent(clc):
        # Tag all SDK HTTP traffic so CLC can attribute it to this module.
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    # supports_check_mode=True: the ensure_* helpers skip API mutations
    # when Ansible runs with --check.
    module = AnsibleModule(
        argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
        supports_check_mode=True)
    clc_aa_policy = ClcAntiAffinityPolicy(module)
    clc_aa_policy.process_request()
from ansible.module_utils.basic import * # pylint: disable=W0614
if __name__ == '__main__':
main()
| gpl-3.0 |
yangbh/dpkt | dpkt/ip6.py | 6 | 13386 | # $Id: ip6.py 87 2013-03-05 19:41:04Z andrewflnr@gmail.com $
# -*- coding: utf-8 -*-
"""Internet Protocol, version 6."""
import dpkt
from decorators import deprecated
class IP6(dpkt.Packet):
    """IPv6 packet header plus any chained extension headers.

    The first 32-bit word (`_v_fc_flow`) packs version (4 bits), traffic
    class (8 bits) and flow label (20 bits); the `v`, `fc` and `flow`
    properties give masked access to the individual fields.
    """
    __hdr__ = (
        ('_v_fc_flow', 'I', 0x60000000L),
        ('plen', 'H', 0),  # payload length (not including header)
        ('nxt', 'B', 0),  # next header protocol
        ('hlim', 'B', 0),  # hop limit
        ('src', '16s', ''),
        ('dst', '16s', '')
    )

    # XXX - to be shared with IP. We cannot refer to the ip module
    # right now because ip.__load_protos() expects the IP6 class to be
    # defined.
    _protosw = None

    @property
    def v(self):
        # IP version: top 4 bits
        return self._v_fc_flow >> 28

    @v.setter
    def v(self, v):
        self._v_fc_flow = (self._v_fc_flow & ~0xf0000000L) | (v << 28)

    @property
    def fc(self):
        # traffic class: bits 20-27
        return (self._v_fc_flow >> 20) & 0xff

    @fc.setter
    def fc(self, v):
        self._v_fc_flow = (self._v_fc_flow & ~0xff00000L) | (v << 20)

    @property
    def flow(self):
        # flow label: low 20 bits
        return self._v_fc_flow & 0xfffff

    @flow.setter
    def flow(self, v):
        self._v_fc_flow = (self._v_fc_flow & ~0xfffff) | (v & 0xfffff)

    # Deprecated methods, will be removed in the future
    # =================================================
    @deprecated('v')
    def _get_v(self):
        return self.v

    @deprecated('v')
    def _set_v(self, v):
        self.v = v

    @deprecated('fc')
    def _get_fc(self):
        return self.fc

    @deprecated('fc')
    def _set_fc(self, v):
        self.fc = v

    @deprecated('flow')
    def _get_flow(self):
        return self.flow

    @deprecated('flow')
    def _set_flow(self, v):
        self.flow = v
    # =================================================

    def unpack(self, buf):
        """Parse the fixed header, walk the extension-header chain, then
        hand the remaining payload to the protocol parser for `self.p`."""
        dpkt.Packet.unpack(self, buf)
        self.extension_hdrs = {}

        if self.plen:
            buf = self.data[:self.plen]
        else:  # due to jumbo payload or TSO
            buf = self.data

        # Each known extension header consumes `ext.length` bytes and names
        # the next protocol via its `nxt` field.
        next_ext_hdr = self.nxt

        while next_ext_hdr in ext_hdrs:
            ext = ext_hdrs_cls[next_ext_hdr](buf)
            self.extension_hdrs[next_ext_hdr] = ext
            buf = buf[ext.length:]
            next_ext_hdr = getattr(ext, 'nxt', None)

        # set the payload protocol id
        if next_ext_hdr is not None:
            self.p = next_ext_hdr

        try:
            self.data = self._protosw[next_ext_hdr](buf)
            setattr(self, self.data.__class__.__name__.lower(), self.data)
        except (KeyError, dpkt.UnpackError):
            # unknown/unparseable payload: keep raw bytes
            self.data = buf

    def headers_str(self):
        """Output extension headers in order defined in RFC1883 (except dest opts)"""
        header_str = ""

        for hdr in ext_hdrs:
            if hdr in self.extension_hdrs:
                header_str += str(self.extension_hdrs[hdr])

        return header_str

    def __str__(self):
        # Serialize; fill in missing TCP(6)/UDP(17)/ICMPv6(58) checksums
        # using the IPv6 pseudo-header.
        if (self.p == 6 or self.p == 17 or self.p == 58) and not self.data.sum:
            # XXX - set TCP, UDP, and ICMPv6 checksums
            p = str(self.data)
            s = dpkt.struct.pack('>16s16sxBH', self.src, self.dst, self.nxt, len(p))
            s = dpkt.in_cksum_add(0, s)
            s = dpkt.in_cksum_add(s, p)
            try:
                self.data.sum = dpkt.in_cksum_done(s)
            except AttributeError:
                pass
        return self.pack_hdr() + self.headers_str() + str(self.data)

    @classmethod
    def set_proto(cls, p, pktclass):
        """Register *pktclass* as the parser for payload protocol *p*."""
        cls._protosw[p] = pktclass

    @classmethod
    def get_proto(cls, p):
        """Return the parser class registered for payload protocol *p*."""
        return cls._protosw[p]
import ip

# We are most likely still in the middle of ip.__load_protos() which
# implicitly loads this module through __import__(), so the content of
# ip.IP._protosw is still incomplete at the moment. By sharing the
# same dictionary by reference as opposed to making a copy, when
# ip.__load_protos() finishes, we will also automatically get the most
# up-to-date dictionary.
IP6._protosw = ip.IP._protosw
class IP6ExtensionHeader(dpkt.Packet):
    """
    An extension header is very similar to a 'sub-packet'.
    We just want to re-use all the hdr unpacking etc.

    Subclasses set `self.length` (total header size in bytes) during
    unpack so IP6.unpack can advance past them in the buffer.
    """
    pass
class IP6OptsHeader(IP6ExtensionHeader):
    """TLV-encoded options header (base for hop-by-hop and destination
    options).  Parsed options land in `self.options` as dicts."""
    __hdr__ = (
        ('nxt', 'B', 0),  # next extension header protocol
        ('len', 'B', 0)  # option data length in 8 octet units (ignoring first 8 octets) so, len 0 == 64bit header
    )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        self.length = (self.len + 1) * 8
        options = []

        index = 0

        # walk the TLV list; self.data starts after the 2-byte fixed header
        while index < self.length - 2:
            opt_type = ord(self.data[index])

            # PAD1 option: single zero byte, no length field
            if opt_type == 0:
                index += 1
                continue

            opt_length = ord(self.data[index + 1])

            if opt_type == 1:  # PADN option
                # PADN uses opt_length bytes in total
                index += opt_length + 2
                continue

            options.append(
                {'type': opt_type, 'opt_length': opt_length, 'data': self.data[index + 2:index + 2 + opt_length]})

            # add the two chars and the option_length, to move to the next option
            index += opt_length + 2

        self.options = options
class IP6HopOptsHeader(IP6OptsHeader):
    """Hop-by-hop options header: same TLV wire format as IP6OptsHeader."""
    pass
class IP6DstOptsHeader(IP6OptsHeader):
    """Destination options header: same TLV wire format as IP6OptsHeader."""
    pass
class IP6RoutingHeader(IP6ExtensionHeader):
    """Routing extension header (type 0 source routing).

    `rsvd_sl_bits` packs one reserved byte (bits 31..24) with a 24-bit
    strict/loose bitmap (bits 23..0), exposed via `sl_bits`.
    """
    __hdr__ = (
        ('nxt', 'B', 0),  # next extension header protocol
        ('len', 'B', 0),  # extension data length in 8 octet units (ignoring first 8 octets) (<= 46 for type 0)
        ('type', 'B', 0),  # routing type (currently, only 0 is used)
        ('segs_left', 'B', 0),  # remaining segments in route, until destination (<= 23)
        ('rsvd_sl_bits', 'I', 0),  # reserved (1 byte), strict/loose bitmap for addresses
    )

    @property
    def sl_bits(self):
        # low 24 bits: strict/loose bitmap
        return self.rsvd_sl_bits & 0xffffff

    @sl_bits.setter
    def sl_bits(self, v):
        # BUG FIX: the setter previously masked with 0xfffff (20 bits),
        # disagreeing with the 24-bit getter and silently truncating the
        # bitmap; the field is 24 bits wide.
        self.rsvd_sl_bits = (self.rsvd_sl_bits & ~0xffffff) | (v & 0xffffff)

    # Deprecated methods, will be removed in the future
    # =================================================
    @deprecated('sl_bits')
    def _get_sl_bits(self): return self.sl_bits

    @deprecated('sl_bits')
    def _set_sl_bits(self, v): self.sl_bits = v
    # =================================================

    def unpack(self, buf):
        """Parse the fixed header plus the trailing list of 16-byte
        intermediate addresses (`len` counts 8-octet units, 2 per address)."""
        hdr_size = 8
        addr_size = 16

        dpkt.Packet.unpack(self, buf)

        addresses = []
        num_addresses = self.len / 2
        buf = buf[hdr_size:hdr_size + num_addresses * addr_size]

        for i in range(num_addresses):
            addresses.append(buf[i * addr_size: i * addr_size + addr_size])

        self.data = buf
        self.addresses = addresses
        self.length = self.len * 8 + 8
class IP6FragmentHeader(IP6ExtensionHeader):
    """Fragment extension header.

    `frag_off_resv_m` packs: fragment offset (bits 15..3), two reserved
    bits (2..1) and the More-fragments flag (bit 0).
    """
    __hdr__ = (
        ('nxt', 'B', 0),  # next extension header protocol
        ('resv', 'B', 0),  # reserved, set to 0
        ('frag_off_resv_m', 'H', 0),  # frag offset (13 bits), reserved zero (2 bits), More frags flag
        ('id', 'I', 0)  # fragments id
    )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        self.length = self.__hdr_len__
        self.data = ''

    @property
    def frag_off(self):
        return self.frag_off_resv_m >> 3

    @frag_off.setter
    def frag_off(self, v):
        # clear bits 15..3, keep reserved bits + M flag
        self.frag_off_resv_m = (self.frag_off_resv_m & ~0xfff8) | (v << 3)

    @property
    def m_flag(self):
        return self.frag_off_resv_m & 1

    @m_flag.setter
    def m_flag(self, v):
        # BUG FIX: the old expression (`& ~0xfffe | v`) kept the OLD flag
        # bit and wiped the fragment offset; the M flag is bit 0 only.
        self.frag_off_resv_m = (self.frag_off_resv_m & ~1) | (v & 1)

    # Deprecated methods, will be removed in the future
    # =================================================
    @deprecated('frag_off')
    def _get_frag_off(self): return self.frag_off

    @deprecated('frag_off')
    def _set_frag_off(self, v): self.frag_off = v

    @deprecated('m_flag')
    def _get_m_flag(self): return self.m_flag

    @deprecated('m_flag')
    def _set_m_flag(self, v): self.m_flag = v
    # =================================================
class IP6AHHeader(IP6ExtensionHeader):
    """Authentication Header (AH); `auth_data` holds the ICV bytes."""
    __hdr__ = (
        ('nxt', 'B', 0),  # next extension header protocol
        ('len', 'B', 0),  # length of header in 4 octet units (ignoring first 2 units)
        ('resv', 'H', 0),  # reserved, 2 bytes of 0
        ('spi', 'I', 0),  # SPI security parameter index
        ('seq', 'I', 0)  # sequence no.
    )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        # total size: (len + 2) 4-octet units; the variable part after the
        # 12 fixed bytes is the integrity check value
        self.length = (self.len + 2) * 4
        self.auth_data = self.data[:(self.len - 1) * 4]
class IP6ESPHeader(IP6ExtensionHeader):
    """Encapsulating Security Payload header; the encrypted payload is
    opaque, so it consumes the whole remaining buffer."""
    __hdr__ = (
        ('spi', 'I', 0),
        ('seq', 'I', 0)
    )

    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        self.length = self.__hdr_len__ + len(self.data)
# Extension headers in the order they should be emitted (RFC 1883),
# and the protocol-number -> parser-class mapping used by IP6.unpack.
ext_hdrs = [ip.IP_PROTO_HOPOPTS, ip.IP_PROTO_ROUTING, ip.IP_PROTO_FRAGMENT, ip.IP_PROTO_AH, ip.IP_PROTO_ESP,
            ip.IP_PROTO_DSTOPTS]
ext_hdrs_cls = {ip.IP_PROTO_HOPOPTS: IP6HopOptsHeader,
                ip.IP_PROTO_ROUTING: IP6RoutingHeader,
                ip.IP_PROTO_FRAGMENT: IP6FragmentHeader,
                ip.IP_PROTO_ESP: IP6ESPHeader,
                ip.IP_PROTO_AH: IP6AHHeader,
                ip.IP_PROTO_DSTOPTS: IP6DstOptsHeader}
def test_ipg():
    """Round-trip: parse a captured IPv6+TCP packet, zero the checksum,
    and check serialization (which recomputes it) reproduces the bytes."""
    s = '`\x00\x00\x00\x00(\x06@\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11$\xff\xfe\x8c\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80r\xcd\xca\x00\x16\x04\x84F\xd5\x00\x00\x00\x00\xa0\x02\xff\xff\xf8\t\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\n}\x185?\x00\x00\x00\x00'
    _ip = IP6(s)
    # print `ip`
    _ip.data.sum = 0
    s2 = str(_ip)
    IP6(s)
    # print `ip2`
    assert (s == s2)
def test_ip6_routing_header():
    """Parse a packet carrying a routing header (protocol 43) and verify
    the address list, TCP payload, and byte-exact re-serialization."""
    s = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
    ip = IP6(s)
    s2 = str(ip)
    # 43 is Routing header id
    assert (len(ip.extension_hdrs[43].addresses) == 2)
    assert ip.tcp
    assert (s == s2)
    assert str(ip) == s
def test_ip6_fragment_header():
    """Check fragment-header field extraction (nxt/id/frag_off/m_flag)
    and round-trip serialization, standalone and inside an IP6 packet."""
    s = '\x06\xee\xff\xfb\x00\x00\xff\xff'
    fh = IP6FragmentHeader(s)
    # s2 = str(fh) variable 's2' is not used
    str(fh)
    assert (fh.nxt == 6)
    assert (fh.id == 65535)
    assert (fh.frag_off == 8191)
    assert (fh.m_flag == 1)
    assert str(fh) == s

    # IP6 with fragment header
    s = '\x60\x00\x00\x00\x00\x10\x2c\x00\x02\x22\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x03\x33\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x29\x00\x00\x01\x00\x00\x00\x00\x60\x00\x00\x00\x00\x10\x2c\x00'
    ip = IP6(s)
    assert str(ip) == s
def test_ip6_options_header():
    """Parse a TLV options header and verify the (non-padding) option
    count and byte-exact re-serialization."""
    s = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
    options = IP6OptsHeader(s).options
    assert (len(options) == 3)
    assert str(IP6OptsHeader(s)) == s
def test_ip6_ah_header():
    """Check AH field extraction (length/auth_data/spi/seq) and
    byte-exact re-serialization."""
    s = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
    ah = IP6AHHeader(s)
    assert (ah.length == 24)
    assert (ah.auth_data == 'xxxxxxxx')
    assert (ah.spi == 0x2020202)
    assert (ah.seq == 0x1010101)
    assert str(ah) == s
def test_ip6_esp_header():
    """Check ESP field extraction (length/spi) and re-serialization."""
    s = '\x00\x00\x01\x00\x00\x00\x00\x44\xe2\x4f\x9e\x68\xf3\xcd\xb1\x5f\x61\x65\x42\x8b\x78\x0b\x4a\xfd\x13\xf0\x15\x98\xf5\x55\x16\xa8\x12\xb3\xb8\x4d\xbc\x16\xb2\x14\xbe\x3d\xf9\x96\xd4\xa0\x39\x1f\x85\x74\x25\x81\x83\xa6\x0d\x99\xb6\xba\xa3\xcc\xb6\xe0\x9a\x78\xee\xf2\xaf\x9a'
    esp = IP6ESPHeader(s)
    assert esp.length == 68
    assert esp.spi == 256
    assert str(esp) == s
def test_ip6_extension_headers():
    """Extension headers attached by protocol number are all tracked by IP6."""
    p = '`\x00\x00\x00\x00<+@ H\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca G\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xca\xfe\x06\x04\x00\x02\x00\x00\x00\x00 \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xde\xca\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\x91\x7f\x00\x00'
    ip = IP6(p)
    # The parsed packet already has one extension header (routing, id 43);
    # attach four more keyed by their protocol numbers.
    o = ';\x04\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\xc2\x04\x00\x00\x00\x00\x05\x02\x00\x00\x01\x02\x00\x00'
    ip.extension_hdrs[0] = IP6HopOptsHeader(o)
    fh = '\x06\xee\xff\xfb\x00\x00\xff\xff'
    ip.extension_hdrs[44] = IP6FragmentHeader(fh)
    ah = ';\x04\x00\x00\x02\x02\x02\x02\x01\x01\x01\x01\x78\x78\x78\x78\x78\x78\x78\x78'
    ip.extension_hdrs[51] = IP6AHHeader(ah)
    do = ';\x02\x01\x02\x00\x00\xc9\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    ip.extension_hdrs[60] = IP6DstOptsHeader(do)
    assert len(ip.extension_hdrs) == 5
if __name__ == '__main__':
    # Run every IPv6 test in this module when executed directly.
    test_ipg()
    test_ip6_routing_header()
    test_ip6_fragment_header()
    test_ip6_options_header()
    test_ip6_ah_header()
    test_ip6_esp_header()
    test_ip6_extension_headers()
    print 'Tests Successful...'
| bsd-3-clause |
Willyham/tchannel-python | tchannel/testing/__init__.py | 1 | 1121 | # Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Public interface of this testing package: only the ``vcr`` submodule.
__all__ = ['vcr']
| mit |
savoirfairelinux/account-fiscal-rule | __unported__/account_fiscal_position_rule_sale/__openerp__.py | 1 | 1681 | # -*- encoding: utf-8 -*-
###############################################################################
#
# account_fiscal_position_rule_sale for OpenERP
# Copyright (C) 2009-TODAY Akretion <http://www.akretion.com>
# @author Renato Lima <renato.lima@akretion.com>
# Copyright 2012 Camptocamp SA
# @author: Guewen Baconnier
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Account Fiscal Position Rule Sale',
'version': '1.1.1',
'category': 'Generic Modules/Accounting',
'description': """Include a rule to decide the
correct fiscal position for Sale""",
'author': 'Akretion',
'license': 'AGPL-3',
'website': 'http://www.akretion.com',
'depends': [
'account_fiscal_position_rule',
'sale',
],
'data': [
'sale_view.xml',
'security/account_fiscal_position_rule_sale_security.xml',
'security/ir.model.access.csv',
],
'demo': [],
'installable': False,
}
| agpl-3.0 |
IxLabs/net-next | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Exactly one argument (the object file to check) is required.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# The readelf binary can be overridden via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")

# Matches a function header line of "readelf -u": "<name>: [0xstart-0xend]".
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches an unwind-region line carrying its length in slots: "... rlen=N".
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    """Report an error when a function's unwind-region lengths don't add up.

    func     -- function name from readelf, or a falsy placeholder; when
                falsy, the global [start-end] address range is shown instead
    slots    -- instruction-slot count derived from the address range
    rlen_sum -- sum of the rlen= values of the function's unwind regions

    Increments the global ``num_errors`` counter on a mismatch.
    """
    # Guard clause: nothing to do for the common, correct case.
    if slots == rlen_sum:
        return
    global num_errors
    num_errors += 1
    if not func:
        # No symbol name available: fall back to the raw address range
        # (``start``/``end`` are module-level state set by the parse loop).
        func = "[%#x-%#x]" % (start, end)
    # Parenthesized single-argument print behaves identically under
    # Python 2 and Python 3.
    print("ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum))
    return
# Totals accumulated over the whole input file.
num_funcs = 0
num_errors = 0

# Parsing state for the function currently being scanned.
func = False
slots = 0
rlen_sum = 0
# Walk the output of "readelf -u" one line at a time.
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # A new function header: validate the previous function first.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 instruction bundles hold 3 slots per 16 bytes.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the last function; no trailing header triggers the check above.
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    # Non-zero exit so callers (e.g. build scripts) can detect failure.
    sys.exit(1)
| gpl-2.0 |
karlito40/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_stream_hixie75.py | 496 | 2285 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for stream module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.stream import StreamHixie75
from test.test_msgutil import _create_request_hixie75
class StreamHixie75Test(unittest.TestCase):
    """A unittest for StreamHixie75 class."""

    def test_payload_length(self):
        """Each hixie75 wire encoding must decode back to its length value."""
        # (decoded length, wire encoding) pairs. The second loop variable was
        # renamed from 'bytes' to avoid shadowing the builtin.
        for length, encoding in ((0, '\x00'), (0x7f, '\x7f'), (0x80, '\x81\x00'),
                                 (0x1234, '\x80\xa4\x34')):
            test_stream = StreamHixie75(_create_request_hixie75(encoding))
            self.assertEqual(
                length, test_stream._read_payload_length_hixie75())
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()

# vi:sts=4 sw=4 et
| mpl-2.0 |
janezhango/BigDataMachineLearning | py/h2o_jobs.py | 2 | 9555 | import time, sys
import h2o, h2o_browse as h2b
def pollStatsWhileBusy(timeoutSecs=300, pollTimeoutSecs=15, retryDelaySecs=5):
    """Poll H2O cloud/job status until no job is busy, printing per-node
    utilization each poll and summing every numeric node stat.

    Returns a dict mapping each stat name to its per-poll mean.
    Raises Exception if jobs are still busy after timeoutSecs.
    """
    busy = True
    trials = 0
    start = time.time()
    polls = 0
    statSum = {}
    # just init for worst case 64 nodes?
    lastUsedMemBytes = [1 for i in range(64)]
    while busy:
        polls += 1
        # get utilization and print it
        # any busy jobs
        a = h2o.nodes[0].jobs_admin(timeoutSecs=60)
        busy = False
        for j in a['jobs']:
            # A job with no end_time that was not cancelled counts as busy.
            if j['end_time']=='' and not (j['cancelled'] or (j['result'].get('val', None)=='CANCELLED')):
                busy = True
                h2o.verboseprint("Still busy")
                break

        cloudStatus = h2o.nodes[0].get_cloud(timeoutSecs=timeoutSecs)
        nodes = cloudStatus['nodes']
        for i,n in enumerate(nodes):
            # check for drop in tot_mem_bytes, and report as "probably post GC"
            totMemBytes = n['tot_mem_bytes']
            maxMemBytes = n['max_mem_bytes']
            freeMemBytes = n['free_mem_bytes']
            usedMemBytes = totMemBytes - freeMemBytes
            availMemBytes = maxMemBytes - usedMemBytes
            print 'Node %s:' % i, \
                'num_cpus:', n['num_cpus'],\
                'my_cpu_%:', n['my_cpu_%'],\
                'sys_cpu_%:', n['sys_cpu_%'],\
                'system_load:', n['system_load'],\
                'tot_mem_bytes: {:,}'.format(totMemBytes),\
                'max_mem_bytes: {:,}'.format(maxMemBytes),\
                'free_mem_bytes: {:,}'.format(freeMemBytes),\
                'usedMemBytes: {:,}'.format(usedMemBytes)

            # Fractional drop in used memory since the previous poll.
            decrease = round((0.0 + lastUsedMemBytes[i] - usedMemBytes) / lastUsedMemBytes[i], 3)
            if decrease > .05:
                # A >5% drop is reported as a probable garbage collection.
                print
                print "\nProbably GC at Node {:}: usedMemBytes decreased by {:f} pct.. {:,} {:,}".format(i, 100 * decrease, lastUsedMemBytes[i], usedMemBytes)
                lastUsedMemBytes[i] = usedMemBytes
            # don't update lastUsedMemBytes if we're decreasing
            if usedMemBytes > lastUsedMemBytes[i]:
                lastUsedMemBytes[i] = usedMemBytes

            # sum all individual stats
            for stat in n:
                if stat in statSum:
                    try:
                        statSum[stat] += n[stat]
                    except TypeError:
                        # raise Exception("statSum[stat] should be number %s %s" % (statSum[stat], stat, n[stat]))
                        print "ERROR: statSum[stat] should be number %s %s %s" % (statSum[stat], stat, n[stat])
                        # do nothing
                else:
                    try:
                        # Coerce to float so later division yields a mean.
                        statSum[stat] = n[stat] + 0.0
                    except TypeError:
                        pass # ignore non-numbers

        trials += 1
        if trials%5 == 0:
            h2o.check_sandbox_for_errors()

        time.sleep(retryDelaySecs)
        if ((time.time() - start) > timeoutSecs):
            raise Exception("Timeout while polling in pollStatsWhileBusy: %s seconds" % timeoutSecs)

    # now print man
    print "Did %s polls" % polls
    statMean = {}
    print "Values are summed across all nodes (cloud members), so divide by node count"
    for s in statSum:
        statMean[s] = round((statSum[s] + 0.0) / polls, 2)
        print "per poll mean", s + ':', statMean[s]

    return statMean
# statMean['tot_mem_bytes'],
# statMean['num_cpus'],
# statMean['my_cpu_%'],
# statMean['sys_cpu_%'],
# statMean['system_load']
# poll the Jobs queue and wait if not all done.
# Return matching keys to a pattern for 'destination_key"
# for a job (model usually)
# FIX! the pattern doesn't limit the jobs you wait for (sounds like it does)
# I suppose it's rare that we'd want to wait for a subset of jobs, but lets
# 'key' 'description' 'destination_key' could all be interesting things you want to pattern match agains?
# what the heck, just look for a match in any of the 3 (no regex)
# if pattern is not None, only stall on jobs that match the pattern (in any of those 3)
def pollWaitJobs(pattern=None, errorIfCancelled=False, timeoutSecs=60, pollTimeoutSecs=60, retryDelaySecs=5, benchmarkLogging=None, stallForNJobs=None):
    """Poll the H2O job queue until (almost) all jobs have completed.

    pattern          -- if given, only jobs whose key, destination_key or
                        description contain this substring count as busy
    errorIfCancelled -- print loudly (but keep polling) when a cancelled
                        job is seen
    stallForNJobs    -- stop waiting once at most this many jobs remain busy
    benchmarkLogging -- list of stat kinds to snapshot while polling

    Returns the destination keys (from the last poll) matching *pattern*.
    """
    wait = True
    waitTime = 0
    ignoredJobs = set()
    while (wait):
        a = h2o.nodes[0].jobs_admin(timeoutSecs=pollTimeoutSecs)
        h2o.verboseprint("jobs_admin():", h2o.dump_json(a))
        jobs = a['jobs']
        busy = 0
        for j in jobs:
            # A job counts as cancelled via either the flag or the result value.
            cancelled = j['cancelled'] or (j['result'].get('val', None)=='CANCELLED')
            description = j['description']
            destination_key = j['destination_key']
            end_time = j['end_time']
            key = j['key']
            progress = j['progress']
            # has exception and val?
            result = j['result']
            start_time = j['start_time']

            # for now, don't ignore any exceptions
            if 'exception' in result and result['exception']:
                h2o.check_sandbox_for_errors()
                msg = "ERROR: pollWaitJobs found a job with a exception result when it shouldn't have:\n %s" % h2o.dump_json(j)
                raise Exception(msg)

            if result:
                # ignore if 'val' is 'OK'
                if 'val' in result and result['val'] == 'OK':
                    pass
                else:
                    print "non-empty result: %s for %s" % (result, key)

            if errorIfCancelled and cancelled:
                h2o.check_sandbox_for_errors()
                print ("ERROR: not stopping, but: pollWaitJobs found a cancelled job when it shouldn't have:\n %s" % h2o.dump_json(j))
                print ("Continuing so maybe a json response will give more info")
            ### h2o.verboseprint(j)
            # don't include cancelled jobs here
            elif end_time=='' and not cancelled:
                if not pattern:
                    # always print progress if busy job (no pattern used
                    print "time:", time.strftime("%I:%M:%S"), "progress:", progress, destination_key
                    h2o.verboseprint("description:", description, "end_time:", end_time)
                    busy +=1
                    h2o.verboseprint("pollWaitJobs: found a busy job, now: %s" % busy)
                else:
                    if (pattern in key) or (pattern in destination_key) or (pattern in description):
                        ## print "description:", description, "end_time:", end_time
                        busy += 1
                        h2o.verboseprint("pollWaitJobs: found a pattern-matched busy job, now %s" % busy)
                        # always print progress if pattern is used and matches
                        print "time:", time.strftime("%I:%M:%S"), "progress:", progress, destination_key
                    # we only want to print the warning message once
                    elif key not in ignoredJobs:
                        jobMsg = "%s %s %s" % (key, description, destination_key)
                        h2o.verboseprint(" %s job in progress but we're ignoring it. Doesn't match pattern." % jobMsg)
                        # I guess "key" is supposed to be unique over all time for a job id?
                        ignoredJobs.add(key)

        if stallForNJobs:
            waitFor = stallForNJobs
        else:
            waitFor = 0
        print " %s jobs in progress." % busy, "Waiting until %s in progress." % waitFor
        wait = busy > waitFor
        if not wait:
            break

        ### h2b.browseJsonHistoryAsUrlLastMatch("Jobs")
        if (wait and waitTime > timeoutSecs):
            print h2o.dump_json(jobs)
            raise Exception("Some queued jobs haven't completed after", timeoutSecs, "seconds")

        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(retryDelaySecs)
        waitTime += retryDelaySecs

        # any time we're sitting around polling we might want to save logging info (cpu/disk/jstack)
        # test would pass ['cpu','disk','jstack'] kind of list
        if benchmarkLogging:
            h2o.cloudPerfH2O.get_log_save(benchmarkLogging)

        # check the sandbox for stack traces! just like we do when polling normally
        h2o.check_sandbox_for_errors()

    patternKeys = []
    for j in jobs:
        # save the destination keys in progress that match pattern (for returning)
        if pattern and pattern in j['destination_key']:
            patternKeys.append(j['destination_key'])

    return patternKeys
def showAllJobs():
    """Fetch the jobs-admin JSON from node 0 and pretty-print it."""
    print("Showing all jobs")
    response = h2o.nodes[0].jobs_admin(timeoutSecs=10)
    print(h2o.dump_json(response))
#*******************************************************************************************
def cancelAllJobs(timeoutSecs=10, **kwargs): # I guess you could pass pattern
    """Cancel every unfinished job, then wait for the queue to drain.

    Extra keyword args are forwarded to pollWaitJobs().
    """
    # what if jobs had just been dispatched? wait until they get in the queue state correctly
    time.sleep(2)
    a = h2o.nodes[0].jobs_admin(timeoutSecs=120)
    print "jobs_admin():", h2o.dump_json(a)
    jobsList = a['jobs']
    for j in jobsList:
        # An empty end_time marks a job that is still running.
        if j['end_time'] == '':
            b = h2o.nodes[0].jobs_cancel(key=j['key'])
            print "jobs_cancel():", h2o.dump_json(b)

    # it's possible we could be in a bad state where jobs don't cancel cleanly
    pollWaitJobs(timeoutSecs=timeoutSecs, **kwargs) # wait for all the cancels to happen. If we missed one, we might timeout here.
| apache-2.0 |
luceatnobis/youtube-dl | test/test_cache.py | 177 | 1575 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import shutil
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.cache import Cache
def _is_empty(d):
return not bool(os.listdir(d))
def _mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
class TestCache(unittest.TestCase):
    def setUp(self):
        # Place the scratch cache under test/testdata next to this file,
        # then clear any leftovers from a previous run.
        here = os.path.dirname(os.path.abspath(__file__))
        testdata_dir = os.path.join(here, 'testdata')
        _mkdir(testdata_dir)
        self.test_dir = os.path.join(testdata_dir, 'cache_test')
        self.tearDown()

    def tearDown(self):
        # Remove the scratch cache directory if it exists.
        if os.path.exists(self.test_dir):
            shutil.rmtree(self.test_dir)

    def test_cache(self):
        ydl = FakeYDL({
            'cachedir': self.test_dir,
        })
        cache = Cache(ydl)
        payload = {'x': 1, 'y': ['ä', '\\a', True]}
        # Nothing stored yet, so every load is a miss.
        self.assertIsNone(cache.load('test_cache', 'k.'))
        cache.store('test_cache', 'k.', payload)
        # An unknown key in a populated section is still a miss.
        self.assertIsNone(cache.load('test_cache', 'k2'))
        self.assertFalse(_is_empty(self.test_dir))
        # The stored object round-trips exactly; other keys/sections miss.
        self.assertEqual(cache.load('test_cache', 'k.'), payload)
        self.assertIsNone(cache.load('test_cache', 'y'))
        self.assertIsNone(cache.load('test_cache2', 'k.'))
        cache.remove()
        self.assertFalse(os.path.exists(self.test_dir))
        self.assertIsNone(cache.load('test_cache', 'k.'))
if __name__ == '__main__':
    # Run the cache tests when this file is executed directly.
    unittest.main()
| unlicense |
NickSanzotta/WiFiSuite | wifisuite/arguments.py | 1 | 9054 | import argparse
from argparse import RawTextHelpFormatter
# Colors, Banner and cls
from theme import *
# Removes arg parse default usage Prefix
class HelpFormatter(argparse.HelpFormatter):
    """Help formatter that drops argparse's default "usage: " prefix."""

    def add_usage(self, usage, actions, groups, prefix=None):
        # Substitute an empty prefix unless the caller supplied one.
        effective_prefix = '' if prefix is None else prefix
        return super(HelpFormatter, self).add_usage(usage, actions, groups, effective_prefix)
def parse_args():
    """Build the WiFiSuite argparse CLI and return the parsed arguments.

    The help screens are assembled by hand into ``usage`` so the tool can
    show a colorized, custom-formatted help instead of argparse's default.
    """
    cls()
    # General Help
    general_help = colors.blue + ' Modules' + colors.normal + \
    '\n' + ' SCAN, ENUM, SPRAY, EVILTWIN, CONNECT, MAC, DATABASE\n' + \
    '\n' +colors.blue + ' Interface' + colors.normal + """
    [-i, --interface] Defines interface ex: wlan0
    """
    # SCAN Help
    scan_help = '\n' + colors.blue + ' SCAN' + colors.normal + """
    Usage Example:
    wifisuite.py -i wlan0 scan
    Basic Options:
    [--location] Tag your access point scans with a location: --location CoffeeShop]
    """
    # EVILTWIN Help
    eviltwin_help = '\n'+ colors.blue + ' EVILTWIN' + colors.normal + """
    Usage Example: (Self Signed Cert)
    wifisuite.py -iwlan0 -s"New Corp WiFi" -m 66:55:44:AB:40:88 -c4 --certname='WiFISuite' --band b eviltwin
    Usage Example: (Publicly Signed Cert)
    python wifisuite.py -i wlan0 --certname="corp.wifisuite.com" --email "youremail@mail.com" -s "Corporate WiFi" --public eviltwin
    Basic Options:
    [-s, --ssid] Define EvilTwin's SSID ex: --ssid New Corp WiFi
    [-m ] Define EvilTwin's MAC address ex: -m 66:55:44:AB:40:88
    [-c, --channel] Define EvilTwin's channel ex: --channel 4
    [--certname] Define the TLS certificate's name (Tip: Seen by the end user) ex: WiFiSuite
    [--public] Define if EvilTwin's TLS certificate will be a publicly signed cert.
    [--band] Define evil access point's WiFi band [A, B, G] ex: --band a
    Advanced Options: (Default values assigned)
    [--country] Define country listed on the evil access point's TLS certificate ex: --country=US
    [--state] Define state listed on the evil access point's TLS certificate ex: --state=NY
    [--city] Define city listed on the evil access point's TLS certificate ex: --city=NY
    [--company] Define company listed on the evil access point's TLS certificate ex: --company=NY
    [--ou] Define organizational unit listed on the evil access point's TLS certificate ex: --ou=IT
    [--email] Define email address listed on the evil access point's TLS certificate ex: --email=it@wifisuite.com
    [--debug] Displays Hostapd-wpe & CRTB's output to STDOUT ex: --debug
    """
    # ENUM Help
    enum_help = '\n'+ colors.blue + ' ENUM' + colors.normal + """
    Usage Example:
    wifisuite.py enum -i wlan0 -c 4 -d 10:da:43:a8:61:e4
    Basic Options:
    [-c, --channel] Define access point channel for enum mode ex: --channel 11]
    [-d, --deauth] Deauthenticate clients for enum mode ex: --deauth 10:10:10:A9:72:E6]
    Advanced Options:
    [--packets] Define number of deauth packets to send ex: --packets=30]
    [--seconds] Define Duration to Sniff Packets ex: --seconds=360]
    """
    # SPRAY/CONNECT Help
    spray_help = '\n' + colors.blue + ' SPRAY/CONNECT' + colors.normal + """
    Usage Example:
    wifisuite.py spray -i wlan0 -s FreeWiFi -u users.txt -p Summer2017
    Basic Options:
    [-s, --ssid] Define SSID ex: --ssid FreeWiFi]
    [-u, --user] Define user or user list ex: --user jsmith ex: users.txt
    [-p, --password] Define password ex: --password Summer2017
    Advanced Options:
    [--client_cert] Define client side certificate ex: --client_cert
    [--server_cert] Define server side Certificate Authority (CA) ex: --ca_cert /RadiusServer.pem]
    """
    # DATABASE Help
    database_help = '\n' + colors.blue + ' DATABASE' + colors.normal + """
    Basic Options:
    [database] Access WS database navigator ex: python wifisuite.py database
    [--database] Use non-default database ex: --database=/opt/WiFiSuite/WiFiSuite.bak.db
    """
    cheat_sheet = '\n' + colors.blue + ' Cheat Sheet' + colors.normal + """
    SCAN: python wifisuite.py -iwlan0 scan --location="CoffeeShop"
    EVILTWIN (EAP): python wifisuite.py -iwlan0 -s"Corp WiFi" -m 66:55:44:AB:40:88 -c4 --certname="WiFISuite" --band b eviltwin
    ENUM: python wifisuite.py -iwlan0 -d 10:10:10:A9:72:E6 -c4 enum --seconds=30 --packets=5
    SPRAY (EAP): python wifisuite.py -iwlan0 -s"Corp WiFi" -u data/users.txt -pWelcome1 spray
    SPRAY (WPA): python wifisuite.py -iwlan0 -s"Corp Hotspot" -p data/passwords.txt spray
    CONNECT (EAP): python wifisuite.py -iwlan0 -s"Corp WiFi" -ubeamr -pWelcome1 connect
    CONNECT (WPA): python wifisuite.py -iwlan0 -s"CompanyXYZ Hotspot" -p Password123 connect
    CONNECT (Open): python wifisuite.py -iwlan0 -s"CompanyXYZ Hotspot" connect
    MAC (Randomize):python wifisuite.py -iwlan0 mac
    MAC (Manual): python wifisuite.py -iwlan0 -m 10:10:10:A9:72:E6 mac
    DATABASE: python wifisuite.py database
    """
    # Custom Help
    Custom_help = general_help + scan_help + eviltwin_help + enum_help + spray_help + database_help + cheat_sheet
    # Create Parser
    parser = argparse.ArgumentParser(formatter_class=HelpFormatter, description=' '+
    str(banner()), usage=Custom_help, add_help=False)
    # MODULES
    mode_group = parser.add_argument_group(colors.blue + ' Modules' + colors.normal)
    mode_group.add_argument('mode', choices=['scan', 'eviltwin', 'enum', 'spray', 'connect', 'mac','database'], type=str.lower,\
    metavar='SCAN, EVILTWIN, ENUM, SPRAY, CONNECT, MAC, DATABASE', default='scan', help='')
    # INTERFACE
    interface_group = parser.add_argument_group(colors.blue + ' Interface' + colors.normal)
    interface_group.add_argument('-i','--interface', type=str, metavar='', nargs='?', help='')
    # SCAN OPTIONS
    scan_group = parser.add_argument_group(colors.blue + ' SCAN' + colors.normal)
    scan_group.add_argument('--location', type=str.upper, metavar='', help='')
    #EVILTWIN OPTIONS
    eviltwin_group = parser.add_argument_group(colors.blue + 'EVILTWIN' + colors.normal)
    eviltwin_group.add_argument('--certname', type=str, metavar='Default [WiFiSuite]', default = 'WiFiSuite', help='')
    eviltwin_group.add_argument('--public', action='store_true', default = '', help='')
    eviltwin_group.add_argument('--band', choices=['A', 'B', 'G'], type=str.upper, default = 'G', metavar='a, b, g, [Default] G', help='')
    #EVILTWIN ADV OPTIONS
    eviltwin_group.add_argument('--server_cert', type=str, metavar='Default [data/certs/server_cert.pem]', default='data/certs/server_cert.pem', help='')
    eviltwin_group.add_argument('--private_key', type=str, metavar='Default [data/certs/private_key.pem]', default='data/certs/private_key.pem', help='')
    eviltwin_group.add_argument('--country', type=str, metavar='Default [US]', default='US', help='')
    eviltwin_group.add_argument('--state', type=str, metavar='Default [NY]', default = 'NY', help='')
    eviltwin_group.add_argument('--city', type=str, metavar='Default [NY]', default = 'NY', help='')
    eviltwin_group.add_argument('--company', type=str, metavar='Default [WiFISuite, Inc]', default = 'WiFISuite, Inc', help='')
    eviltwin_group.add_argument('--ou', type=str, metavar='Default [IT]', default = 'IT', help='')
    eviltwin_group.add_argument('--email', type=str, metavar='Default [support@wifisuite.com]', default = 'supoprt@wifisuite.com', help='')
    eviltwin_group.add_argument('--debug', action='store_true', default ='', help='')
    # ENUM OPTIONS
    enum_group = parser.add_argument_group(colors.blue + ' ENUM' + colors.normal)
    enum_group.add_argument('-c','--channel', type=int, metavar='',default=11, help='')
    enum_group.add_argument('-d','--deauth', type=str, metavar='', help='')
    enum_group.add_argument('--packets', type=int, metavar='', default=30, help='')
    enum_group.add_argument('--seconds', type=int, metavar='', default=360, help='')
    # SPRAY OPTIONS
    spray_group = parser.add_argument_group(spray_help)
    spray_group.add_argument('-s','--ssid', type=str, metavar='', default='WiFiSuite', help='')
    spray_group.add_argument('-u','--user', type=str, metavar='', help='')
    spray_group.add_argument('-p','--password', type=str, metavar='', help='')
    spray_group.add_argument('--client_cert', type=str, metavar='', help='')
    # spray_group.add_argument('--server_cert', type=str, metavar='', help='')
    # MAC OPTIONS
    mac_group = parser.add_argument_group(colors.blue + ' MAC' + colors.normal)
    mac_group.add_argument('-m','--mac', type=str, metavar='', help='')
    # DATABASE OPTIONS
    database_group = parser.add_argument_group(colors.blue + ' DATABASE' + colors.normal)
    database_group.add_argument('--database', type=str, metavar='', default='data/WiFiSuite.db', help='')
    # Create parser instance
    args = parser.parse_args()
    # Checks for Modules that require -i/--interface option.
    if args.mode != 'database' and args.interface is None:
        parser.error('\n'+red('!') + args.mode + ' requires -i/--interface')
    # Return arg values
    return args
| mit |
google-research/group_testing | setup.py | 1 | 1480 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for installing group_testing as a pip module."""
import setuptools
# Package version published to pip.
VERSION = '1.0.0'

# Third-party runtime requirements with minimum versions.
install_requires = [
    'absl-py>=0.7.0',
    'gin-config>=0.3.0',
    'jax>=0.1.67',
    'jaxlib>=0.1.47',
    'pandas>=1.0.3',
    'numpy>=1.16.0',
    'scipy>=1.4.1',
    'scikit-learn>=0.23.0'
]

# Short and long descriptions are identical for this package.
description = ('Group Testing. This is the code that allows reproducing '
               'the results in the scientific paper '
               'https://arxiv.org/pdf/2004.12508.pdf.')

setuptools.setup(
    name='group_testing',
    version=VERSION,
    packages=setuptools.find_packages(),
    description=description,
    long_description=description,
    url='https://github.com/google-research/group_testing',
    author='Google LLC',
    author_email='opensource@google.com',
    install_requires=install_requires,
    license='Apache 2.0',
    keywords='bayesian group testing monte carlo',
)
| apache-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.3/django/db/backends/oracle/base.py | 150 | 32300 | """
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
import datetime
import sys
import time
from decimal import Decimal
def _setup_environment(environ):
    """Apply (name, value) pairs to the process environment so the Oracle
    client libraries pick them up."""
    import platform
    # Cygwin requires some special voodoo to set the environment variables
    # properly so that Oracle will see them.
    if platform.system().upper().startswith('CYGWIN'):
        try:
            import ctypes
        except ImportError, e:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured("Error loading ctypes: %s; "
                                       "the Oracle backend requires ctypes to "
                                       "operate correctly under Cygwin." % e)
        # Set the variables through the Win32 API so the Oracle DLLs see them.
        kernel32 = ctypes.CDLL('kernel32')
        for name, value in environ:
            kernel32.SetEnvironmentVariableA(name, value)
    else:
        import os
        os.environ.update(environ)
_setup_environment([
    # Oracle takes client-side character set encoding from the environment.
    ('NLS_LANG', '.UTF8'),
    # This prevents unicode from getting mangled by getting encoded into the
    # potentially non-unicode database character set.
    ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])

try:
    import cx_Oracle as Database
except ImportError, e:
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)

from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import smart_str, force_unicode

# Re-export the driver's exception classes under the backend's names.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError

# Check whether cx_Oracle was compiled with the WITH_UNICODE option. This will
# also be True in Python 3.0.
if int(Database.version.split('.', 1)[0]) >= 5 and not hasattr(Database, 'UNICODE'):
    convert_unicode = force_unicode
else:
    convert_unicode = smart_str
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing what the Oracle backend supports."""
    # cx_Oracle's fetchmany returns an empty tuple when no rows remain.
    empty_fetchmany_value = ()
    needs_datetime_string_cast = False
    # Oracle stores '' as NULL, so empty strings come back as None.
    interprets_empty_strings_as_nulls = True
    uses_savepoints = True
    can_return_id_from_insert = True
    allow_sliced_subqueries = False
    supports_subqueries_in_group_by = False
    supports_timezones = False
    supports_bitwise_or = False
    can_defer_constraint_checks = True
    ignores_nulls_in_unique_constraints = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
    def autoinc_sql(self, table, column):
        """Return the (sequence_sql, trigger_sql) pair that emulates an
        auto-incrementing primary key for *table*.*column*."""
        # To simulate auto-incrementing primary keys in Oracle, we have to
        # create a sequence and a trigger.
        sq_name = get_sequence_name(table)
        tr_name = get_trigger_name(table)
        tbl_name = self.quote_name(table)
        col_name = self.quote_name(column)
        # Create the sequence only if it does not already exist.
        sequence_sql = """
DECLARE
    i INTEGER;
BEGIN
    SELECT COUNT(*) INTO i FROM USER_CATALOG
        WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
    IF i = 0 THEN
        EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
    END IF;
END;
/""" % locals()
        # Fill the PK column from the sequence when an INSERT leaves it NULL.
        trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
    BEGIN
        SELECT "%(sq_name)s".nextval
        INTO :new.%(col_name)s FROM dual;
    END;
/""" % locals()
        return sequence_sql, trigger_sql
def date_extract_sql(self, lookup_type, field_name):
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions42a.htm#1017163
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
return "EXTRACT(%s FROM %s)" % (lookup_type, field_name)
    def date_interval_sql(self, sql, connector, timedelta):
        """
        Implements the interval functionality for expressions
        format for Oracle:
        (datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
        """
        # Split the timedelta's seconds into hours/minutes/seconds.
        minutes, seconds = divmod(timedelta.seconds, 60)
        hours, minutes = divmod(minutes, 60)
        days = str(timedelta.days)
        # DAY precision must cover the number of digits in the day count.
        day_precision = len(days)
        fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
        return fmt % (sql, connector, days, hours, minutes, seconds,
                      timedelta.microseconds, day_precision)
def date_trunc_sql(self, lookup_type, field_name):
# Oracle uses TRUNC() for both dates and numbers.
# http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions155a.htm#SQLRF06151
if lookup_type == 'day':
sql = 'TRUNC(%s)' % field_name
else:
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type)
return sql
def convert_values(self, value, field):
    """
    Coerce a raw value returned by cx_Oracle into the Python type that
    Django expects for ``field`` (which may be None when unknown).
    """
    if isinstance(value, Database.LOB):
        value = value.read()
        if field and field.get_internal_type() == 'TextField':
            value = force_unicode(value)

    # Oracle stores empty strings as null. We need to undo this in
    # order to adhere to the Django convention of using the empty
    # string instead of null, but only if the field accepts the
    # empty string.
    if value is None and field and field.empty_strings_allowed:
        value = u''
    # Convert 1 or 0 to True or False
    elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
        value = bool(value)
    # Force floats to the correct type
    elif value is not None and field and field.get_internal_type() == 'FloatField':
        value = float(value)
    # Convert floats to decimals
    elif value is not None and field and field.get_internal_type() == 'DecimalField':
        value = util.typecast_decimal(field.format_number(value))
    # cx_Oracle always returns datetime.datetime objects for
    # DATE and TIMESTAMP columns, but Django wants to see a
    # python datetime.date, .time, or .datetime.  We use the type
    # of the Field to determine which to cast to, but it's not
    # always available.
    # As a workaround, we cast to date if all the time-related
    # values are 0, or to time if the date is 1/1/1900.
    # This could be cleaned a bit by adding a method to the Field
    # classes to normalize values from the database (the to_python
    # method is used for validation and isn't what we want here).
    elif isinstance(value, Database.Timestamp):
        # In Python 2.3, the cx_Oracle driver returns its own
        # Timestamp object that we must convert to a datetime class.
        if not isinstance(value, datetime.datetime):
            value = datetime.datetime(value.year, value.month,
                    value.day, value.hour, value.minute, value.second,
                    value.fsecond)
        if field and field.get_internal_type() == 'DateTimeField':
            pass
        elif field and field.get_internal_type() == 'DateField':
            value = value.date()
        # NOTE: precedence here is (TimeField-check) OR (1900-01-01
        # heuristic) -- a 1900-01-01 timestamp is treated as a time even
        # with no field information.
        elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
            value = value.time()
        elif value.hour == value.minute == value.second == value.microsecond == 0:
            value = value.date()
    return value
def datetime_cast_sql(self):
    """Return the SQL template casting a bound string to a TIMESTAMP."""
    template = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')"
    return template
def deferrable_sql(self):
    """Constraint suffix deferring checks until commit.

    The leading space lets callers append this directly to constraint SQL.
    """
    return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
    """Return SQL dropping the sequence that backs *table*'s AutoField."""
    sequence = self.quote_name(get_sequence_name(table))
    return "DROP SEQUENCE %s;" % sequence
def fetch_returned_insert_id(self, cursor):
    # The RETURNING ... INTO clause bound the new row's id into the
    # cursor variable stashed by InsertIdVar; read it back as a long.
    return long(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type):
    """
    Return a template wrapping a column reference for use in WHERE.

    LOB columns (CLOB/BLOB/NCLOB) cannot be compared directly and must
    go through DBMS_LOB.SUBSTR; everything else passes through as-is.
    """
    if db_type is not None and db_type.endswith('LOB'):
        return "DBMS_LOB.SUBSTR(%s)"
    return "%s"
def last_insert_id(self, cursor, table_name, pk_name):
    """Return the current value of the sequence behind *table_name*.

    ``pk_name`` is unused: the sequence name is derived from the table.
    """
    query = 'SELECT "%s".currval FROM dual' % get_sequence_name(table_name)
    cursor.execute(query)
    return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
    """Wrap the column in UPPER() for case-insensitive lookups."""
    case_insensitive = ('iexact', 'icontains', 'istartswith', 'iendswith')
    if lookup_type in case_insensitive:
        return "UPPER(%s)"
    return "%s"
def max_in_list_size(self):
    """Largest number of items Oracle allows inside one IN (...) list."""
    oracle_in_limit = 1000
    return oracle_in_limit
def max_name_length(self):
    """Maximum length of an Oracle identifier (table/column/etc. name)."""
    identifier_limit = 30
    return identifier_limit
def prep_for_iexact_query(self, x):
    """No preprocessing needed for iexact on Oracle; return x unchanged."""
    return x
def process_clob(self, value):
    """Read a CLOB handle into a unicode string ('' when NULL)."""
    if value is None:
        return u''
    return force_unicode(value.read())
def quote_name(self, name):
    """Return *name* as a quoted, uppercased, length-limited identifier."""
    # SQL92 requires delimited (quoted) names to be case-sensitive. When
    # not quoted, Oracle has case-insensitive behavior for identifiers, but
    # always defaults to uppercase.
    # We simplify things by making Oracle identifiers always uppercase.
    if not name.startswith('"') and not name.endswith('"'):
        name = '"%s"' % util.truncate_name(name.upper(),
                                           self.max_name_length())
    # NOTE(review): upper() is applied even to already-quoted names --
    # presumably so every identifier matches Oracle's uppercase
    # convention; confirm before relying on case-sensitive quoting.
    return name.upper()
def random_function_sql(self):
    """SQL expression producing a random number on Oracle."""
    return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
    """Regex lookups require REGEXP_LIKE, which Oracle 9 lacks."""
    message = "Regexes are not supported in Oracle before version 10g."
    raise NotImplementedError(message)
def regex_lookup_10(self, lookup_type):
    """Build a REGEXP_LIKE condition (Oracle 10g+).

    'c' = case-sensitive match for 'regex', 'i' = case-insensitive
    for 'iregex'.
    """
    match_option = "'c'" if lookup_type == 'regex' else "'i'"
    return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def regex_lookup(self, lookup_type):
    """
    Placeholder used before the server version is known.

    Connecting patches ``ops.regex_lookup`` to regex_lookup_9 or
    regex_lookup_10 (see DatabaseWrapper._cursor), so force a
    connection and delegate to whichever implementation was installed.
    """
    # If regex_lookup is called before it's been initialized, then create
    # a cursor to initialize it and recur.
    from django.db import connection
    connection.cursor()
    return connection.ops.regex_lookup(lookup_type)
def return_insert_id(self):
    """SQL suffix plus bind params used to fetch the id of an INSERT.

    The InsertIdVar binds a cursor variable that later holds the new id
    (read back by fetch_returned_insert_id).
    """
    return ("RETURNING %s INTO %%s", (InsertIdVar(),))
def savepoint_create_sql(self, sid):
    # Quote the savepoint id like any identifier and encode for the driver.
    return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
    # Roll back to a previously created (quoted) savepoint.
    return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences):
    """
    Return the list of SQL statements that empties every given table and
    resets the associated sequences.
    """
    # Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
    # 'TRUNCATE z;'... style SQL statements
    if tables:
        # Oracle does support TRUNCATE, but it seems to get us into
        # FK referential trouble, whereas DELETE FROM table works.
        sql = ['%s %s %s;' % \
                (style.SQL_KEYWORD('DELETE'),
                 style.SQL_KEYWORD('FROM'),
                 style.SQL_FIELD(self.quote_name(table)))
                for table in tables]
        # Since we've just deleted all the rows, running our sequence
        # ALTER code will reset the sequence to 0.
        for sequence_info in sequences:
            sequence_name = get_sequence_name(sequence_info['table'])
            table_name = self.quote_name(sequence_info['table'])
            # Default to 'id' when the PK column name is unknown.
            column_name = self.quote_name(sequence_info['column'] or 'id')
            query = _get_sequence_reset_sql() % {'sequence': sequence_name,
                                                 'table': table_name,
                                                 'column': column_name}
            sql.append(query)
        return sql
    else:
        return []
def sequence_reset_sql(self, style, model_list):
    """
    Return PL/SQL statements advancing each model's backing sequence so
    nextval is past the current MAX of the PK column (e.g. after loading
    fixtures with explicit ids).
    """
    from django.db import models
    output = []
    query = _get_sequence_reset_sql()
    for model in model_list:
        for f in model._meta.local_fields:
            if isinstance(f, models.AutoField):
                table_name = self.quote_name(model._meta.db_table)
                sequence_name = get_sequence_name(model._meta.db_table)
                column_name = self.quote_name(f.column)
                output.append(query % {'sequence': sequence_name,
                                       'table': table_name,
                                       'column': column_name})
                # Only one AutoField is allowed per model, so don't
                # continue to loop
                break
        # Auto-created M2M join tables also carry an implicit 'id'
        # sequence; explicit through-models are handled as normal models.
        for f in model._meta.many_to_many:
            if not f.rel.through:
                table_name = self.quote_name(f.m2m_db_table())
                sequence_name = get_sequence_name(f.m2m_db_table())
                column_name = self.quote_name('id')
                output.append(query % {'sequence': sequence_name,
                                       'table': table_name,
                                       'column': column_name})
    return output
def start_transaction_sql(self):
    """Oracle opens transactions implicitly; no BEGIN statement needed."""
    return ''
def tablespace_sql(self, tablespace, inline=False):
    """Return the TABLESPACE clause (index variant when *inline*)."""
    prefix = "USING INDEX " if inline else ""
    return "%sTABLESPACE %s" % (prefix, self.quote_name(tablespace))
def value_to_db_datetime(self, value):
    """Reject tz-aware datetimes, then defer to the base implementation."""
    # Oracle doesn't support tz-aware datetimes
    if getattr(value, 'tzinfo', None) is not None:
        raise ValueError("Oracle backend does not support timezone-aware datetimes.")
    return super(DatabaseOperations, self).value_to_db_datetime(value)
def value_to_db_time(self, value):
    """
    Convert a time value to the datetime Oracle needs, anchored on the
    sentinel date 1900-01-01 (which convert_values recognizes on read).
    """
    if value is None:
        return None

    if isinstance(value, basestring):
        return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))

    # Oracle doesn't support tz-aware datetimes
    if value.tzinfo is not None:
        raise ValueError("Oracle backend does not support timezone-aware datetimes.")

    return datetime.datetime(1900, 1, 1, value.hour, value.minute,
                             value.second, value.microsecond)
def year_lookup_bounds_for_date_field(self, value):
    """Return inclusive [first, last] ISO date strings for year *value*."""
    return ['%s-01-01' % value, '%s-12-31' % value]
def combine_expression(self, connector, sub_expressions):
    "Oracle requires special cases for %% and & operators in query expressions"
    joined = ','.join(sub_expressions)
    if connector == '%%':
        # Oracle has no % operator; use the MOD() function.
        return 'MOD(%s)' % joined
    if connector == '&':
        # Bit-wise AND via the BITAND() function.
        return 'BITAND(%s)' % joined
    if connector == '|':
        raise NotImplementedError("Bit-wise or is not supported in Oracle.")
    return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
    """Connection wrapper for the Oracle backend (cx_Oracle driver)."""
    vendor = 'oracle'
    # Lazily initialized per instance: see _UninitializedOperatorsDescriptor.
    operators = _UninitializedOperatorsDescriptor()

    # Default operator table. The TRANSLATE(... USING NCHAR_CS) dance
    # coerces both sides into the national character set for LIKE.
    _standard_operators = {
        'exact': '= %s',
        'iexact': '= UPPER(%s)',
        'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
    }

    # Fallback table for servers where the TRANSLATE form fails
    # (ticket #14149); LIKEC compares in the national character set.
    _likec_operators = _standard_operators.copy()
    _likec_operators.update({
        'contains': "LIKEC %s ESCAPE '\\'",
        'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
        'startswith': "LIKEC %s ESCAPE '\\'",
        'endswith': "LIKEC %s ESCAPE '\\'",
        'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
        'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
    })

    def __init__(self, *args, **kwargs):
        """Set up backend helper objects; no connection is opened yet."""
        super(DatabaseWrapper, self).__init__(*args, **kwargs)

        # Detected lazily on first cursor creation (major version number).
        self.oracle_version = None
        self.features = DatabaseFeatures(self)
        # RETURNING ... INTO can be disabled via OPTIONS for drivers or
        # setups where it misbehaves.
        use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
        self.features.can_return_id_from_insert = use_returning_into
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)

    def _valid_connection(self):
        # A live connection object is the only validity signal we have.
        return self.connection is not None

    def _connect_string(self):
        """Build the cx_Oracle 'user/password@dsn' connect string."""
        settings_dict = self.settings_dict
        if not settings_dict['HOST'].strip():
            settings_dict['HOST'] = 'localhost'
        if settings_dict['PORT'].strip():
            dsn = Database.makedsn(settings_dict['HOST'],
                                   int(settings_dict['PORT']),
                                   settings_dict['NAME'])
        else:
            # No port: NAME is assumed to be a full TNS name / alias.
            dsn = settings_dict['NAME']
        return "%s/%s@%s" % (settings_dict['USER'],
                             settings_dict['PASSWORD'], dsn)

    def _cursor(self):
        """Return a cursor, opening and configuring the connection first
        if needed (session NLS settings, operator table, version probe)."""
        cursor = None
        if not self._valid_connection():
            conn_string = convert_unicode(self._connect_string())
            conn_params = self.settings_dict['OPTIONS'].copy()
            # 'use_returning_into' is our own option, not a cx_Oracle one.
            if 'use_returning_into' in conn_params:
                del conn_params['use_returning_into']
            self.connection = Database.connect(conn_string, **conn_params)
            cursor = FormatStylePlaceholderCursor(self.connection)
            # Set oracle date to ansi date format.  This only needs to execute
            # once when we create a new connection. We also set the Territory
            # to 'AMERICA' which forces Sunday to evaluate to a '1' in TO_CHAR().
            cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' "
                           "NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF' "
                           "NLS_TERRITORY = 'AMERICA'")

            if 'operators' not in self.__dict__:
                # Ticket #14149: Check whether our LIKE implementation will
                # work for this connection or we need to fall back on LIKEC.
                # This check is performed only once per DatabaseWrapper
                # instance per thread, since subsequent connections will use
                # the same settings.
                try:
                    cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
                                   % self._standard_operators['contains'],
                                   ['X'])
                except utils.DatabaseError:
                    self.operators = self._likec_operators
                else:
                    self.operators = self._standard_operators

            try:
                self.oracle_version = int(self.connection.version.split('.')[0])
                # There's no way for the DatabaseOperations class to know the
                # currently active Oracle version, so we do some setups here.
                # TODO: Multi-db support will need a better solution (a way to
                # communicate the current version).
                if self.oracle_version <= 9:
                    self.ops.regex_lookup = self.ops.regex_lookup_9
                else:
                    self.ops.regex_lookup = self.ops.regex_lookup_10
            except ValueError:
                pass
            try:
                self.connection.stmtcachesize = 20
            except:
                # Django docs specify cx_Oracle version 4.3.1 or higher, but
                # stmtcachesize is available only in 4.3.2 and up.
                pass
            connection_created.send(sender=self.__class__, connection=self)
        if not cursor:
            cursor = FormatStylePlaceholderCursor(self.connection)
        return cursor

    # Oracle doesn't support savepoint commits.  Ignore them.
    def _savepoint_commit(self, sid):
        pass

    def _commit(self):
        """Commit, translating driver errors into Django's exceptions."""
        if self.connection is not None:
            try:
                return self.connection.commit()
            except Database.IntegrityError, e:
                # In case cx_Oracle implements (now or in a future version)
                # raising this specific exception
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            except Database.DatabaseError, e:
                # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
                # with the following attributes and values:
                #  code = 2091
                #  message = 'ORA-02091: transaction rolled back
                #            'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
                #               _C00102056) violated - parent key not found'
                # We convert that particular case to our IntegrityError exception
                x = e.args[0]
                if hasattr(x, 'code') and hasattr(x, 'message') \
                   and x.code == 2091 and 'ORA-02291' in x.message:
                    raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
                raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
class OracleParam(object):
    """
    Wrapper object for formatting parameters for Oracle. If the string
    representation of the value is large enough (greater than 4000 characters)
    the input size needs to be set as CLOB. Alternatively, if the parameter
    has an `input_size` attribute, then the value of the `input_size` attribute
    will be used instead. Otherwise, no input size will be set for the
    parameter when executing the query.
    """

    def __init__(self, param, cursor, strings_only=False):
        # Objects exposing bind_parameter (e.g. VariableWrapper,
        # InsertIdVar) are bound as-is instead of being stringified.
        if hasattr(param, 'bind_parameter'):
            self.smart_str = param.bind_parameter(cursor)
        else:
            self.smart_str = convert_unicode(param, cursor.charset,
                                             strings_only)
        if hasattr(param, 'input_size'):
            # If parameter has `input_size` attribute, use that.
            self.input_size = param.input_size
        elif isinstance(param, basestring) and len(param) > 4000:
            # Mark any string param greater than 4000 characters as a CLOB.
            self.input_size = Database.CLOB
        else:
            self.input_size = None
class VariableWrapper(object):
    """
    Adapter around a cursor variable that keeps it from being turned
    into a string when an OracleParam is built from it; everything else
    is delegated to the wrapped object, so it can be passed straight
    into Cursor.execute.
    """

    def __init__(self, var):
        self.var = var

    def bind_parameter(self, cursor):
        # OracleParam recognises this hook and binds the raw variable.
        return self.var

    def __getattr__(self, key):
        # Unknown attributes fall through to the wrapped variable.
        return getattr(self.var, key)

    def __setattr__(self, key, value):
        if key == 'var':
            # The wrapped object itself lives directly in our __dict__.
            self.__dict__[key] = value
        else:
            # Everything else is stored on the wrapped variable.
            setattr(self.var, key, value)
class InsertIdVar(object):
    """
    A late-binding cursor variable that can be passed to Cursor.execute
    as a parameter, in order to receive the id of the row created by an
    insert statement.
    """

    def bind_parameter(self, cursor):
        # Allocate a NUMBER output variable and stash it on the cursor so
        # fetch_returned_insert_id() can read it after the INSERT runs.
        param = cursor.cursor.var(Database.NUMBER)
        cursor._insert_id_var = param
        return param
class FormatStylePlaceholderCursor(object):
    """
    Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
    style. This fixes it -- but note that if you want to use a literal "%s" in
    a query, you'll need to use "%%s".

    We also do automatic conversion between Unicode on the Python side and
    UTF-8 -- for talking to Oracle -- in here.
    """
    charset = 'utf-8'

    def __init__(self, connection):
        self.cursor = connection.cursor()
        # Necessary to retrieve decimal values without rounding error.
        self.cursor.numbersAsStrings = True
        # Default arraysize of 1 is highly sub-optimal.
        self.cursor.arraysize = 100

    def _format_params(self, params):
        # Wrap every parameter so string conversion and input sizes are
        # handled uniformly (see OracleParam).
        return tuple([OracleParam(p, self, True) for p in params])

    def _guess_input_sizes(self, params_list):
        """Tell the driver about CLOB-sized parameters, per position."""
        sizes = [None] * len(params_list[0])
        for params in params_list:
            for i, value in enumerate(params):
                if value.input_size:
                    sizes[i] = value.input_size
        self.setinputsizes(*sizes)

    def _param_generator(self, params):
        # Unwrap OracleParams back to the actual bindable values.
        return [p.smart_str for p in params]

    def execute(self, query, params=None):
        """Execute *query* after rewriting %s placeholders to :argN."""
        if params is None:
            params = []
        else:
            params = self._format_params(params)
        args = [(':arg%d' % i) for i in range(len(params))]
        # cx_Oracle wants no trailing ';' for SQL statements.  For PL/SQL, it
        # it does want a trailing ';' but not a trailing '/'.  However, these
        # characters must be included in the original query in case the query
        # is being passed to SQL*Plus.
        if query.endswith(';') or query.endswith('/'):
            query = query[:-1]
        query = convert_unicode(query % tuple(args), self.charset)
        self._guess_input_sizes([params])
        try:
            return self.cursor.execute(query, self._param_generator(params))
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
            if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]

    def executemany(self, query, params=None):
        """Execute *query* once per parameter row in *params*."""
        try:
            args = [(':arg%d' % i) for i in range(len(params[0]))]
        except (IndexError, TypeError):
            # No params given, nothing to do
            return None
        # cx_Oracle wants no trailing ';' for SQL statements.  For PL/SQL, it
        # it does want a trailing ';' but not a trailing '/'.  However, these
        # characters must be included in the original query in case the query
        # is being passed to SQL*Plus.
        if query.endswith(';') or query.endswith('/'):
            query = query[:-1]
        query = convert_unicode(query % tuple(args), self.charset)
        formatted = [self._format_params(i) for i in params]
        self._guess_input_sizes(formatted)
        try:
            return self.cursor.executemany(query,
                                [self._param_generator(p) for p in formatted])
        except Database.IntegrityError, e:
            raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
        except Database.DatabaseError, e:
            # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
            if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
                raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
            raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]

    def fetchone(self):
        # Run the custom row factory over each fetched row (type coercion).
        row = self.cursor.fetchone()
        if row is None:
            return row
        return _rowfactory(row, self.cursor)

    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize
        return tuple([_rowfactory(r, self.cursor)
                      for r in self.cursor.fetchmany(size)])

    def fetchall(self):
        return tuple([_rowfactory(r, self.cursor)
                      for r in self.cursor.fetchall()])

    def var(self, *args):
        # Wrap so the variable survives OracleParam conversion intact.
        return VariableWrapper(self.cursor.var(*args))

    def arrayvar(self, *args):
        return VariableWrapper(self.cursor.arrayvar(*args))

    def __getattr__(self, attr):
        # Delegate everything we don't override to the real cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        return CursorIterator(self.cursor)
class CursorIterator(object):

    """Cursor iterator wrapper that invokes our custom row factory."""

    def __init__(self, cursor):
        self.cursor = cursor
        self.iter = iter(cursor)

    def __iter__(self):
        return self

    def next(self):
        # Apply the same type coercion as fetchone()/fetchall().
        return _rowfactory(self.iter.next(), self.cursor)
def _rowfactory(row, cursor):
    """
    Return *row* with each value coerced to the appropriate Python type,
    using the cursor description's NUMBER precision/scale metadata
    (numbers arrive as strings because numbersAsStrings is set).
    """
    # Cast numeric values as the appropriate Python type based upon the
    # cursor description, and convert strings to unicode.
    casted = []
    for value, desc in zip(row, cursor.description):
        if value is not None and desc[1] is Database.NUMBER:
            precision, scale = desc[4:6]
            if scale == -127:
                if precision == 0:
                    # NUMBER column: decimal-precision floating point
                    # This will normally be an integer from a sequence,
                    # but it could be a decimal value.
                    if '.' in value:
                        value = Decimal(value)
                    else:
                        value = int(value)
                else:
                    # FLOAT column: binary-precision floating point.
                    # This comes from FloatField columns.
                    value = float(value)
            elif precision > 0:
                # NUMBER(p,s) column: decimal-precision fixed point.
                # This comes from IntField and DecimalField columns.
                if scale == 0:
                    value = int(value)
                else:
                    value = Decimal(value)
            elif '.' in value:
                # No type information. This normally comes from a
                # mathematical expression in the SELECT list. Guess int
                # or Decimal based on whether it has a decimal point.
                value = Decimal(value)
            else:
                value = int(value)
        elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
                         Database.LONG_STRING):
            value = to_unicode(value)
        casted.append(value)
    return tuple(casted)
def to_unicode(s):
    """
    Convert strings to Unicode objects (and return all other data types
    unchanged).
    """
    if isinstance(s, basestring):
        return force_unicode(s)
    return s
def _get_sequence_reset_sql():
    """
    Return a PL/SQL template (with %(sequence)s / %(table)s / %(column)s
    placeholders) that advances a sequence until nextval exceeds the
    current MAX of the table's PK column.
    """
    # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
    return """
DECLARE
    table_value integer;
    seq_value integer;
BEGIN
    SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
    SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
        WHERE sequence_name = '%(sequence)s';
    WHILE table_value > seq_value LOOP
        SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
    END LOOP;
END;
/"""
def get_sequence_name(table):
    """Derive the backing sequence's name: uppercased, truncated, '_SQ'."""
    # Reserve 3 characters for the '_SQ' suffix within the 30-char limit.
    name_length = DatabaseOperations().max_name_length() - 3
    return '%s_SQ' % util.truncate_name(table, name_length).upper()
def get_trigger_name(table):
    """Derive the autoinc trigger's name: uppercased, truncated, '_TR'."""
    # Reserve 3 characters for the '_TR' suffix within the 30-char limit.
    name_length = DatabaseOperations().max_name_length() - 3
    return '%s_TR' % util.truncate_name(table, name_length).upper()
| apache-2.0 |
gauribhoite/personfinder | env/google_appengine/lib/django-1.2/django/contrib/messages/tests/session.py | 413 | 1230 | from django.contrib.messages.tests.base import BaseTest
from django.contrib.messages.storage.session import SessionStorage
def set_session_data(storage, messages):
    """
    Store *messages* directly in the backend request's session and drop
    the storage's cached data so the next read goes to the session again.
    """
    storage.request.session[storage.session_key] = messages
    try:
        del storage._loaded_data
    except AttributeError:
        pass
def stored_session_messages_count(storage):
    """Return how many messages the backend's session currently holds."""
    return len(storage.request.session.get(storage.session_key, []))
class SessionTest(BaseTest):
    """Run the shared message-storage test suite against SessionStorage."""
    storage_class = SessionStorage

    def get_request(self):
        # Attach a plain dict as the session so no real session backend
        # is needed for these tests.
        self.session = {}
        request = super(SessionTest, self).get_request()
        request.session = self.session
        return request

    def stored_messages_count(self, storage, response):
        # The response is irrelevant for session storage; count directly.
        return stored_session_messages_count(storage)

    def test_get(self):
        storage = self.storage_class(self.get_request())

        # Set initial data.
        example_messages = ['test', 'me']
        set_session_data(storage, example_messages)

        # Test that the message actually contains what we expect.
        self.assertEqual(list(storage), example_messages)
| apache-2.0 |
lightbase/WSCServer | wscserver/model/Acao.py | 1 | 2290 | from pyramid_restler.model import SQLAlchemyORMContext
from sqlalchemy import Table
from sqlalchemy.orm import mapper
from sqlalchemy.schema import Column
from sqlalchemy.types import *
from wscserver.model import Base, session
class Acao():
    """
    Mapped class for the 'acao' (action) table.
    """
    # NOTE(review): with the classical mapper() call at the bottom of this
    # module, __tablename__ is presumably unused (the Table object defines
    # the name) -- confirm before removing.
    __tablename__ = 'acao'

    id_acao = Column(String(30), primary_key=True, nullable=False)
    te_descricao_breve = Column(String(100))
    te_descricao = Column(String)
    te_nome_curto_modulo = Column(String(20))
    dt_hr_alteracao = Column(DateTime(timezone=False))
    cs_opcional = Column(String(1), nullable=False)

    def __init__(self, id_acao, te_descricao_breve, te_descricao,
                 te_nome_curto_modulo, dt_hr_alteracao, cs_opcional):
        """
        Initialize every column value of the row.
        """
        self.id_acao = id_acao
        self.te_descricao_breve = te_descricao_breve
        self.te_descricao = te_descricao
        self.te_nome_curto_modulo = te_nome_curto_modulo
        self.dt_hr_alteracao = dt_hr_alteracao
        self.cs_opcional = cs_opcional

    def __repr__(self):
        """
        Return a string representation listing every column value.
        """
        return "<Acao('%s, %s, %s, %s, %s, %s')>" % (self.id_acao,
                                                     self.te_descricao_breve,
                                                     self.te_descricao,
                                                     self.te_nome_curto_modulo,
                                                     self.dt_hr_alteracao,
                                                     self.cs_opcional
                                                     )
class AcaoContextFactory(SQLAlchemyORMContext):
    """pyramid_restler context factory exposing Acao over REST."""
    entity = Acao

    def session_factory(self):
        # Reuse the module-level scoped session from wscserver.model.
        return session
# Classical SQLAlchemy mapping: declare the Table explicitly and map the
# plain Acao class onto it. extend_existing allows re-declaration if the
# table is already present in the metadata (e.g. on module reload).
acao = Table('acao', Base.metadata,
             Column('id_acao', String(30), primary_key=True, nullable=False),
             Column('te_descricao_breve', String(100)),
             Column('te_descricao', String),
             Column('te_nome_curto_modulo', String(20)),
             Column('dt_hr_alteracao', DateTime(timezone=False)),
             Column('cs_opcional', String(1), nullable=False),
             extend_existing=True
             )

mapper(Acao, acao)
FurCode/RoboCop | lib/bs4/builder/_html5lib.py | 14 | 7801 | __all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""

    features = ['html5lib', PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding):
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding
        # html5lib does its own encoding detection, so pass the markup
        # through untouched with no (encoding, declared encoding) info.
        return markup, None, None, False

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, unicode):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]

    def create_treebuilder(self, namespaceHTMLElements):
        # Called back by html5lib; keep a handle on the builder so feed()
        # can reach the soup it populates.
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """html5lib TreeBuilder implementation backed by a BeautifulSoup tree."""

    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        # The soup object itself plays the role of the document node.
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # NOTE(review): BeautifulSoup is not imported in this module --
        # this path presumably raises NameError if exercised; confirm.
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Dict-like view of an Element's attributes, as html5lib expects.

    Reads go through a snapshot of the attributes taken at construction
    time; writes are forwarded to the underlying element (the snapshot is
    deliberately left untouched, preserving historical behavior).
    """

    def __init__(self, element):
        self.element = element
        self.attrs = dict(self.element.attrs)

    def __iter__(self):
        return list(self.attrs.items()).__iter__()

    def __setitem__(self, name, value):
        # (Removed a leftover no-op debug expression that built and
        # discarded a tuple here.)
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        # Direct dict membership: O(1) instead of materializing keys().
        return name in self.attrs
class Element(html5lib.treebuilders._base.Node):
    """html5lib Node adapter wrapping a BeautifulSoup Tag."""

    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace

    def appendChild(self, node):
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # Concatenate new text onto old text node
            # XXX This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + node.element)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            self.soup.object_was_parsed(node.element, parent=self.element)

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:

            converted_attributes = []
            for name, value in list(attributes.items()):
                # html5lib hands namespaced attribute names as tuples;
                # convert them to NamespacedAttribute keys.
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value

            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in attributes.items():
                self.element[name] = value

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)

    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        text = TextNode(self.soup.new_string(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
        else:
            self.appendChild(text)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, newParent):
        # Move every child of this element under newParent, preserving order.
        while self.element.contents:
            child = self.element.contents[0]
            child.extract()
            if isinstance(child, Tag):
                newParent.appendChild(
                    Element(child, self.soup, namespaces["html"]))
            else:
                newParent.appendChild(
                    TextNode(child, self.soup))

    def cloneNode(self):
        # Shallow clone: copies the tag and its attributes, not children.
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key,value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace == None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Node adapter wrapping a NavigableString (text or comment)."""

    def __init__(self, element, soup):
        # Text nodes have no tag name, so bypass Element.__init__.
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.element = element
        self.soup = soup

    def cloneNode(self):
        raise NotImplementedError
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/sparse/csc.py | 1 | 6951 | """Compressed Sparse Column matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
import numpy as np
from scipy._lib.six import xrange
from .base import spmatrix
from ._sparsetools import csc_tocsr
from . import _sparsetools
from .sputils import upcast, isintlike, IndexMixin, get_index_dtype
from .compressed import _cs_matrix
class csc_matrix(_cs_matrix, IndexMixin):
    """
    Compressed Sparse Column matrix

    This can be instantiated in several ways:

        csc_matrix(D)
            with a dense matrix or rank-2 ndarray D

        csc_matrix(S)
            with another sparse matrix S (equivalent to S.tocsc())

        csc_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.

        csc_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSC representation where the row indices for
            column i are stored in ``indices[indptr[i]:indptr[i+1]]``
            and their corresponding values are stored in
            ``data[indptr[i]:indptr[i+1]]``.  If the shape parameter is
            not supplied, the matrix dimensions are inferred from
            the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    data
        Data array of the matrix
    indices
        CSC format index array
    indptr
        CSC format index pointer array
    has_sorted_indices
        Whether indices are sorted

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the CSC format
        - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
        - efficient column slicing
        - fast matrix vector products (CSR, BSR may be faster)

    Disadvantages of the CSC format
        - slow row slicing operations (consider CSR)
        - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> csc_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 2, 2, 0, 1, 2])
    >>> col = np.array([0, 0, 1, 2, 2, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])

    """
    # Format tag consulted by scipy.sparse dispatch helpers.
    format = 'csc'

    def transpose(self, axes=None, copy=False):
        if axes is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'axes' parameter because swapping "
                              "dimensions is the only logical permutation."))
        M, N = self.shape
        from .csr import csr_matrix
        # The column-compressed arrays of A are exactly the row-compressed
        # (CSR) arrays of A.T, so the transpose is free: same data, shape
        # flipped to (N, M).
        return csr_matrix((self.data, self.indices,
                           self.indptr), (N, M), copy=copy)

    transpose.__doc__ = spmatrix.transpose.__doc__

    def __iter__(self):
        # Iterate over rows.  Row slicing is slow in CSC, so convert to CSR
        # once up front and slice that instead.
        csr = self.tocsr()
        for r in xrange(self.shape[0]):
            yield csr[r, :]

    def tocsc(self, copy=False):
        # Already CSC: copy on request, otherwise return self unchanged.
        if copy:
            return self.copy()
        else:
            return self

    tocsc.__doc__ = spmatrix.tocsc.__doc__

    def tocsr(self, copy=False):
        """Convert to CSR via the C routine ``csc_tocsr`` (same shape,
        row-compressed storage)."""
        M, N = self.shape
        # Choose an index dtype wide enough for both nnz and N.
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(self.nnz, N))
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indices = np.empty(self.nnz, dtype=idx_dtype)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))
        csc_tocsr(M, N,
                  self.indptr.astype(idx_dtype),
                  self.indices.astype(idx_dtype),
                  self.data,
                  indptr,
                  indices,
                  data)
        from .csr import csr_matrix
        A = csr_matrix((data, indices, indptr), shape=self.shape)
        # csc_tocsr emits sorted column indices per row.
        A.has_sorted_indices = True
        return A

    tocsr.__doc__ = spmatrix.tocsr.__doc__

    def __getitem__(self, key):
        # Use CSR to implement fancy indexing.
        row, col = self._unpack_index(key)
        # Things that return submatrices. row or col is a int or slice.
        # Indexing the transpose with swapped indices and transposing back
        # reuses CSR's indexing machinery.
        if (isinstance(row, slice) or isinstance(col, slice) or
                isintlike(row) or isintlike(col)):
            return self.T[col, row].T
        # Things that return a sequence of values.
        else:
            return self.T[col, row]

    def nonzero(self):
        # CSC can't use _cs_matrix's .nonzero method because it
        # returns the indices sorted for self transposed.
        # Get row and col indices, from _cs_matrix.tocoo
        major_dim, minor_dim = self._swap(self.shape)
        minor_indices = self.indices
        major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
        # Expand the compressed pointer array into one major index per entry.
        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
        row, col = self._swap((major_indices, minor_indices))
        # Remove explicit zeros
        nz_mask = self.data != 0
        row = row[nz_mask]
        col = col[nz_mask]
        # Sort them to be in C-style order (stable sort keeps columns
        # ordered within each row).
        ind = np.argsort(row, kind='mergesort')
        row = row[ind]
        col = col[ind]
        return row, col

    nonzero.__doc__ = _cs_matrix.nonzero.__doc__

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).
        """
        # we convert to CSR to maintain compatibility with old impl.
        # in spmatrix.getrow()
        return self._get_submatrix(i, slice(None)).tocsr()

    def getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSC matrix (column vector).
        """
        return self._get_submatrix(slice(None), i)

    # these functions are used by the parent class (_cs_matrix)
    # to remove redudancy between csc_matrix and csr_matrix
    def _swap(self, x):
        """swap the members of x if this is a column-oriented matrix
        """
        return (x[1], x[0])
def isspmatrix_csc(x):
    """Return True when *x* is an instance of csc_matrix (or a subclass)."""
    return isinstance(x, csc_matrix)
| mit |
yyt030/flasky-zhihu | app/models.py | 2 | 14708 | from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request, url_for
from flask.ext.login import UserMixin, AnonymousUserMixin
from app.exceptions import ValidationError
from . import db, login_manager
from random import randint
class Permission:
    """Bit-flag permission constants; a role's permission set is their OR."""
    FOLLOW = 0x01              # follow other users
    COMMENT = 0x02             # comment on posts
    WRITE_ARTICLES = 0x04      # write original posts
    MODERATE_COMMENTS = 0x08   # suppress other users' comments
    ADMINISTER = 0x80          # full administrative access
class Role(db.Model):
    """A named bundle of Permission bits; each user holds exactly one role."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # The default role is assigned to newly registered users.
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    @staticmethod
    def insert_roles():
        """Create or update the built-in roles (idempotent upsert)."""
        roles = {
            'User': (Permission.FOLLOW |
                     Permission.COMMENT |
                     Permission.WRITE_ARTICLES, True),
            'Moderator': (Permission.FOLLOW |
                          Permission.COMMENT |
                          Permission.WRITE_ARTICLES |
                          Permission.MODERATE_COMMENTS, False),
            'Administrator': (0xff, False)
        }
        # Iterate items() so the (permissions, default) pair is unpacked
        # directly instead of re-indexing the dict on every pass.
        for name, (permissions, default) in roles.items():
            role = Role.query.filter_by(name=name).first()
            if role is None:
                role = Role(name=name)
            role.permissions = permissions
            role.default = default
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
class Follow(db.Model):
    """Association row linking a follower to a followed user."""
    __tablename__ = 'follows'
    # Composite primary key: at most one row per (follower, followed) pair.
    follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                            primary_key=True)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class User(UserMixin, db.Model):
    """A registered account: credentials, profile, role, posts and follows."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    avatar_hash = db.Column(db.String(32))  # cached MD5 of the email
    posts = db.relationship('Post', backref='author', lazy='dynamic')
    # Users this user follows (rows where we are the follower)...
    followed = db.relationship('Follow',
                               foreign_keys=[Follow.follower_id],
                               backref=db.backref('follower', lazy='joined'),
                               lazy='dynamic',
                               cascade='all, delete-orphan')
    # ...and users following this user (rows where we are the followed).
    followers = db.relationship('Follow',
                                foreign_keys=[Follow.followed_id],
                                backref=db.backref('followed', lazy='joined'),
                                lazy='dynamic',
                                cascade='all, delete-orphan')
    comments = db.relationship('Comment', backref='author', lazy='dynamic')

    @staticmethod
    def generate_fake(count=100):
        """Populate the table with *count* random users (dev/test helper)."""
        from sqlalchemy.exc import IntegrityError
        from random import seed
        import forgery_py
        seed()
        for i in range(count):
            u = User(email=forgery_py.internet.email_address(),
                     username=forgery_py.internet.user_name(True),
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                # Random username/email collided with an existing row;
                # drop this one and keep going.
                db.session.rollback()

    @staticmethod
    def add_self_follows():
        """Backfill: make every existing user follow itself (idempotent)."""
        for user in User.query.all():
            if not user.is_following(user):
                user.follow(user)
                db.session.add(user)
                db.session.commit()

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        if self.role is None:
            # The configured admin email gets the all-permissions role,
            # everyone else the default role.
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(permissions=0xff).first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
        # Every user follows itself so followed_posts includes own posts.
        self.followed.append(Follow(followed=self))

    @property
    def password(self):
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Only the salted hash is stored, never the plaintext.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed, expiring token for account confirmation."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # fix: was a bare except (BadSignature/expired)
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        """Return a signed, expiring token for a password reset."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Validate a reset token and install *new_password*."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # fix: was a bare except
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token binding this user to a pending new email."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})

    def change_email(self, token):
        """Validate an email-change token and apply the new address."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # fix: was a bare except
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.email = new_email
        # Keep the cached avatar hash in sync with the new address.
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True

    def can(self, permissions):
        """Return True when this user's role grants all bits in *permissions*."""
        return self.role is not None and \
            (self.role.permissions & permissions) == permissions

    def is_administrator(self):
        return self.can(Permission.ADMINISTER)

    def ping(self):
        """Refresh last_seen; called on every authenticated request."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Return a locally served avatar URL.

        This replaced the original Gravatar lookup (the dead commented-out
        implementation has been removed); a random bundled picture is chosen
        per call, so successive calls may return different images.
        """
        n = randint(0, 9)
        if size >= 100:
            return url_for('static', filename='pic/%s.png' % n)
        elif size == 16:
            return url_for('static', filename='pic/000.png')
        else:
            return url_for('static', filename='pic/%s%s.png' % (n, n))

    def follow(self, user):
        if not self.is_following(user):
            f = Follow(follower=self, followed=user)
            db.session.add(f)

    def unfollow(self, user):
        f = self.followed.filter_by(followed_id=user.id).first()
        if f:
            db.session.delete(f)

    def is_following(self, user):
        return self.followed.filter_by(
            followed_id=user.id).first() is not None

    def is_followed_by(self, user):
        return self.followers.filter_by(
            follower_id=user.id).first() is not None

    @property
    def followed_posts(self):
        """Query of all posts authored by users this user follows."""
        return Post.query.join(Follow, Follow.followed_id == Post.author_id) \
            .filter(Follow.follower_id == self.id)

    def to_json(self):
        """Serialize the public API representation of this user."""
        json_user = {
            'url': url_for('api.get_post', id=self.id, _external=True),
            'username': self.username,
            'member_since': self.member_since,
            'last_seen': self.last_seen,
            'posts': url_for('api.get_user_posts', id=self.id, _external=True),
            'followed_posts': url_for('api.get_user_followed_posts',
                                      id=self.id, _external=True),
            'post_count': self.posts.count()
        }
        return json_user

    def generate_auth_token(self, expiration):
        """Return a signed token used for token-based API authentication."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('ascii')

    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid auth token, or None."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # fix: was a bare except
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Stand-in user for unauthenticated requests: holds no permissions,
    so templates can call can()/is_administrator() unconditionally."""
    def can(self, permissions):
        return False
    def is_administrator(self):
        return False
# Serve our permission-aware AnonymousUser for requests with no login.
login_manager.anonymous_user = AnonymousUser

@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user by its (string) primary key."""
    return User.query.get(int(user_id))
class Post(db.Model):
    """A blog post written in Markdown with a cached sanitized-HTML render."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)       # raw Markdown source
    body_html = db.Column(db.Text)  # sanitized HTML cache (see on_changed_body)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    # NOTE(review): ``views`` has no default and is never initialised here,
    # so it can be NULL -- confirm readers handle that.
    views = db.Column(db.Integer)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')
    topicmapping = db.relationship('TopicMapping', backref='posts', lazy='dynamic')

    @staticmethod
    def generate_fake(count=100):
        """Populate the table with *count* random posts (dev/test helper).

        Note: commits once per post inside the loop.
        """
        from random import seed, randint
        import forgery_py
        seed()
        user_count = User.query.count()
        for i in range(count):
            u = User.query.offset(randint(0, user_count - 1)).first()
            p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
                     timestamp=forgery_py.date.date(True),
                     author=u)
            db.session.add(p)
            db.session.commit()

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: render Markdown, whitelist-sanitize it,
        linkify bare URLs, and cache the result in body_html."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                        'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
                        'h1', 'h2', 'h3', 'p']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))

    def to_json(self):
        """Serialize the public API representation of this post."""
        json_post = {
            'url': url_for('api.get_post', id=self.id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
            'comments': url_for('api.get_post_comments', id=self.id,
                                _external=True),
            'comment_count': self.comments.count()
        }
        return json_post

    @staticmethod
    def from_json(json_post):
        """Build a Post from an API payload; raises ValidationError on an
        absent or empty body."""
        body = json_post.get('body')
        if body is None or body == '':
            raise ValidationError('post does not have a body')
        return Post(body=body)

# Re-render body_html whenever Post.body is assigned.
db.event.listen(Post.body, 'set', Post.on_changed_body)
class Comment(db.Model):
    """A user comment on a post, Markdown-authored like Post but with a
    tighter HTML whitelist."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)       # raw Markdown source
    body_html = db.Column(db.Text)  # sanitized HTML cache
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    disabled = db.Column(db.Boolean)  # set by moderators to hide the comment
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        """SQLAlchemy 'set' listener: render and sanitize the Markdown body
        (inline tags only -- no block-level markup in comments)."""
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))

    def to_json(self):
        """Serialize the public API representation of this comment."""
        json_comment = {
            'url': url_for('api.get_comment', id=self.id, _external=True),
            'post': url_for('api.get_post', id=self.post_id, _external=True),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author': url_for('api.get_user', id=self.author_id,
                              _external=True),
        }
        return json_comment

    @staticmethod
    def from_json(json_comment):
        """Build a Comment from an API payload; raises ValidationError on an
        absent or empty body."""
        body = json_comment.get('body')
        if body is None or body == '':
            raise ValidationError('comment does not have a body')
        return Comment(body=body)

# Re-render body_html whenever Comment.body is assigned.
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
class Topic(db.Model):
    """A topic/tag that posts can be mapped to via TopicMapping."""
    __tablename__ = 'topics'
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): db.String without a length -- rejected by backends that
    # require one (e.g. MySQL); confirm target database.
    name = db.Column(db.String)
    # Presumably a Unix timestamp -- verify against the writers.
    add_time = db.Column(db.Integer)
class TopicMapping(db.Model):
    """Join table realising the many-to-many link between posts and topics."""
    __tablename__ = 'topic_mapping'
    id = db.Column(db.Integer, primary_key=True)
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
    topic_id = db.Column(db.Integer, db.ForeignKey('topics.id'))
    # uselist=False: each mapping row points at exactly one Topic.
    topics = db.relationship('Topic', backref='topicmapping', uselist=False)
| mit |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/views/decorators/cache.py | 586 | 2304 | from functools import wraps
from django.middleware.cache import CacheMiddleware
from django.utils.cache import add_never_cache_headers, patch_cache_control
from django.utils.decorators import (
available_attrs, decorator_from_middleware_with_args,
)
def cache_page(*args, **kwargs):
    """
    Decorator that serves a view from the cache, populating it on a miss.

    Entries are keyed by the URL plus selected header data, under an
    optional key prefix used to separate cache areas in multi-site setups
    (e.g. get_current_site().domain).  Headers named in the response's
    Vary header participate in the key, exactly as the cache middleware
    does.

    Accepts exactly one positional argument (the timeout) and the optional
    ``cache`` and ``key_prefix`` keyword arguments; legacy call styles are
    rejected with TypeError.
    """
    if len(args) != 1 or callable(args[0]):
        raise TypeError("cache_page has a single mandatory positional argument: timeout")
    cache_timeout, = args
    cache_alias = kwargs.pop('cache', None)
    key_prefix = kwargs.pop('key_prefix', None)
    if kwargs:
        raise TypeError("cache_page has two optional keyword arguments: cache and key_prefix")
    decorator = decorator_from_middleware_with_args(CacheMiddleware)
    return decorator(cache_timeout=cache_timeout,
                     cache_alias=cache_alias,
                     key_prefix=key_prefix)
def cache_control(**directives):
    """
    Decorator factory that patches each response's Cache-Control header
    with the given directives (delegates to patch_cache_control).
    """
    def decorator(view):
        @wraps(view, assigned=available_attrs(view))
        def wrapped(request, *args, **kw):
            resp = view(request, *args, **kw)
            patch_cache_control(resp, **directives)
            return resp
        return wrapped
    return decorator
def never_cache(view_func):
    """
    Decorator that stamps every response with headers preventing it from
    ever being cached (delegates to add_never_cache_headers).
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _no_cache_view(request, *args, **kwargs):
        resp = view_func(request, *args, **kwargs)
        add_never_cache_headers(resp)
        return resp
    return _no_cache_view
| artistic-2.0 |
romain-li/edx-platform | cms/djangoapps/contentstore/views/tests/test_header_menu.py | 24 | 1802 | #-*- coding: utf-8 -*-
"""
Course Header Menu Tests.
"""
from django.conf import settings
from django.test.utils import override_settings
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from util.testing import UrlResetMixin
# Copy of the platform feature flags with HTML certificate views switched
# on; applied to the test case below via override_settings.
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
class TestHeaderMenu(CourseTestCase, UrlResetMixin):
    """
    Unit tests for the course header menu.
    """
    def setUp(self):
        """
        Set up the for the course header menu tests.
        """
        super(TestHeaderMenu, self).setUp()
        # URLs are regenerated so the override_settings feature flags take
        # effect in URL routing.
        self.reset_urls()

    def test_header_menu_without_web_certs_enabled(self):
        """
        Tests course header menu should not have `Certificates` menu item
        if course has not web/HTML certificates enabled.
        """
        outline_url = reverse_course_url('course_handler', self.course.id)
        resp = self.client.get(outline_url, HTTP_ACCEPT='text/html')
        self.assertEqual(resp.status_code, 200)
        self.assertNotContains(resp, '<li class="nav-item nav-course-settings-certificates">')

    def test_header_menu_with_web_certs_enabled(self):
        """
        Tests course header menu should have `Certificates` menu item
        if course has web/HTML certificates enabled.
        """
        # Enable the per-course flag in addition to the platform feature.
        self.course.cert_html_view_enabled = True
        self.save_course()
        outline_url = reverse_course_url('course_handler', self.course.id)
        resp = self.client.get(outline_url, HTTP_ACCEPT='text/html')
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, '<li class="nav-item nav-course-settings-certificates">')
| agpl-3.0 |
tangfeixiong/nova | nova/tests/unit/api/ec2/test_ec2utils.py | 84 | 2549 | # Copyright 2014 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.ec2 import ec2utils
from nova import context
from nova import objects
from nova import test
class EC2UtilsTestCase(test.TestCase):
    """Tests for the EC2 <-> internal UUID mapping helpers in ec2utils."""
    def setUp(self):
        # NOTE(review): the admin context and cache reset are created before
        # super().setUp(); convention is super first -- confirm intentional.
        self.ctxt = context.get_admin_context()
        ec2utils.reset_cache()
        super(EC2UtilsTestCase, self).setUp()

    def test_get_int_id_from_snapshot_uuid(self):
        # An existing mapping is looked up by uuid.
        smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
        smap.create()
        smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
                                                         'fake-uuid')
        self.assertEqual(smap.id, smap_id)

    def test_get_int_id_from_snapshot_uuid_creates_mapping(self):
        # A missing mapping is created on demand.
        smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
                                                         'fake-uuid')
        smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id)
        self.assertEqual('fake-uuid', smap.uuid)

    def test_get_snapshot_uuid_from_int_id(self):
        # Reverse lookup: integer id back to uuid.
        smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
        smap.create()
        smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id)
        self.assertEqual(smap.uuid, smap_uuid)

    def test_id_to_glance_id(self):
        s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
        s3imap.create()
        uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id)
        self.assertEqual(uuid, s3imap.uuid)

    def test_glance_id_to_id(self):
        s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
        s3imap.create()
        s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid)
        self.assertEqual(s3imap_id, s3imap.id)

    def test_glance_id_to_id_creates_mapping(self):
        # Unknown glance uuid: a new S3 image mapping is created on demand.
        s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid')
        s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id)
        self.assertEqual('fake-uuid', s3imap.uuid)
| apache-2.0 |
mohamed--abdel-maksoud/chromium.src | third_party/python_gflags/setup.py | 376 | 1991 | #!/usr/bin/env python
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup
setup(name='python-gflags',
version='2.0',
description='Google Commandline Flags Module',
license='BSD',
author='Google Inc. and others',
author_email='google-gflags@googlegroups.com',
url='http://code.google.com/p/python-gflags',
py_modules=["gflags", "gflags_validators"],
data_files=[("bin", ["gflags2man.py"])],
include_package_data=True,
)
| bsd-3-clause |
stivosaurus/rpi-snippets | reference_scripts/buttons_pygame.py | 1 | 1815 | import pygame, sys
from pygame.locals import *
TIMER = 30       # frames per second; also the tick count between status prints
SCREEN_X = 200
SCREEN_Y = 200

screen = pygame.display.set_mode((SCREEN_X, SCREEN_Y))
clock = pygame.time.Clock()  # tick-tock

ending = button1 = button2 = False
corner1 = (28, 18)   # Top Left corner of button 1
corner2 = (56, 18)   # Top Left corner of button 2
image_length = 100   # length of the buttons
image_height = 100   # height of the buttons
counter = 0


def _hit(corner, pos):
    """Return True when *pos* lies inside the button anchored at *corner*.

    Bounds are inclusive on all four edges, matching the original
    comparisons (note the two buttons' rectangles overlap; the elif order
    below resolves the overlap in favour of button 1, as before).
    """
    x, y = pos
    return (corner[0] <= x <= corner[0] + image_length and
            corner[1] <= y <= corner[1] + image_height)


# Main Loop:
while not ending:
    counter += 1
    clock.tick(TIMER)
    for event in pygame.event.get():
        if event.type == QUIT:
            # Fix: honour the window-manager close button, which the
            # original loop ignored entirely.
            ending = True
            print("Game Stopped Early by user")
        elif event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                ending = True  # Time to leave
                print("Game Stopped Early by user")
        elif event.type == MOUSEBUTTONDOWN:
            if event.button == 1:  # left mouse button
                if _hit(corner1, event.pos):
                    print("Button one is selected")
                    button1, button2 = True, False
                elif _hit(corner2, event.pos):
                    print("Button two is selected")
                    button1, button2 = False, True
                else:
                    print("That's not a button")
                    button1 = button2 = False
    if counter == TIMER:  # prints the statements once a second
        counter = 0
        if button1:
            print("Button one is currently selected")
        elif button2:
            print("Button two is currently selected")
        else:
            print("No buttons currently selected")

# Fix: shut pygame down cleanly instead of leaving the window hung.
pygame.quit()
| unlicense |
gdi2290/django | tests/generic_views/views.py | 35 | 7535 | from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import generic
from .test_forms import AuthorForm, ContactForm
from .models import Artist, Author, Book, Page, BookSigning
class CustomTemplateView(generic.TemplateView):
    """TemplateView fixture that injects one extra key into the context."""
    template_name = 'generic_views/about.html'

    def get_context_data(self, **kwargs):
        context = super(CustomTemplateView, self).get_context_data(**kwargs)
        context.update({'key': 'value'})
        return context
class ObjectDetail(generic.DetailView):
    """DetailView fixture serving a plain dict instead of a model instance."""
    template_name = 'generic_views/detail.html'

    def get_object(self):
        return {'foo': 'bar'}


class ArtistDetail(generic.DetailView):
    queryset = Artist.objects.all()


class AuthorDetail(generic.DetailView):
    queryset = Author.objects.all()


class PageDetail(generic.DetailView):
    """Detail view whose template name is read from a model field."""
    queryset = Page.objects.all()
    template_name_field = 'template'
class DictList(generic.ListView):
    """A ListView that doesn't use a model."""
    queryset = [
        {'first': 'John', 'last': 'Lennon'},
        {'first': 'Yoko', 'last': 'Ono'}
    ]
    template_name = 'generic_views/list.html'


class ArtistList(generic.ListView):
    template_name = 'generic_views/list.html'
    queryset = Artist.objects.all()


class AuthorList(generic.ListView):
    queryset = Author.objects.all()


class BookList(generic.ListView):
    model = Book
class CustomPaginator(Paginator):
    """Paginator fixture that forces orphans=2, deliberately ignoring the
    caller-supplied ``orphans`` value."""
    def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        super(CustomPaginator, self).__init__(
            queryset,
            page_size,
            orphans=2,
            allow_empty_first_page=allow_empty_first_page)


class AuthorListCustomPaginator(AuthorList):
    """Author list overriding get_paginator to likewise force orphans=2."""
    paginate_by = 5

    def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        return super(AuthorListCustomPaginator, self).get_paginator(
            queryset,
            page_size,
            orphans=2,
            allow_empty_first_page=allow_empty_first_page)
class ContactView(generic.FormView):
    form_class = ContactForm
    success_url = reverse_lazy('authors_list')
    template_name = 'generic_views/form.html'


class ArtistCreate(generic.CreateView):
    model = Artist
    fields = '__all__'


class NaiveAuthorCreate(generic.CreateView):
    """CreateView configured via queryset only (no success_url)."""
    queryset = Author.objects.all()
    fields = '__all__'


class TemplateResponseWithoutTemplate(generic.detail.SingleObjectTemplateResponseMixin, generic.View):
    """Fixture exercising template-name resolution failure paths."""
    # we don't define the usual template_name here
    def __init__(self):
        # Dummy object, but attr is required by get_template_name()
        self.object = None
class AuthorCreate(generic.CreateView):
    model = Author
    success_url = '/list/authors/'
    fields = '__all__'


class SpecializedAuthorCreate(generic.CreateView):
    """CreateView with explicit form, template, context name and a computed
    success URL."""
    model = Author
    form_class = AuthorForm
    template_name = 'generic_views/form.html'
    context_object_name = 'thingy'

    def get_success_url(self):
        return reverse('author_detail', args=[self.object.id])


class AuthorCreateRestricted(AuthorCreate):
    # Only POST is auth-protected; GET remains open.
    post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
    model = Artist
    fields = '__all__'


class NaiveAuthorUpdate(generic.UpdateView):
    queryset = Author.objects.all()
    fields = '__all__'


class AuthorUpdate(generic.UpdateView):
    model = Author
    success_url = '/list/authors/'
    fields = '__all__'


class OneAuthorUpdate(generic.UpdateView):
    """UpdateView fixture hard-wired to a single object (pk=1)."""
    success_url = '/list/authors/'
    fields = '__all__'

    def get_object(self):
        return Author.objects.get(pk=1)


class SpecializedAuthorUpdate(generic.UpdateView):
    model = Author
    form_class = AuthorForm
    template_name = 'generic_views/form.html'
    context_object_name = 'thingy'

    def get_success_url(self):
        return reverse('author_detail', args=[self.object.id])
class NaiveAuthorDelete(generic.DeleteView):
    """DeleteView configured via queryset only (no success_url)."""
    queryset = Author.objects.all()


class AuthorDelete(generic.DeleteView):
    model = Author
    success_url = '/list/authors/'


class SpecializedAuthorDelete(generic.DeleteView):
    queryset = Author.objects.all()
    template_name = 'generic_views/confirm_delete.html'
    context_object_name = 'thingy'

    def get_success_url(self):
        return reverse('authors_list')
class BookConfig(object):
    """Shared configuration mixin for all date-based Book views below."""
    queryset = Book.objects.all()
    date_field = 'pubdate'


class BookArchive(BookConfig, generic.ArchiveIndexView):
    pass


class BookYearArchive(BookConfig, generic.YearArchiveView):
    pass


class BookMonthArchive(BookConfig, generic.MonthArchiveView):
    pass


class BookWeekArchive(BookConfig, generic.WeekArchiveView):
    pass


class BookDayArchive(BookConfig, generic.DayArchiveView):
    pass


class BookTodayArchive(BookConfig, generic.TodayArchiveView):
    pass


class BookDetail(BookConfig, generic.DateDetailView):
    pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
    """Mixin fixture supplying the model via get_queryset rather than
    a ``model``/``queryset`` attribute."""
    fields = '__all__'

    def get_queryset(self):
        return Author.objects.all()


class BookDetailGetObjectCustomQueryset(BookDetail):
    """Date detail view forcing object lookup against a pinned queryset."""
    def get_object(self, queryset=None):
        return super(BookDetailGetObjectCustomQueryset, self).get_object(
            queryset=Book.objects.filter(pk=2))
class CustomMultipleObjectMixinView(generic.list.MultipleObjectMixin, generic.View):
queryset = [
{'name': 'John'},
{'name': 'Yoko'},
]
def get(self, request):
self.object_list = self.get_queryset()
class CustomContextView(generic.detail.SingleObjectMixin, generic.View):
model = Book
object = Book(name='dummy')
def get_object(self):
return Book(name="dummy")
def get_context_data(self, **kwargs):
context = {'custom_key': 'custom_value'}
context.update(kwargs)
return super(CustomContextView, self).get_context_data(**context)
def get_context_object_name(self, obj):
return "test_name"
class CustomSingleObjectView(generic.detail.SingleObjectMixin, generic.View):
    """SingleObjectMixin with a pre-set (unsaved) Book as its object."""
    model = Book
    object = Book(name="dummy")
class BookSigningConfig(object):
    """Shared configuration mixin for the BookSigning date-based views,
    archived on 'event_date' and reusing the Book templates."""
    model = BookSigning
    date_field = 'event_date'
    # use the same templates as for books
    def get_template_names(self):
        return ['generic_views/book%s.html' % self.template_name_suffix]
class BookSigningArchive(BookSigningConfig, generic.ArchiveIndexView):
    """Top-level date archive of book signings."""
    pass
class BookSigningYearArchive(BookSigningConfig, generic.YearArchiveView):
    """Per-year book-signing archive."""
    pass
class BookSigningMonthArchive(BookSigningConfig, generic.MonthArchiveView):
    """Per-month book-signing archive."""
    pass
class BookSigningWeekArchive(BookSigningConfig, generic.WeekArchiveView):
    """Per-week book-signing archive."""
    pass
class BookSigningDayArchive(BookSigningConfig, generic.DayArchiveView):
    """Per-day book-signing archive."""
    pass
class BookSigningTodayArchive(BookSigningConfig, generic.TodayArchiveView):
    """Archive restricted to today's date."""
    pass
class BookSigningDetail(BookSigningConfig, generic.DateDetailView):
    """Date-based detail view for a single book signing; the object is
    exposed to the template as 'book'."""
    context_object_name = 'book'
class NonModel(object):
    """Plain object (not a Django model) exposing just an ``id`` and a
    null ``_meta`` so it can stand in for a model instance."""
    id = "non_model_1"
    _meta = None
class NonModelDetail(generic.DetailView):
    """DetailView serving a plain non-model object."""
    template_name = 'generic_views/detail.html'
    model = NonModel
    def get_object(self, queryset=None):
        # Bypass the queryset machinery entirely; return a fresh instance.
        return NonModel()
class ObjectDoesNotExistDetail(generic.DetailView):
    """DetailView whose queryset comes from ``Book.does_not_exist``.

    NOTE(review): ``does_not_exist`` is presumably a custom manager that
    raises ObjectDoesNotExist -- confirm against the Book model definition.
    """
    def get_queryset(self):
        return Book.does_not_exist.all()
| bsd-3-clause |
greasypizza/grpc | src/python/grpcio_tests/tests/unit/_junkdrawer/__init__.py | 901 | 1528 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
argriffing/scipy | scipy/io/__init__.py | 81 | 2377 | # -*- encoding:utf-8 -*-
"""
==================================
Input and output (:mod:`scipy.io`)
==================================
.. currentmodule:: scipy.io
SciPy has many modules, classes, and functions available to read data
from and write data to a variety of file formats.
.. seealso:: :ref:`numpy-reference.routines.io` (in Numpy)
MATLAB® files
=============
.. autosummary::
:toctree: generated/
loadmat - Read a MATLAB style mat file (version 4 through 7.1)
savemat - Write a MATLAB style mat file (version 4 through 7.1)
whosmat - List contents of a MATLAB style mat file (version 4 through 7.1)
IDL® files
==========
.. autosummary::
:toctree: generated/
readsav - Read an IDL 'save' file
Matrix Market files
===================
.. autosummary::
:toctree: generated/
mminfo - Query matrix info from Matrix Market formatted file
mmread - Read matrix from Matrix Market formatted file
mmwrite - Write matrix to Matrix Market formatted file
Unformatted Fortran files
===============================
.. autosummary::
:toctree: generated/
FortranFile - A file object for unformatted sequential Fortran files
Netcdf
======
.. autosummary::
:toctree: generated/
netcdf_file - A file object for NetCDF data
netcdf_variable - A data object for the netcdf module
Harwell-Boeing files
====================
.. autosummary::
:toctree: generated/
hb_read -- read H-B file
hb_write -- write H-B file
Wav sound files (:mod:`scipy.io.wavfile`)
=========================================
.. module:: scipy.io.wavfile
.. autosummary::
:toctree: generated/
read
write
WavFileWarning
Arff files (:mod:`scipy.io.arff`)
=================================
.. module:: scipy.io.arff
.. autosummary::
:toctree: generated/
loadarff
MetaData
ArffError
ParseArffError
"""
from __future__ import division, print_function, absolute_import
# matfile read and write
from .matlab import loadmat, savemat, whosmat, byteordercodes
# netCDF file support
from .netcdf import netcdf_file, netcdf_variable
# Fortran file support
from ._fortran import FortranFile
from .mmio import mminfo, mmread, mmwrite
from .idl import readsav
from .harwell_boeing import hb_read, hb_write
__all__ = [s for s in dir() if not s.startswith('_')]  # export every public name defined above
from numpy.testing import Tester  # NOTE(review): nose-based runner; removed in newer numpy releases
test = Tester().test  # exposes scipy.io.test()
| bsd-3-clause |
x2nie/odoo | addons/account_anglo_saxon/invoice.py | 9 | 12540 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C)
# 2004-2010 Tiny SPRL (<http://tiny.be>).
# 2009-2010 Veritos (http://veritos.nl).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_round as round
class account_invoice_line(osv.osv):
    """Anglo-saxon accounting extension of invoice lines.

    For products valued in real time, customer invoices get an extra
    balanced pair of move lines (stock output account vs. expense
    account), and supplier invoices get price-difference move lines when
    the invoiced price differs from the stock valuation price.
    """
    _inherit = "account.invoice.line"
    _columns = {
        # Back-reference to the stock move the line was invoiced from, if any.
        'move_id': fields.many2one('stock.move', string="Move line", help="If the invoice was generated from a stock.picking, reference to the related move line."),
    }
    def move_line_get(self, cr, uid, invoice_id, context=None):
        """Extend the accounting entries generated for *invoice_id* with
        the anglo-saxon specific lines described in the class docstring.
        Returns the augmented list of move-line value dicts."""
        res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)
        inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
        company_currency = inv.company_id.currency_id.id
        def get_price(cr, uid, inv, company_currency, i_line, price_unit):
            # Line total (unit price * quantity), converted to the invoice
            # currency when it differs from the company currency, rounded
            # to the 'Account' decimal precision.
            cur_obj = self.pool.get('res.currency')
            decimal_precision = self.pool.get('decimal.precision')
            if inv.currency_id.id != company_currency:
                price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, price_unit * i_line.quantity, context={'date': inv.date_invoice})
            else:
                price = price_unit * i_line.quantity
            return round(price, decimal_precision.precision_get(cr, uid, 'Account'))
        if inv.type in ('out_invoice','out_refund'):
            # Customer invoices/refunds: add a balanced debit/credit pair
            # per real-time valued product line.
            for i_line in inv.invoice_line:
                if i_line.product_id and i_line.product_id.valuation == 'real_time':
                    # debit account dacc will be the output account
                    # first check the product, if empty check the category
                    dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
                    if not dacc:
                        dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
                    # in both cases the credit account cacc will be the expense account
                    # first check the product, if empty check the category
                    cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id
                    if not cacc:
                        cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id
                    if dacc and cacc:
                        # Cost comes from the linked stock move when
                        # available, otherwise the product standard price.
                        price_unit = i_line.move_id and i_line.move_id.price_unit or i_line.product_id.standard_price
                        res.append({
                            'type':'src',
                            'name': i_line.name[:64],
                            'price_unit':price_unit,
                            'quantity':i_line.quantity,
                            'price':get_price(cr, uid, inv, company_currency, i_line, price_unit),
                            'account_id':dacc,
                            'product_id':i_line.product_id.id,
                            'uos_id':i_line.uos_id.id,
                            'account_analytic_id': False,
                            'taxes':i_line.invoice_line_tax_id,
                            })
                        # Counterpart entry with the opposite sign.
                        res.append({
                            'type':'src',
                            'name': i_line.name[:64],
                            'price_unit':price_unit,
                            'quantity':i_line.quantity,
                            'price': -1 * get_price(cr, uid, inv, company_currency, i_line, price_unit),
                            'account_id':cacc,
                            'product_id':i_line.product_id.id,
                            'uos_id':i_line.uos_id.id,
                            'account_analytic_id': False,
                            'taxes':i_line.invoice_line_tax_id,
                            })
        elif inv.type in ('in_invoice','in_refund'):
            # Supplier invoices/refunds: book the difference between the
            # invoiced price and the stock valuation price, if any.
            for i_line in inv.invoice_line:
                if i_line.product_id and i_line.product_id.valuation == 'real_time':
                    if i_line.product_id.type != 'service':
                        # get the price difference account at the product
                        acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
                        if not acc:
                            # if not found on the product get the price difference account at the category
                            acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
                        a = None
                        # oa will be the stock input account
                        # first check the product, if empty check the category
                        oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
                        if not oa:
                            oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
                        if oa:
                            # get the fiscal position
                            fpos = i_line.invoice_id.fiscal_position or False
                            a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
                        diff_res = []
                        decimal_precision = self.pool.get('decimal.precision')
                        account_prec = decimal_precision.precision_get(cr, uid, 'Account')
                        # calculate and write down the possible price difference between invoice price and product price
                        for line in res:
                            if line.get('invl_id', 0) == i_line.id and a == line['account_id']:
                                uom = i_line.product_id.uos_id or i_line.product_id.uom_id
                                valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
                                if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
                                    #for average/fifo/lifo costing method, fetch real cost price from incomming moves
                                    stock_move_obj = self.pool.get('stock.move')
                                    valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
                                    if valuation_stock_move:
                                        valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
                                if inv.currency_id.id != company_currency:
                                    # Compare prices in the invoice currency.
                                    valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
                                if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
                                    price_diff = round(i_line.price_unit - valuation_price_unit, account_prec)
                                    # Rewrite the original line at valuation price...
                                    line.update({'price': round(valuation_price_unit * line['quantity'], account_prec)})
                                    # ...and post the difference on the price-difference account.
                                    diff_res.append({
                                        'type': 'src',
                                        'name': i_line.name[:64],
                                        'price_unit': price_diff,
                                        'quantity': line['quantity'],
                                        'price': round(price_diff * line['quantity'], account_prec),
                                        'account_id': acc,
                                        'product_id': line['product_id'],
                                        'uos_id': line['uos_id'],
                                        'account_analytic_id': line['account_analytic_id'],
                                        'taxes': line.get('taxes', []),
                                        })
                        res += diff_res
        return res
    def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
        """On product change, point supplier invoice/refund lines at the
        product's stock input/output account (mapped through the fiscal
        position) instead of the default account chosen by the parent."""
        fiscal_pool = self.pool.get('account.fiscal.position')
        res = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, company_id, context)
        if not product:
            return res
        if type in ('in_invoice','in_refund'):
            product_obj = self.pool.get('product.product').browse(cr, uid, product, context=context)
            if type == 'in_invoice':
                # Supplier invoice: stock input account (product first, then category).
                oa = product_obj.property_stock_account_input and product_obj.property_stock_account_input.id
                if not oa:
                    oa = product_obj.categ_id.property_stock_account_input_categ and product_obj.categ_id.property_stock_account_input_categ.id
            else:
                # Supplier refund: stock output account (product first, then category).
                oa = product_obj.property_stock_account_output and product_obj.property_stock_account_output.id
                if not oa:
                    oa = product_obj.categ_id.property_stock_account_output_categ and product_obj.categ_id.property_stock_account_output_categ.id
            if oa:
                fpos = fposition_id and fiscal_pool.browse(cr, uid, fposition_id, context=context) or False
                a = fiscal_pool.map_account(cr, uid, fpos, oa)
                res['value'].update({'account_id':a})
        return res
class account_invoice(osv.osv):
    """Anglo-saxon extension of invoices: refunds of supplier invoices
    have their lines redirected to each product's stock output account."""
    _inherit = "account.invoice"
    def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
        """Build the refund values as usual, then, for supplier invoices,
        replace each product line's account with the product's (or its
        category's) stock output account, mapped through the invoice's
        fiscal position."""
        invoice_data = super(account_invoice, self)._prepare_refund(cr, uid, invoice, date, period_id,
                                                                    description, journal_id, context=context)
        if invoice.type == 'in_invoice':
            fiscal_position = self.pool.get('account.fiscal.position')
            # invoice_line holds one2many command triples (command, id, values).
            for _, _, line_dict in invoice_data['invoice_line']:
                if line_dict.get('product_id'):
                    product = self.pool.get('product.product').browse(cr, uid, line_dict['product_id'], context=context)
                    # Stock output account: product first, then its category.
                    counterpart_acct_id = product.property_stock_account_output and \
                            product.property_stock_account_output.id
                    if not counterpart_acct_id:
                        counterpart_acct_id = product.categ_id.property_stock_account_output_categ and \
                                product.categ_id.property_stock_account_output_categ.id
                    if counterpart_acct_id:
                        fpos = invoice.fiscal_position or False
                        line_dict['account_id'] = fiscal_position.map_account(cr, uid,
                                                                              fpos,
                                                                              counterpart_acct_id)
        return invoice_data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40223250/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/subprocess.py | 728 | 67282 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize will be supplied as the corresponding argument to the io.open()
function when creating the stdin/stdout/stderr pipe file objects:
0 means unbuffered (read & write are one system call and can return short),
1 means line buffered, any other positive value means use a buffer of
approximately that size. A negative bufsize, the default, means the system
default of io.DEFAULT_BUFFER_SIZE will be used.
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is false, the file objects stdin, stdout and stderr
are opened as binary files, and no line ending conversion is done.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Also, the newlines attribute
of the file objects stdout, stdin and stderr are not updated by the
communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with os.popen() and return a 2-tuple
(status, output). cmd is actually run as '{ cmd ; } 2>&1', so that the
returned output will contain output or error messages. A trailing newline
is stripped from the output. The exit status for the command can be
interpreted according to the rules for the C function wait(). Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Exceptions defined within this module inherit from SubprocessError.
check_call() and check_output() will raise CalledProcessError if the
called process returns a non-zero return code. TimeoutExpired
be raised if a timeout was specified and expired.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import sys
mswindows = (sys.platform == "win32")
import io
import os
import time
import traceback
import gc
import signal
import builtins
import warnings
import errno
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
# Exception classes used by this module.
# Base class for all exceptions raised by this module.
class SubprocessError(Exception): pass
class CalledProcessError(SubprocessError):
    """Raised when a process run by check_call() or check_output()
    exits with a non-zero status.

    Attributes:
      returncode -- exit status of the child process
      cmd        -- the command that was run
      output     -- captured output, if any (check_output() only)
    """
    def __init__(self, returncode, cmd, output=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output
    def __str__(self):
        details = (self.cmd, self.returncode)
        return "Command '%s' returned non-zero exit status %d" % details
class TimeoutExpired(SubprocessError):
    """Raised when the timeout expires while waiting for a child process.

    Attributes:
      cmd     -- the command being waited on
      timeout -- the timeout value, in seconds
      output  -- whatever the child produced before the timeout, if captured
    """
    def __init__(self, cmd, timeout, output=None):
        self.cmd = cmd
        self.timeout = timeout
        self.output = output
    def __str__(self):
        details = (self.cmd, self.timeout)
        return ("Command '%s' timed out after %s seconds" % details)
if mswindows:
import threading
import msvcrt
import _winapi
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import _posixsubprocess
_create_pipe = _posixsubprocess.cloexec_pipe
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError", "DEVNULL"]
if mswindows:
from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
    class Handle(int):
        """An int subclass wrapping a Windows HANDLE that can be closed
        exactly once; used so pipe handles are released deterministically."""
        # Flipped to True by Close()/Detach(); guards double-close.
        closed = False
        def Close(self, CloseHandle=_winapi.CloseHandle):
            # CloseHandle is bound at definition time so Close still works
            # during interpreter shutdown, when module globals may be gone.
            if not self.closed:
                self.closed = True
                CloseHandle(self)
        def Detach(self):
            # Give up ownership: mark closed without closing the OS handle
            # and hand the raw handle value back to the caller.
            if not self.closed:
                self.closed = True
                return int(self)
            raise ValueError("already closed")
        def __repr__(self):
            return "Handle(%d)" % int(self)
        __del__ = Close
        __str__ = __repr__
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# This lists holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
def _cleanup():
    # Reap exited children whose Popen object was garbage-collected before
    # they finished; called when a new Popen is created to avoid zombies.
    for inst in _active[:]:
        # _deadstate makes the poll report a synthetic returncode if the
        # child is gone, so the instance can be dropped from _active.
        res = inst._internal_poll(_deadstate=sys.maxsize)
        if res is not None:
            try:
                _active.remove(inst)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass
PIPE = -1
STDOUT = -2
DEVNULL = -3
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except InterruptedError:
continue
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'quiet': 'q',
'hash_randomization': 'R',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
def call(*popenargs, timeout=None, **kwargs):
    """Run a command, wait for it to complete (or *timeout* to expire),
    then return its returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    with Popen(*popenargs, **kwargs) as process:
        try:
            return process.wait(timeout=timeout)
        except:
            # On timeout (or any other error) terminate the child and
            # reap it before propagating the exception.
            process.kill()
            process.wait()
            raise
def check_call(*popenargs, **kwargs):
    """Run a command, wait for it to complete, and return 0 on success.

    If the exit code is non-zero, raise CalledProcessError with the code
    stored in its ``returncode`` attribute.

    The arguments are the same as for the call function.  Example:

    check_call(["ls", "-l"])
    """
    rc = call(*popenargs, **kwargs)
    if not rc:
        return 0
    # Report the command as the caller spelled it.
    cmd = kwargs.get("args")
    if cmd is None:
        cmd = popenargs[0]
    raise CalledProcessError(rc, cmd)
def check_output(*popenargs, timeout=None, **kwargs):
    r"""Run a command and return its standard output.

    Raises CalledProcessError (return code in ``returncode``, captured
    output in ``output``) if the command exits non-zero, and
    TimeoutExpired if *timeout* elapses first.

    The arguments are the same as for the Popen constructor, except that
    ``stdout`` is used internally and may not be passed.  To capture
    standard error in the result, pass ``stderr=STDOUT``.  With
    ``universal_newlines=True`` the return value is a str instead of
    bytes.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    b'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    with Popen(*popenargs, stdout=PIPE, **kwargs) as proc:
        try:
            stdout_data, _ = proc.communicate(timeout=timeout)
        except TimeoutExpired:
            # Kill the child, collect whatever it produced so far, and
            # re-raise with that partial output attached.
            proc.kill()
            stdout_data, _ = proc.communicate()
            raise TimeoutExpired(proc.args, timeout, output=stdout_data)
        except:
            # Any other error: make sure the child does not outlive us.
            proc.kill()
            proc.wait()
            raise
        rc = proc.poll()
        if rc:
            raise CalledProcessError(rc, proc.args, output=stdout_data)
    return stdout_data
def list2cmdline(seq):
    """
    Join *seq* into a single command-line string, following the quoting
    conventions of the MS C runtime:

    1) Arguments are delimited by white space (space or tab).

    2) An argument containing white space is wrapped in double
       quotation marks so it is parsed as a single argument.

    3) A double quotation mark preceded by a backslash is a literal
       double quotation mark.

    4) Backslashes are literal unless they immediately precede a
       double quotation mark.

    5) Backslashes directly before a double quotation mark are
       doubled; an odd trailing backslash escapes the quote as in
       rule 3.
    """

    # See
    # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
    # ("Parsing C++ Command-Line Arguments") for the full rules.
    pieces = []
    quote_arg = False
    for arg in seq:
        pending_backslashes = []

        # Separate this argument from the previous one.
        if pieces:
            pieces.append(' ')

        quote_arg = not arg or (' ' in arg) or ('\t' in arg)
        if quote_arg:
            pieces.append('"')

        for ch in arg:
            if ch == '\\':
                # Whether this backslash must be doubled depends on the
                # next character, so buffer it for now.
                pending_backslashes.append(ch)
            elif ch == '"':
                # Backslashes before a quote are doubled, and the quote
                # itself is escaped.
                pieces.append('\\' * len(pending_backslashes) * 2)
                pending_backslashes = []
                pieces.append('\\"')
            else:
                # An ordinary character: flush buffered backslashes
                # literally first.
                if pending_backslashes:
                    pieces.extend(pending_backslashes)
                    pending_backslashes = []
                pieces.append(ch)

        # Flush any backslashes left at the end of the argument.
        if pending_backslashes:
            pieces.extend(pending_backslashes)

        if quote_arg:
            # Trailing backslashes inside a quoted argument must be
            # doubled so they do not escape the closing quote.
            pieces.extend(pending_backslashes)
            pieces.append('"')

    return ''.join(pieces)
# Various tools for executing commands and looking at their output and status.
#
# NB This only works (and is only relevant) for POSIX.
def getstatusoutput(cmd):
    """Return a (status, output) pair for *cmd* executed in a shell.

    The command is run as '{ cmd ; } 2>&1' via os.popen(), so the
    returned output also contains anything written to stderr.  A single
    trailing newline is stripped from the output.  The status is the
    raw wait()-style value (0 when close() reports success) and can be
    interpreted per the rules for the C function wait().  Example:

    >>> import subprocess
    >>> subprocess.getstatusoutput('ls /bin/ls')
    (0, '/bin/ls')
    >>> subprocess.getstatusoutput('cat /bin/junk')
    (256, 'cat: /bin/junk: No such file or directory')
    >>> subprocess.getstatusoutput('/bin/junk')
    (256, 'sh: /bin/junk: not found')
    """
    with os.popen('{ ' + cmd + '; } 2>&1', 'r') as pipe:
        try:
            output = pipe.read()
            status = pipe.close()
        except:
            # Reading failed mid-stream; make sure the shell child is
            # killed and reaped before propagating.
            child = pipe._proc
            child.kill()
            child.wait()
            raise
    if status is None:
        # close() returns None on a zero exit status.
        status = 0
    if output.endswith('\n'):
        output = output[:-1]
    return status, output
def getoutput(cmd):
    """Return the output (stdout and stderr) of *cmd* run in a shell.

    Identical to getstatusoutput() except that the exit status is
    discarded and only the output string is returned.  Example:

    >>> import subprocess
    >>> subprocess.getoutput('ls /bin/ls')
    '/bin/ls'
    """
    _status, output = getstatusoutput(cmd)
    return output
# Sentinel meaning "the caller did not pass close_fds explicitly"; the
# platform-appropriate default is then chosen inside Popen.__init__.
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
def __init__(self, args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
self._input = None
self._communication_started = False
if bufsize is None:
bufsize = -1 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.args = args
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
#fix me brython syntax error
#if mswindows:
# if p2cwrite != -1:
# p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
# if c2pread != -1:
# c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
# if errread != -1:
# errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True)
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
self._closed_child_pipe_fds = False
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting.
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except EnvironmentError:
pass # Ignore EBADF or other errors.
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os.close(fd)
except EnvironmentError:
pass
raise
def _translate_newlines(self, data, encoding):
data = data.decode(encoding)
return data.replace("\r\n", "\n").replace("\r", "\n")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
if self.stdin:
self.stdin.close()
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize, _active=_active):
# If __init__ hasn't had a chance to execute (e.g. if it
# was passed an undeclared keyword argument), we don't
# have a _child_created attribute at all.
if not getattr(self, '_child_created', False):
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be
bytes to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
if self._communication_started and input:
raise ValueError("Cannot send input after starting communication")
# Optimization: If we are not worried about timeouts, we haven't
# started communicating, and we have one or zero pipes, using select()
# or threads is unnecessary.
if (timeout is None and not self._communication_started and
[self.stdin, self.stdout, self.stderr].count(None) >= 2):
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
else:
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
try:
stdout, stderr = self._communicate(input, endtime, timeout)
finally:
self._communication_started = True
sts = self.wait(timeout=self._remaining_time(endtime))
return (stdout, stderr)
def poll(self):
return self._internal_poll()
def _remaining_time(self, endtime):
"""Convenience for _communicate when computing timeouts."""
if endtime is None:
return None
else:
return endtime - _time()
def _check_timeout(self, endtime, orig_timeout):
"""Convenience for checking if a timeout has expired."""
if endtime is None:
return
if _time() > endtime:
raise TimeoutExpired(self.args, orig_timeout)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _winapi.CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == PIPE:
p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == DEVNULL:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _winapi.CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == PIPE:
c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == DEVNULL:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _winapi.CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == PIPE:
errread, errwrite = _winapi.CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
h = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle,
_winapi.GetCurrentProcess(), 0, 1,
_winapi.DUPLICATE_SAME_ACCESS)
return Handle(h)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_winapi.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.base_exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _winapi.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
if (_winapi.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C won't
# kill children.
creationflags |= _winapi.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error as e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
if hasattr(self, '_devnull'):
os.close(self._devnull)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = Handle(hp)
self.pid = pid
_winapi.CloseHandle(ht)
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_winapi.WaitForSingleObject,
_WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
_GetExitCodeProcess=_winapi.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if endtime is not None:
timeout = self._remaining_time(endtime)
if timeout is None:
timeout_millis = _winapi.INFINITE
else:
timeout_millis = int(timeout * 1000)
if self.returncode is None:
result = _winapi.WaitForSingleObject(self._handle,
timeout_millis)
if result == _winapi.WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = _winapi.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input, endtime, orig_timeout):
# Start reader threads feeding into a list hanging off of this
# object, unless they've already been started.
if self.stdout and not hasattr(self, "_stdout_buff"):
self._stdout_buff = []
self.stdout_thread = \
threading.Thread(target=self._readerthread,
args=(self.stdout, self._stdout_buff))
self.stdout_thread.daemon = True
self.stdout_thread.start()
if self.stderr and not hasattr(self, "_stderr_buff"):
self._stderr_buff = []
self.stderr_thread = \
threading.Thread(target=self._readerthread,
args=(self.stderr, self._stderr_buff))
self.stderr_thread.daemon = True
self.stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.stdin.close()
# Wait for the reader threads, or time out. If we time out, the
# threads remain reading and the fds left open in case the user
# calls communicate again.
if self.stdout is not None:
self.stdout_thread.join(self._remaining_time(endtime))
if self.stdout_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
if self.stderr is not None:
self.stderr_thread.join(self._remaining_time(endtime))
if self.stderr_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
# Collect the output from and close both pipes, now that we know
# both have been read successfully.
stdout = None
stderr = None
if self.stdout:
stdout = self._stdout_buff
self.stdout.close()
if self.stderr:
stderr = self._stderr_buff
self.stderr.close()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
try:
_winapi.TerminateProcess(self._handle, 1)
except PermissionError:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
rc = _winapi.GetExitCodeProcess(self._handle)
if rc == _winapi.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = _create_pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = _create_pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = _create_pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
start_fd = 3
for fd in sorted(fds_to_keep):
if fd >= start_fd:
os.closerange(start_fd, fd)
start_fd = fd + 1
if start_fd <= MAXFD:
os.closerange(start_fd, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
orig_executable = executable
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = _create_pipe()
try:
try:
# We must avoid complex work that could involve
# malloc or free in the child process to avoid
# potential deadlocks, thus we do all this here.
# and pass it to fork_exec()
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
self._child_created = True
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# self._devnull is not always defined.
devnull_fd = getattr(self, '_devnull', None)
if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
os.close(c2pwrite)
if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
os.close(errwrite)
if devnull_fd is not None:
os.close(devnull_fd)
# Prevent a double close of these fds from __init__ on error.
self._closed_child_pipe_fds = True
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
errpipe_data = bytearray()
while True:
part = _eintr_retry_call(os.read, errpipe_read, 50000)
errpipe_data += part
if not part or len(errpipe_data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if errpipe_data:
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
try:
exception_name, hex_errno, err_msg = (
errpipe_data.split(b':', 2))
except ValueError:
exception_name = b'RuntimeError'
hex_errno = b'0'
err_msg = (b'Bad exception data from child: ' +
repr(errpipe_data))
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
RuntimeError)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
child_exec_never_called = (err_msg == "noexec")
if child_exec_never_called:
err_msg = ""
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
if child_exec_never_called:
# The error must be from chdir(cwd).
err_msg += ': ' + repr(cwd)
else:
err_msg += ': ' + repr(orig_executable)
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope."""
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error as e:
if _deadstate is not None:
self.returncode = _deadstate
elif e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
return self.returncode
def _try_wait(self, wait_flags):
try:
(pid, sts) = _eintr_retry_call(os.waitpid, self.pid, wait_flags)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
return (pid, sts)
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is not None:
return self.returncode
# endtime is preferred to timeout. timeout is only used for
# printing.
if endtime is not None or timeout is not None:
if endtime is None:
endtime = _time() + timeout
elif timeout is None:
timeout = self._remaining_time(endtime)
if endtime is not None:
# Enter a busy loop if we have a timeout. This busy loop was
# cribbed from Lib/threading.py in Thread.wait() at r71065.
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
(pid, sts) = self._try_wait(os.WNOHANG)
assert pid == self.pid or pid == 0
if pid == self.pid:
self._handle_exitstatus(sts)
break
remaining = self._remaining_time(endtime)
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
else:
while self.returncode is None:
(pid, sts) = self._try_wait(0)
# Check the pid and loop as waitpid has been known to return
# 0 even without WNOHANG in odd situations. issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input, endtime, orig_timeout):
if self.stdin and not self._communication_started:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input, endtime,
orig_timeout)
else:
stdout, stderr = self._communicate_with_select(input, endtime,
orig_timeout)
self.wait(timeout=self._remaining_time(endtime))
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
return (stdout, stderr)
def _save_input(self, input):
# This method is called from the _communicate_with_*() methods
# so that if we time out while communicating, we can continue
# sending input if we retry.
if self.stdin and self._input is None:
self._input_offset = 0
self._input = input
if self.universal_newlines and input is not None:
self._input = self._input.encode(self.stdin.encoding)
def _communicate_with_poll(self, input, endtime, orig_timeout):
stdout = None # Return
stderr = None # Return
if not self._communication_started:
self._fd2file = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
self._fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
self._fd2file[fd].close()
self._fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
# Only create this mapping if we haven't already.
if not self._communication_started:
self._fd2output = {}
if self.stdout:
self._fd2output[self.stdout.fileno()] = []
if self.stderr:
self._fd2output[self.stderr.fileno()] = []
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
stdout = self._fd2output[self.stdout.fileno()]
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
stderr = self._fd2output[self.stderr.fileno()]
self._save_input(input)
while self._fd2file:
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
try:
ready = poller.poll(timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = self._input[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
self._input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if self._input_offset >= len(self._input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
self._fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input, endtime, orig_timeout):
if not self._communication_started:
self._read_set = []
self._write_set = []
if self.stdin and input:
self._write_set.append(self.stdin)
if self.stdout:
self._read_set.append(self.stdout)
if self.stderr:
self._read_set.append(self.stderr)
self._save_input(input)
stdout = None # Return
stderr = None # Return
if self.stdout:
if not self._communication_started:
self._stdout_buff = []
stdout = self._stdout_buff
if self.stderr:
if not self._communication_started:
self._stderr_buff = []
stderr = self._stderr_buff
while self._read_set or self._write_set:
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
try:
(rlist, wlist, xlist) = \
select.select(self._read_set, self._write_set, [],
timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
# According to the docs, returning three empty lists indicates
# that the timeout expired.
if not (rlist or wlist or xlist):
raise TimeoutExpired(self.args, orig_timeout)
# We also check what time it is ourselves for good measure.
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the
# file objects; they are no longer using C stdio!
if self.stdin in wlist:
chunk = self._input[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
bytes_written = os.write(self.stdin.fileno(), chunk)
except OSError as e:
if e.errno == errno.EPIPE:
self.stdin.close()
self._write_set.remove(self.stdin)
else:
raise
else:
self._input_offset += bytes_written
if self._input_offset >= len(self._input):
self.stdin.close()
self._write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if not data:
self.stdout.close()
self._read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if not data:
self.stderr.close()
self._read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
    """Kill the process with SIGKILL (cannot be caught by the child).
    """
    self.send_signal(signal.SIGKILL)
| agpl-3.0 |
KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_visual_scenes/visual_scene/node/skew/node_skew_cube3/node_skew_cube3.py | 6 | 3940 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verifications
# methods. The assistant buffers its checks, so that running them again
# does not incurs an unnecessary performance hint.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = []        # element tag path to inspect (stored on the judging object but not consulted by its Judge* methods)
attrName = ''      # attribute name to check (unused by this image-comparison test)
attrVal = ''       # expected attribute value (unused here)
dataToCheck = ''   # expected element data (unused here)
class SimpleJudgingObject:
    """Buffers the standard badge checks for one CTS test case.

    The CTS dynamic loader looks for a module-level instance named
    ``judgingObject`` and invokes one Judge* method per badge level.
    """

    def __init__(self, _tagLst, _attrName, _attrVal, _data):
        # Node-list / attribute parameters fed in at module scope.
        self.tagList = _tagLst
        self.attrName = _attrName
        self.attrVal = _attrVal
        self.dataToCheck = _data
        # Every badge starts out failed until its judge method runs.
        self.status_baseline = self.status_superior = self.status_exemplary = False
        # The assistant buffers its verifications so re-running them
        # between badge levels costs nothing extra.
        self.__assistant = JudgeAssistant.JudgeAssistant()

    def JudgeBaseline(self, context):
        # A crash in any step is an automatic failure.
        self.__assistant.CheckCrashes(context)
        # Import/Export/Validate must exist and pass; Render only has to exist.
        self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
        if self.__assistant.GetResults() == False:
            self.status_baseline = False
            return False
        # Import/export renders must match each other; the result is then
        # compared against the "node_skew_cube" reference for non-equivalence.
        if self.__assistant.CompareRenderedImages(context):
            self.__assistant.CompareImagesAgainst(context, "node_skew_cube", None, None, 5, True, False)
        self.status_baseline = self.__assistant.DeferJudgement(context)
        return self.status_baseline

    def JudgeSuperior(self, context):
        # Superior adds no checks of its own; it inherits the baseline verdict.
        self.status_superior = self.status_baseline
        return self.status_superior

    def JudgeExemplary(self, context):
        # Exemplary likewise just forwards the superior verdict.
        self.status_exemplary = self.status_superior
        return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck)
| mit |
hellodmp/segmentnet | DataManager.py | 1 | 7357 | import numpy as np
import SimpleITK as sitk
from LabelManager import LabelManager
from os import listdir
from os.path import isfile, isdir, join, splitext
import utilities
from RTExport import RTExport
from scipy import ndimage
from skimage import measure
class DataManager(object):
    """Loads DICOM image series and label volumes, converts them into
    fixed-size numpy arrays for network training/testing, and maps
    network output back to per-slice contour points.

    Expects ``parameters`` to provide at least the keys used below:
    'labelList', 'NumVolSize', 'dstRes' and 'normDir'.
    """
    # Class-level defaults; instances overwrite these in __init__ and loaders.
    params=None
    srcFolder=None
    resultsDir=None
    fileList=None
    gtList=None
    sitkImages=None
    sitkGT=None
    meanIntensityTrain = None
    #label_list = ["Urinary Bladder","FemoralHead"]
    #label_list = ["CTV","PTV"]
    def __init__(self, srcFolder, resultsDir, parameters):
        self.params=parameters
        self.srcFolder=srcFolder
        self.resultsDir=resultsDir
    def createImageFileList(self):
        # Every sub-directory of srcFolder is treated as one patient/case.
        self.fileList = [f for f in listdir(self.srcFolder) if isdir(join(self.srcFolder, f))]
        print 'FILE LIST: ' + str(self.fileList)
    def loadImages(self):
        """Read one DICOM series per case directory into self.sitkImages.

        Intensities are rescaled to [0, 1] and the image is stored in a
        one-element list keyed by the absolute case directory path.
        """
        self.sitkImages = dict()
        rescalFilt = sitk.RescaleIntensityImageFilter()
        rescalFilt.SetOutputMaximum(1)
        rescalFilt.SetOutputMinimum(0)
        reader = sitk.ImageSeriesReader()
        for dir in self.fileList:
            dir = join(self.srcFolder, dir)
            series_list = reader.GetGDCMSeriesIDs(dir)
            # Pick the first series with more than one file; single-file
            # "series" are presumably non-image objects such as
            # RT-structure sets -- TODO confirm against the data layout.
            for series_id in series_list:
                dicom_names = reader.GetGDCMSeriesFileNames(dir, series_id)
                if len(dicom_names) > 1:
                    break
            reader.SetFileNames(dicom_names)
            self.sitkImages[dir] = [rescalFilt.Execute(sitk.Cast(reader.Execute(),sitk.sitkFloat32))]
    def loadTrainingData(self):
        """Load all case images plus their ground-truth label volumes."""
        self.createImageFileList()
        self.loadImages()
        #load labels
        # Spacing is taken from an arbitrary loaded image; assumes all
        # cases share the same spacing for label rasterisation -- TODO confirm.
        key = self.sitkImages.keys()[0]
        spacing = self.sitkImages[key][0].GetSpacing()
        manager = LabelManager(self.srcFolder, spacing)
        manager.createLabelFileList()
        self.sitkGT = manager.load_labels(self.params['labelList'])
    def loadTestData(self):
        """Load a single case (srcFolder itself) without any labels."""
        self.fileList = [self.srcFolder]
        self.loadImages()
        '''
        # load labels
        key = self.sitkImages.keys()[0]
        spacing = self.sitkImages[key][0].GetSpacing()
        manager = LabelManager(self.srcFolder, spacing)
        manager.createLabelFileList()
        self.sitkGT = manager.load_labels(self.label_list)
        '''
    def getNumpyImages(self):
        """Return {case: array} with each image resampled/cropped to NumVolSize."""
        dat = self.getNumpyData(self.sitkImages,sitk.sitkLinear)
        for key in dat:
            # Each case holds exactly one image; unwrap the one-element list.
            dat[key] = dat[key][0]
        return dat
    def getNumpyGT(self):
        """Return {case: array} of binarised label volumes, one stacked
        array per case with shape (num_labels,) + NumVolSize."""
        dat = self.getNumpyData(self.sitkGT,sitk.sitkLinear)
        for key in dat:
            dat_list = dat[key]
            num_dat = np.zeros([len(dat_list), self.params['NumVolSize'][0], self.params['NumVolSize'][1],
                                self.params['NumVolSize'][2]], dtype=np.float32)
            for i in range(len(dat_list)):
                # Threshold at 0.5: linear interpolation during resampling
                # turns binary masks into fractional values.
                num_dat[i,:,:,:] = (dat_list[i]>0.5).astype(dtype=np.float32)
            dat[key] = num_dat
        return dat
    def getNumpyData(self, dat, method):
        """Resample every volume to 'dstRes' spacing, centre-crop to
        'NumVolSize', and return numpy arrays (axes reordered via
        transpose([1, 2, 0]) from SimpleITK's array layout)."""
        ret=dict()
        for key in dat:
            dat_list = dat[key]
            result_list = []
            for i in range(len(dat_list)):
                img = dat_list[i]
                # we rotate the image according to its transformation using the direction and according to the final spacing we want
                factor = np.asarray(img.GetSpacing()) / [self.params['dstRes'][0], self.params['dstRes'][1],
                                                         self.params['dstRes'][2]]
                factorSize = np.asarray(img.GetSize() * factor, dtype=float)
                # Never resample to something smaller than the crop window.
                newSize = np.max([factorSize, self.params['NumVolSize']], axis=0)
                newSize = newSize.astype(dtype=int)
                resampler = sitk.ResampleImageFilter()
                resampler.SetReferenceImage(img)
                resampler.SetOutputSpacing([self.params['dstRes'][0], self.params['dstRes'][1], self.params['dstRes'][2]])
                resampler.SetSize(newSize)
                resampler.SetInterpolator(method)
                if self.params['normDir']:
                    # Undo the image's direction matrix so all volumes share
                    # a canonical orientation.
                    T = sitk.AffineTransform(3)
                    T.SetMatrix(img.GetDirection())
                    resampler.SetTransform(T.GetInverse())
                imgResampled = resampler.Execute(img)
                # Centre crop of NumVolSize voxels.
                imgCentroid = np.asarray(newSize, dtype=float) / 2.0
                imgStartPx = (imgCentroid - self.params['NumVolSize'] / 2.0).astype(dtype=int)
                regionExtractor = sitk.RegionOfInterestImageFilter()
                regionExtractor.SetSize(list(self.params['NumVolSize'].astype(dtype=int)))
                regionExtractor.SetIndex(list(imgStartPx))
                imgResampledCropped = regionExtractor.Execute(imgResampled)
                result_list.append(np.transpose(sitk.GetArrayFromImage(imgResampledCropped).astype(dtype=float), [1, 2, 0]))
            ret[key] = result_list
        return ret
    def filter(self, dat):
        """Zero out slices (last axis) whose foreground sum is below 40.

        NOTE(review): shadows the builtin ``filter``; its only reference
        in this file is the commented-out call in result2Points.
        """
        (w,h,d) = dat.shape
        for i in range(0,d):
            count = np.sum(dat[:,:,i])
            if count < 40:
                dat[:, :, i] = np.zeros((w,h),dtype=float)
        return dat
    def result2Points(self, result, dicomPath):
        """Convert a predicted binary volume back to per-slice contour
        points in the source image's physical (mm) coordinates.

        Returns a list of (slice_index, [contour point arrays]) tuples.
        """
        # Median filter smooths the mask before contour extraction.
        result = ndimage.median_filter(result, 9)
        #result = filter(result)
        img = self.sitkImages[dicomPath][0]
        # Scale factor from network-volume voxels back to image voxels.
        factor = np.asarray([self.params['dstRes'][0], self.params['dstRes'][1], self.params['dstRes'][2]]) \
                 / [img.GetSpacing()[0], img.GetSpacing()[1], img.GetSpacing()[2]]
        newSize = np.asarray(result.shape * factor, dtype=int)
        # Offset of the centre crop inside the original image.
        start = (img.GetSize() - newSize) / 2
        points_list = []
        for i in range(result.shape[2]):
            temp_list = []
            contours = measure.find_contours(np.transpose(result[:, :, i], [1, 0]), 0.1)
            for contour in contours:
                # Drop tiny (noise) contours.
                if len(contour) < 20:
                    continue
                points = contour * factor[0:2]
                points += start[0:2]
                points = points * img.GetSpacing()[0:2]
                temp_list.append(points)
            points_list.append((i + start[2], temp_list))
        return points_list
    '''
    def writeResultsFromNumpyLabel(self, result, dicomPath, structureName, sourcePath, destPath):
        result = ndimage.median_filter(result, 9)
        img = self.sitkImages[dicomPath][0]
        factor = np.asarray([self.params['dstRes'][0], self.params['dstRes'][1],self.params['dstRes'][2]]) \
                 / [img.GetSpacing()[0], img.GetSpacing()[1], img.GetSpacing()[2]]
        newSize = np.asarray(result.shape * factor, dtype=int)
        start = (img.GetSize() - newSize) / 2
        point_list = []
        for i in range(result.shape[2]):
            contours = measure.find_contours(np.transpose(result[:,:,i], [1, 0]), 0.3)
            for contour in contours:
                if len(contour) < 20:
                    continue
                points = contour*factor[0:2]
                points += start[0:2]
                points = points*img.GetSpacing()[0:2]
                list.append(points)
            point_list.append((i+start[2], list))
        rtExport = RTExport(dicomPath, sourcePath, destPath)
        rtExport.save(structureName, point_list)
        print "ok"
    '''
| gpl-3.0 |
DrSleep/tensorflow-deeplab-resnet | kaffe/tensorflow/network.py | 2 | 10990 | import numpy as np
import tensorflow as tf
# Shorthand for the contrib "slim" layers API (used by batch_normalization).
slim = tf.contrib.slim
# Padding mode applied by conv/pool layers unless a caller overrides it.
DEFAULT_PADDING = 'SAME'
def layer(op):
    '''Decorator that turns a raw op into a chainable network layer.

    The wrapped method pulls its input from ``self.terminals``, runs the
    op, registers the output under a (possibly auto-generated) name in
    ``self.layers``, feeds the output forward, and returns ``self`` so
    calls can be chained fluently.
    '''
    def layer_decorated(self, *args, **kwargs):
        # An explicit name wins; otherwise derive a unique one from the op.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        terminals = self.terminals
        if not terminals:
            raise RuntimeError('No input variables found for layer %s.' % name)
        # A single terminal is passed through as-is; several become a list.
        layer_input = terminals[0] if len(terminals) == 1 else list(terminals)
        layer_output = op(self, layer_input, *args, **kwargs)
        # Record the output and make it the input of whatever comes next.
        self.layers[name] = layer_output
        self.feed(layer_output)
        return self
    return layer_decorated
class Network(object):
    """Base class for Caffe-to-TensorFlow converted networks.

    Subclasses implement setup() using the @layer-decorated methods
    below, chaining calls via feed()/get_output().
    """
    def __init__(self, inputs, trainable=True, is_training=False, num_classes=21):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        # Switch variable for dropout
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0),
                                                       shape=[],
                                                       name='use_dropout')
        self.setup(is_training, num_classes)

    def setup(self, is_training, num_classes):
        '''Construct the network.

        Subclasses must override this and build their layer graph here.
        FIX: signature now matches the call in __init__, which passes
        both is_training and num_classes (previously the base declared
        only is_training, so calling the base raised TypeError instead
        of NotImplementedError).
        '''
        raise NotImplementedError('Must be implemented by the subclass.')

    def load(self, data_path, session, ignore_missing=False):
        '''Load network weights.
        data_path: The path to the numpy-serialized network weights
        session: The current TensorFlow session
        ignore_missing: If true, serialized weights for missing layers are ignored.
        '''
        data_dict = np.load(data_path).item()
        for op_name in data_dict:
            with tf.variable_scope(op_name, reuse=True):
                for param_name, data in data_dict[op_name].iteritems():
                    try:
                        var = tf.get_variable(param_name)
                        session.run(var.assign(data))
                    except ValueError:
                        if not ignore_missing:
                            raise

    def feed(self, *args):
        '''Set the input(s) for the next operation by replacing the terminal nodes.
        The arguments can be either layer names or the actual layers.
        '''
        assert len(args) != 0
        self.terminals = []
        for fed_layer in args:
            if isinstance(fed_layer, basestring):
                try:
                    fed_layer = self.layers[fed_layer]
                except KeyError:
                    raise KeyError('Unknown layer name fed: %s' % fed_layer)
            self.terminals.append(fed_layer)
        return self

    def get_output(self):
        '''Returns the current network output.'''
        return self.terminals[-1]

    def get_unique_name(self, prefix):
        '''Returns an index-suffixed unique name for the given prefix.
        This is used for auto-generating layer names based on the type-prefix.
        '''
        ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
        return '%s_%d' % (prefix, ident)

    def make_var(self, name, shape):
        '''Creates a new TensorFlow variable.'''
        return tf.get_variable(name, shape, trainable=self.trainable)

    def validate_padding(self, padding):
        '''Verifies that the padding is one of the supported ones.'''
        assert padding in ('SAME', 'VALID')

    @layer
    def conv(self,
             input,
             k_h,
             k_w,
             c_o,
             s_h,
             s_w,
             name,
             relu=True,
             padding=DEFAULT_PADDING,
             group=1,
             biased=True):
        '''2-D convolution (k_h x k_w kernel, c_o output channels,
        s_h x s_w stride), with optional grouping, bias and ReLU.'''
        # Verify that the padding is acceptable
        self.validate_padding(padding)
        # Get the number of channels in the input
        c_i = input.get_shape()[-1]
        # Verify that the grouping parameter is valid
        assert c_i % group == 0
        assert c_o % group == 0
        # Convolution for a given input and kernel
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
        with tf.variable_scope(name) as scope:
            kernel = self.make_var('weights', shape=[k_h, k_w, c_i / group, c_o])
            if group == 1:
                # This is the common-case. Convolve the input without any further complications.
                output = convolve(input, kernel)
            else:
                # Split the input into groups and then convolve each of them independently
                input_groups = tf.split(3, group, input)
                kernel_groups = tf.split(3, group, kernel)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                # Concatenate the groups
                output = tf.concat(3, output_groups)
            # Add the biases
            if biased:
                biases = self.make_var('biases', [c_o])
                output = tf.nn.bias_add(output, biases)
            if relu:
                # ReLU non-linearity
                output = tf.nn.relu(output, name=scope.name)
            return output

    @layer
    def atrous_conv(self,
                    input,
                    k_h,
                    k_w,
                    c_o,
                    dilation,
                    name,
                    relu=True,
                    padding=DEFAULT_PADDING,
                    group=1,
                    biased=True):
        '''Dilated (atrous) 2-D convolution; mirrors conv() but uses a
        dilation rate instead of a stride.'''
        # Verify that the padding is acceptable
        self.validate_padding(padding)
        # Get the number of channels in the input
        c_i = input.get_shape()[-1]
        # Verify that the grouping parameter is valid
        assert c_i % group == 0
        assert c_o % group == 0
        # Convolution for a given input and kernel
        convolve = lambda i, k: tf.nn.atrous_conv2d(i, k, dilation, padding=padding)
        with tf.variable_scope(name) as scope:
            kernel = self.make_var('weights', shape=[k_h, k_w, c_i / group, c_o])
            if group == 1:
                # This is the common-case. Convolve the input without any further complications.
                output = convolve(input, kernel)
            else:
                # Split the input into groups and then convolve each of them independently
                input_groups = tf.split(3, group, input)
                kernel_groups = tf.split(3, group, kernel)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                # Concatenate the groups
                output = tf.concat(3, output_groups)
            # Add the biases
            if biased:
                biases = self.make_var('biases', [c_o])
                output = tf.nn.bias_add(output, biases)
            if relu:
                # ReLU non-linearity
                output = tf.nn.relu(output, name=scope.name)
            return output

    @layer
    def relu(self, input, name):
        '''Element-wise ReLU activation.'''
        return tf.nn.relu(input, name=name)

    @layer
    def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        '''Max pooling with a k_h x k_w window and s_h x s_w stride.'''
        self.validate_padding(padding)
        return tf.nn.max_pool(input,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)

    @layer
    def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        '''Average pooling with a k_h x k_w window and s_h x s_w stride.'''
        self.validate_padding(padding)
        return tf.nn.avg_pool(input,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)

    @layer
    def lrn(self, input, radius, alpha, beta, name, bias=1.0):
        '''Local response normalization (AlexNet-style).'''
        return tf.nn.local_response_normalization(input,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias,
                                                  name=name)

    @layer
    def concat(self, inputs, axis, name):
        '''Concatenate multiple fed inputs along the given axis.'''
        return tf.concat(concat_dim=axis, values=inputs, name=name)

    @layer
    def add(self, inputs, name):
        '''Element-wise sum of the fed inputs (e.g. residual connections).'''
        return tf.add_n(inputs, name=name)

    @layer
    def fc(self, input, num_out, name, relu=True):
        '''Fully connected layer with num_out units; flattens spatial input.'''
        with tf.variable_scope(name) as scope:
            input_shape = input.get_shape()
            if input_shape.ndims == 4:
                # The input is spatial. Vectorize it first.
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(input, [-1, dim])
            else:
                feed_in, dim = (input, input_shape[-1].value)
            weights = self.make_var('weights', shape=[dim, num_out])
            biases = self.make_var('biases', [num_out])
            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc

    @layer
    def softmax(self, input, name):
        '''Softmax over the last dimension; squeezes 1x1 spatial inputs first.'''
        input_shape = map(lambda v: v.value, input.get_shape())
        if len(input_shape) > 2:
            # For certain models (like NiN), the singleton spatial dimensions
            # need to be explicitly squeezed, since they're not broadcast-able
            # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
            if input_shape[1] == 1 and input_shape[2] == 1:
                input = tf.squeeze(input, squeeze_dims=[1, 2])
            else:
                raise ValueError('Rank 2 tensor input expected for softmax!')
        return tf.nn.softmax(input, name)

    @layer
    def batch_normalization(self, input, name, is_training, activation_fn=None, scale=True):
        '''Batch normalization via tf.contrib.slim, optionally followed
        by an activation function.'''
        with tf.variable_scope(name) as scope:
            output = slim.batch_norm(
                input,
                activation_fn=activation_fn,
                is_training=is_training,
                updates_collections=None,
                scale=scale,
                scope=scope)
            return output

    @layer
    def dropout(self, input, keep_prob, name):
        '''Dropout gated by the use_dropout placeholder: when the
        placeholder is 0 the effective keep probability becomes 1
        (i.e. dropout is disabled at inference time).'''
        keep = 1 - self.use_dropout + (self.use_dropout * keep_prob)
        return tf.nn.dropout(input, keep, name=name)
| mit |
holygits/incubator-airflow | airflow/hooks/postgres_hook.py | 17 | 2329 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import psycopg2
import psycopg2.extensions
from airflow.hooks.dbapi_hook import DbApiHook
class PostgresHook(DbApiHook):
    """
    Interact with Postgres.
    You can specify ssl parameters in the extra field of your connection
    as ``{"sslmode": "require", "sslcert": "/path/to/cert.pem", etc}``.
    """
    conn_name_attr = 'postgres_conn_id'
    default_conn_name = 'postgres_default'
    supports_autocommit = True

    def __init__(self, *args, **kwargs):
        # Let the generic DB-API hook initialise itself first, then pull
        # out the optional schema override.
        super(PostgresHook, self).__init__(*args, **kwargs)
        self.schema = kwargs.pop("schema", None)

    def get_conn(self):
        """Open and return a new psycopg2 connection for this hook's connection id."""
        conn = self.get_connection(self.postgres_conn_id)
        conn_args = {
            'host': conn.host,
            'user': conn.login,
            'password': conn.password,
            'dbname': self.schema or conn.schema,
            'port': conn.port,
        }
        # Only a whitelist of keys from the connection's extra JSON may be
        # forwarded to psycopg2 (ssl settings plus application_name).
        permitted = ('sslmode', 'sslcert', 'sslkey', 'sslrootcert', 'sslcrl', 'application_name')
        for extra_key, extra_value in conn.extra_dejson.items():
            if extra_key in permitted:
                conn_args[extra_key] = extra_value
        return psycopg2.connect(**conn_args)

    @staticmethod
    def _serialize_cell(cell, conn):
        """
        Postgresql will adapt all arguments to the execute() method internally,
        hence we return cell without any conversion.
        See http://initd.org/psycopg/docs/advanced.html#adapting-new-types for
        more information.
        :param cell: The cell to insert into the table
        :type cell: object
        :param conn: The database connection
        :type conn: connection object
        :return: The cell
        :rtype: object
        """
        return cell
| apache-2.0 |
hectord/lettuce | tests/integration/lib/Django-1.3/django/contrib/sitemaps/tests/basic.py | 155 | 7620 | import os
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
class SitemapTests(TestCase):
    """Tests for django.contrib.sitemaps rendering: index, sitemap,
    custom templates, localized priorities, generic/flatpage sitemaps
    and behaviour without the sites framework."""
    # URLconf used for every request issued by this test case.
    urls = 'django.contrib.sitemaps.tests.urls'
    def setUp(self):
        # Expected host depends on whether the sites framework is installed.
        if Site._meta.installed:
            self.base_url = 'http://example.com'
        else:
            self.base_url = 'http://testserver'
        # Save settings mutated by individual tests so tearDown can restore them.
        self.old_USE_L10N = settings.USE_L10N
        self.old_Site_meta_installed = Site._meta.installed
        self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
        # NOTE(review): duplicate save of Site._meta.installed (also two
        # lines above) -- harmless but redundant.
        self.old_Site_meta_installed = Site._meta.installed
        settings.TEMPLATE_DIRS = (
            os.path.join(os.path.dirname(__file__), 'templates'),
        )
        # Create a user that will double as sitemap content
        User.objects.create_user('testuser', 'test@example.com', 's3krit')
    def tearDown(self):
        settings.USE_L10N = self.old_USE_L10N
        Site._meta.installed = self.old_Site_meta_installed
        settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
        Site._meta.installed = self.old_Site_meta_installed
    def test_simple_sitemap_index(self):
        "A simple sitemap index can be rendered"
        # Retrieve the sitemap.
        response = self.client.get('/simple/index.xml')
        # Check for all the important bits:
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
    def test_simple_sitemap_custom_index(self):
        "A simple sitemap index can be rendered with a custom template"
        # Retrieve the sitemap.
        response = self.client.get('/simple/custom-index.xml')
        # Check for all the important bits:
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
    def test_simple_sitemap(self):
        "A simple sitemap can be rendered"
        # Retrieve the sitemap.
        response = self.client.get('/simple/sitemap.xml')
        # Check for all the important bits:
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today().strftime('%Y-%m-%d')))
    def test_simple_custom_sitemap(self):
        "A simple sitemap can be rendered with a custom template"
        # Retrieve the sitemap.
        response = self.client.get('/simple/custom-sitemap.xml')
        # Check for all the important bits:
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today().strftime('%Y-%m-%d')))
    @skipUnless(settings.USE_I18N, "Internationalization is not enabled")
    def test_localized_priority(self):
        "The priority value should not be localized (Refs #14164)"
        # Localization should be active
        settings.USE_L10N = True
        activate('fr')
        self.assertEqual(u'0,3', localize(0.3))
        # Retrieve the sitemap. Check that priorities
        # haven't been rendered in localized format
        response = self.client.get('/simple/sitemap.xml')
        self.assertContains(response, '<priority>0.5</priority>')
        self.assertContains(response, '<lastmod>%s</lastmod>' % date.today().strftime('%Y-%m-%d'))
        deactivate()
    def test_generic_sitemap(self):
        "A minimal generic sitemap can be rendered"
        # Retrieve the sitemap.
        response = self.client.get('/generic/sitemap.xml')
        expected = ''
        for username in User.objects.values_list("username", flat=True):
            expected += "<url><loc>%s/users/%s/</loc></url>" % (self.base_url, username)
        # Check for all the important bits:
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
%s
</urlset>
""" % expected)
    @skipUnless("django.contrib.flatpages" in settings.INSTALLED_APPS, "django.contrib.flatpages app not installed.")
    def test_flatpage_sitemap(self):
        "Basic FlatPage sitemap test"
        # Import FlatPage inside the test so that when django.contrib.flatpages
        # is not installed we don't get problems trying to delete Site
        # objects (FlatPage has an M2M to Site, Site.delete() tries to
        # delete related objects, but the M2M table doesn't exist.
        from django.contrib.flatpages.models import FlatPage
        public = FlatPage.objects.create(
            url=u'/public/',
            title=u'Public Page',
            enable_comments=True,
            registration_required=False,
        )
        public.sites.add(settings.SITE_ID)
        private = FlatPage.objects.create(
            url=u'/private/',
            title=u'Private Page',
            enable_comments=True,
            registration_required=True
        )
        private.sites.add(settings.SITE_ID)
        response = self.client.get('/flatpages/sitemap.xml')
        # Public flatpage should be in the sitemap
        self.assertContains(response, '<loc>%s%s</loc>' % (self.base_url, public.url))
        # Private flatpage should not be in the sitemap
        self.assertNotContains(response, '<loc>%s%s</loc>' % (self.base_url, private.url))
    def test_requestsite_sitemap(self):
        # Make sure hitting the flatpages sitemap without the sites framework
        # installed doesn't raise an exception
        Site._meta.installed = False
        # Retrieve the sitemap.
        response = self.client.get('/simple/sitemap.xml')
        # Check for all the important bits:
        self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today().strftime('%Y-%m-%d'))
    @skipUnless("django.contrib.sites" in settings.INSTALLED_APPS, "django.contrib.sites app not installed.")
    def test_sitemap_get_urls_no_site_1(self):
        """
        Check we get ImproperlyConfigured if we don't pass a site object to
        Sitemap.get_urls and no Site objects exist
        """
        Site.objects.all().delete()
        self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
    def test_sitemap_get_urls_no_site_2(self):
        """
        Check we get ImproperlyConfigured when we don't pass a site object to
        Sitemap.get_urls if Site objects exists, but the sites framework is not
        actually installed.
        """
        Site._meta.installed = False
        self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
| gpl-3.0 |
cpennington/edx-platform | common/djangoapps/track/segment.py | 4 | 3059 | """
Wrapper methods for emitting events to Segment directly (rather than through tracking log events).
These take advantage of properties that are extracted from incoming requests by track middleware,
stored in tracking context objects, and extracted here to be passed to Segment as part of context
required by server-side events.
To use, call "from track import segment", then call segment.track() or segment.identify().
"""
import analytics
from django.conf import settings
from eventtracking import tracker
from six.moves.urllib.parse import urlunsplit
def track(user_id, event_name, properties=None, context=None):
    """
    Wrapper for emitting Segment track event, including augmenting context information from middleware.

    Emits nothing unless ``event_name`` is given and LMS_SEGMENT_KEY is
    configured and non-empty.  Caller-supplied ``context`` values always
    win; fields extracted by the tracking middleware (ip, GA client id,
    user agent, page info) are only filled in where the caller left gaps.
    """
    if event_name is not None and hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        properties = properties or {}
        # Copy so the caller's context dict is never mutated.
        segment_context = dict(context) if context else {}
        tracking_context = tracker.get_tracker().resolve_context()
        # Fill in request-derived fields only when the caller did not supply them.
        if 'ip' not in segment_context and 'ip' in tracking_context:
            segment_context['ip'] = tracking_context.get('ip')
        # NOTE(review): when 'Google Analytics' exists but lacks 'clientId',
        # the whole sub-dict is replaced rather than merged -- presumably fine
        # because only clientId is ever set here, but confirm no caller passes
        # other GA keys.
        if ('Google Analytics' not in segment_context or 'clientId' not in segment_context['Google Analytics']) and 'client_id' in tracking_context:
            segment_context['Google Analytics'] = {
                'clientId': tracking_context.get('client_id')
            }
        if 'userAgent' not in segment_context and 'agent' in tracking_context:
            segment_context['userAgent'] = tracking_context.get('agent')
        path = tracking_context.get('path')
        referer = tracking_context.get('referer')
        page = tracking_context.get('page')
        if path and not page:
            # Try to put together a url from host and path, hardcoding the schema.
            # (Segment doesn't care about the schema for GA, but will extract the host and path from the url.)
            host = tracking_context.get('host')
            if host:
                parts = ("https", host, path, "", "")
                page = urlunsplit(parts)
        if path is not None or referer is not None or page is not None:
            if 'page' not in segment_context:
                segment_context['page'] = {}
            # Again: only fill page fields the caller has not already set.
            if path is not None and 'path' not in segment_context['page']:
                segment_context['page']['path'] = path
            if referer is not None and 'referrer' not in segment_context['page']:
                segment_context['page']['referrer'] = referer
            if page is not None and 'url' not in segment_context['page']:
                segment_context['page']['url'] = page
        analytics.track(user_id, event_name, properties, segment_context)
def identify(user_id, properties, context=None):
    """
    Emit a Segment "identify" event for the given user.

    Does nothing unless LMS_SEGMENT_KEY is configured and non-empty.
    """
    if not getattr(settings, 'LMS_SEGMENT_KEY', None):
        return
    analytics.identify(user_id, properties, dict(context) if context else {})
| agpl-3.0 |
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/encodings/mac_farsi.py | 272 | 15170 | """ Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless one-shot codec backed by the generated charmap tables.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so no state is kept between calls.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is context-free, so no state is kept between calls.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec; no extra stream state needed.
    pass
pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs registry uses for 'mac-farsi'."""
    return codecs.CodecInfo(
        name='mac-farsi',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE, left-right
'!' # 0x21 -> EXCLAMATION MARK, left-right
'"' # 0x22 -> QUOTATION MARK, left-right
'#' # 0x23 -> NUMBER SIGN, left-right
'$' # 0x24 -> DOLLAR SIGN, left-right
'%' # 0x25 -> PERCENT SIGN, left-right
'&' # 0x26 -> AMPERSAND, left-right
"'" # 0x27 -> APOSTROPHE, left-right
'(' # 0x28 -> LEFT PARENTHESIS, left-right
')' # 0x29 -> RIGHT PARENTHESIS, left-right
'*' # 0x2A -> ASTERISK, left-right
'+' # 0x2B -> PLUS SIGN, left-right
',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
'-' # 0x2D -> HYPHEN-MINUS, left-right
'.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
'/' # 0x2F -> SOLIDUS, left-right
'0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO
'1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE
'2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO
'3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE
'4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE
'6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE
':' # 0x3A -> COLON, left-right
';' # 0x3B -> SEMICOLON, left-right
'<' # 0x3C -> LESS-THAN SIGN, left-right
'=' # 0x3D -> EQUALS SIGN, left-right
'>' # 0x3E -> GREATER-THAN SIGN, left-right
'?' # 0x3F -> QUESTION MARK, left-right
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET, left-right
'\\' # 0x5C -> REVERSE SOLIDUS, left-right
']' # 0x5D -> RIGHT SQUARE BRACKET, left-right
'^' # 0x5E -> CIRCUMFLEX ACCENT, left-right
'_' # 0x5F -> LOW LINE, left-right
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET, left-right
'|' # 0x7C -> VERTICAL LINE, left-right
'}' # 0x7D -> RIGHT CURLY BRACKET, left-right
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xa0' # 0x81 -> NO-BREAK SPACE, right-left
'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA
'\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left
'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0x9B -> DIVISION SIGN, right-left
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
' ' # 0xA0 -> SPACE, right-left
'!' # 0xA1 -> EXCLAMATION MARK, right-left
'"' # 0xA2 -> QUOTATION MARK, right-left
'#' # 0xA3 -> NUMBER SIGN, right-left
'$' # 0xA4 -> DOLLAR SIGN, right-left
'\u066a' # 0xA5 -> ARABIC PERCENT SIGN
'&' # 0xA6 -> AMPERSAND, right-left
"'" # 0xA7 -> APOSTROPHE, right-left
'(' # 0xA8 -> LEFT PARENTHESIS, right-left
')' # 0xA9 -> RIGHT PARENTHESIS, right-left
'*' # 0xAA -> ASTERISK, right-left
'+' # 0xAB -> PLUS SIGN, right-left
'\u060c' # 0xAC -> ARABIC COMMA
'-' # 0xAD -> HYPHEN-MINUS, right-left
'.' # 0xAE -> FULL STOP, right-left
'/' # 0xAF -> SOLIDUS, right-left
'\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override)
'\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override)
'\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override)
'\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override)
'\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override)
'\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override)
'\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override)
'\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override)
'\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override)
'\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override)
':' # 0xBA -> COLON, right-left
'\u061b' # 0xBB -> ARABIC SEMICOLON
'<' # 0xBC -> LESS-THAN SIGN, right-left
'=' # 0xBD -> EQUALS SIGN, right-left
'>' # 0xBE -> GREATER-THAN SIGN, right-left
'\u061f' # 0xBF -> ARABIC QUESTION MARK
'\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
'\u0627' # 0xC7 -> ARABIC LETTER ALEF
'\u0628' # 0xC8 -> ARABIC LETTER BEH
'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
'\u062a' # 0xCA -> ARABIC LETTER TEH
'\u062b' # 0xCB -> ARABIC LETTER THEH
'\u062c' # 0xCC -> ARABIC LETTER JEEM
'\u062d' # 0xCD -> ARABIC LETTER HAH
'\u062e' # 0xCE -> ARABIC LETTER KHAH
'\u062f' # 0xCF -> ARABIC LETTER DAL
'\u0630' # 0xD0 -> ARABIC LETTER THAL
'\u0631' # 0xD1 -> ARABIC LETTER REH
'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
'\u0633' # 0xD3 -> ARABIC LETTER SEEN
'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
'\u0635' # 0xD5 -> ARABIC LETTER SAD
'\u0636' # 0xD6 -> ARABIC LETTER DAD
'\u0637' # 0xD7 -> ARABIC LETTER TAH
'\u0638' # 0xD8 -> ARABIC LETTER ZAH
'\u0639' # 0xD9 -> ARABIC LETTER AIN
'\u063a' # 0xDA -> ARABIC LETTER GHAIN
'[' # 0xDB -> LEFT SQUARE BRACKET, right-left
'\\' # 0xDC -> REVERSE SOLIDUS, right-left
']' # 0xDD -> RIGHT SQUARE BRACKET, right-left
'^' # 0xDE -> CIRCUMFLEX ACCENT, right-left
'_' # 0xDF -> LOW LINE, right-left
'\u0640' # 0xE0 -> ARABIC TATWEEL
'\u0641' # 0xE1 -> ARABIC LETTER FEH
'\u0642' # 0xE2 -> ARABIC LETTER QAF
'\u0643' # 0xE3 -> ARABIC LETTER KAF
'\u0644' # 0xE4 -> ARABIC LETTER LAM
'\u0645' # 0xE5 -> ARABIC LETTER MEEM
'\u0646' # 0xE6 -> ARABIC LETTER NOON
'\u0647' # 0xE7 -> ARABIC LETTER HEH
'\u0648' # 0xE8 -> ARABIC LETTER WAW
'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
'\u064a' # 0xEA -> ARABIC LETTER YEH
'\u064b' # 0xEB -> ARABIC FATHATAN
'\u064c' # 0xEC -> ARABIC DAMMATAN
'\u064d' # 0xED -> ARABIC KASRATAN
'\u064e' # 0xEE -> ARABIC FATHA
'\u064f' # 0xEF -> ARABIC DAMMA
'\u0650' # 0xF0 -> ARABIC KASRA
'\u0651' # 0xF1 -> ARABIC SHADDA
'\u0652' # 0xF2 -> ARABIC SUKUN
'\u067e' # 0xF3 -> ARABIC LETTER PEH
'\u0679' # 0xF4 -> ARABIC LETTER TTEH
'\u0686' # 0xF5 -> ARABIC LETTER TCHEH
'\u06d5' # 0xF6 -> ARABIC LETTER AE
'\u06a4' # 0xF7 -> ARABIC LETTER VEH
'\u06af' # 0xF8 -> ARABIC LETTER GAF
'\u0688' # 0xF9 -> ARABIC LETTER DDAL
'\u0691' # 0xFA -> ARABIC LETTER RREH
'{' # 0xFB -> LEFT CURLY BRACKET, right-left
'|' # 0xFC -> VERTICAL LINE, right-left
'}' # 0xFD -> RIGHT CURLY BRACKET, right-left
'\u0698' # 0xFE -> ARABIC LETTER JEH
'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
)
### Encoding table
# Built by inverting decoding_table: maps Unicode ordinals back to the
# single mac-farsi byte that decodes to them.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
srickardti/openthread | tests/scripts/thread-cert/Cert_5_2_05_AddressQuery.py | 1 | 8021 | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import command
import config
import thread_cert
# Node identifiers for the simulated topology.
LEADER = 1
ROUTER1 = 2
BR = 3  # border router advertising the SLAAC prefix 2002::/64
ED1 = 17  # minimal end device (MTD child of the leader)
DUT_REED = 18  # device under test: a REED attached behind ROUTER1

# Seconds to wait past attach for router-selection jitter to elapse;
# matches the 'router_selection_jitter' value configured on each router.
ROUTER_SELECTION_JITTER = 1
class Cert_5_2_5_AddressQuery(thread_cert.TestCase):
    """Thread certification test 5.2.5: address query on a REED.

    Builds a saturated network (16 routers) plus one MTD child and one REED
    (the DUT), then verifies the REED does not upgrade to a router and that
    it answers address queries for its ML-EID and global addresses with
    Address Notification messages.
    """

    # Routers BR, 4-16 and the DUT share this configuration; only their
    # allowlists differ. One template avoids repeating the same keys
    # sixteen times.
    _ROUTER_DEFAULTS = {
        'mode': 'rdn',
        'panid': 0xface,
        'router_selection_jitter': 1,
    }

    # Built in node-id order (1, 2, 3, 4..16, 17, 18) to match the original
    # literal, in case the harness is sensitive to insertion order.
    TOPOLOGY = {
        LEADER: {
            'mode': 'rdn',
            'panid': 0xface,
            'allowlist': [ROUTER1, BR, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ED1]
        },
        ROUTER1: dict(_ROUTER_DEFAULTS, allowlist=[LEADER, DUT_REED]),
    }
    # BR and routers 4-16 all hang directly off the leader.
    for _node_id in [BR] + list(range(4, 17)):
        TOPOLOGY[_node_id] = dict(_ROUTER_DEFAULTS, allowlist=[LEADER])
    del _node_id
    TOPOLOGY[ED1] = {
        'is_mtd': True,
        'mode': 'rn',
        'panid': 0xface,
        'allowlist': [LEADER]
    }
    TOPOLOGY[DUT_REED] = dict(_ROUTER_DEFAULTS, allowlist=[ROUTER1])

    def test(self):
        # 1. LEADER: DHCPv6 Server for prefix 2001::/64.
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[LEADER].add_prefix('2001::/64', 'pdros')
        self.nodes[LEADER].register_netdata()
        self.simulator.set_lowpan_context(1, '2001::/64')

        # 2. BR: SLAAC Server for prefix 2002::/64.
        self.nodes[BR].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[BR].get_state(), 'router')
        self.nodes[BR].add_prefix('2002::/64', 'paros')
        self.nodes[BR].register_netdata()
        self.simulator.set_lowpan_context(2, '2002::/64')

        # 3. Bring up remaining devices except DUT_REED.
        for i in range(2, 17):
            if i == BR:
                continue
            self.nodes[i].start()
            self.simulator.go(5)
            self.assertEqual(self.nodes[i].get_state(), 'router')

        self.nodes[ED1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ED1].get_state(), 'child')

        # 4. Bring up DUT_REED and wait out the router-selection jitter.
        self.nodes[DUT_REED].start()
        self.simulator.go(5)
        self.simulator.go(ROUTER_SELECTION_JITTER)

        reed_messages = self.simulator.get_messages_sent_by(DUT_REED)

        # Verify DUT_REED doesn't try to become router (the network is full).
        msg = reed_messages.does_not_contain_coap_message()
        assert msg is True, "Error: The REED sent an Address Solicit Request"

        # 5. Enable a link between the DUT and BR to create a one-way link.
        self.nodes[DUT_REED].add_allowlist(self.nodes[BR].get_addr64())
        self.nodes[BR].add_allowlist(self.nodes[DUT_REED].get_addr64())

        # 6. Verify DUT_REED sends an Address Notification when its ML-EID
        # is pinged.
        mleid = self.nodes[DUT_REED].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
        self.assertTrue(self.nodes[ED1].ping(mleid))

        # Wait for sniffer collecting packets
        self.simulator.go(1)

        reed_messages = self.simulator.get_messages_sent_by(DUT_REED)
        msg = reed_messages.next_coap_message('0.02', '/a/an')
        command.check_address_notification(msg, self.nodes[DUT_REED], self.nodes[LEADER])

        # 7 & 8. Verify DUT_REED sends an Address Notification when pinged
        # on its 2001::EID and 2002::EID global addresses.
        flag2001 = 0
        flag2002 = 0

        for global_address in self.nodes[DUT_REED].get_ip6_address(config.ADDRESS_TYPE.GLOBAL):
            if global_address[0:4] == '2001':
                flag2001 += 1
            elif global_address[0:4] == '2002':
                flag2002 += 1
            else:
                # BUG FIX: the original raised a bare string, which is a
                # TypeError in Python 3 (exceptions must derive from
                # BaseException); raise a real exception instead.
                raise AssertionError("Error: Address is unexpected.")

            self.assertTrue(self.nodes[ED1].ping(global_address))

            # Wait for sniffer collecting packets
            self.simulator.go(1)

            reed_messages = self.simulator.get_messages_sent_by(DUT_REED)
            msg = reed_messages.next_coap_message('0.02', '/a/an')
            command.check_address_notification(msg, self.nodes[DUT_REED], self.nodes[LEADER])

        assert flag2001 == 1, "Error: Expecting address 2001::EID not appear."
        assert flag2002 == 1, "Error: Expecting address 2002::EID not appear."
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
catapult-project/catapult-csm | third_party/gsutil/third_party/boto/boto/file/simpleresultset.py | 264 | 1321 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class SimpleResultSet(list):
    """
    ResultSet facade built from a simple list, rather than via XML parsing.
    """

    def __init__(self, input_list):
        # Let list's own constructor copy the elements in one call instead
        # of appending them one at a time.
        super(SimpleResultSet, self).__init__(input_list)
        # Mirror the real ResultSet API: a plain list is never truncated.
        self.is_truncated = False
| bsd-3-clause |
Canonical-kernel/Ubuntu-kernel | tools/perf/scripts/python/check-perf-trace.py | 1997 | 2539 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any events are processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; dump any events that had
    # no dedicated handler.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, vec):
    # Handler for irq:softirq_entry; exercises symbol_str() translation of
    # the raw vec value into a readable softirq name.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    # Trailing comma: Python 2 print without an extra newline.
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for kmem:kmalloc; exercises flag_str() translation of the
    # gfp_flags bitmask into symbolic flag names.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count occurrences of events that have no dedicated handler.
    # NOTE(review): `unhandled` is an autodict (from Core); presumably a
    # fresh key yields a nested autodict, so the first `+=` raises
    # TypeError and we seed the count at 1 — confirm against Core.autodict.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One-line common-field prefix; the trailing comma suppresses the
    # newline so the handler can append its own fields.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
    (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These fields are fetched back from perf via the context object rather
    # than being passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print an "event / count" table of events with no dedicated handler,
    # as accumulated by trace_unhandled(). No-op if everything was handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
sander76/home-assistant | homeassistant/components/ovo_energy/__init__.py | 1 | 4806 | """Support for OVO Energy."""
from __future__ import annotations
from datetime import datetime, timedelta
import logging
from typing import Any
import aiohttp
import async_timeout
from ovoenergy import OVODailyUsage
from ovoenergy.ovoenergy import OVOEnergy
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_CLIENT, DATA_COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
    """Set up the OVO Energy components.

    Nothing to do for YAML configuration; the integration is configured
    exclusively through config entries (see async_setup_entry).
    """
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
    """Set up OVO Energy from a config entry.

    Authenticates against the OVO API, wires up a polling coordinator for
    daily usage data, stores both in hass.data, and forwards setup to the
    sensor platform.

    Raises:
        ConfigEntryNotReady: On transient network failure, so Home Assistant
            retries setup later.
    """
    client = OVOEnergy()

    try:
        authenticated = await client.authenticate(
            entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
        )
    except aiohttp.ClientError as exception:
        _LOGGER.warning(exception)
        raise ConfigEntryNotReady from exception

    if not authenticated:
        # Credentials rejected: start a re-auth flow and abort setup.
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": "reauth"}, data=entry.data
            )
        )
        return False

    async def async_update_data() -> OVODailyUsage:
        """Fetch data from OVO Energy."""
        async with async_timeout.timeout(10):
            # Re-authenticate on every poll; presumably the session token is
            # short-lived — TODO confirm against the ovoenergy client.
            try:
                authenticated = await client.authenticate(
                    entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD]
                )
            except aiohttp.ClientError as exception:
                raise UpdateFailed(exception) from exception
            if not authenticated:
                hass.async_create_task(
                    hass.config_entries.flow.async_init(
                        DOMAIN, context={"source": "reauth"}, data=entry.data
                    )
                )
                raise UpdateFailed("Not authenticated with OVO Energy")
            # Usage is keyed by month (YYYY-MM) in UTC.
            return await client.get_daily_usage(datetime.utcnow().strftime("%Y-%m"))

    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        # Name of the data. For logging purposes.
        name="sensor",
        update_method=async_update_data,
        # Polling interval. Will only be polled if there are subscribers.
        update_interval=timedelta(seconds=300),
    )

    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = {
        DATA_CLIENT: client,
        DATA_COORDINATOR: coordinator,
    }

    # Fetch initial data so we have data when entities subscribe
    await coordinator.async_config_entry_first_refresh()

    # Setup components
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "sensor")
    )

    return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
    """Unload OVO Energy config entry.

    Returns:
        True if the sensor platform unloaded cleanly, False otherwise.
    """
    # BUG FIX: the original ignored the unload result, dropped the entry's
    # stored data unconditionally, and always returned True. Only clean up
    # (and report success) when the platform actually unloaded, so a failed
    # unload can be retried. Also corrected the annotation: `entry` is a
    # ConfigEntry, not a ConfigType.
    unload_ok = await hass.config_entries.async_forward_entry_unload(entry, "sensor")
    if unload_ok:
        del hass.data[DOMAIN][entry.entry_id]
    return unload_ok
class OVOEnergyEntity(CoordinatorEntity):
    """Defines a base OVO Energy entity.

    Wraps a DataUpdateCoordinator so subclasses inherit polling and
    availability handling, and stores the shared OVOEnergy client plus
    static presentation attributes (unique key, name, icon).
    """

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        client: OVOEnergy,
        key: str,
        name: str,
        icon: str,
    ) -> None:
        """Initialize the OVO Energy entity.

        Args:
            coordinator: Shared coordinator that polls the OVO API.
            client: Authenticated OVOEnergy API client.
            key: Stable unique ID for this entity.
            name: Display name shown in the UI.
            icon: mdi icon identifier.
        """
        super().__init__(coordinator)
        self._client = client
        self._key = key
        self._name = name
        self._icon = icon
        # Per-entity availability flag, ANDed with the coordinator's
        # last_update_success in the `available` property.
        self._available = True

    @property
    def unique_id(self) -> str:
        """Return the unique ID for this sensor."""
        return self._key

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the mdi icon of the entity."""
        return self._icon

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return self.coordinator.last_update_success and self._available
class OVOEnergyDeviceEntity(OVOEnergyEntity):
    """Defines a OVO Energy device entity."""

    @property
    def device_info(self) -> dict[str, Any]:
        """Return device information about this OVO Energy instance."""
        return {
            # One logical device per OVO account, keyed by account id.
            "identifiers": {(DOMAIN, self._client.account_id)},
            "manufacturer": "OVO Energy",
            "name": self._client.username,
            # "service" marks this as a cloud service, not physical hardware.
            "entry_type": "service",
        }
| apache-2.0 |
admcrae/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear.py | 11 | 36922 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training as train
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.2
def _get_optimizer(spec):
  """Resolves an optimizer spec into an optimizer instance.

  `spec` may be an optimizer name (string), a zero-arg factory callable, or
  an already-constructed optimizer, which is returned unchanged.
  """
  if isinstance(spec, six.string_types):
    # Named optimizer: look up its class and build it with the default rate.
    optimizer_cls = layers.OPTIMIZER_CLS_NAMES[spec]
    return optimizer_cls(learning_rate=_LEARNING_RATE)
  if callable(spec):
    # Factory: invoke it to obtain the instance.
    return spec()
  # Already an optimizer object.
  return spec
# TODO(ispir): Remove this function by fixing '_infer_model' with single outputs
# and as_iteable case.
def _as_iterable(preds, output):
for pred in preds:
yield pred[output]
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
                     columns_to_variables):
  """Adds a fake bias feature column filled with all 1s.

  Args:
    feature_columns: Iterable of existing feature columns; must be non-empty
      and must not already use the reserved bias column name.
    columns_to_tensors: Dict mapping feature columns to input tensors;
      updated in place with the bias column's all-ones tensor.
    bias_variable: Variable backing the bias column.
    columns_to_variables: Dict mapping feature columns to variable lists;
      updated in place with the bias column.

  Raises:
    ValueError: If `feature_columns` is empty, uses the reserved name, or
      the batch size cannot be inferred from any input tensor.
  """
  # TODO(b/31008490): Move definition to a common constants place.
  bias_column_name = "tf_virtual_bias_column"
  if not feature_columns:
    raise ValueError("feature_columns can't be empty.")
  # BUG FIX: the original compared strings with `is` (identity), which only
  # works when CPython happens to intern both strings; use `==` (equality).
  if any(col.name == bias_column_name for col in feature_columns):
    raise ValueError("%s is a reserved column name." % bias_column_name)

  # Loop through input tensors until we can figure out batch_size.
  batch_size = None
  for column in columns_to_tensors.values():
    if isinstance(column, tuple):
      column = column[0]
    if isinstance(column, sparse_tensor.SparseTensor):
      shape = tensor_util.constant_value(column.dense_shape)
      if shape is not None:
        batch_size = shape[0]
        break
    else:
      # Dense input: batch size is the (possibly dynamic) leading dimension.
      batch_size = array_ops.shape(column)[0]
      break
  if batch_size is None:
    raise ValueError("Could not infer batch size from input features.")

  bias_column = layers.real_valued_column(bias_column_name)
  columns_to_tensors[bias_column] = array_ops.ones([batch_size, 1],
                                                   dtype=dtypes.float32)
  columns_to_variables[bias_column] = [bias_variable]
def _linear_model_fn(features, labels, mode, params, config=None):
  """A model_fn for linear models that use a gradient-based optimizer.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training. If `None`, will use a FTRL optimizer.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * joint_weights: If True, the weights for all columns will be stored in a
          single (possibly partitioned) variable. It's more efficient, but it's
          incompatible with SDCAOptimizer, and requires all feature columns are
          sparse and use the 'sum' combiner.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    A `ModelFnOps` instance.

  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params.get("optimizer") or _get_default_optimizer(feature_columns)
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  num_ps_replicas = config.num_ps_replicas if config else 0
  joint_weights = params.get("joint_weights", False)

  # A bare Tensor is treated as a single unnamed feature.
  if not isinstance(features, dict):
    features = {"": features}

  parent_scope = "linear"

  # Shard large variables across parameter servers, with a 64MB minimum
  # slice so small variables are not split.
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_scope(
      parent_scope,
      values=tuple(six.itervalues(features)),
      partitioner=partitioner) as scope:
    if joint_weights:
      layer_fn = layers.joint_weighted_sum_from_feature_columns
    else:
      layer_fn = layers.weighted_sum_from_feature_columns
    logits, _, _ = layer_fn(
        columns_to_tensors=features,
        feature_columns=feature_columns,
        num_outputs=head.logits_dimension,
        weight_collections=[parent_scope],
        scope=scope)

    def _train_op_fn(loss):
      # Builds the training op: optionally clip gradients by global norm
      # before applying them with the configured optimizer.
      global_step = contrib_variables.get_global_step()
      my_vars = ops.get_collection(parent_scope)
      grads = gradients.gradients(loss, my_vars)
      if gradient_clip_norm:
        grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
      return (_get_optimizer(optimizer).apply_gradients(
          zip(grads, my_vars), global_step=global_step))

    return head.create_model_fn_ops(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
def sdca_model_fn(features, labels, mode, params):
  """A model_fn for linear models that use the SDCA optimizer.

  Args:
    features: A dict of `Tensor` keyed by column name.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance. Type must be one of `_BinarySvmHead`,
          `_RegressionHead` or `_BinaryLogisticHead`.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: An `SDCAOptimizer` instance.
      * weight_column_name: A string defining the weight feature column, or
          None if there are no weights.
      * update_weights_hook: A `SessionRunHook` object or None. Used to update
          model weights.

  Returns:
    A `ModelFnOps` instance.

  Raises:
    ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
    ValueError: If the type of head is neither `_BinarySvmHead`, nor
      `_RegressionHead` nor `_BinaryLogisticHead`.
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  weight_column_name = params["weight_column_name"]
  update_weights_hook = params.get("update_weights_hook", None)

  if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
    raise ValueError("Optimizer must be of type SDCAOptimizer")

  # SDCA supports only a fixed set of losses; map the head type to the loss
  # name the optimizer understands.
  if isinstance(head, head_lib._BinarySvmHead):  # pylint: disable=protected-access
    loss_type = "hinge_loss"
  elif isinstance(head, head_lib._BinaryLogisticHead):  # pylint: disable=protected-access
    loss_type = "logistic_loss"
  elif isinstance(head, head_lib._RegressionHead):  # pylint: disable=protected-access
    assert head.logits_dimension == 1, ("SDCA only applies for "
                                        "logits_dimension=1.")
    loss_type = "squared_loss"
  else:
    raise ValueError("Unsupported head type: {}".format(head))

  parent_scope = "linear"

  with variable_scope.variable_op_scope(
      features.values(), parent_scope) as scope:
    # Copy before mutating: transform_features output is merged into the
    # feature dict passed to weighted_sum_from_feature_columns.
    features = features.copy()
    features.update(layers.transform_features(features, feature_columns))
    logits, columns_to_variables, bias = (
        layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            num_outputs=1,
            scope=scope))

    # SDCA trains the bias like any other column, so expose it as a
    # synthetic feature column.
    _add_bias_column(feature_columns, features, bias, columns_to_variables)

  def _train_op_fn(unused_loss):
    # SDCA computes its own loss internally; the head-provided loss is unused.
    global_step = contrib_variables.get_global_step()
    sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
                                                    weight_column_name,
                                                    loss_type, features,
                                                    labels, global_step)
    if update_weights_hook is not None:
      update_weights_hook.set_parameters(sdca_model, train_op)
    return train_op

  model_fn_ops = head.create_model_fn_ops(
      features=features,
      labels=labels,
      mode=mode,
      train_op_fn=_train_op_fn,
      logits=logits)
  if update_weights_hook is not None:
    # Run the weight update/shrink step on the chief only.
    return model_fn_ops._replace(
        training_chief_hooks=(model_fn_ops.training_chief_hooks +
                              [update_weights_hook]))
  return model_fn_ops
# Ensures consistency with LinearComposableModel.
def _get_default_optimizer(feature_columns):
  """Returns the default FTRL optimizer, rate-scaled by the column count."""
  scaled_rate = 1.0 / math.sqrt(len(feature_columns))
  return train.FtrlOptimizer(learning_rate=min(_LEARNING_RATE, scaled_rate))
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
  """Hook that updates and shrinks SDCA model weights during training."""

  def __init__(self):
    pass

  def set_parameters(self, sdca_model, train_op):
    """Stores the model and train op needed later to build the update op."""
    self._sdca_model, self._train_op = sdca_model, train_op

  def begin(self):
    """Builds the update_weights op.

    The op is implicitly added to the default graph.
    """
    self._update_op = self._sdca_model.update_weights(self._train_op)

  def before_run(self, run_context):
    """Requests that the update_weights op run as part of this step."""
    return session_run_hook.SessionRunArgs(self._update_op)
class LinearClassifier(estimator.Estimator):
  """Linear classifier model.

  Train a linear model to classify instances into one of multiple possible
  classes. When number of possible classes is 2, this is binary classification.

  Example:

  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_x_sparse_feature_b = crossed_column(...)

  # Estimator using the default optimizer.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])

  # Or estimator using the FTRL optimizer with regularization.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      optimizer=tf.train.FtrlOptimizer(
          learning_rate=0.1,
          l1_regularization_strength=0.001
      ))

  # Or estimator using the SDCAOptimizer.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id',
          num_loss_partitions=...,
          symmetric_l2_regularization=2.0
      ))

  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    ...
  def input_fn_eval: # returns x, y (where y represents label's class index).
    ...
  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,  # _joint_weight pylint: disable=invalid-name
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               _joint_weight=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearClassifier` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        Note that class labels are integers representing the class index (i.e.
        values from 0 to n_classes-1). For arbitrary label values (e.g. string
        labels), convert to class indices first.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: The optimizer used to train the model. If specified, it should
        be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
        the Ftrl optimizer will be used.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      _joint_weight: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Returns:
      A `LinearClassifier` estimator.

    Raises:
      ValueError: if n_classes < 2.
    """
    # TODO(zoy): Give an unsupported error if enable_centered_bias is
    # requested for SDCA once its default changes to False.
    # Kept on the instance because export()'s default input_fn parses
    # examples against these columns.
    self._feature_columns = tuple(feature_columns or [])
    assert self._feature_columns
    chief_hook = None
    # SDCA does not support centered bias; silently disable it and warn.
    if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
        enable_centered_bias):
      enable_centered_bias = False
      logging.warning("centered_bias is not supported with SDCA, "
                      "please disable it explicitly.")
    head = head_lib.multi_class_head(
        n_classes,
        weight_column_name=weight_column_name,
        enable_centered_bias=enable_centered_bias)
    params = {
        "head": head,
        "feature_columns": feature_columns,
        "optimizer": optimizer,
    }
    # Two model_fn flavors: SDCA needs its own train step and a chief-only
    # weight-update hook; everything else goes through _linear_model_fn.
    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      assert not _joint_weight, ("_joint_weight is incompatible with the"
                                 " SDCAOptimizer")
      assert n_classes == 2, "SDCA only applies to binary classification."

      model_fn = sdca_model_fn
      # The model_fn passes the model parameters to the chief_hook. We then use
      # the hook to update weights and shrink step only on the chief.
      chief_hook = _SdcaUpdateWeightsHook()
      params.update({
          "weight_column_name": weight_column_name,
          "update_weights_hook": chief_hook,
      })
    else:
      model_fn = _linear_model_fn
      params.update({
          "gradient_clip_norm": gradient_clip_norm,
          "joint_weights": _joint_weight,
      })

    super(LinearClassifier, self).__init__(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  @deprecated_arg_values(
      "2017-03-01",
      "Please switch to predict_classes, or set `outputs` argument.",
      outputs=None)
  def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
              as_iterable=True):
    """Returns predictions for given features.

    By default, returns predicted classes. But this default will be dropped
    soon. Users should either pass `outputs`, or call `predict_classes` method.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns classes.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
      If `outputs` is set, returns a dict of predictions.
    """
    if not outputs:
      return self.predict_classes(
          x=x,
          input_fn=input_fn,
          batch_size=batch_size,
          as_iterable=as_iterable)
    return super(LinearClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=outputs,
        as_iterable=as_iterable)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_classes(self, x=None, input_fn=None, batch_size=None,
                      as_iterable=True):
    """Returns predicted classes for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
    """
    key = prediction_key.PredictionKey.CLASSES
    preds = super(LinearClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(self, x=None, input_fn=None, batch_size=None,
                    as_iterable=True):
    """Returns predicted probabilities for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted probabilities with shape [batch_size, n_classes]
      (or an iterable of predicted probabilities if as_iterable is True).
    """
    key = prediction_key.PredictionKey.PROBABILITIES
    preds = super(LinearClassifier, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]

  @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""

    def default_input_fn(unused_estimator, examples):
      # Parses serialized tf.Examples using the columns given at construction.
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)

    return super(LinearClassifier, self).export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(signature_fn or
                      export.classification_signature_fn_with_prob),
        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
class LinearRegressor(estimator.Estimator):
  """Linear regressor model.

  Train a linear regression model to predict label value given observation of
  feature values.

  Example:

  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_x_sparse_feature_b = crossed_column(...)

  estimator = LinearRegressor(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])

  # Input builders
  def input_fn_train: # returns x, y
    ...
  def input_fn_eval: # returns x, y
    ...
  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a KeyError:

  * if `weight_column_name` is not `None`:
    key=weight_column_name, value=a `Tensor`
  * for column in `feature_columns`:
    - if isinstance(column, `SparseColumn`):
        key=column.name, value=a `SparseTensor`
    - if isinstance(column, `WeightedSparseColumn`):
        {key=id column name, value=a `SparseTensor`,
         key=weight column name, value=a `SparseTensor`}
    - if isinstance(column, `RealValuedColumn`):
        key=column.name, value=a `Tensor`
  """

  def __init__(self,  # _joint_weights: pylint: disable=invalid-name
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               label_dimension=1,
               _joint_weights=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearRegressor` estimator object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Ftrl optimizer.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      label_dimension: Number of regression targets per example. This is the
        size of the last dimension of the labels and logits `Tensor` objects
        (typically, these have shape `[batch_size, label_dimension]`).
      _joint_weights: If True use a single (possibly partitioned) variable to
        store the weights. It's faster, but requires all feature columns are
        sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Returns:
      A `LinearRegressor` estimator.
    """
    # Kept on the instance because export()'s default input_fn parses
    # examples against these columns.
    self._feature_columns = tuple(feature_columns or [])
    assert self._feature_columns
    chief_hook = None
    # SDCA does not support centered bias; silently disable it and warn.
    if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
        enable_centered_bias):
      enable_centered_bias = False
      logging.warning("centered_bias is not supported with SDCA, "
                      "please disable it explicitly.")
    head = head_lib.regression_head(
        weight_column_name=weight_column_name,
        label_dimension=label_dimension,
        enable_centered_bias=enable_centered_bias)
    params = {
        "head": head,
        "feature_columns": feature_columns,
        "optimizer": optimizer,
    }
    # Two model_fn flavors: SDCA needs its own train step and a chief-only
    # weight-update hook; everything else goes through _linear_model_fn.
    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      assert label_dimension == 1, "SDCA only applies for label_dimension=1."
      assert not _joint_weights, ("_joint_weights is incompatible with"
                                  " SDCAOptimizer.")

      model_fn = sdca_model_fn
      # The model_fn passes the model parameters to the chief_hook. We then use
      # the hook to update weights and shrink step only on the chief.
      chief_hook = _SdcaUpdateWeightsHook()
      params.update({
          "weight_column_name": weight_column_name,
          "update_weights_hook": chief_hook,
      })
    else:
      model_fn = _linear_model_fn
      params.update({
          "gradient_clip_norm": gradient_clip_norm,
          "joint_weights": _joint_weights,
      })

    super(LinearRegressor, self).__init__(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  @deprecated_arg_values(
      "2017-03-01",
      "Please switch to predict_scores, or set `outputs` argument.",
      outputs=None)
  def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
              as_iterable=True):
    """Returns predictions for given features.

    By default, returns predicted scores. But this default will be dropped
    soon. Users should either pass `outputs`, or call `predict_scores` method.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns scores.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted scores (or an iterable of predicted scores if
      as_iterable is True). If `label_dimension == 1`, the shape of the output
      is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
      If `outputs` is set, returns a dict of predictions.
    """
    if not outputs:
      return self.predict_scores(
          x=x,
          input_fn=input_fn,
          batch_size=batch_size,
          as_iterable=as_iterable)
    return super(LinearRegressor, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=outputs,
        as_iterable=as_iterable)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_scores(self, x=None, input_fn=None, batch_size=None,
                     as_iterable=True):
    """Returns predicted scores for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted scores (or an iterable of predicted scores if
      as_iterable is True). If `label_dimension == 1`, the shape of the output
      is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
    """
    key = prediction_key.PredictionKey.SCORES
    preds = super(LinearRegressor, self).predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]

  @deprecated("2017-03-25", "Please use Estimator.export_savedmodel() instead.")
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""

    def default_input_fn(unused_estimator, examples):
      # Parses serialized tf.Examples using the columns given at construction.
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)

    return super(LinearRegressor, self).export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(signature_fn or export.regression_signature_fn),
        prediction_key=prediction_key.PredictionKey.SCORES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
class LinearEstimator(estimator.Estimator):
  """Linear model with user specified head.

  Train a generalized linear model to predict label value given observation of
  feature values.

  Example:
  To do poisson regression,

  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_x_sparse_feature_b = crossed_column(...)

  estimator = LinearEstimator(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      head=head_lib.poisson_regression_head())

  # Input builders
  def input_fn_train: # returns x, y
    ...
  def input_fn_eval: # returns x, y
    ...
  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a KeyError:

  * if `weight_column_name` is not `None`:
    key=weight_column_name, value=a `Tensor`
  * for column in `feature_columns`:
    - if isinstance(column, `SparseColumn`):
        key=column.name, value=a `SparseTensor`
    - if isinstance(column, `WeightedSparseColumn`):
        {key=id column name, value=a `SparseTensor`,
         key=weight column name, value=a `SparseTensor`}
    - if isinstance(column, `RealValuedColumn`):
        key=column.name, value=a `Tensor`
  """

  def __init__(self,  # _joint_weights: pylint: disable=invalid-name
               feature_columns,
               head,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               _joint_weights=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearEstimator` object.

    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      head: An instance of _Head class.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Ftrl optimizer.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      _joint_weights: If True use a single (possibly partitioned) variable to
        store the weights. It's faster, but requires all feature columns are
        sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.

    Returns:
      A `LinearEstimator` estimator.

    Raises:
      ValueError: if `feature_columns` is empty.
      ValueError: if optimizer is not supported, e.g., SDCAOptimizer
    """
    # Validate with explicit exceptions rather than `assert`, which is
    # stripped under `python -O` and raises the wrong exception type. This
    # also matches the ValueError contract documented above and used for the
    # SDCA check below.
    if not feature_columns:
      raise ValueError("feature_columns must be defined.")
    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      raise ValueError("LinearEstimator does not support SDCA optimizer.")

    params = {
        "head": head,
        "feature_columns": feature_columns,
        "optimizer": optimizer,
        "gradient_clip_norm": gradient_clip_norm,
        "joint_weights": _joint_weights,
    }
    super(LinearEstimator, self).__init__(
        model_fn=_linear_model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
| apache-2.0 |
SivilTaram/edx-platform | lms/djangoapps/verify_student/migrations/0010_auto__del_field_softwaresecurephotoverification_window.py | 70 | 9298 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: drops the obsolete `window` FK from
    # SoftwareSecurePhotoVerification. The `models` dict below is the frozen
    # ORM state auto-generated by South; do not edit it by hand.

    def forwards(self, orm):
        """Drop the `window` foreign-key column."""
        # Deleting field 'SoftwareSecurePhotoVerification.window'
        db.delete_column('verify_student_softwaresecurephotoverification', 'window_id')

    def backwards(self, orm):
        """Re-add the `window` foreign-key column, defaulting to NULL."""
        # Add field 'SoftwareSecurePhotoVerification.window'. Setting its default value to None
        # MySQL needs raw SQL here so the column comes back nullable with a
        # plain NULL default; other backends go through South's add_column.
        if db.backend_name == 'mysql':
            db.execute('ALTER TABLE verify_student_softwaresecurephotoverification ADD `window_id` int(11) DEFAULT NULL;')
        else:
            db.add_column('verify_student_softwaresecurephotoverification', 'window',
                          self.gf('django.db.models.fields.related.ForeignKey')(to=orm['reverification.MidcourseReverificationWindow'], null=True),
                          keep_default=False)

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'verify_student.incoursereverificationconfiguration': {
            'Meta': {'ordering': "('-change_date',)", 'object_name': 'InCourseReverificationConfiguration'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'verify_student.skippedreverification': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'SkippedReverification'},
            'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skipped_checkpoint'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'verify_student.softwaresecurephotoverification': {
            'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
            'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'c6b63663-5694-49b2-ae71-494b9afee0cf'", 'max_length': '255', 'db_index': 'True'}),
            'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
            'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
            'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'verify_student.verificationcheckpoint': {
            'Meta': {'unique_together': "(('course_id', 'checkpoint_location'),)", 'object_name': 'VerificationCheckpoint'},
            'checkpoint_location': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'photo_verification': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['verify_student.SoftwareSecurePhotoVerification']", 'symmetrical': 'False'})
        },
        'verify_student.verificationstatus': {
            'Meta': {'object_name': 'VerificationStatus'},
            'checkpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'checkpoint_status'", 'to': "orm['verify_student.VerificationCheckpoint']"}),
            'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['verify_student']
| agpl-3.0 |
windygu/xbmc-addon | plugin.video.xunlei/mechanize/_firefox3cookiejar.py | 134 | 8345 | """Firefox 3 "cookies.sqlite" cookie persistence.
Copyright 2008 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import logging
import time
from _clientcookie import CookieJar, Cookie, MappingIterator
from _util import isstringlike, experimental
debug = logging.getLogger("mechanize.cookies").debug
class Firefox3CookieJar(CookieJar):

    """Firefox 3 cookie jar.

    The cookies are stored in Firefox 3's "cookies.sqlite" format.

    Constructor arguments:

    filename: filename of cookies.sqlite (typically found at the top level
     of a firefox profile directory)
    autoconnect: as a convenience, connect to the SQLite cookies database at
     Firefox3CookieJar construction time (default True)
    policy: an object satisfying the mechanize.CookiePolicy interface

    Note that this is NOT a FileCookieJar, and there are no .load(),
    .save() or .restore() methods.  The database is in sync with the
    cookiejar object's state after each public method call.

    Following Firefox's own behaviour, session cookies are never saved to
    the database.

    The file is created, and an sqlite database written to it, if it does
    not already exist. The moz_cookies database table is created if it does
    not already exist.
    """

    # XXX
    # handle DatabaseError exceptions
    # add a FileCookieJar (explicit .save() / .revert() / .load() methods)

    def __init__(self, filename, autoconnect=True, policy=None):
        experimental("Firefox3CookieJar is experimental code")
        CookieJar.__init__(self, policy)
        if filename is not None and not isstringlike(filename):
            raise ValueError("filename must be string-like")
        self.filename = filename

        self._conn = None
        if autoconnect:
            self.connect()

    def connect(self):
        # Open (and if necessary create) the cookies database.
        import sqlite3  # not available in Python 2.4 stdlib
        self._conn = sqlite3.connect(self.filename)
        # DEFERRED: no write lock is taken until the first write statement,
        # so read-only use does not block other processes.
        self._conn.isolation_level = "DEFERRED"
        self._create_table_if_necessary()

    def close(self):
        # Close the database connection; the jar is unusable afterwards.
        self._conn.close()

    def _transaction(self, func):
        # Run func(cursor) inside a transaction: commit on success,
        # roll back on any exception and re-raise it.
        try:
            cur = self._conn.cursor()
            try:
                result = func(cur)
            finally:
                cur.close()
        except:
            # bare except on purpose: roll back even on KeyboardInterrupt,
            # then propagate whatever was raised
            self._conn.rollback()
            raise
        else:
            self._conn.commit()
        return result

    def _execute(self, query, params=()):
        # Convenience: run a single write statement in its own transaction.
        return self._transaction(lambda cur: cur.execute(query, params))

    def _query(self, query, params=()):
        # Read-only helper returning all rows for the given statement.
        # XXX should we bother with a transaction?
        cur = self._conn.cursor()
        try:
            cur.execute(query, params)
            return cur.fetchall()
        finally:
            cur.close()

    def _create_table_if_necessary(self):
        # Schema mirrors Firefox 3's own moz_cookies table.
        self._execute("""\
CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
    value TEXT, host TEXT, path TEXT,expiry INTEGER,
    lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")

    def _cookie_from_row(self, row):
        # Build a mechanize Cookie from one moz_cookies row.
        (pk, name, value, domain, path, expires,
         last_accessed, secure, http_only) = row

        # Firefox stores everything as version-0 (Netscape) cookies.
        version = 0
        domain = domain.encode("ascii", "ignore")
        path = path.encode("ascii", "ignore")
        name = name.encode("ascii", "ignore")
        value = value.encode("ascii", "ignore")
        secure = bool(secure)

        # last_accessed isn't a cookie attribute, so isn't added to rest
        rest = {}
        if http_only:
            rest["HttpOnly"] = None

        # Firefox stores nameless cookies as name="" with the value in the
        # value column; mechanize represents them the other way around.
        if name == "":
            name = value
            value = None

        initial_dot = domain.startswith(".")
        domain_specified = initial_dot

        # An empty expiry marks a session cookie (see _row_from_cookie).
        discard = False
        if expires == "":
            expires = None
            discard = True

        return Cookie(version, name, value,
                      None, False,
                      domain, domain_specified, initial_dot,
                      path, False,
                      secure,
                      expires,
                      discard,
                      None,
                      None,
                      rest)

    def clear(self, domain=None, path=None, name=None):
        # Remove matching cookies from both the in-memory jar and the
        # database; each argument narrows the match when not None.
        CookieJar.clear(self, domain, path, name)
        where_parts = []
        sql_params = []
        if domain is not None:
            where_parts.append("host = ?")
            sql_params.append(domain)
        if path is not None:
            where_parts.append("path = ?")
            sql_params.append(path)
        if name is not None:
            where_parts.append("name = ?")
            sql_params.append(name)
        where = " AND ".join(where_parts)
        if where:
            where = " WHERE " + where
        def clear(cur):
            cur.execute("DELETE FROM moz_cookies%s" % where,
                        tuple(sql_params))
        self._transaction(clear)

    def _row_from_cookie(self, cookie, cur):
        # Inverse of _cookie_from_row: serialize a Cookie to a row tuple.
        expires = cookie.expires
        if cookie.discard:
            expires = ""  # empty expiry marks a session cookie

        domain = unicode(cookie.domain)
        path = unicode(cookie.path)
        name = unicode(cookie.name)
        value = unicode(cookie.value)
        secure = bool(int(cookie.secure))

        # mechanize's value=None nameless form -> Firefox's name="" form
        if value is None:
            value = name
            name = ""

        last_accessed = int(time.time())
        http_only = cookie.has_nonstandard_attr("HttpOnly")

        # Allocate the next primary key ourselves.
        # NOTE(review): not safe against concurrent writers; assumes a
        # single process owns the database while the jar is connected.
        query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
        pk = query.fetchone()[0]
        if pk is None:
            pk = 1

        return (pk, name, value, domain, path, expires,
                last_accessed, secure, http_only)

    def set_cookie(self, cookie):
        # Session cookies stay in memory only (matching Firefox behaviour);
        # persistent cookies go straight to the database.
        if cookie.discard:
            CookieJar.set_cookie(self, cookie)
            return

        def set_cookie(cur):
            # XXX
            # is this RFC 2965-correct?
            # could this do an UPDATE instead?
            # delete-then-insert implements "replace" semantics
            row = self._row_from_cookie(cookie, cur)
            name, unused, domain, path = row[1:5]
            cur.execute("""\
DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
                        (domain, path, name))
            cur.execute("""\
INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", row)
        self._transaction(set_cookie)

    def __iter__(self):
        # Yield session cookies first, then persistent cookies from the DB.
        # session (non-persistent) cookies
        for cookie in MappingIterator(self._cookies):
            yield cookie
        # persistent cookies
        for row in self._query("""\
SELECT * FROM moz_cookies ORDER BY name, path, host"""):
            yield self._cookie_from_row(row)

    def _cookies_for_request(self, request):
        # Combine in-memory session cookies with DB-backed persistent
        # cookies applicable to this request.
        session_cookies = CookieJar._cookies_for_request(self, request)
        def get_cookies(cur):
            query = cur.execute("SELECT host from moz_cookies")
            domains = [row[0] for row in query.fetchall()]
            cookies = []
            for domain in domains:
                cookies += self._persistent_cookies_for_domain(domain,
                                                              request, cur)
            return cookies
        persistent_coookies = self._transaction(get_cookies)
        return session_cookies + persistent_coookies

    def _persistent_cookies_for_domain(self, domain, request, cur):
        # Return DB cookies for one domain that pass the policy checks.
        cookies = []
        if not self._policy.domain_return_ok(domain, request):
            return []
        debug("Checking %s for cookies to return", domain)
        query = cur.execute("""\
SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
                            (domain,))
        cookies = [self._cookie_from_row(row) for row in query.fetchall()]
        last_path = None
        r = []
        for cookie in cookies:
            # Rows are ordered by path, so the path check result is reused
            # for consecutive cookies sharing the same path.
            if (cookie.path != last_path and
                not self._policy.path_return_ok(cookie.path, request)):
                last_path = cookie.path
                continue
            if not self._policy.return_ok(cookie, request):
                debug("   not returning cookie")
                continue
            debug("   it's a match")
            r.append(cookie)
        return r
| apache-2.0 |
floyd-fuh/afl-crash-analyzer | utilities/OutputUtility.py | 2 | 2074 | #!/usr/bin/env python2.7
'''
AFL crash analyzer, crash triage for the American Fuzzy Lop fuzzer
Copyright (C) 2015 floyd
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created on Apr 13, 2015
@author: floyd, http://floyd.ch, @floyd_ch
'''
import os
def get_new_output_file_name(path, filename, extension, max_digets):
    """Return a file name (with extension) that does not yet exist in path.

    Tries filename+extension first; while that name exists, appends a
    zero-padded counter of width max_digets to filename and retries.
    Gives up once the counter would need more than max_digets digits and
    returns the last candidate even if it exists.
    """
    # Loop-invariant format string, e.g. max_digets=3 -> "%03d".
    formatstr = "%0" + str(max_digets) + "d"
    new_filename = filename
    i = 1
    while os.path.exists(os.path.join(path, new_filename + extension)) and i < 10**max_digets:
        new_filename = filename + (formatstr % i)
        i += 1
    return new_filename + extension
def list_as_intervals(li, as_hex=False):
    """Render a list of integers as a compact string of closed intervals.

    Example: [1, 2, 3, 7] -> "1-3, 7".  Duplicates are removed and the
    input is sorted first.  With as_hex=True numbers are formatted via
    hex().  NOTE: a single-element list is always rendered in decimal,
    matching the historical behaviour of this function.
    """
    def _fmt(n):
        # Render one number in the requested base.
        return hex(n) if as_hex else str(n)

    def _flush(start, last, out):
        # Append the finished run as "start" or "start-last".
        if start == last:
            out.append(_fmt(start))
        else:
            out.append(_fmt(start) + "-" + _fmt(last))

    li = list(set(li))
    if len(li) == 1:
        return str(li[0])
    elif len(li) == 0:
        return ""
    li.sort()
    out = []
    last = li[0]
    start = last
    for x in li[1:]:
        if x - last > 1:
            # Gap found: close the current run and start a new one.
            _flush(start, last, out)
            start = x
        last = x
    _flush(start, last, out)  # close the final run
    return ", ".join(out)
mskrzypkows/servo | tests/wpt/web-platform-tests/tools/six/documentation/conf.py | 420 | 7015 | # -*- coding: utf-8 -*-
#
# six documentation build configuration file
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.intersphinx"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"six"
copyright = u"2010-2014, Benjamin Peterson"

# Temporarily put the in-tree six package on sys.path so its version can
# be read without installing it first; the entry is popped immediately so
# the hack does not leak into the rest of the Sphinx build.
sys.path.append(os.path.abspath(os.path.join(".", "..")))
from six import __version__ as six_version
sys.path.pop()

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Drops the last two characters, e.g. "1.8.0" -> "1.8" -- assumes a
# single-digit micro version.
version = six_version[:-2]
# The full version, including alpha/beta/rc tags.
release = six_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sixdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "six.tex", u"six Documentation",
u"Benjamin Peterson", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "six", u"six Documentation",
[u"Benjamin Peterson"], 1)
]
# -- Intersphinx ---------------------------------------------------------------
intersphinx_mapping = {"py2" : ("https://docs.python.org/2/", None),
"py3" : ("https://docs.python.org/3/", None)}
| mpl-2.0 |
yhj630520/zookeeper | src/contrib/huebrowser/zkui/src/zkui/stats.py | 114 | 4911 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import re
from StringIO import StringIO
class Session(object):
    """One client session line from the ZooKeeper 'stat' command output.

    Parses lines of the form
        /10.0.0.1:51234[1](queued=0,recved=3,sent=2)
    into .host, .port and .interest_ops, plus one attribute per key=value
    pair inside the parentheses.  All values are kept as strings.

    Raises Session.BrokenLine when the line does not match this format.
    """

    class BrokenLine(Exception):
        """Raised for lines that do not look like a session entry."""
        pass

    def __init__(self, session):
        # Raw string avoids invalid-escape-sequence warnings on modern
        # Pythons; the pattern itself is unchanged.
        m = re.search(r'/(\d+\.\d+\.\d+\.\d+):(\d+)\[(\d+)\]\((.*)\)', session)
        if m:
            self.host = m.group(1)
            self.port = m.group(2)
            self.interest_ops = m.group(3)
            # Remaining stats become attributes, e.g. self.queued = "0".
            for d in m.group(4).split(","):
                k, v = d.split("=")
                self.__dict__[k] = v
        else:
            raise Session.BrokenLine()
class ZooKeeperStats(object):
    """Collect statistics from a ZooKeeper server.

    Talks to the server over a plain TCP socket using the ZooKeeper
    "four letter word" admin commands ('mntr' and 'stat').
    """

    def __init__(self, host='localhost', port='2181', timeout=1):
        # timeout: socket timeout in seconds for connect/recv
        self._address = (host, int(port))
        self._timeout = timeout

    def get_stats(self):
        """ Get ZooKeeper server stats as a map """
        # Prefer the machine-readable 'mntr' command; fall back to
        # parsing the human-oriented 'stat' output for older servers.
        data = self._send_cmd('mntr')
        if data:
            return self._parse(data)
        else:
            data = self._send_cmd('stat')
            return self._parse_stat(data)

    def get_clients(self):
        """ Get ZooKeeper server clients """
        clients = []

        stat = self._send_cmd('stat')
        if not stat:
            return clients

        sio = StringIO(stat)

        #skip two lines
        sio.readline()
        sio.readline()

        for line in sio:
            if not line.strip():
                break  # blank line ends the client list
            try:
                clients.append(Session(line.strip()))
            except Session.BrokenLine:
                # skip lines that do not look like session entries
                continue

        return clients

    def _create_socket(self):
        # Separate method so tests can substitute a fake socket.
        return socket.socket()

    def _send_cmd(self, cmd):
        """ Send a 4letter word command to the server """
        s = self._create_socket()
        s.settimeout(self._timeout)

        s.connect(self._address)
        # NOTE(review): Python 2 str payload; would need bytes on Python 3.
        s.send(cmd)

        data = s.recv(2048)
        s.close()

        return data

    def _parse(self, data):
        """ Parse the output from the 'mntr' 4letter word command """
        h = StringIO(data)

        result = {}
        for line in h.readlines():
            try:
                key, value = self._parse_line(line)
                result[key] = value
            except ValueError:
                pass # ignore broken lines

        return result

    def _parse_stat(self, data):
        """ Parse the output from the 'stat' 4letter word command """
        h = StringIO(data)

        result = {}

        # First line looks like "Zookeeper version: <version>, ..."
        version = h.readline()
        if version:
            result['zk_version'] = version[version.index(':')+1:].strip()

        # skip all lines until we find the empty one
        while h.readline().strip(): pass

        # Remaining lines are "Key: value" stats; match each known key.
        for line in h.readlines():
            m = re.match('Latency min/avg/max: (\d+)/(\d+)/(\d+)', line)
            if m is not None:
                result['zk_min_latency'] = int(m.group(1))
                result['zk_avg_latency'] = int(m.group(2))
                result['zk_max_latency'] = int(m.group(3))
                continue

            m = re.match('Received: (\d+)', line)
            if m is not None:
                result['zk_packets_received'] = int(m.group(1))
                continue

            m = re.match('Sent: (\d+)', line)
            if m is not None:
                result['zk_packets_sent'] = int(m.group(1))
                continue

            m = re.match('Outstanding: (\d+)', line)
            if m is not None:
                result['zk_outstanding_requests'] = int(m.group(1))
                continue

            m = re.match('Mode: (.*)', line)
            if m is not None:
                result['zk_server_state'] = m.group(1)
                continue

            m = re.match('Node count: (\d+)', line)
            if m is not None:
                result['zk_znode_count'] = int(m.group(1))
                continue

        return result

    def _parse_line(self, line):
        # Split one tab-separated "key<TAB>value" line from 'mntr' output;
        # values that look numeric are converted to int.
        try:
            key, value = map(str.strip, line.split('\t'))
        except ValueError:
            raise ValueError('Found invalid line: %s' % line)

        if not key:
            raise ValueError('The key is mandatory and should not be empty')

        try:
            value = int(value)
        except (TypeError, ValueError):
            pass

        return key, value
| apache-2.0 |
vitmod/enigma2-1 | lib/python/Components/Pixmap.py | 54 | 4329 | from ConditionalWidget import ConditionalWidget
from GUIComponent import GUIComponent
from enigma import ePixmap, eTimer
from Tools.Directories import resolveFilename, fileExists, SCOPE_SKIN_IMAGE, SCOPE_ACTIVE_SKIN, SCOPE_ACTIVE_LCDSKIN
from os import path
from skin import loadPixmap
class Pixmap(GUIComponent):
    # Thin GUIComponent wrapper around the enigma ePixmap widget.
    GUI_WIDGET = ePixmap

    def getSize(self):
        # Return the widget's size as a (width, height) tuple.
        s = self.instance.size()
        return s.width(), s.height()
class PixmapConditional(ConditionalWidget, Pixmap):
    # Pixmap whose visibility is driven by ConditionalWidget's condition.
    def __init__(self, withTimer = True):
        # NOTE(review): withTimer is accepted but never forwarded to
        # ConditionalWidget.__init__ -- confirm whether that is intended.
        ConditionalWidget.__init__(self)
        Pixmap.__init__(self)
class MovingPixmap(Pixmap):
    """Pixmap that can be animated along a path of waypoints.

    The path is a list of (x, y, time) tuples; 'time' is the number of
    timer ticks (the timer fires every 100 ms) used to reach that point.
    """
    def __init__(self):
        Pixmap.__init__(self)
        self.moving = False

        # TODO: get real values
        self.x = 0.0
        self.y = 0.0

        self.clearPath()

        self.moveTimer = eTimer()
        self.moveTimer.callback.append(self.doMove)

    def clearPath(self, repeated = False):
        # Drop all waypoints; repeated=True makes future paths loop.
        if self.moving:
            self.moving = False
            self.moveTimer.stop()

        self.path = []
        self.currDest = 0
        self.repeated = repeated

    def addMovePoint(self, x, y, time = 20):
        # Append a waypoint reached after 'time' timer ticks.
        self.path.append((x, y, time))

    def moveTo(self, x, y, time = 20):
        # Replace the current path with a single-destination move.
        self.clearPath()
        self.addMovePoint(x, y, time)

    def startMoving(self):
        # Compute per-tick step towards the current destination and start
        # the 100 ms timer.  Assumes at least one waypoint has been added.
        if not self.moving:
            self.time = self.path[self.currDest][2]
            self.stepX = (self.path[self.currDest][0] - self.x) / float(self.time)
            self.stepY = (self.path[self.currDest][1] - self.y) / float(self.time)

            self.moving = True
            self.moveTimer.start(100)

    def stopMoving(self):
        self.moving = False
        self.moveTimer.stop()

    def doMove(self):
        # Timer callback: advance one step and, when the destination is
        # reached, either continue with the next waypoint, restart the
        # path (repeated mode), or stop.
        self.x += self.stepX
        self.y += self.stepY
        self.time -= 1
        try:
            self.move(int(self.x), int(self.y))
        except: # moving not possible... widget not there any more... stop moving
            self.stopMoving()

        if self.time == 0:
            self.currDest += 1
            self.moveTimer.stop()
            self.moving = False
            if self.currDest >= len(self.path): # end of path
                if self.repeated:
                    self.currDest = 0
                    self.moving = False
                    self.startMoving()
            else:
                self.moving = False
                self.startMoving()
class MultiPixmap(Pixmap):
    """Pixmap that can switch between several preloaded images.

    The skin's "pixmaps" attribute lists the image files; the first one
    also becomes the widget's initial "pixmap".  setPixmapNum() switches
    the displayed image at runtime.
    """
    def __init__(self):
        Pixmap.__init__(self)
        self.pixmaps = []

    def applySkin(self, desktop, screen):
        # Resolve every listed image against the active skin first, then
        # the generic skin image scope, then the active LCD skin, and
        # preload them into self.pixmaps.
        if self.skinAttributes is not None:
            skin_path_prefix = getattr(screen, "skin_path", path)
            pixmap = None
            attribs = [ ]
            for (attrib, value) in self.skinAttributes:
                if attrib == "pixmaps":
                    pixmaps = value.split(',')
                    for p in pixmaps:
                        # Fallback chain: active skin -> skin image -> LCD skin.
                        pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, p, path_prefix=skin_path_prefix)
                        if fileExists(resolveFilename(SCOPE_SKIN_IMAGE, p, path_prefix=skin_path_prefix)):
                            pngfile = resolveFilename(SCOPE_SKIN_IMAGE, p, path_prefix=skin_path_prefix)
                        elif fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, p, path_prefix=skin_path_prefix)):
                            pngfile = resolveFilename(SCOPE_ACTIVE_LCDSKIN, p, path_prefix=skin_path_prefix)
                        if path.exists(pngfile):
                            self.pixmaps.append(loadPixmap(pngfile, desktop))
                    if not pixmap:
                        # Default static pixmap is the first listed image.
                        pixmap = resolveFilename(SCOPE_ACTIVE_SKIN, pixmaps[0], path_prefix=skin_path_prefix)
                        if fileExists(resolveFilename(SCOPE_SKIN_IMAGE, pixmaps[0], path_prefix=skin_path_prefix)):
                            pixmap = resolveFilename(SCOPE_SKIN_IMAGE, pixmaps[0], path_prefix=skin_path_prefix)
                        elif fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, pixmaps[0], path_prefix=skin_path_prefix)):
                            pixmap = resolveFilename(SCOPE_ACTIVE_LCDSKIN, pixmaps[0], path_prefix=skin_path_prefix)
                elif attrib == "pixmap":
                    pixmap = resolveFilename(SCOPE_ACTIVE_SKIN, value, path_prefix=skin_path_prefix)
                    if fileExists(resolveFilename(SCOPE_SKIN_IMAGE, value, path_prefix=skin_path_prefix)):
                        pixmap = resolveFilename(SCOPE_SKIN_IMAGE, value, path_prefix=skin_path_prefix)
                    elif fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, value, path_prefix=skin_path_prefix)):
                        pixmap = resolveFilename(SCOPE_ACTIVE_LCDSKIN, value, path_prefix=skin_path_prefix)
                else:
                    attribs.append((attrib,value))
            if pixmap:
                attribs.append(("pixmap", pixmap))
            self.skinAttributes = attribs
        return GUIComponent.applySkin(self, desktop, screen)

    def setPixmapNum(self, x):
        # Display the x-th preloaded pixmap, if it exists.
        if self.instance:
            if len(self.pixmaps) > x:
                self.instance.setPixmap(self.pixmaps[x])
            else:
                print "setPixmapNum(%d) failed! defined pixmaps:" % x, self.pixmaps
| gpl-2.0 |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qtagg.py | 73 | 4972 | """
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib import verbose
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt import qt, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
    """
    Create a new figure manager instance
    """
    if DEBUG: print 'backend_qtagg.new_figure_manager'
    # Callers may supply a Figure subclass via the FigureClass kwarg.
    FigureClass = kwargs.pop('FigureClass', Figure)
    thisFig = FigureClass( *args, **kwargs )
    canvas = FigureCanvasQTAgg( thisFig )
    return FigureManagerQTAgg( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
    # Qt navigation toolbar specialized to create Agg-backed canvases.
    def _get_canvas(self, fig):
        return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
    # Figure manager that attaches the Agg-specific toolbar.
    def _get_toolbar(self, canvas, parent):
        # must be inited after the window, drawingArea and figure
        # attrs are set
        # Toolbar flavour is chosen by the 'toolbar' rc parameter.
        if matplotlib.rcParams['toolbar']=='classic':
            print "Classic toolbar is not yet supported"
        elif matplotlib.rcParams['toolbar']=='toolbar2':
            toolbar = NavigationToolbar2QTAgg(canvas, parent)
        else:
            toolbar = None
        return toolbar
class FigureCanvasQTAgg( FigureCanvasAgg, FigureCanvasQT ):
    """
    The canvas the figure renders into.  Calls the draw and print fig
    methods, creates the renderers, etc...

    Public attribute

      figure - A Figure instance
   """

    def __init__( self, figure ):
        if DEBUG: print 'FigureCanvasQtAgg: ', figure
        FigureCanvasQT.__init__( self, figure )
        FigureCanvasAgg.__init__( self, figure )
        self.drawRect = False   # whether to paint the zoom rectangle
        self.rect = []          # zoom rectangle geometry (x, y, w, h)
        # replot is either a bool (full redraw needed?) or a bbox to blit
        self.replot = True
        self.pixmap = qt.QPixmap()

    def resizeEvent( self, e ):
        FigureCanvasQT.resizeEvent( self, e )

    def drawRectangle( self, rect ):
        # Remember the rubber-band rectangle and trigger a repaint.
        self.rect = rect
        self.drawRect = True
        # False in repaint does not clear the image before repainting
        self.repaint( False )

    def paintEvent( self, e ):
        """
        Draw to the Agg backend and then copy the image to the qt.drawable.
        In Qt, all drawing should be done inside of here when a widget is
        shown onscreen.
        """

        FigureCanvasQT.paintEvent( self, e )
        if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
           self.get_width_height()

        p = qt.QPainter( self )

        # only replot data when needed
        if type(self.replot) is bool: # might be a bbox for blitting
            if self.replot:
                FigureCanvasAgg.draw( self )
                #stringBuffer = str( self.buffer_rgba(0,0) )

            # matplotlib is in rgba byte order.
            # qImage wants to put the bytes into argb format and
            # is in a 4 byte unsigned int.  little endian system is LSB first
            # and expects the bytes in reverse order (bgra).
            if ( qt.QImage.systemByteOrder() == qt.QImage.LittleEndian ):
                stringBuffer = self.renderer._renderer.tostring_bgra()
            else:
                stringBuffer = self.renderer._renderer.tostring_argb()
            qImage = qt.QImage( stringBuffer, self.renderer.width,
                                self.renderer.height, 32, None, 0,
                                qt.QImage.IgnoreEndian )
            self.pixmap.convertFromImage( qImage, qt.QPixmap.Color )

            p.drawPixmap( qt.QPoint( 0, 0 ), self.pixmap )

            # draw the zoom rectangle to the QPainter
            if ( self.drawRect ):
                p.setPen( qt.QPen( qt.Qt.black, 1, qt.Qt.DotLine ) )
                p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )

        # we are blitting here
        else:
            # self.replot holds the bounding box of the damaged region;
            # copy just that region from the Agg buffer to the widget.
            bbox = self.replot
            l, b, r, t = bbox.extents
            w = int(r) - int(l)
            h = int(t) - int(b)
            reg = self.copy_from_bbox(bbox)
            stringBuffer = reg.to_string_argb()
            qImage = qt.QImage(stringBuffer, w, h, 32, None, 0, qt.QImage.IgnoreEndian)
            self.pixmap.convertFromImage(qImage, qt.QPixmap.Color)
            # Qt's y axis points down, Agg's points up, hence height-t.
            p.drawPixmap(qt.QPoint(l, self.renderer.height-t), self.pixmap)

        p.end()
        self.replot = False
        self.drawRect = False

    def draw( self ):
        """
        Draw the figure when xwindows is ready for the update
        """

        if DEBUG: print "FigureCanvasQtAgg.draw", self
        self.replot = True
        FigureCanvasAgg.draw(self)
        self.repaint(False)

    def blit(self, bbox=None):
        """
        Blit the region in bbox
        """
        # Stash the bbox; the actual copy happens in paintEvent.
        self.replot = bbox
        self.repaint(False)

    def print_figure(self, *args, **kwargs):
        FigureCanvasAgg.print_figure(self, *args, **kwargs)
        self.draw()
| agpl-3.0 |
activityhistory/selfspy | selfspy/sniff_win.py | 1 | 4737 | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
"""
Selfspy: Track your computer activity
Copyright (C) 2012 Morten Linderud
Modified 2014 by Adam Rule, Aurélien Tabard, and Jonas Keper
Selfspy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Selfspy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Selfspy. If not, see <http://www.gnu.org/licenses/>.
"""
import pyHook
import pythoncom
import sys
import threading
import ctypes
class SnifferThread(threading.Thread):
    """Worker thread that installs pyHook keyboard/mouse hooks.

    pythoncom.PumpMessages must run in the thread that registered the
    hooks, which is why all event pumping happens inside run().
    """
    def __init__(self, hook):
        threading.Thread.__init__(self)
        self.daemon = True  # don't keep the process alive on shutdown
        self.encoding = sys.stdin.encoding

        # Hook callbacks; replaced by the owning Sniffer before start().
        self.key_hook = lambda x: True
        self.mouse_button_hook = lambda x: True
        self.mouse_move_hook = lambda x: True
        self.screen_hook = lambda x: True

        # Map extended Latin-1 key codes to their unicode characters
        # (letters chr() would not decode correctly).
        self.remap = {
            248: u"\xf8",
            216: u"\xd8",
            230: u"\xe6",
            198: u"\xc6",
            229: u"\xe5",
            197: u"\xc5"
        }
        self.hm = hook

    def run(self):
        # Register handlers, install the hooks, and pump Windows
        # messages; this call blocks until PostQuitMessage is issued.
        self.hm.KeyDown = self.KeyboardEvent
        self.hm.MouseAllButtonsDown = self.MouseButtons
        self.hm.MouseMove = self.MouseMove
        self.hm.HookKeyboard()
        self.hm.HookMouse()
        pythoncom.PumpMessages()

    def MouseButtons(self, event):
        # Forward button presses (1=left, 2=middle, 3=right) plus the
        # active window to the screen hook.
        loc = event.Position

        if event.MessageName == "mouse right down":
            self.mouse_button_hook(3, loc[0], loc[1],)
        if event.MessageName == "mouse left down":
            self.mouse_button_hook(1, loc[0], loc[1])
        if event.MessageName == "mouse middle down":
            self.mouse_button_hook(2, loc[0], loc[1])

        try:
            string_event = event.WindowName.decode(self.encoding)
        except AttributeError:
            string_event = ""  # window may have no name

        self.screen_hook(str(event.Window), string_event, loc[0], loc[1], 0, 0)
        return True  # True lets the event propagate to other applications

    def MouseMove(self, event):
        # Forward movement; wheel scrolls are reported as buttons 4 (up)
        # and 5 (down), following the X11 convention.
        loc = event.Position
        if event.MessageName == "mouse move":
            self.mouse_move_hook(loc[0], loc[1])
        if event.MessageName == "mouse wheel":
            if event.Wheel == -1:
                self.mouse_button_hook(5, loc[0], loc[1],)
            elif event.Wheel == 1:
                self.mouse_button_hook(4, loc[0], loc[1],)
        return True

    def KeyboardEvent(self, event):
        # Forward key presses along with any modifier this event itself
        # represents.
        modifiers = []
        # NOTE(review): the elif chain reports at most one modifier per
        # event -- confirm whether combinations should accumulate instead.
        if event.Key in ["Lshift", "Rshift"]:
            modifiers.append('Shift')
        elif event.Key in ["Lmenu", "Rmenu"]:
            modifiers.append('Alt')
        elif event.Key in ["Rcontrol", "Lcontrol"]:
            modifiers.append('Ctrl')
        elif event.Key in ["Rwin", "Lwin"]:
            modifiers.append('Super')

        if event.Ascii in self.remap.keys():
            string = self.remap[event.Ascii]
        else:
            string = unicode(chr(event.Ascii))

        self.key_hook(str(event.Ascii), modifiers, string, False)
        self.screen_hook(str(event.Window), event.WindowName.decode(self.encoding), 0, 0, 0, 0)
        return True
class Sniffer:
    """Windows event sniffer facade.

    Owns a SnifferThread, wires the caller-supplied hook callbacks into
    it, and keeps the main thread joining until cancelled.
    """
    def __init__(self):
        self.encoding = sys.stdin.encoding

        # Callbacks assigned by the caller before run() is invoked.
        self.key_hook = lambda x: True
        self.mouse_button_hook = lambda x: True
        self.mouse_move_hook = lambda x: True
        self.screen_hook = lambda x: True

        # NOTE(review): this remap duplicates SnifferThread.remap and is
        # never read here -- candidate for removal.
        self.remap = {
            248: u"\xf8",
            216: u"\xd8",
            230: u"\xe6",
            198: u"\xc6",
            229: u"\xe5",
            197: u"\xc5"
        }

    def run(self):
        try:
            self.hm = pyHook.HookManager()
            self.thread = SnifferThread(self.hm)
            # pythoncom.PumpMessages needs to be in the same thread as the events
            self.thread.mouse_button_hook = self.mouse_button_hook
            self.thread.mouse_move_hook = self.mouse_move_hook
            self.thread.screen_hook = self.screen_hook
            self.thread.key_hook = self.key_hook

            self.thread.start()
            # Join with a timeout so the main thread stays responsive to
            # KeyboardInterrupt.
            while True:
                self.thread.join(100)
        except:
            # Any failure (including Ctrl-C) tears the hooks down.
            self.cancel()

    def cancel(self):
        # Ask the message pump to exit, unhook, and release resources.
        ctypes.windll.user32.PostQuitMessage(0)
        self.hm.UnhookKeyboard()
        self.hm.UnhookMouse()
        del self.thread
        del self.hm
| gpl-3.0 |
puzan/ansible | lib/ansible/modules/cloud/openstack/_nova_keypair.py | 21 | 5629 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
# (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nova_keypair
version_added: "1.2"
author:
- "Benno Joy (@bennojoy)"
- "Michael DeHaan"
deprecated: Deprecated in 2.0. Use M(os_keypair) instead.
short_description: Add/Delete key pair from nova
description:
- Add or Remove key pair from nova .
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: http://127.0.0.1:35357/v2.0/
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name that has to be given to the key pair
required: true
default: None
public_key:
description:
- The public key that would be uploaded to nova and injected to vm's upon creation
required: false
default: None
requirements:
- "python >= 2.6"
- "python-novaclient"
'''
EXAMPLES = '''
- name: Create a key pair with the running users public key
nova_keypair:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
name: ansible_key
public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}"
- name: Create a new key pair and the private key returned after the run.
nova_keypair:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
name: ansible_key
'''
import time
try:
from novaclient.v1_1 import client as nova_client
from novaclient import exceptions as exc
HAS_NOVACLIENT = True
except ImportError:
HAS_NOVACLIENT = False
def main():
    """Ansible entry point: ensure a nova keypair is present or absent.

    Authenticates against Nova with the standard OpenStack login
    parameters, then creates the named keypair (optionally from a
    user-supplied public key) or deletes it.  Always terminates through
    module.exit_json() / module.fail_json().
    """
    argument_spec = openstack_argument_spec()
    argument_spec.update(dict(
        name = dict(required=True),
        public_key = dict(default=None),
        state = dict(default='present', choices=['absent', 'present'])
    ))
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_NOVACLIENT:
        module.fail_json(msg='python-novaclient is required for this module to work')
    nova = nova_client.Client(module.params['login_username'],
                              module.params['login_password'],
                              module.params['login_tenant_name'],
                              module.params['auth_url'],
                              region_name=module.params['region_name'],
                              service_type='compute')
    try:
        nova.authenticate()
    except exc.Unauthorized as e:
        module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message)
    except exc.AuthorizationFailure as e:
        module.fail_json(msg = "Unable to authorize user: %s" % e.message)
    if module.params['state'] == 'present':
        for key in nova.keypairs.list():
            if key.name == module.params['name']:
                if module.params['public_key'] and (module.params['public_key'] != key.public_key ):
                    # Bug fix: 'key' is a novaclient KeyPair object, not a
                    # dict -- key['name'] raised TypeError. Use key.name.
                    module.fail_json(msg = "name {} present but key hash not the same as offered. Delete key first.".format(key.name))
                else:
                    # Same name and (if offered) same public key: no change.
                    module.exit_json(changed = False, result = "Key present")
        try:
            key = nova.keypairs.create(module.params['name'], module.params['public_key'])
        except Exception as e:
            # Bug fix: a failed create must be reported as a failure
            # (fail_json), not as a successful exit (exit_json).
            module.fail_json(msg = "Error in creating the keypair: %s" % e.message)
        if not module.params['public_key']:
            # Nova generated the key pair; return the private key to the user.
            module.exit_json(changed = True, key = key.private_key)
        module.exit_json(changed = True, key = None)
    if module.params['state'] == 'absent':
        for key in nova.keypairs.list():
            if key.name == module.params['name']:
                try:
                    nova.keypairs.delete(module.params['name'])
                except Exception as e:
                    module.fail_json(msg = "The keypair deletion has failed: %s" % e.message)
                module.exit_json( changed = True, result = "deleted")
    module.exit_json(changed = False, result = "not present")
# this is magic, see lib/ansible/module.params['common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
redhatrises/freeipa | ipaserver/install/cainstance.py | 1 | 70145 | # Authors: Rob Crittenden <rcritten@redhat.com>
# Ade Lee <alee@redhat.com>
# Andrew Wnuk <awnuk@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import base64
import dbus
import ldap
import os
import pwd
import grp
import re
import shutil
import sys
import syslog
import time
import tempfile
import six
# pylint: disable=import-error
from six.moves.configparser import ConfigParser, RawConfigParser
# pylint: enable=import-error
from cryptography.hazmat.primitives import serialization
from ipalib import api
from ipalib import x509
from ipalib import errors
import ipalib.constants
from ipalib.install import certmonger
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipapython import dogtag
from ipapython import ipautil
from ipapython import ipaldap
from ipapython.certdb import get_ca_nickname
from ipapython.dn import DN
from ipapython.ipa_log_manager import log_mgr,\
standard_logging_setup, root_logger
from ipaserver.secrets.kem import IPAKEMKeys
from ipaserver.install import certs
from ipaserver.install import custodiainstance
from ipaserver.install import dsinstance
from ipaserver.install import installutils
from ipaserver.install import ldapupdate
from ipaserver.install import replication
from ipaserver.install import sysupgrade
from ipaserver.install.dogtaginstance import DogtagInstance
from ipaserver.plugins import ldap2
# We need to reset the template because the CA uses the regular boot
# information
# 389-DS setup template; $-placeholders are substituted elsewhere.
INF_TEMPLATE = """
[General]
FullMachineName= $FQDN
SuiteSpotUserID= $USER
SuiteSpotGroup= $GROUP
ServerRoot= $SERVER_ROOT
[slapd]
ServerPort= $DSPORT
ServerIdentifier= $SERVERID
Suffix= $SUFFIX
RootDN= cn=Directory Manager
RootDNPwd= $PASSWORD
ConfigFile = /usr/share/pki/ca/conf/database.ldif
"""
# Dogtag groups assigned to the installation admin user
# (consumed via self.admin_groups in configure_instance()).
ADMIN_GROUPS = [
    'Enterprise CA Administrators',
    'Enterprise KRA Administrators',
    'Security Domain Administrators'
]
def check_port():
    """
    Report whether the dogtag port (8443) is available.

    Returns True when nothing is listening on the port, False otherwise.
    """
    port_in_use = ipautil.host_port_open(None, 8443)
    return not port_in_use
def get_preop_pin(instance_root, instance_name):
    """Return the one-time "preop" PIN from a Dogtag instance's CS.cfg.

    Only used for Dogtag 9.

    :param instance_root: base directory holding the instance
    :param instance_name: name of the instance directory
    :raises IOError: when CS.cfg cannot be opened
    :raises RuntimeError: when no preop.pin line is present (the CA is
        most likely already configured)
    """
    filename = instance_root + "/" + instance_name + "/conf/CS.cfg"
    # read the config file and get the preop pin
    try:
        # Fix: use a context manager so the file handle is always closed
        # (the original leaked the open file object).
        with open(filename) as f:
            data = f.read()
    except IOError as e:
        root_logger.error("Cannot open configuration file." + str(e))
        # Fix: bare raise preserves the original traceback.
        raise
    pattern = re.compile("preop.pin=(.*)")
    for line in data.split('\n'):
        match = re.search(pattern, line)
        if match:
            return match.group(1)
    raise RuntimeError(
        "Unable to find preop.pin in %s. Is your CA already configured?" %
        filename)
def import_pkcs12(input_file, input_passwd, cert_database,
                  cert_passwd):
    """Import a PKCS#12 bundle into an NSS database using pk12util."""
    cmd = [
        paths.PK12UTIL,
        "-d", cert_database,
        "-i", input_file,
        "-k", cert_passwd,
        "-w", input_passwd,
    ]
    ipautil.run(cmd)
def get_value(s):
    """
    Parse out a name/value pair from a Javascript variable.

    Strips quotes and semicolons from the value and turns the escape
    sequences \\n / \\r into real newlines.  Returns None when the
    input has no '=' separator at all.
    """
    _name, sep, value = s.partition('=')
    if not sep:
        # mirrors the original IndexError path: no '=' present
        return None
    for old, new in (('\"', ''), (';', ''), ('\\n', '\n'), ('\\r', '\r')):
        value = value.replace(old, new)
    return value
def find_substring(data, value):
    """
    Scan through a list looking for a string that starts with value;
    return its parsed value (via get_value), or None when nothing matches.
    """
    return next(
        (get_value(item) for item in data if item.startswith(value)),
        None)
def get_defList(data):
    """
    Return a dictionary of defList name/value pairs.
    A certificate signing request is specified as a series of these.
    """
    varname = None
    value = None
    skip = False
    defdict = {}
    for d in data:
        if d.startswith("defList = new Object"):
            # new defList entry: discard any partially-collected state
            varname = None
            value = None
            skip = False
        if d.startswith("defList.defId"):
            varname = get_value(d)
        if d.startswith("defList.defVal"):
            value = get_value(d)
        if skip:
            # a readonly constraint was seen on an earlier line:
            # drop this entry and clear the flag
            varname = None
            value = None
            skip = False
        if d.startswith("defList.defConstraint"):
            ctype = get_value(d)
            if ctype == "readonly":
                # note: checked by the 'if skip' above, i.e. it takes
                # effect on a subsequent line, not this one
                skip = True
        if varname and value:
            defdict[varname] = value
            varname = None
            value = None
    return defdict
def get_outputList(data):
    """
    Return a dictionary of outputList name/value pairs.

    The output from issuing a certificate is a series of these.
    """
    collected = {}
    out_name = None
    out_value = None
    for line in data:
        if line.startswith("outputList = new"):
            # a fresh object resets any partially collected pair
            out_name = None
            out_value = None
        if line.startswith("outputList.outputId"):
            out_name = get_value(line)
        if line.startswith("outputList.outputVal"):
            out_value = get_value(line)
        if out_name and out_value:
            collected[out_name] = out_value
            out_name = None
            out_value = None
    return collected
def get_crl_files(path=None):
    """
    Yield the full paths of dogtag's CRL files found in the default CRL
    publish directory or in a chosen target directory.

    @param path Custom target directory
    """
    if path is None:
        path = paths.PKI_CA_PUBLISH_DIR
    for entry in os.listdir(path):
        # MasterCRL.bin plus every DER-encoded CRL file
        if entry == "MasterCRL.bin" or entry.endswith(".der"):
            yield os.path.join(path, entry)
def is_step_one_done():
    """Read CS.cfg and determine if step one of an external CA install is
    done (i.e. preop.ca.type is 'otherca')."""
    cs_cfg = paths.CA_CS_CFG_PATH
    if not os.path.exists(cs_cfg):
        return False
    return installutils.get_directive(cs_cfg, 'preop.ca.type', '=') == "otherca"
def is_ca_installed_locally():
    """Report whether a CA is installed on this host.

    Existence of Dogtag's CS.cfg is used as the marker.
    :return: True/False
    """
    cs_cfg = paths.CA_CS_CFG_PATH
    return os.path.exists(cs_cfg)
class CAInstance(DogtagInstance):
    """
    When using a dogtag CA the DS database contains just the
    server cert for DS. The mod_nss database will contain the RA agent
    cert that will be used to do authenticated requests against dogtag.
    This is done because we use python-nss and will inherit the opened
    NSS database in mod_python. In nsslib.py we do an nssinit but this will
    return success if the database is already initialized. It doesn't care
    if the database is different or not.
    external is a state machine:
       0 = not an externally signed CA
       1 = generating CSR to be signed
       2 = have signed cert, continue installation
    """
    # Tracking requests as (nickname, second element) pairs.
    # NOTE(review): second element presumably selects a renewal profile
    # ('ipaCACertRenewal' for the CA signing cert) -- confirm against
    # DogtagInstance's handling of tracking_reqs.
    tracking_reqs = (('auditSigningCert cert-pki-ca', None),
                     ('ocspSigningCert cert-pki-ca', None),
                     ('subsystemCert cert-pki-ca', None),
                     ('caSigningCert cert-pki-ca', 'ipaCACertRenewal'))
    # Nickname used for the server certificate (see pki_ssl_server_nickname
    # in __spawn_instance).
    server_cert_name = 'Server-Cert cert-pki-ca'
def __init__(self, realm=None, host_name=None):
super(CAInstance, self).__init__(
realm=realm,
subsystem="CA",
service_desc="certificate server",
host_name=host_name,
service_prefix=ipalib.constants.PKI_GSSAPI_SERVICE_NAME,
)
# for external CAs
self.external = 0
self.csr_file = None
self.cert_file = None
self.cert_chain_file = None
if realm is not None:
self.canickname = get_ca_nickname(realm)
else:
self.canickname = None
self.ra_cert = None
self.requestId = None
self.log = log_mgr.get_logger(self)
self.no_db_setup = False
self.keytab = os.path.join(
paths.PKI_TOMCAT, self.service_prefix + '.keytab')
    def configure_instance(self, host_name, dm_password, admin_password,
                           pkcs12_info=None, master_host=None, csr_file=None,
                           cert_file=None, cert_chain_file=None,
                           master_replication_port=None,
                           subject_base=None, ca_subject=None,
                           ca_signing_algorithm=None,
                           ca_type=None, ra_p12=None, ra_only=False,
                           promote=False, use_ldaps=False):
        """Create a CA instance.
        To create a clone, pass in pkcs12_info.
        Creating a CA with an external signer is a 2-step process. In
        step 1 we generate a CSR. In step 2 we are given the cert and
        chain and actually proceed to create the CA. For step 1 set
        csr_file. For step 2 set cert_file and cert_chain_file.
        Other notable flags: promote=True follows the replica-promotion
        path, ra_only=True configures only the RA agent pieces.
        """
        self.fqdn = host_name
        self.dm_password = dm_password
        self.admin_user = "admin"
        self.admin_groups = ADMIN_GROUPS
        self.admin_password = admin_password
        self.pkcs12_info = pkcs12_info
        # pkcs12_info doubles as the "this is a clone" signal
        if self.pkcs12_info is not None:
            self.clone = True
        self.master_host = master_host
        self.master_replication_port = master_replication_port
        self.ra_p12 = ra_p12
        self.subject_base = \
            subject_base or installutils.default_subject_base(self.realm)
        self.ca_subject = \
            ca_subject or installutils.default_ca_subject_dn(self.subject_base)
        if ca_signing_algorithm is None:
            self.ca_signing_algorithm = 'SHA256withRSA'
        else:
            self.ca_signing_algorithm = ca_signing_algorithm
        if ca_type is not None:
            self.ca_type = ca_type
        else:
            self.ca_type = 'generic'
        self.no_db_setup = promote
        self.use_ldaps = use_ldaps
        # Determine if we are installing as an externally-signed CA and
        # what stage we're in.
        if csr_file is not None:
            self.csr_file = csr_file
            self.external = 1
        elif cert_file is not None:
            self.cert_file = cert_file
            self.cert_chain_file = cert_chain_file
            self.external = 2
        if self.clone:
            has_ra_cert = os.path.exists(paths.RA_AGENT_PEM)
        else:
            has_ra_cert = False
        # The step order below is significant: pkispawn must run before
        # CS.cfg is edited, and the instance is restarted after each batch
        # of configuration changes.
        if not ra_only:
            if promote:
                # Setup Database
                self.step("creating certificate server db", self.__create_ds_db)
                self.step("setting up initial replication", self.__setup_replication)
                self.step("creating installation admin user", self.setup_admin)
            self.step("configuring certificate server instance",
                      self.__spawn_instance)
            self.step("exporting Dogtag certificate store pin",
                      self.create_certstore_passwdfile)
            self.step("stopping certificate server instance to update CS.cfg", self.stop_instance)
            self.step("backing up CS.cfg", self.backup_config)
            self.step("disabling nonces", self.__disable_nonce)
            self.step("set up CRL publishing", self.__enable_crl_publish)
            self.step("enable PKIX certificate path discovery and validation", self.enable_pkix)
            if promote:
                self.step("destroying installation admin user", self.teardown_admin)
            self.step("starting certificate server instance", self.start_instance)
        # Step 1 of external is getting a CSR so we don't need to do these
        # steps until we get a cert back from the external CA.
        if self.external != 1:
            if not has_ra_cert:
                self.step("configure certmonger for renewals",
                          self.configure_certmonger_renewal)
                # Three ways to obtain the RA cert: request a fresh one
                # (master), pull the key via Custodia (promotion), or
                # import from a PKCS#12 file (domain level 0 clone).
                if not self.clone:
                    self.step("requesting RA certificate from CA", self.__request_ra_certificate)
                elif promote:
                    self.step("Importing RA key", self.__import_ra_key)
                else:
                    self.step("importing RA certificate from PKCS #12 file",
                              self.__import_ra_cert)
            if not ra_only:
                self.step("setting up signing cert profile", self.__setup_sign_profile)
                self.step("setting audit signing renewal to 2 years", self.set_audit_renewal)
                self.step("restarting certificate server", self.restart_instance)
                if not self.clone:
                    self.step("publishing the CA certificate",
                              self.__export_ca_chain)
                    self.step("adding RA agent as a trusted user", self.__create_ca_agent)
                self.step("authorizing RA to modify profiles", configure_profiles_acl)
                self.step("authorizing RA to manage lightweight CAs",
                          configure_lightweight_ca_acls)
                self.step("Ensure lightweight CAs container exists",
                          ensure_lightweight_cas_container)
                if self.clone and not promote:
                    self.step(
                        "Ensuring backward compatibility",
                        self.__dogtag10_migration)
                self.step("configure certificate renewals", self.configure_renewal)
                self.step("configure Server-Cert certificate renewal", self.track_servercert)
                self.step("Configure HTTP to proxy connections",
                          self.http_proxy)
                self.step("restarting certificate server", self.restart_instance)
                # Only the first master seeds profiles/ACLs/authority entries.
                if not promote:
                    self.step("migrating certificate profiles to LDAP",
                              migrate_profiles_to_ldap)
                    self.step("importing IPA certificate profiles",
                              import_included_profiles)
                    self.step("adding default CA ACL", ensure_default_caacl)
                    self.step("adding 'ipa' CA entry", ensure_ipa_authority_entry)
                self.step("updating IPA configuration", update_ipa_conf)
                self.step("enabling CA instance", self.__enable_instance)
                self.step("configuring certmonger renewal for lightweight CAs",
                          self.__add_lightweight_ca_tracking_requests)
        if ra_only:
            runtime = None
        else:
            runtime = 180
        try:
            self.start_creation(runtime=runtime)
        finally:
            self.clean_pkispawn_files()
    def __spawn_instance(self):
        """
        Create and configure a new CA instance using pkispawn.
        Creates the config file with IPA specific parameters
        and passes it to the base class to call pkispawn
        """
        # Create an empty and secured file
        (cfg_fd, cfg_file) = tempfile.mkstemp()
        os.close(cfg_fd)
        pent = pwd.getpwnam(self.service_user)
        os.chown(cfg_file, pent.pw_uid, pent.pw_gid)
        # Throw-away client NSS db/password for the installation agent cert
        self.tmp_agent_db = tempfile.mkdtemp(
                prefix="tmp-", dir=paths.VAR_LIB_IPA)
        self.tmp_agent_pwd = ipautil.ipa_generate_password()
        # Create CA configuration
        config = ConfigParser()
        config.optionxform = str
        config.add_section("CA")
        # Server
        config.set("CA", "pki_security_domain_name", self.security_domain_name)
        config.set("CA", "pki_enable_proxy", "True")
        config.set("CA", "pki_restart_configured_instance", "False")
        config.set("CA", "pki_backup_keys", "True")
        config.set("CA", "pki_backup_password", self.admin_password)
        config.set("CA", "pki_profiles_in_ldap", "True")
        config.set("CA", "pki_default_ocsp_uri",
            "http://{}.{}/ca/ocsp".format(
                ipalib.constants.IPA_CA_RECORD,
                ipautil.format_netloc(api.env.domain)))
        # Client security database
        config.set("CA", "pki_client_database_dir", self.tmp_agent_db)
        config.set("CA", "pki_client_database_password", self.tmp_agent_pwd)
        config.set("CA", "pki_client_database_purge", "False")
        config.set("CA", "pki_client_pkcs12_password", self.admin_password)
        # Administrator
        config.set("CA", "pki_admin_name", self.admin_user)
        config.set("CA", "pki_admin_uid", self.admin_user)
        config.set("CA", "pki_admin_email", "root@localhost")
        config.set("CA", "pki_admin_password", self.admin_password)
        config.set("CA", "pki_admin_nickname", "ipa-ca-agent")
        config.set("CA", "pki_admin_subject_dn",
                   str(DN(('cn', 'ipa-ca-agent'), self.subject_base)))
        config.set("CA", "pki_client_admin_cert_p12", paths.DOGTAG_ADMIN_P12)
        # Directory server
        config.set("CA", "pki_ds_ldap_port", "389")
        config.set("CA", "pki_ds_password", self.dm_password)
        config.set("CA", "pki_ds_base_dn", six.text_type(self.basedn))
        config.set("CA", "pki_ds_database", "ipaca")
        if self.use_ldaps:
            self._use_ldaps_during_spawn(config)
        # Certificate subject DN's
        config.set("CA", "pki_subsystem_subject_dn",
                   str(DN(('cn', 'CA Subsystem'), self.subject_base)))
        config.set("CA", "pki_ocsp_signing_subject_dn",
                   str(DN(('cn', 'OCSP Subsystem'), self.subject_base)))
        config.set("CA", "pki_ssl_server_subject_dn",
                   str(DN(('cn', self.fqdn), self.subject_base)))
        config.set("CA", "pki_audit_signing_subject_dn",
                   str(DN(('cn', 'CA Audit'), self.subject_base)))
        config.set(
            "CA", "pki_ca_signing_subject_dn",
            str(self.ca_subject))
        # Certificate nicknames
        config.set("CA", "pki_subsystem_nickname", "subsystemCert cert-pki-ca")
        config.set("CA", "pki_ocsp_signing_nickname", "ocspSigningCert cert-pki-ca")
        config.set("CA", "pki_ssl_server_nickname", "Server-Cert cert-pki-ca")
        config.set("CA", "pki_audit_signing_nickname", "auditSigningCert cert-pki-ca")
        config.set("CA", "pki_ca_signing_nickname", "caSigningCert cert-pki-ca")
        # CA key algorithm
        config.set("CA", "pki_ca_signing_key_algorithm", self.ca_signing_algorithm)
        # Only set a pin when the Dogtag NSS db does not exist yet
        if not (os.path.isdir(paths.PKI_TOMCAT_ALIAS_DIR) and
                os.path.isfile(paths.PKI_TOMCAT_PASSWORD_CONF)):
            # generate pin which we know can be used for FIPS NSS database
            pki_pin = ipautil.ipa_generate_password()
            config.set("CA", "pki_pin", pki_pin)
        else:
            pki_pin = None
        if self.clone:
            if self.no_db_setup:
                config.set("CA", "pki_ds_create_new_db", "False")
                config.set("CA", "pki_clone_setup_replication", "False")
                config.set("CA", "pki_clone_reindex_data", "True")
            cafile = self.pkcs12_info[0]
            shutil.copy(cafile, paths.TMP_CA_P12)
            pent = pwd.getpwnam(self.service_user)
            os.chown(paths.TMP_CA_P12, pent.pw_uid, pent.pw_gid)
            # Security domain registration
            config.set("CA", "pki_security_domain_hostname", self.master_host)
            config.set("CA", "pki_security_domain_https_port", "443")
            config.set("CA", "pki_security_domain_user", self.admin_user)
            config.set("CA", "pki_security_domain_password", self.admin_password)
            # Clone
            config.set("CA", "pki_clone", "True")
            config.set("CA", "pki_clone_pkcs12_path", paths.TMP_CA_P12)
            config.set("CA", "pki_clone_pkcs12_password", self.dm_password)
            config.set("CA", "pki_clone_replication_security", "TLS")
            config.set("CA", "pki_clone_replication_master_port", str(self.master_replication_port))
            config.set("CA", "pki_clone_replication_clone_port", "389")
            config.set("CA", "pki_clone_replicate_schema", "False")
            config.set("CA", "pki_clone_uri", "https://%s" % ipautil.format_netloc(self.master_host, 443))
        # External CA
        if self.external == 1:
            # step 1: have pkispawn emit a CSR for the external signer
            config.set("CA", "pki_external", "True")
            config.set("CA", "pki_external_csr_path", self.csr_file)
            if self.ca_type == 'ms-cs':
                # Include MS template name extension in the CSR
                config.set("CA", "pki_req_ext_add", "True")
                config.set("CA", "pki_req_ext_oid", "1.3.6.1.4.1.311.20.2")
                config.set("CA", "pki_req_ext_critical", "False")
                config.set("CA", "pki_req_ext_data", "1E0A00530075006200430041")
        elif self.external == 2:
            # step 2: feed the signed cert and chain back into pkispawn
            cert_file = tempfile.NamedTemporaryFile()
            with open(self.cert_file) as f:
                x509.write_certificate(f.read(), cert_file.name)
            cert_file.flush()
            result = ipautil.run(
                [paths.OPENSSL, 'crl2pkcs7',
                 '-certfile', self.cert_chain_file,
                 '-nocrl'],
                capture_output=True)
            cert_chain = result.output
            # Dogtag chokes on the header and footer, remove them
            # https://bugzilla.redhat.com/show_bug.cgi?id=1127838
            cert_chain = re.search(
                r'(?<=-----BEGIN PKCS7-----).*?(?=-----END PKCS7-----)',
                cert_chain, re.DOTALL).group(0)
            cert_chain_file = ipautil.write_tmp_file(cert_chain)
            config.set("CA", "pki_external", "True")
            config.set("CA", "pki_external_ca_cert_path", cert_file.name)
            config.set("CA", "pki_external_ca_cert_chain_path", cert_chain_file.name)
            config.set("CA", "pki_external_step_two", "True")
        # Generate configuration file
        with open(cfg_file, "w") as f:
            config.write(f)
        self.backup_state('installed', True)
        try:
            DogtagInstance.spawn_instance(
                self, cfg_file,
                nolog_list=(self.dm_password,
                            self.admin_password,
                            pki_pin,
                            self.tmp_agent_pwd)
            )
        finally:
            os.remove(cfg_file)
        if self.external == 1:
            # Stop here; the admin must get the CSR signed and re-run install.
            print("The next step is to get %s signed by your CA and re-run %s as:" % (self.csr_file, sys.argv[0]))
            print("%s --external-cert-file=/path/to/signed_certificate --external-cert-file=/path/to/external_ca_certificate" % sys.argv[0])
            sys.exit(0)
        else:
            shutil.move(paths.CA_BACKUP_KEYS_P12,
                        paths.CACERT_P12)
        self.log.debug("completed creating ca instance")
def backup_config(self):
try:
backup_config()
except Exception as e:
root_logger.warning("Failed to backup CS.cfg: %s", e)
def create_certstore_passwdfile(self):
"""
This method creates a 'pwdfile.txt' file in the Dogtag certificate
store so that this file can be assumed and used for NSSDatabase/CertDB
operations in 'certutil' calls.
"""
passwd = None
token = 'internal'
with open(paths.PKI_TOMCAT_PASSWORD_CONF, 'r') as f:
for line in f:
(tok, pin) = line.split('=', 1)
if token == tok:
passwd = pin.strip()
break
else:
raise RuntimeError(
"The password to the 'internal' token of the Dogtag "
"certificate store was not found.")
db = certs.CertDB(self.realm, nssdir=paths.PKI_TOMCAT_ALIAS_DIR)
db.create_passwd_file(passwd)
def __update_topology(self):
ld = ldapupdate.LDAPUpdate(ldapi=True, sub_dict={
'SUFFIX': api.env.basedn,
'FQDN': self.fqdn,
})
ld.update([paths.CA_TOPOLOGY_ULDIF])
def __disable_nonce(self):
# Turn off Nonces
update_result = installutils.update_file(
paths.CA_CS_CFG_PATH, 'ca.enableNonces=true',
'ca.enableNonces=false')
if update_result != 0:
raise RuntimeError("Disabling nonces failed")
pent = pwd.getpwnam(self.service_user)
os.chown(paths.CA_CS_CFG_PATH, pent.pw_uid, pent.pw_gid)
def enable_pkix(self):
installutils.set_directive(paths.SYSCONFIG_PKI_TOMCAT,
'NSS_ENABLE_PKIX_VERIFY', '1',
quotes=False, separator='=')
def __import_ra_cert(self):
"""
Helper method for IPA domain level 0 replica install
"""
self.import_ra_cert(self.ra_p12, self.dm_password)
def import_ra_cert(self, rafile, password=''):
"""
Cloned RAs will use the same RA agent cert as the master so we
need to import from a PKCS#12 file.
Used when setting up replication
"""
with ipautil.write_tmp_file(password + '\n') as f:
pwdarg = 'file:{file}'.format(file=f.name)
# get the private key from the file
ipautil.run([paths.OPENSSL,
"pkcs12",
"-in", rafile,
"-nocerts", "-nodes",
"-out", paths.RA_AGENT_KEY,
"-passin", pwdarg])
# get the certificate from the pkcs12 file
ipautil.run([paths.OPENSSL,
"pkcs12",
"-in", rafile,
"-clcerts", "-nokeys",
"-out", paths.RA_AGENT_PEM,
"-passin", pwdarg])
self.__set_ra_cert_perms()
self.configure_agent_renewal()
def __import_ra_key(self):
custodia = custodiainstance.CustodiaInstance(host_name=self.fqdn,
realm=self.realm)
custodia.import_ra_key(self.master_host)
self.__set_ra_cert_perms()
self.configure_agent_renewal()
def __set_ra_cert_perms(self):
"""
Sets the correct permissions for the RA_AGENT_PEM, RA_AGENT_KEY files
"""
ipaapi_gid = grp.getgrnam(ipalib.constants.IPAAPI_GROUP).gr_gid
for fname in (paths.RA_AGENT_PEM, paths.RA_AGENT_KEY):
os.chown(fname, -1, ipaapi_gid)
os.chmod(fname, 0o440)
tasks.restore_context(fname)
    def __create_ca_agent(self):
        """
        Create CA agent, assign a certificate, and add the user to
        the appropriate groups for accessing CA services.
        """
        # get RA certificate
        cert_data = self.ra_cert.public_bytes(serialization.Encoding.DER)
        # connect to CA database
        server_id = installutils.realm_to_serverid(api.env.realm)
        dogtag_uri = 'ldapi://%%2fvar%%2frun%%2fslapd-%s.socket' % server_id
        conn = ldap2.ldap2(api, ldap_uri=dogtag_uri)
        conn.connect(autobind=True)
        # create ipara user with RA certificate
        user_dn = DN(('uid', "ipara"), ('ou', 'People'), self.basedn)
        entry = conn.make_entry(
            user_dn,
            objectClass=['top', 'person', 'organizationalPerson',
                         'inetOrgPerson', 'cmsuser'],
            uid=["ipara"],
            sn=["ipara"],
            cn=["ipara"],
            usertype=["agentType"],
            userstate=["1"],
            userCertificate=[cert_data],
            # description format: "2;<serial>;<issuer DN>;<subject DN>"
            description=['2;%s;%s;%s' % (
                self.ra_cert.serial_number,
                DN(self.ca_subject),
                DN(('CN', 'IPA RA'), self.subject_base))])
        conn.add_entry(entry)
        # add ipara user to Certificate Manager Agents group
        group_dn = DN(('cn', 'Certificate Manager Agents'), ('ou', 'groups'),
                      self.basedn)
        conn.add_entry_to_group(user_dn, group_dn, 'uniqueMember')
        # add ipara user to Registration Manager Agents group
        group_dn = DN(('cn', 'Registration Manager Agents'), ('ou', 'groups'),
                      self.basedn)
        conn.add_entry_to_group(user_dn, group_dn, 'uniqueMember')
        conn.disconnect()
def __get_ca_chain(self):
try:
return dogtag.get_ca_certchain(ca_host=self.fqdn)
except Exception as e:
raise RuntimeError("Unable to retrieve CA chain: %s" % str(e))
def __export_ca_chain(self):
"""
Get the CA chain from Dogtag NSS DB and write it to paths.IPA_CA_CRT
"""
# Getting Dogtag CA chain
chain = self.__get_ca_chain()
# Convert to DER because the chain comes back as one long string which
# makes openssl throw up.
data = base64.b64decode(chain)
# Get list of PEM certificates
certlist = x509.pkcs7_to_pems(data, x509.DER)
# We have all the certificates in certlist, write them to a PEM file
with open(paths.IPA_CA_CRT, 'w') as ipaca_pem:
for cert in certlist:
ipaca_pem.write(cert)
ipaca_pem.write('\n')
    def __request_ra_certificate(self):
        """Request the IPA RA agent certificate from the new CA.
        The temporary agent credentials created by pkispawn are wired into
        the certmonger renewal helper for the duration of the request, then
        the previous helper configuration is restored.
        """
        # create a temp file storing the pwd
        agent_file = tempfile.NamedTemporaryFile(
            mode="w", dir=paths.VAR_LIB_IPA, delete=False)
        agent_file.write(self.tmp_agent_pwd)
        agent_file.close()
        # create a temp pem file storing the CA chain
        chain_file = tempfile.NamedTemporaryFile(
            mode="w", dir=paths.VAR_LIB_IPA, delete=False)
        chain_file.close()
        chain = self.__get_ca_chain()
        data = base64.b64decode(chain)
        # convert the DER PKCS#7 chain into PEM certificates for the helper
        ipautil.run(
            [paths.OPENSSL,
             "pkcs7",
             "-inform",
             "DER",
             "-print_certs",
             "-out", chain_file.name,
             ], stdin=data, capture_output=False)
        agent_args = [paths.DOGTAG_IPA_CA_RENEW_AGENT_SUBMIT,
                      "--dbdir", self.tmp_agent_db,
                      "--nickname", "ipa-ca-agent",
                      "--cafile", chain_file.name,
                      "--ee-url", 'http://%s:8080/ca/ee/ca/' % self.fqdn,
                      "--agent-url",
                      'https://%s:8443/ca/agent/ca/' % self.fqdn,
                      "--sslpinfile", agent_file.name]
        helper = " ".join(agent_args)
        # configure certmonger renew agent to use temporary agent cert
        old_helper = certmonger.modify_ca_helper(
            ipalib.constants.RENEWAL_CA_NAME, helper)
        try:
            # The certificate must be requested using caServerCert profile
            # because this profile does not require agent authentication
            reqId = certmonger.request_and_wait_for_cert(
                certpath=(paths.RA_AGENT_PEM, paths.RA_AGENT_KEY),
                principal='host/%s' % self.fqdn,
                subject=str(DN(('CN', 'IPA RA'), self.subject_base)),
                ca=ipalib.constants.RENEWAL_CA_NAME,
                profile='caServerCert',
                pre_command='renew_ra_cert_pre',
                post_command='renew_ra_cert',
                storage="FILE")
            self.__set_ra_cert_perms()
            self.requestId = str(reqId)
            self.ra_cert = x509.load_certificate_from_file(
                paths.RA_AGENT_PEM)
        finally:
            # we can restore the helper parameters
            certmonger.modify_ca_helper(
                ipalib.constants.RENEWAL_CA_NAME, old_helper)
            # remove the pwdfile
            for f in (agent_file, chain_file):
                try:
                    os.remove(f.name)
                except OSError:
                    pass
def __setup_sign_profile(self):
# Tell the profile to automatically issue certs for RAs
installutils.set_directive(
paths.CAJARSIGNINGCERT_CFG, 'auth.instance_id', 'raCertAuth',
quotes=False, separator='=')
def prepare_crl_publish_dir(self):
"""
Prepare target directory for CRL publishing
Returns a path to the CRL publishing directory
"""
publishdir = paths.PKI_CA_PUBLISH_DIR
if not os.path.exists(publishdir):
os.mkdir(publishdir)
os.chmod(publishdir, 0o775)
pent = pwd.getpwnam(self.service_user)
os.chown(publishdir, 0, pent.pw_gid)
tasks.restore_context(publishdir)
return publishdir
def __enable_crl_publish(self):
    """
    Enable file-based CRL publishing and disable LDAP publishing.

    All changes are made by rewriting directives in CS.cfg; Dogtag picks
    them up on restart.

    https://access.redhat.com/knowledge/docs/en-US/Red_Hat_Certificate_System/8.0/html/Admin_Guide/Setting_up_Publishing.html
    """
    caconfig = paths.CA_CS_CFG_PATH

    publishdir = self.prepare_crl_publish_dir()

    # Enable file publishing, disable LDAP
    installutils.set_directive(caconfig, 'ca.publish.enable', 'true', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.ldappublish.enable', 'false', quotes=False, separator='=')

    # Create the file publisher, der only, not b64
    installutils.set_directive(caconfig, 'ca.publish.publisher.impl.FileBasedPublisher.class','com.netscape.cms.publish.publishers.FileBasedPublisher', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.crlLinkExt', 'bin', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.directory', publishdir, quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.latestCrlLink', 'true', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.pluginName', 'FileBasedPublisher', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.timeStamp', 'LocalTime', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.zipCRLs', 'false', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.zipLevel', '9', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.Filename.b64', 'false', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.publisher.instance.FileBaseCRLPublisher.Filename.der', 'true', quotes=False, separator='=')

    # The publishing rule
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.FileCrlRule.enable', 'true', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.FileCrlRule.mapper', 'NoMap', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.FileCrlRule.pluginName', 'Rule', quotes=False, separator='=')
    # NOTE(review): the directive name embeds the trailing '=' and an
    # empty separator/value is passed, so the literal line written is
    # '...predicate=' -- presumably intentional (empty predicate); confirm
    # against a generated CS.cfg.
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.FileCrlRule.predicate=', '', quotes=False, separator='')
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.FileCrlRule.publisher', 'FileBaseCRLPublisher', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.FileCrlRule.type', 'crl', quotes=False, separator='=')

    # Now disable LDAP publishing
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.LdapCaCertRule.enable', 'false', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.LdapCrlRule.enable', 'false', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.LdapUserCertRule.enable', 'false', quotes=False, separator='=')
    installutils.set_directive(caconfig, 'ca.publish.rule.instance.LdapXCertRule.enable', 'false', quotes=False, separator='=')

    # If we are the initial master then we are the CRL generator, otherwise
    # we point to that master for CRLs.
    if not self.clone:
        # These next two are defaults, but I want to be explicit that the
        # initial master is the CRL generator.
        installutils.set_directive(caconfig, 'ca.crl.MasterCRL.enableCRLCache', 'true', quotes=False, separator='=')
        installutils.set_directive(caconfig, 'ca.crl.MasterCRL.enableCRLUpdates', 'true', quotes=False, separator='=')
        installutils.set_directive(caconfig, 'ca.listenToCloneModifications', 'true', quotes=False, separator='=')
    else:
        installutils.set_directive(caconfig, 'ca.crl.MasterCRL.enableCRLCache', 'false', quotes=False, separator='=')
        installutils.set_directive(caconfig, 'ca.crl.MasterCRL.enableCRLUpdates', 'false', quotes=False, separator='=')
        installutils.set_directive(caconfig, 'ca.listenToCloneModifications', 'false', quotes=False, separator='=')
def uninstall(self):
    """Uninstall the CA: restore saved state, deregister the renewal CA
    from certmonger, and remove published CRL files and directory."""
    # just eat state
    self.restore_state("enabled")

    DogtagInstance.uninstall(self)

    self.restore_state("installed")

    # At one time we removed this user on uninstall. That can potentially
    # orphan files, or worse, if another useradd runs in the interim,
    # cause files to have a new owner.
    self.restore_state("user_exists")

    # certmonger (and the message bus it talks over) must be running to
    # remove the dogtag-ipa-ca-renew-agent CA over D-Bus.
    services.knownservices.messagebus.start()
    cmonger = services.knownservices.certmonger
    cmonger.start()

    bus = dbus.SystemBus()
    obj = bus.get_object('org.fedorahosted.certmonger',
                         '/org/fedorahosted/certmonger')
    iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
    path = iface.find_ca_by_nickname('dogtag-ipa-ca-renew-agent')
    if path:
        iface.remove_known_ca(path)

    cmonger.stop()

    # remove CRL files; failures are logged but do not abort uninstall
    self.log.info("Remove old CRL files")
    try:
        for f in get_crl_files():
            self.log.debug("Remove %s", f)
            installutils.remove_file(f)
    except OSError as e:
        self.log.warning("Error while removing old CRL files: %s", e)

    # remove CRL directory
    self.log.info("Remove CRL directory")
    if os.path.exists(paths.PKI_CA_PUBLISH_DIR):
        try:
            shutil.rmtree(paths.PKI_CA_PUBLISH_DIR)
        except OSError as e:
            self.log.warning("Error while removing CRL publish "
                             "directory: %s", e)
def unconfigure_certmonger_renewal_guard(self):
    """Restore certmonger's original dogtag renewal helper.

    Puts back the 'external-helper' value saved in the installer state
    under 'certmonger_dogtag_helper'; no-op when the CA is not configured
    or no helper was saved.
    """
    if not self.is_configured():
        return

    helper = self.restore_state('certmonger_dogtag_helper')
    if helper:
        bus = dbus.SystemBus()
        obj = bus.get_object('org.fedorahosted.certmonger',
                             '/org/fedorahosted/certmonger')
        iface = dbus.Interface(obj, 'org.fedorahosted.certmonger')
        path = iface.find_ca_by_nickname('dogtag-ipa-renew-agent')
        if path:
            # set the property directly on the CA object via D-Bus
            ca_obj = bus.get_object('org.fedorahosted.certmonger', path)
            ca_iface = dbus.Interface(ca_obj,
                                      'org.freedesktop.DBus.Properties')
            ca_iface.Set('org.fedorahosted.certmonger.ca',
                         'external-helper', helper)
def configure_agent_renewal(self):
    """Ask certmonger to track the RA agent certificate for renewal."""
    tracking_args = dict(
        certpath=(paths.RA_AGENT_PEM, paths.RA_AGENT_KEY),
        ca='dogtag-ipa-ca-renew-agent',
        pre_command='renew_ra_cert_pre',
        post_command='renew_ra_cert',
        storage='FILE')
    try:
        certmonger.start_tracking(**tracking_args)
    except RuntimeError as e:
        # tracking failure is logged, not fatal
        self.log.error(
            "certmonger failed to start tracking certificate: %s", e)
def stop_tracking_certificates(self):
    """Stop tracking our certificates. Called on uninstall.
    """
    super(CAInstance, self).stop_tracking_certificates(False)

    # stop tracking lightweight CA signing certs
    for request_id in certmonger.get_requests_for_dir(self.nss_db):
        nickname = certmonger.get_request_value(request_id, 'key-nickname')
        if nickname.startswith('caSigningCert cert-pki-ca '):
            certmonger.stop_tracking(self.nss_db, nickname=nickname)

    # stop tracking the RA agent cert; log (don't raise) on failure
    try:
        certmonger.stop_tracking(certfile=paths.RA_AGENT_PEM)
    except RuntimeError as e:
        root_logger.error(
            "certmonger failed to stop tracking certificate: %s", e)

    services.knownservices.certmonger.stop()
def set_audit_renewal(self):
    """
    The default renewal time for the audit signing certificate is
    six months rather than two years. Fix it. This is BZ 843979.

    Return True when the profile was updated, False otherwise.
    """
    # Check the default validity period of the audit signing cert
    # and set it to 2 years if it is 6 months.
    cert_range = installutils.get_directive(
        paths.CASIGNEDLOGCERT_CFG,
        'policyset.caLogSigningSet.2.default.params.range',
        separator='='
    )
    self.log.debug(
        'caSignedLogCert.cfg profile validity range is %s', cert_range)
    if cert_range != "180":
        return False

    # bump both the default and the constraint to 720 days
    for directive in ('policyset.caLogSigningSet.2.default.params.range',
                      'policyset.caLogSigningSet.2.constraint.params.range'):
        installutils.set_directive(
            paths.CASIGNEDLOGCERT_CFG,
            directive,
            '720',
            quotes=False,
            separator='='
        )
    self.log.debug(
        'updated caSignedLogCert.cfg profile validity range to 720')
    return True
def is_renewal_master(self, fqdn=None):
    """Return True when *fqdn* (default: this host) is the CA renewal
    master, i.e. its CA master entry carries caRenewalMaster."""
    host = fqdn if fqdn is not None else api.env.host

    master_dn = DN(('cn', 'CA'), ('cn', host), ('cn', 'masters'),
                   ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
    try:
        api.Backend.ldap2.get_entries(
            base_dn=master_dn,
            filter='(ipaConfigString=caRenewalMaster)',
            attrs_list=[])
    except errors.NotFound:
        return False
    return True
def set_renewal_master(self, fqdn=None):
    """Make *fqdn* (default: this host) the sole CA renewal master.

    Strips the 'caRenewalMaster' ipaConfigString from every other CA
    master entry and adds it to the target host's entry if missing.
    """
    if fqdn is None:
        fqdn = api.env.host

    base_dn = DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'),
                 api.env.basedn)
    # every CA master currently flagged as renewal master
    filter = '(&(cn=CA)(ipaConfigString=caRenewalMaster))'
    try:
        entries = api.Backend.ldap2.get_entries(
            base_dn=base_dn, filter=filter, attrs_list=['ipaConfigString'])
    except errors.NotFound:
        entries = []

    dn = DN(('cn', 'CA'), ('cn', fqdn), base_dn)
    master_entry = api.Backend.ldap2.get_entry(dn, ['ipaConfigString'])

    for entry in entries:
        # target already carries the flag: mark it done (None) and keep it
        if master_entry is not None and entry.dn == master_entry.dn:
            master_entry = None
            continue

        # remove the flag (case-insensitive match) from other masters
        entry['ipaConfigString'] = [x for x in entry['ipaConfigString']
                                    if x.lower() != 'carenewalmaster']
        api.Backend.ldap2.update_entry(entry)

    # still not None means the target didn't have the flag yet
    if master_entry is not None:
        master_entry['ipaConfigString'].append('caRenewalMaster')
        api.Backend.ldap2.update_entry(master_entry)
@staticmethod
def update_cert_config(nickname, cert):
    """
    When renewing a CA subsystem certificate the configuration file
    needs to get the new certificate as well.

    nickname is one of the known nicknames.
    cert is a DER-encoded certificate.
    """
    # The cert directive to update per nickname
    directives = {'auditSigningCert cert-pki-ca': 'ca.audit_signing.cert',
                  'ocspSigningCert cert-pki-ca': 'ca.ocsp_signing.cert',
                  'caSigningCert cert-pki-ca': 'ca.signing.cert',
                  'subsystemCert cert-pki-ca': 'ca.subsystem.cert',
                  'Server-Cert cert-pki-ca': 'ca.sslserver.cert'}

    try:
        # back up CS.cfg first; a failed backup is logged to syslog but
        # does not prevent the update
        backup_config()
    except Exception as e:
        syslog.syslog(syslog.LOG_ERR, "Failed to backup CS.cfg: %s" % e)

    # unknown nicknames are silently ignored
    if nickname in directives:
        DogtagInstance.update_cert_cs_cfg(
            directives[nickname], cert, paths.CA_CS_CFG_PATH)
def __create_ds_db(self):
    '''
    Create PKI database. Is needed when pkispawn option
    pki_ds_create_new_db is set to False

    Adds the o=ipaca mapping tree entry and the matching ldbm backend
    instance entry to the directory server configuration.
    '''
    backend = 'ipaca'
    suffix = DN(('o', 'ipaca'))

    # replication: mapping tree entry pointing the suffix at the backend
    dn = DN(('cn', str(suffix)), ('cn', 'mapping tree'), ('cn', 'config'))
    entry = api.Backend.ldap2.make_entry(
        dn,
        objectclass=["top", "extensibleObject", "nsMappingTree"],
        cn=[suffix],
    )
    entry['nsslapd-state'] = ['Backend']
    entry['nsslapd-backend'] = [backend]
    api.Backend.ldap2.add_entry(entry)

    # database: the ldbm backend instance itself
    dn = DN(('cn', 'ipaca'), ('cn', 'ldbm database'), ('cn', 'plugins'),
            ('cn', 'config'))
    entry = api.Backend.ldap2.make_entry(
        dn,
        objectclass=["top", "extensibleObject", "nsBackendInstance"],
        cn=[backend],
    )
    entry['nsslapd-suffix'] = [suffix]
    api.Backend.ldap2.add_entry(entry)
def __setup_replication(self):
    """Set up o=ipaca replication from the master, then enable topology."""
    manager = replication.CAReplicationManager(self.realm, self.fqdn)
    manager.setup_cs_replication(self.master_host)

    # Activate Topology for o=ipaca segments
    self.__update_topology()
def __enable_instance(self):
    """Register the CA service in LDAP; the first (non-clone) master is
    flagged as the renewal master."""
    config = [] if self.clone else ['caRenewalMaster']
    basedn = ipautil.realm_to_suffix(self.realm)
    self.ldap_enable('CA', self.fqdn, None, basedn, config)
def setup_lightweight_ca_key_retrieval(self):
    """Configure Dogtag to retrieve lightweight CA keys via IPA.

    Sets up the Kerberos principal/keytab and Custodia keys used by the
    key retriever, then points Dogtag at the ipa-pki-retrieve-key helper.
    Idempotent: guarded by the 'setup_lwca_key_retrieval' upgrade state.
    """
    if sysupgrade.get_upgrade_state('dogtag', 'setup_lwca_key_retrieval'):
        return

    root_logger.info('[Set up lightweight CA key retrieval]')

    self.__setup_lightweight_ca_key_retrieval_kerberos()
    self.__setup_lightweight_ca_key_retrieval_custodia()

    root_logger.info('Configuring key retriever')
    directives = [
        ('features.authority.keyRetrieverClass',
            'com.netscape.ca.ExternalProcessKeyRetriever'),
        ('features.authority.keyRetrieverConfig.executable',
            '/usr/libexec/ipa/ipa-pki-retrieve-key'),
    ]
    for k, v in directives:
        installutils.set_directive(
            paths.CA_CS_CFG_PATH, k, v, quotes=False, separator='=')

    # BUG FIX: the state key written here was previously misspelled
    # ('setup_lwca_key_retieval'), so the guard above never matched and
    # this setup re-ran on every invocation. Write the same key we read.
    sysupgrade.set_upgrade_state('dogtag', 'setup_lwca_key_retrieval', True)
def __setup_lightweight_ca_key_retrieval_kerberos(self):
    """Create the key-retrieval service principal and fetch its keytab."""
    pent = pwd.getpwnam(self.service_user)

    root_logger.info('Creating principal')
    installutils.kadmin_addprinc(self.principal)
    self.suffix = ipautil.realm_to_suffix(self.realm)
    self.move_service(self.principal)

    root_logger.info('Retrieving keytab')
    installutils.create_keytab(self.keytab, self.principal)
    # keytab readable/writable only by the pki service user
    os.chmod(self.keytab, 0o600)
    os.chown(self.keytab, pent.pw_uid, pent.pw_gid)
def __setup_lightweight_ca_key_retrieval_custodia(self):
    """Create the Custodia LDAP containers and server key pair used for
    lightweight CA key retrieval."""
    pent = pwd.getpwnam(self.service_user)

    root_logger.info('Creating Custodia keys')

    # cn=dogtag,cn=custodia,cn=ipa,cn=etc,$basedn containers
    custodia_basedn = DN(
        ('cn', 'custodia'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
    ensure_entry(
        custodia_basedn,
        objectclass=['top', 'nsContainer'],
        cn=['custodia'],
    )
    ensure_entry(
        DN(('cn', 'dogtag'), custodia_basedn),
        objectclass=['top', 'nsContainer'],
        cn=['dogtag'],
    )

    # generate server keys into a file only the pki user can read
    keyfile = os.path.join(paths.PKI_TOMCAT, self.service_prefix + '.keys')
    keystore = IPAKEMKeys({'server_keys': keyfile})
    keystore.generate_keys(self.service_prefix)
    os.chmod(keyfile, 0o600)
    os.chown(keyfile, pent.pw_uid, pent.pw_gid)
def __add_lightweight_ca_tracking_requests(self):
    """Find lightweight CA entries in LDAP and add renewal tracking."""
    try:
        entries = api.Backend.ldap2.get_entries(
            base_dn=api.env.basedn,
            filter='(objectclass=ipaca)',
            attrs_list=['cn', 'ipacaid'],
        )
        add_lightweight_ca_tracking_requests(self.log, entries)
    except errors.NotFound:
        # shouldn't happen, but don't fail if it does
        root_logger.warning(
            "Did not find any lightweight CAs; nothing to track")
def __dogtag10_migration(self):
    """Apply the Dogtag 9 -> 10 LDAP migration update file."""
    sub_dict = {'SUFFIX': api.env.basedn, 'FQDN': self.fqdn}
    updater = ldapupdate.LDAPUpdate(ldapi=True, sub_dict=sub_dict)
    update_file = os.path.join(paths.UPDATES_DIR,
                               '50-dogtag10-migration.update')
    updater.update([update_file])
def replica_ca_install_check(config, promote):
    """Verify the master's CA directory server has the IPA schema.

    Exits the process when the schema is missing; re-raises any
    connection/schema-read error.  Skipped entirely on promotion.
    """
    if promote:
        return

    # Check if the master has the necessary schema in its CA instance
    ca_ldap_url = 'ldap://%s:%s' % (config.ca_host_name, config.ca_ds_port)
    objectclass = 'ipaObject'
    root_logger.debug('Checking if IPA schema is present in %s', ca_ldap_url)
    try:
        with ipaldap.LDAPClient(
                ca_ldap_url,
                start_tls=True,
                cacert=config.dir + "/ca.crt",
                force_schema_updates=False) as connection:
            connection.simple_bind(bind_dn=ipaldap.DIRMAN_DN,
                                   bind_password=config.dirman_password)
            rschema = connection.schema
            # presence of the ipaObject objectclass is the schema probe
            result = rschema.get_obj(ldap.schema.models.ObjectClass,
                                     objectclass)
    except Exception:
        root_logger.critical(
            'CA DS schema check failed. Make sure the PKI service on the '
            'remote master is operational.')
        raise
    if result:
        root_logger.debug('Check OK')
    else:
        root_logger.critical(
            'The master CA directory server does not have necessary schema. '
            'Please run copy-schema-to-ca.py on all CA masters.\n'
            'If you are certain that this is a false positive, use '
            '--skip-schema-check.')
        exit('IPA schema missing on master CA directory server')
def backup_config():
    """
    Create a backup copy of CS.cfg

    Refuses to copy while pki-tomcat is running, since Dogtag may still
    be writing the file.
    """
    cs_cfg = paths.CA_CS_CFG_PATH
    if services.knownservices['pki_tomcatd'].is_running('pki-tomcat'):
        raise RuntimeError(
            "Dogtag must be stopped when creating backup of %s" % cs_cfg)
    shutil.copy(cs_cfg, cs_cfg + '.ipabkp')
def __update_entry_from_cert(make_filter, make_entry, dercert):
    """
    Given a certificate and functions to make a filter based on the
    cert, and make a new entry based on the cert, update database
    accordingly.

    ``make_filter``
        function that takes a certificate in DER format and
        returns an LDAP search filter

    ``make_entry``
        function that takes a certificate in DER format and an
        LDAP entry, and returns the new state of the LDAP entry.
        Return the input unchanged to skip an entry.

    ``dercert``
        An X509.3 certificate in DER format

    Logging is done via syslog.

    Return ``True`` if all updates were successful (zero updates is
    vacuously successful) otherwise ``False``.
    """
    base_dn = DN(('o', 'ipaca'))
    attempts = 0
    server_id = installutils.realm_to_serverid(api.env.realm)
    dogtag_uri = 'ldapi://%%2fvar%%2frun%%2fslapd-%s.socket' % server_id
    updated = False

    # Retry loop: a NetworkError sleeps 30s and retries (max 10 attempts);
    # any other unexpected exception aborts immediately.
    while attempts < 10:
        conn = None
        try:
            conn = ldap2.ldap2(api, ldap_uri=dogtag_uri)
            conn.connect(autobind=True)

            db_filter = make_filter(dercert)
            try:
                entries = conn.get_entries(base_dn, conn.SCOPE_SUBTREE, db_filter)
            except errors.NotFound:
                # no matching entries counts as success
                entries = []

            updated = True

            for entry in entries:
                syslog.syslog(
                    syslog.LOG_NOTICE, 'Updating entry %s' % str(entry.dn))

                try:
                    entry = make_entry(dercert, entry)
                    conn.update_entry(entry)
                except errors.EmptyModlist:
                    # entry already up to date
                    pass
                except Exception as e:
                    # one failed entry marks the whole run as failed
                    syslog.syslog(
                        syslog.LOG_ERR,
                        'Updating entry %s failed: %s' % (str(entry.dn), e))
                    updated = False

            break
        except errors.NetworkError:
            syslog.syslog(
                syslog.LOG_ERR,
                'Connection to %s failed, sleeping 30s' % dogtag_uri)
            time.sleep(30)
            attempts += 1
        except Exception as e:
            syslog.syslog(syslog.LOG_ERR, 'Caught unhandled exception: %s' % e)
            break
        finally:
            if conn is not None and conn.isconnected():
                conn.disconnect()

    if not updated:
        syslog.syslog(syslog.LOG_ERR, 'Update failed.')
        return False

    return True
def update_people_entry(dercert):
    """
    Update the userCertificate for an entry in the dogtag ou=People. This
    is needed when a certificate is renewed.
    """
    def make_filter(dercert):
        # match the inetOrgPerson whose description encodes this cert's
        # issuer and subject
        cert = x509.load_certificate(dercert, datatype=x509.DER)
        subject = DN(cert.subject)
        issuer = DN(cert.issuer)
        return ldap2.ldap2.combine_filters(
            [
                ldap2.ldap2.make_filter({'objectClass': 'inetOrgPerson'}),
                ldap2.ldap2.make_filter(
                    {'description': ';%s;%s' % (issuer, subject)},
                    exact=False, trailing_wildcard=False),
            ],
            ldap2.ldap2.MATCH_ALL)

    def make_entry(dercert, entry):
        cert = x509.load_certificate(dercert, datatype=x509.DER)
        serial_number = cert.serial
        subject = DN(cert.subject)
        issuer = DN(cert.issuer)
        # append the new cert and refresh the Dogtag description attribute
        # (format: version;serial;issuer;subject)
        entry['usercertificate'].append(dercert)
        entry['description'] = '2;%d;%s;%s' % (serial_number, issuer, subject)
        return entry

    return __update_entry_from_cert(make_filter, make_entry, dercert)
def update_authority_entry(dercert):
    """
    Find the authority entry for the given cert, and update the
    serial number to match the given cert.
    """
    def make_filter(dercert):
        # match the authority entry whose authoritydn equals the cert subject
        cert = x509.load_certificate(dercert, datatype=x509.DER)
        subject = str(DN(cert.subject))
        return ldap2.ldap2.make_filter(
            dict(objectclass='authority', authoritydn=subject),
            rules=ldap2.ldap2.MATCH_ALL,
        )

    def make_entry(dercert, entry):
        cert = x509.load_certificate(dercert, datatype=x509.DER)
        # NOTE(review): update_people_entry uses cert.serial while this
        # uses cert.serial_number -- confirm both attributes exist on the
        # x509 wrapper and mean the same value.
        entry['authoritySerial'] = cert.serial_number
        return entry

    return __update_entry_from_cert(make_filter, make_entry, dercert)
def ensure_ldap_profiles_container():
    """Ensure the ou=certificateProfiles container exists in o=ipaca.

    Return ``True`` if the entry was added, ``False`` if it already
    existed -- consistent with ``ensure_lightweight_cas_container``.
    (Previously the result of ``ensure_entry`` was discarded.)
    """
    return ensure_entry(
        DN(('ou', 'certificateProfiles'), ('ou', 'ca'), ('o', 'ipaca')),
        objectclass=['top', 'organizationalUnit'],
        ou=['certificateProfiles'],
    )
def ensure_lightweight_cas_container():
    """Ensure the ou=authorities container (for lightweight CAs) exists.

    Return ``True`` when the entry was added, ``False`` otherwise.
    """
    container_dn = DN(('ou', 'authorities'), ('ou', 'ca'), ('o', 'ipaca'))
    return ensure_entry(
        container_dn,
        objectclass=['top', 'organizationalUnit'],
        ou=['authorities'],
    )
def ensure_entry(dn, **attrs):
    """Ensure an entry exists.

    If an entry with the given DN already exists, return ``False``,
    otherwise add the entry and return ``True``.
    """
    ldap_uri = 'ldapi://%%2fvar%%2frun%%2fslapd-%s.socket' % (
        installutils.realm_to_serverid(api.env.realm))
    conn = ldap2.ldap2(api, ldap_uri=ldap_uri)
    if not conn.isconnected():
        conn.connect(autobind=True)

    try:
        conn.get_entry(dn)
        return False
    except errors.NotFound:
        # entry doesn't exist; add it
        conn.add_entry(conn.make_entry(dn, **attrs))
        return True
    finally:
        conn.disconnect()
def configure_profiles_acl():
    """Allow the Certificate Manager Agents group to modify profiles."""
    rules = [
        'certServer.profile.configuration:read,modify:allow (read,modify) '
        'group="Certificate Manager Agents":'
        'Certificate Manager agents may modify (create/update/delete) and read profiles',
        'certServer.ca.account:login,logout:allow (login,logout) '
        'user="anybody":Anybody can login and logout',
    ]
    return __add_acls(rules)
def configure_lightweight_ca_acls():
    """Allow Certificate Manager Agents to manage lightweight CAs.

    Return ``True`` if any ACLs were added, ``False`` otherwise.
    """
    new_rules = [
        'certServer.ca.authorities:list,read'
        ':allow (list,read) user="anybody"'
        ':Anybody may list and read lightweight authorities',

        'certServer.ca.authorities:create,modify'
        ':allow (create,modify) group="Administrators"'
        ':Administrators may create and modify lightweight authorities',

        'certServer.ca.authorities:delete'
        ':allow (delete) group="Administrators"'
        ':Administrators may delete lightweight authorities',

        'certServer.ca.authorities:create,modify,delete'
        ':allow (create,modify,delete) group="Certificate Manager Agents"'
        ':Certificate Manager Agents may manage lightweight authorities',
    ]
    return __add_acls(new_rules)
def __add_acls(new_rules):
    """Add the given Dogtag ACLs.

    ``new_rules``
        Iterable of ACL rule values to add

    Return ``True`` if any ACLs were added otherwise ``False``.
    """
    acl_dn = DN(('cn', 'aclResources'), ('o', 'ipaca'))
    conn = api.Backend.ldap2

    entry = conn.get_entry(acl_dn)
    current = entry.get('resourceACLS', [])
    missing = [rule for rule in new_rules if rule not in current]
    if not missing:
        return False

    current.extend(missing)
    conn.update_entry(entry)
    return True
def __get_profile_config(profile_id):
    """Render the bundled profile template for *profile_id* and return
    the resulting profile configuration text."""
    template = '/usr/share/ipa/profiles/{}.cfg'.format(profile_id)
    substitutions = dict(
        DOMAIN=ipautil.format_netloc(api.env.domain),
        IPA_CA_RECORD=ipalib.constants.IPA_CA_RECORD,
        CRL_ISSUER='CN=Certificate Authority,o=ipaca',
        SUBJECT_DN_O=dsinstance.DsInstance().find_subject_base(),
    )
    return ipautil.template_file(template, substitutions)
def import_included_profiles():
    """Import the IPA-bundled certificate profiles into LDAP and Dogtag.

    Profiles already present in LDAP are skipped; new ones get an LDAP
    certprofile entry and are pushed into Dogtag (overwriting any Dogtag
    profile of the same name).
    """
    server_id = installutils.realm_to_serverid(api.env.realm)
    dogtag_uri = 'ldapi://%%2fvar%%2frun%%2fslapd-%s.socket' % server_id
    conn = ldap2.ldap2(api, ldap_uri=dogtag_uri)
    if not conn.isconnected():
        conn.connect(autobind=True)

    # parent containers for certprofile entries
    ensure_entry(
        DN(('cn', 'ca'), api.env.basedn),
        objectclass=['top', 'nsContainer'],
        cn=['ca'],
    )
    ensure_entry(
        DN(api.env.container_certprofile, api.env.basedn),
        objectclass=['top', 'nsContainer'],
        cn=['certprofiles'],
    )

    # talk to Dogtag on its HTTPS port for profile creation
    api.Backend.ra_certprofile.override_port = 8443

    for (profile_id, desc, store_issued) in dogtag.INCLUDED_PROFILES:
        dn = DN(('cn', profile_id),
                api.env.container_certprofile, api.env.basedn)
        try:
            conn.get_entry(dn)
            continue  # the profile is present
        except errors.NotFound:
            # profile not found; add it
            entry = conn.make_entry(
                dn,
                objectclass=['ipacertprofile'],
                cn=[profile_id],
                description=[desc],
                ipacertprofilestoreissued=['TRUE' if store_issued else 'FALSE'],
            )
            conn.add_entry(entry)

            # Create the profile, replacing any existing profile of same name
            profile_data = __get_profile_config(profile_id)
            _create_dogtag_profile(profile_id, profile_data, overwrite=True)
            root_logger.info("Imported profile '%s'", profile_id)

    api.Backend.ra_certprofile.override_port = None
    conn.disconnect()
def repair_profile_caIPAserviceCert():
    """
    A regression caused replica installation to replace the FreeIPA
    version of caIPAserviceCert with the version shipped by Dogtag.

    This function detects and repairs occurrences of this problem.
    """
    api.Backend.ra_certprofile.override_port = 8443
    profile_id = 'caIPAserviceCert'
    with api.Backend.ra_certprofile as profile_api:
        try:
            cur_config = profile_api.read_profile(profile_id).splitlines()
        except errors.RemoteRetrieveError:
            # no profile there to check/repair
            api.Backend.ra_certprofile.override_port = None
            return

    # Tell-tale lines that only appear in the Dogtag-shipped profile;
    # repair only when BOTH are present.
    indicators = [
        "policyset.serverCertSet.1.default.params.name="
        "CN=$request.req_subject_name.cn$, OU=pki-ipa, O=IPA ",
        "policyset.serverCertSet.9.default.params.crlDistPointsPointName_0="
        "https://ipa.example.com/ipa/crl/MasterCRL.bin",
    ]
    need_repair = all(l in cur_config for l in indicators)

    if need_repair:
        root_logger.debug(
            "Detected that profile '{}' has been replaced with "
            "incorrect version; begin repair.".format(profile_id))
        _create_dogtag_profile(
            profile_id, __get_profile_config(profile_id), overwrite=True)
        root_logger.debug("Repair of profile '{}' complete.".format(profile_id))

    api.Backend.ra_certprofile.override_port = None
def migrate_profiles_to_ldap():
    """Migrate profiles from filesystem to LDAP.

    This must be run *after* switching to the LDAPProfileSubsystem
    and restarting the CA.

    The profile might already exist, e.g. if a replica was already
    upgraded, so this case is ignored.
    """
    ensure_ldap_profiles_container()
    api.Backend.ra_certprofile.override_port = 8443

    with open(paths.CA_CS_CFG_PATH) as f:
        cs_cfg = f.read()
    match = re.search(r'^profile\.list=(\S*)', cs_cfg, re.MULTILINE)
    profile_ids = match.group(1).split(',')

    for profile_id in profile_ids:
        # locate the profile's config file in CS.cfg
        match = re.search(
            r'^profile\.{}\.config=(\S*)'.format(profile_id),
            cs_cfg, re.MULTILINE
        )
        if match is None:
            root_logger.info("No file for profile '%s'; skipping", profile_id)
            continue
        filename = match.group(1)

        # locate the profile's implementation class id
        match = re.search(
            r'^profile\.{}\.class_id=(\S*)'.format(profile_id),
            cs_cfg, re.MULTILINE
        )
        if match is None:
            root_logger.info("No class_id for profile '%s'; skipping", profile_id)
            continue
        class_id = match.group(1)

        with open(filename) as f:
            profile_data = f.read()
            # BUG FIX: the original checked profile_data[-1] != '\n',
            # which raises IndexError when the profile file is empty;
            # endswith() handles the empty string safely.
            if not profile_data.endswith('\n'):
                profile_data += '\n'
            profile_data += 'profileId={}\n'.format(profile_id)
            profile_data += 'classId={}\n'.format(class_id)

        # Import the profile, but do not replace it if it already exists.
        # This prevents replicas from replacing IPA-managed profiles with
        # Dogtag default profiles of same name.
        #
        _create_dogtag_profile(profile_id, profile_data, overwrite=False)

    api.Backend.ra_certprofile.override_port = None
def _create_dogtag_profile(profile_id, profile_data, overwrite):
    """Import a profile into Dogtag and make sure it is enabled.

    ``overwrite``
        When ``True`` and the profile already exists, disable it, update
        its contents, then re-enable it.  When ``False`` an existing
        profile is left untouched.
    """
    with api.Backend.ra_certprofile as profile_api:
        # import the profile
        try:
            profile_api.create_profile(profile_data)
            root_logger.info("Profile '%s' successfully migrated to LDAP",
                             profile_id)
        except errors.RemoteRetrieveError as e:
            root_logger.debug("Error migrating '{}': {}".format(
                profile_id, e))

            # profile already exists
            if overwrite:
                try:
                    profile_api.disable_profile(profile_id)
                except errors.RemoteRetrieveError:
                    # BUG FIX: the '%s' placeholder previously had no
                    # argument, so the literal '%s' was logged.
                    root_logger.debug(
                        "Failed to disable profile '%s' "
                        "(it is probably already disabled)", profile_id)
                profile_api.update_profile(profile_id, profile_data)

        # enable the profile
        try:
            profile_api.enable_profile(profile_id)
        except errors.RemoteRetrieveError:
            # BUG FIX: same missing-argument problem as above.
            root_logger.debug(
                "Failed to enable profile '%s' "
                "(it is probably already enabled)", profile_id)
def ensure_ipa_authority_entry():
    """Add the IPA CA ipaCa object if missing.

    This requires the "host authority" authority entry to have been
    created, which Dogtag will do automatically upon startup, if the
    ou=authorities,ou=ca,o=ipaca container exists.  Therefore, the
    ``ensure_lightweight_cas_container`` function must be executed,
    and Dogtag restarted, before executing this function.
    """
    # find out authority id, issuer DN and subject DN of IPA CA
    api.Backend.ra_lightweight_ca.override_port = 8443
    with api.Backend.ra_lightweight_ca as lwca:
        data = lwca.read_ca('host-authority')
        attrs = dict(
            ipacaid=data['id'],
            ipacaissuerdn=data['issuerDN'],
            ipacasubjectdn=data['dn'],
        )
    api.Backend.ra_lightweight_ca.override_port = None

    # parent container first, then the IPA CA entry itself
    ensure_entry(
        DN(api.env.container_ca, api.env.basedn),
        objectclass=['top', 'nsContainer'],
        cn=['cas'],
    )
    ensure_entry(
        DN(('cn', ipalib.constants.IPA_CA_CN), api.env.container_ca, api.env.basedn),
        objectclass=['top', 'ipaca'],
        cn=[ipalib.constants.IPA_CA_CN],
        description=['IPA CA'],
        **attrs
    )
def ensure_default_caacl():
    """Add the default CA ACL if missing."""
    ensure_entry(
        DN(('cn', 'ca'), api.env.basedn),
        objectclass=['top', 'nsContainer'],
        cn=['ca'],
    )
    # NOTE(review): cn=['certprofiles'] looks copy-pasted from the
    # certprofile container setup; it should presumably match the RDN of
    # api.env.container_caacl -- confirm against the container definition.
    ensure_entry(
        DN(api.env.container_caacl, api.env.basedn),
        objectclass=['top', 'nsContainer'],
        cn=['certprofiles'],
    )

    # create the permissive default ACL only when no ACLs exist yet
    if not api.Command.caacl_find()['result']:
        api.Command.caacl_add(u'hosts_services_caIPAserviceCert',
                              hostcategory=u'all', servicecategory=u'all')
        api.Command.caacl_add_profile(u'hosts_services_caIPAserviceCert',
                                      certprofile=(u'caIPAserviceCert',))
def add_lightweight_ca_tracking_requests(logger, lwcas):
    """Add tracking requests for the given lightweight CAs.

    The entries must have the 'cn' and 'ipacaid' attributes.

    The IPA CA, if present, is skipped.
    """
    for entry in lwcas:
        if ipalib.constants.IPA_CA_CN in entry['cn']:
            continue

        # nickname is "<IPA CA nickname> <authority id>"
        nickname = "{} {}".format(
            ipalib.constants.IPA_CA_NICKNAME,
            entry['ipacaid'][0])
        criteria = {
            'cert-database': paths.PKI_TOMCAT_ALIAS_DIR,
            'cert-nickname': nickname,
            'ca-name': ipalib.constants.RENEWAL_CA_NAME,
        }
        request_id = certmonger.get_request_id(criteria)
        if request_id is None:
            # not yet tracked; start tracking and switch to the
            # ipaCACertRenewal profile
            try:
                certmonger.start_tracking(
                    certpath=paths.PKI_TOMCAT_ALIAS_DIR,
                    pin=certmonger.get_pin('internal'),
                    nickname=nickname,
                    ca=ipalib.constants.RENEWAL_CA_NAME,
                    pre_command='stop_pkicad',
                    post_command='renew_ca_cert "%s"' % nickname,
                )
                request_id = certmonger.get_request_id(criteria)
                certmonger.modify(request_id, profile='ipaCACertRenewal')
                logger.debug(
                    'Lightweight CA renewal: '
                    'added tracking request for "%s"', nickname)
            except RuntimeError as e:
                logger.error(
                    'Lightweight CA renewal: Certmonger failed to '
                    'start tracking certificate: %s', e)
        else:
            logger.debug(
                'Lightweight CA renewal: '
                'already tracking certificate "%s"', nickname)
def update_ipa_conf():
    """
    Update IPA configuration file to ensure that RA plugins are enabled and
    that CA host points to localhost
    """
    conf = RawConfigParser()
    conf.read(paths.IPA_DEFAULT_CONF)
    # enable the dogtag RA plugin
    for option, value in (('enable_ra', 'True'),
                          ('ra_plugin', 'dogtag'),
                          ('dogtag_version', '10')):
        conf.set('global', option, value)
    # dropping ca_host makes the CA default to localhost
    conf.remove_option('global', 'ca_host')
    with open(paths.IPA_DEFAULT_CONF, 'w') as f:
        conf.write(f)
if __name__ == "__main__":
    # Manual test driver: install a CA instance for the EXAMPLE.COM realm
    # with fixed hostname and passwords.
    standard_logging_setup("install.log")
    ds = dsinstance.DsInstance()

    ca = CAInstance("EXAMPLE.COM")
    ca.configure_instance("catest.example.com", "password", "password")
| gpl-3.0 |
akhilari7/pa-dude | lib/python2.7/site-packages/setuptools/command/build_ext.py | 314 | 11964 | from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from distutils.errors import DistutilsError
from distutils import log
import os
import sys
import itertools
from setuptools.extension import Library
try:
# Attempt to use Cython for building extensions, if available
from Cython.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
try:
# Python 2.7 or >=3.2
from sysconfig import _CONFIG_VARS
except ImportError:
from distutils.sysconfig import get_config_var
get_config_var("LDSHARED") # make sure _config_vars is initialized
del get_config_var
from distutils.sysconfig import _config_vars as _CONFIG_VARS
# Platform capability probing: decide whether shared-library "stubs" and
# runtime dynamic linking (RTLD) are available on this platform.
have_rtld = False
use_stubs = False
libtype = 'shared'

if sys.platform == "darwin":
    # macOS always uses stub loaders for dynamically linked extensions
    use_stubs = True
elif os.name != 'nt':
    try:
        # legacy 'dl' module (Python 2, some Unixes); absence disables stubs
        import dl
        use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
    except ImportError:
        pass

# include *s* in generated stub source only when RTLD_NOW is available
if_dl = lambda s: s if have_rtld else ''
class build_ext(_build_ext):
def run(self):
    """Build extensions in build directory, then copy if --inplace"""
    saved_inplace = self.inplace
    self.inplace = 0
    _build_ext.run(self)
    self.inplace = saved_inplace
    if saved_inplace:
        self.copy_extensions_to_source()
def copy_extensions_to_source(self):
    """Copy each built extension from build_lib into its source package
    directory (used for --inplace builds)."""
    build_py = self.get_finalized_command('build_py')
    for ext in self.extensions:
        fullname = self.get_ext_fullname(ext.name)
        filename = self.get_ext_filename(fullname)
        # map the dotted module path to the package's source directory
        modpath = fullname.split('.')
        package = '.'.join(modpath[:-1])
        package_dir = build_py.get_package_dir(package)
        dest_filename = os.path.join(package_dir,
                                     os.path.basename(filename))
        src_filename = os.path.join(self.build_lib, filename)

        # Always copy, even if source is older than destination, to ensure
        # that the right extensions for the current Python/platform are
        # used.
        copy_file(
            src_filename, dest_filename, verbose=self.verbose,
            dry_run=self.dry_run
        )
        if ext._needs_stub:
            self.write_stub(package_dir or os.curdir, ext, True)
def get_ext_filename(self, fullname):
    """Return the output filename for *fullname*, adjusted for
    setuptools Library objects and stub-loaded extensions."""
    filename = _build_ext.get_ext_filename(self, fullname)
    if fullname in self.ext_map:
        ext = self.ext_map[fullname]
        if isinstance(ext, Library):
            # shared libraries use the compiler's library naming scheme
            fn, ext = os.path.splitext(filename)
            return self.shlib_compiler.library_filename(fn, libtype)
        elif use_stubs and ext._links_to_dynamic:
            # real extension gets a 'dl-' prefix; a stub takes its name
            d, fn = os.path.split(filename)
            return os.path.join(d, 'dl-' + fn)
    return filename
def initialize_options(self):
    """Initialize distutils options plus shared-library bookkeeping."""
    _build_ext.initialize_options(self)
    # extra state for setuptools Library (shared lib) support
    self.ext_map = {}
    self.shlibs = []
    self.shlib_compiler = None
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext, Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = self.shlibs and self.links_to_dynamic(ext) or False
ns = ltd and use_stubs and not isinstance(ext, Library)
ext._links_to_dynamic = ltd
ext._needs_stub = ns
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib, filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
if sys.platform == "darwin":
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = (
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext, Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self, ext)
def build_extension(self, ext):
ext._convert_pyx_sources_to_lang()
_compiler = self.compiler
try:
if isinstance(ext, Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self, ext)
if ext._needs_stub:
cmd = self.get_finalized_command('build_py').build_lib
self.write_stub(cmd, ext)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
return any(pkg + libname in libnames for libname in ext.libraries)
def get_outputs(self):
return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
def __get_stubs_outputs(self):
# assemble the base name for each extension that needs a stub
ns_ext_bases = (
os.path.join(self.build_lib, *ext._full_name.split('.'))
for ext in self.extensions
if ext._needs_stub
)
# pair each base with the extension
pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
return list(base + fnext for base, fnext in pairs)
def __get_output_extensions(self):
yield '.py'
yield '.pyc'
if self.get_finalized_command('build_py').optimize:
yield '.pyo'
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s", ext._full_name,
output_dir)
stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
'.py')
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file + " already exists! Please delete.")
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
'\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp" + if_dl(", dl"),
" __file__ = pkg_resources.resource_filename"
"(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
])
)
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name == 'nt':
    # Build shared libraries
    #
    def link_shared_object(
            self, objects, output_libname, output_dir=None, libraries=None,
            library_dirs=None, runtime_library_dirs=None, export_symbols=None,
            debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
            target_lang=None):
        """Bound onto the shlib compiler (see setup_shlib_compiler):
        forward to self.link() so a true shared library is produced."""
        self.link(
            self.SHARED_LIBRARY, objects, output_libname,
            output_dir, libraries, library_dirs, runtime_library_dirs,
            export_symbols, debug, extra_preargs, extra_postargs,
            build_temp, target_lang
        )
else:
    # Build static libraries everywhere else
    libtype = 'static'

    def link_shared_object(
            self, objects, output_libname, output_dir=None, libraries=None,
            library_dirs=None, runtime_library_dirs=None, export_symbols=None,
            debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
            target_lang=None):
        """Bound onto the shlib compiler: fall back to creating a static
        library, since dynamic linking is not usable on this platform."""
        # XXX we need to either disallow these attrs on Library instances,
        # or warn/abort here if set, or something...
        # libraries=None, library_dirs=None, runtime_library_dirs=None,
        # export_symbols=None, extra_preargs=None, extra_postargs=None,
        # build_temp=None

        assert output_dir is None  # distutils build_ext doesn't pass this
        output_dir, filename = os.path.split(output_libname)
        basename, ext = os.path.splitext(filename)
        if self.library_filename("x").startswith('lib'):
            # strip 'lib' prefix; this is kludgy if some platform uses
            # a different prefix
            basename = basename[3:]

        self.create_static_lib(
            objects, basename, output_dir, debug, target_lang
        )
| mit |
psychopy/versions | psychopy/event.py | 1 | 47437 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""To handle input from keyboard, mouse and joystick (joysticks require
pygame to be installed).
See demo_mouse.py and i{demo_joystick.py} for examples
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
# 01/2011 modified by Dave Britton to get mouse event timing
from __future__ import absolute_import, division, print_function

from past.builtins import basestring
from builtins import str
from builtins import object

import sys
import string
import copy

import numpy

from collections import namedtuple, OrderedDict
try:
    # Python 3.3+: the ABCs live in collections.abc; the compatibility
    # alias in 'collections' was removed in Python 3.10, so import from
    # the canonical location first.
    from collections.abc import MutableMapping
except ImportError:
    # Python 2 fallback.
    from collections import MutableMapping

from psychopy.preferences import prefs
# try to import pyglet & pygame and hope the user has at least one of them!
try:
    from pygame import mouse, locals, joystick, display
    import pygame.key
    import pygame.event as evt
    havePygame = True
except ImportError:
    havePygame = False

try:
    import pyglet
    havePyglet = True
except ImportError:
    havePyglet = False

try:
    import glfw
    haveGLFW = True
except ImportError:
    haveGLFW = False

# Default to whichever backends imported successfully; usePygame may still
# be switched off later if a pygame display is never initialised.
if havePygame:
    usePygame = True  # will become false later if win not initialised
else:
    usePygame = False

if haveGLFW:
    useGLFW = True
else:
    useGLFW = False

if havePyglet:
    # get the default display
    # NOTE(review): lexicographic string comparison of versions is fragile
    # (e.g. '1.10' < '1.4' would be True) — works for the 1.x/2.x releases
    # pyglet actually shipped, but verify if pyglet versioning changes.
    if pyglet.version < '1.4':
        _default_display_ = pyglet.window.get_platform().get_default_display()
    else:
        _default_display_ = pyglet.canvas.get_display()
import psychopy.core
from psychopy.tools.monitorunittools import cm2pix, deg2pix, pix2cm, pix2deg
from psychopy import logging
from psychopy.constants import NOT_STARTED
if havePyglet or haveGLFW:
    # Module-level state shared by the pyglet/GLFW event callbacks below.
    # importing from mouse takes ~250ms, so do it now
    if havePyglet:
        from pyglet.window.mouse import LEFT, MIDDLE, RIGHT
        from pyglet.window.key import (
            MOD_SHIFT,
            MOD_CTRL,
            MOD_ALT,
            MOD_CAPSLOCK,
            MOD_NUMLOCK,
            MOD_WINDOWS,
            MOD_COMMAND,
            MOD_OPTION,
            MOD_SCROLLLOCK
        )

    # buffer of (keyname, modifiers, time) tuples consumed by getKeys()
    _keyBuffer = []
    # current pressed-state of [left, middle, right] buttons (0 or 1)
    mouseButtons = [0, 0, 0]
    # accumulated (x, y) scroll-wheel travel since last getWheelRel()
    mouseWheelRel = numpy.array([0.0, 0.0])
    # list of 3 clocks that are reset on mouse button presses
    mouseClick = [psychopy.core.Clock(), psychopy.core.Clock(),
                  psychopy.core.Clock()]
    # container for time elapsed from last reset of mouseClick[n] for any
    # button pressed
    mouseTimes = [0.0, 0.0, 0.0]
    # clock for tracking time of mouse movement, reset when mouse is moved,
    # reset on mouse motion:
    mouseMove = psychopy.core.Clock()
    # global eventThread
    # eventThread = _EventDispatchThread()
    # eventThread.start()

    if haveGLFW:
        # GLFW keycodes for special characters
        # (maps GLFW key constants to the key-name strings used elsewhere)
        _glfw_keycodes_ = {
            glfw.KEY_SPACE: 'space',
            glfw.KEY_ESCAPE: 'esc',
            glfw.KEY_ENTER: 'return',
            glfw.KEY_TAB: 'tab',
            glfw.KEY_BACKSPACE: 'backspace',
            glfw.KEY_INSERT: 'insert',
            glfw.KEY_DELETE: 'delete',
            glfw.KEY_RIGHT: 'right',
            glfw.KEY_LEFT: 'left',
            glfw.KEY_DOWN: 'down',
            glfw.KEY_UP: 'up',
            glfw.KEY_PAGE_UP: 'pageup',
            glfw.KEY_PAGE_DOWN: 'pagedn',
            glfw.KEY_HOME: 'home',
            glfw.KEY_END: 'end',
            glfw.KEY_CAPS_LOCK: 'capslock',
            glfw.KEY_SCROLL_LOCK: 'scrolllock',
            glfw.KEY_NUM_LOCK: 'numlock',
            glfw.KEY_PRINT_SCREEN: 'printscreen',
            glfw.KEY_PAUSE: 'pause',
            glfw.KEY_F1: 'f1',
            glfw.KEY_F2: 'f2',
            glfw.KEY_F3: 'f3',
            glfw.KEY_F4: 'f4',
            glfw.KEY_F5: 'f5',
            glfw.KEY_F6: 'f6',
            glfw.KEY_F7: 'f7',
            glfw.KEY_F8: 'f8',
            glfw.KEY_F9: 'f9',
            glfw.KEY_F10: 'f10',
            glfw.KEY_F11: 'f11',
            glfw.KEY_F12: 'f12',
            glfw.KEY_F13: 'f13',
            glfw.KEY_F14: 'f14',
            glfw.KEY_F15: 'f15',
            glfw.KEY_F16: 'f16',
            glfw.KEY_F17: 'f17',
            glfw.KEY_F18: 'f18',
            glfw.KEY_F19: 'f19',
            glfw.KEY_F20: 'f20',
            glfw.KEY_F21: 'f21',
            glfw.KEY_F22: 'f22',
            glfw.KEY_F23: 'f23',
            glfw.KEY_F24: 'f24',
            glfw.KEY_F25: 'f25',
        }

useText = False  # By default _onPygletText is not used
def _onPygletText(text, emulated=False):
    """Pyglet ``on_text`` handler; may also be called directly to emulate
    a text event.

    Only acts when ``_onPygletKey`` could not interpret the keypress and
    set the module-level ``useText`` flag — this happens for non-ASCII
    text characters (Greek, Arabic, Hebrew, etc.), for which pyglet hands
    back a useless 'user_key()' symbol. ``_onPygletText`` cannot handle
    *all* input on its own because it never fires for spacebar presses,
    etc.
    """
    global useText
    if not useText:
        # _onPygletKey has already consumed this input
        return
    useText = False  # guard against out-of-order event delivery
    # timestamp the press before doing anything else
    pressTime = psychopy.core.getTime()
    keySource = 'EmulatedKey' if emulated else 'KeyPress'
    _keyBuffer.append((text.lower(), lastModifiers, pressTime))
    logging.data("%s: %s" % (keySource, text))
def _onPygletKey(symbol, modifiers, emulated=False):
    """handler for on_key_press pyglet events; call directly to emulate a
    key press

    Appends a tuple with (keyname, timepressed) into the global _keyBuffer.
    The _keyBuffer can then be accessed as normal using event.getKeys(),
    .waitKeys(), clearBuffer(), etc.

    J Gray 2012: Emulated means add a key (symbol) to the buffer virtually.
    This is useful for fMRI_launchScan, and for unit testing (in testTheApp)
    Logging distinguishes EmulatedKey events from real Keypress events.
    For emulation, the key added to the buffer is unicode(symbol), instead of
    pyglet.window.key.symbol_string(symbol).

    S Mathot 2012: Implement fallback to _onPygletText
    5AM Solutions 2016: Add the keyboard modifier flags to the key buffer.
    M Cutone 2018: Added GLFW backend support.
    """
    global useText, lastModifiers
    keyTime = psychopy.core.getTime()  # capture when the key was pressed
    if emulated:
        if not isinstance(modifiers, int):
            msg = 'Modifiers must be passed as an integer value.'
            raise ValueError(msg)

        thisKey = str(symbol)
        keySource = 'EmulatedKey'
    else:
        thisKey = pyglet.window.key.symbol_string(
            symbol).lower()  # convert symbol into key string
        # convert pyglet symbols to pygame forms ( '_1'='1', 'NUM_1'='[1]')
        # 'user_key' indicates that Pyglet has been unable to make sense
        # out of the keypress. In that case, we fall back to _onPygletText
        # to handle the input.
        if 'user_key' in thisKey:
            useText = True
            # remember modifiers so _onPygletText can attach them
            lastModifiers = modifiers
            return
        useText = False
        # NOTE(review): lstrip() treats its argument as a *character set*,
        # so lstrip('NUM_') only removes leading 'N'/'U'/'M'/'_' characters;
        # since thisKey was lower-cased above, this effectively just drops
        # leading underscores (e.g. '_1' -> '1') — confirm this is intended.
        thisKey = thisKey.lstrip('_').lstrip('NUM_')
        # Pyglet 1.3.0 returns 'enter' when Return key (0xFF0D) is pressed
        # in Windows Python3. So we have to replace 'enter' with 'return'.
        if thisKey == 'enter':
            thisKey = 'return'
        keySource = 'Keypress'
    _keyBuffer.append((thisKey, modifiers, keyTime))  # tuple
    logging.data("%s: %s" % (keySource, thisKey))
    # give any registered global-key handler a chance to fire
    _process_global_event_key(thisKey, modifiers)
def _process_global_event_key(key, modifiers):
    """Dispatch a keypress to a registered global-key handler, if any.

    :Parameters:
        key : str
            lower-cased key name as produced by _onPygletKey
        modifiers : int
            pyglet modifier bitmask accompanying the press

    Returns the handler's return value, or None when no handler matches.
    """
    if modifiers == 0:
        modifier_keys = ()
    else:
        # modifiers_string() yields e.g. 'MOD_SHIFT|MOD_CTRL'; drop the
        # 'MOD_' prefix by slicing (matching modifiers_dict below).
        # NB str.strip('MOD_') would be wrong: it strips the *character
        # set* {M, O, D, _} from both ends, so e.g.
        # 'MOD_COMMAND'.strip('MOD_') yields 'COMMAN'.
        modifier_keys = [m[4:].lower() for m in
                         (pyglet.window.key.modifiers_string(modifiers)
                          .split('|'))]
        # Ignore Num Lock.
        if 'numlock' in modifier_keys:
            modifier_keys.remove('numlock')

    index_key = globalKeys._gen_index_key((key, modifier_keys))
    if index_key in globalKeys:
        event = globalKeys[index_key]
        logging.exp('Global key event: %s. Calling %s.'
                    % (event.name, event.func))
        return event.func(*event.func_args, **event.func_kwargs)
def _onPygletMousePress(x, y, button, modifiers, emulated=False):
    """Pyglet mouse-press handler: record pressed state and click timing.

    button left=1, middle=2, right=4;
    specify multiple buttons with | operator
    """
    global mouseButtons, mouseClick, mouseTimes
    now = psychopy.clock.getTime()
    label = 'Emulated' if emulated else ''
    # check each button bit; record state, time since clickReset, and a
    # human-readable label for the log
    for bit, idx, name in ((LEFT, 0, ' Left'),
                           (MIDDLE, 1, ' Middle'),
                           (RIGHT, 2, ' Right')):
        if button & bit:
            mouseButtons[idx] = 1
            mouseTimes[idx] = now - mouseClick[idx].getLastResetTime()
            label += name
    logging.data("Mouse: %s button down, pos=(%i,%i)" % (label.strip(), x, y))
def _onPygletMouseRelease(x, y, button, modifiers, emulated=False):
    """Pyglet mouse-release handler: clear pressed state for each button
    encoded in `button` (left=1, middle=2, right=4; OR-able)."""
    global mouseButtons
    if emulated:
        label = 'Emulated'
    else:
        label = ''
    if button & LEFT:
        mouseButtons[0] = 0
        label += ' Left'
    if button & MIDDLE:
        mouseButtons[1] = 0
        label += ' Middle'
    if button & RIGHT:
        mouseButtons[2] = 0
        label += ' Right'
    # strip() for consistency with _onPygletMousePress (previously the
    # raw label was logged, leaving a stray leading space)
    logging.data("Mouse: %s button up, pos=(%i,%i)" % (label.strip(), x, y))
def _onPygletMouseWheel(x, y, scroll_x, scroll_y):
    """Pyglet scroll-wheel handler: accumulate travel into the module
    counter read (and zeroed) by Mouse.getWheelRel()."""
    global mouseWheelRel
    # rebind rather than += so the previous array object is untouched
    mouseWheelRel = mouseWheelRel + numpy.array([scroll_x, scroll_y])
    logging.data("Mouse: wheel shift=(%i,%i), pos=(%i,%i)"
                 % (scroll_x, scroll_y, x, y))
# will this work? how are pyglet event handlers defined?
def _onPygletMouseMotion(x, y, dx, dy):
    """Pyglet mouse-motion handler: restart the global move clock.

    mouseMove is a core.Clock() normally; startMoveClock()/stopMoveClock()
    create and remove it, so it may be None — in that case do nothing.
    """
    global mouseMove
    if not mouseMove:
        return
    mouseMove.reset()
def startMoveClock():
    """(Re)create the global mouse-movement clock used by mouseMoveTime()."""
    global mouseMove
    mouseMove = psychopy.core.Clock()
def stopMoveClock():
    """Remove the global mouse-movement clock (motion is then untracked)."""
    global mouseMove
    mouseMove = None
def resetMoveClock():
    """Reset the global mouse-movement clock, creating it if needed."""
    global mouseMove
    if mouseMove:
        mouseMove.reset()
    else:
        startMoveClock()
# class Keyboard:
# """The keyboard class is currently just a helper class to allow common
# attributes with other objects (like mouse and stimuli). In particular
# it allows storage of the .status property (NOT_STARTED, STARTED, STOPPED).
# It isn't really needed for most users - the functions it supports (e.g.
# getKeys()) are directly callable from the event module.
# Note that multiple Keyboard instances will not keep separate buffers.
# """
# def __init__(self):
# self.status=NOT_STARTED
# def getKeys(keyList=None, timeStamped=False):
# return getKeys(keyList=keyList, timeStamped=timeStamped)
# def waitKeys(maxWait = None, keyList=None):
# return def waitKeys(maxWait = maxWait, keyList=keyList)
def modifiers_dict(modifiers):
    """Return dict where the key is a keyboard modifier flag
    and the value is the boolean state of that flag.
    """
    flagNames = ('MOD_SHIFT',
                 'MOD_CTRL',
                 'MOD_ALT',
                 'MOD_CAPSLOCK',
                 'MOD_NUMLOCK',
                 'MOD_WINDOWS',
                 'MOD_COMMAND',
                 'MOD_OPTION',
                 'MOD_SCROLLLOCK')
    thisModule = sys.modules[__name__]
    state = {}
    for name in flagNames:
        # key is the flag name minus its 'MOD_' prefix, lower-cased
        flag = getattr(thisModule, name)
        state[name[4:].lower()] = (modifiers & flag) > 0
    return state
def getKeys(keyList=None, modifiers=False, timeStamped=False):
    """Returns a list of keys that were pressed.

    :Parameters:
        keyList : **None** or []
            Allows the user to specify a set of keys to check for.
            Only keypresses from this set of keys will be removed from
            the keyboard buffer. If the keyList is `None`, all keys will be
            checked and the key buffer will be cleared completely.
            NB, pygame doesn't return timestamps (they are always 0)
        modifiers : **False** or True
            If True will return a list of tuples instead of a list of
            keynames. Each tuple has (keyname, modifiers). The modifiers
            are a dict of keyboard modifier flags keyed by the modifier
            name (eg. 'shift', 'ctrl').
        timeStamped : **False**, True, or `Clock`
            If True will return a list of tuples instead of a list of
            keynames. Each tuple has (keyname, time). If a `core.Clock`
            is given then the time will be relative to the `Clock`'s last
            reset.

    :Author:
        - 2003 written by Jon Peirce
        - 2009 keyList functionality added by Gary Strangman
        - 2009 timeStamped code provided by Dave Britton
        - 2016 modifiers code provided by 5AM Solutions
    """
    keys = []

    # NOTE(review): if none of the three backends is available,
    # windowSystem stays unbound and the final error branch would raise
    # NameError — presumably at least one backend is always importable.
    if havePygame and display.get_init():
        # see if pygame has anything instead (if it exists)
        windowSystem = 'pygame'
        for evts in evt.get(locals.KEYDOWN):
            # pygame has no keytimes
            keys.append((pygame.key.name(evts.key), 0))
    elif havePyglet:
        # for each (pyglet) window, dispatch its events before checking event
        # buffer
        windowSystem = 'pyglet'
        for win in _default_display_.get_windows():
            try:
                win.dispatch_events()  # pump events on pyglet windows
            except ValueError as e:  # pragma: no cover
                # Pressing special keys, such as 'volume-up', results in a
                # ValueError. This appears to be a bug in pyglet, and may be
                # specific to certain systems and versions of Python.
                logging.error(u'Failed to handle keypress')

        # NB 'global' applies to the whole function scope, including the
        # _keyBuffer assignments in the filtering section below
        global _keyBuffer
        if len(_keyBuffer) > 0:
            # then pyglet is running - just use this
            keys = _keyBuffer
            # _keyBuffer = []  # DO /NOT/ CLEAR THE KEY BUFFER ENTIRELY
    elif haveGLFW:
        windowSystem = 'glfw'
        # 'poll_events' is called when a window is flipped, all the callbacks
        # populate the buffer
        if len(_keyBuffer) > 0:
            keys = _keyBuffer

    # filter the buffer: requested keys are returned, others put back
    if keyList is None:
        _keyBuffer = []  # clear buffer entirely
        targets = keys  # equivalent behavior to getKeys()
    else:
        nontargets = []
        targets = []
        # split keys into keepers and pass-thrus
        for key in keys:
            if key[0] in keyList:
                targets.append(key)
            else:
                nontargets.append(key)
        _keyBuffer = nontargets  # save these

    # now we have a list of tuples called targets
    # did the user want timestamped tuples or keynames?
    if modifiers == False and timeStamped == False:
        keyNames = [k[0] for k in targets]
        return keyNames
    elif timeStamped == False:
        keyNames = [(k[0], modifiers_dict(k[1])) for k in targets]
        return keyNames
    elif timeStamped and windowSystem=='pygame':
        # provide a warning and set timestamps to be None
        logging.warning('Pygame keyboard events do not support timestamped=True')
        relTuple = [[_f for _f in (k[0], modifiers and modifiers_dict(k[1]) or None, None) if _f] for k in targets]
        return relTuple
    elif hasattr(timeStamped, 'getLastResetTime'):
        # keys were originally time-stamped with
        # core.monotonicClock._lastResetTime
        # we need to shift that by the difference between it and
        # our custom clock
        _last = timeStamped.getLastResetTime()
        _clockLast = psychopy.core.monotonicClock.getLastResetTime()
        timeBaseDiff = _last - _clockLast
        relTuple = [[_f for _f in (k[0], modifiers and modifiers_dict(k[1]) or None, k[-1] - timeBaseDiff) if _f] for k in targets]
        return relTuple
    elif timeStamped is True:
        return [[_f for _f in (k[0], modifiers and modifiers_dict(k[1]) or None, k[-1]) if _f] for k in targets]
    elif isinstance(timeStamped, (float, int, int)):
        # numeric offset: subtract it from each raw timestamp
        relTuple = [[_f for _f in (k[0], modifiers and modifiers_dict(k[1]) or None, k[-1] - timeStamped) if _f] for k in targets]
        return relTuple
    else: ## danger - catch anything that gets here because it shouldn't!
        raise ValueError("We received an unknown combination of params to "
                         "getKeys(): timestamped={}, windowSystem={}, "
                         "modifiers={}"
                         .format(timeStamped, windowSystem, modifiers))
def waitKeys(maxWait=float('inf'), keyList=None, modifiers=False,
             timeStamped=False, clearEvents=True):
    """Same as `~psychopy.event.getKeys`, but halts everything
    (including drawing) while awaiting input from keyboard.

    :Parameters:
        maxWait : any numeric value.
            Maximum number of seconds period and which keys to wait for.
            Default is float('inf') which simply waits forever.
        keyList : **None** or []
            Allows the user to specify a set of keys to check for.
            Only keypresses from this set of keys will be removed from
            the keyboard buffer. If the keyList is `None`, all keys will be
            checked and the key buffer will be cleared completely.
            NB, pygame doesn't return timestamps (they are always 0)
        modifiers : **False** or True
            If True will return a list of tuples instead of a list of
            keynames. Each tuple has (keyname, modifiers). The modifiers
            are a dict of keyboard modifier flags keyed by the modifier
            name (eg. 'shift', 'ctrl').
        timeStamped : **False**, True, or `Clock`
            If True will return a list of tuples instead of a list of
            keynames. Each tuple has (keyname, time). If a `core.Clock`
            is given then the time will be relative to the `Clock`'s last
            reset.
        clearEvents : **True** or False
            Whether to clear the keyboard event buffer (and discard preceding
            keypresses) before starting to monitor for new keypresses.

    Returns None if times out.
    """
    if clearEvents:
        # Only consider keypresses from here onwards.
        # We need to invoke clearEvents(), but our keyword argument is
        # also called clearEvents. We can work around this conflict by
        # accessing the global scope explicitly.
        globals()['clearEvents']('keyboard')

    # Check for keypresses until maxWait is exceeded
    #
    # NB pygame.event does have a wait() function that will
    # do this and maybe leave more cpu idle time?
    timer = psychopy.core.Clock()
    got_keypress = False

    while not got_keypress and timer.getTime() < maxWait:
        # Pump events on pyglet windows if they exist.
        if havePyglet:
            for win in _default_display_.get_windows():
                win.dispatch_events()

        # Get keypresses and return if anything is pressed.
        keys = getKeys(keyList=keyList, modifiers=modifiers,
                       timeStamped=timeStamped)
        if keys:
            got_keypress = True

    if got_keypress:
        return keys
    else:
        logging.data('No keypress (maxWait exceeded)')
        return None
def xydist(p1=(0.0, 0.0), p2=(0.0, 0.0)):
    """Helper returning the cartesian (Euclidean) distance between the
    2D points p1 and p2."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return numpy.sqrt(dx * dx + dy * dy)
class Mouse(object):
"""Easy way to track what your mouse is doing.
It needn't be a class, but since Joystick works better
as a class this may as well be one too for consistency
Create your `visual.Window` before creating a Mouse.
:Parameters:
visible : **True** or False
makes the mouse invisible if necessary
newPos : **None** or [x,y]
gives the mouse a particular starting position
(pygame `Window` only)
win : **None** or `Window`
the window to which this mouse is attached
(the first found if None provided)
"""
    def __init__(self,
                 visible=True,
                 newPos=None,
                 win=None):
        """See the class docstring for parameter descriptions."""
        super(Mouse, self).__init__()
        self.visible = visible
        self.lastPos = None
        self.prevPos = None  # used for motion detection and timing
        if win:
            self.win = win
        else:
            try:
                # to avoid circular imports, core.openWindows is defined
                # by visual.py and updated in core namespace;
                # it's circular to "import visual" here in event
                # (openWindows holds callables/weakrefs, hence the () call)
                self.win = psychopy.core.openWindows[0]()
                logging.info('Mouse: using default window')
            except (NameError, IndexError):
                logging.error('Mouse: failed to get a default visual.Window'
                              ' (need to create one first)')
                self.win = None

        # for builder: set status to STARTED, NOT_STARTED etc
        self.status = None
        self.mouseClock = psychopy.core.Clock()
        self.movedistance = 0.0
        # if pygame isn't initialised then we must use pyglet
        global usePygame
        if havePygame and not pygame.display.get_init():
            usePygame = False

        if not usePygame:
            # reset the shared button-state buffer used by the pyglet/glfw
            # callbacks
            global mouseButtons
            mouseButtons = [0, 0, 0]
        self.setVisible(visible)
        if newPos is not None:
            self.setPos(newPos)
    @property
    def units(self):
        """The units for this mouse
        (will match the current units for the Window it lives in)
        """
        # simply delegate to the attached window
        return self.win.units
    def setPos(self, newPos=(0, 0)):
        """Sets the current position of the mouse,
        in the same units as the :class:`~visual.Window`. (0,0) is the center.

        :Parameters:
            newPos : (x,y) or [x,y]
                the new position on the screen
        """
        newPosPix = self._windowUnits2pix(numpy.array(newPos))
        if usePygame:
            # pygame's origin is top-left, so flip y and shift to corner
            newPosPix[1] = self.win.size[1] / 2 - newPosPix[1]
            newPosPix[0] = self.win.size[0] / 2 + newPosPix[0]
            mouse.set_pos(newPosPix)
        else:
            if hasattr(self.win.winHandle, 'set_mouse_position'):
                if self.win.useRetina:
                    # assumes win.size is in backing-store pixels while
                    # pyglet wants logical points (half scale) —
                    # TODO(review): confirm on retina displays
                    newPosPix = numpy.array(self.win.size) / 4 + newPosPix / 2
                else:
                    newPosPix = numpy.array(self.win.size) / 2 + newPosPix
                x, y = int(newPosPix[0]), int(newPosPix[1])
                self.win.winHandle.set_mouse_position(x, y)
                # keep pyglet's cached coords in sync so a subsequent
                # getPos() reflects the warp immediately
                self.win.winHandle._mouse_x = x
                self.win.winHandle._mouse_y = y
            else:
                msg = 'mouse position could not be set (pyglet %s)'
                logging.error(msg % pyglet.version)
    def getPos(self):
        """Returns the current position of the mouse,
        in the same units as the :class:`~visual.Window` (0,0) is at centre

        Also updates self.lastPos as a side effect.
        """
        if usePygame:  # for pygame top left is 0,0
            lastPosPix = numpy.array(mouse.get_pos())
            # set (0,0) to centre
            lastPosPix[1] = self.win.size[1] / 2 - lastPosPix[1]
            lastPosPix[0] = lastPosPix[0] - self.win.size[0] / 2
        else:  # for pyglet bottom left is 0,0
            # use default window if we don't have one
            if self.win:
                w = self.win.winHandle
            else:
                defDisplay = _default_display_
                w = defDisplay.get_windows()[0]
            # get position in window
            lastPosPix = numpy.array([w._mouse_x, w._mouse_y])
            # set (0,0) to centre
            # NOTE(review): self.win is dereferenced here even on the
            # default-window path above where self.win may be None —
            # presumably that path is rare; confirm.
            if self.win.useRetina:
                lastPosPix = lastPosPix*2 - numpy.array(self.win.size) / 2
            else:
                lastPosPix = lastPosPix - numpy.array(self.win.size) / 2
        self.lastPos = self._pix2windowUnits(lastPosPix)
        return copy.copy(self.lastPos)
def mouseMoved(self, distance=None, reset=False):
"""Determine whether/how far the mouse has moved.
With no args returns true if mouse has moved at all since last
getPos() call, or distance (x,y) can be set to pos or neg
distances from x and y to see if moved either x or y that
far from lastPos, or distance can be an int/float to test if
new coordinates are more than that far in a straight line
from old coords.
Retrieve time of last movement from self.mouseClock.getTime().
Reset can be to 'here' or to screen coords (x,y) which allows
measuring distance from there to mouse when moved. If reset is
(x,y) and distance is set, then prevPos is set to (x,y) and
distance from (x,y) to here is checked, mouse.lastPos is set as
current (x,y) by getPos(), mouse.prevPos holds lastPos from
last time mouseMoved was called.
"""
# mouseMove = clock that gets reset by pyglet mouse movement handler:
global mouseMove
# needs initialization before getPos resets lastPos
self.prevPos = copy.copy(self.lastPos)
self.getPos() # sets self.lastPos to current position
if not reset:
if distance is None:
if self.prevPos[0] != self.lastPos[0]:
return True
if self.prevPos[1] != self.lastPos[1]:
return True
else:
if isinstance(distance, int) or isinstance(distance, float):
self.movedistance = xydist(self.prevPos, self.lastPos)
if self.movedistance > distance:
return True
else:
return False
if self.prevPos[0] + distance[0] - self.lastPos[0] > 0.0:
return True # moved on X-axis
if self.prevPos[1] + distance[1] - self.lastPos[0] > 0.0:
return True # moved on Y-axis
return False
if reset is True:
# just reset the last move time: starts/zeroes the move clock
mouseMove.reset() # resets the global mouseMove clock
return False
if reset == 'here':
# set to wherever we are
self.prevPos = copy.copy(self.lastPos) # lastPos set in getPos()
return False
if hasattr(reset, '__len__'):
# a tuple or list of (x,y)
# reset to (x,y) to check movement from there
self.prevPos = copy.copy(reset)
if not distance:
return False # just resetting prevPos, not checking distance
else:
# checking distance of current pos to newly reset prevposition
if isinstance(distance, int) or isinstance(distance, float):
self.movedistance = xydist(self.prevPos, self.lastPos)
if self.movedistance > distance:
return True
else:
return False
# distance is x,y tuple, to check if the mouse moved that
# far on either x or y axis
# distance must be (dx,dy), and reset is (rx,ry), current pos
# (cx,cy): Is cx-rx > dx ?
if abs(self.lastPos[0] - self.prevPos[0]) > distance[0]:
return True # moved on X-axis
if abs(self.lastPos[1] - self.prevPos[1]) > distance[1]:
return True # moved on Y-axis
return False
return False
def mouseMoveTime(self):
global mouseMove
if mouseMove:
return mouseMove.getTime()
else:
return 0 # mouseMove clock not started
    def getRel(self):
        """Returns the new position of the mouse relative to the
        last call to getRel or getPos, in the same units as the
        :class:`~visual.Window`.
        """
        if usePygame:
            # pygame tracks relative motion natively; flip y to match our
            # bottom-up convention
            relPosPix = numpy.array(mouse.get_rel()) * [1, -1]
            return self._pix2windowUnits(relPosPix)
        else:
            # NB getPos() resets lastPos so MUST retrieve lastPos first
            if self.lastPos is None:
                relPos = self.getPos()
            else:
                # DON't switch to (this-lastPos)
                relPos = -self.lastPos + self.getPos()
            return relPos
def getWheelRel(self):
"""Returns the travel of the mouse scroll wheel since last call.
Returns a numpy.array(x,y) but for most wheels y is the only
value that will change (except Mac mighty mice?)
"""
global mouseWheelRel
rel = mouseWheelRel
mouseWheelRel = numpy.array([0.0, 0.0])
return rel
def getVisible(self):
"""Gets the visibility of the mouse (1 or 0)
"""
if usePygame:
return mouse.get_visible()
else:
print("Getting the mouse visibility is not supported under"
" pyglet, but you can set it anyway")
def setVisible(self, visible):
"""Sets the visibility of the mouse to 1 or 0
NB when the mouse is not visible its absolute position is held
at (0, 0) to prevent it from going off the screen and getting lost!
You can still use getRel() in that case.
"""
if self.win: # use default window if we don't have one
self.win.setMouseVisible(visible)
elif usePygame:
mouse.set_visible(visible)
else: # try communicating with window directly?
plat = _default_display_
w = plat.get_windows()[0]
w.set_mouse_visible(visible)
def clickReset(self, buttons=(0, 1, 2)):
"""Reset a 3-item list of core.Clocks use in timing button clicks.
The pyglet mouse-button-pressed handler uses their
clock.getLastResetTime() when a button is pressed so the user
can reset them at stimulus onset or offset to measure RT. The
default is to reset all, but they can be reset individually as
specified in buttons list
"""
global mouseClick
for c in buttons:
mouseClick[c].reset()
mouseTimes[c] = 0.0
    def getPressed(self, getTime=False):
        """Returns a 3-item list indicating whether or not buttons 0,1,2
        are currently pressed.

        If `getTime=True` (False by default) then `getPressed` will
        return all buttons that have been pressed since the last call
        to `mouse.clickReset` as well as their time stamps::

            buttons = mouse.getPressed()
            buttons, times = mouse.getPressed(getTime=True)

        Typically you want to call :ref:`mouse.clickReset()` at stimulus
        onset, then after the button is pressed in reaction to it, the
        total time elapsed from the last reset to click is in mouseTimes.
        This is the actual RT, regardless of when the call to `getPressed()`
        was made.
        """
        global mouseButtons, mouseTimes
        if usePygame:
            return mouse.get_pressed()
        else:
            # False: # havePyglet: # like in getKeys - pump the events
            # for each (pyglet) window, dispatch its events before checking
            # event buffer
            for win in _default_display_.get_windows():
                win.dispatch_events()  # pump events on pyglet windows

            # else:
            # return copies so callers can't mutate the shared state
            if not getTime:
                return copy.copy(mouseButtons)
            else:
                return copy.copy(mouseButtons), copy.copy(mouseTimes)
def isPressedIn(self, shape, buttons=(0, 1, 2)):
"""Returns `True` if the mouse is currently inside the shape and
one of the mouse buttons is pressed. The default is that any of
the 3 buttons can indicate a click; for only a left-click,
specify `buttons=[0]`::
if mouse.isPressedIn(shape):
if mouse.isPressedIn(shape, buttons=[0]): # left-clicks only
Ideally, `shape` can be anything that has a `.contains()` method,
like `ShapeStim` or `Polygon`. Not tested with `ImageStim`.
"""
wanted = numpy.zeros(3, dtype=numpy.int)
for c in buttons:
wanted[c] = 1
pressed = self.getPressed()
return any(wanted & pressed) and shape.contains(self)
def _pix2windowUnits(self, pos):
if self.win.units == 'pix':
if self.win.useRetina:
pos /= 2.0
return pos
elif self.win.units == 'norm':
return pos * 2.0 / self.win.size
elif self.win.units == 'cm':
return pix2cm(pos, self.win.monitor)
elif self.win.units == 'deg':
return pix2deg(pos, self.win.monitor)
elif self.win.units == 'height':
return pos / float(self.win.size[1])
def _windowUnits2pix(self, pos):
if self.win.units == 'pix':
return pos
elif self.win.units == 'norm':
return pos * self.win.size / 2.0
elif self.win.units == 'cm':
return cm2pix(pos, self.win.monitor)
elif self.win.units == 'deg':
return deg2pix(pos, self.win.monitor)
elif self.win.units == 'height':
return pos * float(self.win.size[1])
def setExclusive(self, exclusivity):
"""Binds the mouse to the experiment window. Only works in Pyglet.
In multi-monitor settings, or with a window that is not fullscreen,
the mouse pointer can drift, and thereby PsychoPy might not get the
events from that window. setExclusive(True) works with Pyglet to
bind the mouse to the experiment window.
Note that binding the mouse pointer to a window will cause the
pointer to vanish, and absolute positions will no longer be
meaningful getPos() returns [0, 0] in this case.
"""
if type(exclusivity) is not bool:
raise ValueError('Exclusivity must be a boolean!')
if not usePygame:
msg = ('Setting mouse exclusivity in Pyglet will cause the '
'cursor to disappear, and getPos() will be rendered '
'meaningless, returning [0, 0]')
psychopy.logging.warning(msg)
self.win.winHandle.set_exclusive_mouse(exclusivity)
else:
print('Mouse exclusivity can only be set for Pyglet!')
class BuilderKeyResponse(object):
    """Bookkeeping object for Builder-generated scripts: holds a clock
    plus the state of one keyboard-response component.
    """

    def __init__(self):
        super(BuilderKeyResponse, self).__init__()
        self.clock = psychopy.core.Clock()  # used to measure the rt
        self.status = NOT_STARTED  # not yet checking the keyboard
        self.keys = []  # the key(s) pressed so far this trial
        self.rt = []  # response time(s)
        self.corr = 0  # was the resp correct this trial? (0=no, 1=yes)
def clearEvents(eventType=None):
    """Clears all events currently in the event buffer.

    Optional argument, eventType, specifies only certain types to be
    cleared.

    :Parameters:
        eventType : **None**, 'mouse', 'joystick', 'keyboard'
            If this is not None then only events of the given type are
            cleared
    """
    if havePygame and display.get_init():  # pygame path
        if eventType == 'mouse':
            evt.get([locals.MOUSEMOTION, locals.MOUSEBUTTONUP,
                     locals.MOUSEBUTTONDOWN])
        elif eventType == 'keyboard':
            evt.get([locals.KEYDOWN, locals.KEYUP])
        elif eventType == 'joystick':
            evt.get([locals.JOYAXISMOTION, locals.JOYBALLMOTION,
                     locals.JOYHATMOTION, locals.JOYBUTTONUP,
                     locals.JOYBUTTONDOWN])
        else:
            evt.get()
        return
    # pyglet path: pump each window's events before touching the buffer
    for win in _default_display_.get_windows():
        win.dispatch_events()
    if eventType in ('mouse', 'joystick'):
        # mouse/joystick events are not buffered under pyglet; nothing to do
        pass
    else:  # eventType='keyboard' or eventType=None
        global _keyBuffer
        _keyBuffer = []
class _GlobalEventKeys(MutableMapping):
    """
    Global event keys for the pyglet backend.

    Global event keys are single keys (or combinations of a single key
    and one or more "modifier" keys such as Ctrl, Alt, etc.) with an
    associated Python callback function. This function will be executed
    if the key (or key/modifiers combination) was pressed.

    PsychoPy fully automatically monitors and processes key presses
    during most portions of the experimental run, for example during
    `core.wait()` periods, or when calling `win.flip()`. If a global
    event key press is detected, the specified function will be run
    immediately. You are not required to manually poll and check for key
    presses. This can be particularly useful to implement a global
    "shutdown" key, or to trigger laboratory equipment on a key press
    when testing your experimental script -- without cluttering the code.
    But of course the application is not limited to these two scenarios.
    In fact, you can associate any Python function with a global event key.

    The PsychoPy preferences for `shutdownKey` and `shutdownKeyModifiers`
    (both unset by default) will be used to automatically create a global
    shutdown key once the `psychopy.event` module is being imported.

    :Notes:

    All keyboard -> event associations are stored in the `self._events`
    OrderedDict. The dictionary keys are namedtuples with the elements
    `key` and `modifiers`. `key` is a string defining an (ordinary)
    keyboard key, and `modifiers` is a tuple of modifier key strings,
    e.g., `('ctrl', 'alt')`. The user does not access this attribute
    directly, but should index the class instance itself (via
    `globalKeys[key, modifiers]`). That way, the `modifiers` sequence
    will be transparently converted into a tuple (which is a hashable
    type) before trying to index `self._events`.
    """

    # Value stored per registered key: the callback and its arguments.
    _GlobalEvent = namedtuple(
        '_GlobalEvent',
        ['func', 'func_args', 'func_kwargs', 'name'])

    # Hashable dict key: (key-string, modifiers-tuple).
    _IndexKey = namedtuple('_IndexKey', ['key', 'modifiers'])

    _valid_keys = set(string.ascii_lowercase + string.digits
                      + string.punctuation + ' \t')
    _valid_keys.update(['escape', 'left', 'right', 'up', 'down', 'space'])

    _valid_modifiers = {'shift', 'ctrl', 'alt', 'capslock',
                        'scrolllock', 'command', 'option', 'windows'}

    def __init__(self):
        super(_GlobalEventKeys, self).__init__()
        self._events = OrderedDict()

        # Auto-create a shutdown key if one is defined in preferences.
        if prefs.general['shutdownKey']:
            msg = ('Found shutdown key definition in preferences; '
                   'enabling shutdown key.')
            logging.info(msg)
            self.add(key=prefs.general['shutdownKey'],
                     modifiers=prefs.general['shutdownKeyModifiers'],
                     func=psychopy.core.quit,
                     name='shutdown (auto-created from prefs)')

    def __repr__(self):
        info = ''
        for index_key, event in list(self._events.items()):
            info += '\n\t'
            if index_key.modifiers:
                _modifiers = ['[%s]' % m.upper() for m in index_key.modifiers]
                info += '%s + ' % ' + '.join(_modifiers)
            info += ("[%s] -> '%s' %s"
                     % (index_key.key.upper(), event.name, event.func))
        return '<_GlobalEventKeys : %s\n>' % info

    def __str__(self):
        return ('<_GlobalEventKeys : %i key->event mappings defined.>'
                % len(self))

    def __len__(self):
        return len(self._events)

    def __getitem__(self, key):
        index_key = self._gen_index_key(key)
        return self._events[index_key]

    def __setitem__(self, key, value):
        # Direct assignment is disallowed: `.add()` performs validation.
        msg = 'Please use `.add()` to add a new global event key.'
        raise NotImplementedError(msg)

    def __delitem__(self, key):
        index_key = self._gen_index_key(key)
        event = self._events.pop(index_key, None)
        if event is None:
            msg = 'Requested to remove unregistered global event key.'
            raise KeyError(msg)
        else:
            logging.exp("Removed global key event: '%s'." % event.name)

    def __iter__(self):
        return iter(self._events.keys())

    def _gen_index_key(self, key):
        """Normalize `key` (a string, or a (key, modifiers) pair) into a
        hashable `_IndexKey` namedtuple."""
        # `str` replaces the Python-2-only `basestring`, which raises
        # NameError on Python 3.
        if isinstance(key, str):  # Single key, passed as a string.
            index_key = self._IndexKey(key, ())
        else:  # Convert modifiers into a hashable type.
            index_key = self._IndexKey(key[0], tuple(key[1]))
        return index_key

    def add(self, key, func, func_args=(), func_kwargs=None,
            modifiers=(), name=None):
        """
        Add a global event key.

        :Parameters:

        key : string
            The key to add.

        func : function
            The function to invoke once the specified keys were pressed.

        func_args : iterable
            Positional arguments to be passed to the specified function.

        func_kwargs : dict
            Keyword arguments to be passed to the specified function.

        modifiers : collection of strings
            Modifier keys. Valid keys are:
            'shift', 'ctrl', 'alt' (not on macOS), 'capslock',
            'scrolllock', 'command' (macOS only), 'option' (macOS only)

            Num Lock is not supported.

        name : string
            The name of the event. Will be used for logging. If None,
            will use the name of the specified function.

        :Raises:

        ValueError
            If the specified key or modifiers are invalid, or if the
            key / modifier combination has already been assigned to a global
            event.
        """
        if key not in self._valid_keys:
            raise ValueError('Unknown key specified: %s' % key)
        if not set(modifiers).issubset(self._valid_modifiers):
            raise ValueError('Unknown modifier key specified.')

        index_key = self._gen_index_key((key, modifiers))
        if index_key in self._events:
            msg = ('The specified key is already assigned to a global event. '
                   'Use `.remove()` to remove it first.')
            raise ValueError(msg)

        if func_kwargs is None:
            func_kwargs = {}
        if name is None:
            name = func.__name__

        self._events[index_key] = self._GlobalEvent(func, func_args,
                                                    func_kwargs, name)
        logging.exp('Added new global key event: %s' % name)

    def remove(self, key, modifiers=()):
        """
        Remove a global event key.

        :Parameters:

        key : string
            A single key name. If `'all'`, remove all event keys.

        modifiers : collection of strings
            Modifier keys. Valid keys are:
            'shift', 'ctrl', 'alt' (not on macOS), 'capslock', 'numlock',
            'scrolllock', 'command' (macOS only), 'option' (macOS only),
            'windows' (Windows only)
        """
        if key == 'all':
            self._events = OrderedDict()
            logging.exp('Removed all global key events.')
            return

        del self[key, modifiers]
def _onGLFWKey(*args, **kwargs):
    """Callback for key/character events for the GLFW backend.

    Appends a (key_name, modifiers, time) tuple to the module-level
    `_keyBuffer` and logs the press. If GLFW cannot identify the key
    (KEY_UNKNOWN), sets the global `useText` flag so _onGLFWText() can
    handle the subsequent character event instead.

    :return: None
    """
    keyTime = psychopy.core.getTime()  # timestamp taken as early as possible
    # TODO - support for key emulation
    win_ptr, key, scancode, action, modifiers = args
    global useText
    if key == glfw.KEY_UNKNOWN:
        # defer to the text (unicode character) callback
        useText = True
        return
    useText = False
    # get the printable name, always make lowercase
    key_name = glfw.get_key_name(key, scancode)
    # if there is no localized key name or space
    if key_name is None or key_name == ' ':
        # fall back to the backend's keycode lookup table
        try:
            key_name = _glfw_keycodes_[key]
        except KeyError:
            # NOTE(review): unknown keycode leaves key_name as None/' ' and
            # it is still appended to the buffer — presumably intentional.
            pass
    else:
        key_name = key_name.lower()
    # TODO - modifier integration
    keySource = 'Keypress'
    _keyBuffer.append((key_name, modifiers, keyTime))  # tuple
    logging.data("%s: %s" % (keySource, key_name))
def _onGLFWText(*args, **kwargs):
    """Handle unicode character events when _onGLFWKey() could not
    resolve the key (i.e. GLFW reported KEY_UNKNOWN).

    :return: None
    """
    timestamp = psychopy.core.getTime()  # get timestamp
    # TODO - support for key emulation
    win_ptr, codepoint, modifiers = args
    # win = glfw.get_window_user_pointer(win_ptr)
    global useText
    if not useText:  # _onPygletKey has handled the input
        return
    character = chr(codepoint)  # convert the code point to a character
    _keyBuffer.append((character, timestamp))
    logging.data("%s: %s" % ('KeyPress', character))
def _onGLFWMouseButton(*args, **kwargs):
    """Callback for mouse press/release events (GLFW backend). Both
    actions arrive through this single callback.
    """
    global mouseButtons, mouseClick, mouseTimes
    now = psychopy.core.getTime()
    win_ptr, button, action, modifier = args
    # win = glfw.get_window_user_pointer(win_ptr)
    # current cursor position; this might not be at the exact location
    # of the mouse press
    x, y = glfw.get_cursor_pos(win_ptr)
    # map GLFW button constants onto our 0/1/2 button indices
    button_index = {glfw.MOUSE_BUTTON_LEFT: 0,
                    glfw.MOUSE_BUTTON_MIDDLE: 1,
                    glfw.MOUSE_BUTTON_RIGHT: 2}.get(button)
    if button_index is None:
        # not one of the three tracked buttons; nothing to record
        return
    if action == glfw.PRESS:
        mouseButtons[button_index] = 1
        mouseTimes[button_index] = \
            now - mouseClick[button_index].getLastResetTime()
    elif action == glfw.RELEASE:
        mouseButtons[button_index] = 0
def _onGLFWMouseScroll(*args, **kwargs):
    """Callback for mouse scroll-wheel events. Most mice with scroll
    wheels only report the vertical (Y) offset.
    """
    window_ptr, x_offset, y_offset = args
    global mouseWheelRel
    # accumulate relative wheel motion since the last query
    mouseWheelRel = mouseWheelRel + numpy.array([x_offset, y_offset])
    logging.data("Mouse: wheel shift=(%i,%i)" % (x_offset, y_offset))
def _getGLFWJoystickButtons(*args, **kwargs):
    """Placeholder for GLFW joystick button polling.

    Not implemented; present so the GLFW backend exposes the same
    callback surface as the other backends.

    :return: None
    """
    pass
def _getGLFWJoystickAxes(*args, **kwargs):
    """Placeholder for GLFW joystick axis polling.

    Not implemented; present so the GLFW backend exposes the same
    callback surface as the other backends.

    :return: None
    """
    pass
# Create the module-level global-event-key registry (which may also
# auto-register a shutdown key from prefs) only when pyglet is available.
if havePyglet:
    globalKeys = _GlobalEventKeys()
| gpl-3.0 |
ESS-LLP/erpnext-medical | erpnext/setup/setup_wizard/test_setup_wizard.py | 18 | 1795 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, time
from frappe.utils.selenium_testdriver import TestDriver
def run_setup_wizard_test():
    """Drive the ERPNext setup wizard end-to-end through Selenium.

    Walks every wizard slide (language, region, user profile, domain,
    organisation), waits for the desktop page to confirm completion,
    then patches company accounts needed by later tests.

    Returns True on success.

    NOTE(review): the time.sleep() calls pace the UI; reducing them can
    make the run flaky.
    """
    driver = TestDriver()
    # flag selenium mode so server-side code can adapt
    frappe.db.set_default('in_selenium', '1')
    frappe.db.commit()
    driver.login('#page-setup-wizard')
    print('Running Setup Wizard Test...')
    # Language slide
    driver.wait_for_ajax(True)
    time.sleep(1)
    driver.set_select("language", "English (United States)")
    driver.wait_for_ajax(True)
    time.sleep(1)
    driver.click(".next-btn")
    # Region slide
    driver.wait_for_ajax(True)
    driver.set_select("country", "India")
    driver.wait_for_ajax(True)
    time.sleep(1)
    driver.click(".next-btn")
    # Profile slide
    driver.set_field("full_name", "Great Tester")
    driver.set_field("email", "great@example.com")
    driver.set_field("password", "test")
    driver.wait_for_ajax(True)
    time.sleep(1)
    driver.click(".next-btn")
    time.sleep(1)
    # domain slide
    driver.set_multicheck("domains", ["Manufacturing"])
    time.sleep(1)
    driver.click(".next-btn")
    # Org slide
    driver.set_field("company_name", "For Testing")
    time.sleep(1)
    driver.print_console()
    driver.click(".next-btn")
    driver.set_field("company_tagline", "Just for GST")
    driver.set_field("bank_account", "HDFC")
    time.sleep(3)
    driver.click(".complete-btn")
    # Wait for desktop
    driver.wait_for('#page-desktop', timeout=600)
    driver.print_console()
    time.sleep(3)
    # clear the selenium flag and patch accounts used by later tests
    frappe.db.set_default('in_selenium', None)
    frappe.db.set_value("Company", "For Testing", "write_off_account", "Write Off - FT")
    frappe.db.set_value("Company", "For Testing", "exchange_gain_loss_account", "Exchange Gain/Loss - FT")
    frappe.db.commit()
    driver.close()
    return True
| gpl-3.0 |
IITBinterns13/edx-platform-dev | common/djangoapps/cache_toolbox/templatetags/cache_toolbox.py | 239 | 2059 | from django import template
from django.core.cache import cache
from django.template import Node, TemplateSyntaxError, Variable
from django.template import resolve_variable
register = template.Library()
class CacheNode(Node):
    """Template node that renders its children and caches the result.

    The cache key is taken verbatim from the resolved template variable
    (no mangling/MD5), so entries can also be written or invalidated
    from Python code using the same key.
    """

    def __init__(self, nodelist, expire_time, key):
        self.nodelist = nodelist
        self.expire_time = Variable(expire_time)
        # Wrap in Variable here (mirroring expire_time) so render() can
        # resolve it; `django.template.resolve_variable` is deprecated
        # and Variable(...).resolve() is the supported replacement.
        self.key = Variable(key)

    def render(self, context):
        key = self.key.resolve(context)
        expire_time = int(self.expire_time.resolve(context))

        value = cache.get(key)
        if value is None:
            # cache miss: render the fragment and store it
            value = self.nodelist.render(context)
            cache.set(key, value, expire_time)

        return value
@register.tag
def cachedeterministic(parser, token):
    """
    Cache the contents of a template fragment for a given amount of time,
    just like {% cache .. %}, except that the key is deterministic and not
    mangled or run through MD5.

    Usage::

        {% cachedeterministic [expire_time] [key] %}
        .. some expensive processing ..
        {% endcachedeterministic %}
    """
    nodelist = parser.parse(('endcachedeterministic',))
    parser.delete_first_token()
    bits = token.contents.split()
    if len(bits) != 3:
        raise TemplateSyntaxError(u"'%r' tag requires 2 arguments." % bits[0])
    _tag_name, expire_time, key = bits
    return CacheNode(nodelist, expire_time, key)
class ShowIfCachedNode(Node):
    """Template node that outputs a cached value if present, else ''."""

    def __init__(self, key):
        # Wrap in Variable so render() can resolve it;
        # `django.template.resolve_variable` is deprecated and
        # Variable(...).resolve() is the supported replacement.
        self.key = Variable(key)

    def render(self, context):
        key = self.key.resolve(context)
        return cache.get(key) or ''
@register.tag
def showifcached(parser, token):
    """
    Show content if it exists in the cache, otherwise display nothing.

    The key is entirely deterministic and not mangled or run through MD5
    (cf. {% cache %}).

    Usage::

        {% showifcached [key] %}
    """
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError(u"'%r' tag requires 1 argument." % bits[0])
    return ShowIfCachedNode(bits[1])
| agpl-3.0 |
gacarrillor/QGIS | python/plugins/processing/algs/gdal/sieve.py | 15 | 5672 | # -*- coding: utf-8 -*-
"""
***************************************************************************
sieve.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class sieve(GdalAlgorithm):
    """QGIS Processing wrapper around GDAL's ``gdal_sieve`` utility.

    Removes raster polygons smaller than a pixel-count threshold,
    replacing them with the value of the largest neighbouring polygon.
    """

    # Parameter identifiers used by Processing.
    INPUT = 'INPUT'
    THRESHOLD = 'THRESHOLD'
    EIGHT_CONNECTEDNESS = 'EIGHT_CONNECTEDNESS'
    NO_MASK = 'NO_MASK'
    MASK_LAYER = 'MASK_LAYER'
    EXTRA = 'EXTRA'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        # Declare the algorithm's inputs and output.
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterNumber(self.THRESHOLD,
                                                       self.tr('Threshold'),
                                                       type=QgsProcessingParameterNumber.Integer,
                                                       minValue=0,
                                                       defaultValue=10))
        self.addParameter(QgsProcessingParameterBoolean(self.EIGHT_CONNECTEDNESS,
                                                        self.tr('Use 8-connectedness'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterBoolean(self.NO_MASK,
                                                        self.tr('Do not use the default validity mask for the input band'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterRasterLayer(self.MASK_LAYER,
                                                            self.tr('Validity mask'),
                                                            optional=True))
        # Free-form extra CLI parameters, hidden behind "advanced".
        extra_param = QgsProcessingParameterString(self.EXTRA,
                                                   self.tr('Additional command-line parameters'),
                                                   defaultValue=None,
                                                   optional=True)
        extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(extra_param)
        self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Sieved')))

    def name(self):
        return 'sieve'

    def displayName(self):
        return self.tr('Sieve')

    def group(self):
        return self.tr('Raster analysis')

    def groupId(self):
        return 'rasteranalysis'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'sieve.png'))

    def commandName(self):
        return 'gdal_sieve'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Translate the parameter values into the gdal_sieve CLI call."""
        arguments = [
            '-st',
            str(self.parameterAsInt(parameters, self.THRESHOLD, context)),
        ]

        # connectivity: -8 (diagonal neighbours count) or -4
        if self.parameterAsBoolean(parameters, self.EIGHT_CONNECTEDNESS, context):
            arguments.append('-8')
        else:
            arguments.append('-4')
        if self.parameterAsBoolean(parameters, self.NO_MASK, context):
            arguments.append('-nomask')
        mask = self.parameterAsRasterLayer(parameters, self.MASK_LAYER, context)
        if mask:
            arguments.append('-mask')
            arguments.append(mask.source())
        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        self.setOutputValue(self.OUTPUT, out)
        # pick the GDAL driver from the output file extension
        arguments.append('-of')
        arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            extra = self.parameterAsString(parameters, self.EXTRA, context)
            arguments.append(extra)
        raster = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if raster is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
        arguments.append(raster.source())
        arguments.append(out)
        return [self.commandName() + ('.bat' if isWindows() else '.py'), GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 |
thaim/ansible | lib/ansible/modules/network/opx/opx_cps.py | 38 | 12542 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: opx_cps
version_added: "2.7"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: CPS operations on networking device running Openswitch (OPX)
description:
- Executes the given operation on the YANG object, using CPS API in the
networking device running OpenSwitch (OPX). It uses the YANG models
provided in https://github.com/open-switch/opx-base-model.
options:
module_name:
description:
- Yang path to be configured.
attr_type:
description:
- Attribute Yang type.
attr_data:
description:
- Attribute Yang path and their corresponding data.
operation:
description:
- Operation to be performed on the object.
default: create
choices: ['delete', 'create', 'set', 'action', 'get']
db:
description:
- Queries/Writes the specified yang path from/to the db.
type: bool
default: 'no'
qualifier:
description:
- A qualifier provides the type of object data to retrieve or act on.
default: target
choices: ['target', 'observed', 'proposed', 'realtime', 'registration', 'running', 'startup']
commit_event:
description:
- Attempts to force the auto-commit event to the specified yang object.
type: bool
default: 'no'
requirements:
- "cps"
- "cps_object"
- "cps_utils"
"""
EXAMPLES = """
- name: Create VLAN
opx_cps:
module_name: "dell-base-if-cmn/if/interfaces/interface"
attr_data: {
"base-if-vlan/if/interfaces/interface/id": 230,
"if/interfaces/interface/name": "br230",
"if/interfaces/interface/type": "ianaift:l2vlan"
}
operation: "create"
- name: Get VLAN
opx_cps:
module_name: "dell-base-if-cmn/if/interfaces/interface"
attr_data: {
"if/interfaces/interface/name": "br230",
}
operation: "get"
- name: Modify some attributes in VLAN
opx_cps:
module_name: "dell-base-if-cmn/if/interfaces/interface"
attr_data: {
"cps/key_data":
{ "if/interfaces/interface/name": "br230" },
"dell-if/if/interfaces/interface/untagged-ports": ["e101-008-0"],
}
operation: "set"
- name: Delete VLAN
opx_cps:
module_name: "dell-base-if-cmn/if/interfaces/interface"
attr_data: {
"if/interfaces/interface/name": "br230",
}
operation: "delete"
"""
RETURN = """
response:
description: Output from the CPS transaction.
Output of CPS Get operation if CPS set/create/delete not done.
returned: when a CPS transaction is successfully performed.
type: list
sample:
[{
"data": {
"base-if-vlan/if/interfaces/interface/id": 230,
"cps/object-group/return-code": 0,
"dell-base-if-cmn/if/interfaces/interface/if-index": 46,
"if/interfaces/interface/name": "br230",
"if/interfaces/interface/type": "ianaift:l2vlan"
},
"key": "target/dell-base-if-cmn/if/interfaces/interface"
}]
cps_curr_config:
description: Returns the CPS Get output i.e. the running configuration
before CPS operation of set/delete is performed
returned: when CPS operations set, delete
type: dict
sample:
[{
"data": {
"base-if-vlan/if/interfaces/interface/id": 230,
"cps/key_data": {
"if/interfaces/interface/name": "br230"
},
"dell-base-if-cmn/if/interfaces/interface/if-index": 44,
"dell-if/if/interfaces/interface/learning-mode": 1,
"dell-if/if/interfaces/interface/mtu": 1532,
"dell-if/if/interfaces/interface/phys-address": "",
"dell-if/if/interfaces/interface/vlan-type": 1,
"if/interfaces/interface/enabled": 0,
"if/interfaces/interface/type": "ianaift:l2vlan"
},
"key": "target/dell-base-if-cmn/if/interfaces/interface"
}]
diff:
description: The actual configuration that will be pushed comparing
the running configuration and input attributes
returned: when CPS operations set, delete
type: dict
sample:
{
"cps/key_data": {
"if/interfaces/interface/name": "br230"
},
"dell-if/if/interfaces/interface/untagged-ports": [
"e101-007-0"
]
}
db:
description: Denotes if CPS DB transaction was performed
returned: when db is set to True in module options
type: bool
sample: True
commit_event:
description: Denotes if auto-commit event is set
returned: when commit_event is set to True in module options
type: bool
sample: True
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.common.utils import dict_diff
try:
import cps
import cps_object
import cps_utils
HAS_CPS = True
except ImportError:
HAS_CPS = False
def convert_cps_raw_list(raw_list):
    """Convert raw CPS objects into a list of {'data', 'key'} dicts.

    Elements whose attribute data comes back empty are skipped; a falsy
    input yields an empty list.
    """
    converted = []
    if not raw_list:
        return converted
    for raw_elem in raw_list:
        data = convert_cps_raw_data(raw_elem)
        if not data:
            continue
        raw_key = raw_elem['key']
        qualified_key = (cps.qual_from_key(raw_key) + "/" +
                         cps.name_from_key(raw_key, 1))
        converted.append({'data': data, 'key': qualified_key})
    return converted
def convert_cps_raw_data(raw_elem):
    """Extract every attribute of a raw CPS element into a plain dict."""
    obj = cps_object.CPSObject(obj=raw_elem)
    return {attr: obj.get_attr_data(attr) for attr in raw_elem['data']}
def parse_cps_parameters(module_name, qualifier, attr_type,
                         attr_data, operation=None, db=None,
                         commit_event=None):
    """Build a cps_object.CPSObject from the module's parameters.

    Registers any attribute types, adds each attribute (handling embedded
    attributes and comma-separated list values), and sets the db /
    commit-event properties on the object.
    """
    obj = cps_object.CPSObject(module=module_name, qual=qualifier)
    if operation:
        obj.set_property('oper', operation)
    if attr_type:
        # register explicit yang types for the given attribute paths
        for key, val in iteritems(attr_type):
            cps_utils.cps_attr_types_map.add_type(key, val)
    for key, val in iteritems(attr_data):
        # keys with 3+ comma-separated parts denote embedded attributes
        embed_attrs = key.split(',')
        embed_attrs_len = len(embed_attrs)
        if embed_attrs_len >= 3:
            obj.add_embed_attr(embed_attrs, val, embed_attrs_len - 2)
        else:
            if isinstance(val, str):
                val_list = val.split(',')
                # Treat as list if value contains ',' but is not
                # enclosed within {}
                if len(val_list) == 1 or val.startswith('{'):
                    obj.add_attr(key, val)
                else:
                    obj.add_attr(key, val_list)
            else:
                obj.add_attr(key, val)

    if db:
        # route the request through the CPS DB instead of the target
        cps.set_ownership_type(obj.get_key(), 'db')
        obj.set_property('db', True)
    else:
        obj.set_property('db', False)

    if commit_event:
        # force the auto-commit event on this yang object's key
        cps.set_auto_commit_event(obj.get_key(), True)
        obj.set_property('commit-event', True)
    return obj
def cps_get(obj):
    """Run a CPS get for `obj` and return {'response': converted_list}."""
    request_key = obj.get()
    raw_response = []
    cps.get([request_key], raw_response)
    return {"response": convert_cps_raw_list(raw_response)}
def cps_transaction(obj):
    """Commit the CPS change held in `obj`; raise RuntimeError on failure."""
    change = {'operation': obj.get_property('oper'), 'change': obj.get()}
    if not cps.transaction([change]):
        raise RuntimeError("Transaction error while " + obj.get_property('oper'))
    return {"response": convert_cps_raw_list([change['change']]),
            "changed": True}
def parse_key_data(attrs):
    """Flatten any 'cps/key_data' sub-dict of `attrs` into the result."""
    flattened = dict()
    for attr_name, attr_val in attrs.items():
        if attr_name == 'cps/key_data':
            # promote the nested key-data attributes to the top level
            flattened.update(attr_val)
        else:
            flattened[attr_name] = attr_val
    return flattened
def main():
    """
    main entry point for module execution

    Parses the Ansible parameters, always performs a CPS 'get' first,
    then (for non-get operations) computes the diff between the running
    configuration and the requested attributes and commits only that
    diff via a CPS transaction. Results are returned through
    module.exit_json().
    """
    argument_spec = dict(
        qualifier=dict(required=False,
                       default="target",
                       type='str',
                       choices=['target', 'observed', 'proposed', 'realtime',
                                'registration', 'running', 'startup']),
        module_name=dict(required=True, type='str'),
        attr_type=dict(required=False, type='dict'),
        attr_data=dict(required=True, type='dict'),
        operation=dict(required=False,
                       default="create",
                       type='str',
                       choices=['delete', 'create', 'set', 'action', 'get']),
        db=dict(required=False, default=False, type='bool'),
        commit_event=dict(required=False, default=False, type='bool')
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)

    if not HAS_CPS:
        module.fail_json(msg='CPS library required for this module')

    qualifier = module.params['qualifier']
    module_name = module.params['module_name']
    attr_type = module.params["attr_type"]
    attr_data = module.params["attr_data"]
    operation = module.params['operation']
    db = module.params["db"]
    commit_event = module.params["commit_event"]
    RESULT = dict(changed=False, db=False, commit_event=False)

    if db:
        RESULT['db'] = True
    if commit_event:
        RESULT['commit_event'] = True

    try:
        # First do a CPS get operation
        get_obj = parse_cps_parameters(module_name, qualifier, attr_type,
                                       attr_data, 'get', db, commit_event)
        curr_config = cps_get(get_obj)

        if operation == 'get':
            RESULT.update(curr_config)
        else:
            diff = attr_data

            # Evaluate the changes in the attributes
            cfg = dict()
            if curr_config and curr_config['response']:
                cfg = curr_config['response'][0]['data']

            key_d = 'cps/key_data'

            # diff computation is not needed for delete
            if operation != 'delete':
                configs = parse_key_data(cfg)
                attributes = parse_key_data(attr_data)
                diff = dict_diff(configs, attributes)

                # Append diff with any 'cps/key_data' from attr_data
                if diff and key_d in attr_data:
                    diff[key_d] = attr_data[key_d]

            # Append diff with any 'cps/key_data' from curr_config
            # Needed for all operations including delete
            if diff and key_d in cfg:
                if key_d in diff:
                    diff[key_d].update(cfg[key_d])
                else:
                    diff[key_d] = cfg[key_d]

            RESULT.update({"diff": diff})

            # Create object for cps operation
            obj = parse_cps_parameters(module_name, qualifier, attr_type,
                                       diff, operation, db, commit_event)

            res = dict()
            if operation == "delete":
                # only delete when the object actually exists
                if cfg:
                    res = cps_transaction(obj)
            else:
                # only commit when there is something to change
                if diff:
                    res = cps_transaction(obj)

            if not res and cfg:
                res.update({"response": curr_config['response']})
            else:
                res.update({"cps_curr_config": curr_config['response']})
            RESULT.update(res)

    except Exception as e:
        module.fail_json(msg=str(type(e).__name__) + ": " + str(e))

    module.exit_json(**RESULT)
# Standard Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()
| mit |
tntnatbry/tensorflow | tensorflow/python/kernel_tests/reduce_join_op_test.py | 116 | 14445 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ReduceJoin op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def _input_array(num_dims):
  """Creates an ndarray where each element is the binary of its linear index.

  Args:
    num_dims: The number of dimensions to create.

  Returns:
    An ndarray of shape [2] * num_dims.
  """
  fmt = "{:0%db}" % num_dims
  flat_strings = [fmt.format(idx) for idx in range(2**num_dims)]
  return np.array(flat_strings, dtype="S%d" % num_dims).reshape([2] * num_dims)
def _joined_array(num_dims, reduce_dim):
"""Creates an ndarray with the result from reduce_join on input_array.
Args:
num_dims: The number of dimensions of the original input array.
reduce_dim: The dimension to reduce.
Returns:
An ndarray of shape [2] * (num_dims - 1).
"""
formatter = "{:0%db}" % (num_dims - 1)
result = np.zeros(shape=[2] * (num_dims - 1), dtype="S%d" % (2 * num_dims))
flat = result.ravel()
for i in xrange(2**(num_dims - 1)):
dims = formatter.format(i)
flat[i] = "".join([(dims[:reduce_dim] + "%d" + dims[reduce_dim:]) % j
for j in xrange(2)])
return result
class UnicodeTestCase(test.TestCase):
  """Test case with Python3-compatible string comparator."""

  def assertAllEqualUnicode(self, truth, actual):
    # Normalize both sides to unicode arrays so bytes vs. str differences
    # between Python 2 and 3 do not cause spurious mismatches.
    expected = np.array(truth).astype("U")
    got = np.array(actual).astype("U")
    self.assertAllEqual(expected, got)
class ReduceJoinTestHelperTest(UnicodeTestCase):
  """Tests for helper functions."""

  def testInputArray(self):
    num_dims = 3
    expected = ["{:03b}".format(i) for i in range(2**num_dims)]
    flattened = _input_array(num_dims).reshape([-1])
    self.assertAllEqualUnicode(expected, flattened)

  def testJoinedArray(self):
    num_dims = 3
    # Expected results of reducing dim 0, 1, and 2 respectively.
    expected_by_dim = [
        [["000100", "001101"], ["010110", "011111"]],
        [["000010", "001011"], ["100110", "101111"]],
        [["000001", "010011"], ["100101", "110111"]],
    ]
    for dim, expected in enumerate(expected_by_dim):
      self.assertAllEqualUnicode(expected,
                                 _joined_array(num_dims, reduce_dim=dim))
class ReduceJoinTest(UnicodeTestCase):
  """Tests for the reduce_join op: values, shapes, and error handling."""

  def _testReduceJoin(self,
                      input_array,
                      truth,
                      truth_shape,
                      reduction_indices,
                      keep_dims=False,
                      separator=""):
    """Compares the output of reduce_join to an expected result.

    Args:
      input_array: The string input to be joined.
      truth: An array or np.array of the expected result.
      truth_shape: An array or np.array of the expected shape.
      reduction_indices: The indices to reduce over.
      keep_dims: Whether or not to retain reduced dimensions.
      separator: The separator to use for joining.
    """
    with self.test_session():
      output = string_ops.reduce_join(
          inputs=input_array,
          reduction_indices=reduction_indices,
          keep_dims=keep_dims,
          separator=separator)
      output_array = output.eval()
    # Check both the evaluated values and the statically inferred shape.
    self.assertAllEqualUnicode(truth, output_array)
    self.assertAllEqual(truth_shape, output.get_shape())

  def _testMultipleReduceJoin(self,
                              input_array,
                              reduction_indices,
                              separator=" "):
    """Tests reduce_join for one input and multiple reduction_indices.

    Does so by comparing the output to that from nested reduce_string_joins.
    The correctness of single-dimension reduce_join is verified by other
    tests below using _testReduceJoin.

    Args:
      input_array: The input to test.
      reduction_indices: The indices to reduce.
      separator: The separator to use when joining.
    """
    with self.test_session():
      output = string_ops.reduce_join(
          inputs=input_array,
          reduction_indices=reduction_indices,
          keep_dims=False,
          separator=separator)
      output_keep_dims = string_ops.reduce_join(
          inputs=input_array,
          reduction_indices=reduction_indices,
          keep_dims=True,
          separator=separator)
      # Build the reference result by reducing one index at a time.
      truth = input_array
      for index in reduction_indices:
        truth = string_ops.reduce_join(
            inputs=truth,
            reduction_indices=index,
            keep_dims=True,
            separator=separator)
      if not reduction_indices:
        truth = constant_op.constant(truth)
      truth_squeezed = array_ops.squeeze(truth, squeeze_dims=reduction_indices)
      output_array = output.eval()
      output_keep_dims_array = output_keep_dims.eval()
      truth_array = truth.eval()
      truth_squeezed_array = truth_squeezed.eval()
    self.assertAllEqualUnicode(truth_array, output_keep_dims_array)
    self.assertAllEqualUnicode(truth_squeezed_array, output_array)
    self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())
    self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())

  def testRankOne(self):
    input_array = ["this", "is", "a", "test"]
    truth = "thisisatest"
    truth_shape = []
    self._testReduceJoin(input_array, truth, truth_shape, reduction_indices=0)

  def testRankTwo(self):
    input_array = [["this", "is", "a", "test"],
                   ["please", "do", "not", "panic"]]
    truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
    truth_shape_dim_zero = [4]
    truth_dim_one = ["thisisatest", "pleasedonotpanic"]
    truth_shape_dim_one = [2]
    self._testReduceJoin(
        input_array, truth_dim_zero, truth_shape_dim_zero, reduction_indices=0)
    self._testReduceJoin(
        input_array, truth_dim_one, truth_shape_dim_one, reduction_indices=1)

    # reduction_indices=None joins everything into a single scalar string.
    expected_val = "thisisatestpleasedonotpanic"
    expected_shape = None
    self._testReduceJoin(
        input_array, expected_val, expected_shape, reduction_indices=None)

    # When using Tensor for input with reduction_indices=None, shape is known.
    expected_val = "thisisatestpleasedonotpanic"
    expected_shape = []
    self._testReduceJoin(
        constant_op.constant(input_array), expected_val,
        expected_shape, reduction_indices=None)

    # Using [] reduction_indices is a no-op.
    expected_val = input_array
    expected_shape = [2, 4]
    self._testReduceJoin(
        input_array, expected_val, expected_shape, reduction_indices=[])

  def testRankFive(self):
    input_array = _input_array(num_dims=5)
    truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
    truth_shape = [2] * 4
    for i in xrange(5):
      self._testReduceJoin(
          input_array, truths[i], truth_shape, reduction_indices=i)

  def testNegative(self):
    # Negative reduction indices count from the end, like np.sum's axis.
    input_array = _input_array(num_dims=5)
    truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
    truth_shape = [2] * 4
    for i in xrange(5):
      self._testReduceJoin(
          input_array, truths[i], truth_shape, reduction_indices=i - 5)

  def testSingletonDimension(self):
    # Reducing an inserted size-1 axis leaves the data unchanged.
    input_arrays = [
        _input_array(num_dims=5).reshape([2] * i + [1] + [2] * (5 - i))
        for i in xrange(6)
    ]
    truth = _input_array(num_dims=5)
    truth_shape = [2] * 5
    for i in xrange(6):
      self._testReduceJoin(
          input_arrays[i], truth, truth_shape, reduction_indices=i)

  def testSeparator(self):
    input_array = [["this", "is", "a", "test"],
                   ["please", "do", "not", "panic"]]
    truth_dim_zero = ["this please", "is do", "a not", "test panic"]
    truth_shape_dim_zero = [4]
    truth_dim_one = ["this is a test", "please do not panic"]
    truth_shape_dim_one = [2]
    self._testReduceJoin(
        input_array,
        truth_dim_zero,
        truth_shape_dim_zero,
        reduction_indices=0,
        separator=" ")
    self._testReduceJoin(
        input_array,
        truth_dim_one,
        truth_shape_dim_one,
        reduction_indices=1,
        separator=" ")

  def testUnknownShape(self):
    # With a placeholder input the static output shape is unknown.
    input_array = [["a"], ["b"]]
    truth = ["ab"]
    truth_shape = None
    with self.test_session():
      placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
      reduced = string_ops.reduce_join(placeholder, reduction_indices=0)
      output_array = reduced.eval(feed_dict={placeholder.name: input_array})
      self.assertAllEqualUnicode(truth, output_array)
      self.assertAllEqual(truth_shape, reduced.get_shape())

  def testUnknownIndices(self):
    # If the reduction indices are fed at run time, shape inference
    # cannot determine the output shape.
    input_array = [["this", "is", "a", "test"],
                   ["please", "do", "not", "panic"]]
    truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
    truth_dim_one = ["thisisatest", "pleasedonotpanic"]
    truth_shape = None
    with self.test_session():
      placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
      reduced = string_ops.reduce_join(
          input_array, reduction_indices=placeholder)
      output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
      output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
      self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
      self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
      self.assertAllEqual(truth_shape, reduced.get_shape())

  def testKeepDims(self):
    input_array = [["this", "is", "a", "test"],
                   ["please", "do", "not", "panic"]]
    truth_dim_zero = [["thisplease", "isdo", "anot", "testpanic"]]
    truth_shape_dim_zero = [1, 4]
    truth_dim_one = [["thisisatest"], ["pleasedonotpanic"]]
    truth_shape_dim_one = [2, 1]
    self._testReduceJoin(
        input_array,
        truth_dim_zero,
        truth_shape_dim_zero,
        reduction_indices=0,
        keep_dims=True)
    self._testReduceJoin(
        input_array,
        truth_dim_one,
        truth_shape_dim_one,
        reduction_indices=1,
        keep_dims=True)

    # With keep_dims and reduction_indices=None, all dims become size 1.
    expected_val = [["thisisatestpleasedonotpanic"]]
    expected_shape = [1, 1]
    self._testReduceJoin(
        constant_op.constant(input_array), expected_val, expected_shape,
        keep_dims=True, reduction_indices=None)

    # Using [] reduction_indices is a no-op.
    expected_val = input_array
    expected_shape = [2, 4]
    self._testReduceJoin(
        input_array, expected_val, expected_shape,
        keep_dims=True, reduction_indices=[])

  def testMultiIndex(self):
    num_dims = 3
    input_array = _input_array(num_dims=num_dims)
    # Also tests [].
    for i in xrange(num_dims + 1):
      for permutation in itertools.permutations(xrange(num_dims), i):
        self._testMultipleReduceJoin(input_array, reduction_indices=permutation)

  def testInvalidReductionIndices(self):
    # Out-of-range indices are rejected at graph construction time.
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "Invalid reduction dim"):
        string_ops.reduce_join(inputs="", reduction_indices=0)
      with self.assertRaisesRegexp(ValueError,
                                   "Invalid reduction dimension -3"):
        string_ops.reduce_join(inputs=[[""]], reduction_indices=-3)
      with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
        string_ops.reduce_join(inputs=[[""]], reduction_indices=2)
      with self.assertRaisesRegexp(ValueError,
                                   "Invalid reduction dimension -3"):
        string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, -3])
      with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
        string_ops.reduce_join(inputs=[[""]], reduction_indices=[0, 2])

  def testZeroDims(self):
    with self.test_session():
      inputs = np.zeros([0, 1], dtype=str)

      # Reduction that drops the dim of size 0.
      output = string_ops.reduce_join(inputs=inputs, reduction_indices=0)
      self.assertAllEqualUnicode([""], output.eval())

      # Reduction that keeps the dim of size 0.
      output = string_ops.reduce_join(inputs=inputs, reduction_indices=1)
      output_shape = output.eval().shape
      self.assertAllEqual([0], output_shape)

  def testInvalidArgsUnknownShape(self):
    # With an unknown input shape, index validation happens at run time.
    with self.test_session():
      placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
      index_too_high = string_ops.reduce_join(placeholder, reduction_indices=1)
      duplicate_index = string_ops.reduce_join(
          placeholder, reduction_indices=[-1, 1])
      with self.assertRaisesOpError("Invalid reduction dimension 1"):
        index_too_high.eval(feed_dict={placeholder.name: [""]})
      with self.assertRaisesOpError("Duplicate reduction dimension 1"):
        duplicate_index.eval(feed_dict={placeholder.name: [[""]]})

  def testInvalidArgsUnknownIndices(self):
    # With unknown indices, out-of-range values fail only at run time.
    with self.test_session():
      placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
      reduced = string_ops.reduce_join(
          ["test", "test2"], reduction_indices=placeholder)

      with self.assertRaisesOpError("reduction dimension -2"):
        reduced.eval(feed_dict={placeholder.name: -2})
      with self.assertRaisesOpError("reduction dimension 2"):
        reduced.eval(feed_dict={placeholder.name: 2})
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
kiran/bart-sign | venv/lib/python2.7/site-packages/numpy/polynomial/hermite.py | 49 | 56931 | """
Objects for dealing with Hermite series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermdomain` -- Hermite series default domain, [-1,1].
- `hermzero` -- Hermite series that evaluates identically to 0.
- `hermone` -- Hermite series that evaluates identically to 1.
- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
Calculus
--------
- `hermder` -- differentiate a Hermite series.
- `hermint` -- integrate a Hermite series.
Misc Functions
--------------
- `hermfromroots` -- create a Hermite series with specified roots.
- `hermroots` -- find the roots of a Hermite series.
- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
- `hermweight` -- Hermite weight function.
- `hermcompanion` -- symmetrized companion matrix in Hermite form.
- `hermfit` -- least-squares fit returning a Hermite series.
- `hermtrim` -- trim leading coefficients from a Hermite series.
- `hermline` -- Hermite series of given straight line.
- `herm2poly` -- convert a Hermite series to a polynomial.
- `poly2herm` -- convert a polynomial to a Hermite series.
Classes
-------
- `Hermite` -- A Hermite series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
# Public API of this module.
__all__ = [
    'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd',
    'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval',
    'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots',
    'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite',
    'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d',
    'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight']

# Coefficient trimming is basis-independent; reuse the shared helper.
hermtrim = pu.trimcoef
def poly2herm(pol):
    """
    poly2herm(pol)

    Convert a polynomial to a Hermite series.

    Convert an array representing the coefficients of a polynomial
    (relative to the "standard" basis) ordered from lowest degree to
    highest, to an array of the coefficients of the equivalent Hermite
    series, ordered from lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Hermite
        series.

    See Also
    --------
    herm2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite import poly2herm
    >>> poly2herm(np.arange(4))
    array([ 1.  ,  2.75 ,  0.5 ,  0.375])

    """
    [pol] = pu.as_series([pol])
    result = 0
    # Horner's scheme carried out in the Hermite basis: multiply the
    # running series by x and fold in the next lower-degree coefficient,
    # starting from the highest degree.
    for coef in pol[::-1]:
        result = hermadd(hermmulx(result), coef)
    return result
def herm2poly(c):
    """
    Convert a Hermite series to a polynomial.

    Convert an array representing the coefficients of a Hermite series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.

    Parameters
    ----------
    c : array_like
        1-D array containing the Hermite series coefficients, ordered
        from lowest order term to highest.

    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.

    See Also
    --------
    poly2herm

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite import herm2poly
    >>> herm2poly([ 1.   ,  2.75 ,  0.5  ,  0.375])
    array([ 0.,  1.,  2.,  3.])

    """
    from numpy.polynomial.polynomial import polyadd, polysub, polymulx

    [c] = pu.as_series([c])
    n = len(c)
    # Degree 0 and 1 are handled directly; note H_1(x) = 2x.
    if n == 1:
        return c
    if n == 2:
        c[1] *= 2
        return c
    c0 = c[-2]
    c1 = c[-1]
    # Clenshaw's recurrence run downward; throughout the loop ``c1``
    # holds the coefficient of degree ``i``.  The tuple assignment uses
    # the pre-update values of both c0 and c1 on the right-hand side.
    for i in range(n - 1, 1, -1):
        c0, c1 = (polysub(c[i - 2], c1*(2*(i - 1))),
                  polyadd(c0, polymulx(c1)*2))
    return polyadd(c0, polymulx(c1)*2)
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#

# Hermite series default domain.
hermdomain = np.array([-1, 1])

# Hermite coefficients representing zero.
hermzero = np.array([0])

# Hermite coefficients representing one.
hermone = np.array([1])

# Hermite coefficients representing the identity x (x = H_1(x)/2).
hermx = np.array([0, 1/2])
def hermline(off, scl):
    """
    Hermite series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Hermite series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermline, hermval
    >>> hermval(0,hermline(3, 2))
    3.0
    >>> hermval(1,hermline(3, 2))
    5.0

    """
    # Since H_1(x) = 2*x, a slope of ``scl`` needs coefficient scl/2.
    if scl == 0:
        return np.array([off])
    return np.array([off, scl/2])
def hermfromroots(roots):
    """
    Generate a Hermite series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in Hermite form, where the `r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    The roots can appear in any order.

    If the returned coefficients are `c`, then

    .. math:: p(x) = c_0 + c_1 * H_1(x) + ... +  c_n * H_n(x)

    The coefficient of the last term is not generally 1 for monic
    polynomials in Hermite form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, chebfromroots,
    hermefromroots.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermfromroots, hermval
    >>> coef = hermfromroots((-1, 0, 1))
    >>> hermval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])
    >>> coef = hermfromroots((-1j, 1j))
    >>> hermval((-1j, 1j), coef)
    array([ 0.+0.j,  0.+0.j])

    """
    if len(roots) == 0:
        return np.ones(1)
    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # Multiply the linear factors pairwise so the partial products stay
    # balanced in degree, which is better behaved numerically than
    # accumulating one factor at a time.
    factors = [hermline(-r, 1) for r in roots]
    n = len(factors)
    while n > 1:
        half, odd = divmod(n, 2)
        paired = [hermmul(factors[i], factors[half + i]) for i in range(half)]
        if odd:
            paired[0] = hermmul(paired[0], factors[-1])
        factors = paired
        n = half
    return factors[0]
def hermadd(c1, c2):
    """
    Add one Hermite series to another.

    Returns the sum of two Hermite series `c1` + `c2`.  The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Hermite series of their sum.

    See Also
    --------
    hermsub, hermmul, hermdiv, hermpow

    Notes
    -----
    The sum of two Hermite series is again a Hermite series, so addition
    is simply "component-wise", just as for "standard" polynomials.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermadd
    >>> hermadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])

    """
    # Work on trimmed copies; make c1 the longer of the two, then add
    # the shorter series into its leading entries.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c2) > len(c1):
        c1, c2 = c2, c1
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def hermsub(c1, c2):
    """
    Subtract one Hermite series from another.

    Returns the difference of two Hermite series `c1` - `c2`.  The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their difference.

    See Also
    --------
    hermadd, hermmul, hermdiv, hermpow

    Notes
    -----
    The difference of two Hermite series is again a Hermite series, so
    subtraction is simply "component-wise", just as for "standard"
    polynomials.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermsub
    >>> hermsub([1, 2, 3, 4], [1, 2, 3])
    array([ 0.,  0.,  0.,  4.])

    """
    # Work on trimmed copies; accumulate the difference into whichever
    # operand is at least as long as the other.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        diff = c1
        diff[:c2.size] -= c2
    else:
        diff = -c2
        diff[:c1.size] += c1
    return pu.trimseq(diff)
def hermmulx(c):
    """Multiply a Hermite series by x.

    Multiply the Hermite series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    The multiplication uses the recursion relationship for Hermite
    polynomials in the form

    .. math::

    xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermmulx
    >>> hermmulx([1, 2, 3])
    array([ 2. ,  6.5,  1. ,  1.5])

    """
    [c] = pu.as_series([c])
    # The zero series maps to itself and would otherwise grow a term.
    if len(c) == 1 and c[0] == 0:
        return c

    out = np.empty(len(c) + 1, dtype=c.dtype)
    out[0] = c[0]*0
    out[1] = c[0]/2
    # Apply x*H_i = H_{i+1}/2 + i*H_{i-1} term by term.
    for i, ci in enumerate(c[1:], start=1):
        out[i + 1] = ci/2
        out[i - 1] += ci*i
    return out
def hermmul(c1, c2):
    """
    Multiply one Hermite series by another.

    Returns the product of two Hermite series `c1` * `c2`.  The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their product.

    See Also
    --------
    hermadd, hermsub, hermdiv, hermpow

    Notes
    -----
    In general, the (polynomial) product of two C-series results in terms
    that are not in the Hermite polynomial basis set.  Thus, to express
    the product as a Hermite series, it is necessary to "reproject" the
    product onto said basis set, which may produce "unintuitive" (but
    correct) results; see Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermmul
    >>> hermmul([1, 2, 3], [0, 1, 2])
    array([ 52.,  29.,  52.,   7.,   6.])

    """
    [c1, c2] = pu.as_series([c1, c2])
    # Evaluate the shorter series at the longer one via Clenshaw's
    # recurrence, treating the longer series as "x".
    if len(c1) > len(c2):
        short, xs = c2, c1
    else:
        short, xs = c1, c2

    if len(short) == 1:
        c0, c1 = short[0]*xs, 0
    elif len(short) == 2:
        c0, c1 = short[0]*xs, short[1]*xs
    else:
        nd = len(short)
        c0 = short[-2]*xs
        c1 = short[-1]*xs
        for i in range(3, len(short) + 1):
            nd = nd - 1
            # Both right-hand sides use the pre-update c0/c1 values.
            c0, c1 = (hermsub(short[-i]*xs, c1*(2*(nd - 1))),
                      hermadd(c0, hermmulx(c1)*2))
    return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
    """
    Divide one Hermite series by another.

    Returns the quotient-with-remainder of two Hermite series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Hermite series coefficients representing the quotient and
        remainder.

    See Also
    --------
    hermadd, hermsub, hermmul, hermpow

    Notes
    -----
    In general, the (polynomial) division of one Hermite series by another
    results in quotient and remainder terms that are not in the Hermite
    polynomial basis set.  Thus, to express these results as a Hermite
    series, it is necessary to "reproject" the results onto the Hermite
    basis set, which may produce "unintuitive" (but correct) results; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermdiv
    >>> hermdiv([ 52.,  29.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 0.]))
    >>> hermdiv([ 54.,  31.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 2.,  2.]))
    >>> hermdiv([ 53.,  30.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 1.,  1.]))

    """
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    len1 = len(c1)
    len2 = len(c2)
    if len1 < len2:
        # Divisor degree exceeds dividend degree: quotient is zero.
        return c1[:1]*0, c1
    if len2 == 1:
        # Division by a constant series.
        return c1/c2[-1], c1[:1]*0
    quo = np.empty(len1 - len2 + 1, dtype=c1.dtype)
    rem = c1
    # Long division: each step cancels the remainder's leading coefficient
    # with a degree-shifted multiple of the divisor.
    for i in range(len1 - len2, -1, -1):
        shifted = hermmul([0]*i + [1], c2)
        scale = rem[-1]/shifted[-1]
        rem = rem[:-1] - scale*shifted[:-1]
        quo[i] = scale
    return quo, pu.trimseq(rem)
def hermpow(c, pow, maxpower=16):
    """Raise a Hermite series to a power.

    Returns the Hermite series `c` raised to the power `pow`.  The
    argument `c` is a sequence of coefficients ordered from low to high.
    i.e., [1,2,3] is the series  ``P_0 + 2*P_1 + 3*P_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed.  This is mainly to limit growth of the
        series to unmanageable size.  Default is 16

    Returns
    -------
    coef : ndarray
        Hermite series of power.

    See Also
    --------
    hermadd, hermsub, hermmul, hermdiv

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermpow
    >>> hermpow([1, 2, 3], 2)
    array([ 81.,  52.,  82.,  12.,   9.])

    """
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # Repeated multiplication; binary exponentiation would also work but
    # the powers involved are capped by ``maxpower`` anyway.
    prd = c
    for _ in range(power - 1):
        prd = hermmul(prd, c)
    return prd
def hermder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Hermite series.

    Returns the Hermite series coefficients `c` differentiated `m` times
    along `axis`.  At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable).  The argument
    `c` is an array of coefficients from low to high degree along each
    axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2``
    while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) +
    2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is
    ``y``.

    Parameters
    ----------
    c : array_like
        Array of Hermite series coefficients. If `c` is multidimensional the
        different axis correspond to different variables with the degree in
        each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change of
        variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Hermite series of the derivative.

    See Also
    --------
    hermint

    Notes
    -----
    In general, the result of differentiating a Hermite series does not
    resemble the same operation on a power series. Thus the result of this
    function may be "unintuitive," albeit correct; see Examples section
    below.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermder
    >>> hermder([ 1. ,  0.5,  0.5,  0.5])
    array([ 1.,  2.,  3.])
    >>> hermder([-0.5,  1./2.,  1./8.,  1./12.,  1./16.], m=2)
    array([ 1.,  2.,  3.])

    """
    # Use a real boolean for ``copy`` (the old ``copy=1`` relied on
    # implicit truthiness).
    c = np.array(c, ndmin=1, copy=True)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote boolean/integer coefficients so the in-place scaling
        # below cannot truncate.
        c = c.astype(np.double)
    cnt, iaxis = [int(t) for t in [m, axis]]

    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim

    if cnt == 0:
        return c

    # Bring the differentiation axis to the front (moveaxis replaces the
    # deprecated rollaxis idiom).
    c = np.moveaxis(c, iaxis, 0)
    n = len(c)
    if cnt >= n:
        # Differentiating at least as many times as the degree gives 0.
        c = c[:1]*0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            for j in range(n, 0, -1):
                # d/dx H_j = 2*j*H_{j-1}
                der[j - 1] = (2*j)*c[j]
            c = der
    c = np.moveaxis(c, 0, iaxis)
    return c
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Hermite series.

    Returns the Hermite series coefficients `c` integrated `m` times from
    `lbnd` along `axis`. At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable.  ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.)  The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
    represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
    2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Hermite series coefficients. If c is multidimensional the
        different axis correspond to different variables with the degree in
        each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at
        ``lbnd`` is the first value in the list, the value of the second
        integral at ``lbnd`` is the second value, etc.  If ``k == []`` (the
        default), all constants are set to zero.  If ``m == 1``, a single
        scalar can be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        Hermite series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
        ``np.isscalar(scl) == False``.

    See Also
    --------
    hermder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note?  Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`.  Then
    .. math::`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a` - perhaps not what one would have first thought.

    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set.  Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermint
    >>> hermint([1,2,3]) # integrate once, value 0 at 0.
    array([ 1. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
    array([-0.5       ,  0.5       ,  0.125     ,  0.08333333,  0.0625    ])
    >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
    array([ 2. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
    array([-2. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
    array([ 1.66666667, -0.5       ,  0.125     ,  0.08333333,  0.0625    ])

    """
    # Use a real boolean for ``copy`` (the old ``copy=1`` relied on
    # implicit truthiness).
    c = np.array(c, ndmin=1, copy=True)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote boolean/integer coefficients so the in-place scaling
        # below cannot truncate.
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    cnt, iaxis = [int(t) for t in [m, axis]]

    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim

    if cnt == 0:
        return c

    # Bring the integration axis to the front (moveaxis replaces the
    # deprecated rollaxis idiom).
    c = np.moveaxis(c, iaxis, 0)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # The zero series integrates to the constant of integration.
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            tmp[0] = c[0]*0
            tmp[1] = c[0]/2
            for j in range(1, n):
                # Antiderivative of H_j is H_{j+1}/(2*(j+1)).
                tmp[j + 1] = c[j]/(2*(j + 1))
            # Shift so the integral takes the value k[i] at lbnd.
            tmp[0] += k[i] - hermval(lbnd, tmp)
            c = tmp
    c = np.moveaxis(c, 0, iaxis)
    return c
def hermval(x, c, tensor=True):
    """
    Evaluate an Hermite series at points x.

    If `c` is of length `n + 1`, this function returns the value:

    .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)

    The parameter `x` is converted to an array only if it is a tuple or a
    list, otherwise it is treated as a scalar. In either case, either `x`
    or its elements must support multiplication and addition both with
    themselves and with the elements of `c`.

    If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`.
    If `c` is multidimensional and `tensor` is true, the result has shape
    ``c.shape[1:] + x.shape``; if `tensor` is false, the shape is
    ``c.shape[1:]``. Trailing zeros in the coefficients are used in the
    evaluation, so they should be avoided if efficiency is a concern.

    Parameters
    ----------
    x : array_like, compatible object
        If `x` is a list or tuple, it is converted to an ndarray,
        otherwise it is left unchanged and treated as a scalar. In either
        case, `x` or its elements must support addition and
        multiplication with themselves and with the elements of `c`.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms
        of degree n are contained in ``c[n]``. If `c` is multidimensional
        the remaining indices enumerate multiple polynomials. In the two
        dimensional case the coefficients may be thought of as stored in
        the columns of `c`.
    tensor : boolean, optional
        If True, the shape of the coefficient array is extended with ones
        on the right, one for each dimension of `x`, so every column of
        coefficients in `c` is evaluated for every element of `x`. If
        False, `x` is broadcast over the columns of `c` for the
        evaluation. This keyword is useful when `c` is multidimensional.
        The default value is True.

        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        The shape of the return value is described above.

    See Also
    --------
    hermval2d, hermgrid2d, hermval3d, hermgrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division, built
    on the Hermite recurrence ``H_{k+1}(x) = 2*x*H_k(x) - 2*k*H_{k-1}(x)``.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermval
    >>> coef = [1,2,3]
    >>> hermval(1, coef)
    11.0
    >>> hermval([[1,2],[3,4]], coef)
    array([[  11.,   51.],
           [ 115.,  203.]])
    """
    c = np.array(c, ndmin=1, copy=False)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # Promote boolean/integer coefficients so the recursion below is
        # carried out in floating point.
        c = c.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if isinstance(x, np.ndarray) and tensor:
        # Append one axis per dimension of x so each coefficient column
        # broadcasts against every evaluation point.
        c = c.reshape(c.shape + (1,)*x.ndim)

    x2 = x*2
    npts = len(c)
    if npts == 1:
        b0 = c[0]
        b1 = 0
    elif npts == 2:
        b0 = c[0]
        b1 = c[1]
    else:
        # Clenshaw backward recurrence; `deg` tracks the degree whose
        # recurrence factor 2*(deg - 1) gets folded in next.
        deg = npts
        b0 = c[-2]
        b1 = c[-1]
        for k in range(3, npts + 1):
            deg = deg - 1
            b0, b1 = c[-k] - b1*(2*(deg - 1)), b0 + b1*x2
    return b0 + b1*x2
def hermval2d(x, y, c):
    """
    Evaluate a 2-D Hermite series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars and they
    must have the same shape after conversion. In either case, either `x`
    and `y` or their elements must support multiplication and addition both
    with themselves and with the elements of `c`.

    If `c` is a 1-D array a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be c.shape[2:] + x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points `(x, y)`,
        where `x` and `y` must have the same shape. If `x` or `y` is a list
        or tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and if it isn't an ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points formed with
        pairs of corresponding values from `x` and `y`.

    See Also
    --------
    hermval, hermgrid2d, hermval3d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        x, y = np.array((x, y), copy=0)
    except Exception:
        # Was a bare ``except:``, which would also swallow SystemExit and
        # KeyboardInterrupt; only conversion failures should become a
        # ValueError.
        raise ValueError('x, y are incompatible')
    # Evaluate along x, then broadcast the y evaluation over the result.
    c = hermval(x, c)
    c = hermval(y, c, tensor=False)
    return c
def hermgrid2d(x, y, c):
    """
    Evaluate a 2-D Hermite series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)

    where the points `(a, b)` consist of all pairs formed by taking
    `a` from `x` and `b` from `y`. The resulting points form a grid with
    `x` in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`. If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points in the Cartesian
        product of `x` and `y`.

    See Also
    --------
    hermval, hermval2d, hermval3d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Evaluating along x appends x's axes on the right; the subsequent y
    # evaluation (with tensor=True) appends y's axes, producing the grid.
    c = hermval(x, c)
    c = hermval(y, c)
    return c
def hermval3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)

    The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or a lists, otherwise they are treated as a scalars and
    they must have the same shape after conversion. In either case, either
    `x`, `y`, and `z` or their elements must support multiplication and
    addition both with themselves and with the elements of `c`.

    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
    shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
        any of `x`, `y`, or `z` is a list or tuple, it is first converted
        to an ndarray, otherwise it is left unchanged and if it isn't an
        ndarray it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than 3 the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
        triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except Exception:
        # Was a bare ``except:``, which would also swallow SystemExit and
        # KeyboardInterrupt; only conversion failures should become a
        # ValueError.
        raise ValueError('x, y, z are incompatible')
    # Evaluate one axis at a time; tensor=False broadcasts y and z over
    # the already-evaluated result.
    c = hermval(x, c)
    c = hermval(y, c, tensor=False)
    c = hermval(z, c, tensor=False)
    return c
def hermgrid3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)

    where the points `(a, b, c)` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or a lists, otherwise they are treated as a scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape + y.shape + z.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Each evaluation appends that variable's axes on the right, so the
    # final result is laid out on the full Cartesian grid.
    c = hermval(x, c)
    c = hermval(y, c)
    c = hermval(z, c)
    return c
def hermvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
    `x`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., i] = H_i(x),

    where `0 <= i <= deg`. The leading indices of `V` index the elements of
    `x` and the last index is the degree of the Hermite polynomial.

    If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
    array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
    ``hermval(x, c)`` are the same up to roundoff. This equivalence is
    useful both for least squares fitting and for the evaluation of a large
    number of Hermite series of the same degree and sample points.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
        corresponding Hermite polynomial. The dtype will be the same as
        the converted `x`.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermvander
    >>> x = np.array([-1, 0, 1])
    >>> hermvander(x, 3)
    array([[ 1., -2.,  2.,  4.],
           [ 1.,  0., -2., -0.],
           [ 1.,  2.,  2., -4.]])
    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # ``+ 0.0`` promotes integer/boolean input to floating point.
    x = np.array(x, copy=False, ndmin=1) + 0.0
    van = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
    # H_0 is identically one; written as x*0 + 1 to broadcast over x.
    van[0] = x*0 + 1
    if ideg > 0:
        x2 = x*2
        van[1] = x2
        # Forward recurrence: H_{i}(x) = 2x H_{i-1}(x) - 2(i-1) H_{i-2}(x).
        for i in range(2, ideg + 1):
            van[i] = van[i-1]*x2 - van[i-2]*(2*(i - 1))
    # Move the degree axis last so V[..., i] = H_i(x).
    return np.rollaxis(van, 0, van.ndim)
def hermvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y)`. The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., deg[1]*i + j] = H_i(x) * H_j(y),

    where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
    `V` index the points `(x, y)` and the last index encodes the degrees of
    the Hermite polynomials.

    If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
    correspond to the elements of a 2-D coefficient array `c` of shape
    (xdeg + 1, ydeg + 1) in the order

    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...

    and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
    up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 2-D Hermite
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    hermvander, hermvander3d, hermval2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    ideg = [int(d) for d in deg]
    # Avoid shadowing the ``id`` builtin and compare against booleans,
    # not ints: every requested degree must be a non-negative integer,
    # and exactly two degrees must be given.
    is_valid = [di == d and di >= 0 for di, d in zip(ideg, deg)]
    if is_valid != [True, True]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy = ideg
    x, y = np.array((x, y), copy=0) + 0.0

    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    # Outer product over the degree axes, then flatten them into one.
    v = vx[..., None]*vy[..., None,:]
    return v.reshape(v.shape[:-2] + (-1,))
def hermvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then The pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),

    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= j <= n`. The leading
    indices of `V` index the points `(x, y, z)` and the last index encodes
    the degrees of the Hermite polynomials.

    If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D Hermite
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    hermvander, hermvander2d, hermval2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    ideg = [int(d) for d in deg]
    # Avoid shadowing the ``id`` builtin and compare against booleans,
    # not ints: every requested degree must be a non-negative integer,
    # and exactly three degrees must be given.
    is_valid = [di == d and di >= 0 for di, d in zip(ideg, deg)]
    if is_valid != [True, True, True]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = ideg
    x, y, z = np.array((x, y, z), copy=0) + 0.0

    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    vz = hermvander(z, degz)
    # Triple outer product over the degree axes, then flatten them.
    v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
    return v.reshape(v.shape[:-3] + (-1,))
def hermfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Hermite series to data.

    Return the coefficients of a Hermite series of degree `deg` that is the
    least squares fit to the data values `y` given at points `x`. If `y` is
    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
    fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance. The default value is None.

    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Hermite coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column k of `y` are in column
        `k`.
    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False. The
        warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)

    See Also
    --------
    chebfit, legfit, lagfit, polyfit, hermefit
    hermval : Evaluates a Hermite series.
    hermvander : Vandermonde matrix of Hermite series.
    hermweight : Hermite weight function
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients of the Hermite series `p` that
    minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where the :math:`w_j` are the weights. This problem is solved by
    setting up the (typically) overdetermined matrix equation

    .. math:: V(x) * c = w * y,

    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
    coefficients to be solved for, `w` are the weights, `y` are the
    observed values. This equation is then solved using the singular value
    decomposition of `V`.

    If some of the singular values of `V` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning. The `rcond` parameter can also be
    set to a value smaller than its default, but the resulting fit may be
    spurious and have large contributions from roundoff error.

    Fits using Hermite series are probably most useful when the data can be
    approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
    weight. In that case the weight ``sqrt(w(x[i])`` should be used
    together with data values ``y[i]/sqrt(w(x[i])``. The weight function is
    available as `hermweight`.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermfit, hermval
    >>> x = np.linspace(-10, 10)
    >>> err = np.random.randn(len(x))/10
    >>> y = hermval(x, [1, 2, 3]) + err
    >>> hermfit(x, y, 2)
    array([ 0.97902637,  1.99849131,  3.00006   ])
    """
    order = int(deg) + 1
    # ``+ 0.0`` promotes integer input to floating point.
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    # set up the least squares matrices in transposed form
    lhs = hermvander(x, deg).T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        # (broadcasts w across the rows of the transposed matrices)
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        # default cutoff scales with problem size and float precision
        rcond = len(x)*np.finfo(x.dtype).eps

    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    # guard against division by zero for identically-zero columns
    scl[scl == 0] = 1

    # Solve the least squares problem.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    # undo the column scaling on the solution
    c = (c.T/scl).T

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
def hermcompanion(c):
    """Return the scaled companion matrix of c.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is an Hermite basis polynomial. This provides
    better eigenvalue estimates than the unscaled case and for basis
    polynomials the eigenvalues are guaranteed to be real if
    `numpy.linalg.eigvalsh` is used to obtain them.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to high
        degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).

    Notes
    -----
    .. versionadded::1.7.0
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # Degree-1 series: the 1x1 companion matrix is the single root.
        return np.array([[-.5*c[0]/c[1]]])

    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Cumulative diagonal scaling (built back-to-front) that symmetrizes
    # the companion matrix for a pure basis polynomial.
    scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1))))
    scl = np.multiply.accumulate(scl)[::-1]
    # Strided views of the super- and sub-diagonal of the flattened matrix.
    top = mat.reshape(-1)[1::n+1]
    bot = mat.reshape(-1)[n::n+1]
    top[...] = np.sqrt(.5*np.arange(1, n))
    bot[...] = top
    # Fold the (scaled) series coefficients into the last column.
    mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
    return mat
def hermroots(c):
    """
    Compute the roots of a Hermite series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * H_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, lagroots, chebroots, hermeroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane, or roots with
    multiplicity greater than 1, may carry large numerical errors; isolated
    roots near the origin can be polished with a few iterations of Newton's
    method. Because the Hermite basis polynomials aren't powers of `x`, the
    coefficients may look unintuitive relative to the roots.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermroots, hermfromroots
    >>> coef = hermfromroots([-1, 0, 1])
    >>> coef
    array([ 0.   ,  0.25 ,  0.   ,  0.125])
    >>> hermroots(coef)
    array([ -1.00000000e+00,  -1.38777878e-17,   1.00000000e+00])
    """
    # pu.as_series trims trailing zeros and yields a 1-D working copy.
    [c] = pu.as_series([c])
    ncoef = len(c)
    if ncoef <= 1:
        # A constant (or empty) series has no roots.
        return np.array([], dtype=c.dtype)
    if ncoef == 2:
        # Linear series: closed-form root of c[0] + c[1]*H_1(x).
        return np.array([-.5*c[0]/c[1]])

    roots = la.eigvals(hermcompanion(c))
    roots.sort()
    return roots
def _normed_hermite_n(x, n):
"""
Evaluate a normalized Hermite polynomial.
Compute the value of the normalized Hermite polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized Hermite function to be evaluated.
Returns
-------
values : ndarray
The shape of the return value is described above.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard Hermite functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(2./nd)
nd = nd - 1.0
return c0 + c1*x*np.sqrt(2)
def hermgauss(deg):
    r"""
    Gauss-Hermite quadrature.

    Computes the sample points and weights for Gauss-Hermite quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\inf, \inf]`
    with the weight function :math:`f(x) = \exp(-x^2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----
    .. versionadded::1.7.0

    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`H_n`, and then scaling the results to get
    the right value when integrating 1.
    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        # The check rejects deg < 1, so the message must say "positive",
        # not "non-negative".
        raise ValueError("deg must be a positive integer")

    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    # (use the validated ideg, not the raw deg, to build the coefficients)
    c = np.array([0]*ideg + [1], dtype=np.float64)
    m = hermcompanion(c)
    x = la.eigvalsh(m)
    x.sort()

    # improve roots by one application of Newton
    dy = _normed_hermite_n(x, ideg)
    df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
    x -= dy/df

    # compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = _normed_hermite_n(x, ideg - 1)
    fm /= np.abs(fm).max()
    w = 1/(fm * fm)

    # for Hermite we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # scale w to get the right value
    w *= np.sqrt(np.pi) / w.sum()

    return x, w
def hermweight(x):
    r"""
    Weight function of the Hermite polynomials.

    The weight function is :math:`\exp(-x^2)` and the interval of
    integration is :math:`[-\inf, \inf]`. The Hermite polynomials are
    orthogonal, but not normalized, with respect to this weight function.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Raw docstring avoids the invalid \e and \i escape sequences the
    # original non-raw docstring relied on (deprecated since Python 3.6).
    w = np.exp(-x**2)
    return w
#
# Hermite series class
#
class Hermite(ABCPolyBase):
    """An Hermite series class.

    The Hermite class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.

    Parameters
    ----------
    coef : array_like
        Hermite coefficients in order of increasing degree, i.e,
        ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(X) + 3*H_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].

        .. versionadded:: 1.6.0
    """
    # Virtual Functions
    # ABCPolyBase dispatches arithmetic and utility operations through
    # these hooks; staticmethod keeps the module-level functions from
    # being bound as instance methods.
    _add = staticmethod(hermadd)
    _sub = staticmethod(hermsub)
    _mul = staticmethod(hermmul)
    _div = staticmethod(hermdiv)
    _pow = staticmethod(hermpow)
    _val = staticmethod(hermval)
    _int = staticmethod(hermint)
    _der = staticmethod(hermder)
    _fit = staticmethod(hermfit)
    _line = staticmethod(hermline)
    _roots = staticmethod(hermroots)
    _fromroots = staticmethod(hermfromroots)

    # Virtual properties
    nickname = 'herm'
    # Hermite series use the same default interval for domain and window.
    domain = np.array(hermdomain)
    window = np.array(hermdomain)
| mit |
Juggerr/pykml | src/examples/KmlReference/animatedupdate_example.py | 7 | 2167 | #!/usr/bin/env python
'''Generate a KML string that matches the animated update example.
References:
http://code.google.com/apis/kml/documentation/kmlreference.html#gxanimatedupdate
http://code.google.com/apis/kml/documentation/kmlfiles/animatedupdate_example.kml
Note that as of 12/1/2010, the KML code displayed beneath the animatedupdate_example.kml link
is not valid.
* The <scale> element should not be a subelement of <Icon>.
* The <gx:duration> element should be the first subelement of <gx:FlyTo>
'''
from lxml import etree
from pykml.parser import Schema
from pykml.factory import KML_ElementMaker as KML
from pykml.factory import GX_ElementMaker as GX
# Build the KML document tree: a styled pushpin placemark plus a gx:Tour
# that flies the camera to the pin and then animates its icon scale.
doc = KML.kml(
  KML.Document(
    KML.name("gx:AnimatedUpdate example"),
    # Shared style for the placemark; the tour's AnimatedUpdate targets
    # the inner IconStyle by its id ("mystyle").
    KML.Style(
      KML.IconStyle(
        KML.scale(1.0),
        KML.Icon(
          KML.href("http://maps.google.com/mapfiles/kml/pushpin/ylw-pushpin.png"),
        ),
        id="mystyle"
      ),
      id="pushpin"
    ),
    # The placemark the tour zooms in on.
    KML.Placemark(
      KML.name("Pin on a mountaintop"),
      KML.styleUrl("#pushpin"),
      KML.Point(
        KML.coordinates(170.1435558771009,-43.60505741890396,0)
      ),
      id="mountainpin1"
    ),
    # Tour: fly to the pin (3s), grow its icon 10x over 5s, then wait 5s.
    GX.Tour(
      KML.name("Play me!"),
      GX.Playlist(
        GX.FlyTo(
          # Per the module docstring, gx:duration must be the first
          # subelement of gx:FlyTo.
          GX.duration(3),
          GX.flyToMode("bounce"),
          KML.Camera(
            KML.longitude(170.157),
            KML.latitude(-43.671),
            KML.altitude(9700),
            KML.heading(-6.333),
            KML.tilt(33.5),
          )
        ),
        GX.AnimatedUpdate(
          GX.duration(5),
          KML.Update(
            KML.targetHref(),
            KML.Change(
              KML.IconStyle(
                KML.scale(10.0),
                targetId="mystyle"
              )
            )
          )
        ),
        GX.Wait(
          GX.duration(5)
        )
      )
    )
  )
)
print etree.tostring(doc, pretty_print=True)
# output a KML file (named based on the Python script)
outfile = file(__file__.rstrip('.py')+'.kml','w')
outfile.write(etree.tostring(doc, pretty_print=True))
schema = Schema('kml22gx.xsd')
import ipdb; ipdb.set_trace()
schema.validate(doc)
| bsd-3-clause |
openstenoproject/plover | plover/machine/base.py | 1 | 8197 | # Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
# TODO: add tests for all machines
# TODO: add tests for new status callbacks
"""Base classes for machine types. Do not use directly."""
import binascii
import threading
import serial
from plover import _, log
from plover.machine.keymap import Keymap
from plover.misc import boolean
# i18n: Machine state.
STATE_STOPPED = _('stopped')
# i18n: Machine state.
STATE_INITIALIZING = _('initializing')
# i18n: Machine state.
STATE_RUNNING = _('connected')
# i18n: Machine state.
STATE_ERROR = _('disconnected')
class StenotypeBase:
    """The base class for all Stenotype classes."""

    # Layout of physical keys.
    KEYS_LAYOUT = ''
    # And special actions to map to.
    ACTIONS = ()
    # Fallback to use as machine type for finding a compatible keymap
    # if one is not already available for this machine type.
    KEYMAP_MACHINE_TYPE = None

    def __init__(self):
        # Start with an identity keymap: every key translates to itself.
        key_list = self.get_keys()
        self.keymap = Keymap(key_list, key_list)
        self.keymap.set_mappings(zip(key_list, key_list))
        self.stroke_subscribers = []
        self.state_subscribers = []
        self.state = STATE_STOPPED

    def set_keymap(self, keymap):
        """Setup machine keymap."""
        self.keymap = keymap

    def start_capture(self):
        """Begin listening for output from the stenotype machine."""

    def stop_capture(self):
        """Stop listening for output from the stenotype machine."""

    def add_stroke_callback(self, callback):
        """Subscribe to output from the stenotype machine.

        Argument:

        callback -- The function to call whenever there is output from
        the stenotype machine and output is being captured.
        """
        self.stroke_subscribers.append(callback)

    def remove_stroke_callback(self, callback):
        """Unsubscribe from output from the stenotype machine.

        Argument:

        callback -- A function that was previously subscribed.
        """
        self.stroke_subscribers.remove(callback)

    def add_state_callback(self, callback):
        """Subscribe to machine connection state changes."""
        self.state_subscribers.append(callback)

    def remove_state_callback(self, callback):
        """Unsubscribe from machine connection state changes."""
        self.state_subscribers.remove(callback)

    def _notify(self, steno_keys):
        """Invoke the callback of each subscriber with the given argument."""
        for notify in self.stroke_subscribers:
            notify(steno_keys)

    def set_suppression(self, enabled):
        """Enable keyboard suppression.

        This is only of use for the keyboard machine,
        to suppress the keyboard when then engine is running.
        """

    def suppress_last_stroke(self, send_backspaces):
        """Suppress the last stroke key events after the fact.

        This is only of use for the keyboard machine,
        and the engine is resumed with a command stroke.

        Argument:

        send_backspaces -- The function to use to send backspaces.
        """

    def _set_state(self, state):
        # Record the new state and fan it out to every subscriber.
        self.state = state
        for notify in self.state_subscribers:
            notify(state)

    def _stopped(self):
        self._set_state(STATE_STOPPED)

    def _initializing(self):
        self._set_state(STATE_INITIALIZING)

    def _ready(self):
        self._set_state(STATE_RUNNING)

    def _error(self):
        self._set_state(STATE_ERROR)

    @classmethod
    def get_actions(cls):
        """List of supported actions to map to."""
        return cls.ACTIONS

    @classmethod
    def get_keys(cls):
        """Tuple of physical key names, in layout order."""
        return tuple(cls.KEYS_LAYOUT.split())

    @classmethod
    def get_option_info(cls):
        """Get the default options for this machine."""
        return {}
class ThreadedStenotypeBase(StenotypeBase, threading.Thread):
    """Stenotype base for machines that poll from a background thread.

    Subclasses are expected to override run().
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.name += '-machine'
        StenotypeBase.__init__(self)
        # Set to ask the worker thread to exit.
        self.finished = threading.Event()

    def run(self):
        """Worker loop; subclasses must provide their own implementation."""

    def start_capture(self):
        """Spawn the worker thread and report the machine as initializing."""
        self.finished.clear()
        self._initializing()
        self.start()

    def stop_capture(self):
        """Ask the worker thread to exit, wait for it, and report stopped."""
        self.finished.set()
        try:
            self.join()
        except RuntimeError:
            # The thread was never started (or was already joined).
            pass
        self._stopped()
class SerialStenotypeBase(ThreadedStenotypeBase):
    """For use with stenotype machines that connect via serial port.

    This class implements the three methods necessary for a standard
    stenotype interface: start_capture, stop_capture, and
    add_callback.
    """

    # Default serial parameters (same keys as serial.Serial keyword arguments).
    SERIAL_PARAMS = {
        'port': None,
        'baudrate': 9600,
        'bytesize': 8,
        'parity': 'N',
        'stopbits': 1,
        'timeout': 2.0,
    }

    def __init__(self, serial_params):
        """Monitor the stenotype over a serial port.

        The key-value pairs in the <serial_params> dict are the same
        as the keyword arguments for a serial.Serial object.
        """
        ThreadedStenotypeBase.__init__(self)
        self.serial_port = None
        self.serial_params = serial_params

    def _close_port(self):
        # Close and forget the port; safe to call when no port is open.
        if self.serial_port is None:
            return
        self.serial_port.close()
        self.serial_port = None

    def start_capture(self):
        """Open the serial port and start the capture thread.

        On failure to open the port, report the error state and bail out
        without starting the thread.
        """
        self._close_port()
        try:
            self.serial_port = serial.Serial(**self.serial_params)
        except (serial.SerialException, OSError):
            log.warning('Can\'t open serial port', exc_info=True)
            self._error()
            return
        if not self.serial_port.isOpen():
            log.warning('Serial port is not open: %s', self.serial_params.get('port'))
            self._error()
            return
        return ThreadedStenotypeBase.start_capture(self)

    def stop_capture(self):
        """Stop listening for output from the stenotype machine."""
        ThreadedStenotypeBase.stop_capture(self)
        self._close_port()

    @classmethod
    def get_option_info(cls):
        """Get the default options for this machine."""
        # Stop bits may be 1, 1.5 or 2: keep integral values as int.
        sb = lambda s: int(float(s)) if float(s).is_integer() else float(s)
        converters = {
            'port': str,
            'baudrate': int,
            'bytesize': int,
            'parity': str,
            'stopbits': sb,
            'timeout': float,
            'xonxoff': boolean,
            'rtscts': boolean,
        }
        return {
            setting: (default, converters[setting])
            for setting, default in cls.SERIAL_PARAMS.items()
        }

    def _iter_packets(self, packet_size):
        """Yield packets of <packet_size> bytes until the machine is stopped.

        N.B.: to workaround the fact that the Toshiba Bluetooth stack
        on Windows does not correctly handle the read timeout setting
        (returning immediately if some data is already available):
        - the effective timeout is re-configured to <timeout/packet_size>
        - multiple reads are done (until a packet is complete)
        - an incomplete packet will only be discarded if one of
          those reads return no data (but not on short read)
        """
        self.serial_port.timeout = max(
            self.serial_params.get('timeout', 1.0) / packet_size,
            0.01,
        )
        packet = b''
        # Note: use is_set() — Event.isSet() is a deprecated alias.
        while not self.finished.is_set():
            raw = self.serial_port.read(packet_size - len(packet))
            if not raw:
                # Read timed out with no data: drop any partial packet.
                if packet:
                    log.error('discarding incomplete packet: %s',
                              binascii.hexlify(packet))
                    packet = b''
                continue
            packet += raw
            if len(packet) != packet_size:
                # Short read: keep accumulating into the same packet.
                continue
            yield packet
            packet = b''
| gpl-2.0 |
taigaio/taiga-ncurses | tests/controllers/test_backlog_controller.py | 3 | 20304 | from concurrent.futures import Future
from unittest import mock
from taiga_ncurses.ui import signals, views
from taiga_ncurses import controllers
from taiga_ncurses.config import settings
from taiga_ncurses.executor import Executor
from taiga_ncurses.core import StateMachine
from tests import factories
def test_backlog_controller_show_the_help_popup():
    """Handling the help key attaches a help popup to the backlog view."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)

    assert not hasattr(project_detail_controller.view.backlog, "help_popup")
    project_detail_controller.handle(settings.data.backlog.keys.help)
    assert hasattr(project_detail_controller.view.backlog, "help_popup")
def test_backlog_controller_close_the_help_popup():
    """Clicking the help popup's close button removes it from the view."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.help)

    assert hasattr(project_detail_controller.view.backlog, "help_popup")
    help_popup = project_detail_controller.view.backlog.help_popup
    signals.emit(help_popup.close_button, "click")
    assert not hasattr(project_detail_controller.view.backlog, "help_popup")
def test_backlog_controller_reload():
    """The reload key re-fetches project stats and unassigned user stories."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    # Discard the calls made while building the controller.
    executor.project_stats.reset_mock()
    executor.unassigned_user_stories.reset_mock()

    assert executor.project_stats.call_count == 0
    assert executor.unassigned_user_stories.call_count == 0
    project_detail_controller.handle(settings.data.backlog.keys.reload)
    assert executor.project_stats.call_count == 1
    assert executor.unassigned_user_stories.call_count == 1
def test_backlog_controller_show_the_new_user_story_form():
    """The create key attaches a new-user-story form to the backlog view."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)

    assert not hasattr(project_detail_controller.view.backlog, "user_story_form")
    project_detail_controller.handle(settings.data.backlog.keys.create)
    assert hasattr(project_detail_controller.view.backlog, "user_story_form")
def test_backlog_controller_cancel_the_new_user_story_form():
    """Clicking cancel on the new-user-story form removes the form."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.create)

    assert hasattr(project_detail_controller.view.backlog, "user_story_form")
    form = project_detail_controller.view.backlog.user_story_form
    signals.emit(form.cancel_button, "click")
    assert not hasattr(project_detail_controller.view.backlog, "user_story_form")
def test_backlog_controller_submit_new_user_story_form_with_errors():
    """Saving an empty new-user-story form reports an error notification."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.create)
    form = project_detail_controller.view.backlog.user_story_form

    signals.emit(form.save_button, "click")
    assert project_view.backlog.notifier.error_msg.call_count == 1
def test_backlog_controller_submit_new_user_story_form_successfully():
    """Saving a filled new-user-story form calls the executor with the subject."""
    us_subject = "Create a new user story"
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor(create_user_story_response=factories.future(
        factories.successful_create_user_story_response(us_subject)))
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.create)
    form = project_detail_controller.view.backlog.user_story_form
    # Only count notifications triggered by the save below.
    project_view.backlog.notifier.reset_mock()

    form._subject_edit.set_edit_text(us_subject)
    signals.emit(form.save_button, "click")
    assert project_view.backlog.notifier.info_msg.call_count == 1
    assert executor.create_user_story.call_args.call_list()[0][0][0]["subject"] == us_subject
    assert executor.create_user_story.call_count == 1
    assert executor.create_user_story.return_value.result()["subject"] == us_subject
def test_backlog_controller_show_the_edit_user_story_form():
    """The edit key opens a form bound to the currently focused user story."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)

    assert not hasattr(project_detail_controller.view.backlog, "user_story_form")
    project_detail_controller.handle(settings.data.backlog.keys.edit)
    assert hasattr(project_detail_controller.view.backlog, "user_story_form")
    assert (project_detail_controller.view.backlog.user_story_form.user_story ==
            project_detail_controller.view.backlog.user_stories.widget.get_focus().user_story)
def test_backlog_controller_cancel_the_edit_user_story_form():
    """Clicking cancel on the edit-user-story form removes the form."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.edit)

    assert hasattr(project_detail_controller.view.backlog, "user_story_form")
    form = project_detail_controller.view.backlog.user_story_form
    signals.emit(form.cancel_button, "click")
    assert not hasattr(project_detail_controller.view.backlog, "user_story_form")
def test_backlog_controller_submit_the_edit_user_story_form_with_errors():
    """Saving an edit form with an emptied subject reports an error."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.edit)
    form = project_detail_controller.view.backlog.user_story_form

    form._subject_edit.set_edit_text("")
    signals.emit(form.save_button, "click")
    assert project_view.backlog.notifier.error_msg.call_count == 1
def test_backlog_controller_submit_edit_user_story_form_successfully():
    """Saving an edited subject updates the right story via the executor."""
    us_subject = "Update a user story"
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor(update_user_story_response=factories.future(
        factories.successful_update_user_story_response(us_subject)))
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.edit)
    form = project_detail_controller.view.backlog.user_story_form
    # Only count notifications triggered by the save below.
    project_view.backlog.notifier.reset_mock()

    form._subject_edit.set_edit_text(us_subject)
    signals.emit(form.save_button, "click")
    assert project_view.backlog.notifier.info_msg.call_count == 1
    assert (executor.update_user_story.call_args.call_list()[0][0][0]["id"] == form.user_story["id"])
    assert executor.update_user_story.call_args.call_list()[0][0][1]["subject"] == us_subject
    assert executor.update_user_story.call_count == 1
    assert executor.update_user_story.return_value.result()["subject"] == us_subject
def test_backlog_controller_move_user_story_down():
    """Decreasing priority swaps the first two stories (index 0 -> 1)."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_view.backlog.notifier.reset_mock()

    us_a_old = project_detail_controller.backlog.user_stories[0]
    us_b_old = project_detail_controller.backlog.user_stories[1]
    project_detail_controller.handle(settings.data.backlog.keys.decrease_priority)
    assert project_view.backlog.notifier.info_msg.call_count == 1
    # After the move, story A should sit where story B was and vice versa.
    us_b_new = project_detail_controller.backlog.user_stories[0]
    us_a_new = project_detail_controller.backlog.user_stories[1]
    assert us_a_old == us_a_new
    assert us_b_old == us_b_new
def test_backlog_controller_move_user_story_up():
    """Increasing priority of the focused story swaps it with the one above."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    # Focus the second story (widget index 2; index 0 is the header row).
    project_detail_controller.view.backlog.user_stories.widget.contents.focus = 2
    project_view.backlog.notifier.reset_mock()

    us_a_old = project_detail_controller.backlog.user_stories[0]
    us_b_old = project_detail_controller.backlog.user_stories[1]
    project_detail_controller.handle(settings.data.backlog.keys.increase_priority)
    assert project_view.backlog.notifier.info_msg.call_count == 1
    # After the move, story A should sit where story B was and vice versa.
    us_b_new = project_detail_controller.backlog.user_stories[0]
    us_a_new = project_detail_controller.backlog.user_stories[1]
    assert us_a_old == us_a_new
    assert us_b_old == us_b_new
def test_backlog_controller_update_user_stories_order_with_errors():
    """A failed (None) order-update response triggers an error notification."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor(update_user_stories_order_response=factories.future(None))
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)

    project_detail_controller.handle(settings.data.backlog.keys.update_order)
    assert project_view.backlog.notifier.error_msg.call_count == 1
def test_backlog_controller_update_user_stories_order_with_success():
    """A successful order update triggers an info notification."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_view.backlog.notifier.reset_mock()

    project_detail_controller.handle(settings.data.backlog.keys.update_order)
    assert project_view.backlog.notifier.info_msg.call_count == 1
def test_backlog_controller_delete_user_story_with_errors():
    """A failed delete reports an error but still targets the focused story."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor(delete_user_story_response=factories.future(None))
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)

    project_detail_controller.handle(settings.data.backlog.keys.delete)
    assert project_view.backlog.notifier.error_msg.call_count == 1
    assert (executor.delete_user_story.call_args.call_list()[0][0][0]["id"] ==
            project_detail_controller.backlog.user_stories[0]["id"])
def test_backlog_controller_delete_user_story_with_success():
    """A successful delete notifies and targets the focused story."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_view.backlog.notifier.reset_mock()

    project_detail_controller.handle(settings.data.backlog.keys.delete)
    assert project_view.backlog.notifier.info_msg.call_count == 1
    assert (executor.delete_user_story.call_args.call_list()[0][0][0]["id"] ==
            project_detail_controller.backlog.user_stories[0]["id"])
def test_backlog_controller_show_the_milestone_selector_popup():
    """The move-to-milestone key attaches a milestone selector popup."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)

    assert not hasattr(project_detail_controller.view.backlog, "milestone_selector_popup")
    project_detail_controller.handle(settings.data.backlog.keys.move_to_milestone)
    assert hasattr(project_detail_controller.view.backlog, "milestone_selector_popup")
def test_backlog_controller_close_the_milestone_selector_popup():
    """Clicking cancel on the milestone selector popup removes it."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.move_to_milestone)

    assert hasattr(project_detail_controller.view.backlog, "milestone_selector_popup")
    milestone_selector_popup = project_detail_controller.view.backlog.milestone_selector_popup
    signals.emit(milestone_selector_popup.cancel_button, "click")
    assert not hasattr(project_detail_controller.view.backlog, "milestone_selector_popup")
def test_backlog_controller_move_a_user_story_to_a_milestone():
    """Picking a milestone option updates the story with that milestone id."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.move_to_milestone)
    milestone_selector_popup = project_detail_controller.view.backlog.milestone_selector_popup
    project_view.backlog.notifier.reset_mock()

    assert project_view.backlog.notifier.info_msg.call_count == 0
    assert executor.update_user_story.call_count == 0
    signals.emit(milestone_selector_popup.options[2], "click")
    assert project_view.backlog.notifier.info_msg.call_count == 1
    assert executor.update_user_story.call_count == 1
    # Option 2 corresponds to the third-from-last milestone in the list.
    assert (executor.update_user_story.call_args.call_list()[0][0][1]["milestone"] ==
            milestone_selector_popup.project["list_of_milestones"][-3]["id"])
def test_backlog_controller_change_user_story_status():
    """Changing the status combo in the edit row updates the story's status."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.edit)
    project_view.backlog.notifier.reset_mock()

    us = project_detail_controller.view.backlog.user_stories.widget.contents[1][0]
    combo = us.base_widget.widget.contents[5][0]  # 5 => status
    item = combo.menu.get_item(0)  # 0 => New
    combo.item_changed(item, True)
    assert project_view.backlog.notifier.info_msg.call_count == 1
    assert executor.update_user_story.call_args.call_list()[0][0][1]["status"] == item.value
    assert executor.update_user_story.call_count == 1
def test_backlog_controller_change_user_story_points():
    """Changing the points combo in the edit row updates the story's points."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.edit)
    project_view.backlog.notifier.reset_mock()

    us = project_detail_controller.view.backlog.user_stories.widget.contents[1][0]
    combo = us.base_widget.widget.contents[6][0]  # 6 => points
    item = combo.menu.get_item(2)  # 2 => 1/2
    combo.item_changed(item, True)
    assert project_view.backlog.notifier.info_msg.call_count == 1
    assert list(executor.update_user_story.call_args.call_list()[0][0][1]["points"].values())[0] == item.value
    assert executor.update_user_story.call_count == 1
# BULK
def test_backlog_controller_show_the_new_user_stories_in_bulk_form():
    """The create-in-bulk key attaches the bulk user stories form."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)

    assert not hasattr(project_detail_controller.view.backlog, "user_stories_in_bulk_form")
    project_detail_controller.handle(settings.data.backlog.keys.create_in_bulk)
    assert hasattr(project_detail_controller.view.backlog, "user_stories_in_bulk_form")
def test_backlog_controller_cancel_the_new_user_stories_in_bulk_form():
    """Clicking cancel on the bulk form removes it from the view."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.create_in_bulk)

    assert hasattr(project_detail_controller.view.backlog, "user_stories_in_bulk_form")
    form = project_detail_controller.view.backlog.user_stories_in_bulk_form
    signals.emit(form.cancel_button, "click")
    assert not hasattr(project_detail_controller.view.backlog, "user_stories_in_bulk_form")
def test_backlog_controller_submit_new_user_stories_in_bulk_form_with_errors():
    """Saving an empty bulk form reports an error notification."""
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.create_in_bulk)
    form = project_detail_controller.view.backlog.user_stories_in_bulk_form

    signals.emit(form.save_button, "click")
    assert project_view.backlog.notifier.error_msg.call_count == 1
def test_backlog_controller_submit_new_user_stories_in_bulk_form_successfully():
    """Saving the bulk form sends the newline-separated subjects in one call."""
    us_subjects = "Create a new user story 1\nCreate a new user story 2"
    project = factories.project()
    project_view = views.projects.ProjectDetailView(project)
    project_view.backlog.notifier = mock.Mock()
    executor = factories.patched_executor()
    _ = mock.Mock()
    project_detail_controller = controllers.projects.ProjectDetailController(project_view, executor, _)
    project_detail_controller.handle(settings.data.backlog.keys.create_in_bulk)
    form = project_detail_controller.view.backlog.user_stories_in_bulk_form
    # Only count notifications triggered by the save below.
    project_view.backlog.notifier.reset_mock()

    form._subjects_edit.set_edit_text(us_subjects)
    signals.emit(form.save_button, "click")
    assert project_view.backlog.notifier.info_msg.call_count == 1
    assert executor.create_user_stories_in_bulk.call_args.call_list()[0][0][0]["bulkStories"] == us_subjects
    assert executor.create_user_stories_in_bulk.call_count == 1
    assert executor.create_user_stories_in_bulk.return_value.result()
| apache-2.0 |
jfhumann/servo | tests/wpt/css-tests/tools/html5lib/html5lib/serializer/htmlserializer.py | 423 | 12897 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)

# Register an "htmlentityreplace" codec error handler that substitutes
# unencodable characters with named character references when possible.
# If the codecs machinery is unavailable, fall back to strict encoding.
try:
    from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
    unicode_encode_errors = "strict"
else:
    unicode_encode_errors = "htmlentityreplace"

    # Map codepoint -> entity name, for single-character entities only.
    encode_entity_map = {}
    # On narrow builds, astral characters are surrogate pairs (length 2).
    is_ucs4 = len("\U0010FFFF") == 1
    for k, v in list(entities.items()):
        # skip multi-character entities
        if ((is_ucs4 and len(v) > 1) or
                (not is_ucs4 and len(v) > 2)):
            continue
        if v != "&":
            if len(v) == 2:
                v = utils.surrogatePairToCodepoint(v)
            else:
                v = ord(v)
            if v not in encode_entity_map or k.islower():
                # prefer &lt; over &#60; and similarly for &amp;, &gt;, etc.
                encode_entity_map[v] = k

    def htmlentityreplace_errors(exc):
        """Codec error handler: replace unencodable characters with named
        character references where known, numeric references otherwise.
        """
        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            res = []
            codepoints = []
            skip = False
            for i, c in enumerate(exc.object[exc.start:exc.end]):
                if skip:
                    # Second half of a surrogate pair already consumed.
                    skip = False
                    continue
                index = i + exc.start
                if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                    codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                    skip = True
                else:
                    codepoint = ord(c)
                codepoints.append(codepoint)
            for cp in codepoints:
                e = encode_entity_map.get(cp)
                if e:
                    res.append("&")
                    res.append(e)
                    if not e.endswith(";"):
                        res.append(";")
                else:
                    res.append("&#x%s;" % (hex(cp)[2:]))
            return ("".join(res), exc.end)
        else:
            return xmlcharrefreplace_errors(exc)

    register_error(unicode_encode_errors, htmlentityreplace_errors)
    del register_error
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
Whether it insert a meta element to define the character set of the
document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
source tree. The XML predefined entities < > & " '
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
# Alphabetical attributes must be last, as other filters
# could add attributes and alter the order
if self.alphabetical_attributes:
from ..filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
    """Serialize the tree behind *treewalker* and return it whole.

    Returns a byte string when *encoding* is given, otherwise a text
    string assembled from the serialized token stream.
    """
    if not encoding:
        return "".join(self.serialize(treewalker))
    return b"".join(self.serialize(treewalker, encoding))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
    """Record a serialization problem; in strict mode, raise it.

    The message is always appended to ``self.errors``; when
    ``self.strict`` is set a :class:`SerializeError` is raised as well.
    """
    # XXX: the intent is to eventually make *data* a mandatory argument.
    self.errors.append(data)
    if not self.strict:
        return
    raise SerializeError
class SerializeError(Exception):
    """Error in serialized tree.

    Raised by ``serializeError`` when the serializer runs in strict mode.
    """
    # BUG FIX: this was declared with ``def`` instead of ``class``, which
    # made ``raise SerializeError`` fail (raising a plain function).
    pass
| mpl-2.0 |
damiansoriano/odoo | addons/account/account_analytic_line.py | 23 | 8053 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
from openerp.tools.translate import _
class account_analytic_line(osv.osv):
    # Extend account.analytic.line with general-accounting links: the
    # financial account, the journal, the mirrored account.move.line, and
    # currency/partner fields related through that move line.
    _inherit = 'account.analytic.line'
    _description = 'Analytic Line'
    _columns = {
        'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
        'product_id': fields.many2one('product.product', 'Product'),
        'general_account_id': fields.many2one('account.account', 'Financial Account', required=True, ondelete='restrict'),
        'move_id': fields.many2one('account.move.line', 'Move Line', ondelete='cascade', select=True),
        'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal', required=True, ondelete='restrict', select=True),
        'code': fields.char('Code', size=8),
        'ref': fields.char('Ref.'),
        # Stored related fields mirroring the linked move line / account.
        'currency_id': fields.related('move_id', 'currency_id', type='many2one', relation='res.currency', string='Account Currency', store=True, help="The related account currency if not equal to the company one.", readonly=True),
        'amount_currency': fields.related('move_id', 'amount_currency', type='float', string='Amount Currency', store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
        'partner_id': fields.related('account_id', 'partner_id', type='many2one', relation='res.partner', string='Partner', store=True),
    }
    _defaults = {
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
    }
    _order = 'date desc'

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        # Honour optional 'from_date'/'to_date' context keys by narrowing
        # the search domain before delegating to the standard search().
        if context is None:
            context = {}
        if context.get('from_date',False):
            args.append(['date', '>=', context['from_date']])
        if context.get('to_date',False):
            args.append(['date','<=', context['to_date']])
        return super(account_analytic_line, self).search(cr, uid, args, offset, limit,
                order, context=context, count=count)

    def _check_company(self, cr, uid, ids, context=None):
        # Constraint helper: a line linked to a move must have its analytic
        # account and the move's account in the same company.
        lines = self.browse(cr, uid, ids, context=context)
        for l in lines:
            if l.move_id and not l.account_id.company_id.id == l.move_id.account_id.company_id.id:
                return False
        return True

    # Compute the cost based on the price type define into company
    # property_valuation_price_type property
    def on_change_unit_amount(self, cr, uid, id, prod_id, quantity, company_id,
                              unit=False, journal_id=False, context=None):
        # on_change handler: given a product, quantity and analytic journal,
        # return updated 'amount', 'general_account_id' and 'product_uom_id'
        # values.  Costs (non-sale journals) come out negated.
        if context==None:
            context={}
        if not journal_id:
            # Default to the first purchase-type analytic journal.
            j_ids = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=','purchase')])
            journal_id = j_ids and j_ids[0] or False
        if not journal_id or not prod_id:
            return {}
        product_obj = self.pool.get('product.product')
        analytic_journal_obj =self.pool.get('account.analytic.journal')
        product_price_type_obj = self.pool.get('product.price.type')
        product_uom_obj = self.pool.get('product.uom')
        j_id = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
        prod = product_obj.browse(cr, uid, prod_id, context=context)
        result = 0.0
        if prod_id:
            # Fall back to the product's own UoM (purchase UoM for purchase
            # journals) when none was given or the category does not match.
            unit_obj = False
            if unit:
                unit_obj = product_uom_obj.browse(cr, uid, unit, context=context)
            if not unit_obj or prod.uom_id.category_id.id != unit_obj.category_id.id:
                unit = prod.uom_id.id
            if j_id.type == 'purchase':
                if not unit_obj or prod.uom_po_id.category_id.id != unit_obj.category_id.id:
                    unit = prod.uom_po_id.id
        # Pick the expense account for costs, the income account for sales;
        # in both cases fall back to the product category's account.
        if j_id.type <> 'sale':
            a = prod.property_account_expense.id
            if not a:
                a = prod.categ_id.property_account_expense_categ.id
            if not a:
                raise osv.except_osv(_('Error!'),
                        _('There is no expense account defined ' \
                                'for this product: "%s" (id:%d).') % \
                                (prod.name, prod.id,))
        else:
            a = prod.property_account_income.id
            if not a:
                a = prod.categ_id.property_account_income_categ.id
            if not a:
                raise osv.except_osv(_('Error!'),
                        _('There is no income account defined ' \
                                'for this product: "%s" (id:%d).') % \
                                (prod.name, prod_id,))
        flag = False
        # Compute based on pricetype
        product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','standard_price')], context=context)
        pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
        if journal_id:
            journal = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
            if journal.type == 'sale':
                # Sale journals value the line at list price, not cost.
                product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
                if product_price_type_ids:
                    pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
        # Take the company currency as the reference one
        if pricetype.field == 'list_price':
            flag = True
        ctx = context.copy()
        if unit:
            # price_get() will respect a 'uom' in its context, in order
            # to return a default price for those units
            ctx['uom'] = unit
        amount_unit = prod.price_get(pricetype.field, context=ctx)[prod.id]
        prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
        amount = amount_unit * quantity or 0.0
        result = round(amount, prec)
        if not flag:
            # Costs are stored as negative amounts.
            result *= -1
        return {'value': {
            'amount': result,
            'general_account_id': a,
            'product_uom_id': unit
            }
        }

    def view_header_get(self, cr, user, view_id, view_type, context=None):
        # Show "Entries: <analytic account name>" as the view header when an
        # 'account_id' key is present in the context.
        if context is None:
            context = {}
        if context.get('account_id', False):
            # account_id in context may also be pointing to an account.account.id
            cr.execute('select name from account_analytic_account where id=%s', (context['account_id'],))
            res = cr.fetchone()
            if res:
                res = _('Entries: ')+ (res[0] or '')
            return res
        return False
class res_partner(osv.osv):
    """ Inherits partner and adds contract information in the partner form """
    _inherit = 'res.partner'
    _columns = {
        # Read-only one2many listing the analytic accounts (contracts)
        # whose partner_id points at this partner.
        'contract_ids': fields.one2many('account.analytic.account', \
            'partner_id', 'Contracts', readonly=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sdhash/sdhash | sdhash-ui/cherrypy/test/test_http.py | 36 | 8169 | """Tests for managing HTTP issues (malformed requests, etc)."""
import errno
import mimetypes
import socket
import sys
import cherrypy
from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, ntob, py3k
def encode_multipart_formdata(files):
    """Return (content_type, body) ready for httplib.HTTP instance.

    files: a sequence of (name, filename, value) tuples for multipart
    uploads.  The body uses a fixed boundary and CRLF line endings, and
    each part's Content-Type is guessed from its filename.
    """
    BOUNDARY = '________ThIs_Is_tHe_bouNdaRY_$'
    lines = []
    for (name, filename, value) in files:
        ctype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        lines.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"; filename="%s"'
            % (name, filename),
            'Content-Type: %s' % ctype,
            '',
            value,
        ])
    # Closing boundary plus a trailing CRLF.
    lines.extend(['--' + BOUNDARY + '--', ''])
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, '\r\n'.join(lines)
from cherrypy.test import helper
class HTTPTests(helper.CPWebCase):
    # Functional tests for CherryPy's handling of malformed or unusual HTTP
    # requests, driven over a real socket via httplib connections.

    def setup_server():
        class Root:

            def index(self, *args, **kwargs):
                return "Hello world!"
            index.exposed = True

            def no_body(self, *args, **kwargs):
                # Handler with request-body processing disabled, so a POST
                # without Content-Length is acceptable here.
                return "Hello world!"
            no_body.exposed = True
            no_body._cp_config = {'request.process_request_body': False}

            def post_multipart(self, file):
                """Return a summary ("a * 65536\nb * 65536") of the uploaded file."""
                contents = file.file.read()
                summary = []
                curchar = None
                count = 0
                # Run-length encode the uploaded bytes.
                for c in contents:
                    if c == curchar:
                        count += 1
                    else:
                        if count:
                            if py3k: curchar = chr(curchar)
                            summary.append("%s * %d" % (curchar, count))
                        count = 1
                        curchar = c
                if count:
                    if py3k: curchar = chr(curchar)
                    summary.append("%s * %d" % (curchar, count))
                return ", ".join(summary)
            post_multipart.exposed = True

        cherrypy.tree.mount(Root())
        cherrypy.config.update({'server.max_request_body_size': 30000000})
    setup_server = staticmethod(setup_server)

    def test_no_content_length(self):
        # "The presence of a message-body in a request is signaled by the
        # inclusion of a Content-Length or Transfer-Encoding header field in
        # the request's message-headers."
        #
        # Send a message with neither header and no body. Even though
        # the request is of method POST, this should be OK because we set
        # request.process_request_body to False for our handler.
        if self.scheme == "https":
            c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
        else:
            c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
        c.request("POST", "/no_body")
        response = c.getresponse()
        self.body = response.fp.read()
        self.status = str(response.status)
        self.assertStatus(200)
        self.assertBody(ntob('Hello world!'))

        # Now send a message that has no Content-Length, but does send a body.
        # Verify that CP times out the socket and responds
        # with 411 Length Required.
        if self.scheme == "https":
            c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
        else:
            c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
        c.request("POST", "/")
        response = c.getresponse()
        self.body = response.fp.read()
        self.status = str(response.status)
        self.assertStatus(411)

    def test_post_multipart(self):
        # Upload ~1.7MB of run-length-compressible data and check the
        # server's run-length summary of it.
        alphabet = "abcdefghijklmnopqrstuvwxyz"
        # generate file contents for a large post
        contents = "".join([c * 65536 for c in alphabet])

        # encode as multipart form data
        files=[('file', 'file.txt', contents)]
        content_type, body = encode_multipart_formdata(files)
        body = body.encode('Latin-1')

        # post file
        if self.scheme == 'https':
            c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
        else:
            c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
        c.putrequest('POST', '/post_multipart')
        c.putheader('Content-Type', content_type)
        c.putheader('Content-Length', str(len(body)))
        c.endheaders()
        c.send(body)

        response = c.getresponse()
        self.body = response.fp.read()
        self.status = str(response.status)
        self.assertStatus(200)
        self.assertBody(", ".join(["%s * 65536" % c for c in alphabet]))

    def test_malformed_request_line(self):
        if getattr(cherrypy.server, "using_apache", False):
            return self.skip("skipped due to known Apache differences...")

        # Test missing version in Request-Line
        if self.scheme == 'https':
            c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
        else:
            c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
        c._output(ntob('GET /'))
        c._send_output()
        if hasattr(c, 'strict'):
            response = c.response_class(c.sock, strict=c.strict, method='GET')
        else:
            # Python 3.2 removed the 'strict' feature, saying:
            # "http.client now always assumes HTTP/1.x compliant servers."
            response = c.response_class(c.sock, method='GET')
        response.begin()
        self.assertEqual(response.status, 400)
        self.assertEqual(response.fp.read(22), ntob("Malformed Request-Line"))
        c.close()

    def test_malformed_header(self):
        # A header line without a colon must be rejected with 400.
        if self.scheme == 'https':
            c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
        else:
            c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
        c.putrequest('GET', '/')
        c.putheader('Content-Type', 'text/plain')
        # See http://www.cherrypy.org/ticket/941
        c._output(ntob('Re, 1.2.3.4#015#012'))
        c.endheaders()

        response = c.getresponse()
        self.status = str(response.status)
        self.assertStatus(400)
        self.body = response.fp.read(20)
        self.assertBody("Illegal header line.")

    def test_http_over_https(self):
        if self.scheme != 'https':
            return self.skip("skipped (not running HTTPS)... ")

        # Try connecting without SSL.
        conn = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
        conn.putrequest("GET", "/", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.endheaders()
        response = conn.response_class(conn.sock, method="GET")
        try:
            response.begin()
            self.assertEqual(response.status, 400)
            self.body = response.read()
            self.assertBody("The client sent a plain HTTP request, but this "
                            "server only speaks HTTPS on this port.")
        except socket.error:
            e = sys.exc_info()[1]
            # "Connection reset by peer" is also acceptable.
            if e.errno != errno.ECONNRESET:
                raise

    def test_garbage_in(self):
        # Connect without SSL regardless of server.scheme
        c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
        c._output(ntob('gjkgjklsgjklsgjkljklsg'))
        c._send_output()
        response = c.response_class(c.sock, method="GET")
        try:
            response.begin()
            self.assertEqual(response.status, 400)
            self.assertEqual(response.fp.read(22), ntob("Malformed Request-Line"))
            c.close()
        except socket.error:
            e = sys.exc_info()[1]
            # "Connection reset by peer" is also acceptable.
            if e.errno != errno.ECONNRESET:
                raise
| apache-2.0 |
sunlianqiang/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/vcs/git.py | 473 | 7898 | import tempfile
import re
import os.path
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.vcs import vcs, VersionControl
from pip.log import logger
from pip.backwardcompat import url2pathname, urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
class Git(VersionControl):
    # pip VCS backend for Git: clone/update repositories referenced by
    # "git+..." requirement URLs and report their revisions.
    name = 'git'
    dirname = '.git'
    repo_name = 'clone'
    schemes = ('git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file')
    bundle_file = 'git-clone.txt'
    guide = ('# This was a Git repo; to make it a repo again run:\n'
        'git init\ngit remote add origin %(url)s -f\ngit checkout %(rev)s\n')

    def __init__(self, url=None, *args, **kwargs):
        # Works around an apparent Git bug
        # (see http://article.gmane.org/gmane.comp.version-control.git/146500)
        if url:
            scheme, netloc, path, query, fragment = urlsplit(url)
            if scheme.endswith('file'):
                # Normalize backslashes and leading slashes in file:// paths,
                # then re-assemble the URL keeping the original "git+" prefix.
                initial_slashes = path[:-len(path.lstrip('/'))]
                newpath = initial_slashes + url2pathname(path).replace('\\', '/').lstrip('/')
                url = urlunsplit((scheme, netloc, newpath, query, fragment))
                after_plus = scheme.find('+') + 1
                url = scheme[:after_plus] + urlunsplit((scheme[after_plus:], netloc, newpath, query, fragment))

        super(Git, self).__init__(url, *args, **kwargs)

    def parse_vcs_bundle_file(self, content):
        # Extract (url, rev) from the comment "guide" written into a pip
        # bundle file; returns (None, None) when both are not found.
        url = rev = None
        for line in content.splitlines():
            if not line.strip() or line.strip().startswith('#'):
                continue
            url_match = re.search(r'git\s*remote\s*add\s*origin(.*)\s*-f', line)
            if url_match:
                url = url_match.group(1).strip()
            rev_match = re.search(r'^git\s*checkout\s*-q\s*(.*)\s*', line)
            if rev_match:
                rev = rev_match.group(1).strip()
            if url and rev:
                return url, rev
        return None, None

    def export(self, location):
        """Export the Git repository at the url to the destination location"""
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        try:
            # checkout-index requires a trailing slash on the prefix.
            if not location.endswith('/'):
                location = location + '/'
            call_subprocess(
                [self.cmd, 'checkout-index', '-a', '-f', '--prefix', location],
                filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
        finally:
            rmtree(temp_dir)

    def check_rev_options(self, rev, dest, rev_options):
        """Check the revision options before checkout to compensate that tags
        and branches may need origin/ as a prefix.
        Returns the SHA1 of the branch or tag if found.
        """
        revisions = self.get_refs(dest)

        origin_rev = 'origin/%s' % rev
        if origin_rev in revisions:
            # remote branch
            return [revisions[origin_rev]]
        elif rev in revisions:
            # a local tag or branch name
            return [revisions[rev]]
        else:
            logger.warn("Could not find a tag or branch '%s', assuming commit." % rev)
            return rev_options

    def switch(self, dest, url, rev_options):
        # Point the existing checkout at a new remote URL, then check out
        # the requested revision.
        call_subprocess(
            [self.cmd, 'config', 'remote.origin.url', url], cwd=dest)
        call_subprocess(
            [self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)

        self.update_submodules(dest)

    def update(self, dest, rev_options):
        # First fetch changes from the default remote
        call_subprocess([self.cmd, 'fetch', '-q'], cwd=dest)
        # Then reset to wanted revision (maby even origin/master)
        if rev_options:
            rev_options = self.check_rev_options(rev_options[0], dest, rev_options)
        call_subprocess([self.cmd, 'reset', '--hard', '-q'] + rev_options, cwd=dest)
        #: update submodules
        self.update_submodules(dest)

    def obtain(self, dest):
        # Clone the repository into dest, then check out the requested
        # revision (defaulting to origin/master).
        url, rev = self.get_url_rev()
        if rev:
            rev_options = [rev]
            rev_display = ' (to %s)' % rev
        else:
            rev_options = ['origin/master']
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.notify('Cloning %s%s to %s' % (url, rev_display, display_path(dest)))
            call_subprocess([self.cmd, 'clone', '-q', url, dest])
            #: repo may contain submodules
            self.update_submodules(dest)
            if rev:
                rev_options = self.check_rev_options(rev, dest, rev_options)
                # Only do a checkout if rev_options differs from HEAD
                if not self.get_revision(dest).startswith(rev_options[0]):
                    call_subprocess([self.cmd, 'checkout', '-q'] + rev_options, cwd=dest)

    def get_url(self, location):
        # URL of the 'origin' remote of the checkout at `location`.
        url = call_subprocess(
            [self.cmd, 'config', 'remote.origin.url'],
            show_stdout=False, cwd=location)
        return url.strip()

    def get_revision(self, location):
        # SHA1 of the current HEAD.
        current_rev = call_subprocess(
            [self.cmd, 'rev-parse', 'HEAD'], show_stdout=False, cwd=location)
        return current_rev.strip()

    def get_refs(self, location):
        """Return map of named refs (branches or tags) to commit hashes."""
        output = call_subprocess([self.cmd, 'show-ref'],
                                 show_stdout=False, cwd=location)
        rv = {}
        for line in output.strip().splitlines():
            commit, ref = line.split(' ', 1)
            ref = ref.strip()
            ref_name = None
            # Strip the refs/... namespace prefix to get the bare name.
            if ref.startswith('refs/remotes/'):
                ref_name = ref[len('refs/remotes/'):]
            elif ref.startswith('refs/heads/'):
                ref_name = ref[len('refs/heads/'):]
            elif ref.startswith('refs/tags/'):
                ref_name = ref[len('refs/tags/'):]
            if ref_name is not None:
                rv[ref_name] = commit.strip()
        return rv

    def get_src_requirement(self, dist, location, find_tags):
        # Build an editable requirement string "git+<url>@<rev>#egg=<name>"
        # for the checkout at `location`.
        repo = self.get_url(location)
        if not repo.lower().startswith('git:'):
            repo = 'git+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        if not repo:
            return None
        current_rev = self.get_revision(location)
        refs = self.get_refs(location)
        # refs maps names to commit hashes; we need the inverse
        # if multiple names map to a single commit, this arbitrarily picks one
        names_by_commit = dict((commit, ref) for ref, commit in refs.items())

        if current_rev in names_by_commit:
            # It's a tag
            full_egg_name = '%s-%s' % (egg_project_name, names_by_commit[current_rev])
        else:
            full_egg_name = '%s-dev' % egg_project_name

        return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)

    def get_url_rev(self):
        """
        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
        That's required because although they use SSH they sometimes doesn't
        work with a ssh:// scheme (e.g. Github). But we need a scheme for
        parsing. Hence we remove it again afterwards and return it as a stub.
        """
        if not '://' in self.url:
            assert not 'file:' in self.url
            self.url = self.url.replace('git+', 'git+ssh://')
            url, rev = super(Git, self).get_url_rev()
            url = url.replace('ssh://', '')
        else:
            url, rev = super(Git, self).get_url_rev()

        return url, rev

    def update_submodules(self, location):
        # No-op when the checkout has no .gitmodules file.
        if not os.path.exists(os.path.join(location, '.gitmodules')):
            return
        call_subprocess([self.cmd, 'submodule', 'update', '--init', '--recursive', '-q'],
                        cwd=location)
# Register the Git backend with pip's VCS registry so "git+..." URLs
# are dispatched to this class.
vcs.register(Git)
| lgpl-3.0 |
ramons03/djangoldapsample | ldapsample/ldapdb/models/base.py | 1 | 5282 | # -*- coding: utf-8 -*-
# This software is distributed under the two-clause BSD license.
# Copyright (c) The django-ldapdb project
from __future__ import unicode_literals
import ldap
import logging
import django.db.models
from django.db import connections, router
from django.db.models import signals
import ldapdb # noqa
logger = logging.getLogger('ldapdb')
class Model(django.db.models.base.Model):
    """
    Base class for all LDAP models.

    Subclasses describe one kind of LDAP entry: ``base_dn`` gives the
    search base, ``object_classes`` the objectClass values written on
    insert, and ``dn`` (the primary key) the entry's Distinguished Name.
    """
    dn = django.db.models.fields.CharField(max_length=200, primary_key=True)

    # meta-data
    base_dn = None
    search_scope = ldap.SCOPE_SUBTREE
    object_classes = ['top']

    def __init__(self, *args, **kwargs):
        super(Model, self).__init__(*args, **kwargs)
        # Remember the pk as loaded so a later save can locate the original
        # entry even after the pk field has been modified in memory.
        self.saved_pk = self.pk

    def build_rdn(self):
        """
        Build the Relative Distinguished Name for this entry.

        Raises Exception when no primary-key column is available.
        """
        bits = []
        for field in self._meta.fields:
            if field.db_column and field.primary_key:
                bits.append("%s=%s" % (field.db_column,
                                       getattr(self, field.name)))
        if not bits:
            raise Exception("Could not build Distinguished Name")
        return '+'.join(bits)

    def build_dn(self):
        """
        Build the Distinguished Name for this entry.
        """
        # FIX: the original had an unreachable ``raise`` after this return;
        # build_rdn() already raises when no RDN can be built.
        return "%s,%s" % (self.build_rdn(), self.base_dn)

    def delete(self, using=None):
        """
        Delete this entry.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        connection = connections[using]
        logger.debug("Deleting LDAP entry %s" % self.dn)
        connection.delete_s(self.dn)
        signals.post_delete.send(sender=self.__class__, instance=self)

    def _save_table(self, raw=False, cls=None, force_insert=None,
                    force_update=None, using=None, update_fields=None):
        """
        Saves the current instance.

        Returns True when an existing entry was modified, False when a
        new entry was created.
        """
        # Connection aliasing
        connection = connections[using]

        create = bool(force_insert or not self.dn)

        # Prepare fields
        if update_fields:
            target_fields = [
                self._meta.get_field(name)
                for name in update_fields
            ]
        else:
            target_fields = [
                field
                for field in cls._meta.get_fields(include_hidden=True)
                if field.concrete and not field.primary_key
            ]

        def get_field_value(field, instance):
            # Convert the Python value into its LDAP wire representation.
            python_value = getattr(instance, field.attname)
            return field.get_db_prep_save(python_value, connection=connection)

        if create:
            old = None
        else:
            old = cls.objects.using(using).get(pk=self.saved_pk)

        # Map db_column -> (old wire value or None, new wire value).
        changes = {
            field.db_column: (
                None if old is None else get_field_value(field, old),
                get_field_value(field, self),
            )
            for field in target_fields
        }

        # Actual saving
        old_dn = self.dn
        new_dn = self.build_dn()
        updated = False

        # Insertion
        if create:
            # FIXME(rbarrois): This should be handled through a hidden field.
            hidden_values = [
                ('objectClass', [obj_class.encode('utf-8') for obj_class in self.object_classes])
            ]
            new_values = hidden_values + [
                (colname, change[1])
                for colname, change in sorted(changes.items())
                if change[1] is not None
            ]
            new_dn = self.build_dn()

            logger.debug("Creating new LDAP entry %s", new_dn)
            connection.add_s(new_dn, new_values)

        # Update
        else:
            modlist = []
            for colname, change in sorted(changes.items()):
                old_value, new_value = change
                if old_value == new_value:
                    continue
                # Delete the attribute when the new value is empty,
                # replace it otherwise.
                modlist.append((
                    ldap.MOD_DELETE if new_value is None else ldap.MOD_REPLACE,
                    colname,
                    new_value,
                ))

            if new_dn != old_dn:
                logger.debug("renaming ldap entry %s to %s", old_dn, new_dn)
                connection.rename_s(old_dn, self.build_rdn())

            logger.debug("Modifying existing LDAP entry %s", new_dn)
            connection.modify_s(new_dn, modlist)
            updated = True

        # Finishing
        self.dn = new_dn
        self.saved_pk = self.pk
        return updated

    @classmethod
    def scoped(base_class, base_dn):
        """
        Returns a copy of the current class with a different base_dn.
        """
        class Meta:
            proxy = True
            verbose_name = base_class._meta.verbose_name
            verbose_name_plural = base_class._meta.verbose_name_plural

        import re
        # Derive a valid class-name suffix from the DN.
        suffix = re.sub('[=,]', '_', base_dn)
        name = "%s_%s" % (base_class.__name__, str(suffix))

        new_class = type(str(name), (base_class,), {
            'base_dn': base_dn, '__module__': base_class.__module__,
            'Meta': Meta})
        return new_class

    class Meta:
        abstract = True
| mit |
mosbasik/buzhug | javasrc/lib/Jython/Lib/test/test_gettext.py | 19 | 17886 | import os
import base64
import shutil
import gettext
import unittest
from test.test_support import run_suite
# TODO:
# - Add new tests, for example for "dgettext"
# - Remove dummy tests, for example testing for single and double quotes
# has no sense, it would have if we were testing a parser (i.e. pygettext)
# - Tests should have only one assert.
# Pre-built message catalogs: base64-encoded .mo file images that the
# fixtures write to disk in setUp().
#
# GNU_MO_DATA: a GNU gettext catalog (charset iso-8859-1) including
# plural forms and multi-line entries.
GNU_MO_DATA = '''\
3hIElQAAAAAGAAAAHAAAAEwAAAALAAAAfAAAAAAAAACoAAAAFQAAAKkAAAAjAAAAvwAAAKEAAADj
AAAABwAAAIUBAAALAAAAjQEAAEUBAACZAQAAFgAAAN8CAAAeAAAA9gIAAKEAAAAVAwAABQAAALcD
AAAJAAAAvQMAAAEAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABQAAAAYAAAACAAAAAFJh
eW1vbmQgTHV4dXJ5IFlhY2gtdABUaGVyZSBpcyAlcyBmaWxlAFRoZXJlIGFyZSAlcyBmaWxlcwBU
aGlzIG1vZHVsZSBwcm92aWRlcyBpbnRlcm5hdGlvbmFsaXphdGlvbiBhbmQgbG9jYWxpemF0aW9u
CnN1cHBvcnQgZm9yIHlvdXIgUHl0aG9uIHByb2dyYW1zIGJ5IHByb3ZpZGluZyBhbiBpbnRlcmZh
Y2UgdG8gdGhlIEdOVQpnZXR0ZXh0IG1lc3NhZ2UgY2F0YWxvZyBsaWJyYXJ5LgBtdWxsdXNrAG51
ZGdlIG51ZGdlAFByb2plY3QtSWQtVmVyc2lvbjogMi4wClBPLVJldmlzaW9uLURhdGU6IDIwMDAt
MDgtMjkgMTI6MTktMDQ6MDAKTGFzdC1UcmFuc2xhdG9yOiBKLiBEYXZpZCBJYsOhw7FleiA8ai1k
YXZpZEBub29zLmZyPgpMYW5ndWFnZS1UZWFtOiBYWCA8cHl0aG9uLWRldkBweXRob24ub3JnPgpN
SU1FLVZlcnNpb246IDEuMApDb250ZW50LVR5cGU6IHRleHQvcGxhaW47IGNoYXJzZXQ9aXNvLTg4
NTktMQpDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiBub25lCkdlbmVyYXRlZC1CeTogcHlnZXR0
ZXh0LnB5IDEuMQpQbHVyYWwtRm9ybXM6IG5wbHVyYWxzPTI7IHBsdXJhbD1uIT0xOwoAVGhyb2F0
d29iYmxlciBNYW5ncm92ZQBIYXkgJXMgZmljaGVybwBIYXkgJXMgZmljaGVyb3MAR3V2ZiB6YnFo
eXIgY2ViaXZxcmYgdmFncmVhbmd2YmFueXZtbmd2YmEgbmFxIHlicG55dm1uZ3ZiYQpmaGNjYmVn
IHNiZSBsYmhlIENsZ3ViYSBjZWJ0ZW56ZiBvbCBjZWJpdnF2YXQgbmEgdmFncmVzbnByIGdiIGd1
ciBUQUgKdHJnZ3JrZyB6cmZmbnRyIHBuZ255YnQgeXZvZW5lbC4AYmFjb24Ad2luayB3aW5rAA==
'''

# UMO_DATA: a catalog whose charset is utf-8 (used for ugettext testing).
UMO_DATA = '''\
3hIElQAAAAACAAAAHAAAACwAAAAFAAAAPAAAAAAAAABQAAAABAAAAFEAAAAPAQAAVgAAAAQAAABm
AQAAAQAAAAIAAAAAAAAAAAAAAAAAAAAAYWLDngBQcm9qZWN0LUlkLVZlcnNpb246IDIuMApQTy1S
ZXZpc2lvbi1EYXRlOiAyMDAzLTA0LTExIDEyOjQyLTA0MDAKTGFzdC1UcmFuc2xhdG9yOiBCYXJy
eSBBLiBXQXJzYXcgPGJhcnJ5QHB5dGhvbi5vcmc+Ckxhbmd1YWdlLVRlYW06IFhYIDxweXRob24t
ZGV2QHB5dGhvbi5vcmc+Ck1JTUUtVmVyc2lvbjogMS4wCkNvbnRlbnQtVHlwZTogdGV4dC9wbGFp
bjsgY2hhcnNldD11dGYtOApDb250ZW50LVRyYW5zZmVyLUVuY29kaW5nOiA3Yml0CkdlbmVyYXRl
ZC1CeTogbWFudWFsbHkKAMKkeXoA
'''

# MMO_DATA: a catalog containing only the metadata (header) entry.
MMO_DATA = '''\
3hIElQAAAAABAAAAHAAAACQAAAADAAAALAAAAAAAAAA4AAAAeAEAADkAAAABAAAAAAAAAAAAAAAA
UHJvamVjdC1JZC1WZXJzaW9uOiBObyBQcm9qZWN0IDAuMApQT1QtQ3JlYXRpb24tRGF0ZTogV2Vk
IERlYyAxMSAwNzo0NDoxNSAyMDAyClBPLVJldmlzaW9uLURhdGU6IDIwMDItMDgtMTQgMDE6MTg6
NTgrMDA6MDAKTGFzdC1UcmFuc2xhdG9yOiBKb2huIERvZSA8amRvZUBleGFtcGxlLmNvbT4KSmFu
ZSBGb29iYXIgPGpmb29iYXJAZXhhbXBsZS5jb20+Ckxhbmd1YWdlLVRlYW06IHh4IDx4eEBleGFt
cGxlLmNvbT4KTUlNRS1WZXJzaW9uOiAxLjAKQ29udGVudC1UeXBlOiB0ZXh0L3BsYWluOyBjaGFy
c2V0PWlzby04ODU5LTE1CkNvbnRlbnQtVHJhbnNmZXItRW5jb2Rpbmc6IHF1b3RlZC1wcmludGFi
bGUKR2VuZXJhdGVkLUJ5OiBweWdldHRleHQucHkgMS4zCgA=
'''

# On-disk layout used by the fixtures: xx/LC_MESSAGES/*.mo
LOCALEDIR = os.path.join('xx', 'LC_MESSAGES')
MOFILE = os.path.join(LOCALEDIR, 'gettext.mo')
UMOFILE = os.path.join(LOCALEDIR, 'ugettext.mo')
MMOFILE = os.path.join(LOCALEDIR, 'metadata.mo')

# Remember the caller's LANGUAGE so tearDown() can restore it.
try:
    LANG = os.environ['LANGUAGE']
except:
    LANG = 'en'
class GettextBaseTest(unittest.TestCase):
    # Common fixture: materialize the base64-encoded catalogs under
    # xx/LC_MESSAGES and force LANGUAGE=xx for the duration of each test.

    def setUp(self):
        if not os.path.isdir(LOCALEDIR):
            os.makedirs(LOCALEDIR)
        fp = open(MOFILE, 'wb')
        fp.write(base64.decodestring(GNU_MO_DATA))
        fp.close()
        fp = open(UMOFILE, 'wb')
        fp.write(base64.decodestring(UMO_DATA))
        fp.close()
        fp = open(MMOFILE, 'wb')
        fp.write(base64.decodestring(MMO_DATA))
        fp.close()
        os.environ['LANGUAGE'] = 'xx'

    def tearDown(self):
        # Restore the caller's LANGUAGE and remove the temporary catalogs.
        os.environ['LANGUAGE'] = LANG
        shutil.rmtree(os.path.split(LOCALEDIR)[0])
class GettextTestCase1(GettextBaseTest):
    # Exercises gettext.install(), which binds _() into the builtins, and
    # the GNUTranslations.install() alternative interface.

    def setUp(self):
        GettextBaseTest.setUp(self)
        self.localedir = os.curdir
        self.mofile = MOFILE
        gettext.install('gettext', self.localedir)

    def test_some_translations(self):
        eq = self.assertEqual
        # test some translations
        eq(_('albatross'), 'albatross')
        eq(_(u'mullusk'), 'bacon')
        eq(_(r'Raymond Luxury Yach-t'), 'Throatwobbler Mangrove')
        eq(_(ur'nudge nudge'), 'wink wink')

    def test_double_quotes(self):
        eq = self.assertEqual
        # double quotes
        eq(_("albatross"), 'albatross')
        eq(_(u"mullusk"), 'bacon')
        eq(_(r"Raymond Luxury Yach-t"), 'Throatwobbler Mangrove')
        eq(_(ur"nudge nudge"), 'wink wink')

    def test_triple_single_quotes(self):
        eq = self.assertEqual
        # triple single quotes
        eq(_('''albatross'''), 'albatross')
        eq(_(u'''mullusk'''), 'bacon')
        eq(_(r'''Raymond Luxury Yach-t'''), 'Throatwobbler Mangrove')
        eq(_(ur'''nudge nudge'''), 'wink wink')

    def test_triple_double_quotes(self):
        eq = self.assertEqual
        # triple double quotes
        eq(_("""albatross"""), 'albatross')
        eq(_(u"""mullusk"""), 'bacon')
        eq(_(r"""Raymond Luxury Yach-t"""), 'Throatwobbler Mangrove')
        eq(_(ur"""nudge nudge"""), 'wink wink')

    def test_multiline_strings(self):
        eq = self.assertEqual
        # multiline strings
        eq(_('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.'''),
           '''Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba
fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH
trggrkg zrffntr pngnybt yvoenel.''')

    def test_the_alternative_interface(self):
        eq = self.assertEqual
        # test the alternative interface
        fp = open(self.mofile, 'rb')
        t = gettext.GNUTranslations(fp)
        fp.close()
        # Install the translation object
        t.install()
        eq(_('nudge nudge'), 'wink wink')
        # Try unicode return type
        t.install(unicode=True)
        eq(_('mullusk'), 'bacon')
        # Test installation of other methods
        import __builtin__
        t.install(unicode=True, names=["gettext", "lgettext"])
        eq(_, t.ugettext)
        eq(__builtin__.gettext, t.ugettext)
        eq(lgettext, t.lgettext)
        # Clean up the extra builtins installed above.
        del __builtin__.gettext
        del __builtin__.lgettext
class GettextTestCase2(GettextBaseTest):
    # Exercises the classic API: bindtextdomain()/textdomain() plus the
    # module-level gettext.gettext function (no builtins installed).

    def setUp(self):
        GettextBaseTest.setUp(self)
        self.localedir = os.curdir
        # Set up the bindings
        gettext.bindtextdomain('gettext', self.localedir)
        gettext.textdomain('gettext')
        # For convenience
        self._ = gettext.gettext

    def test_bindtextdomain(self):
        self.assertEqual(gettext.bindtextdomain('gettext'), self.localedir)

    def test_textdomain(self):
        self.assertEqual(gettext.textdomain(), 'gettext')

    def test_some_translations(self):
        eq = self.assertEqual
        # test some translations
        eq(self._('albatross'), 'albatross')
        eq(self._(u'mullusk'), 'bacon')
        eq(self._(r'Raymond Luxury Yach-t'), 'Throatwobbler Mangrove')
        eq(self._(ur'nudge nudge'), 'wink wink')

    def test_double_quotes(self):
        eq = self.assertEqual
        # double quotes
        eq(self._("albatross"), 'albatross')
        eq(self._(u"mullusk"), 'bacon')
        eq(self._(r"Raymond Luxury Yach-t"), 'Throatwobbler Mangrove')
        eq(self._(ur"nudge nudge"), 'wink wink')

    def test_triple_single_quotes(self):
        eq = self.assertEqual
        # triple single quotes
        eq(self._('''albatross'''), 'albatross')
        eq(self._(u'''mullusk'''), 'bacon')
        eq(self._(r'''Raymond Luxury Yach-t'''), 'Throatwobbler Mangrove')
        eq(self._(ur'''nudge nudge'''), 'wink wink')

    def test_triple_double_quotes(self):
        eq = self.assertEqual
        # triple double quotes
        eq(self._("""albatross"""), 'albatross')
        eq(self._(u"""mullusk"""), 'bacon')
        eq(self._(r"""Raymond Luxury Yach-t"""), 'Throatwobbler Mangrove')
        eq(self._(ur"""nudge nudge"""), 'wink wink')

    def test_multiline_strings(self):
        eq = self.assertEqual
        # multiline strings
        eq(self._('''This module provides internationalization and localization
support for your Python programs by providing an interface to the GNU
gettext message catalog library.'''),
           '''Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba
fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH
trggrkg zrffntr pngnybt yvoenel.''')
class PluralFormsTestCase(GettextBaseTest):
    # Exercises ngettext() plural selection and the c2py() compiler that
    # turns C-style Plural-Forms expressions into Python functions.
    def setUp(self):
        GettextBaseTest.setUp(self)
        self.mofile = MOFILE
    def test_plural_forms1(self):
        eq = self.assertEqual
        # Module-level ngettext() goes through the default domain's catalog.
        x = gettext.ngettext('There is %s file', 'There are %s files', 1)
        eq(x, 'Hay %s fichero')
        x = gettext.ngettext('There is %s file', 'There are %s files', 2)
        eq(x, 'Hay %s ficheros')
    def test_plural_forms2(self):
        eq = self.assertEqual
        # Same lookups via an explicitly constructed GNUTranslations.
        fp = open(self.mofile, 'rb')
        t = gettext.GNUTranslations(fp)
        fp.close()
        x = t.ngettext('There is %s file', 'There are %s files', 1)
        eq(x, 'Hay %s fichero')
        x = t.ngettext('There is %s file', 'There are %s files', 2)
        eq(x, 'Hay %s ficheros')
    def test_hu(self):
        eq = self.assertEqual
        # Constant expression: every n selects form 0.
        f = gettext.c2py('0')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
    def test_de(self):
        eq = self.assertEqual
        # German: singular only for exactly 1.
        f = gettext.c2py('n != 1')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "10111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
    def test_fr(self):
        eq = self.assertEqual
        # French: 0 and 1 both take the singular form.
        f = gettext.c2py('n>1')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "00111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")
    def test_gd(self):
        eq = self.assertEqual
        # Chained "?:" without parentheses.
        f = gettext.c2py('n==1 ? 0 : n==2 ? 1 : 2')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20122222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222")
    def test_gd2(self):
        eq = self.assertEqual
        # Tests the combination of parentheses and "?:"
        f = gettext.c2py('n==1 ? 0 : (n==2 ? 1 : 2)')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20122222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222")
    def test_lt(self):
        eq = self.assertEqual
        # Combines %, &&, || and comparisons in one expression.
        f = gettext.c2py('n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20111111112222222222201111111120111111112011111111201111111120111111112011111111201111111120111111112011111111222222222220111111112011111111201111111120111111112011111111201111111120111111112011111111")
    def test_ru(self):
        eq = self.assertEqual
        f = gettext.c2py('n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20111222222222222222201112222220111222222011122222201112222220111222222011122222201112222220111222222011122222222222222220111222222011122222201112222220111222222011122222201112222220111222222011122222")
    def test_pl(self):
        eq = self.assertEqual
        f = gettext.c2py('n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "20111222222222222222221112222222111222222211122222221112222222111222222211122222221112222222111222222211122222222222222222111222222211122222221112222222111222222211122222221112222222111222222211122222")
    def test_sl(self):
        eq = self.assertEqual
        # Four plural forms keyed on n%100.
        f = gettext.c2py('n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3')
        s = ''.join([ str(f(x)) for x in range(200) ])
        eq(s, "30122333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333012233333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333")
    def test_security(self):
        raises = self.assertRaises
        # Test for a dangerous expression
        # c2py() must reject expressions containing anything other than
        # the n variable and arithmetic/comparison operators.
        raises(ValueError, gettext.c2py, "os.chmod('/etc/passwd',0777)")
class UnicodeTranslationsTest(GettextBaseTest):
    # Exercises ugettext() against the UTF-8 encoded catalog (UMOFILE).
    def setUp(self):
        GettextBaseTest.setUp(self)
        fp = open(UMOFILE, 'rb')
        try:
            self.t = gettext.GNUTranslations(fp)
        finally:
            fp.close()
        # ugettext returns unicode objects rather than encoded bytes.
        self._ = self.t.ugettext
    def test_unicode_msgid(self):
        unless = self.failUnless
        # Both str and unicode msgids come back as unicode.
        unless(isinstance(self._(''), unicode))
        unless(isinstance(self._(u''), unicode))
    def test_unicode_msgstr(self):
        eq = self.assertEqual
        # A non-ASCII msgid maps to the non-ASCII msgstr from the catalog.
        eq(self._(u'ab\xde'), u'\xa4yz')
class WeirdMetadataTest(GettextBaseTest):
    # The MMOFILE catalog has a continuation line in its metadata block;
    # the parser must fold it into the preceding header value.
    def setUp(self):
        GettextBaseTest.setUp(self)
        fp = open(MMOFILE, 'rb')
        try:
            try:
                self.t = gettext.GNUTranslations(fp)
            except:
                # Clean up temp files even if catalog parsing fails, then
                # re-raise so the failure is still reported.
                self.tearDown()
                raise
        finally:
            fp.close()
    def test_weird_metadata(self):
        info = self.t.info()
        self.assertEqual(info['last-translator'],
                         'John Doe <jdoe@example.com>\nJane Foobar <jfoobar@example.com>')
def suite():
    """Assemble all gettext test cases into a single TestSuite."""
    cases = (
        GettextTestCase1,
        GettextTestCase2,
        PluralFormsTestCase,
        UnicodeTranslationsTest,
        WeirdMetadataTest,
    )
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
def test_main():
    # run_suite() comes from test.test_support (imported at the top of
    # the file) and integrates with regrtest's reporting.
    run_suite(suite())
# Allow running this test file directly, outside of regrtest.
if __name__ == '__main__':
    test_main()
# For reference, here's the .po file used to create the GNU_MO_DATA above.
#
# The original version was automatically generated from the sources with
# pygettext. Later it was manually modified to add plural forms support.
'''
# Dummy translation for the Python test_gettext.py module.
# Copyright (C) 2001 Python Software Foundation
# Barry Warsaw <barry@python.org>, 2000.
#
msgid ""
msgstr ""
"Project-Id-Version: 2.0\n"
"PO-Revision-Date: 2003-04-11 14:32-0400\n"
"Last-Translator: J. David Ibanez <j-david@noos.fr>\n"
"Language-Team: XX <python-dev@python.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=iso-8859-1\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: pygettext.py 1.1\n"
"Plural-Forms: nplurals=2; plural=n!=1;\n"
#: test_gettext.py:19 test_gettext.py:25 test_gettext.py:31 test_gettext.py:37
#: test_gettext.py:51 test_gettext.py:80 test_gettext.py:86 test_gettext.py:92
#: test_gettext.py:98
msgid "nudge nudge"
msgstr "wink wink"
#: test_gettext.py:16 test_gettext.py:22 test_gettext.py:28 test_gettext.py:34
#: test_gettext.py:77 test_gettext.py:83 test_gettext.py:89 test_gettext.py:95
msgid "albatross"
msgstr ""
#: test_gettext.py:18 test_gettext.py:24 test_gettext.py:30 test_gettext.py:36
#: test_gettext.py:79 test_gettext.py:85 test_gettext.py:91 test_gettext.py:97
msgid "Raymond Luxury Yach-t"
msgstr "Throatwobbler Mangrove"
#: test_gettext.py:17 test_gettext.py:23 test_gettext.py:29 test_gettext.py:35
#: test_gettext.py:56 test_gettext.py:78 test_gettext.py:84 test_gettext.py:90
#: test_gettext.py:96
msgid "mullusk"
msgstr "bacon"
#: test_gettext.py:40 test_gettext.py:101
msgid ""
"This module provides internationalization and localization\n"
"support for your Python programs by providing an interface to the GNU\n"
"gettext message catalog library."
msgstr ""
"Guvf zbqhyr cebivqrf vagreangvbanyvmngvba naq ybpnyvmngvba\n"
"fhccbeg sbe lbhe Clguba cebtenzf ol cebivqvat na vagresnpr gb gur TAH\n"
"trggrkg zrffntr pngnybt yvoenel."
# Manually added, as neither pygettext nor xgettext support plural forms
# in Python.
msgid "There is %s file"
msgid_plural "There are %s files"
msgstr[0] "Hay %s fichero"
msgstr[1] "Hay %s ficheros"
'''
# Here's the second example po file example, used to generate the UMO_DATA
# containing utf-8 encoded Unicode strings
'''
# Dummy translation for the Python test_gettext.py module.
# Copyright (C) 2001 Python Software Foundation
# Barry Warsaw <barry@python.org>, 2000.
#
msgid ""
msgstr ""
"Project-Id-Version: 2.0\n"
"PO-Revision-Date: 2003-04-11 12:42-0400\n"
"Last-Translator: Barry A. WArsaw <barry@python.org>\n"
"Language-Team: XX <python-dev@python.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 7bit\n"
"Generated-By: manually\n"
#: nofile:0
msgid "ab\xc3\x9e"
msgstr "\xc2\xa4yz"
'''
# Here's the third example po file, used to generate MMO_DATA
'''
msgid ""
msgstr ""
"Project-Id-Version: No Project 0.0\n"
"POT-Creation-Date: Wed Dec 11 07:44:15 2002\n"
"PO-Revision-Date: 2002-08-14 01:18:58+00:00\n"
"Last-Translator: John Doe <jdoe@example.com>\n"
"Jane Foobar <jfoobar@example.com>\n"
"Language-Team: xx <xx@example.com>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=iso-8859-15\n"
"Content-Transfer-Encoding: quoted-printable\n"
"Generated-By: pygettext.py 1.3\n"
'''
| bsd-3-clause |
jhawkesworth/ansible | lib/ansible/modules/cloud/google/gcp_compute_https_health_check.py | 15 | 13242 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_https_health_check
description:
- An HttpsHealthCheck resource. This resource defines a template for how individual
VMs should be checked for health, via HTTPS.
short_description: Creates a GCP HttpsHealthCheck
version_added: 2.6
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
check_interval_sec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
required: false
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
required: false
host:
description:
- The value of the host header in the HTTPS health check request. If left empty
(default value), the public IP on behalf of which this health check is performed
will be used.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
port:
description:
- The TCP port number for the HTTPS health check request.
    - The default value is 443.
required: false
request_path:
description:
- The request path of the HTTPS health check request.
- The default value is /.
required: false
timeout_sec:
description:
- How long (in seconds) to wait before claiming failure.
- The default value is 5 seconds. It is invalid for timeoutSec to have greater
value than checkIntervalSec.
required: false
aliases:
- timeout_seconds
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
required: false
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/httpsHealthChecks)'
- 'Adding Health Checks: U(https://cloud.google.com/compute/docs/load-balancing/health-checks#legacy_health_checks)'
'''
EXAMPLES = '''
- name: create a https health check
gcp_compute_https_health_check:
name: test_object
healthy_threshold: 10
port: 8080
timeout_sec: 2
unhealthy_threshold: 5
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
checkIntervalSec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
healthyThreshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
returned: success
type: int
host:
description:
- The value of the host header in the HTTPS health check request. If left empty
(default value), the public IP on behalf of which this health check is performed
will be used.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
port:
description:
- The TCP port number for the HTTPS health check request.
  - The default value is 443.
returned: success
type: int
requestPath:
description:
- The request path of the HTTPS health check request.
- The default value is /.
returned: success
type: str
timeoutSec:
description:
- How long (in seconds) to wait before claiming failure.
- The default value is 5 seconds. It is invalid for timeoutSec to have greater value
than checkIntervalSec.
returned: success
type: int
unhealthyThreshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
returned: success
type: int
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
    """Main function"""
    # Module-specific argument spec; shared options (project, auth_kind,
    # scopes, ...) are supplied by GcpModule itself.
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            check_interval_sec=dict(type='int'),
            description=dict(type='str'),
            healthy_threshold=dict(type='int'),
            host=dict(type='str'),
            name=dict(required=True, type='str'),
            port=dict(type='int'),
            request_path=dict(type='str'),
            timeout_sec=dict(type='int', aliases=['timeout_seconds']),
            unhealthy_threshold=dict(type='int'),
        )
    )
    # Default OAuth scope when the playbook did not provide one.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
    state = module.params['state']
    kind = 'compute#httpsHealthCheck'
    # Current remote state; falsy when the resource does not exist.
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False
    if fetch:
        if state == 'present':
            # Update only when desired parameters differ from the live
            # resource, then re-fetch so the final state is reported.
            if is_different(module, fetch):
                update(module, self_link(module), kind)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            # Exists but should not: delete it.
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            # Missing but desired: create it.
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}
    fetch.update({'changed': changed})
    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the desired resource and wait for the async operation.

    `kind` is unused here; it is kept for signature parity with the
    other CRUD helpers.
    """
    session = GcpSession(module, 'compute')
    response = session.post(link, resource_to_request(module))
    return wait_for_operation(module, response)
def update(module, link, kind):
    """PUT the desired resource state and wait for the async operation.

    `kind` is unused here; it is kept for signature parity with the
    other CRUD helpers.
    """
    session = GcpSession(module, 'compute')
    response = session.put(link, resource_to_request(module))
    return wait_for_operation(module, response)
def delete(module, link, kind):
    """DELETE the resource and wait for the async operation.

    `kind` is unused here; it is kept for signature parity with the
    other CRUD helpers.
    """
    session = GcpSession(module, 'compute')
    response = session.delete(link)
    return wait_for_operation(module, response)
def resource_to_request(module):
    """
    Build the JSON request body for an HttpsHealthCheck from the module
    parameters, omitting unset values.
    """
    params = module.params
    request = {
        u'kind': 'compute#httpsHealthCheck',
        u'checkIntervalSec': params.get('check_interval_sec'),
        u'description': params.get('description'),
        u'healthyThreshold': params.get('healthy_threshold'),
        u'host': params.get('host'),
        u'name': params.get('name'),
        u'port': params.get('port'),
        u'requestPath': params.get('request_path'),
        u'timeoutSec': params.get('timeout_sec'),
        u'unhealthyThreshold': params.get('unhealthy_threshold'),
    }
    # NOTE(review): this filter keeps an explicit False but also drops any
    # other falsy value (e.g. a legitimate 0) -- preserved from the
    # original behaviour.
    return {k: v for k, v in request.items() if v or v is False}
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET the resource at `link`; returns None on 404 when allowed."""
    session = GcpSession(module, 'compute')
    response = session.get(link)
    return return_if_object(module, response, kind, allow_not_found)
def self_link(module):
    """Canonical URL of this httpsHealthCheck resource."""
    base = "https://www.googleapis.com/compute/v1/projects/{project}/global/httpsHealthChecks/{name}"
    return base.format(**module.params)
def collection(module):
    """URL of the project's httpsHealthChecks collection."""
    base = "https://www.googleapis.com/compute/v1/projects/{project}/global/httpsHealthChecks"
    return base.format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response into a dict, or fail the module.

    Returns None for an allowed 404 and for 204 (no content); otherwise
    the response must be valid JSON with no embedded GCP error objects.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None
    # If no content, return nothing.
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # json.decoder.JSONDecodeError exists on Python 3; fall back to
        # ValueError (its base class, and the Python 2 equivalent).
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
def is_different(module, response):
    """
    Decide whether the live resource differs from the desired state.

    Only keys present in both the request and the (output-stripped)
    response are compared, so server-populated fields never trigger an
    update on their own.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    # Remove all output-only from response.
    response_vals = {k: v for k, v in response.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in response}
    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """
    Reduce an API response to the properties relevant for comparison
    against the Ansible parameters.
    """
    keys = (
        u'checkIntervalSec', u'creationTimestamp', u'description',
        u'healthyThreshold', u'host', u'id', u'port', u'requestPath',
        u'timeoutSec', u'unhealthyThreshold',
    )
    hashed = {key: response.get(key) for key in keys}
    # 'name' is taken from the module parameters rather than the response.
    hashed[u'name'] = module.params.get('name')
    return hashed
def async_op_url(module, extra_data=None):
    """
    Build the URL used to poll a global compute operation.

    Values from module.params take precedence over extra_data when the
    same key appears in both.
    """
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    fields = dict(extra_data or {})
    fields.update(module.params)
    return template.format(**fields)
def wait_for_operation(module, response):
    """Block until the async operation in `response` finishes, then fetch
    and return the resulting httpsHealthCheck resource."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    # The finished operation's targetLink points at the affected resource.
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#httpsHealthCheck')
def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until its status is DONE.

    NOTE(review): there is no timeout here -- an operation that never
    reaches DONE would be polled forever.
    """
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        # Fail fast if the operation already reports errors.
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module when the operation response carries errors."""
    errors = navigate_hash(response, err_path)
    if errors is None:
        return
    module.fail_json(msg=errors)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
shastikk/youtube-dl | youtube_dl/extractor/addanime.py | 117 | 3269 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
qualities,
)
class AddAnimeIE(InfoExtractor):
    """Extractor for add-anime.net video pages.

    The site may answer with an HTTP 503 anti-bot challenge page; in that
    case the embedded arithmetic puzzle is solved and confirmed before
    the page is fetched again.
    """
    _VALID_URL = r'http://(?:\w+\.)?add-anime\.net/(?:watch_video\.php\?(?:.*?)v=|video/)(?P<id>[\w_]+)'
    _TESTS = [{
        'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
        'md5': '72954ea10bc979ab5e2eb288b21425a0',
        'info_dict': {
            'id': '24MR3YO5SAS9',
            'ext': 'mp4',
            'description': 'One Piece 606',
            'title': 'One Piece 606',
        }
    }, {
        'url': 'http://add-anime.net/video/MDUGWYKNGBD8/One-Piece-687',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        try:
            webpage = self._download_webpage(url, video_id)
        except ExtractorError as ee:
            # Anything other than the 503 challenge page is a real error.
            if not isinstance(ee.cause, compat_HTTPError) or \
               ee.cause.code != 503:
                raise

            redir_webpage = ee.cause.read().decode('utf-8')
            action = self._search_regex(
                r'<form id="challenge-form" action="([^"]+)"',
                redir_webpage, 'Redirect form')
            vc = self._search_regex(
                r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>',
                redir_webpage, 'redirect vc value')
            av = re.search(
                r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
                redir_webpage)
            if av is None:
                raise ExtractorError('Cannot find redirect math task')
            av_res = int(av.group(1)) + int(av.group(2)) * int(av.group(3))

            # The expected answer is the arithmetic result plus the length
            # of the host name, submitted back to the challenge form.
            parsed_url = compat_urllib_parse_urlparse(url)
            av_val = av_res + len(parsed_url.netloc)
            confirm_url = (
                parsed_url.scheme + '://' + parsed_url.netloc +
                action + '?' +
                compat_urllib_parse.urlencode({
                    'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
            self._download_webpage(
                confirm_url, video_id,
                note='Confirming after redirect')
            webpage = self._download_webpage(url, video_id)

        FORMATS = ('normal', 'hq')
        quality = qualities(FORMATS)
        formats = []
        for format_id in FORMATS:
            rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id)
            # Fixed typo: the field name was 'video file URLx', which
            # leaked into the "unable to extract ..." warning message.
            video_url = self._search_regex(rex, webpage, 'video file URL',
                                           fatal=False)
            if not video_url:
                continue
            formats.append({
                'format_id': format_id,
                'url': video_url,
                'quality': quality(format_id),
            })

        self._sort_formats(formats)
        video_title = self._og_search_title(webpage)
        video_description = self._og_search_description(webpage)

        return {
            '_type': 'video',
            'id': video_id,
            'formats': formats,
            'title': video_title,
            'description': video_description
        }
| unlicense |
normanmaurer/autobahntestsuite-maven-plugin | src/main/resources/twisted/web/template.py | 44 | 17302 | # -*- test-case-name: twisted.web.test.test_template -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
HTML rendering for twisted.web.
@var VALID_HTML_TAG_NAMES: A list of recognized HTML tag names, used by the
L{tag} object.
@var TEMPLATE_NAMESPACE: The XML namespace used to identify attributes and
elements used by the templating system, which should be removed from the
final output document.
@var tags: A convenience object which can produce L{Tag} objects on demand via
attribute access. For example: C{tags.div} is equivalent to C{Tag("div")}.
Tags not specified in L{VALID_HTML_TAG_NAMES} will result in an
L{AttributeError}.
"""
__all__ = [
'TEMPLATE_NAMESPACE', 'VALID_HTML_TAG_NAMES', 'Element', 'TagLoader',
'XMLString', 'XMLFile', 'renderer', 'flatten', 'flattenString', 'tags',
'Comment', 'CDATA', 'Tag', 'slot', 'CharRef', 'renderElement'
]
import warnings
from zope.interface import implements
from cStringIO import StringIO
from xml.sax import make_parser, handler
from twisted.web._stan import Tag, slot, Comment, CDATA, CharRef
from twisted.python.filepath import FilePath
TEMPLATE_NAMESPACE = 'http://twistedmatrix.com/ns/twisted.web.template/0.1'
from twisted.web.iweb import ITemplateLoader
from twisted.python import log
# Go read the definition of NOT_DONE_YET. For lulz. This is totally
# equivalent. And this turns out to be necessary, because trying to import
# NOT_DONE_YET in this module causes a circular import which we cannot escape
# from. From which we cannot escape. Etc. glyph is okay with this solution for
# now, and so am I, as long as this comment stays to explain to future
# maintainers what it means. ~ C.
#
# See http://twistedmatrix.com/trac/ticket/5557 for progress on fixing this.
NOT_DONE_YET = 1
class _NSContext(object):
"""
A mapping from XML namespaces onto their prefixes in the document.
"""
def __init__(self, parent=None):
"""
Pull out the parent's namespaces, if there's no parent then default to
XML.
"""
self.parent = parent
if parent is not None:
self.nss = dict(parent.nss)
else:
self.nss = {'http://www.w3.org/XML/1998/namespace':'xml'}
def get(self, k, d=None):
"""
Get a prefix for a namespace.
@param d: The default prefix value.
"""
return self.nss.get(k, d)
def __setitem__(self, k, v):
"""
Proxy through to setting the prefix for the namespace.
"""
self.nss.__setitem__(k, v)
def __getitem__(self, k):
"""
Proxy through to getting the prefix for the namespace.
"""
return self.nss.__getitem__(k)
class _ToStan(handler.ContentHandler, handler.EntityResolver):
    """
    A SAX parser which converts an XML document to the Twisted STAN
    Document Object Model.
    """
    def __init__(self, sourceFilename):
        """
        @param sourceFilename: the filename to load the XML out of.
        """
        self.sourceFilename = sourceFilename
        self.prefixMap = _NSContext()
        # True while between startCDATA()/endCDATA(); character data is
        # then buffered on self.stack instead of self.current.
        self.inCDATA = False
    def setDocumentLocator(self, locator):
        """
        Set the document locator, which knows about line and character numbers.
        """
        self.locator = locator
    def startDocument(self):
        """
        Initialise the document.
        """
        # self.current always points at the list children should be
        # appended to; self.stack mirrors the open-element nesting.
        self.document = []
        self.current = self.document
        self.stack = []
        self.xmlnsAttrs = []
    def endDocument(self):
        """
        Document ended.
        """
    def processingInstruction(self, target, data):
        """
        Processing instructions are ignored.
        """
    def startPrefixMapping(self, prefix, uri):
        """
        Set up the prefix mapping, which maps fully qualified namespace URIs
        onto namespace prefixes.
        This gets called before startElementNS whenever an C{xmlns} attribute
        is seen.
        """
        self.prefixMap = _NSContext(self.prefixMap)
        self.prefixMap[uri] = prefix
        # Ignore the template namespace; we'll replace those during parsing.
        if uri == TEMPLATE_NAMESPACE:
            return
        # Add to a list that will be applied once we have the element.
        if prefix is None:
            self.xmlnsAttrs.append(('xmlns',uri))
        else:
            self.xmlnsAttrs.append(('xmlns:%s'%prefix,uri))
    def endPrefixMapping(self, prefix):
        """
        "Pops the stack" on the prefix mapping.
        Gets called after endElementNS.
        """
        self.prefixMap = self.prefixMap.parent
    def startElementNS(self, namespaceAndName, qname, attrs):
        """
        Gets called when we encounter a new xmlns attribute.
        @param namespaceAndName: a (namespace, name) tuple, where name
            determines which type of action to take, if the namespace matches
            L{TEMPLATE_NAMESPACE}.
        @param qname: ignored.
        @param attrs: attributes on the element being started.
        """
        # Source position is recorded on every Tag/slot for error messages.
        filename = self.sourceFilename
        lineNumber = self.locator.getLineNumber()
        columnNumber = self.locator.getColumnNumber()
        ns, name = namespaceAndName
        if ns == TEMPLATE_NAMESPACE:
            if name == 'transparent':
                name = ''
            elif name == 'slot':
                try:
                    # Try to get the default value for the slot
                    default = attrs[(None, 'default')]
                except KeyError:
                    # If there wasn't one, then use None to indicate no
                    # default.
                    default = None
                el = slot(
                    attrs[(None, 'name')], default=default,
                    filename=filename, lineNumber=lineNumber,
                    columnNumber=columnNumber)
                self.stack.append(el)
                self.current.append(el)
                self.current = el.children
                return
        # Pull the t:render attribute off the element, if present.
        render = None
        attrs = dict(attrs)
        for k, v in attrs.items():
            attrNS, justTheName = k
            if attrNS != TEMPLATE_NAMESPACE:
                continue
            if justTheName == 'render':
                render = v
                del attrs[k]
        # nonTemplateAttrs is a dictionary mapping attributes that are *not* in
        # TEMPLATE_NAMESPACE to their values.  Those in TEMPLATE_NAMESPACE were
        # just removed from 'attrs' in the loop immediately above.  The key in
        # nonTemplateAttrs is either simply the attribute name (if it was not
        # specified as having a namespace in the template) or prefix:name,
        # preserving the xml namespace prefix given in the document.
        nonTemplateAttrs = {}
        for (attrNs, attrName), v in attrs.items():
            nsPrefix = self.prefixMap.get(attrNs)
            if nsPrefix is None:
                attrKey = attrName
            else:
                attrKey = '%s:%s' % (nsPrefix, attrName)
            nonTemplateAttrs[attrKey] = v
        if ns == TEMPLATE_NAMESPACE and name == 'attr':
            # <t:attr name="..."> sets an attribute on its parent element,
            # with the attr element's children as the attribute value.
            if not self.stack:
                # TODO: define a better exception for this?
                raise AssertionError(
                    '<{%s}attr> as top-level element' % (TEMPLATE_NAMESPACE,))
            if 'name' not in nonTemplateAttrs:
                # TODO: same here
                raise AssertionError(
                    '<{%s}attr> requires a name attribute' % (TEMPLATE_NAMESPACE,))
            el = Tag('', render=render, filename=filename,
                     lineNumber=lineNumber, columnNumber=columnNumber)
            self.stack[-1].attributes[nonTemplateAttrs['name']] = el
            self.stack.append(el)
            self.current = el.children
            return
        # Apply any xmlns attributes
        if self.xmlnsAttrs:
            nonTemplateAttrs.update(dict(self.xmlnsAttrs))
            self.xmlnsAttrs = []
        # Add the prefix that was used in the parsed template for non-template
        # namespaces (which will not be consumed anyway).
        if ns != TEMPLATE_NAMESPACE and ns is not None:
            prefix = self.prefixMap[ns]
            if prefix is not None:
                name = '%s:%s' % (self.prefixMap[ns],name)
        el = Tag(
            name, attributes=dict(nonTemplateAttrs), render=render,
            filename=filename, lineNumber=lineNumber,
            columnNumber=columnNumber)
        self.stack.append(el)
        self.current.append(el)
        self.current = el.children
    def characters(self, ch):
        """
        Called when we receive some characters.  CDATA characters get passed
        through as is.
        @type ch: C{string}
        """
        if self.inCDATA:
            # Inside CDATA the stack top is the plain list pushed by
            # startCDATA(), not a Tag.
            self.stack[-1].append(ch)
            return
        self.current.append(ch)
    def endElementNS(self, name, qname):
        """
        A namespace tag is closed.  Pop the stack, if there's anything left in
        it, otherwise return to the document's namespace.
        """
        self.stack.pop()
        if self.stack:
            self.current = self.stack[-1].children
        else:
            self.current = self.document
    def startDTD(self, name, publicId, systemId):
        """
        DTDs are ignored.
        """
    def endDTD(self, *args):
        """
        DTDs are ignored.
        """
    def startCDATA(self):
        """
        We're starting to be in a CDATA element, make a note of this.
        """
        self.inCDATA = True
        # Buffer list for the CDATA text; collected in endCDATA().
        self.stack.append([])
    def endCDATA(self):
        """
        We're no longer in a CDATA element.  Collect up the characters we've
        parsed and put them in a new CDATA object.
        """
        self.inCDATA = False
        comment = ''.join(self.stack.pop())
        self.current.append(CDATA(comment))
    def comment(self, content):
        """
        Add an XML comment which we've encountered.
        """
        self.current.append(Comment(content))
def _flatsaxParse(fl):
    """
    Perform a SAX parse of an XML document with the _ToStan class.

    @param fl: The XML document to be parsed.
    @type fl: A file object or filename.

    @return: a C{list} of Stan objects.
    """
    parser = make_parser()
    # No validation, namespaces on (needed for template attributes), and
    # no external entity resolution.
    for feature, value in [(handler.feature_validation, 0),
                           (handler.feature_namespaces, 1),
                           (handler.feature_external_ges, 0),
                           (handler.feature_external_pes, 0)]:
        parser.setFeature(feature, value)
    stanHandler = _ToStan(getattr(fl, "name", None))
    parser.setContentHandler(stanHandler)
    parser.setEntityResolver(stanHandler)
    # Lexical events (comments, CDATA) go to the same handler.
    parser.setProperty(handler.property_lexical_handler, stanHandler)
    parser.parse(fl)
    return stanHandler.document
class TagLoader(object):
    """
    A trivial L{ITemplateLoader} that wraps an already-constructed
    L{IRenderable} provider and serves it from L{load}.

    @ivar tag: The object which will be loaded.
    @type tag: An L{IRenderable} provider.
    """
    implements(ITemplateLoader)

    def __init__(self, tag):
        """
        @param tag: The object which will be loaded.
        @type tag: An L{IRenderable} provider.
        """
        self.tag = tag

    def load(self):
        """
        Return the wrapped object as a one-element list.
        """
        return [self.tag]
class XMLString(object):
    """
    An L{ITemplateLoader} that parses an in-memory XML string once, at
    construction time, and serves the parsed document from L{load}.

    @ivar _loadedTemplate: The loaded document.
    @type _loadedTemplate: a C{list} of Stan objects.
    """
    implements(ITemplateLoader)

    def __init__(self, s):
        """
        Parse the given string immediately via a StringIO wrapper.

        @param s: The string from which to load the XML.
        @type s: C{str}
        """
        self._loadedTemplate = _flatsaxParse(StringIO(s))

    def load(self):
        """
        Return the already-parsed document.

        @return: the loaded document.
        @rtype: a C{list} of Stan objects.
        """
        return self._loadedTemplate
class XMLFile(object):
    """
    An L{ITemplateLoader} that loads and parses XML from a file, lazily.

    @ivar _loadedTemplate: The parsed document, or C{None} until the first
        L{load} call.
    @type _loadedTemplate: a C{list} of Stan objects, or C{None}.

    @ivar _path: The L{FilePath}, file object, or filename that is being
        loaded from.
    """
    implements(ITemplateLoader)

    def __init__(self, path):
        """
        Remember the parser input; parsing is deferred until L{load}.

        @param path: The file from which to load the XML.
        @type path: L{FilePath}
        """
        if not isinstance(path, FilePath):
            # Filenames and file objects still work, but only FilePath is
            # supported going forward.
            warnings.warn(
                "Passing filenames or file objects to XMLFile is deprecated "
                "since Twisted 12.1. Pass a FilePath instead.",
                category=DeprecationWarning, stacklevel=2)
        self._path = path
        self._loadedTemplate = None

    def _loadDoc(self):
        """
        Read and parse the XML.

        @return: the loaded document.
        @rtype: a C{list} of Stan objects.
        """
        if isinstance(self._path, FilePath):
            f = self._path.open('r')
            try:
                return _flatsaxParse(f)
            finally:
                f.close()
        # Deprecated cases: a filename string or an already-open file
        # object; the parser accepts both directly.
        return _flatsaxParse(self._path)

    def __repr__(self):
        return '<XMLFile of %r>' % (self._path,)

    def load(self):
        """
        Return the document, parsing it on first use and caching the result.

        @return: the loaded document.
        @rtype: a C{list} of Stan objects.
        """
        if self._loadedTemplate is None:
            self._loadedTemplate = self._loadDoc()
        return self._loadedTemplate
# Last updated October 2011, using W3Schools as a reference. Link:
# http://www.w3schools.com/html5/html5_reference.asp
# Note that <xmp> is explicitly omitted; its semantics do not work with
# t.w.template and it is officially deprecated.
VALID_HTML_TAG_NAMES = set("""
    a abbr acronym address applet area article aside
    audio b base basefont bdi bdo big blockquote
    body br button canvas caption center cite code
    col colgroup command datalist dd del details dfn
    dir div dl dt em embed fieldset figcaption
    figure font footer form frame frameset h1 h2 h3
    h4 h5 h6 head header hgroup hr html i iframe
    img input ins isindex keygen kbd label legend
    li link map mark menu meta meter nav noframes
    noscript object ol optgroup option output p param
    pre progress q rp rt ruby s samp script
    section select small source span strike strong
    style sub summary sup table tbody td textarea
    tfoot th thead time title tr tt u ul var
    video wbr
""".split())
class _TagFactory(object):
    """
    A factory for L{Tag} objects; the implementation of the L{tags} object.

    This allows for the syntactic convenience of C{from twisted.web.html import
    tags; tags.a(href="linked-page.html")}, where 'a' can be basically any HTML
    tag.

    The class is not exposed publicly because you only ever need one of these,
    and we already made it for you.

    @see: L{tags}
    """
    def __getattr__(self, tagName):
        # The special name 'transparent' yields an unnamed Tag.
        if tagName == 'transparent':
            return Tag('')
        # allow for E.del as E.del_
        stripped = tagName.rstrip('_')
        if stripped in VALID_HTML_TAG_NAMES:
            return Tag(stripped)
        raise AttributeError('unknown tag %r' % (stripped,))

tags = _TagFactory()
def renderElement(request, element,
                  doctype='<!DOCTYPE html>', _failElement=None):
    """
    Render an element or other C{IRenderable}.

    @param request: The C{Request} being rendered to.
    @param element: An C{IRenderable} which will be rendered.
    @param doctype: A C{str} which will be written as the first line of
        the request, or C{None} to disable writing of a doctype.  The C{string}
        should not include a trailing newline and will default to the HTML5
        doctype C{'<!DOCTYPE html>'}.

    @returns: NOT_DONE_YET

    @since: 12.1
    """
    if _failElement is None:
        _failElement = twisted.web.util.FailureElement

    if doctype is not None:
        request.write(doctype)
        request.write('\n')

    def renderingFailed(failure):
        # Log the failure, then either render a full traceback page or a
        # terse notice, depending on the site's configuration.
        log.err(failure, "An error occurred while rendering the response.")
        if request.site.displayTracebacks:
            return flatten(request, _failElement(failure), request.write)
        request.write(
            ('<div style="font-size:800%;'
             'background-color:#FFF;'
             'color:#F00'
             '">An error occurred while rendering the response.</div>'))

    rendering = flatten(request, element, request.write)
    rendering.addErrback(renderingFailed)
    # The request is finished whether rendering succeeded or not.
    rendering.addBoth(lambda ignored: request.finish())
    return NOT_DONE_YET
from twisted.web._element import Element, renderer
from twisted.web._flatten import flatten, flattenString
import twisted.web.util
| apache-2.0 |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import UDP
class MGCP(Packet):
    # Plain-text MGCP command layer.  The fields mirror the textual wire
    # format: "VERB SP TRANSACTION-ID SP ENDPOINT SP VERSION LF", with the
    # separators modeled as explicit one-byte fields.
    name = "MGCP"
    longname = "Media Gateway Control Protocol"
    fields_desc = [ StrStopField("verb","AUEP"," ", -1),            # command verb, read up to the first space
                    StrFixedLenField("sep1"," ",1),                 # single-space separator
                    StrStopField("transaction_id","1234567"," ", -1),   # transaction identifier
                    StrFixedLenField("sep2"," ",1),
                    StrStopField("endpoint","dummy@dummy.net"," ", -1), # endpoint name
                    StrFixedLenField("sep3"," ",1),
                    StrStopField("version","MGCP 1.0 NCS 1.0","\x0a", -1),  # protocol version, read up to LF
                    StrFixedLenField("sep4","\x0a",1),              # trailing LF
                    ]
#class MGCP(Packet):
# name = "MGCP"
# longname = "Media Gateway Control Protocol"
# fields_desc = [ ByteEnumField("type",0, ["request","response","others"]),
# ByteField("code0",0),
# ByteField("code1",0),
# ByteField("code2",0),
# ByteField("code3",0),
# ByteField("code4",0),
# IntField("trasid",0),
# IntField("req_time",0),
# ByteField("is_duplicate",0),
# ByteField("req_available",0) ]
#
# MGCP call agents conventionally use UDP port 2727; register the dissector
# for traffic in either direction (to or from that port).
bind_layers( UDP, MGCP, dport=2727)
bind_layers( UDP, MGCP, sport=2727)
| apache-2.0 |
airr-community/airr-formats | lang/python/versioneer.py | 386 | 68611 |
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when then
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """A plain attribute bag for Versioneer configuration parameters.

    Attributes (VCS, style, versionfile_source, tag_prefix, ...) are
    assigned by get_config_from_root() rather than declared here.
    """
def get_root():
    """Locate and return the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .  As a
    fallback, 'python path/to/setup.py COMMAND' is supported by looking at
    the directory containing sys.argv[0].  Raises VersioneerBadRootError
    when neither location looks like a project root.
    """
    def _looks_like_root(candidate):
        # The root is recognized by the presence of either marker file.
        return (os.path.exists(os.path.join(candidate, "setup.py")) or
                os.path.exists(os.path.join(candidate, "versioneer.py")))

    root = os.path.realpath(os.path.abspath(os.getcwd()))
    if not _looks_like_root(root):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(
            os.path.realpath(os.path.abspath(sys.argv[0])))
        if not _looks_like_root(root):
            raise VersioneerBadRootError(
                "Versioneer was unable to run the project root directory. "
                "Versioneer requires setup.py to be executed from "
                "its immediate directory (like 'python setup.py COMMAND'), "
                "or in a way that lets it use sys.argv[0] to find the root "
                "(like 'python path/to/setup.py COMMAND').")
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(me)[0])
        versioneer_py = os.path.join(root, "versioneer.py")
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        # __file__ is undefined in some embedded contexts; skip the check.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    parser = configparser.SafeConfigParser()
    with open(os.path.join(root, "setup.cfg"), "r") as f:
        parser.readfp(f)
    vcs = parser.get("versioneer", "VCS")  # mandatory

    def read_option(name):
        # Optional settings that are absent simply come back as None.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None

    cfg = VersioneerConfig()
    cfg.VCS = vcs
    cfg.style = read_option("style") or ""
    cfg.versionfile_source = read_option("versionfile_source")
    cfg.versionfile_build = read_option("versionfile_build")
    cfg.tag_prefix = read_option("tag_prefix")
    if cfg.tag_prefix in ("''", '""'):
        # Allow an explicitly-quoted empty tag prefix in setup.cfg.
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = read_option("parentdir_prefix")
    cfg.verbose = read_option("verbose")
    return cfg
class NotThisMethod(Exception):
    """Raised by a version-discovery strategy that does not apply here.

    Each strategy (keywords, VCS query, parent directory name) raises this
    so the caller can fall through to the next one.
    """
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a function as the handler for a particular VCS.

    The decorated function is stored in HANDLERS[vcs][method] and returned
    unchanged.
    """
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Try each of the given command names until one can be spawned.

    Returns a (stdout, returncode) pair.  Both are None when no candidate
    command could be executed at all; stdout is None (with the real return
    code) when the command ran but exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for executable in commands:
        printable = str([executable] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [executable] + args, cwd=cwd, env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except EnvironmentError:
            exc = sys.exc_info()[1]
            if exc.errno == errno.ENOENT:
                # This name isn't installed; try the next candidate.
                continue
            if verbose:
                print("unable to run %s" % printable)
                print(exc)
            return None, None
    else:
        # Every candidate raised ENOENT.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    output = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        output = output.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % printable)
            print("stdout was %s" % output)
        return None, process.returncode
    return output, process.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans ``versionfile_abs`` (a _version.py) for the quoted values assigned
    to git_refnames, git_full and git_date, returning a dict with any of the
    keys "refnames", "full" and "date" that were found.  An unreadable file
    simply yields an empty dict.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    assignments = {"git_refnames =": "refnames",
                   "git_full =": "full",
                   "git_date =": "date"}
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if reading raises;
        # the previous open()/close() pair leaked it on error.
        with open(versionfile_abs, "r") as f:
            for line in f:
                stripped = line.strip()
                for prefix, key in assignments.items():
                    if stripped.startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    ``keywords`` comes from git_get_keywords().  Raises NotThisMethod when
    the keywords are absent or still unexpanded (i.e. this is not a
    git-archive tarball); otherwise returns a complete version dict.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    # set comprehension instead of set([...]) — same members, idiomatic form
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  Raises NotThisMethod when 'root' is not
    under git control or git commands fail outright.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # plain "git" may not resolve on Windows; try the wrappers too
        GITS = ["git.cmd", "git.exe"]
    # cheap probe: bail out early if 'root' is not a git checkout at all
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # strip the suffix so the TAG-NUM-gHEX parse below is not confused
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): run_command can return (None, rc) on failure (see the
    # describe_out check above); [0].strip() would then raise — confirm
    # 'git show' cannot fail once the earlier commands have succeeded.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution, then 'git add'-ing the touched
    files.

    manifest_in: path to the project's MANIFEST.in
    versionfile_source: relative path of the generated _version.py
    ipy: path to the package __init__.py that was edited, or None
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # point at the .py source, not the compiled artifact
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # 'with' closes the handle even if reading raises; the previous
        # open()/close() pair leaked it on error.
        with open(".gitattributes", "r") as f:
            for line in f:
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Up to two parent levels
    above ``root`` are also searched for a suitably named directory.
    """
    tried = []
    for _ in range(3):
        name = os.path.basename(root)
        if name.startswith(parentdir_prefix):
            # everything after the prefix is the version string
            return {"version": name[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present.

    Reads ``filename`` and extracts the JSON blob stored in its
    ``version_json`` block (see SHORT_VERSION_PY).  Raises NotThisMethod if
    the file is unreadable or contains no version_json block.
    """
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    # single pattern accepts both \n and \r\n line endings (previously two
    # near-identical searches were performed back to back)
    mo = re.search(r"version_json = '''\r?\n(.*)''' # END VERSION_JSON",
                   contents, re.M | re.S)
    if not mo:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    serialized = json.dumps(versions, sort_keys=True,
                            indent=1, separators=(",", ": "))
    # replace the file wholesale rather than editing it in place
    os.unlink(filename)
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % serialized)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Treats a missing *or* explicitly-None "closest-tag" as the empty string;
    the previous ``pieces.get("closest-tag", "")`` raised TypeError when the
    key was present with value None.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Render a PEP 440 version with a "local version identifier".

    Produces TAG[+DISTANCE.gHEX[.dirty]]; a dirtied tagged build therefore
    comes out as TAG+0.gHEX.dirty.  With no tag at all the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    dirty_suffix = ".dirty" if pieces["dirty"] else ""
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            return tag
        local = "%d.g%s" % (pieces["distance"], pieces["short"])
        return tag + plus_or_dot(pieces) + local + dirty_suffix
    # exception #1: no tag — synthesize an "untagged" local version
    return "0+untagged.%d.g%s%s" % (pieces["distance"], pieces["short"],
                                    dirty_suffix)
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE]; the dirty flag is never marked.

    Without any tag the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag at all
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return "%s.post.dev%d" % (tag, pieces["distance"])
    return tag
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX].

    ".dev0" marks a dirty tree.  Note that .dev0 sorts *before* the clean
    release, so a dirty build appears "older" than the corresponding clean
    one — but you shouldn't be releasing software with -dirty anyways.
    Without any tag the result is 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    dev_marker = ".dev0" if pieces["dirty"] else ""
    if tag:
        out = tag
        if pieces["distance"] or pieces["dirty"]:
            out += ".post%d" % pieces["distance"]
            out += dev_marker
            out += plus_or_dot(pieces)
            out += "g%s" % pieces["short"]
        return out
    # exception #1: no tag at all
    return "0.post%d%s+g%s" % (pieces["distance"], dev_marker,
                               pieces["short"])
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]] (".dev0" marks a dirty tree).

    Without any tag the result is 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    needs_post = pieces["distance"] or pieces["dirty"]
    if tag and not needs_post:
        return tag
    base = tag if tag else "0"
    out = "%s.post%d" % (base, pieces["distance"])
    if pieces["dirty"]:
        out += ".dev0"
    return out
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty --always'.

    Without any tag the result is just HEX[-dirty] (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tag at all
        out = pieces["short"]
    return out + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty], like 'git describe ... --long'.

    Distance and hash are unconditional here; without any tag the result is
    just HEX[-dirty] (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: no tag at all
        out = pieces["short"]
    return out + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # propagate a parse/VCS failure instead of a version string
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    # dispatch table replaces the if/elif chain
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
# Distinct exception type so callers can catch Versioneer project-layout
# problems separately from generic exceptions.
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
    """Get the project version from whatever source is available.
    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # Sources are consulted in priority order; the first that does not raise
    # NotThisMethod wins: expanded git-archive keywords (in _version.py),
    # a previously written _version.py, a live VCS query ('git describe'),
    # and finally the parent-directory name.
    kw_extractor = handlers.get("get_keywords")
    kw_renderer = handlers.get("keywords")
    if kw_extractor and kw_renderer:
        try:
            found = kw_renderer(kw_extractor(versionfile_abs),
                                cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % found)
            return found
        except NotThisMethod:
            pass
    try:
        found = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, found))
        return found
    except NotThisMethod:
        pass
    vcs_querier = handlers.get("pieces_from_vcs")
    if vcs_querier:
        try:
            found = render(vcs_querier(cfg.tag_prefix, root, verbose),
                           cfg.style)
            if verbose:
                print("got version from VCS %s" % found)
            return found
        except NotThisMethod:
            pass
    try:
        if cfg.parentdir_prefix:
            found = versions_from_parentdir(cfg.parentdir_prefix, root,
                                            verbose)
            if verbose:
                print("got version from parentdir %s" % found)
            return found
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version",
            "date": None}
def get_version():
    """Get the short version string for this project."""
    versions = get_versions()
    return versions["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    Returns a dict suitable for setup(cmdclass=...); it always contains
    "version", "build_py" and "sdist", and adds/substitutes commands when
    cx_Freeze or py2exe are in use.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52
    cmds = {}
    # we add "version" to both distutils and setuptools
    from distutils.core import Command
    class cmd_version(Command):
        # "python setup.py version": print the computed version information
        description = "report generated version string"
        user_options = []
        boolean_options = []
        def initialize_options(self):
            """No options to initialize."""
            pass
        def finalize_options(self):
            """No options to finalize."""
            pass
        def run(self):
            """Print version, revision id, dirty flag, date and any error."""
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    # distutils/build -> build_py
    # distutils/install -> distutils/build ->..
    # setuptools/bdist_wheel -> distutils/install ->..
    # setuptools/bdist_egg -> distutils/install_lib -> build_py
    # setuptools/install -> bdist_egg ->..
    # setuptools/develop -> ?
    # pip install:
    # copies source tree to a tempdir before running egg_info/etc
    # if .git isn't copied too, 'git describe' will fail
    # then does setup.py bdist_wheel, or sometimes setup.py install
    # setup.py egg_info -> ?
    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py
    class cmd_build_py(_build_py):
        def run(self):
            """Run the normal build, then freeze the version into build/."""
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py
    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        # "product_version": versioneer.get_version(),
        # ...
        class cmd_build_exe(_build_exe):
            def run(self):
                """Freeze the version into the source file, build, restore."""
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                # rewrite the full template so the working tree keeps a
                # VCS-aware _version.py after the build
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        # cx_Freeze's build_exe replaces the ordinary build_py path
        del cmds["build_py"]
    if 'py2exe' in sys.modules:  # py2exe enabled?
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe  # py3
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe  # py2
        class cmd_py2exe(_py2exe):
            def run(self):
                """Freeze the version into the source file, build, restore."""
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["py2exe"] = cmd_py2exe
    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist
    class cmd_sdist(_sdist):
        def run(self):
            """Compute the version once and record it for make_release_tree."""
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)
        def make_release_tree(self, base_dir, files):
            """Build the release tree, then freeze the version into it."""
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist
    return cmds
# Error text printed to stderr by do_setup() when setup.cfg lacks a usable
# [versioneer] section.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
# Commented-out [versioneer] skeleton appended to setup.cfg by do_setup()
# when no configuration is present.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
# Snippet appended to the package __init__.py so that
# package.__version__ reflects the computed version at runtime.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer.

    Writes the project's _version.py from the template, appends the version
    snippet to the package __init__.py, ensures MANIFEST.in includes both
    versioneer.py and the version file, and finally applies VCS-specific
    setup.  Returns 0 on success, 1 when setup.cfg is missing configuration.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            # no config at all: seed setup.cfg with a commented sample
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 when setup.py looks correct).
    """
    found = set()
    setters = False
    errors = 0
    # substrings whose presence we require, mapped to their labels
    markers = {"import versioneer": "import",
               "versioneer.get_cmdclass()": "cmdclass",
               "versioneer.get_version()": "get_version"}
    with open("setup.py", "r") as f:
        for line in f.readlines():
            for needle, label in markers.items():
                if needle in line:
                    found.add(label)
            # old-style module-level configuration should be gone
            if "versioneer.VCS" in line or \
                    "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
# Command-line entry point: "python versioneer.py setup" installs the
# Versioneer machinery into the current project and sanity-checks setup.py.
if __name__ == "__main__":
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        # misconfigurations in setup.py should also fail the run
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
| mit |
# source: gforcada/isort — isort/__init__.py
"""__init__.py.
Defines the isort module to include the SortImports utility class as well as any defined settings.
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from . import settings
from .isort import SortImports
# Package version string, exposed to callers as isort.__version__.
__version__ = "4.1.2"
| mit |
adamhaney/airflow | airflow/utils/log/s3_task_handler.py | 6 | 7120 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from airflow import configuration
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.log.file_task_handler import FileTaskHandler
class S3TaskHandler(FileTaskHandler, LoggingMixin):
    """
    S3TaskHandler is a python log handler that handles and reads
    task instance logs. It extends airflow FileTaskHandler and
    uploads to and reads from S3 remote storage.
    """
    def __init__(self, base_log_folder, s3_log_folder, filename_template):
        """
        :param base_log_folder: local base folder the task logs are
            written to before upload
        :param s3_log_folder: S3 base location logs are uploaded to and
            read from
        :param filename_template: template used to render the per-try
            relative log path
        """
        super(S3TaskHandler, self).__init__(base_log_folder, filename_template)
        self.remote_base = s3_log_folder
        # Relative log path of the bound task instance; populated by
        # set_context() once a task instance is attached to this handler.
        self.log_relative_path = ''
        self._hook = None
        self.closed = False
        self.upload_on_close = True

    def _build_hook(self):
        """
        Build an S3Hook from the configured REMOTE_LOG_CONN_ID.

        Returns the hook, or None (with the error logged) when it cannot
        be created, e.g. airflow[s3] is not installed or the connection
        does not exist.
        """
        remote_conn_id = configuration.conf.get('core', 'REMOTE_LOG_CONN_ID')
        try:
            # Imported lazily so this handler can be constructed even when
            # the optional S3 dependencies are missing.
            from airflow.hooks.S3_hook import S3Hook
            return S3Hook(remote_conn_id)
        except Exception:
            self.log.error(
                'Could not create an S3Hook with connection id "%s". '
                'Please make sure that airflow[s3] is installed and '
                'the S3 connection exists.', remote_conn_id
            )

    @property
    def hook(self):
        """Lazily created S3Hook; while _build_hook keeps failing (and
        returning None) creation is retried on each access."""
        if self._hook is None:
            self._hook = self._build_hook()
        return self._hook

    def set_context(self, ti):
        """
        Bind this handler to a task instance and compute the relative
        path used for both the local log file and the S3 key.

        :param ti: task instance object
        """
        super(S3TaskHandler, self).set_context(ti)
        # Local location and remote location is needed to open and
        # upload local log file to S3 remote storage.
        self.log_relative_path = self._render_filename(ti, ti.try_number)
        # Raw task instances should not ship their logs to remote storage.
        self.upload_on_close = not ti.raw

    def close(self):
        """
        Close and upload local log file to remote storage S3.
        """
        # When application exit, system shuts down all handlers by
        # calling close method. Here we check if logger is already
        # closed to prevent uploading the log to remote storage multiple
        # times when `logging.shutdown` is called.
        if self.closed:
            return

        super(S3TaskHandler, self).close()

        if not self.upload_on_close:
            return

        local_loc = os.path.join(self.local_base, self.log_relative_path)
        remote_loc = os.path.join(self.remote_base, self.log_relative_path)
        if os.path.exists(local_loc):
            # read log and remove old logs to get just the latest additions
            with open(local_loc, 'r') as logfile:
                log = logfile.read()
            self.s3_write(log, remote_loc)

        # Mark closed so we don't double write if close is called twice
        self.closed = True

    def _read(self, ti, try_number, metadata=None):
        """
        Read logs of given task instance and try_number from S3 remote storage.
        If failed, read the log from task instance host machine.

        :param ti: task instance object
        :param try_number: task instance try_number to read logs from
        :param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
        """
        # Explicitly getting log relative path is necessary as the given
        # task instance might be different than task instance passed in
        # in set_context method.
        log_relative_path = self._render_filename(ti, try_number)
        remote_loc = os.path.join(self.remote_base, log_relative_path)

        if self.s3_log_exists(remote_loc):
            # If S3 remote file exists, we do not fetch logs from task instance
            # local machine even if there are errors reading remote logs, as
            # returned remote_log will contain error messages.
            remote_log = self.s3_read(remote_loc, return_error=True)
            log = '*** Reading remote log from {}.\n{}\n'.format(
                remote_loc, remote_log)
            return log, {'end_of_log': True}
        else:
            # Fall back to the local/worker log. Forward `metadata` so
            # streaming/auto-tailing state is preserved; it was silently
            # dropped here before.
            return super(S3TaskHandler, self)._read(ti, try_number, metadata)

    def s3_log_exists(self, remote_log_location):
        """
        Check if remote_log_location exists in remote storage

        :param remote_log_location: log's location in remote storage
        :return: True if location exists else False
        """
        try:
            return self.hook.get_key(remote_log_location) is not None
        except Exception:
            # Any S3/credential error is treated as "does not exist" so
            # reads fall back to the local log.
            pass
        return False

    def s3_read(self, remote_log_location, return_error=False):
        """
        Returns the log found at the remote_log_location. Returns '' if no
        logs are found or there is an error.

        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: str (path)
        :param return_error: if True, returns a string error message if an
            error occurs. Otherwise returns '' when an error occurs.
        :type return_error: bool
        """
        try:
            return self.hook.read_key(remote_log_location)
        except Exception:
            msg = 'Could not read logs from {}'.format(remote_log_location)
            self.log.exception(msg)
            # return error if needed
            if return_error:
                return msg
        # Honor the documented contract: return '' (not an implicit None)
        # when an error occurred and return_error is False.
        return ''

    def s3_write(self, log, remote_log_location, append=True):
        """
        Writes the log to the remote_log_location. Fails silently if no hook
        was created.

        :param log: the log to write to the remote_log_location
        :type log: str
        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: str (path)
        :param append: if False, any existing log file is overwritten. If True,
            the new log is appended to any existing logs.
        :type append: bool
        """
        if append and self.s3_log_exists(remote_log_location):
            old_log = self.s3_read(remote_log_location)
            log = '\n'.join([old_log, log]) if old_log else log

        try:
            self.hook.load_string(
                log,
                key=remote_log_location,
                replace=True,
                encrypt=configuration.conf.getboolean('core', 'ENCRYPT_S3_LOGS'),
            )
        except Exception:
            self.log.exception('Could not write logs to %s', remote_log_location)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.