#
# Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This module contains the following methods design to support common option
parsing among the multiple utilities.
Methods:
setup_common_options() Setup standard options for utilities
"""
import copy
import optparse
from optparse import Option as CustomOption, OptionValueError
import os.path
import re
from datetime import datetime
from ip_parser import find_password, parse_login_values_config_path
from mysql.utilities import LICENSE_FRM, VERSION_FRM
from mysql.utilities.exception import UtilError, FormatError
from mysql.connector.conversion import MySQLConverter
from mysql.utilities.common.messages import (PARSE_ERR_OBJ_NAME_FORMAT,
PARSE_ERR_OPT_INVALID_DATE,
PARSE_ERR_OPT_INVALID_DATE_TIME,
PARSE_ERR_OPT_INVALID_NUM_DAYS,
PARSE_ERR_OPT_INVALID_VALUE,
EXTERNAL_SCRIPT_DOES_NOT_EXIST,
INSUFFICIENT_FILE_PERMISSIONS)
from mysql.utilities.common.my_print_defaults import (MyDefaultsReader,
my_login_config_exists)
from mysql.utilities.common.pattern_matching import parse_object_name
from mysql.utilities.common.sql_transform import (is_quoted_with_backticks,
remove_backtick_quoting)
# Accepted values for the --format option (result display format).
_PERMITTED_FORMATS = ["grid", "tab", "csv", "vertical"]
# Accepted values for the --difftype option (diff display style).
_PERMITTED_DIFFS = ["unified", "context", "differ"]
# Accepted values for the --rpl option (replication dump mode).
_PERMITTED_RPL_DUMP = ["master", "slave"]
class UtilitiesParser(optparse.OptionParser):
    """Special subclass of parser that allows showing of version information
    when --help is used.
    """

    def print_help(self, output=None):
        """Print the version string before the standard help output.

        output[in]    file-like destination (default: stdout)
        """
        # Parenthesized single-argument print works identically under
        # Python 2 (expression in parentheses) and Python 3 (function call).
        print(self.version)
        optparse.OptionParser.print_help(self, output)

    def format_epilog(self, formatter):
        """Return the epilog verbatim instead of letting optparse re-wrap
        the extended help text.
        """
        return self.epilog if self.epilog is not None else ''
def prefix_check_choice(option, opt, value):
    """Validate a choice value using case-insensitive prefix matching.

    The user-supplied value is lower-cased and compared as a prefix
    against the option's declared choices.  Exactly one match returns
    the full choice name; an ambiguous prefix or no match raises
    OptionValueError.

    option[in]     Option class instance
    opt[in]        option name
    value[in]      the value provided by the user

    Returns string - valid option chosen
    """
    lowered = value.lower()
    matches = [choice for choice in option.choices
               if choice.startswith(lowered)]
    if len(matches) == 1:
        # Unambiguous prefix: return the full choice name.
        return matches[0]
    # Build the list of valid choices for the error message.
    choices = ", ".join([repr(choice) for choice in option.choices])
    if matches:
        # More than one choice shares this prefix.
        raise OptionValueError(
            ("option %s: there are multiple prefixes "
             "matching: %r (choose from %s)") % (opt, value, choices))
    # No choice matched at all.
    raise OptionValueError("option %s: invalid choice: %r (choose from %s)"
                           % (opt, value, choices))
def license_callback(self, opt, value, parser, *args, **kwargs):
    """Display the program license text and terminate via the parser."""
    license_text = LICENSE_FRM.format(program=parser.prog)
    print(license_text)
    parser.exit()
def path_callback(option, opt, value, parser):
    """Option callback: verify the value is an existing regular file.

    On success the value is stored on the parser values under the
    option's dest; otherwise a parser error is issued.

    option[in]     option instance
    opt[in]        option name
    value[in]      given user value
    parser[in]     parser instance
    """
    missing_fmt = ("the given path '{0}' in option {1} does not"
                   " exist or can not be accessed")
    not_file_fmt = ("the given path '{0}' in option {1} does not"
                    " correspond to a file")
    if not os.path.exists(value):
        parser.error(missing_fmt.format(value, opt))
    if not os.path.isfile(value):
        parser.error(not_file_fmt.format(value, opt))
    setattr(parser.values, option.dest, value)
def ssl_callback(option, opt, value, parser):
    """Option callback: accept only 0, 1 or '' for the --ssl option.

    On success the value is stored on the parser values under the
    option's dest; otherwise a parser error is issued.

    option[in]     option instance
    opt[in]        option name
    value[in]      given user value
    parser[in]     parser instance
    """
    # Membership test uses ==, matching the original chained comparison.
    if value not in (0, 1, ''):
        parser.error("the given value '{0}' in option {1} is not"
                     " valid, valid values are 0 or 1.".format(value, opt))
    setattr(parser.values, option.dest, value)
def add_config_path_option(parser):
    """Add the --config-path option (dest: config_path).

    parser[in]    the parser instance
    """
    config_path_help = ("The path to a MySQL option file "
                        "with the login options")
    parser.add_option("--config-path", action="callback",
                      callback=path_callback, type="string",
                      help=config_path_help)
def add_ssl_options(parser):
    """Add the SSL connection options.

    Adds --ssl-ca, --ssl-cert, --ssl-key (all validated as existing
    files via path_callback) and --ssl (validated via ssl_callback).

    parser[in]    the parser instance
    """
    ssl_ca_help = ("path to a file that contains "
                   "a list of trusted SSL CAs.")
    ssl_cert_help = ("name of the SSL certificate "
                     "file to use for establishing a secure connection.")
    ssl_key_help = ("name of the SSL key file to "
                    "use for establishing a secure connection.")
    ssl_help = ("specifies if the server "
                "connection requires use of SSL. If an encrypted "
                "connection cannot be established, the connection "
                "attempt fails. By default 0 (SSL not required).")
    parser.add_option("--ssl-ca", action="callback",
                      callback=path_callback, type="string",
                      help=ssl_ca_help)
    parser.add_option("--ssl-cert", action="callback",
                      callback=path_callback, type="string",
                      help=ssl_cert_help)
    parser.add_option("--ssl-key", action="callback",
                      callback=path_callback, type="string",
                      help=ssl_key_help)
    parser.add_option("--ssl", action="callback", callback=ssl_callback,
                      type="int", help=ssl_help)
class CaseInsensitiveChoicesOption(CustomOption):
    """Case insensitive choices option class

    This is an extension of the Option class. It replaces the check_choice
    method with the prefix_check_choice() method above to provide
    shortcut aware choice selection. It also ensures the choice compare is
    done with a case insensitive test.
    """
    # Replace the stock "choice" type checker with the prefix-aware,
    # case-insensitive checker defined above.
    TYPE_CHECKER = copy.copy(CustomOption.TYPE_CHECKER)
    TYPE_CHECKER["choice"] = prefix_check_choice

    def __init__(self, *opts, **attrs):
        # Lower-case the declared choices so that comparisons against the
        # lowered user input (see prefix_check_choice) are case insensitive.
        if 'choices' in attrs:
            attrs['choices'] = [attr.lower() for attr in attrs['choices']]
        CustomOption.__init__(self, *opts, **attrs)
def setup_common_options(program_name, desc_str, usage_str,
                         append=False, server=True,
                         server_default="root@localhost:3306",
                         extended_help=None,
                         add_ssl=False):
    """Build the option parser shared by all MySQL Utilities.

    Creates an UtilitiesParser and adds the options common to every
    utility: --help, --license and (optionally) --server plus the SSL
    connection options.

    program_name[in]    The program name
    desc_str[in]        The description of the utility
    usage_str[in]       A brief usage example
    append[in]          If True, allow --server to be specified multiple
                        times (default = False)
    server[in]          If True, add the --server option (default = True)
    server_default[in]  Default value for option
                        (default = "root@localhost:3306")
    extended_help[in]   Extended help (by default: None).
    add_ssl[in]         adds the --ssl-options, however these are added
                        automatically if server is True (default = False)

    Returns parser object
    """
    program_name = program_name.replace(".py", "")
    parser = UtilitiesParser(
        version=VERSION_FRM.format(program=program_name),
        description=desc_str,
        usage=usage_str,
        add_help_option=False,
        option_class=CaseInsensitiveChoicesOption,
        epilog=extended_help,
        prog=program_name)

    parser.add_option("--help", action="help", help="display a help message "
                      "and exit")
    parser.add_option("--license", action='callback',
                      callback=license_callback,
                      help="display program's license and exit")

    if server:
        # The same help text applies whether --server is repeatable or not.
        server_help = ("connection information for the server in "
                       "the form: <user>[:<password>]@<host>[:<port>]"
                       "[:<socket>] or <login-path>[:<port>]"
                       "[:<socket>] or <config-path>[<[group]>].")
        if append:
            parser.add_option("--server", action="append", dest="server",
                              help=server_help)
        else:
            parser.add_option("--server", action="store", dest="server",
                              type="string", default=server_default,
                              help=server_help)

    if server or add_ssl:
        add_ssl_options(parser)

    return parser
def add_character_set_option(parser):
    """Add the --character-set option (dest: charset).

    parser[in]    the parser instance
    """
    charset_help = ("sets the client character set. The default is "
                    "retrieved from the server variable "
                    "'character_set_client'.")
    parser.add_option("--character-set", action="store", dest="charset",
                      type="string", default=None, help=charset_help)
# Object types accepted by the --skip option (see add_skip_options and
# check_skip_options below).
_SKIP_VALUES = (
    "tables", "views", "triggers", "procedures",
    "functions", "events", "grants", "data",
    "create_db"
)
def add_skip_options(parser):
    """Add the common --skip option for database utilities.

    parser[in]    the parser instance
    """
    skip_help = ("specify objects to skip in the "
                 "operation in the form of a comma-separated list (no "
                 "spaces). Valid values = tables, views, triggers, proc"
                 "edures, functions, events, grants, data, create_db")
    parser.add_option("--skip", action="store", dest="skip_objects",
                      default=None, help=skip_help)
def check_skip_options(skip_list):
    """Check skip options for validity.

    skip_list[in]     Comma-separated string of --skip values (or None).

    Returns new skip list with items converted to lower case.
    (The original docstring claimed upper case; the code has always
    lower-cased the values.)

    Raises UtilError if an item is not one of _SKIP_VALUES.
    """
    new_skip_list = []
    if skip_list is not None:
        for item in skip_list.split(","):
            obj = item.lower()
            if obj not in _SKIP_VALUES:
                raise UtilError("The value %s is not a valid value for "
                                "--skip." % item)
            new_skip_list.append(obj)
    return new_skip_list
def add_format_option(parser, help_text, default_val, sql=False,
                      extra_formats=None):
    """Add the --format option.

    parser[in]         the parser instance
    help_text[in]      help text
    default_val[in]    default value
    sql[in]            if True, add 'sql' format (default = False)
    extra_formats[in]  list with extra formats
    """
    # Bug fix: work on a copy. Appending/extending _PERMITTED_FORMATS
    # itself mutated the shared module-level list, so 'sql' and any extra
    # formats leaked into every subsequent call in the same process.
    formats = list(_PERMITTED_FORMATS)
    if sql:
        formats.append('sql')
    if extra_formats:
        formats.extend(extra_formats)
    parser.add_option("-f", "--format", action="store", dest="format",
                      default=default_val, help=help_text, type="choice",
                      choices=formats)
def add_format_option_with_extras(parser, help_text, default_val,
                                  extra_formats):
    """Add the --format option with additional formats.

    parser[in]         the parser instance
    help_text[in]      help text
    default_val[in]    default value
    extra_formats[in]  list of additional formats to support
    """
    # Bug fix: work on a copy. Extending _PERMITTED_FORMATS itself mutated
    # the shared module-level list, so extra formats accumulated across
    # calls in the same process.
    formats = list(_PERMITTED_FORMATS)
    formats.extend(extra_formats)
    parser.add_option("-f", "--format", action="store", dest="format",
                      default=default_val, help=help_text, type="choice",
                      choices=formats)
def add_no_headers_option(parser, restricted_formats=None, help_msg=None):
    """Add the -h/--no-headers option.

    parser[in]              The parser instance.
    restricted_formats[in]  List of formats supported by this option (only
                            applies to them).
    help_msg[in]            Alternative help message to use, otherwise a
                            default one is used.
    """
    # Build the format-restriction suffix for the help message, if any.
    formats_msg = ""
    if restricted_formats:
        plural = "s" if len(restricted_formats) > 1 else ""
        formats_msg = (" (only applies to format{0}: "
                       "{1})").format(plural, ", ".join(restricted_formats))
    if help_msg:
        help_msg = "{0}{1}.".format(help_msg, formats_msg)
    else:
        help_msg = "do not show column headers{0}.".format(formats_msg)
    parser.add_option("-h", "--no-headers", action="store_true",
                      dest="no_headers", default=False, help=help_msg)
def add_verbosity(parser, quiet=True):
    """Add the -v/--verbose and (optionally) -q/--quiet options.

    parser[in]    the parser instance
    quiet[in]     if True, include the --quiet option (default is True)
    """
    verbose_help = ("control how much information is displayed. "
                    "e.g., -v = verbose, -vv = more verbose, -vvv = debug")
    parser.add_option("-v", "--verbose", action="count", dest="verbosity",
                      help=verbose_help)
    if quiet:
        parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
                          default=False,
                          help="turn off all messages for quiet execution.")
def check_verbosity(options):
    """Warn and clear verbosity when --quiet is also specified.

    options[in]    parsed options; must expose 'quiet' and 'verbosity'
    """
    # --quiet wins: verbose output would defeat its purpose.
    if options.quiet is not None and options.quiet and \
       options.verbosity is not None and options.verbosity > 0:
        # Parenthesized single-argument print is valid in Python 2 and 3.
        print("WARNING: --verbosity is ignored when --quiet is specified.")
        options.verbosity = None
def add_changes_for(parser, default="server1"):
    """Add the --changes-for option.

    parser[in]     the parser instance
    default[in]    default value for the option (default "server1")
    """
    changes_help = ("specify the "
                    "server to show transformations to match the other "
                    "server. For example, to see the transformation for "
                    "transforming server1 to match server2, use "
                    "--changes-for=server1. Valid values are 'server1' or "
                    "'server2'. The default is 'server1'.")
    parser.add_option("--changes-for", action="store", dest="changes_for",
                      type="choice", default=default,
                      choices=['server1', 'server2'], help=changes_help)
def add_reverse(parser):
    """Add the --show-reverse option (dest: reverse).

    parser[in]    the parser instance
    """
    reverse_help = ("produce a transformation report "
                    "containing the SQL statements to transform the object "
                    "definitions specified in reverse. For example if "
                    "--changes-for is set to server1, also generate the "
                    "transformation for server2. Note: the reverse changes "
                    "are annotated and marked as comments.")
    parser.add_option("--show-reverse", action="store_true", dest="reverse",
                      default=False, help=reverse_help)
def add_difftype(parser, allow_sql=False, default="unified"):
    """Add the -d/--difftype option.

    parser[in]       the parser instance
    allow_sql[in]    if True, allow sql as a valid option
                     (default is False)
    default[in]      the default option (default is unified)
    """
    choice_list = ['unified', 'context', 'differ']
    if allow_sql:
        choice_list.append('sql')
    # Bug fix: honor the 'default' parameter (was hard-coded to "unified",
    # silently ignoring the caller's choice) and report the actual default
    # in the help text.
    parser.add_option("-d", "--difftype", action="store", dest="difftype",
                      type="choice", default=default, choices=choice_list,
                      help="display differences in context format in one of "
                           "the following formats: [%s] (default: %s)." %
                           ('|'.join(choice_list), default))
def add_engines(parser):
    """Add the --new-storage-engine and --default-storage-engine options.

    parser[in]    the parser instance
    """
    # Engine to force on all tables, when available on the destination.
    new_engine_help = ("change all "
                       "tables to use this storage engine if storage engine "
                       "exists on the destination.")
    # Fallback engine, used when the original engine is missing.
    def_engine_help = ("change all "
                       "tables to use this storage engine if the original "
                       "storage engine does not exist on the destination.")
    parser.add_option("--new-storage-engine", action="store",
                      dest="new_engine", default=None, help=new_engine_help)
    parser.add_option("--default-storage-engine", action="store",
                      dest="def_engine", default=None, help=def_engine_help)
def check_engine_options(server, new_engine, def_engine,
                         fail=False, quiet=False):
    """Check to see if storage engines specified in options exist.

    This method will check to see if the storage engine in new exists on the
    server. If new_engine is None, the check is skipped. If the storage engine
    does not exist and fail is True, an exception is thrown else if quiet is
    False, a warning message is printed.

    Similarly, def_engine will be checked and if not present and fail is True,
    an exception is thrown else if quiet is False a warning is printed.

    server[in]        server instance to be checked
    new_engine[in]    new storage engine
    def_engine[in]    default storage engine
    fail[in]          If True, issue exception on failure else print warning
                      default = False
    quiet[in]         If True, suppress warning messages (not exceptions)
                      default = False
    """
    def _check_engine(target, message):
        """Verify one engine; raise on failure when 'fail', else warn.

        Fixed: the original helper took an unused 'default' parameter
        that was actually passed the 'quiet' flag; 'fail' and 'quiet'
        are now read from the enclosing scope only.
        """
        if target is not None:
            found = server.has_storage_engine(target)
            if not found and fail:
                raise UtilError(message)
            elif not found and not quiet:
                # Parenthesized print is valid in Python 2 and 3.
                print(message)

    # NOTE(review): the return value is discarded; presumably this call
    # primes an engine cache on the server instance - confirm before
    # removing it.
    server.get_storage_engines()
    message = "WARNING: %s storage engine %s is not supported on the server."
    _check_engine(new_engine, message % ("New", new_engine))
    _check_engine(def_engine, message % ("Default", def_engine))
def add_all(parser, objects):
    """Add the -a/--all option.

    parser[in]     the parser instance
    objects[in]    name of the objects that --all includes
    """
    all_help = "include all {0}".format(objects)
    parser.add_option("-a", "--all", action="store_true", dest="all",
                      default=False, help=all_help)
def check_all(parser, options, args, objects):
    """Issue a parser error when --all is combined with explicit arguments.

    parser[in]     the parser instance
    options[in]    command options
    args[in]       arguments list
    objects[in]    name of the objects for which all includes
    """
    # Truthiness of 'args' is equivalent to len(args) > 0 for lists.
    if options.all and args:
        parser.error("You cannot use the --all option with a list of "
                     "%s." % objects)
def add_locking(parser):
    """Add the --locking option.

    parser[in]    the parser instance
    """
    # Fixed user-facing typo in the help text: "snaphot" -> "snapshot".
    parser.add_option("--locking", action="store", dest="locking",
                      type="choice", default="snapshot",
                      choices=['no-locks', 'lock-all', 'snapshot'],
                      help="choose the lock type for the operation: no-locks "
                           "= do not use any table locks, lock-all = use "
                           "table locks but no transaction and no consistent "
                           "read, snapshot (default): consistent read using "
                           "a single transaction.")
def add_exclude(parser, object_type="objects",
                example1="db1.t1", example2="db1.t% or db%.%"):
    """Add the -x/--exclude option.

    parser[in]       the parser instance
    object_type[in]  name used for the excluded objects in the help text
    example1[in]     example of a specific-name exclusion
    example2[in]     example of a LIKE-pattern exclusion
    """
    exclude_help = ("exclude one or more "
                    "{0} from the operation using either a specific "
                    "name (e.g. {1}), a LIKE pattern (e.g. {2}) or a REGEXP "
                    "search pattern. To use a REGEXP search pattern for all "
                    "exclusions, you must also specify the --regexp option. "
                    "Repeat the --exclude option for multiple exclusions."
                    "".format(object_type, example1, example2))
    parser.add_option("-x", "--exclude", action="append", dest="exclude",
                      type="string", default=None, help=exclude_help)
def check_exclude_pattern(exclude_list, use_regexp):
    """Warn when --exclude patterns look like regexps but --regexp is off.

    Checks each --exclude pattern for special symbols that may be regexp
    symbols while the --regexp option is not specified and prints a
    warning if so.  (Docstring fixed: the parameters are the exclude list
    and the regexp flag, not the parser.)

    exclude_list[in]    list of --exclude patterns (may be None/empty)
    use_regexp[in]      the option to use regexp

    Returns bool - False if a warning was issued, True otherwise.
    """
    # Ignore null lists.
    if not exclude_list:
        return True
    for pattern in exclude_list:
        # Strip LIKE wildcards and quoting; whatever remains should be
        # alphanumeric unless the pattern contains regexp-like symbols.
        test = pattern.replace('_', '').replace('%', '').replace('`', '')
        test = test.replace("'", "").replace('.', '').replace('"', '')
        if len(test) > 0 and not test.isalnum() and not use_regexp:
            # Parenthesized print is valid in Python 2 and 3.
            print("# WARNING: One or more of your --exclude patterns "
                  "contains symbols that could be regexp patterns. You may "
                  "need to include --regexp to ensure your exclude pattern "
                  "is evaluated as REGEXP and not a SQL LIKE expression.")
            return False
    return True
def add_regexp(parser):
    """Add the -G/--basic-regexp/--regexp option (dest: use_regexp).

    parser[in]    the parser instance
    """
    regexp_help = ("use 'REGEXP' "
                   "operator to match pattern. Default is to use 'LIKE'.")
    parser.add_option("-G", "--basic-regexp", "--regexp", dest="use_regexp",
                      action="store_true", default=False, help=regexp_help)
def add_rpl_user(parser):
    """Add the --rpl-user option.

    parser[in]    the parser instance
    """
    rpl_user_help = ("the user and password for the replication "
                     "user requirement, in the form: <user>[:<password>]"
                     " or <login-path>. E.g. rpl:passwd")
    parser.add_option("--rpl-user", action="store", dest="rpl_user",
                      type="string", help=rpl_user_help)
def add_rpl_mode(parser, do_both=True, add_file=True):
    """Add the --rpl and --rpl-file options.

    parser[in]      the parser instance
    do_both[in]     if True, include the "both" value for the --rpl option
                    Default = True
    add_file[in]    if True, add the --rpl-file option
                    Default = True
    """
    rpl_mode_both = ""
    # Bug fix: copy the module-level list. Appending "both" to
    # _PERMITTED_RPL_DUMP itself leaked the value into every subsequent
    # call in the same process.
    rpl_mode_options = list(_PERMITTED_RPL_DUMP)
    if do_both:
        rpl_mode_options.append("both")
        rpl_mode_both = (", and 'both' = include 'master' and 'slave' options "
                         "where applicable")
    parser.add_option("--rpl", "--replication", dest="rpl_mode",
                      action="store", help="include replication information. "
                      "Choices: 'master' = include the CHANGE MASTER command "
                      "using the source server as the master, "
                      "'slave' = include the CHANGE MASTER command for "
                      "the source server's master (only works if the source "
                      "server is a slave){0}.".format(rpl_mode_both),
                      choices=rpl_mode_options)
    if add_file:
        # Fixed help-text typo: "Valid on if" -> "Valid only if".
        parser.add_option("--rpl-file", "--replication-file", dest="rpl_file",
                          action="store", help="path and file name to place "
                          "the replication information generated. Valid only "
                          "if the --rpl option is specified.")
def check_rpl_options(parser, options):
    """Check replication dump options for validity.

    Ensures the optional --rpl-* options are only used when --rpl is
    specified; issues a parser error otherwise.

    parser[in]     the parser instance
    options[in]    command options
    """
    if options.rpl_mode is not None:
        # --rpl given: any companion option is fine.
        return
    errors = []
    # It's Ok if the options do not include --comment-rpl.
    comment_rpl_known = parser.has_option("--comment-rpl")
    if comment_rpl_known and options.rpl_file is not None:
        errors.append("--rpl-file")
    if options.rpl_user is not None:
        errors.append("--rpl-user")
    if comment_rpl_known and options.comment_rpl:
        errors.append("--comment-rpl")
    if errors:
        num_opt_str = "s" if len(errors) > 1 else ""
        parser.error("The %s option%s must be used with the --rpl "
                     "option." % (", ".join(errors), num_opt_str))
def add_discover_slaves_option(parser):
    """Add the --discover-slaves-login option.

    Used to discover the slaves registered on the master, connecting to
    each with the specified login (user and password).

    parser[in]    the parser instance.
    """
    discover_help = ("at startup, query master for all registered "
                     "slaves and use the user name and password specified to "
                     "connect. Supply the user and password in the form "
                     "<user>[:<password>] or <login-path>. For example, "
                     "--discover-slaves-login=joe:secret will use 'joe' as "
                     "the user and 'secret' as the password for each "
                     "discovered slave.")
    parser.add_option("--discover-slaves-login", action="store",
                      dest="discover", default=None, type="string",
                      help=discover_help)
def add_log_option(parser):
    """Add the --log option (dest: log_file).

    Specifies the target file for logging messages from the utility.

    parser[in]    the parser instance.
    """
    log_help = ("specify a log file to use for "
                "logging messages")
    parser.add_option("--log", action="store", dest="log_file", default=None,
                      type="string", help=log_help)
def add_master_option(parser):
    """Add the --master option.

    Specifies the connection string for the server with the master role.

    parser[in]    the parser instance.
    """
    master_help = ("connection information for master "
                   "server in the form: <user>[:<password>]@<host>[:<port>]"
                   "[:<socket>] or <login-path>[:<port>][:<socket>]"
                   " or <config-path>[<[group]>].")
    parser.add_option("--master", action="store", dest="master", default=None,
                      type="string", help=master_help)
def add_slaves_option(parser):
    """Add the --slaves option.

    Specifies a comma-separated list of slave connection strings.

    parser[in]    the parser instance.
    """
    slaves_help = ("connection information for slave servers in "
                   "the form: <user>[:<password>]@<host>[:<port>]"
                   "[:<socket>] or <login-path>[:<port>][:<socket>]"
                   " or <config-path>[<[group]>]. "
                   "List multiple slaves in comma-separated list.")
    parser.add_option("--slaves", action="store", dest="slaves",
                      type="string", default=None, help=slaves_help)
def add_failover_options(parser):
    """Add the common failover options.

    This adds the following options:

      --candidates, --discover-slaves-login, --exec-after, --exec-before,
      --log, --log-age, --master, --max-position, --ping, --seconds-behind,
      --slaves, --timeout, --script-threshold

    parser[in]    the parser instance
    """
    candidates_help = ("connection information for candidate slave servers"
                       " for failover in the form: <user>[:<password>]@<host>[:"
                       "<port>][:<socket>] or <login-path>[:<port>][:<socket>]"
                       " or <config-path>[<[group]>]"
                       " Valid only with failover command. List multiple slaves"
                       " in comma-separated list.")
    parser.add_option("--candidates", action="store", dest="candidates",
                      type="string", default=None, help=candidates_help)

    add_discover_slaves_option(parser)

    parser.add_option("--exec-after", action="store", dest="exec_after",
                      default=None, type="string",
                      help="name of script to "
                           "execute after failover or switchover")
    parser.add_option("--exec-before", action="store", dest="exec_before",
                      default=None, type="string",
                      help="name of script to "
                           "execute before failover or switchover")

    add_log_option(parser)

    log_age_help = ("specify maximum age of log entries in "
                    "days. Entries older than this will be purged on "
                    "startup. Default = 7 days.")
    parser.add_option("--log-age", action="store", dest="log_age", default=7,
                      type="int", help=log_age_help)

    add_master_option(parser)

    max_position_help = ("used to detect slave "
                         "delay. The maximum difference between the master's "
                         "log position and the slave's reported read position "
                         "of the master. A value greater than this means the "
                         "slave is too far behind the master. Default is 0.")
    parser.add_option("--max-position", action="store", dest="max_position",
                      default=0, type="int", help=max_position_help)

    parser.add_option("--ping", action="store", dest="ping", default=None,
                      help="Number of ping attempts for detecting downed "
                           "server.")

    seconds_behind_help = ("used to detect slave "
                           "delay. The maximum number of seconds behind the "
                           "master permitted before slave is considered "
                           "behind the master. Default is 0.")
    parser.add_option("--seconds-behind", action="store", dest="max_delay",
                      default=0, type="int", help=seconds_behind_help)

    add_slaves_option(parser)

    timeout_help = ("maximum timeout in seconds to wait for each "
                    "replication command to complete. For example, timeout "
                    "for slave waiting to catch up to master. "
                    "Default = 300.")
    parser.add_option("--timeout", action="store", dest="timeout", default=300,
                      help=timeout_help)

    threshold_help = ("Value for external scripts to trigger aborting "
                      "the operation if result is greater than or equal to "
                      "the threshold. Default = None (no threshold "
                      "checking).")
    parser.add_option("--script-threshold", action="store", default=None,
                      dest="script_threshold", help=threshold_help)
def check_server_lists(parser, master, slaves):
    """Check that the master is not listed among the slaves.

    parser[in]    the parser instance
    master[in]    master connection string
    slaves[in]    comma-separated slave connection strings (may be None)

    Returns bool - True = master not in slaves; a parser error is issued
    if it appears.
    """
    if slaves:
        # Bug fix: split on every comma. The previous split(',', 1) only
        # produced the first slave plus the unsplit remainder, so a master
        # listed as a middle or last slave went undetected.
        for slave in slaves.split(','):
            if master == slave:
                parser.error("You cannot list the master as a slave.")
    return True
def obj2sql(obj):
    """Convert a Python object to an SQL literal.

    Delegates to the conversion/quoting routines of the database
    connector package.

    obj[in]    Python value to convert

    Returns the SQL representation produced by MySQLConverter.quote().
    """
    converter = MySQLConverter()
    return converter.quote(obj)
def parse_user_password(userpass_values, my_defaults_reader=None,
                        options=None):
    """ This function parses a string with the user/password credentials.

    This function parses the login string and determines the used format,
    i.e. user[:password], config-path or login-path. If the ':' (colon) is
    not in the login string, then it can refer to a config-path, a
    login-path or to a username (without a password). In this case, first
    it is assumed that the specified value is a config-path and the user
    and password are read from the configuration file; secondly it is
    assumed to be a login-path and the function attempts to retrieve the
    associated username and password, in a quiet way (i.e., without raising
    exceptions). If it fails to retrieve the login-path data, then the
    value is assumed to be a username.

    userpass_values[in]     String indicating the user/password credentials.
                            It must be in the form: user[:password] or
                            login-path.
    my_defaults_reader[in]  Instance of MyDefaultsReader to read the
                            information of the login-path from configuration
                            files. By default, the value is None.
    options[in]             Dictionary of options (e.g. basedir), from the
                            used utility. By default, it is set with an
                            empty dictionary. Note: also supports options
                            values from optparse.

    Returns a tuple with the username and password (either may be None).
    """
    if options is None:
        options = {}
    # Split on the first ':' to determine if a login-path is used.
    login_values = userpass_values.split(':', 1)
    if len(login_values) == 1:
        # Format is config-path, login-path or user (without a password):
        # First check if the value is a config-path.
        # The following call also initializes user and passwd with default
        # values in case login_values is not a config-path; these serve as
        # the fallback result for every early return below.
        user, passwd = parse_login_values_config_path(login_values[0],
                                                      quietly=True)
        # Second, assume it's a login-path and quietly try to retrieve the
        # user and password; on success overwrite the values set above, on
        # failure return them unchanged.
        # Check if the login configuration file (.mylogin.cnf) exists.
        if login_values[0] and not my_login_config_exists():
            return user, passwd
        if not my_defaults_reader:
            # Attempt to create the MyDefaultsReader.
            try:
                my_defaults_reader = MyDefaultsReader(options)
            except UtilError:
                # my_print_defaults tool not found: fall back to the
                # config-path values (or the bare username).
                return user, passwd
        elif not my_defaults_reader.tool_path:
            # Try to find the my_print_defaults tool.
            try:
                my_defaults_reader.search_my_print_defaults_tool()
            except UtilError:
                # my_print_defaults tool not found: fall back to the
                # config-path values (or the bare username).
                return user, passwd
        # Check if the my_print_default tool is able to read a login-path
        # from the mylogin configuration file.
        if not my_defaults_reader.check_login_path_support():
            return user, passwd
        # Read and parse the login-path data (i.e., user and password).
        try:
            loginpath_data = my_defaults_reader.get_group_data(login_values[0])
            if loginpath_data:
                # Login-path found: its values override the defaults.
                user = loginpath_data.get('user', None)
                passwd = loginpath_data.get('password', None)
                return user, passwd
            else:
                return user, passwd
        except UtilError:
            # Unable to read the login-path group data: fall back to the
            # config-path values (or the bare username).
            return user, passwd
    elif len(login_values) == 2:
        # Format is user:password; return a tuple with the user and password
        return login_values[0], login_values[1]
    else:
        # NOTE(review): str.split(':', 1) yields at most two items, so this
        # branch appears unreachable; kept as a defensive guard.
        # Invalid user credentials format
        raise FormatError("Unable to parse the specified user credentials "
                          "(accepted formats: <user>[:<password> or "
                          "<login-path>): %s" % userpass_values)
def add_basedir_option(parser):
    """Add the --basedir option.

    parser[in]    the parser instance
    """
    basedir_help = "the base directory for the server"
    parser.add_option("--basedir", action="store", dest="basedir",
                      default=None, type="string", help=basedir_help)
def check_dir_option(parser, opt_value, opt_name, check_access=False,
                     read_only=False):
    """ Check if the specified directory option is valid.

    Verifies that the value names an existing directory and, optionally,
    that the user holds the required access privileges. A parser error
    is issued for an invalid directory.

    parser[in]        Instance of the option parser (optparse).
    opt_value[in]     Value specified for the option.
    opt_name[in]      Option name (e.g., --basedir).
    check_access[in]  Flag specifying if the access privileges need to be
                      checked. By default, False (no access check).
    read_only[in]     Flag indicating if the access required is only for
                      read or read/write. By default, False (read/write
                      access). Note: only used if check_access=True.

    Return the absolute path for the specified directory or None if an
    empty value is specified.
    """
    # Guard clause: nothing to check for an empty value.
    if not opt_value:
        return None
    full_path = get_absolute_path(opt_value)
    if not os.path.isdir(full_path):
        parser.error("The specified path for {0} option is not a "
                     "directory: {1}".format(opt_name, opt_value))
    if check_access:
        mode = os.R_OK if read_only else os.R_OK | os.W_OK
        if not os.access(full_path, mode):
            parser.error("You do not have enough privileges to access the "
                         "folder specified by {0}.".format(opt_name))
    return full_path
def check_script_option(parser, opt_value, check_executable=True):
    """Check if the specified script option is valid.

    Check if the script specified for the option exists and, optionally,
    whether the user has execute privileges for it. An appropriate parser
    error is issued if the script does not exist or is not executable.

    parser[in]            Instance of the option parser (optparse).
    opt_value[in]         Value specified for the option.
    check_executable[in]  If True, verify execute privileges. By default,
                          True (needs to be executable).

    Returns the value given for the option (unchanged), or None if an empty
    value is specified.
    """
    if not opt_value:
        return None
    script_path = os.path.abspath(opt_value)
    if not os.path.isfile(script_path):
        parser.error(EXTERNAL_SCRIPT_DOES_NOT_EXIST.format(path=opt_value))
    if check_executable and not os.access(script_path, os.X_OK):
        parser.error(INSUFFICIENT_FILE_PERMISSIONS.format(
            path=opt_value, permissions='execute'))
    return opt_value
def get_absolute_path(path):
    """Return the normalized, user-expanded, absolute form of *path*."""
    expanded = os.path.expanduser(os.path.normpath(path))
    return os.path.abspath(expanded)
def db_objects_list_to_dictionary(parser, obj_list, option_desc,
                                  db_over_tables=True, sql_mode=''):
    """Process a database object list and convert it to a dictionary.

    Check the qualified name format of the given database objects and
    convert the given list of objects to a dictionary organized by database
    name and sets of specific objects.

    Note: It is assumed that the given object list is obtained from the
    arguments or an option returned by the parser.

    parser[in]          Instance of the used option/arguments parser.
    obj_list[in]        List of objects to process.
    option_desc[in]     Short description of the option for the object list
                        (e.g., "the --exclude option", "the database/table
                        arguments") used in any parsing error.
    db_over_tables[in]  If True, specifying a db alone overrides all
                        occurrences of table objects from that db (e.g., with
                        both db and db.table1 given, db.table1 is ignored).
    sql_mode[in]        SQL mode used to parse and unquote identifiers.

    Returns a dictionary with the objects grouped by database (without
    duplicates). A None value associated to a database entry means that all
    of its objects are to be considered.
    E.g. {'db_name1': set(['table1','table2']), 'db_name2': None}.
    """
    grouped = {}
    for qualified_name in obj_list:
        parsed = parse_object_name(qualified_name, sql_mode)
        if parsed[0] is None:
            parser.error(PARSE_ERR_OBJ_NAME_FORMAT.format(
                obj_name=qualified_name, option=option_desc))
            continue
        db_name, obj = parsed
        # Strip backtick quotes from the identifiers when present.
        if is_quoted_with_backticks(db_name, sql_mode):
            db_name = remove_backtick_quoting(db_name, sql_mode)
        if obj and is_quoted_with_backticks(obj, sql_mode):
            obj = remove_backtick_quoting(obj, sql_mode)
        if obj:
            # A specific object: add it to the database's set, except when
            # the entry is already None (all objects already included).
            if db_name not in grouped:
                grouped[db_name] = set([obj])
            elif grouped[db_name]:
                grouped[db_name].add(obj)
        else:
            # The database was named without a specific object.
            if db_name in grouped and grouped[db_name] and not db_over_tables:
                # Keep the specific objects, but record that the db itself
                # was also named.
                grouped[db_name].add(None)
            elif db_name not in grouped and not db_over_tables:
                grouped[db_name] = set([None])
            else:
                # db_over_tables: the whole database wins over any specific
                # objects previously (or later) specified for it.
                grouped[db_name] = None
    return grouped
def get_ssl_dict(parser_options=None):
    """Return a dictionary with the SSL certificate options.

    parser_options[in]  options instance from the used option/args parser.

    Returns a dictionary keyed by SSL option name (with underscores instead
    of dashes). Options not present on ``parser_options`` are omitted; an
    empty dictionary is returned when no certificate was given.

    Note: parser_options is a Values instance, which has no dict-like
    get() method, hence the dir() membership tests.
    """
    ssl_options = {}
    if parser_options is not None:
        available = dir(parser_options)
        for name in ('ssl_ca', 'ssl_cert', 'ssl_key', 'ssl'):
            if name in available:
                ssl_options[name] = getattr(parser_options, name)
    return ssl_options
def get_value_intervals_list(parser, option_value, option_name, value_name):
    """Get and check the list of values for the given option.

    Convert the string value for the given option to the corresponding list
    of integers and tuples of integers (for intervals). For example, the
    option_value '3,5-8,11' is converted to the list [3, (5, 8), 11].
    A parser error is issued if the used values or format are invalid.

    parser[in]        Instance of the used option/arguments parser.
    option_value[in]  Value specified for the option (e.g., '3,5-8,11').
    option_name[in]   Name of the option (e.g., '--status').
    value_name[in]    Name describing each option value (e.g., 'status').

    Returns a list of integers and tuples of integers (for intervals)
    representing the given option value string.
    """
    # Drop empty entries produced by stray commas.
    entries = [entry for entry in option_value.split(",") if entry]
    if not entries:
        parser.error(PARSE_ERR_OPT_INVALID_VALUE.format(option=option_name,
                                                        value=option_value))
    result = []
    for entry in entries:
        bounds = entry.split('-')
        if len(bounds) == 1:
            # Single value.
            try:
                result.append(int(entry))
            except ValueError:
                parser.error("Invalid {0} value '{1}' (must be a "
                             "non-negative integer).".format(value_name,
                                                             entry))
        elif len(bounds) == 2:
            # Interval: convert lower and higher bounds separately.
            try:
                low = int(bounds[0])
            except ValueError:
                parser.error("Invalid {0} value '{1}' (must be a "
                             "non-negative integer) for interval "
                             "'{2}'.".format(value_name, bounds[0], entry))
            try:
                high = int(bounds[1])
            except ValueError:
                parser.error("Invalid {0} value '{1}' (must be a "
                             "non-negative integer) for interval "
                             "'{2}'.".format(value_name, bounds[1], entry))
            result.append((low, high))
        else:
            # More than one dash: invalid interval format.
            parser.error("Invalid format for {0} interval (a single "
                         "dash must be used): '{1}'.".format(value_name,
                                                             entry))
    return result
def check_date_time(parser, date_value, date_type, allow_days=False):
    """Check the date/time value for the given option.

    Check if the date/time value for the option is valid. The supported
    formats are 'yyyy-mm-ddThh:mm:ss' and 'yyyy-mm-dd'. If allow_days is
    True, an integer value representing a number of days is also accepted.
    A parser error is issued if the date/time value is invalid.

    parser[in]      Instance of the used option/arguments parser.
    date_value[in]  Date/time value specified for the option.
    date_type[in]   Name describing the type of date being checked
                    (e.g., start, end, modified).
    allow_days[in]  If True, the value may also be an integer representing
                    a number of days (> 0).

    Returns the date in the format 'yyyy-mm-ddThh:mm:ss', or an integer
    representing the number of days.
    """
    if allow_days:
        try:
            num_days = int(date_value)
        except ValueError:
            # Not an integer; fall through to the date/time checks.
            num_days = None
        if num_days:
            if num_days < 1:
                parser.error(PARSE_ERR_OPT_INVALID_NUM_DAYS.format(
                    date_type, date_value))
            return num_days
    # A non-empty part after 'T' selects the full date/time format.
    time_part = date_value.partition("T")[2]
    if time_part:
        try:
            parsed = datetime.strptime(date_value, '%Y-%m-%dT%H:%M:%S')
        except ValueError:
            parser.error(PARSE_ERR_OPT_INVALID_DATE_TIME.format(date_type,
                                                                date_value))
    else:
        try:
            parsed = datetime.strptime(date_value, '%Y-%m-%d')
        except ValueError:
            parser.error(PARSE_ERR_OPT_INVALID_DATE.format(date_type,
                                                           date_value))
    return parsed.strftime('%Y-%m-%dT%H:%M:%S')
def check_gtid_set_format(parser, gtid_set):
    """Check the format of the GTID set given for the option.

    Perform some basic checks to verify the syntax of the specified string
    for the GTID set value. A parse error is issued if the format is
    incorrect.

    parser[in]    Instance of the used option/arguments parser.
    gtid_set[in]  GTID set value specified for the option.
    """
    # UUID format: hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh
    # Note: the patterns are anchored with \Z; without it, re.match() accepts
    # a valid prefix followed by garbage (e.g., a UUID with trailing
    # characters, or intervals like '1-2-3' or '1x').
    re_uuid = re.compile(
        r"(?:[a-f]|\d){8}(?:-(?:[a-f]|\d){4}){3}-(?:[a-f]|\d){12}\Z",
        re.IGNORECASE)
    # interval format: n[-n]
    re_interval = re.compile(r"(?:\d+)(?:-\d+)?\Z")
    uuid_sets = gtid_set.split(',')
    for uuid_set in uuid_sets:
        uuid_set_elements = uuid_set.split(':')
        if len(uuid_set_elements) < 2:
            parser.error("Invalid GTID set '{0}' for option --gtid-set, "
                         "missing UUID or interval. Valid format: "
                         "uuid:interval[:interval].".format(uuid_set))
        # Check server UUID format.
        if not re_uuid.match(uuid_set_elements[0]):
            parser.error("Invalid UUID '{0}' for option --gtid-set. Valid "
                         "format: hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh."
                         "".format(uuid_set_elements[0]))
        # Check intervals.
        for interval in uuid_set_elements[1:]:
            if not re_interval.match(interval):
                parser.error("Invalid interval '{0}' for option --gtid-set. "
                             "Valid format: n[-n].".format(interval))
            try:
                start_val, end_val = interval.split('-')
                if int(start_val) >= int(end_val):
                    parser.error(
                        "Invalid interval '{0}' for option --gtid-set. Start "
                        "value must be lower than the end value."
                        "".format(interval))
            except ValueError:
                # Raised by the unpack when the interval holds a single
                # value (no '-'); nothing to compare in that case.
                pass
def check_password_security(options, args, prefix=""):
    """Check command line for passwords and report a warning.

    This method checks all options and arguments for passwords in the form
    ':%@'. If this pattern is found, a warning is issued to stdout and True
    is returned, else False is returned.

    Note: this allows us to make it possible to abort if command-line
    passwords are found (not the default...yet).

    options[in]  list of options
    args[in]     list of arguments
    prefix[in]   (optional) allows prefacing the statement with # or
                 something similar to make the message an in-stream comment

    Returns bool - False = no passwords, True = password found and msg shown
    """
    found = False
    # Scan every option value; list-valued options are checked per element.
    for opt_value in options.__dict__.values():
        if isinstance(opt_value, list):
            for element in opt_value:
                if find_password(element):
                    found = True
        elif find_password(opt_value):
            found = True
    # Scan the positional arguments as well.
    for argument in args:
        if find_password(argument):
            found = True
    if found:
        print("{0}WARNING: Using a password on the command line interface"
              " can be insecure.".format(prefix))
    return found
| gpl-2.0 |
Agana/MyBlogAgain | django/forms/models.py | 152 | 42947 | """
Helper functions for creating Form classes from Django models
and database field objects.
"""
from django.utils.encoding import smart_unicode, force_unicode
from django.utils.datastructures import SortedDict
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _, ugettext
from django.core.exceptions import ValidationError, NON_FIELD_ERRORS, \
FieldError
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from forms import BaseForm, get_declared_fields
from fields import Field, ChoiceField
from widgets import SelectMultiple, HiddenInput, MultipleHiddenInput
from widgets import media_property
from formsets import BaseFormSet, formset_factory
# Public API of this module.
__all__ = (
    'ModelForm', 'BaseModelForm', 'model_to_dict', 'fields_for_model',
    'save_instance', 'ModelChoiceField', 'ModelMultipleChoiceField',
)
def construct_instance(form, instance, fields=None, exclude=None):
    """
    Constructs and returns a model instance from the bound ``form``'s
    ``cleaned_data``, but does not save the returned instance to the
    database.
    """
    from django.db import models
    cleaned_data = form.cleaned_data
    deferred_file_fields = []
    for field in instance._meta.fields:
        skip = (not field.editable
                or isinstance(field, models.AutoField)
                or field.name not in cleaned_data
                or (fields is not None and field.name not in fields)
                or (exclude and field.name in exclude))
        if skip:
            continue
        if isinstance(field, models.FileField):
            # Save file fields last so a callable upload_to can rely on the
            # values already assigned from the other fields.
            deferred_file_fields.append(field)
        else:
            field.save_form_data(instance, cleaned_data[field.name])
    for field in deferred_file_fields:
        field.save_form_data(instance, cleaned_data[field.name])
    return instance
def save_instance(form, instance, fields=None, fail_message='saved',
                  commit=True, exclude=None, construct=True):
    """
    Saves bound Form ``form``'s cleaned_data into model instance
    ``instance``.

    If commit=True, then the changes to ``instance`` will be saved to the
    database. Returns ``instance``.

    If construct=False, assume ``instance`` has already been constructed
    and just needs to be saved.
    """
    if construct:
        instance = construct_instance(form, instance, fields, exclude)
    opts = instance._meta
    if form.errors:
        raise ValueError("The %s could not be %s because the data didn't"
                         " validate." % (opts.object_name, fail_message))

    def save_m2m():
        # Persist many-to-many data; only valid once the instance is saved.
        cleaned_data = form.cleaned_data
        for m2m_field in opts.many_to_many:
            if fields and m2m_field.name not in fields:
                continue
            if m2m_field.name in cleaned_data:
                m2m_field.save_form_data(instance,
                                         cleaned_data[m2m_field.name])

    if commit:
        # If we are committing, save the instance and the m2m data
        # immediately.
        instance.save()
        save_m2m()
    else:
        # Not committing: expose save_m2m on the form so the caller can
        # persist the m2m data after saving the instance itself.
        form.save_m2m = save_m2m
    return instance
# ModelForms #################################################################
def model_to_dict(instance, fields=None, exclude=None):
    """
    Returns a dict containing the data in ``instance`` suitable for passing
    as a Form's ``initial`` keyword argument.

    ``fields`` is an optional list of field names. If provided, only the
    named fields will be included in the returned dict.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned dict, even if they are listed
    in the ``fields`` argument.
    """
    # avoid a circular import
    from django.db.models.fields.related import ManyToManyField
    opts = instance._meta
    data = {}
    for field in opts.fields + opts.many_to_many:
        if not field.editable:
            continue
        if fields and field.name not in fields:
            continue
        if exclude and field.name in exclude:
            continue
        if not isinstance(field, ManyToManyField):
            data[field.name] = field.value_from_object(instance)
        elif instance.pk is None:
            # If the object doesn't have a primary key yet,
            # f.value_from_object would raise, so just use an empty list
            # for its m2m fields.
            data[field.name] = []
        else:
            # MultipleChoiceWidget needs a list of pks, not object
            # instances.
            data[field.name] = [related.pk for related
                                in field.value_from_object(instance)]
    return data
def fields_for_model(model, fields=None, exclude=None, widgets=None,
                     formfield_callback=None):
    """
    Returns a ``SortedDict`` containing form fields for the given model.

    ``fields`` is an optional list of field names. If provided, only the
    named fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are
    listed in the ``fields`` argument.

    ``widgets`` is an optional dict mapping field names to widget
    overrides; ``formfield_callback``, when given, is called to produce
    each form field instead of the model field's own formfield().
    """
    form_fields = []
    ignored = []
    meta = model._meta
    for model_field in meta.fields + meta.many_to_many:
        if not model_field.editable:
            continue
        if fields is not None and model_field.name not in fields:
            continue
        if exclude and model_field.name in exclude:
            continue
        kwargs = {}
        if widgets and model_field.name in widgets:
            kwargs['widget'] = widgets[model_field.name]
        if formfield_callback is None:
            formfield = model_field.formfield(**kwargs)
        elif not callable(formfield_callback):
            raise TypeError('formfield_callback must be a function or callable')
        else:
            formfield = formfield_callback(model_field, **kwargs)
        if formfield:
            form_fields.append((model_field.name, formfield))
        else:
            # The model field declined to produce a form field.
            ignored.append(model_field.name)
    field_dict = SortedDict(form_fields)
    if fields:
        # Re-order per the explicit ``fields`` list, dropping ignored and
        # excluded names.
        field_dict = SortedDict(
            [(name, field_dict.get(name)) for name in fields
             if ((not exclude) or (exclude and name not in exclude))
             and (name not in ignored)]
        )
    return field_dict
class ModelFormOptions(object):
    """Collects the ModelForm configuration from an inner ``Meta`` class.

    Each recognized attribute defaults to None when absent (or when no
    options object is given at all).
    """
    def __init__(self, options=None):
        for name in ('model', 'fields', 'exclude', 'widgets'):
            setattr(self, name, getattr(options, name, None))
class ModelFormMetaclass(type):
    """
    Metaclass for ModelForm subclasses: builds ``base_fields`` from the
    model declared on the inner ``Meta`` class, merged with any explicitly
    declared form fields.
    """
    def __new__(cls, name, bases, attrs):
        # Pop the callback so it does not remain as a class attribute.
        formfield_callback = attrs.pop('formfield_callback', None)
        try:
            parents = [b for b in bases if issubclass(b, ModelForm)]
        except NameError:
            # We are defining ModelForm itself.
            parents = None
        declared_fields = get_declared_fields(bases, attrs, False)
        new_class = super(ModelFormMetaclass, cls).__new__(cls, name, bases,
                                                           attrs)
        if not parents:
            # ModelForm itself (or an unrelated base): nothing to build.
            return new_class

        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        opts = new_class._meta = ModelFormOptions(getattr(new_class, 'Meta', None))
        if opts.model:
            # If a model is defined, extract form fields from it.
            fields = fields_for_model(opts.model, opts.fields,
                                      opts.exclude, opts.widgets, formfield_callback)
            # make sure opts.fields doesn't specify an invalid field
            none_model_fields = [k for k, v in fields.iteritems() if not v]
            missing_fields = set(none_model_fields) - \
                             set(declared_fields.keys())
            if missing_fields:
                message = 'Unknown field(s) (%s) specified for %s'
                message = message % (', '.join(missing_fields),
                                     opts.model.__name__)
                raise FieldError(message)
            # Override default model fields with any custom declared ones
            # (plus, include all the other declared fields).
            fields.update(declared_fields)
        else:
            # No model declared: behave like a plain Form with only the
            # declared fields.
            fields = declared_fields
        new_class.declared_fields = declared_fields
        new_class.base_fields = fields
        return new_class
class BaseModelForm(BaseForm):
    """
    Form bound to a model instance: initial data comes from the instance,
    cleaning runs the model's validation, and save() persists the instance.
    """
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=':',
                 empty_permitted=False, instance=None):
        opts = self._meta
        if instance is None:
            if opts.model is None:
                raise ValueError('ModelForm has no model class specified.')
            # if we didn't get an instance, instantiate a new one
            self.instance = opts.model()
            object_data = {}
        else:
            self.instance = instance
            object_data = model_to_dict(instance, opts.fields, opts.exclude)
        # if initial was provided, it should override the values from instance
        if initial is not None:
            object_data.update(initial)
        # self._validate_unique will be set to True by BaseModelForm.clean().
        # It is False by default so overriding self.clean() and failing to call
        # super will stop validate_unique from being called.
        self._validate_unique = False
        super(BaseModelForm, self).__init__(data, files, auto_id, prefix, object_data,
                                            error_class, label_suffix, empty_permitted)

    def _update_errors(self, message_dict):
        # Merge a ValidationError's message_dict into self._errors, moving
        # field errors onto their fields and the rest to NON_FIELD_ERRORS.
        for k, v in message_dict.items():
            if k != NON_FIELD_ERRORS:
                self._errors.setdefault(k, self.error_class()).extend(v)
                # Remove the data from the cleaned_data dict since it was invalid
                if k in self.cleaned_data:
                    del self.cleaned_data[k]
        if NON_FIELD_ERRORS in message_dict:
            messages = message_dict[NON_FIELD_ERRORS]
            self._errors.setdefault(NON_FIELD_ERRORS, self.error_class()).extend(messages)

    def _get_validation_exclusions(self):
        """
        For backwards-compatibility, several types of fields need to be
        excluded from model validation. See the following tickets for
        details: #12507, #12521, #12553
        """
        exclude = []
        # Build up a list of fields that should be excluded from model field
        # validation and unique checks.
        for f in self.instance._meta.fields:
            field = f.name
            # Exclude fields that aren't on the form. The developer may be
            # adding these values to the model after form validation.
            if field not in self.fields:
                exclude.append(f.name)

            # Don't perform model validation on fields that were defined
            # manually on the form and excluded via the ModelForm's Meta
            # class. See #12901.
            elif self._meta.fields and field not in self._meta.fields:
                exclude.append(f.name)
            elif self._meta.exclude and field in self._meta.exclude:
                exclude.append(f.name)

            # Exclude fields that failed form validation. There's no need for
            # the model fields to validate them as well.
            elif field in self._errors.keys():
                exclude.append(f.name)

            # Exclude empty fields that are not required by the form, if the
            # underlying model field is required. This keeps the model field
            # from raising a required error. Note: don't exclude the field from
            # validaton if the model field allows blanks. If it does, the blank
            # value may be included in a unique check, so cannot be excluded
            # from validation.
            else:
                form_field = self.fields[field]
                field_value = self.cleaned_data.get(field, None)
                if not f.blank and not form_field.required and field_value in EMPTY_VALUES:
                    exclude.append(f.name)
        return exclude

    def clean(self):
        # Enable uniqueness validation; see the note in __init__.
        self._validate_unique = True
        return self.cleaned_data

    def _post_clean(self):
        # Runs after field cleaning: build the instance and run the model's
        # own validation (clean_fields, clean, and optionally unique checks).
        opts = self._meta
        # Update the model instance with self.cleaned_data.
        self.instance = construct_instance(self, self.instance, opts.fields, opts.exclude)
        exclude = self._get_validation_exclusions()

        # Foreign Keys being used to represent inline relationships
        # are excluded from basic field value validation. This is for two
        # reasons: firstly, the value may not be supplied (#12507; the
        # case of providing new values to the admin); secondly the
        # object being referred to may not yet fully exist (#12749).
        # However, these fields *must* be included in uniqueness checks,
        # so this can't be part of _get_validation_exclusions().
        for f_name, field in self.fields.items():
            if isinstance(field, InlineForeignKeyField):
                exclude.append(f_name)

        # Clean the model instance's fields.
        try:
            self.instance.clean_fields(exclude=exclude)
        except ValidationError, e:
            self._update_errors(e.message_dict)

        # Call the model instance's clean method.
        try:
            self.instance.clean()
        except ValidationError, e:
            self._update_errors({NON_FIELD_ERRORS: e.messages})

        # Validate uniqueness if needed.
        if self._validate_unique:
            self.validate_unique()

    def validate_unique(self):
        """
        Calls the instance's validate_unique() method and updates the form's
        validation errors if any were raised.
        """
        exclude = self._get_validation_exclusions()
        try:
            self.instance.validate_unique(exclude=exclude)
        except ValidationError, e:
            self._update_errors(e.message_dict)

    def save(self, commit=True):
        """
        Saves this ``form``'s cleaned_data into model instance
        ``self.instance``.

        If commit=True, then the changes to ``instance`` will be saved to
        the database. Returns ``instance``.
        """
        if self.instance.pk is None:
            fail_message = 'created'
        else:
            fail_message = 'changed'
        return save_instance(self, self.instance, self._meta.fields,
                             fail_message, commit, construct=False)

    save.alters_data = True
class ModelForm(BaseModelForm):
    """A form whose fields are derived from the model named on its inner
    ``Meta`` class (built by ModelFormMetaclass)."""
    __metaclass__ = ModelFormMetaclass
def modelform_factory(model, form=ModelForm, fields=None, exclude=None,
                      formfield_callback=None):
    """Return a ModelForm subclass for the given ``model``.

    ``fields``/``exclude`` restrict the generated form fields and
    ``formfield_callback``, when given, produces each form field.
    """
    # Create the inner Meta class. FIXME: ideally, we should be able to
    # construct a ModelForm without creating and passing in a temporary
    # inner class.

    # Build up a list of attributes that the Meta object will have.
    meta_attrs = {'model': model}
    if fields is not None:
        meta_attrs['fields'] = fields
    if exclude is not None:
        meta_attrs['exclude'] = exclude

    # If parent form class already has an inner Meta, the Meta we're
    # creating needs to inherit from the parent's inner meta.
    meta_bases = (form.Meta, object) if hasattr(form, 'Meta') else (object,)
    meta_class = type('Meta', meta_bases, meta_attrs)

    # Give this new form class a reasonable name and let the metaclass do
    # the field construction.
    return ModelFormMetaclass(model.__name__ + 'Form', (form,),
                              {'Meta': meta_class,
                               'formfield_callback': formfield_callback})
# ModelFormSets ##############################################################
class BaseModelFormSet(BaseFormSet):
    """
    A ``FormSet`` for editing a queryset and/or adding new objects to it.
    """
    model = None

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 queryset=None, **kwargs):
        self.queryset = queryset
        defaults = {'data': data, 'files': files, 'auto_id': auto_id, 'prefix': prefix}
        defaults.update(kwargs)
        super(BaseModelFormSet, self).__init__(**defaults)

    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if not (self.data or self.files):
            return len(self.get_queryset())
        return super(BaseModelFormSet, self).initial_form_count()

    def _existing_object(self, pk):
        # Lazily build a pk -> instance map for the queryset so repeated
        # lookups do not re-query the database.
        if not hasattr(self, '_object_dict'):
            self._object_dict = dict([(o.pk, o) for o in self.get_queryset()])
        return self._object_dict.get(pk)

    def _construct_form(self, i, **kwargs):
        # For bound initial forms, resolve the submitted pk to the existing
        # model instance and pass it to the form.
        if self.is_bound and i < self.initial_form_count():
            # Import goes here instead of module-level because importing
            # django.db has side effects.
            from django.db import connections
            pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
            pk = self.data[pk_key]
            pk_field = self.model._meta.pk
            pk = pk_field.get_db_prep_lookup('exact', pk,
                connection=connections[self.get_queryset().db])
            if isinstance(pk, list):
                pk = pk[0]
            kwargs['instance'] = self._existing_object(pk)
        if i < self.initial_form_count() and not kwargs.get('instance'):
            kwargs['instance'] = self.get_queryset()[i]
        return super(BaseModelFormSet, self)._construct_form(i, **kwargs)

    def get_queryset(self):
        """Returns the (cached) queryset this formset edits."""
        if not hasattr(self, '_queryset'):
            if self.queryset is not None:
                qs = self.queryset
            else:
                qs = self.model._default_manager.get_query_set()

            # If the queryset isn't already ordered we need to add an
            # artificial ordering here to make sure that all formsets
            # constructed from this queryset have the same form order.
            if not qs.ordered:
                qs = qs.order_by(self.model._meta.pk.name)

            # Removed queryset limiting here. As per discussion re: #13023
            # on django-dev, max_num should not prevent existing
            # related objects/inlines from being displayed.
            self._queryset = qs
        return self._queryset

    def save_new(self, form, commit=True):
        """Saves and returns a new model instance for the given form."""
        return form.save(commit=commit)

    def save_existing(self, form, instance, commit=True):
        """Saves and returns an existing model instance for the given form."""
        return form.save(commit=commit)

    def save(self, commit=True):
        """Saves model instances for every form, adding and changing instances
        as necessary, and returns the list of instances.
        """
        if not commit:
            self.saved_forms = []
            def save_m2m():
                for form in self.saved_forms:
                    form.save_m2m()
            self.save_m2m = save_m2m
        return self.save_existing_objects(commit) + self.save_new_objects(commit)

    def clean(self):
        self.validate_unique()

    def validate_unique(self):
        """Checks uniqueness constraints across all forms in the formset."""
        # Collect unique_checks and date_checks to run from all the forms.
        all_unique_checks = set()
        all_date_checks = set()
        for form in self.forms:
            if not hasattr(form, 'cleaned_data'):
                continue
            exclude = form._get_validation_exclusions()
            unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
            all_unique_checks = all_unique_checks.union(set(unique_checks))
            all_date_checks = all_date_checks.union(set(date_checks))

        errors = []
        # Do each of the unique checks (unique and unique_together)
        for uclass, unique_check in all_unique_checks:
            seen_data = set()
            for form in self.forms:
                # if the form doesn't have cleaned_data then we ignore it,
                # it's already invalid
                if not hasattr(form, "cleaned_data"):
                    continue
                # get data for each field of each of unique_check
                row_data = tuple([form.cleaned_data[field] for field in unique_check if field in form.cleaned_data])
                if row_data and not None in row_data:
                    # if we've already seen it then we have a uniqueness failure
                    if row_data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_unique_error_message(unique_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        del form.cleaned_data
                        break
                    # mark the data as seen
                    seen_data.add(row_data)
        # iterate over each of the date checks now
        for date_check in all_date_checks:
            seen_data = set()
            uclass, lookup, field, unique_for = date_check
            for form in self.forms:
                # if the form doesn't have cleaned_data then we ignore it,
                # it's already invalid.
                # Bug fix: this previously tested hasattr(self, ...) - i.e.
                # the formset itself - so invalid forms were never skipped
                # and accessing their cleaned_data below could fail.
                if not hasattr(form, 'cleaned_data'):
                    continue
                # see if we have data for both fields
                if (form.cleaned_data and form.cleaned_data[field] is not None
                    and form.cleaned_data[unique_for] is not None):
                    # if it's a date lookup we need to get the data for all the fields
                    if lookup == 'date':
                        date = form.cleaned_data[unique_for]
                        date_data = (date.year, date.month, date.day)
                    # otherwise it's just the attribute on the date/datetime
                    # object
                    else:
                        date_data = (getattr(form.cleaned_data[unique_for], lookup),)
                    data = (form.cleaned_data[field],) + date_data
                    # if we've already seen it then we have a uniqueness failure
                    if data in seen_data:
                        # poke error messages into the right places and mark
                        # the form as invalid
                        errors.append(self.get_date_error_message(date_check))
                        form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
                        del form.cleaned_data
                        break
                    seen_data.add(data)
        if errors:
            raise ValidationError(errors)

    def get_unique_error_message(self, unique_check):
        """Returns the user-facing message for a duplicate-data failure."""
        if len(unique_check) == 1:
            return ugettext("Please correct the duplicate data for %(field)s.") % {
                "field": unique_check[0],
            }
        else:
            return ugettext("Please correct the duplicate data for %(field)s, "
                "which must be unique.") % {
                "field": get_text_list(unique_check, unicode(_("and"))),
            }

    def get_date_error_message(self, date_check):
        """Returns the user-facing message for a unique_for_date failure."""
        return ugettext("Please correct the duplicate data for %(field_name)s "
            "which must be unique for the %(lookup)s in %(date_field)s.") % {
            'field_name': date_check[2],
            'date_field': date_check[3],
            'lookup': unicode(date_check[1]),
        }

    def get_form_error(self):
        return ugettext("Please correct the duplicate values below.")

    def save_existing_objects(self, commit=True):
        """Saves changed forms' instances; deletes forms marked for deletion."""
        self.changed_objects = []
        self.deleted_objects = []
        if not self.get_queryset():
            return []

        saved_instances = []
        for form in self.initial_forms:
            pk_name = self._pk_field.name
            raw_pk_value = form._raw_value(pk_name)

            # clean() for different types of PK fields can sometimes return
            # the model instance, and sometimes the PK. Handle either.
            pk_value = form.fields[pk_name].clean(raw_pk_value)
            pk_value = getattr(pk_value, 'pk', pk_value)

            obj = self._existing_object(pk_value)
            if self.can_delete and self._should_delete_form(form):
                self.deleted_objects.append(obj)
                obj.delete()
                continue
            if form.has_changed():
                self.changed_objects.append((obj, form.changed_data))
                saved_instances.append(self.save_existing(form, obj, commit=commit))
                if not commit:
                    self.saved_forms.append(form)
        return saved_instances

    def save_new_objects(self, commit=True):
        """Saves instances for the changed extra forms and returns them."""
        self.new_objects = []
        for form in self.extra_forms:
            if not form.has_changed():
                continue
            # If someone has marked an add form for deletion, don't save the
            # object.
            if self.can_delete and self._should_delete_form(form):
                continue
            self.new_objects.append(self.save_new(form, commit=commit))
            if not commit:
                self.saved_forms.append(form)
        return self.new_objects

    def add_fields(self, form, index):
        """Add a hidden field for the object's primary key."""
        from django.db.models import AutoField, OneToOneField, ForeignKey
        self._pk_field = pk = self.model._meta.pk
        # If a pk isn't editable, then it won't be on the form, so we need to
        # add it here so we can tell which object is which when we get the
        # data back. Generally, pk.editable should be false, but for some
        # reason, auto_created pk fields and AutoField's editable attribute is
        # True, so check for that as well.
        def pk_is_not_editable(pk):
            return ((not pk.editable) or (pk.auto_created or isinstance(pk, AutoField))
                or (pk.rel and pk.rel.parent_link and pk_is_not_editable(pk.rel.to._meta.pk)))
        if pk_is_not_editable(pk) or pk.name not in form.fields:
            if form.is_bound:
                pk_value = form.instance.pk
            else:
                try:
                    if index is not None:
                        pk_value = self.get_queryset()[index].pk
                    else:
                        pk_value = None
                except IndexError:
                    pk_value = None
            if isinstance(pk, OneToOneField) or isinstance(pk, ForeignKey):
                qs = pk.rel.to._default_manager.get_query_set()
            else:
                qs = self.model._default_manager.get_query_set()
            qs = qs.using(form.instance._state.db)
            form.fields[self._pk_field.name] = ModelChoiceField(qs, initial=pk_value, required=False, widget=HiddenInput)
        super(BaseModelFormSet, self).add_fields(form, index)
def modelformset_factory(model, form=ModelForm, formfield_callback=None,
                         formset=BaseModelFormSet,
                         extra=1, can_delete=False, can_order=False,
                         max_num=None, fields=None, exclude=None):
    """
    Returns a FormSet class for the given Django model class.
    """
    # Build the per-row ModelForm first, then wrap it in a formset class.
    model_form = modelform_factory(model, form=form, fields=fields,
                                   exclude=exclude,
                                   formfield_callback=formfield_callback)
    formset_class = formset_factory(model_form, formset, extra=extra,
                                    max_num=max_num, can_order=can_order,
                                    can_delete=can_delete)
    formset_class.model = model
    return formset_class
# InlineFormSets #############################################################
class BaseInlineFormSet(BaseModelFormSet):
    """A formset for child objects related to a parent."""
    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None):
        from django.db.models.fields.related import RelatedObject
        # With no parent given, work against an unsaved blank parent instance.
        if instance is None:
            self.instance = self.fk.rel.to()
        else:
            self.instance = instance
        self.save_as_new = save_as_new
        # is there a better way to get the object descriptor?
        self.rel_name = RelatedObject(self.fk.rel.to, self.model, self.fk).get_accessor_name()
        if queryset is None:
            queryset = self.model._default_manager
        # Restrict the formset to children pointing at the parent instance.
        qs = queryset.filter(**{self.fk.name: self.instance})
        super(BaseInlineFormSet, self).__init__(data, files, prefix=prefix,
                                                queryset=qs)
    def initial_form_count(self):
        # "Save as new" treats every form as an add form, so none are initial.
        if self.save_as_new:
            return 0
        return super(BaseInlineFormSet, self).initial_form_count()
    def _construct_form(self, i, **kwargs):
        form = super(BaseInlineFormSet, self)._construct_form(i, **kwargs)
        if self.save_as_new:
            # Remove the primary key from the form's data, we are only
            # creating new instances
            form.data[form.add_prefix(self._pk_field.name)] = None
            # Remove the foreign key from the form's data
            form.data[form.add_prefix(self.fk.name)] = None
        # Set the fk value here so that the form can do it's validation.
        setattr(form.instance, self.fk.get_attname(), self.instance.pk)
        return form
    #@classmethod
    def get_default_prefix(cls):
        # Derive the prefix from the reverse accessor name of the relation.
        from django.db.models.fields.related import RelatedObject
        return RelatedObject(cls.fk.rel.to, cls.model, cls.fk).get_accessor_name().replace('+','')
    get_default_prefix = classmethod(get_default_prefix)
    def save_new(self, form, commit=True):
        # Use commit=False so we can assign the parent key afterwards, then
        # save the object.
        obj = form.save(commit=False)
        pk_value = getattr(self.instance, self.fk.rel.field_name)
        setattr(obj, self.fk.get_attname(), getattr(pk_value, 'pk', pk_value))
        if commit:
            obj.save()
        # form.save_m2m() can be called via the formset later on if commit=False
        if commit and hasattr(form, 'save_m2m'):
            form.save_m2m()
        return obj
    def add_fields(self, form, index):
        super(BaseInlineFormSet, self).add_fields(form, index)
        if self._pk_field == self.fk:
            # The FK doubles as the primary key (e.g. a parent-link pk).
            name = self._pk_field.name
            kwargs = {'pk_field': True}
        else:
            # The foreign key field might not be on the form, so we poke at the
            # Model field to get the label, since we need that for error messages.
            name = self.fk.name
            kwargs = {
                'label': getattr(form.fields.get(name), 'label', capfirst(self.fk.verbose_name))
            }
            if self.fk.rel.field_name != self.fk.rel.to._meta.pk.name:
                kwargs['to_field'] = self.fk.rel.field_name
        form.fields[name] = InlineForeignKeyField(self.instance, **kwargs)
        # Add the generated field to form._meta.fields if it's defined to make
        # sure validation isn't skipped on that field.
        if form._meta.fields:
            if isinstance(form._meta.fields, tuple):
                form._meta.fields = list(form._meta.fields)
            form._meta.fields.append(self.fk.name)
    def get_unique_error_message(self, unique_check):
        # The FK is implied by the parent instance, so omit it from messages.
        unique_check = [field for field in unique_check if field != self.fk.name]
        return super(BaseInlineFormSet, self).get_unique_error_message(unique_check)
def _get_foreign_key(parent_model, model, fk_name=None, can_fail=False):
    """
    Find and return the ForeignKey from model to parent_model.

    If fk_name is provided, assume it is the name of the ForeignKey field.
    Unless can_fail is True, an exception is raised if there is no ForeignKey
    from model to parent_model; with can_fail=True, None is returned instead.
    """
    # avoid circular import
    from django.db.models import ForeignKey

    def points_at_parent(field):
        # True when the field targets parent_model itself or one of its bases.
        return (field.rel.to == parent_model or
                field.rel.to in parent_model._meta.get_parent_list())

    opts = model._meta
    if fk_name:
        named = [f for f in opts.fields if f.name == fk_name]
        if len(named) == 0:
            raise Exception("%s has no field named '%s'" % (model, fk_name))
        if len(named) == 1:
            fk = named[0]
            if not isinstance(fk, ForeignKey) or not points_at_parent(fk):
                raise Exception("fk_name '%s' is not a ForeignKey to %s" % (fk_name, parent_model))
        return fk
    # Try to discover what the ForeignKey from model to parent_model is
    candidates = [f for f in opts.fields
                  if isinstance(f, ForeignKey) and points_at_parent(f)]
    if len(candidates) == 1:
        return candidates[0]
    if len(candidates) == 0:
        if can_fail:
            return
        raise Exception("%s has no ForeignKey to %s" % (model, parent_model))
    raise Exception("%s has more than 1 ForeignKey to %s" % (model, parent_model))
def inlineformset_factory(parent_model, model, form=ModelForm,
                          formset=BaseInlineFormSet, fk_name=None,
                          fields=None, exclude=None,
                          extra=3, can_order=False, can_delete=True, max_num=None,
                          formfield_callback=None):
    """
    Returns an ``InlineFormSet`` for the given kwargs.

    You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
    # A unique FK means at most one child row, so cap the formset at one form.
    if fk.unique:
        max_num = 1
    FormSet = modelformset_factory(model,
                                   form=form,
                                   formfield_callback=formfield_callback,
                                   formset=formset,
                                   extra=extra,
                                   can_delete=can_delete,
                                   can_order=can_order,
                                   fields=fields,
                                   exclude=exclude,
                                   max_num=max_num)
    FormSet.fk = fk
    return FormSet
# Fields #####################################################################
class InlineForeignKeyHiddenInput(HiddenInput):
    """Hidden input whose value never counts as changed form data."""
    def _has_changed(self, initial, data):
        # The inline FK is fixed by the parent instance, so it can never
        # legitimately change.
        return False
class InlineForeignKeyField(Field):
    """
    A basic integer field that deals with validating the given value to a
    given parent instance in an inline.
    """
    default_error_messages = {
        'invalid_choice': _(u'The inline foreign key did not match the parent instance primary key.'),
    }
    def __init__(self, parent_instance, *args, **kwargs):
        self.parent_instance = parent_instance
        # pk_field: this field is the child's primary key as well as its FK.
        self.pk_field = kwargs.pop("pk_field", False)
        # to_field: name of the parent attribute the FK targets (if not pk).
        self.to_field = kwargs.pop("to_field", None)
        if self.parent_instance is not None:
            # Pre-fill with the parent's key so the hidden input round-trips.
            if self.to_field:
                kwargs["initial"] = getattr(self.parent_instance, self.to_field)
            else:
                kwargs["initial"] = self.parent_instance.pk
        kwargs["required"] = False
        kwargs["widget"] = InlineForeignKeyHiddenInput
        super(InlineForeignKeyField, self).__init__(*args, **kwargs)
    def clean(self, value):
        if value in EMPTY_VALUES:
            if self.pk_field:
                return None
            # if there is no value act as we did before.
            return self.parent_instance
        # ensure the we compare the values as equal types.
        if self.to_field:
            orig = getattr(self.parent_instance, self.to_field)
        else:
            orig = self.parent_instance.pk
        if force_unicode(value) != force_unicode(orig):
            raise ValidationError(self.error_messages['invalid_choice'])
        return self.parent_instance
class ModelChoiceIterator(object):
    """Iterates a ModelChoiceField's queryset as (value, label) choice pairs."""
    def __init__(self, field):
        self.field = field
        self.queryset = field.queryset
    def __iter__(self):
        # The empty label (e.g. "---------") is offered first when present.
        if self.field.empty_label is not None:
            yield (u"", self.field.empty_label)
        if self.field.cache_choices:
            # Build the choice list once and replay it on later iterations.
            if self.field.choice_cache is None:
                self.field.choice_cache = [
                    self.choice(obj) for obj in self.queryset.all()
                ]
            for choice in self.field.choice_cache:
                yield choice
        else:
            for obj in self.queryset.all():
                yield self.choice(obj)
    def __len__(self):
        return len(self.queryset)
    def choice(self, obj):
        # (form value, human-readable label)
        return (self.field.prepare_value(obj), self.field.label_from_instance(obj))
class ModelChoiceField(ChoiceField):
    """A ChoiceField whose choices are a model QuerySet."""
    # This class is a subclass of ChoiceField for purity, but it doesn't
    # actually use any of ChoiceField's implementation.
    default_error_messages = {
        'invalid_choice': _(u'Select a valid choice. That choice is not one of'
                            u' the available choices.'),
    }
    def __init__(self, queryset, empty_label=u"---------", cache_choices=False,
                 required=True, widget=None, label=None, initial=None,
                 help_text=None, to_field_name=None, *args, **kwargs):
        # A required field with an initial value never needs the empty choice.
        if required and (initial is not None):
            self.empty_label = None
        else:
            self.empty_label = empty_label
        self.cache_choices = cache_choices
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        Field.__init__(self, required, widget, label, initial, help_text,
                       *args, **kwargs)
        self.queryset = queryset
        self.choice_cache = None
        self.to_field_name = to_field_name
    def __deepcopy__(self, memo):
        result = super(ChoiceField, self).__deepcopy__(memo)
        # Need to force a new ModelChoiceIterator to be created, bug #11183
        result.queryset = result.queryset
        return result
    def _get_queryset(self):
        return self._queryset
    def _set_queryset(self, queryset):
        self._queryset = queryset
        # Refresh the widget's choices so it reflects the new queryset.
        self.widget.choices = self.choices
    queryset = property(_get_queryset, _set_queryset)
    # this method will be used to create object labels by the QuerySetIterator.
    # Override it to customize the label.
    def label_from_instance(self, obj):
        """
        This method is used to convert objects into strings; it's used to
        generate the labels for the choices presented by this object. Subclasses
        can override this method to customize the display of the choices.
        """
        return smart_unicode(obj)
    def _get_choices(self):
        # If self._choices is set, then somebody must have manually set
        # the property self.choices. In this case, just return self._choices.
        if hasattr(self, '_choices'):
            return self._choices
        # Otherwise, execute the QuerySet in self.queryset to determine the
        # choices dynamically. Return a fresh ModelChoiceIterator that has not been
        # consumed. Note that we're instantiating a new ModelChoiceIterator *each*
        # time _get_choices() is called (and, thus, each time self.choices is
        # accessed) so that we can ensure the QuerySet has not been consumed. This
        # construct might look complicated but it allows for lazy evaluation of
        # the queryset.
        return ModelChoiceIterator(self)
    choices = property(_get_choices, ChoiceField._set_choices)
    def prepare_value(self, value):
        # Model instances are represented by their to_field value or their pk.
        if hasattr(value, '_meta'):
            if self.to_field_name:
                return value.serializable_value(self.to_field_name)
            else:
                return value.pk
        return super(ModelChoiceField, self).prepare_value(value)
    def to_python(self, value):
        if value in EMPTY_VALUES:
            return None
        try:
            key = self.to_field_name or 'pk'
            value = self.queryset.get(**{key: value})
        except (ValueError, self.queryset.model.DoesNotExist):
            raise ValidationError(self.error_messages['invalid_choice'])
        return value
    def validate(self, value):
        # Skip ChoiceField.validate(); to_python() already checked membership.
        return Field.validate(self, value)
class ModelMultipleChoiceField(ModelChoiceField):
    """A MultipleChoiceField whose choices are a model QuerySet."""
    widget = SelectMultiple
    hidden_widget = MultipleHiddenInput
    default_error_messages = {
        'list': _(u'Enter a list of values.'),
        'invalid_choice': _(u'Select a valid choice. %s is not one of the'
                            u' available choices.'),
        'invalid_pk_value': _(u'"%s" is not a valid value for a primary key.')
    }
    def __init__(self, queryset, cache_choices=False, required=True,
                 widget=None, label=None, initial=None,
                 help_text=None, *args, **kwargs):
        # empty_label is always None for a multiple-choice field.
        super(ModelMultipleChoiceField, self).__init__(queryset, None,
            cache_choices, required, widget, label, initial, help_text,
            *args, **kwargs)
    def clean(self, value):
        if self.required and not value:
            raise ValidationError(self.error_messages['required'])
        elif not self.required and not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['list'])
        key = self.to_field_name or 'pk'
        # First pass: reject values that cannot even be coerced to the key
        # type (filter() raises ValueError on a bad lookup value).
        for pk in value:
            try:
                self.queryset.filter(**{key: pk})
            except ValueError:
                raise ValidationError(self.error_messages['invalid_pk_value'] % pk)
        # One query for all values, then verify every value was matched.
        qs = self.queryset.filter(**{'%s__in' % key: value})
        pks = set([force_unicode(getattr(o, key)) for o in qs])
        for val in value:
            if force_unicode(val) not in pks:
                raise ValidationError(self.error_messages['invalid_choice'] % val)
        # Since this overrides the inherited ModelChoiceField.clean
        # we run custom validators here
        self.run_validators(value)
        return qs
    def prepare_value(self, value):
        if hasattr(value, '__iter__'):
            return [super(ModelMultipleChoiceField, self).prepare_value(v) for v in value]
        return super(ModelMultipleChoiceField, self).prepare_value(value)
| bsd-3-clause |
egafford/sahara | sahara/tests/unit/db/templates/test_utils.py | 4 | 5311 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from sahara import context
from sahara.db.templates import utils
from sahara.tests.unit.conductor import base
from sahara.tests.unit.db.templates import common as c
class FakeNGT(object):
    """Minimal stand-in for a node group that records its template id."""
    def __init__(self, id):
        # Mirror the attribute name exposed by the real node group object.
        self.node_group_template_id = id
class FakeCluster(object):
    """Minimal stand-in for a cluster used by the template utility tests."""
    def __init__(self, name, node_groups=None, cluster_template_id=None):
        self.name = name
        # Any falsy value (None, empty sequence) becomes a fresh empty list,
        # matching the original `node_groups or []` behaviour.
        self.node_groups = node_groups if node_groups else []
        self.cluster_template_id = cluster_template_id
class TemplateUtilsTestCase(base.ConductorManagerTestCase):
    """Unit tests for the helpers in sahara.db.templates.utils."""
    def test_substitute_ng_ids(self):
        # {name} placeholders are replaced from ng_dict; plain ids untouched.
        cl = {"node_groups":
              [{"name": "worker",
                "node_group_template_id": "{vanilla-worker}",
                "count": 3},
               {"name": "master",
                "node_group_template_id": "{vanilla-master}",
                "count": 1},
               {"name": "secondary-name",
                "node_group_template_id": "some_id"}]}
        ng_dict = {"vanilla-worker": 1,
                   "vanilla-master": 2}
        utils.substitute_ng_ids(cl, ng_dict)
        self.assertEqual("1", cl["node_groups"][0]["node_group_template_id"])
        self.assertEqual("2", cl["node_groups"][1]["node_group_template_id"])
        self.assertEqual("some_id",
                         cl["node_groups"][2]["node_group_template_id"])
    def test_check_plugin_version(self):
        # Accepts both bare versions and "plugin.version" qualified forms.
        template = {"plugin_name": "vanilla",
                    "hadoop_version": "2.7.1"}
        self.assertTrue(utils.check_plugin_version(template, None))
        self.assertTrue(utils.check_plugin_version(template, ["2.7.1"]))
        self.assertTrue(utils.check_plugin_version(template,
                                                   ["vanilla.2.7.1"]))
        self.assertFalse(utils.check_plugin_version(template, ["1.2.1"]))
    def test_check_plugin_name_and_version(self):
        # None for either filter means "match anything" for that dimension.
        template = {"plugin_name": "vanilla",
                    "hadoop_version": "2.7.1"}
        self.assertTrue(utils.check_plugin_name_and_version(
            template, None, ["2.7.1"]))
        self.assertTrue(utils.check_plugin_name_and_version(
            template, ["vanilla"], None))
        self.assertTrue(utils.check_plugin_name_and_version(
            template, ["vanilla"], ["2.7.1"]))
        self.assertTrue(utils.check_plugin_name_and_version(
            template, ["vanilla"], ["vanilla.2.7.1"]))
        self.assertFalse(utils.check_plugin_name_and_version(
            template, ["hdp"], ["2.7.1"]))
    def test_check_node_group_template_usage(self):
        # Usage is reported separately for clusters and cluster templates.
        ng1 = FakeNGT(1)
        ng2 = FakeNGT(2)
        cluster = FakeCluster("cluster", [ng1])
        template = FakeCluster("template", [ng2])
        cluster_users, template_users = utils.check_node_group_template_usage(
            1, [cluster], [template])
        self.assertEqual([cluster.name], cluster_users)
        self.assertEqual([], template_users)
        cluster_users, template_users = utils.check_node_group_template_usage(
            2, [cluster], [template])
        self.assertEqual([], cluster_users)
        self.assertEqual([template.name], template_users)
    def test_check_cluster_template_usage(self):
        cluster = FakeCluster("cluster", cluster_template_id=1)
        cluster_users = utils.check_cluster_template_usage(1, [cluster])
        self.assertEqual([cluster.name], cluster_users)
    def test_find_node_group_template_by_name(self):
        # Lookup by name returns the stored template; misses return None.
        ctx = context.ctx()
        t = self.api.node_group_template_create(ctx, c.SAMPLE_NGT)
        found = utils.find_node_group_template_by_name(ctx,
                                                       c.SAMPLE_NGT["name"])
        self.assertEqual(t["id"], found["id"])
        found = utils.find_node_group_template_by_name(ctx, "fred")
        self.assertIsNone(found)
    def test_find_cluster_template_by_name(self):
        ctx = context.ctx()
        t = self.api.cluster_template_create(ctx, c.SAMPLE_CLT)
        found = utils.find_cluster_template_by_name(ctx, c.SAMPLE_CLT["name"])
        self.assertEqual(t["id"], found["id"])
        found = utils.find_cluster_template_by_name(ctx, "fred")
        self.assertIsNone(found)
    def test_value_diff(self):
        # value_diff returns the backup of values about to be overwritten
        # and must not mutate the current dict.
        current = {"cat": "meow",
                   "dog": "woof",
                   "horse": ["neigh", "whinny"]}
        new_values = {"dog": "bark",
                      "horse": "snort"}
        original = copy.deepcopy(current)
        backup = utils.value_diff(current, new_values)
        self.assertEqual({"dog": "woof",
                          "horse": ["neigh", "whinny"]}, backup)
        # current is unchanged
        self.assertEqual(original, current)
| apache-2.0 |
jeremiah-c-leary/vhdl-style-guide | vsg/tests/process/test_rule_023.py | 1 | 1145 |
import os
import unittest
from vsg.rules import process
from vsg import vhdlFile
from vsg.tests import utils
# Module-level fixtures shared by all tests: parse the VHDL input file once
# and load the expected "fixed" output for comparison.
sTestDir = os.path.dirname(__file__)
lFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_023_test_input.vhd'))
# Expected fixed-file contents; the leading empty string aligns list indexes
# with 1-based line numbers.
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_023_test_input.fixed.vhd'), lExpected)
class test_process_rule(unittest.TestCase):
    """Tests for VHDL style rule process_023 (analyze and fix)."""
    def setUp(self):
        # Build a fresh vhdlFile per test; the parse must have succeeded.
        self.oFile = vhdlFile.vhdlFile(lFile)
        self.assertIsNone(eError)
    def test_rule_023(self):
        oRule = process.rule_023()
        self.assertTrue(oRule)
        self.assertEqual(oRule.name, 'process')
        self.assertEqual(oRule.identifier, '023')
        # Violations expected on these line numbers of the test input.
        lExpected = [20]
        oRule.analyze(self.oFile)
        self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
    def test_fix_rule_023(self):
        oRule = process.rule_023()
        oRule.fix(self.oFile)
        lActual = self.oFile.get_lines()
        # Compares against the module-level lExpected (the fixed file).
        self.assertEqual(lExpected, lActual)
        # After fixing, re-analysis must report no violations.
        oRule.analyze(self.oFile)
        self.assertEqual(oRule.violations, [])
| gpl-3.0 |
nluchs/autojump | bin/autojump_utils.py | 18 | 4357 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import errno
from itertools import islice
import os
import platform
import re
import shutil
import sys
import unicodedata
# Python 2/3 compatibility shims: on Python 3, map() is already lazy (so it
# stands in for itertools.imap) and os.getcwdu() no longer exists.
if sys.version_info[0] == 3:
    imap = map
    os.getcwdu = os.getcwd
else:
    from itertools import imap
def create_dir(path):
    """Create *path* (with parents), tolerating a pre-existing directory."""
    try:
        os.makedirs(path)
    except OSError as err:
        # Another process may have created it first; that is fine.
        # Any other failure (permissions, bad path, ...) propagates.
        if err.errno != errno.EEXIST:
            raise
def encode_local(string):
    """Encode *string* with the user's filesystem encoding (UTF-8 fallback)."""
    encoding = sys.getfilesystemencoding() or 'utf-8'
    return string.encode(encoding)
def first(xs):
    """Return the first element of the iterable *xs*, or None if empty.

    next() with a default (available since Python 2.6) replaces the
    version-specific it.__next__()/it.next() dance and the explicit
    StopIteration handling.
    """
    return next(iter(xs), None)
def get_tab_entry_info(entry, separator):
    """
    Given a tab entry in the following format return needle, index, and path:

        [needle]__[index]__[path]

    Missing components are returned as None.
    """
    needle = index = path = None

    needle_match = re.search(r'(.*?)' + separator, entry)
    if needle_match:
        needle = needle_match.group(1)

    index_match = re.search(separator + r'([0-9]{1})', entry)
    if index_match:
        index = int(index_match.group(1))

    path_match = re.search(
        separator + r'[0-9]{1}' + separator + r'(.*)',
        entry)
    if path_match:
        path = path_match.group(1)

    return needle, index, path
def get_pwd():
    """Return the current working directory as a Unicode string.

    Relies on the module-level shim that aliases os.getcwdu to os.getcwd
    on Python 3.
    """
    try:
        return os.getcwdu()
    except OSError:
        # The cwd can vanish underneath us (e.g. deleted from another shell);
        # report it and let the caller decide how to bail out.
        print("Current directory no longer exists.", file=sys.stderr)
        raise
def has_uppercase(string):
    """Return True if *string* contains at least one upper-case character."""
    if is_python3():
        return any(ch.isupper() for ch in string)
    # Python 2: use Unicode categories so non-ASCII upper-case letters count.
    return any(unicodedata.category(c) == 'Lu' for c in unicode(string))
def in_bash():
    """Return True if the user's login shell looks like bash.

    os.getenv('SHELL') is None when SHELL is unset; treat that as "not
    bash" instead of raising TypeError on the `in` test.
    """
    return 'bash' in (os.getenv('SHELL') or '')
def is_python2():
    """Return True when running under a Python 2 interpreter."""
    return sys.version_info[0] == 2
def is_python3():
    """Return True when running under a Python 3 interpreter."""
    return sys.version_info[0] == 3
def is_linux():
    """Return True when running on Linux."""
    return platform.system() == 'Linux'
def is_osx():
    """Return True when running on macOS (Darwin)."""
    return platform.system() == 'Darwin'
def is_windows():
    """Return True when running on Windows."""
    return platform.system() == 'Windows'
def last(xs):
    """Return the final element of the iterable *xs*, or None if empty.

    A plain for-loop leaves the last yielded value bound to the loop
    variable, which replaces the duplicated Python-2/3 it.next() /
    it.__next__() loops with identical semantics.
    """
    tmp = None
    for tmp in xs:
        pass
    return tmp
def move_file(src, dst):
    """
    Atomically move file.

    Windows does not allow for atomic file renaming (which is used by
    os.rename / shutil.move) so destination paths must first be deleted.
    """
    if is_windows() and os.path.exists(dst):
        # raises exception if file is in use on Windows
        os.remove(dst)
    shutil.move(src, dst)
def print_entry(entry):
    """Print a database entry as "<weight>:<TAB><path>" in the local encoding."""
    print_local("%.1f:\t%s" % (entry.weight, entry.path))
def print_local(string):
    """Print *string* encoded in the user's preferred encoding."""
    print(encode_local(string))
def print_tab_menu(needle, tab_entries, separator):
    """
    Prints the tab completion menu according to the following format:

        [needle]__[index]__[possible_match]

    The needle (search pattern) and index are necessary to recreate the results
    on subsequent calls.
    """
    for i, entry in enumerate(tab_entries):
        # 1-based index so shell completion entries start at 1.
        print_local(
            '%s%s%d%s%s' % (
                needle,
                separator,
                i + 1,
                separator,
                entry.path))
def sanitize(directories):
    """Normalize paths: convert to Unicode and strip trailing separators.

    Uses a named inner function instead of an assigned lambda (PEP 8 E731);
    behaviour is unchanged.
    """
    def clean(path):
        # Edge case: '/' alone is a valid path; stripping it would leave "".
        if path == os.sep:
            return unico(path)
        return unico(path).rstrip(os.sep)
    return list(imap(clean, directories))
def second(xs):
    """Return the second element of *xs*, or None if it has fewer than two.

    Bug fix: the original called it.next() unconditionally, which raises
    AttributeError on Python 3 (iterators only have __next__ there) even
    though this module otherwise supports Python 3. The next() builtin
    works on both interpreters.
    """
    it = iter(xs)
    try:
        next(it)          # skip the first element
        return next(it)
    except StopIteration:
        return None
def surround_quotes(string):
    """
    Bash has problems dealing with certain paths so we're surrounding all
    path outputs with quotes.

    Non-bash shells and empty strings are returned unchanged.
    """
    if in_bash() and string:
        # Python 2.6 requires field numbers
        return '"{0}"'.format(string)
    return string
def take(n, iterable):
    """Return a lazy iterator over the first *n* items of *iterable*."""
    # islice(it, 0, n) is equivalent to islice(it, n).
    return islice(iterable, 0, n)
def unico(string):
    """Converts into Unicode string."""
    if is_python2() and not isinstance(string, unicode):
        # Decode byte strings as UTF-8, replacing undecodable bytes.
        return unicode(string, encoding='utf-8', errors='replace')
    # Python 3 strings (and py2 unicode) pass through untouched.
    return string
| gpl-3.0 |
mpattyn/fumiste | prototypePython/steamapi/requests/status_codes.py | 695 | 3136 | # -*- coding: utf-8 -*-
from .structures import LookupDict
# Map of HTTP status code -> tuple of attribute aliases. Every alias becomes
# an attribute on the ``codes`` LookupDict below (e.g. codes.ok == 200).
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('resume_incomplete', 'resume'),

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
}

codes = LookupDict(name='status_codes')

# Expose every alias as an attribute; aliases that are valid identifiers
# (i.e. not starting with a backslash) also get an upper-case variant.
for (code, titles) in list(_codes.items()):
    for title in titles:
        setattr(codes, title, code)
        if not title.startswith('\\'):
            setattr(codes, title.upper(), code)
| mit |
RobertABT/heightmap | build/scipy/scipy/weave/blitz_tools.py | 97 | 4159 | from __future__ import absolute_import, print_function
import parser
import sys
import warnings
import copy
import numpy
from . import ast_tools
from . import slice_handler
from . import size_check
from . import converters
from . import inline_tools
from .inline_tools import attempt_function_call
function_catalog = inline_tools.function_catalog
function_cache = inline_tools.function_cache
class BlitzWarning(UserWarning):
    """Warns about compilation failures etc. (e.g. when a blitz expression
    falls back to plain Python execution)."""
def blitz(expr,local_dict=None, global_dict=None,check_size=1,verbose=0,**kw):
    """Evaluate the string expression *expr* via compiled blitz++ code.

    Looks up a previously compiled function for the expression, compiling
    one on a cache miss; if compilation fails, warns with BlitzWarning and
    falls back to executing the expression as plain Python.
    """
    # this could call inline, but making a copy of the
    # code here is more efficient for several reasons.
    global function_catalog

    # this grabs the local variables from the *previous* call
    # frame -- that is the locals from the function that called
    # inline.
    call_frame = sys._getframe().f_back
    if local_dict is None:
        local_dict = call_frame.f_locals
    if global_dict is None:
        global_dict = call_frame.f_globals

    # 1. Check the sizes of the arrays and make sure they are compatible.
    # This is expensive, so unsetting the check_size flag can save a lot
    # of time. It also can cause core-dumps if the sizes of the inputs
    # aren't compatible.
    if check_size and not size_check.check_expr(expr,local_dict,global_dict):
        raise ValueError("inputs failed to pass size check.")

    # 2. try local cache
    try:
        # Direct call replaces the apply() builtin, which was removed in
        # Python 3; semantics are identical.
        return function_cache[expr](local_dict, global_dict)
    except Exception:
        # Typically a KeyError (cache miss); fall through and rebuild.
        # (Narrowed from a bare except so KeyboardInterrupt still escapes.)
        pass
    try:
        results = attempt_function_call(expr,local_dict,global_dict)
    # 3. build the function
    except ValueError:
        # This section is pretty much the only difference
        # between blitz and inline
        ast = parser.suite(expr)
        ast_list = ast.tolist()
        expr_code = ast_to_blitz_expr(ast_list)
        arg_names = ast_tools.harvest_variables(ast_list)
        module_dir = global_dict.get('__file__',None)
        func = inline_tools.compile_function(expr_code,arg_names,local_dict,
                                             global_dict,module_dir,
                                             compiler='gcc',auto_downcast=1,
                                             verbose=verbose,
                                             type_converters=converters.blitz,
                                             **kw)
        function_catalog.add_function(expr,func,module_dir)
        try:
            results = attempt_function_call(expr,local_dict,global_dict)
        except ValueError:
            warnings.warn('compilation failed. Executing as python code',
                          BlitzWarning)
            exec(expr, global_dict, local_dict)
def ast_to_blitz_expr(ast_seq):
    """Convert an ast_sequence to a blitz expression.

    Returns the C++ statement text (terminated with ';\\n') ready for
    inclusion in generated blitz++ code.
    """
    # Don't overwrite original sequence in call to transform slices.
    ast_seq = copy.deepcopy(ast_seq)
    slice_handler.transform_slices(ast_seq)

    # Build the actual program statement from ast_seq
    expr = ast_tools.ast_to_string(ast_seq)

    # Now find and replace specific symbols to convert this to
    # a blitz++ compatible statement.
    # I'm doing this with string replacement here.  It could
    # also be done on the actual ast tree (and probably should from
    # a purest standpoint...).

    # this one isn't necessary but it helps code readability
    # and compactness. It requires that
    #   Range _all = blitz::Range::all();
    # be included in the generated code.
    # These could all alternatively be done to the ast in
    # build_slice_atom()
    expr = expr.replace('slice(_beg,_end)', '_all')
    expr = expr.replace('slice', 'blitz::Range')
    expr = expr.replace('[','(')
    expr = expr.replace(']', ')')
    expr = expr.replace('_stp', '1')

    # Instead of blitz::fromStart and blitz::toEnd.  This requires
    # the following in the generated code.
    #   Range _beg = blitz::fromStart;
    #   Range _end = blitz::toEnd;
    #expr = expr.replace('_beg', 'blitz::fromStart' )
    #expr = expr.replace('_end', 'blitz::toEnd' )

    return expr + ';\n'
| mit |
sangeethah/validation-tests | tests/validation/cattlevalidationtest/core/test_private_registry.py | 6 | 7392 | from common_fixtures import * # NOQA
quay_creds = {}
quay_creds["email"] = os.environ.get('QUAY_EMAIL')
quay_creds["username"] = os.environ.get('QUAY_USERNAME')
quay_creds["password"] = os.environ.get('QUAY_PASSWORD')
quay_creds["image"] = os.environ.get('QUAY_IMAGE')
quay_creds["serverAddress"] = "quay.io"
quay_creds["name"] = "quay"
dockerhub_creds = {}
dockerhub_creds["email"] = os.environ.get('DOCKERHUB_EMAIL')
dockerhub_creds["username"] = os.environ.get('DOCKERHUB_USERNAME')
dockerhub_creds["password"] = os.environ.get('DOCKERHUB_PASSWORD')
dockerhub_creds["image"] = os.environ.get('DOCKERHUB_IMAGE')
dockerhub_creds["serverAddress"] = "index.docker.io"
dockerhub_creds["name"] = "docker"
registry_list = {}
parallelThreads = os.environ.get("CATTLE_TEST_PARALLEL_THREADS")
multiThreaded = parallelThreads is not None and parallelThreads > 1
if_quay_creds_available = pytest.mark.skipif(
None in quay_creds.values() or "" in quay_creds.values() or multiThreaded,
reason='Not all Quay credentials are available '
'or tests are run in parallel')
if_docker_creds_available = pytest.mark.skipif(
None in dockerhub_creds.values() or
"" in dockerhub_creds.values() or
multiThreaded, reason='Not all Docker credentials are avaialable' +
'or tests are run in parallel')
print quay_creds.values()
print dockerhub_creds.values()
print None in quay_creds.values()
print "" in dockerhub_creds.values()
def create_registry(client, registry_creds):
    """Create a registry and its credential in Rancher; return the credential.

    registry_creds must supply 'serverAddress', 'name', 'email', 'username'
    and 'password' keys.
    """
    registry = client.wait_success(
        client.create_registry(
            serverAddress=registry_creds["serverAddress"],
            name=registry_creds["name"]))
    credential = client.create_registry_credential(
        registryId=registry.id,
        email=registry_creds["email"],
        publicValue=registry_creds["username"],
        secretValue=registry_creds["password"])
    return client.wait_success(credential)
@pytest.fixture(scope='session')
def registries(client, admin_client, request):
    """Session fixture: create Quay and Docker Hub registries plus their
    credentials once, and tear both down at the end of the session."""
    # Already populated by an earlier call within this session.
    if len(registry_list.keys()) > 0:
        return
    reg_cred = create_registry(client, quay_creds)
    registry_list[quay_creds["name"]] = reg_cred
    reg_cred = create_registry(client, dockerhub_creds)
    registry_list[dockerhub_creds["name"]] = reg_cred
    def remove_registries():
        # Deactivate and delete each credential, then its parent registry.
        for reg_cred in registry_list.values():
            reg_cred = client.wait_success(reg_cred.deactivate())
            reg_cred = client.delete(reg_cred)
            reg_cred = client.wait_success(reg_cred)
            assert reg_cred.state == 'removed'
            registry = admin_client.by_id('registry', reg_cred.registryId)
            registry = client.wait_success(registry.deactivate())
            assert registry.state == 'inactive'
            registry = client.delete(registry)
            registry = client.wait_success(registry)
            assert registry.state == 'removed'
    request.addfinalizer(remove_registries)
@if_quay_creds_available
def test_create_container_with_quay_registry_credential(client,
                                                        socat_containers,
                                                        registries):
    """A container pulls from a private Quay repo using a stored credential."""
    image_id = quay_creds["serverAddress"]+"/" + quay_creds["image"]
    # Remove any locally cached copy so the pull truly hits the registry.
    cleanup_images(client, [image_id+":latest"])
    image_id = "docker:"+quay_creds["serverAddress"]+"/" + quay_creds["image"]
    reg_cred = registry_list[quay_creds["name"]]
    container = client.create_container(name=random_str(),
                                        imageUuid=image_id,
                                        registryCredentialId=reg_cred.id)
    container = client.wait_success(container, 180)
    assert container.state == "running"
    delete_all(client, [container])
@if_quay_creds_available
def test_create_services_with_quay_registry_credential(client, admin_client,
                                                       socat_containers,
                                                       registries):
    """A scaled service pulls its image from the private Quay registry."""
    image_id = quay_creds["serverAddress"]+"/" + quay_creds["image"]
    # NOTE(review): stray trailing comma makes this a 1-tuple expression;
    # harmless but unintended.
    cleanup_images(client, [image_id+":latest"]),
    launch_config = {"imageUuid": "docker:"+image_id}
    scale = 2
    service, env = create_env_and_svc(client, launch_config,
                                      scale)
    # Activate Services
    service = service.activate()
    service = client.wait_success(service, 300)
    assert service.state == "active"
    check_container_in_service(admin_client, service)
    delete_all(client, [env])
@if_docker_creds_available
def test_create_container_with_docker_registry_credential(client,
                                                          socat_containers,
                                                          registries):
    """A container should reach 'running' when pulling a private Docker
    Hub image with the matching registry credential passed explicitly."""
    image = dockerhub_creds["image"]
    cleanup_images(client, [image + ":latest"])
    cred = registry_list[dockerhub_creds["name"]]
    container = client.create_container(name=random_str(),
                                        imageUuid="docker:" + image,
                                        registryCredentialId=cred.id,
                                        stdinOpen=True,
                                        tty=True)
    container = client.wait_success(container, 180)
    assert container.state == "running"
    delete_all(client, [container])
@if_docker_creds_available
def test_create_services_with_docker_registry_credential(client, admin_client,
                                                         socat_containers,
                                                         registries):
    """A scaled service should become active when pulling a private
    Docker Hub image via the session-created registry credential."""
    image = dockerhub_creds["image"]
    cleanup_images(client, [image + ":latest"])
    launch_config = {
        "imageUuid": "docker:" + image,
        "stdinOpen": "True",
        "tty": "True",
    }
    service, env = create_env_and_svc(client, launch_config, 2)
    # Activate Services
    service = client.wait_success(service.activate(), 300)
    assert service.state == "active"
    check_container_in_service(admin_client, service)
    delete_all(client, [env])
@if_quay_creds_available
def test_create_container_with_quay(client, socat_containers,
                                    registries):
    """With the quay.io credential already registered for the project,
    a container should start from the private image without passing the
    credential id explicitly."""
    base_image = quay_creds["serverAddress"] + "/" + quay_creds["image"]
    cleanup_images(client, [base_image + ":latest"])
    container = client.create_container(name=random_str(),
                                        imageUuid="docker:" + base_image)
    container = client.wait_success(container, 180)
    assert container.state == "running"
    delete_all(client, [container])
@if_docker_creds_available
def test_create_container_with_docker(client, socat_containers,
                                      registries):
    """With the Docker Hub credential already registered for the project,
    a container should start from the private image without passing the
    credential id explicitly."""
    image = dockerhub_creds["image"]
    cleanup_images(client, [image + ":latest"])
    container = client.create_container(name=random_str(),
                                        imageUuid="docker:" + image,
                                        stdinOpen=True,
                                        tty=True)
    container = client.wait_success(container, 180)
    assert container.state == "running"
    delete_all(client, [container])
| apache-2.0 |
UsingtcNower/opencv-practice | misc/softmax.py | 1 | 1259 | #!/usr/bin/python
#coding: utf-8
import numpy as np
def softmax_loss_native(W, X, y, reg):
    """Naive (explicit-loop) softmax loss and gradient.

    Args:
        W: weight matrix of shape (D, C).
        X: data matrix of shape (N, D), one sample per row.
        y: length-N vector of integer class labels in [0, C).
        reg: L2 regularization strength.

    Returns:
        (loss, dW): scalar cross-entropy loss plus L2 penalty, and the
        gradient of the loss w.r.t. W (same shape as W).
    """
    dW = np.zeros_like(W)
    dW_each = np.zeros_like(W)
    num_train = X.shape[0]
    num_label = W.shape[1]
    f = X.dot(W)
    # Subtract the per-row max before exponentiating for numerical stability.
    f_max = np.reshape(np.max(f, axis=1), (num_train, 1))
    prob = np.exp(f - f_max) / np.sum(np.exp(f - f_max), axis=1, keepdims=True)
    # One-hot encoding of the labels.
    # Fix: np.arange (the original called the non-existent np.arrange).
    y_trueClass = np.zeros_like(prob)
    y_trueClass[np.arange(num_train), y] = 1.0
    loss = 0
    for i in range(num_train):
        for j in range(num_label):
            # Cross-entropy is -sum(y * log(p)); the original accumulated
            # the positive product and returned a negated loss.
            loss += -y_trueClass[i, j] * np.log(prob[i, j])
            dW_each[:, j] = -(y_trueClass[i, j] - prob[i, j]) * X[i, :]
        dW += dW_each
    loss /= num_train
    loss += .5 * reg * np.sum(W * W)
    dW /= num_train
    # Fix: the regularization gradient was missing (the vectorized twin
    # includes it), so dW did not match the gradient of the returned loss.
    dW += reg * W
    return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
    """Vectorized softmax loss and gradient.

    Same contract as softmax_loss_native, but computed without explicit
    Python loops.

    Args:
        W: weight matrix of shape (D, C).
        X: data matrix of shape (N, D), one sample per row.
        y: length-N vector of integer class labels in [0, C).
        reg: L2 regularization strength.

    Returns:
        (loss, dW): scalar cross-entropy loss plus L2 penalty, and the
        gradient of the loss w.r.t. W (same shape as W).
    """
    num_train = X.shape[0]
    f = X.dot(W)
    # Subtract the per-row max before exponentiating for numerical stability.
    f_max = np.reshape(np.max(f, axis=1), (num_train, 1))
    prob = np.exp(f - f_max) / np.sum(np.exp(f - f_max), axis=1, keepdims=True)
    y_trueClass = np.zeros_like(prob)
    # Fix: np.arange (the original used a bare, undefined `arrange`).
    y_trueClass[np.arange(num_train), y] = 1.0
    # Fix: the original summed the regularization term *inside* np.sum
    # (counting it N*C times) and had the cross-entropy sign inverted.
    loss = (-np.sum(y_trueClass * np.log(prob)) / num_train
            + 0.5 * reg * np.sum(W * W))
    dW = -np.dot(X.T, y_trueClass - prob) / num_train + reg * W
    return loss, dW
| gpl-2.0 |
lonniev/com.predictableresponse.alfred.gotomeeting | workflow/update.py | 37 | 10611 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright © 2014 Fabio Niephaus <fabio.niephaus@gmail.com>,
# Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-08-16
#
"""
Self-updating from GitHub
.. versionadded:: 1.9
.. note::
This module is not intended to be used directly. Automatic updates
are controlled by the ``update_settings`` :class:`dict` passed to
:class:`~workflow.workflow.Workflow` objects.
"""
from __future__ import print_function, unicode_literals
import os
import tempfile
import re
import subprocess
import workflow
import web
# __all__ = []

#: Template for the GitHub releases API endpoint; formatted with a
#: ``username/repo`` slug (see :func:`build_api_url`).
RELEASES_BASE = 'https://api.github.com/repos/{0}/releases'

#: Module-level cache for the shared Workflow instance; populated
#: lazily by :func:`wf`.
_wf = None
def wf():
    """Return the shared :class:`workflow.Workflow` instance, creating
    it on first use."""
    global _wf
    if _wf is not None:
        return _wf
    _wf = workflow.Workflow()
    return _wf
class Version(object):
    """Mostly semantic versioning

    The main difference to proper :ref:`semantic versioning <semver>`
    is that this implementation doesn't require a minor or patch version.
    Version strings like ``v1``, ``1.2``, ``1.2.3-beta+build7`` are all
    accepted; missing minor/patch components default to 0.
    """

    #: Match version and pre-release/build information in version strings
    match_version = re.compile(r'([0-9\.]+)(.+)?').match

    def __init__(self, vstr):
        # Parse ``vstr`` into major/minor/patch plus optional
        # pre-release suffix (after ``-``) and build metadata (after ``+``).
        self.vstr = vstr
        self.major = 0
        self.minor = 0
        self.patch = 0
        self.suffix = ''
        self.build = ''
        self._parse(vstr)

    def _parse(self, vstr):
        """Populate the component attributes from ``vstr``.

        :raises ValueError: if ``vstr`` is not a valid version string.
        """
        # A leading ``v`` (as commonly used in GitHub tags) is tolerated.
        if vstr.startswith('v'):
            m = self.match_version(vstr[1:])
        else:
            m = self.match_version(vstr)
        if not m:
            raise ValueError('Invalid version number: {0}'.format(vstr))
        version, suffix = m.groups()
        parts = self._parse_dotted_string(version)
        self.major = parts.pop(0)
        if len(parts):
            self.minor = parts.pop(0)
        if len(parts):
            self.patch = parts.pop(0)
        # More than three dotted components is rejected.
        if not len(parts) == 0:
            raise ValueError('Invalid version (too long) : {0}'.format(vstr))
        if suffix:
            # Build info
            idx = suffix.find('+')
            if idx > -1:
                self.build = suffix[idx+1:]
                suffix = suffix[:idx]
            if suffix:
                # Whatever remains must be a ``-``-prefixed pre-release tag.
                if not suffix.startswith('-'):
                    raise ValueError(
                        'Invalid suffix : `{0}`. Must start with `-`'.format(
                            suffix))
                self.suffix = suffix[1:]
        # wf().logger.debug('version str `{}` -> {}'.format(vstr, repr(self)))

    def _parse_dotted_string(self, s):
        """Parse string ``s`` into list of ints and strings"""
        parsed = []
        parts = s.split('.')
        for p in parts:
            if p.isdigit():
                p = int(p)
            parsed.append(p)
        return parsed

    @property
    def tuple(self):
        """Return version number as a tuple of major, minor, patch, pre-release
        (build metadata is deliberately excluded from comparisons).
        """
        return (self.major, self.minor, self.patch, self.suffix)

    def __lt__(self, other):
        # Compare major/minor/patch first; only when equal fall back to
        # comparing pre-release suffixes.
        if not isinstance(other, Version):
            raise ValueError('Not a Version instance: {0!r}'.format(other))
        t = self.tuple[:3]
        o = other.tuple[:3]
        if t < o:
            return True
        if t == o:  # We need to compare suffixes
            # A pre-release sorts *before* the corresponding final release.
            if self.suffix and not other.suffix:
                return True
            if other.suffix and not self.suffix:
                return False
            # NOTE(review): comparing suffix part lists can mix int and str
            # elements, which raises TypeError on Python 3 — presumably this
            # module targets Python 2; confirm before porting.
            return (self._parse_dotted_string(self.suffix) <
                    self._parse_dotted_string(other.suffix))
        # t > o
        return False

    def __eq__(self, other):
        if not isinstance(other, Version):
            raise ValueError('Not a Version instance: {0!r}'.format(other))
        return self.tuple == other.tuple

    def __ne__(self, other):
        return not self.__eq__(other)

    def __gt__(self, other):
        if not isinstance(other, Version):
            raise ValueError('Not a Version instance: {0!r}'.format(other))
        return other.__lt__(self)

    def __le__(self, other):
        if not isinstance(other, Version):
            raise ValueError('Not a Version instance: {0!r}'.format(other))
        return not other.__lt__(self)

    def __ge__(self, other):
        return not self.__lt__(other)

    def __str__(self):
        # Canonical form: ``major.minor.patch[-suffix][+build]``.
        vstr = '{0}.{1}.{2}'.format(self.major, self.minor, self.patch)
        if self.suffix:
            vstr += '-{0}'.format(self.suffix)
        if self.build:
            vstr += '+{0}'.format(self.build)
        return vstr

    def __repr__(self):
        return "Version('{0}')".format(str(self))
def download_workflow(url):
    """Download workflow at ``url`` to a local temporary file

    :param url: URL to .alfredworkflow file in GitHub repo
    :returns: path to downloaded file
    :raises ValueError: if ``url`` does not point at an
        ``.alfredworkflow`` attachment
    """
    filename = url.split("/")[-1]
    # Both the full URL and its last path segment must carry the
    # .alfredworkflow extension.
    if not (url.endswith('.alfredworkflow') and
            filename.endswith('.alfredworkflow')):
        raise ValueError('Attachment `{}` not a workflow'.format(filename))
    local_path = os.path.join(tempfile.gettempdir(), filename)
    wf().logger.debug(
        'Downloading updated workflow from `{0}` to `{1}` ...'.format(
            url, local_path))
    response = web.get(url)
    with open(local_path, 'wb') as output:
        output.write(response.content)
    return local_path
def build_api_url(slug):
    """Generate releases URL from GitHub slug

    :param slug: Repo name in form ``username/repo``
    :returns: URL to the API endpoint for the repo's releases
    :raises ValueError: if ``slug`` is not of the form ``username/repo``
    """
    # Exactly one '/' separator means exactly two path components.
    if slug.count('/') != 1:
        raise ValueError('Invalid GitHub slug : {0}'.format(slug))
    return RELEASES_BASE.format(slug)
def get_valid_releases(github_slug):
    """Return list of all valid releases

    :param github_slug: ``username/repo`` for workflow's GitHub repo
    :returns: list of dicts. Each :class:`dict` has the form
        ``{'version': '1.1', 'download_url': 'http://github.com/...'}``

    A valid release is one that contains one ``.alfredworkflow`` file.

    If the GitHub version (i.e. tag) is of the form ``v1.1``, the leading
    ``v`` will be stripped.
    """
    api_url = build_api_url(github_slug)
    releases = []
    wf().logger.debug('Retrieving releases list from `{0}` ...'.format(
        api_url))

    def retrieve_releases():
        # Cache-miss callback: fetch the release list from the GitHub API.
        wf().logger.info(
            'Retrieving releases for `{0}` ...'.format(github_slug))
        return web.get(api_url).json()

    slug = github_slug.replace('/', '-')
    for release in wf().cached_data('gh-releases-{0}'.format(slug),
                                    retrieve_releases):
        version = release['tag_name']
        # Collect all .alfredworkflow attachments for this release.
        download_urls = []
        for asset in release.get('assets', []):
            url = asset.get('browser_download_url')
            if not url or not url.endswith('.alfredworkflow'):
                continue
            download_urls.append(url)
        # Validate release
        if release['prerelease']:
            wf().logger.warning(
                'Invalid release {0} : pre-release detected'.format(version))
            continue
        if not download_urls:
            wf().logger.warning(
                'Invalid release {0} : No workflow file'.format(version))
            continue
        if len(download_urls) > 1:
            wf().logger.warning(
                'Invalid release {0} : multiple workflow files'.format(version))
            continue
        # Bug fix: the debug log previously referenced the inner loop
        # variable ``url``, which is undefined when the first release has
        # no assets (NameError) and stale/wrong otherwise. Log the single
        # validated download URL instead.
        wf().logger.debug('Release `{0}` : {1}'.format(version,
                                                       download_urls[0]))
        releases.append({'version': version, 'download_url': download_urls[0]})
    return releases
def check_update(github_slug, current_version):
    """Check whether a newer release is available on GitHub

    :param github_slug: ``username/repo`` for workflow's GitHub repo
    :param current_version: the currently installed version of the
        workflow. :ref:`Semantic versioning <semver>` is required.
    :type current_version: ``unicode``
    :returns: ``True`` if an update is available, else ``False``

    If an update is available, its version number and download URL will
    be cached.
    """
    releases = get_valid_releases(github_slug)
    wf().logger.info('{0} releases for {1}'.format(len(releases),
                                                   github_slug))
    if not releases:
        raise ValueError('No valid releases for {0}'.format(github_slug))
    # GitHub returns releases newest-first, so the head of the list is
    # the most recent release.
    newest = releases[0]
    remote = Version(newest['version'])
    local = Version(current_version)
    wf().logger.debug('Latest : {0!r} Installed : {1!r}'.format(remote,
                                                                local))
    update_available = remote > local
    if update_available:
        status = {
            'version': newest['version'],
            'download_url': newest['download_url'],
            'available': True,
        }
    else:
        status = {
            'available': False,
        }
    wf().cache_data('__workflow_update_status', status)
    return update_available
def install_update(github_slug, current_version):
    """If a newer release is available, download and install it

    :param github_slug: ``username/repo`` for workflow's GitHub repo
    :param current_version: the currently installed version of the
        workflow. :ref:`Semantic versioning <semver>` is required.
    :type current_version: ``unicode``

    If an update is available, it will be downloaded and installed.

    :returns: ``True`` if an update is installed, else ``False``
    """
    # TODO: `github_slug` and `current_version` are both unused; the
    # decision is driven entirely by the cached update status.
    status = wf().cached_data('__workflow_update_status', max_age=0)
    if not (status and status.get('available')):
        wf().logger.info('No update available')
        return False
    workflow_file = download_workflow(status['download_url'])
    wf().logger.info('Installing updated workflow ...')
    # Opening the .alfredworkflow file hands it to Alfred, which
    # performs the actual installation.
    subprocess.call(['open', workflow_file])
    status['available'] = False
    wf().cache_data('__workflow_update_status', status)
    return True
# Command-line entry point: ``update.py (check|install) github_slug version``.
if __name__ == '__main__':  # pragma: nocover
    import sys

    def show_help():
        # Print CLI usage and exit with an error status.
        print('Usage : update.py (check|install) github_slug version')
        sys.exit(1)

    # Exactly three arguments are required: action, slug, version.
    if len(sys.argv) != 4:
        show_help()
    action, github_slug, version = sys.argv[1:]
    if action not in ('check', 'install'):
        show_help()
    if action == 'check':
        check_update(github_slug, version)
    elif action == 'install':
        install_update(github_slug, version)
| cc0-1.0 |
romain-dartigues/ansible | contrib/inventory/apstra_aos.py | 27 | 20400 | #!/usr/bin/env python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Apstra AOS external inventory script
====================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this:
- copy this file over /etc/ansible/hosts and chmod +x the file.
- Copy both files (.py and .ini) in your preferred directory
More information about Ansible Dynamic Inventory here
http://unix.stackexchange.com/questions/205479/in-ansible-dynamic-inventory-json-can-i-render-hostvars-based-on-the-hostname
2 modes are currently supported: **device based** or **blueprint based**:
- For **Device based**, the list of device is taken from the global device list
the serial ID will be used as the inventory_hostname
- For **Blueprint based**, the list of device is taken from the given blueprint
the Node name will be used as the inventory_hostname
Input parameters can be provided either via the ini file or by using Environment Variables:
The following list of Environment Variables are supported: AOS_SERVER, AOS_PORT, AOS_USERNAME, AOS_PASSWORD, AOS_BLUEPRINT
The config file takes precedence over the Environment Variables
Tested with Apstra AOS 1.1
This script has been inspired by the cobbler.py inventory. thanks
Author: Damien Garros (@dgarros)
Version: 0.2.0
"""
import json
import os
import re
import sys
try:
import argparse
HAS_ARGPARSE = True
except ImportError:
HAS_ARGPARSE = False
try:
from apstra.aosom.session import Session
HAS_AOS_PYEZ = True
except ImportError:
HAS_AOS_PYEZ = False
from ansible.module_utils.six.moves import configparser
"""
##
Expected output format in Device mode
{
"Cumulus": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"EOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
},
"Generic Model": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"Ubuntu GNU/Linux": {
"hosts": [
"525400E5486D"
],
"vars": {}
},
"VX": {
"hosts": [
"52540073956E",
"52540022211A"
],
"vars": {}
},
"_meta": {
"hostvars": {
"5254001CAFD8": {
"agent_start_time": "2017-02-03T00:49:16.000000Z",
"ansible_ssh_host": "172.20.52.6",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:58.454480Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.6",
"mgmt_macaddr": "52:54:00:1C:AF:D8",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "5254001CAFD8",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"52540022211A": {
"agent_start_time": "2017-02-03T00:45:22.000000Z",
"ansible_ssh_host": "172.20.52.7",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.019189Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.7",
"mgmt_macaddr": "52:54:00:22:21:1a",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540022211A",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"52540073956E": {
"agent_start_time": "2017-02-03T00:45:19.000000Z",
"ansible_ssh_host": "172.20.52.8",
"aos_hcl_model": "Cumulus_VX",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:45:11.030113Z",
"domain_name": "",
"error_message": "",
"fqdn": "cumulus",
"hostname": "cumulus",
"hw_model": "VX",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.8",
"mgmt_macaddr": "52:54:00:73:95:6e",
"os_arch": "x86_64",
"os_family": "Cumulus",
"os_version": "3.1.1",
"os_version_info": {
"build": "1",
"major": "3",
"minor": "1"
},
"serial_number": "52540073956E",
"state": "OOS-QUARANTINED",
"vendor": "Cumulus"
},
"525400DDDF72": {
"agent_start_time": "2017-02-03T00:49:07.000000Z",
"ansible_ssh_host": "172.20.52.5",
"aos_hcl_model": "Arista_vEOS",
"aos_server": "",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-03T00:47:46.929921Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "vEOS",
"hw_version": "",
"is_acknowledged": false,
"mgmt_ifname": "Management1",
"mgmt_ipaddr": "172.20.52.5",
"mgmt_macaddr": "52:54:00:DD:DF:72",
"os_arch": "x86_64",
"os_family": "EOS",
"os_version": "4.16.6M",
"os_version_info": {
"build": "6M",
"major": "4",
"minor": "16"
},
"serial_number": "525400DDDF72",
"state": "OOS-QUARANTINED",
"vendor": "Arista"
},
"525400E5486D": {
"agent_start_time": "2017-02-02T18:44:42.000000Z",
"ansible_ssh_host": "172.20.52.4",
"aos_hcl_model": "Generic_Server_1RU_1x10G",
"aos_server": "172.20.52.3",
"aos_version": "AOS_1.1.1_OB.5",
"comm_state": "on",
"device_start_time": "2017-02-02T21:11:25.188734Z",
"domain_name": "",
"error_message": "",
"fqdn": "localhost",
"hostname": "localhost",
"hw_model": "Generic Model",
"hw_version": "pc-i440fx-trusty",
"is_acknowledged": false,
"mgmt_ifname": "eth0",
"mgmt_ipaddr": "172.20.52.4",
"mgmt_macaddr": "52:54:00:e5:48:6d",
"os_arch": "x86_64",
"os_family": "Ubuntu GNU/Linux",
"os_version": "14.04 LTS",
"os_version_info": {
"build": "",
"major": "14",
"minor": "04"
},
"serial_number": "525400E5486D",
"state": "OOS-QUARANTINED",
"vendor": "Generic Manufacturer"
}
}
},
"all": {
"hosts": [
"5254001CAFD8",
"52540073956E",
"525400DDDF72",
"525400E5486D",
"52540022211A"
],
"vars": {}
},
"vEOS": {
"hosts": [
"5254001CAFD8",
"525400DDDF72"
],
"vars": {}
}
}
"""
def fail(msg):
    """Write ``msg`` (plus a newline) to stderr and exit with status 1."""
    sys.stderr.write('{0}\n'.format(msg))
    sys.exit(1)
class AosInventory(object):
    """Ansible dynamic-inventory generator backed by an Apstra AOS server.

    Instantiating the class performs the whole run: read settings, parse
    CLI arguments, open an AOS session, build the inventory and print it
    as JSON on stdout.

    Two modes are supported:

    - Device based: hosts come from the global device list and the
      device serial ID is used as the inventory hostname.
    - Blueprint based: hosts come from the configured blueprint and the
      node name is used as the inventory hostname.
    """

    def __init__(self):
        """ Main execution path """

        if not HAS_AOS_PYEZ:
            raise Exception('aos-pyez is not installed. Please see details here: https://github.com/Apstra/aos-pyez')
        if not HAS_ARGPARSE:
            raise Exception('argparse is not installed. Please install the argparse library or upgrade to python-2.7')

        # Initialize inventory
        self.inventory = dict()  # A list of groups and the hosts in that group
        self.inventory['_meta'] = dict()
        self.inventory['_meta']['hostvars'] = dict()

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        # ----------------------------------------------------
        # Open session to AOS
        # ----------------------------------------------------
        aos = Session(server=self.aos_server,
                      port=self.aos_server_port,
                      user=self.aos_username,
                      passwd=self.aos_password)
        aos.login()

        # Save session information in variables of group all
        self.add_var_to_group('all', 'aos_session', aos.session)

        # Add the AOS server itself in the inventory
        self.add_host_to_group("all", 'aos')
        self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
        self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
        self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)

        # ----------------------------------------------------
        # Build the inventory
        # 2 modes are supported: device based or blueprint based
        # - For device based, the list of device is taken from the global device list
        #   the serial ID will be used as the inventory_hostname
        # - For Blueprint based, the list of device is taken from the given blueprint
        #   the Node name will be used as the inventory_hostname
        # ----------------------------------------------------
        if self.aos_blueprint:
            bp = aos.Blueprints[self.aos_blueprint]
            if bp.exists is False:
                fail("Unable to find the Blueprint: %s" % self.aos_blueprint)

            for dev_name, dev_id in bp.params['devices'].value.items():
                self.add_host_to_group('all', dev_name)
                device = aos.Devices.find(uid=dev_id)
                if 'facts' in device.value.keys():
                    self.add_device_facts_to_var(dev_name, device)

                # Define admin State and Status
                if 'user_config' in device.value.keys():
                    if 'admin_state' in device.value['user_config'].keys():
                        self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])

                self.add_device_status_to_var(dev_name, device)

                # Go over the contents data structure
                for node in bp.contents['system']['nodes']:
                    if node['display_name'] == dev_name:
                        self.add_host_to_group(node['role'], dev_name)

                        # Check for additional attribute to import
                        attributes_to_import = [
                            'loopback_ip',
                            'asn',
                            'role',
                            'position',
                        ]
                        for attr in attributes_to_import:
                            if attr in node.keys():
                                self.add_var_to_host(dev_name, attr, node[attr])

                # if blueprint_interface is enabled in the configuration
                # Collect links information
                if self.aos_blueprint_int:
                    interfaces = dict()

                    for link in bp.contents['system']['links']:
                        # each link has 2 sides [0,1], and it's unknown which one match this device
                        # at first we assume, first side match(0) and peer is (1)
                        peer_id = 1

                        for side in link['endpoints']:
                            if side['display_name'] == dev_name:

                                # import local information first
                                int_name = side['interface']

                                # init dict
                                interfaces[int_name] = dict()
                                if 'ip' in side.keys():
                                    interfaces[int_name]['ip'] = side['ip']

                                if 'interface' in side.keys():
                                    interfaces[int_name]['name'] = side['interface']

                                if 'display_name' in link['endpoints'][peer_id].keys():
                                    interfaces[int_name]['peer'] = link['endpoints'][peer_id]['display_name']

                                if 'ip' in link['endpoints'][peer_id].keys():
                                    interfaces[int_name]['peer_ip'] = link['endpoints'][peer_id]['ip']

                                if 'type' in link['endpoints'][peer_id].keys():
                                    interfaces[int_name]['peer_type'] = link['endpoints'][peer_id]['type']
                            else:
                                # if we haven't match the first time, prepare the peer_id
                                # for the second loop iteration
                                peer_id = 0

                    self.add_var_to_host(dev_name, 'interfaces', interfaces)

        else:
            for device in aos.Devices:
                # If not reachable, create by key and
                # If reachable, create by hostname
                self.add_host_to_group('all', device.name)

                # populate information for this host
                self.add_device_status_to_var(device.name, device)

                if 'user_config' in device.value.keys():
                    for key, value in device.value['user_config'].items():
                        self.add_var_to_host(device.name, key, value)

                # Based on device status online|offline, collect facts as well
                if device.value['status']['comm_state'] == 'on':
                    if 'facts' in device.value.keys():
                        self.add_device_facts_to_var(device.name, device)

                # Check if device is associated with a blueprint
                # if it's create a new group
                if 'blueprint_active' in device.value['status'].keys():
                    if 'blueprint_id' in device.value['status'].keys():
                        bp = aos.Blueprints.find(uid=device.value['status']['blueprint_id'])
                        if bp:
                            self.add_host_to_group(bp.name, device.name)

        # ----------------------------------------------------
        # Convert the inventory and return a JSON String
        # ----------------------------------------------------
        data_to_print = ""
        data_to_print += self.json_format_dict(self.inventory, True)
        print(data_to_print)

    def read_settings(self):
        """ Reads the settings from the apstra_aos.ini file """

        config = configparser.ConfigParser()
        config.read(os.path.dirname(os.path.realpath(__file__)) + '/apstra_aos.ini')

        # Default Values
        self.aos_blueprint = False
        self.aos_blueprint_int = True
        self.aos_username = 'admin'
        self.aos_password = 'admin'
        self.aos_server_port = 8888

        # Try to reach all parameters from File, if not available try from ENV
        # Fix: the bare ``except:`` clauses below were narrowed to
        # ``configparser.Error`` so that only missing sections/options fall
        # back to the environment; other failures are no longer hidden.
        try:
            self.aos_server = config.get('aos', 'aos_server')
        except configparser.Error:
            if 'AOS_SERVER' in os.environ.keys():
                self.aos_server = os.environ['AOS_SERVER']

        try:
            self.aos_server_port = config.get('aos', 'port')
        except configparser.Error:
            if 'AOS_PORT' in os.environ.keys():
                self.aos_server_port = os.environ['AOS_PORT']

        try:
            self.aos_username = config.get('aos', 'username')
        except configparser.Error:
            if 'AOS_USERNAME' in os.environ.keys():
                self.aos_username = os.environ['AOS_USERNAME']

        try:
            self.aos_password = config.get('aos', 'password')
        except configparser.Error:
            if 'AOS_PASSWORD' in os.environ.keys():
                self.aos_password = os.environ['AOS_PASSWORD']

        try:
            self.aos_blueprint = config.get('aos', 'blueprint')
        except configparser.Error:
            if 'AOS_BLUEPRINT' in os.environ.keys():
                self.aos_blueprint = os.environ['AOS_BLUEPRINT']

        try:
            if config.get('aos', 'blueprint_interface') in ['false', 'no']:
                self.aos_blueprint_int = False
        except configparser.Error:
            pass

    def parse_cli_args(self):
        """ Command line argument processing """

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Apstra AOS')
        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
        parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
        self.args = parser.parse_args()

    def json_format_dict(self, data, pretty=False):
        """ Converts a dict to a JSON object and dumps it as a formatted string """

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)

    def add_host_to_group(self, group, host):
        """Append ``host`` to ``group``, creating the group if needed."""

        # Cleanup group name first
        clean_group = self.cleanup_group_name(group)

        # Check if the group exist, if not initialize it
        if clean_group not in self.inventory.keys():
            self.inventory[clean_group] = {}
            self.inventory[clean_group]['hosts'] = []
            self.inventory[clean_group]['vars'] = {}

        self.inventory[clean_group]['hosts'].append(host)

    def add_var_to_host(self, host, var, value):
        """Set hostvar ``var`` = ``value`` for ``host`` under ``_meta``."""

        # Check if the host exist, if not initialize it
        if host not in self.inventory['_meta']['hostvars'].keys():
            self.inventory['_meta']['hostvars'][host] = {}

        self.inventory['_meta']['hostvars'][host][var] = value

    def add_var_to_group(self, group, var, value):
        """Set group-level variable ``var`` = ``value`` on ``group``."""

        # Cleanup group name first
        clean_group = self.cleanup_group_name(group)

        # Check if the group exist, if not initialize it
        if clean_group not in self.inventory.keys():
            self.inventory[clean_group] = {}
            self.inventory[clean_group]['hosts'] = []
            self.inventory[clean_group]['vars'] = {}

        self.inventory[clean_group]['vars'][var] = value

    def add_device_facts_to_var(self, device_name, device):
        """Copy the device 'facts' onto the hostvars of ``device_name``
        and add the host to per-os_family / per-hw_model groups."""

        # Populate variables for this host
        self.add_var_to_host(device_name,
                             'ansible_ssh_host',
                             device.value['facts']['mgmt_ipaddr'])

        self.add_var_to_host(device_name, 'id', device.id)

        # self.add_host_to_group('all', device.name)
        for key, value in device.value['facts'].items():
            self.add_var_to_host(device_name, key, value)

            if key == 'os_family':
                self.add_host_to_group(value, device_name)
            elif key == 'hw_model':
                self.add_host_to_group(value, device_name)

    def cleanup_group_name(self, group_name):
        """
        Clean up group name by :
        - Replacing all non-alphanumeric character by underscore
        - Converting to lowercase
        """

        rx = re.compile(r'\W+')
        clean_group = rx.sub('_', group_name).lower()

        return clean_group

    def add_device_status_to_var(self, device_name, device):
        """Copy the device 'status' entries onto the hostvars of
        ``device_name``."""

        # Fix: vars were previously attached to ``device.name`` even though
        # callers pass an explicit ``device_name``, which differs from
        # ``device.name`` in blueprint mode (node display name).
        if 'status' in device.value.keys():
            for key, value in device.value['status'].items():
                self.add_var_to_host(device_name, key, value)
# Run the script
if __name__ == '__main__':
    # Instantiating the class performs the full inventory build and
    # prints the resulting JSON to stdout.
    AosInventory()
| gpl-3.0 |
sonovice/sonycam | docs/conf.py | 1 | 9571 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Sony Camera Remote API for Python documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 26 18:17:38 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Sony Camera Remote API for Python'
copyright = '2015, Simon Waloschek'
author = 'Simon Waloschek'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SonyCameraRemoteAPIforPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SonyCameraRemoteAPIforPython.tex', 'Sony Camera Remote API for Python Documentation',
'Simon Waloschek', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sonycameraremoteapiforpython', 'Sony Camera Remote API for Python Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SonyCameraRemoteAPIforPython', 'Sony Camera Remote API for Python Documentation',
author, 'SonyCameraRemoteAPIforPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
opensvn/RGP_PyQt | chap03/ordereddict.py | 1 | 2958 | #!/usr/bin/env python
import bisect
class OrderedDict(object):
def __init__(self, dictionary=None):
self.__keys = []
self.__dict = {}
if dictionary is not None:
if isinstance(dictionary, OrderedDict):
self.__dict = dictionary.__dict.copy()
self.__keys = dictionary.__keys[:]
else:
self.__dict = dict(dictionary).copy()
self.__keys = sorted(self.__dict.keys())
def getAt(self, index):
return self.__dict[self.__keys[index]]
def setAt(self, index, value):
self.__dict[self.__keys[index]] = value
def __getitem__(self, key):
return self.__dict[key]
def __setitem__(self, key, value):
if key not in self.__dict:
bisect.insort_left(self.__keys, key)
self.__dict[key] = value
def __delitem__(self, key):
i = bisect.bisect_left(self.__keys, key)
del self.__keys[i]
del self.__dict[key]
def get(self, key, value=None):
return self.__dict.get(key, value)
def setdefault(self, key, value):
if key not in self.__dict:
bisect.insort_left(self.__keys, key)
return self.__dict.setdefault(key, value)
def pop(self, key, value=None):
if key not in self.__dict:
return value
i = bisect.bisect_left(self.__keys, key)
del self.__keys[i]
return self.__dict.pop(key, value)
def popitem(self):
item = self.__dict.popitem()
i = bisect.bisect_left(self.__keys, item[0])
del self.__keys[i]
return item
def has_key(self, key):
return key in self.__dict
def __contains__(self, key):
return key in self.__dict
def __len__(self):
return len(self.__dict)
def keys(self):
return self.__keys[:]
def values(self):
return [self.__dict[key] for key in self.__keys]
def items(self):
return [(key, self.__keys[key]) for key in self.__keys]
def __iter__(self):
return iter(self.__keys)
def iterkeys(self):
return iter(self.__keys)
def itervalues(self):
for key in self.__keys:
yield self.__dict[key]
def iteritems(self):
for key in self.__keys:
yield key, self.__dict[key]
def copy(self):
dictionary = OrderedDict()
dictionary.__keys = self.__keys[:]
dictionary.__dict = self.__dict.copy()
return dictionary
def clear(self):
self.__keys = []
self.__dict = {}
def __repr__(self):
pieces = []
for key in self.__keys:
pieces.append("%r: %r" % (key, self.__dict[key]))
return "OrderedDict({%s})" % ", ".join(pieces)
def main():
    """Smoke test: build OrderedDicts from plain dicts and print their reprs."""
    d = OrderedDict(dict(s=1, a=2, n=3, i=4, t=5))
    # print(...) is valid in both Python 2 (parenthesized expression) and 3,
    # replacing the Python-2-only "print expr" statement.
    print(repr(d))
    d2 = OrderedDict({2: 'a', 3: 'm', 1: 'x'})
    # repr() replaces the backtick syntax (`d2`), which was removed in Python 3.
    print(repr(d2))


if __name__ == '__main__':
    main()
| gpl-2.0 |
darkopevec/kivy | kivy/factory_registers.py | 4 | 9766 | # Auto-generated file by setup.py build_factory
from kivy.factory import Factory
r = Factory.register
r('Adapter', module='kivy.adapters.adapter')
r('ListAdapter', module='kivy.adapters.listadapter')
r('SimpleListAdapter', module='kivy.adapters.simplelistadapter')
r('DictAdapter', module='kivy.adapters.dictadapter')
r('SelectableDataItem', module='kivy.adapters.models')
r('Animation', module='kivy.animation')
r('AnimationTransition', module='kivy.animation')
r('ExceptionHandler', module='kivy.base')
r('Cache', module='kivy.cache')
r('ClockBase', module='kivy.clock')
r('ColorPicker', module='kivy.uix.colorpicker')
r('ColorWheel', module='kivy.uix.colorpicker')
r('ConfigParser', module='kivy.config')
r('EventDispatcher', module='kivy.event')
r('Observable', module='kivy.event')
r('FactoryException', module='kivy.factory')
r('Gesture', module='kivy.gesture')
r('GestureDatabase', module='kivy.gesture')
r('GesturePoint', module='kivy.gesture')
r('GestureStroke', module='kivy.gesture')
r('Parser', module='kivy.lang.parser')
r('LoaderBase', module='kivy.loader')
r('ProxyImage', module='kivy.loader')
r('LoggerHistory', module='kivy.logger')
r('NumericProperty', module='kivy.properties')
r('StringProperty', module='kivy.properties')
r('ListProperty', module='kivy.properties')
r('ObjectProperty', module='kivy.properties')
r('BooleanProperty', module='kivy.properties')
r('BoundedNumericProperty', module='kivy.properties')
r('OptionProperty', module='kivy.properties')
r('ReferenceListProperty', module='kivy.properties')
r('AliasProperty', module='kivy.properties')
r('NumericProperty', module='kivy.properties')
r('DictProperty', module='kivy.properties')
r('VariableListProperty', module='kivy.properties')
r('ConfigParserProperty', module='kivy.properties')
r('Property', module='kivy.properties')
r('SafeList', module='kivy.utils')
r('Vector', module='kivy.vector')
r('Color', module='kivy.graphics.context_instructions')
r('BindTexture', module='kivy.graphics.context_instructions')
r('PushMatrix', module='kivy.graphics.context_instructions')
r('PopMatrix', module='kivy.graphics.context_instructions')
r('Rotate', module='kivy.graphics.context_instructions')
r('Scale', module='kivy.graphics.context_instructions')
r('Translate', module='kivy.graphics.context_instructions')
r('MatrixInstruction', module='kivy.graphics.context_instructions')
r('Fbo', module='kivy.graphics.fbo')
r('Instruction', module='kivy.graphics.instructions')
r('InstructionGroup', module='kivy.graphics.instructions')
r('ContextInstruction', module='kivy.graphics.instructions')
r('VertexInstruction', module='kivy.graphics.instructions')
r('Canvas', module='kivy.graphics.instructions')
r('CanvasBase', module='kivy.graphics.instructions')
r('RenderContext', module='kivy.graphics.instructions')
r('Shader', module='kivy.graphics.shader')
r('Texture', module='kivy.graphics.texture')
r('TextureRegion', module='kivy.graphics.texture')
r('Matrix', module='kivy.graphics.transformation')
r('VBO', module='kivy.graphics.vbo')
r('VertexBatch', module='kivy.graphics.vbo')
r('StencilPush', module='kivy.graphics.stencil_instructions')
r('StencilPop', module='kivy.graphics.stencil_instructions')
r('StencilUse', module='kivy.graphics.stencil_instructions')
r('StencilUnUse', module='kivy.graphics.stencil_instructions')
r('ScissorPush', module='kivy.graphics.scissor_instructions')
r('ScissorPop', module='kivy.graphics.scissor_instructions')
r('Triangle', module='kivy.graphics.vertex_instructions')
r('Quad', module='kivy.graphics.vertex_instructions')
r('Rectangle', module='kivy.graphics.vertex_instructions')
r('RoundedRectangle', module='kivy.graphics.vertex_instructions')
r('BorderImage', module='kivy.graphics.vertex_instructions')
r('Ellipse', module='kivy.graphics.vertex_instructions')
r('Line', module='kivy.graphics.vertex_instructions')
r('SmoothLine', module='kivy.graphics.vertex_instructions')
r('Point', module='kivy.graphics.vertex_instructions')
r('Bezier', module='kivy.graphics.vertex_instructions')
r('Mesh', module='kivy.graphics.vertex_instructions')
r('Svg', module='kivy.graphics.svg')
r('MotionEventFactory', module='kivy.input.factory')
r('MotionEventProvider', module='kivy.input.provider')
r('Shape', module='kivy.input.shape')
r('ShapeRect', module='kivy.input.shape')
r('ActionBar', module='kivy.uix.actionbar')
r('ActionItem', module='kivy.uix.actionbar')
r('ActionButton', module='kivy.uix.actionbar')
r('ActionToggleButton', module='kivy.uix.actionbar')
r('ActionCheck', module='kivy.uix.actionbar')
r('ActionSeparator', module='kivy.uix.actionbar')
r('ActionDropDown', module='kivy.uix.actionbar')
r('ActionGroup', module='kivy.uix.actionbar')
r('ActionOverflow', module='kivy.uix.actionbar')
r('ActionView', module='kivy.uix.actionbar')
r('ContextualActionView', module='kivy.uix.actionbar')
r('AnchorLayout', module='kivy.uix.anchorlayout')
r('BoxLayout', module='kivy.uix.boxlayout')
r('GridLayout', module='kivy.uix.gridlayout')
r('PageLayout', module='kivy.uix.pagelayout')
r('Accordion', module='kivy.uix.accordion')
r('AccordionItem', module='kivy.uix.accordion')
r('Button', module='kivy.uix.button')
r('ButtonBehavior', module='kivy.uix.behaviors.button')
r('ToggleButtonBehavior', module='kivy.uix.behaviors.togglebutton')
r('DragBehavior', module='kivy.uix.behaviors.drag')
r('FocusBehavior', module='kivy.uix.behaviors.focus')
r('CompoundSelectionBehavior', module='kivy.uix.behaviors.compoundselection')
r('KNSpaceBehavior', module='kivy.uix.behaviors.knspace')
r('CodeNavigationBehavior', module='kivy.uix.behaviors.codenavigation')
r('EmacsBehavior', module='kivy.uix.behaviors.emacs')
r('Bubble', module='kivy.uix.bubble')
r('BubbleButton', module='kivy.uix.bubble')
r('Camera', module='kivy.uix.camera')
r('Carousel', module='kivy.uix.carousel')
r('CodeInput', module='kivy.uix.codeinput')
r('CheckBox', module='kivy.uix.checkbox')
r('DropDown', module='kivy.uix.dropdown')
r('EffectWidget', module='kivy.uix.effectwidget')
r('FloatLayout', module='kivy.uix.floatlayout')
r('RelativeLayout', module='kivy.uix.relativelayout')
r('ScatterLayout', module='kivy.uix.scatterlayout')
r('ScatterPlaneLayout', module='kivy.uix.scatterlayout')
r('FileChooserListView', module='kivy.uix.filechooser')
r('FileChooserIconView', module='kivy.uix.filechooser')
r('FileChooser', module='kivy.uix.filechooser')
r('Image', module='kivy.uix.image')
r('AsyncImage', module='kivy.uix.image')
r('Label', module='kivy.uix.label')
r('Layout', module='kivy.uix.layout')
r('AbstractView', module='kivy.uix.abstractview')
r('CompositeListItem', module='kivy.uix.listview')
r('ListItemButton', module='kivy.uix.listview')
r('ListItemLabel', module='kivy.uix.listview')
r('ListView', module='kivy.uix.listview')
r('SelectableView', module='kivy.uix.selectableview')
r('ModalView', module='kivy.uix.modalview')
r('ProgressBar', module='kivy.uix.progressbar')
r('Popup', module='kivy.uix.popup')
r('Scatter', module='kivy.uix.scatter')
r('ScatterPlane', module='kivy.uix.scatter')
r('ScrollView', module='kivy.uix.scrollview')
r('Settings', module='kivy.uix.settings')
r('Slider', module='kivy.uix.slider')
r('Screen', module='kivy.uix.screenmanager')
r('ScreenManager', module='kivy.uix.screenmanager')
r('Spinner', module='kivy.uix.spinner')
r('Splitter', module='kivy.uix.splitter')
r('StackLayout', module='kivy.uix.stacklayout')
r('StencilView', module='kivy.uix.stencilview')
r('Switch', module='kivy.uix.switch')
r('TabbedPanel', module='kivy.uix.tabbedpanel')
r('TabbedPanelHeader', module='kivy.uix.tabbedpanel')
r('TextInput', module='kivy.uix.textinput')
r('ToggleButton', module='kivy.uix.togglebutton')
r('TreeView', module='kivy.uix.treeview')
r('TreeViewLabel', module='kivy.uix.treeview')
r('TreeViewNode', module='kivy.uix.treeview')
r('ShaderTransition', module='kivy.uix.screenmanager')
r('SlideTransition', module='kivy.uix.screenmanager')
r('SwapTransition', module='kivy.uix.screenmanager')
r('WipeTransition', module='kivy.uix.screenmanager')
r('FadeTransition', module='kivy.uix.screenmanager')
r('Sandbox', module='kivy.uix.sandbox')
r('Video', module='kivy.uix.video')
r('VideoPlayer', module='kivy.uix.videoplayer')
r('VideoPlayerVolume', module='kivy.uix.videoplayer')
r('VideoPlayerStop', module='kivy.uix.videoplayer')
r('VideoPlayerPlayPause', module='kivy.uix.videoplayer')
r('VideoPlayerProgressBar', module='kivy.uix.videoplayer')
r('VKeyboard', module='kivy.uix.vkeyboard')
r('Widget', module='kivy.uix.widget')
r('WidgetException', module='kivy.uix.widget')
r('RstDocument', module='kivy.uix.rst')
r('KineticEffect', module='kivy.effects.kinetic')
r('ScrollEffect', module='kivy.effects.scroll')
r('DampedScrollEffect', module='kivy.effects.dampedscroll')
r('OpacityScrollEffect', module='kivy.effects.opacityscroll')
r('Recognizer', module='kivy.multistroke')
r('MultistrokeGesture', module='kivy.multistroke')
r('UnistrokeTemplate', module='kivy.multistroke')
r('ProgressTracker', module='kivy.multistroke')
r('GestureSurface', module='kivy.uix.gesturesurface')
r('GestureContainer', module='kivy.uix.gesturesurface')
r('RecycleViewBehavior', module='kivy.uix.recycleview.__init__')
r('RecycleView', module='kivy.uix.recycleview.__init__')
r('LayoutSelectionBehavior', module='kivy.uix.recycleview.layout')
r('RecycleLayoutManagerBehavior', module='kivy.uix.recycleview.layout')
r('RecycleDataViewBehavior', module='kivy.uix.recycleview.views')
r('RecycleDataAdapter', module='kivy.uix.recycleview.views')
r('RecycleDataModelBehavior', module='kivy.uix.recycleview.datamodel')
r('RecycleDataModel', module='kivy.uix.recycleview.datamodel')
r('RecycleLayout', module='kivy.uix.recyclelayout')
r('RecycleGridLayout', module='kivy.uix.recyclegridlayout')
r('RecycleBoxLayout', module='kivy.uix.recycleboxlayout')
| mit |
JuPeg/tools-artbio | unstable/development/msp_hmmer/hmmer.py | 4 | 1535 | """
Hmmer classes
"""
import data
import logging
import re
import string
from cgi import escape
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes import metadata
import galaxy.model
from galaxy import util
from sniff import *
log = logging.getLogger(__name__)
class Hmm( data.Text ):
    """Class for hmmer database files"""
    # Text-format HMM profile (hmmbuild output); no extra metadata of its own.
    file_ext = 'hmm'

    def init_meta( self, dataset, copy_from=None ):
        # Plain text profile: simply delegate metadata setup to data.Text.
        data.Text.init_meta( self, dataset, copy_from=copy_from )
class HmmPressed( Hmm ):
    """Class describing a hmmer database produced by hmmpress"""
    file_ext = 'hmmPressed'
    # Composite datatype: the dataset is a folder of files, not a single file.
    composite_type='basic'
    MetadataElement( readonly=True, optional=True, visible=False, no_value=0 )

    def __init__(self,**kwd):
        # NOTE(review): deliberately calls data.Data.__init__ (skipping
        # Hmm/data.Text) — presumably because composite datatypes are not
        # plain text; confirm against Galaxy datatype conventions.
        data.Data.__init__(self, **kwd)
        # hmmpress writes four auxiliary index files alongside the profile
        # (binary models, SSI index, MSV filter part, remainder of profiles).
        self.add_composite_file('hmm.h3m')
        self.add_composite_file('hmm.h3i')
        self.add_composite_file('hmm.h3f')
        self.add_composite_file('hmm.h3p')

    def set_peek( self, dataset, is_multi_byte=False ):
        """Set a fixed peek/blurb; a composite dataset has no single file to preview."""
        if not dataset.dataset.purged:
            dataset.peek = "Folder of multiple files"
            dataset.blurb = "Folder of multiple files"
        else:
            dataset.peek = 'file does not exist'
            dataset.blurb = 'file purged from disk'

    def display_peek( self, dataset ):
        """Return the stored peek, falling back to a generic label."""
        try:
            return dataset.peek
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
        # not silently swallowed; any missing/broken peek still falls back.
        except Exception:
            return "Folder of multiple files"

    def get_mime(self):
        """Returns the mime type of the datatype"""
        return 'text/plain'
| mit |
openiitbombayx/edx-platform | lms/djangoapps/notes/views.py | 142 | 1741 | """
Views to support the edX Notes feature.
"""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_with_access
from courseware.tabs import EnrolledTab
from notes.models import Note
from notes.utils import notes_enabled_for_course
from xmodule.annotator_token import retrieve_token
from django.utils.translation import ugettext_noop
@login_required
def notes(request, course_id):
    """Render the "My Notes" page listing the requesting student's notes.

    Raises Http404 when the notes feature is not enabled for the course.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    if not notes_enabled_for_course(course):
        raise Http404

    student = request.user
    # Newest notes first; ties broken by annotated URI.
    student_notes = Note.objects.filter(
        course_id=course_key, user=student
    ).order_by('-created', 'uri')

    context = {
        'course': course,
        'notes': student_notes,
        'student': student,
        'storage': course.annotation_storage_url,
        'token': retrieve_token(student.email, course.annotation_token_secret),
        'default_tab': 'myNotes',
    }
    return render_to_response('notes.html', context)
class NotesTab(EnrolledTab):
    """Course tab that links to the student's personal notes page."""

    type = 'notes'
    title = ugettext_noop("My Notes")
    view_name = "notes"

    @classmethod
    def is_enabled(cls, course, user=None):
        """Show the tab only for enrolled users, when the platform feature
        flag is set and the course lists "notes" as an advanced module."""
        if not super(NotesTab, cls).is_enabled(course, user):
            return False
        feature_on = settings.FEATURES.get('ENABLE_STUDENT_NOTES')
        return feature_on and "notes" in course.advanced_modules
cjcjameson/gpdb | gpMgmt/bin/gppylib/test/regress/test_regress_gpssh.py | 18 | 1446 | #!/usr/bin/env python
import os, signal, time, re
import unittest
import psutil
from subprocess import PIPE
class GpsshTestCase(unittest.TestCase):
    """Regression test: gpssh must not leave stranded ssh children on SIGHUP."""

    def searchForProcessOrChildren(self):
        """Return the count of stranded ssh processes owned by this user.

        "Stranded" means reparented to init (ppid == 1), i.e. orphaned by
        a dead gpssh parent.
        """
        # BUGFIX: compare against the *effective* uid, matching the
        # p.uids().effective value tested below (os.getuid() returns the
        # real uid, which can differ under setuid).
        euid = os.geteuid()
        count = 0
        for p in psutil.process_iter():
            try:
                if p.uids().effective != euid:
                    continue
                if not re.search('ssh', ' '.join(p.cmdline())):
                    continue
                if p.ppid() != 1:
                    continue
            except psutil.Error:
                # Process vanished (or became inaccessible) mid-scan; skip it.
                continue
            count += 1
        return count

    def test00_gpssh_sighup(self):
        """Verify that gppsh handles sighup
        and terminates cleanly.
        """
        before_count = self.searchForProcessOrChildren()
        p = psutil.Popen("gpssh -h localhost", shell=True, stdin=PIPE,
                         stdout=PIPE, stderr=PIPE)
        pid = p.pid
        time.sleep(3)
        try:
            os.kill(int(pid), signal.SIGHUP)
        except OSError:
            # gpssh may already have exited; nothing left to signal.
            pass
        max_attempts = 6
        for i in range(max_attempts):
            after_count = self.searchForProcessOrChildren()
            error_count = after_count - before_count
            if not error_count:
                # BUGFIX: stop polling on success instead of sleeping out
                # the whole retry budget.
                break
            if (i + 1) == max_attempts:
                self.fail("Found %d new stranded gpssh processes after issuing sig HUP" % error_count)
            time.sleep(.5)
# Allow running this regression test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
vponomaryov/rally | rally/plugins/openstack/scenarios/ceilometer/utils.py | 1 | 9012 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
class CeilometerScenario(base.Scenario):
    """Base class for Ceilometer scenarios with basic atomic actions."""

    RESOURCE_NAME_PREFIX = "rally_ceilometer_"

    def _get_alarm_dict(self, **kwargs):
        """Prepare and return an alarm dict for creating an alarm.

        The generated random name is used both as alarm_id and name;
        caller-supplied kwargs override any of the defaults.

        :param kwargs: optional parameters to create alarm
        :returns: alarm dictionary used to create an alarm
        """
        alarm_id = self._generate_random_name()
        alarm = {"alarm_id": alarm_id,
                 "name": alarm_id,
                 "description": "Test Alarm"}
        alarm.update(kwargs)
        return alarm

    @base.atomic_action_timer("ceilometer.list_alarms")
    def _list_alarms(self, alarm_id=None):
        """List alarms.

        List alarm matching alarm_id. It fetches all alarms
        if alarm_id is None.

        :param alarm_id: specifies id of the alarm
        :returns: list of alarms, or a single alarm when alarm_id is given
        """
        if alarm_id:
            return self.clients("ceilometer").alarms.get(alarm_id)
        else:
            return self.clients("ceilometer").alarms.list()

    @base.atomic_action_timer("ceilometer.create_alarm")
    def _create_alarm(self, meter_name, threshold, kwargs):
        """Create an alarm.

        :param meter_name: specifies meter name of the alarm
        :param threshold: specifies alarm threshold
        :param kwargs: contains optional features of alarm to be created
        :returns: alarm
        """
        # NOTE: ``kwargs`` is a plain dict parameter (not **kwargs) and is
        # mutated in place before being expanded into the alarm dict.
        kwargs.update({"meter_name": meter_name,
                       "threshold": threshold})
        alarm_dict = self._get_alarm_dict(**kwargs)
        alarm = self.clients("ceilometer").alarms.create(**alarm_dict)
        return alarm

    @base.atomic_action_timer("ceilometer.delete_alarm")
    def _delete_alarm(self, alarm_id):
        """Delete an alarm.

        :param alarm_id: specifies id of the alarm
        """
        self.clients("ceilometer").alarms.delete(alarm_id)

    @base.atomic_action_timer("ceilometer.update_alarm")
    def _update_alarm(self, alarm_id, alarm_dict_delta):
        """Update an alarm.

        :param alarm_id: specifies id of the alarm
        :param alarm_dict_delta: features of alarm to be updated
        """
        self.clients("ceilometer").alarms.update(alarm_id, **alarm_dict_delta)

    @base.atomic_action_timer("ceilometer.get_alarm_history")
    def _get_alarm_history(self, alarm_id):
        """Assemble the alarm history requested.

        :param alarm_id: specifies id of the alarm
        :returns: list of alarm changes
        """
        return self.clients("ceilometer").alarms.get_history(alarm_id)

    @base.atomic_action_timer("ceilometer.get_alarm_state")
    def _get_alarm_state(self, alarm_id):
        """Get the state of the alarm.

        :param alarm_id: specifies id of the alarm
        :returns: state of the alarm
        """
        return self.clients("ceilometer").alarms.get_state(alarm_id)

    @base.atomic_action_timer("ceilometer.set_alarm_state")
    def _set_alarm_state(self, alarm, state, timeout):
        """Set the state of the alarm.

        :param alarm: alarm instance
        :param state: an alarm state to be set
        :param timeout: The number of seconds for which to attempt a
                        successful check of the alarm state.
        :returns: alarm in the set state
        """
        self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state)
        # Block (polling every second) until the alarm actually reports the
        # requested state, or the timeout expires.
        return bench_utils.wait_for(alarm,
                                    is_ready=bench_utils.resource_is(state),
                                    update_resource=bench_utils
                                    .get_from_manager(),
                                    timeout=timeout, check_interval=1)

    @base.atomic_action_timer("ceilometer.get_meters")
    def _list_meters(self):
        """Get list of user's meters."""
        return self.clients("ceilometer").meters.list()

    @base.atomic_action_timer("ceilometer.list_resources")
    def _list_resources(self):
        """List all resources.

        :returns: list of all resources
        """
        return self.clients("ceilometer").resources.list()

    @base.atomic_action_timer("ceilometer.list_samples")
    def _list_samples(self):
        """List all Samples.

        :returns: list of all samples
        """
        return self.clients("ceilometer").samples.list()

    @base.atomic_action_timer("ceilometer.get_resource")
    def _get_resource(self, resource_id):
        """Retrieve details about one resource."""
        return self.clients("ceilometer").resources.get(resource_id)

    @base.atomic_action_timer("ceilometer.get_stats")
    def _get_stats(self, meter_name):
        """Get stats for a specific meter.

        :param meter_name: Name of ceilometer meter
        """
        return self.clients("ceilometer").statistics.list(meter_name)

    @base.atomic_action_timer("ceilometer.create_meter")
    def _create_meter(self, **kwargs):
        """Create a new meter.

        A meter is created implicitly by posting a single sample with a
        randomly generated counter name.

        :param kwargs: contains the optional attributes for meter creation
        :returns: the sample posted for the newly created meter
        """
        name = self._generate_random_name()
        samples = self.clients("ceilometer").samples.create(
            counter_name=name, **kwargs)
        return samples[0]

    @base.atomic_action_timer("ceilometer.query_alarms")
    def _query_alarms(self, filter, orderby, limit):
        """Query alarms with specific parameters.

        If no input params are provided, it returns all the results
        in the database.

        :param filter: optional filter query (name mirrors the API field
                       and shadows the builtin ``filter``)
        :param orderby: optional param for specifying ordering of results
        :param limit: optional param for maximum number of results returned
        :returns: queried alarms
        """
        return self.clients("ceilometer").query_alarms.query(
            filter, orderby, limit)

    @base.atomic_action_timer("ceilometer.query_alarm_history")
    def _query_alarm_history(self, filter, orderby, limit):
        """Query history of an alarm.

        If no input params are provided, it returns all the results
        in the database.

        :param filter: optional filter query
        :param orderby: optional param for specifying ordering of results
        :param limit: optional param for maximum number of results returned
        :returns: alarm history
        """
        return self.clients("ceilometer").query_alarm_history.query(
            filter, orderby, limit)

    @base.atomic_action_timer("ceilometer.create_sample")
    def _create_sample(self, counter_name, counter_type, counter_unit,
                       counter_volume, resource_id=None, **kwargs):
        """Create a Sample with specified parameters.

        :param counter_name: specifies name of the counter
        :param counter_type: specifies type of the counter
        :param counter_unit: specifies unit of the counter
        :param counter_volume: specifies volume of the counter
        :param resource_id: specifies resource id for the sample created;
                            a random one is generated when omitted
        :param kwargs: contains optional parameters for creating a sample
        :returns: created sample
        """
        kwargs.update({"counter_name": counter_name,
                       "counter_type": counter_type,
                       "counter_unit": counter_unit,
                       "counter_volume": counter_volume,
                       "resource_id": resource_id if resource_id
                       else self._generate_random_name(
                           prefix="rally_ctx_resource_")})
        return self.clients("ceilometer").samples.create(**kwargs)

    @base.atomic_action_timer("ceilometer.query_samples")
    def _query_samples(self, filter, orderby, limit):
        """Query samples with specified parameters.

        If no input params are provided, it returns all the results
        in the database.

        :param filter: optional filter query
        :param orderby: optional param for specifying ordering of results
        :param limit: optional param for maximum number of results returned
        :returns: queried samples
        """
        return self.clients("ceilometer").query_samples.query(
            filter, orderby, limit)
tinchoss/Python_Android | python/src/Lib/UserList.py | 327 | 3644 | """A more or less complete user-defined wrapper around list objects."""
import collections
class UserList(collections.MutableSequence):
    """A list-like wrapper storing its contents in the ``data`` attribute.

    NOTE: Python 2 era code. It relies on ``collections.MutableSequence``
    (moved to ``collections.abc`` in Python 3), on ``__cmp__``, and on the
    ``__getslice__``/``__setslice__``/``__delslice__`` hooks that only
    Python 2 invokes. Subclass and override methods to customize behavior.
    """
    def __init__(self, initlist=None):
        self.data = []
        if initlist is not None:
            # XXX should this accept an arbitrary sequence?
            if type(initlist) == type(self.data):
                self.data[:] = initlist
            elif isinstance(initlist, UserList):
                self.data[:] = initlist.data[:]
            else:
                self.data = list(initlist)
    def __repr__(self): return repr(self.data)
    # Rich comparisons delegate to the underlying list, unwrapping a
    # UserList operand first so UserList-vs-UserList compares contents.
    def __lt__(self, other): return self.data < self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data > self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cast(self, other):
        # Unwrap another UserList to its backing list; pass anything else through.
        if isinstance(other, UserList): return other.data
        else: return other
    def __cmp__(self, other):
        # Python 2 only: three-way comparison fallback.
        return cmp(self.data, self.__cast(other))
    __hash__ = None # Mutable sequence, so not hashable
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    def __getitem__(self, i): return self.data[i]
    def __setitem__(self, i, item): self.data[i] = item
    def __delitem__(self, i): del self.data[i]
    # The three slice dunders below are only called by Python 2; negative
    # indices are clamped to 0 (the interpreter has already added len(self)).
    def __getslice__(self, i, j):
        i = max(i, 0); j = max(j, 0)
        return self.__class__(self.data[i:j])
    def __setslice__(self, i, j, other):
        i = max(i, 0); j = max(j, 0)
        if isinstance(other, UserList):
            self.data[i:j] = other.data
        elif isinstance(other, type(self.data)):
            self.data[i:j] = other
        else:
            self.data[i:j] = list(other)
    def __delslice__(self, i, j):
        i = max(i, 0); j = max(j, 0)
        del self.data[i:j]
    # Concatenation returns a new instance of the (possibly subclassed) type.
    def __add__(self, other):
        if isinstance(other, UserList):
            return self.__class__(self.data + other.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(self.data + other)
        else:
            return self.__class__(self.data + list(other))
    def __radd__(self, other):
        if isinstance(other, UserList):
            return self.__class__(other.data + self.data)
        elif isinstance(other, type(self.data)):
            return self.__class__(other + self.data)
        else:
            return self.__class__(list(other) + self.data)
    def __iadd__(self, other):
        # In-place concatenation mutates self.data and must return self.
        if isinstance(other, UserList):
            self.data += other.data
        elif isinstance(other, type(self.data)):
            self.data += other
        else:
            self.data += list(other)
        return self
    def __mul__(self, n):
        return self.__class__(self.data*n)
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        return self
    # Standard list API, delegated straight to the backing list.
    def append(self, item): self.data.append(item)
    def insert(self, i, item): self.data.insert(i, item)
    def pop(self, i=-1): return self.data.pop(i)
    def remove(self, item): self.data.remove(item)
    def count(self, item): return self.data.count(item)
    def index(self, item, *args): return self.data.index(item, *args)
    def reverse(self): self.data.reverse()
    def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
    def extend(self, other):
        if isinstance(other, UserList):
            self.data.extend(other.data)
        else:
            self.data.extend(other)
| apache-2.0 |
karimbahgat/PythonGis | (sandbox,tobemerged)/pythongis/raster/manager.py | 1 | 9512 | import PIL, PIL.Image
import pyagg
def resample(raster, width=None, height=None, cellwidth=None, cellheight=None):
    """Return a resampled copy of *raster*.

    Either target pixel dimensions (width and height) or target geographic
    cell dimensions (cellwidth and cellheight) must be given; the other
    pair is derived from it.  Grids are resampled with nearest-neighbour
    interpolation, so no new cell values are invented.

    :param raster: source raster; it is copied, not modified.
    :param width: desired raster width in pixels (used with height).
    :param height: desired raster height in pixels (used with width).
    :param cellwidth: desired geographic cell width (used with cellheight).
    :param cellheight: desired geographic cell height (used with cellwidth).
    :returns: the resampled raster copy.
    :raises Exception: if neither complete pair of options is given.
    """
    raster = raster.copy()
    # Drop any cached mask so it gets recreated for the resized raster.
    # Guarded because copy() may not have produced a cached mask at all.
    try:
        del raster._cached_mask
    except AttributeError:
        pass
    if width and height:
        # calculate new cell dimensions based on the new raster size
        widthfactor = raster.width / float(width)
        heightfactor = raster.height / float(height)
        oldcellwidth, oldcellheight = raster.info["cellwidth"], raster.info["cellheight"]
        newcellwidth, newcellheight = oldcellwidth * widthfactor, oldcellheight * heightfactor
        # resample each grid
        for grid in raster:
            grid.img = grid.img.resize((width, height), PIL.Image.NEAREST)
            # update cells access
            grid.cells = grid.img.load()
        # remember new celldimensions
        raster.info["cellwidth"] = newcellwidth
        raster.info["cellheight"] = newcellheight
        raster.update_geotransform()
        return raster
    elif cellwidth and cellheight:
        # calculate new raster size based on the new cell dimensions
        widthfactor = raster.info["cellwidth"] / float(cellwidth)
        heightfactor = raster.info["cellheight"] / float(cellheight)
        oldwidth, oldheight = raster.width, raster.height
        newwidth, newheight = int(round(oldwidth * widthfactor)), int(round(oldheight * heightfactor))
        # Cell sizes may be negative (geo y axis runs opposite the image
        # y axis), but pixel counts must always be positive.
        newwidth = abs(newwidth)
        newheight = abs(newheight)
        # resample each grid
        for grid in raster:
            grid.img = grid.img.resize((newwidth, newheight), PIL.Image.NEAREST)
            # update cells access
            grid.cells = grid.img.load()
        # remember new celldimensions
        raster.info["cellwidth"] = cellwidth
        raster.info["cellheight"] = cellheight
        raster.update_geotransform()
        return raster
    else:
        raise Exception("To rescale raster, either width and height or cellwidth and cellheight must be specified.")
def combine_bands():
    """Placeholder: merge several single-band rasters into one multiband
    raster. Not implemented yet."""
def split_bands():
    """Placeholder: split a multiband raster into one single-band raster
    per band. Not implemented yet."""
def align_rasters(*rasters):
    """Position several rasters onto a shared coordinate bounding box.

    Computes the union of all the rasters' bounding boxes (respecting the
    axis direction of the first raster) and repositions every raster into
    that box, sizing the output from the first raster's pixel density.

    :param rasters: two or more raster instances.
    :returns: list of (positioned_raster, mask) tuples, one per input,
              in the same order as the arguments.
    """
    # get coord bbox containing all rasters
    xlefts, ytops, xrights, ybottoms = zip(*[rast.bbox for rast in rasters])
    # Respect the axis orientation of the first raster when taking the
    # union of the bounding boxes (x may increase either way, same for y).
    if xlefts[0] < xrights[0]:
        xleft, xright = min(xlefts), max(xrights)
    else:
        xleft, xright = max(xlefts), min(xrights)
    if ytops[0] > ybottoms[0]:
        ytop, ybottom = max(ytops), min(ybottoms)
    else:
        ytop, ybottom = min(ytops), max(ybottoms)
    # get the required pixel dimensions (based on first raster, arbitrary)
    xs, ys = (xleft, xright), (ytop, ybottom)
    coordwidth, coordheight = max(xs) - min(xs), max(ys) - min(ys)
    rast = rasters[0]
    orig_xs, orig_ys = (rast.bbox[0], rast.bbox[2]), (rast.bbox[1], rast.bbox[3])
    orig_coordwidth, orig_coordheight = max(orig_xs) - min(orig_xs), max(orig_ys) - min(orig_ys)
    # Scale the first raster's pixel size by how much larger the union
    # bbox is than its own bbox.
    widthratio, heightratio = coordwidth / orig_coordwidth, coordheight / orig_coordheight
    reqwidth = int(round(rast.width * widthratio))
    reqheight = int(round(rast.height * heightratio))
    # position each raster into the shared coordinate bbox
    aligned = []
    for rast in rasters:
        coordbbox = [xleft, ytop, xright, ybottom]
        positioned, mask = rast.positioned(reqwidth, reqheight, coordbbox)
        aligned.append((positioned, mask))
    return aligned
def mosaic(*rasters, **kwargs):
    """
    Mosaic rasters covering different areas together into one file.

    Parts of the rasters may overlap each other; where they do, the value
    from the later raster in the argument list wins, because each aligned
    raster is pasted on top of the running result using its mask.

    :param rasters: two or more raster instances to combine.
    :returns: a new raster built from a copy of the first aligned raster
              with every subsequent raster pasted on top (first band only).
    """
    # align first band of all rasters onto a shared coordinate bbox
    aligned = align_rasters(*rasters)
    firstalign, _firstmask = aligned[0]
    merged = firstalign.copy()
    # paste each subsequent raster on top, masked to its valid cells
    for rast, mask in aligned[1:]:
        merged.grids[0].img.paste(rast.grids[0].img, (0, 0), mask)
    return merged
def georeference(rasterdata, controlpointpairs):
    """Placeholder: warp *rasterdata* so the control-point pairs match.

    Intended approach: turn each control-point pair into a degenerate
    rectangle and feed those to PIL's mesh transform. Not implemented.
    """
def from_vector(vectordata, valuefield, cellwidth, cellheight, **options):
    """Rasterize *vectordata*, burning the value of *valuefield* into
    cells of the given geographic size.

    :param vectordata: vector layer exposing a .bbox and iterable features.
    :param valuefield: feature field whose value is drawn into the raster.
    :param cellwidth: geographic width of each output cell.
    :param cellheight: geographic height of each output cell.
    :param options: passed through to the Raster constructor.
    :returns: a single-band raster of the drawn values.
    """
    # ie rasterize
    # calculate required raster size from cell dimensions
    vectorbbox = vectordata.bbox
    xmin,ymin,xmax,ymax = vectorbbox
    oldwidth, oldheight = xmax-xmin, ymax-ymin
    newwidth, newheight = oldwidth/float(cellwidth), oldheight/float(cellheight)
    newwidth, newheight = int(round(newwidth)), int(round(newheight))
    # simply create pyagg image with specified image size
    canvas = pyagg.Canvas(newwidth, newheight)
    # set the coordspace to vectordata bbox
    canvas.custom_space(*vectorbbox)
    # draw the vector data
    for feat in vectordata:
        geotype = feat.geometry["type"]
        # NOTE: NEED TO MAKE SURE IS NUMBER, AND PYAGG ONLY TAKES RGB VALID NRS
        # FIX...
        value = feat[valuefield]
        # polygon, basic black fill, no outline
        if "Polygon" in geotype:
            canvas.draw_geojson(feat.geometry, fillcolor=(value,0,0), outlinecolor=None)
        # line, 1 pixel line thickness
        elif "LineString" in geotype:
            canvas.draw_geojson(feat.geometry, fillcolor=(value,0,0), fillsize=1)
        # point, 1 pixel square size
        elif "Point" in geotype:
            canvas.draw_geojson(feat.geometry, fillcolor=(value,0,0), fillsize=1)
    # create raster from the drawn image (only first band)
    img = canvas.get_image().split()[0]
    # NOTE(review): `pg` is never defined or imported in this module, so
    # this line raises NameError at runtime -- confirm the intended import
    # (presumably the parent pythongis package).
    raster = pg.Raster(image=img, cellwidth=cellwidth, cellheight=cellheight,
                       **options)
    return raster
def to_vector(raster, vectortype="point"):
    """Placeholder: vectorize *raster* into a vector dataset.

    Sketch of the intended algorithm: either emit one polygon per cell,
    using the cell's coordinate bbox as geometry and the grid values at
    that cell (e.g. [r, g, b]) as the row; or, for a single band,
    recursively follow neighbouring cells of equal value, collect the
    coordinates of cells that border a different value (edge cells),
    sort them by increasing x and y (counter-clockwise) and add one
    polygon per region. Not implemented yet.
    """
def clip_keep(raster, clipdata):
    """Placeholder: clip *raster*, keeping only the cells covered by
    *clipdata* (a vector table or another raster). Not implemented.
    """
    # NOTE(review): `pg` is never defined or imported in this module, so
    # these isinstance checks raise NameError if this stub is called --
    # confirm the intended import.
    if isinstance(clipdata, pg.GeoTable):
        # rasterize vector data using same gridsize as main raster
        # create blank image
        # paste main raster onto blank image using rasterized as mask
        pass
    elif isinstance(clipdata, pg.Raster):
        # create blank image
        # paste raster onto blank image using clip raster as mask
        pass
def clip_exclude(data):
    """Placeholder: clip away (exclude) the area covered by *data*.
    Not implemented yet."""
| mit |
amyvmiwei/kbengine | kbe/src/lib/python/Lib/test/test_codecmaps_kr.py | 16 | 1370 | #
# test_codecmaps_kr.py
# Codec mapping tests for ROK encodings
#
from test import support
from test import multibytecodec_support
import unittest
class TestCP949Map(multibytecodec_support.TestBase_Mapping,
                   unittest.TestCase):
    # Round-trip the cp949 codec against the vendor mapping table
    # published by the Unicode Consortium.
    encoding = 'cp949'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT' \
                 '/WINDOWS/CP949.TXT'
class TestEUCKRMap(multibytecodec_support.TestBase_Mapping,
                   unittest.TestCase):
    # Validate the euc_kr codec against the FreeBSD i18n mapping table.
    encoding = 'euc_kr'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-KR.TXT'
    # A4D4 HANGUL FILLER indicates the begin of 8-bytes make-up sequence.
    pass_enctest = [(b'\xa4\xd4', '\u3164')]
    pass_dectest = [(b'\xa4\xd4', '\u3164')]
class TestJOHABMap(multibytecodec_support.TestBase_Mapping,
                   unittest.TestCase):
    # Validate the johab codec against the (obsolete) KSC mapping table.
    encoding = 'johab'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/' \
                 'KSC/JOHAB.TXT'
    # KS X 1001 assigns 0x5c to WON SIGN, but in the early 90s -- the only
    # era when johab was widely used -- most software implemented it as
    # REVERSE SOLIDUS. So we ignore the standard here.
    pass_enctest = [(b'\\', '\u20a9')]
    pass_dectest = [(b'\\', '\u20a9')]
def test_main():
    # Delegate to the regrtest helper, which discovers and runs all
    # TestCase classes defined in this module.
    support.run_unittest(__name__)

if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
pombredanne/MOG | nova/volume/encryptors/cryptsetup.py | 11 | 3964 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova.volume.encryptors import base
LOG = logging.getLogger(__name__)
class CryptsetupEncryptor(base.VolumeEncryptor):
    """A VolumeEncryptor based on dm-crypt.

    This VolumeEncryptor uses dm-crypt to encrypt the specified volume.
    """

    def __init__(self, connection_info, **kwargs):
        super(CryptsetupEncryptor, self).__init__(connection_info, **kwargs)

        # the device's path as given to libvirt -- e.g., /dev/disk/by-path/...
        self.symlink_path = connection_info['data']['device_path']

        # a unique name for the volume -- e.g., the iSCSI participant name
        self.dev_name = self.symlink_path.split('/')[-1]

        # the device's actual path on the compute host -- e.g., /dev/sd_
        self.dev_path = os.path.realpath(self.symlink_path)

    def _get_passphrase(self, key):
        """Derive the cryptsetup passphrase from the raw key bytes.

        Each byte is rendered as exactly two lowercase hex digits.

        BUG FIX: the previous implementation used ``hex(x)`` with the
        '0x' prefix stripped, which dropped leading zeros (0x05 -> '5'),
        producing shorter, ambiguous passphrases -- e.g. the keys
        [5, 16] and [81, 0] both mapped to '510'. NOTE(review): volumes
        encrypted with a passphrase from the old derivation must be
        opened/migrated with the legacy value before relying on this
        corrected form.

        :param key: iterable of key byte values (ints in 0..255)
        :returns: hex-string passphrase
        """
        return ''.join('%02x' % x for x in key)

    def _open_volume(self, passphrase, **kwargs):
        """Opens the LUKS partition on the volume using the specified
        passphrase.

        :param passphrase: the passphrase used to access the volume
        :param kwargs: optional 'cipher' and 'key_size' forwarded to
                       cryptsetup
        """
        LOG.debug(_("opening encrypted volume %s"), self.dev_path)

        # NOTE(joel-coffman): cryptsetup will strip trailing newlines from
        # input specified on stdin unless --key-file=- is specified.
        cmd = ["cryptsetup", "create", "--key-file=-"]

        cipher = kwargs.get("cipher", None)
        if cipher is not None:
            cmd.extend(["--cipher", cipher])

        key_size = kwargs.get("key_size", None)
        if key_size is not None:
            cmd.extend(["--key-size", key_size])

        cmd.extend([self.dev_name, self.dev_path])

        utils.execute(*cmd, process_input=passphrase,
                      check_exit_code=True, run_as_root=True)

    def attach_volume(self, context, **kwargs):
        """Shadows the device and passes an unencrypted version to the
        instance.

        Transparent disk encryption is achieved by mounting the volume via
        dm-crypt and passing the resulting device to the instance. The
        instance is unaware of the underlying encryption due to modifying the
        original symbolic link to refer to the device mounted by dm-crypt.

        :param context: request context used to fetch the encryption key
        """
        key = self._get_key(context).get_encoded()
        passphrase = self._get_passphrase(key)

        self._open_volume(passphrase, **kwargs)

        # modify the original symbolic link to refer to the decrypted device
        utils.execute('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True)

    def _close_volume(self, **kwargs):
        """Closes the device (effectively removes the dm-crypt mapping)."""
        LOG.debug(_("closing encrypted volume %s"), self.dev_path)
        utils.execute('cryptsetup', 'remove', self.dev_name,
                      run_as_root=True, check_exit_code=True)

    def detach_volume(self, **kwargs):
        """Removes the dm-crypt mapping for the device."""
        self._close_volume(**kwargs)
| apache-2.0 |
swiftstack/swift | swift/common/middleware/symlink.py | 2 | 35197 | # Copyright (c) 2010-2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Symlink Middleware
Symlinks are objects stored in Swift that contain a reference to another
object (hereinafter, this is called "target object"). They are analogous to
symbolic links in Unix-like operating systems. The existence of a symlink
object does not affect the target object in any way. An important use case is
to use a path in one container to access an object in a different container,
with a different policy. This allows policy cost/performance trade-offs to be
made on individual objects.
Clients create a Swift symlink by performing a zero-length PUT request
with the header ``X-Symlink-Target: <container>/<object>``. For a cross-account
symlink, the header ``X-Symlink-Target-Account: <account>`` must be included.
If omitted, it is inserted automatically with the account of the symlink
object in the PUT request process.
Symlinks must be zero-byte objects. Attempting to PUT a symlink with a
non-empty request body will result in a 400-series error. Also, POST with
``X-Symlink-Target`` header always results in a 400-series error. The target
object need not exist at symlink creation time.
Clients may optionally include a ``X-Symlink-Target-Etag: <etag>`` header
during the PUT. If present, this will create a "static symlink" instead of a
"dynamic symlink". Static symlinks point to a specific object rather than a
specific name. They do this by using the value set in their
``X-Symlink-Target-Etag`` header when created to verify it still matches the
ETag of the object they're pointing at on a GET. In contrast to a dynamic
symlink the target object referenced in the ``X-Symlink-Target`` header must
exist and its ETag must match the ``X-Symlink-Target-Etag`` or the symlink
creation will return a client error.
A GET/HEAD request to a symlink will result in a request to the target
object referenced by the symlink's ``X-Symlink-Target-Account`` and
``X-Symlink-Target`` headers. The response of the GET/HEAD request will contain
a ``Content-Location`` header with the path location of the target object. A
GET/HEAD request to a symlink with the query parameter ``?symlink=get`` will
result in the request targeting the symlink itself.
A symlink can point to another symlink. Chained symlinks will be traversed
until the target is not a symlink. If the number of chained symlinks exceeds
the limit ``symloop_max`` an error response will be produced. The value of
``symloop_max`` can be defined in the symlink config section of
`proxy-server.conf`. If not specified, the default ``symloop_max`` value is 2.
If a value less than 1 is specified, the default value will be used.
If a static symlink (i.e. a symlink created with a ``X-Symlink-Target-Etag``
header) targets another static symlink, both of the ``X-Symlink-Target-Etag``
headers must match the target object for the GET to succeed. If a static
symlink targets a dynamic symlink (i.e. a symlink created without a
``X-Symlink-Target-Etag`` header) then the ``X-Symlink-Target-Etag`` header of
the static symlink must be the Etag of the zero-byte object. If a symlink with
a ``X-Symlink-Target-Etag`` targets a large object manifest it must match the
ETag of the manifest (e.g. the ETag as returned by ``multipart-manifest=get``
or value in the ``X-Manifest-Etag`` header).
A HEAD/GET request to a symlink object behaves as a normal HEAD/GET request
to the target object. Therefore issuing a HEAD request to the symlink will
return the target metadata, and issuing a GET request to the symlink will
return the data and metadata of the target object. To return the symlink
metadata (with its empty body) a GET/HEAD request with the ``?symlink=get``
query parameter must be sent to a symlink object.
A POST request to a symlink will result in a 307 Temporary Redirect response.
The response will contain a ``Location`` header with the path of the target
object as the value. The request is never redirected to the target object by
Swift. Nevertheless, the metadata in the POST request will be applied to the
symlink because object servers cannot know for sure if the current object is a
symlink or not in eventual consistency.
A symlink's ``Content-Type`` is completely independent from its target. As a
convenience Swift will automatically set the ``Content-Type`` on a symlink PUT
if not explicitly set by the client. If the client sends a
``X-Symlink-Target-Etag`` Swift will set the symlink's ``Content-Type`` to that
of the target, otherwise it will be set to ``application/symlink``. You can
review a symlink's ``Content-Type`` using the ``?symlink=get`` interface. You
can change a symlink's ``Content-Type`` using a POST request. The symlink's
``Content-Type`` will appear in the container listing.
A DELETE request to a symlink will delete the symlink itself. The target
object will not be deleted.
A COPY request, or a PUT request with a ``X-Copy-From`` header, to a symlink
will copy the target object. The same request to a symlink with the query
parameter ``?symlink=get`` will copy the symlink itself.
An OPTIONS request to a symlink will respond with the options for the symlink
only; the request will not be redirected to the target object. Please note that
if the symlink's target object is in another container with CORS settings, the
response will not reflect the settings.
Tempurls can be used to GET/HEAD symlink objects, but PUT is not allowed and
will result in a 400-series error. The GET/HEAD tempurls honor the scope of
the tempurl key. Container tempurl will only work on symlinks where the target
container is the same as the symlink. In case a symlink targets an object
in a different container, a GET/HEAD request will result in a 401 Unauthorized
error. The account level tempurl will allow cross-container symlinks, but not
cross-account symlinks.
If a symlink object is overwritten while it is in a versioned container, the
symlink object itself is versioned, not the referenced object.
A GET request with query parameter ``?format=json`` to a container which
contains symlinks will respond with additional information ``symlink_path``
for each symlink object in the container listing. The ``symlink_path`` value
is the target path of the symlink. Clients can differentiate symlinks and
other objects by this function. Note that responses in any other format
(e.g. ``?format=xml``) won't include ``symlink_path`` info. If a
``X-Symlink-Target-Etag`` header was included on the symlink, JSON container
listings will include that value in a ``symlink_etag`` key and the target
object's ``Content-Length`` will be included in the key ``symlink_bytes``.
If a static symlink targets a static large object manifest it will carry
forward the SLO's size and slo_etag in the container listing using the
``symlink_bytes`` and ``slo_etag`` keys. However, manifests created before
swift v2.12.0 (released Dec 2016) do not contain enough metadata to propagate
the extra SLO information to the listing. Clients may recreate the manifest
(COPY w/ ``?multipart-manifest=get``) before creating a static symlink to add
the requisite metadata.
Errors
* PUT with the header ``X-Symlink-Target`` with non-zero Content-Length
will produce a 400 BadRequest error.
* POST with the header ``X-Symlink-Target`` will produce a
400 BadRequest error.
* GET/HEAD traversing more than ``symloop_max`` chained symlinks will
produce a 409 Conflict error.
* PUT/GET/HEAD on a symlink that includes a ``X-Symlink-Target-Etag`` header
  that does not match the target will produce a 409 Conflict error.
* POSTs will produce a 307 Temporary Redirect error.
----------
Deployment
----------
Symlinks are enabled by adding the `symlink` middleware to the proxy server
WSGI pipeline and including a corresponding filter configuration section in the
`proxy-server.conf` file. The `symlink` middleware should be placed after
`slo`, `dlo` and `versioned_writes` middleware, but before `encryption`
middleware in the pipeline. See the `proxy-server.conf-sample` file for further
details. :ref:`Additional steps <symlink_container_sync_client_config>` are
required if the container sync feature is being used.
.. note::
Once you have deployed `symlink` middleware in your pipeline, you should
neither remove the `symlink` middleware nor downgrade swift to a version
earlier than symlinks being supported. Doing so may result in unexpected
container listing results in addition to symlink objects behaving like a
normal object.
.. _symlink_container_sync_client_config:
Container sync configuration
----------------------------
If container sync is being used then the `symlink` middleware
must be added to the container sync internal client pipeline. The following
configuration steps are required:
#. Create a custom internal client configuration file for container sync (if
one is not already in use) based on the sample file
`internal-client.conf-sample`. For example, copy
`internal-client.conf-sample` to `/etc/swift/container-sync-client.conf`.
#. Modify this file to include the `symlink` middleware in the pipeline in
the same way as described above for the proxy server.
#. Modify the container-sync section of all container server config files to
point to this internal client config file using the
``internal_client_conf_path`` option. For example::
internal_client_conf_path = /etc/swift/container-sync-client.conf
.. note::
These container sync configuration steps will be necessary for container
sync probe tests to pass if the `symlink` middleware is included in the
proxy pipeline of a test cluster.
"""
import json
import os
from cgi import parse_header
from swift.common.utils import get_logger, register_swift_info, split_path, \
MD5_OF_EMPTY_STRING, close_if_possible, closing_if_possible, \
config_true_value, drain_and_close
from swift.common.constraints import check_account_format
from swift.common.wsgi import WSGIContext, make_subrequest, \
make_pre_authed_request
from swift.common.request_helpers import get_sys_meta_prefix, \
check_path_header, get_container_update_override_key, \
update_ignore_range_header
from swift.common.swob import Request, HTTPBadRequest, HTTPTemporaryRedirect, \
HTTPException, HTTPConflict, HTTPPreconditionFailed, wsgi_quote, \
wsgi_unquote, status_map, normalize_etag
from swift.common.http import is_success, HTTP_NOT_FOUND
from swift.common.exceptions import LinkIterError
from swift.common.header_key_dict import HeaderKeyDict
# Default maximum number of chained symlinks to traverse.
DEFAULT_SYMLOOP_MAX = 2
# Header values for symlink target path strings will be quoted values.
TGT_OBJ_SYMLINK_HDR = 'x-symlink-target'
TGT_ACCT_SYMLINK_HDR = 'x-symlink-target-account'
TGT_ETAG_SYMLINK_HDR = 'x-symlink-target-etag'
TGT_BYTES_SYMLINK_HDR = 'x-symlink-target-bytes'

# Cluster-facing sysmeta equivalents of the client-facing headers above.
TGT_OBJ_SYSMETA_SYMLINK_HDR = get_sys_meta_prefix('object') + 'symlink-target'
TGT_ACCT_SYSMETA_SYMLINK_HDR = \
    get_sys_meta_prefix('object') + 'symlink-target-account'
TGT_ETAG_SYSMETA_SYMLINK_HDR = \
    get_sys_meta_prefix('object') + 'symlink-target-etag'
TGT_BYTES_SYSMETA_SYMLINK_HDR = \
    get_sys_meta_prefix('object') + 'symlink-target-bytes'
# Sysmeta flags: SYMLOOP_EXTEND marks a symlink whose traversal does not
# count against symloop_max; ALLOW_RESERVED_NAMES permits a symlink to
# target names in the reserved namespace.
SYMLOOP_EXTEND = get_sys_meta_prefix('object') + 'symloop-extend'
ALLOW_RESERVED_NAMES = get_sys_meta_prefix('object') + 'allow-reserved-names'
def _validate_and_prep_request_headers(req):
    """
    Validate that the value from x-symlink-target header is well formatted
    and that the x-symlink-target-etag header (if present) does not contain
    problematic characters. We assume the caller ensures that
    x-symlink-target header is present in req.headers.

    :param req: HTTP request object
    :returns: a tuple, the full versioned path to the object (as a WSGI string)
              and the X-Symlink-Target-Etag header value which may be None
    :raise: HTTPPreconditionFailed if x-symlink-target value
            is not well formatted.
    :raise: HTTPBadRequest if the x-symlink-target value points to the request
            path.
    :raise: HTTPBadRequest if the x-symlink-target-etag value contains
            a semicolon, double-quote, or backslash.
    """
    # N.B. check_path_header doesn't assert the leading slash and
    # copy middleware may accept the format. In the symlink, API
    # says apparently to use "container/object" format so add the
    # validation first, here.
    error_body = 'X-Symlink-Target header must be of the form ' \
                 '<container name>/<object name>'
    if wsgi_unquote(req.headers[TGT_OBJ_SYMLINK_HDR]).startswith('/'):
        raise HTTPPreconditionFailed(
            body=error_body,
            request=req, content_type='text/plain')

    # check container and object format
    container, obj = check_path_header(
        req, TGT_OBJ_SYMLINK_HDR, 2,
        error_body)
    # Normalize the header to a quoted "container/object" WSGI string.
    req.headers[TGT_OBJ_SYMLINK_HDR] = wsgi_quote('%s/%s' % (container, obj))
    # Check account format if it exists
    account = check_account_format(
        req, wsgi_unquote(req.headers[TGT_ACCT_SYMLINK_HDR])) \
        if TGT_ACCT_SYMLINK_HDR in req.headers else None

    # Extract request path
    _junk, req_acc, req_cont, req_obj = req.split_path(4, 4, True)

    if account:
        req.headers[TGT_ACCT_SYMLINK_HDR] = wsgi_quote(account)
    else:
        # No explicit target account: default to the symlink's own account.
        account = req_acc

    # Check if symlink targets the symlink itself or not
    if (account, container, obj) == (req_acc, req_cont, req_obj):
        raise HTTPBadRequest(
            body='Symlink cannot target itself',
            request=req, content_type='text/plain')
    etag = normalize_etag(req.headers.get(TGT_ETAG_SYMLINK_HDR, None))
    if etag and any(c in etag for c in ';"\\'):
        # See cgi.parse_header for why the above chars are problematic
        raise HTTPBadRequest(
            body='Bad %s format' % TGT_ETAG_SYMLINK_HDR.title(),
            request=req, content_type='text/plain')
    if not (etag or req.headers.get('Content-Type')):
        # No Content-Type given and no target etag (dynamic symlink):
        # apply the default symlink Content-Type.
        req.headers['Content-Type'] = 'application/symlink'
    return '/v1/%s/%s/%s' % (account, container, obj), etag
def symlink_usermeta_to_sysmeta(headers):
    """Rename the client-facing X-Symlink-* request headers to their
    cluster-facing X-Object-Sysmeta-Symlink-* equivalents, in place.

    Values are moved raw (still url-encoded) so nothing is lost in the
    translation.

    :param headers: mutable headers dict; updated directly.
    """
    hdr_pairs = (
        (TGT_OBJ_SYMLINK_HDR, TGT_OBJ_SYSMETA_SYMLINK_HDR),
        (TGT_ACCT_SYMLINK_HDR, TGT_ACCT_SYSMETA_SYMLINK_HDR),
    )
    for client_hdr, cluster_hdr in hdr_pairs:
        if client_hdr in headers:
            headers[cluster_hdr] = headers.pop(client_hdr)
def symlink_sysmeta_to_usermeta(headers):
    """Rename the cluster-facing X-Object-Sysmeta-Symlink-* headers back
    to their client-facing X-Symlink-* equivalents, in place.

    :param headers: mutable headers dict; updated directly.
    """
    hdr_pairs = (
        (TGT_OBJ_SYMLINK_HDR, TGT_OBJ_SYSMETA_SYMLINK_HDR),
        (TGT_ACCT_SYMLINK_HDR, TGT_ACCT_SYSMETA_SYMLINK_HDR),
        (TGT_ETAG_SYMLINK_HDR, TGT_ETAG_SYSMETA_SYMLINK_HDR),
        (TGT_BYTES_SYMLINK_HDR, TGT_BYTES_SYSMETA_SYMLINK_HDR),
    )
    for client_hdr, cluster_hdr in hdr_pairs:
        if cluster_hdr in headers:
            headers[client_hdr] = headers.pop(cluster_hdr)
class SymlinkContainerContext(WSGIContext):
    """WSGI context that decorates JSON container GET listings with
    symlink information (symlink_path / symlink_etag / symlink_bytes)."""

    def __init__(self, wsgi_app, logger):
        super(SymlinkContainerContext, self).__init__(wsgi_app)
        self.logger = logger

    def handle_container(self, req, start_response):
        """
        Handle container requests.

        :param req: a :class:`~swift.common.swob.Request`
        :param start_response: start_response function
        :return: Response Iterator after start_response called.
        """
        app_resp = self._app_call(req.environ)

        # Only rewrite the bodies of successful GET listings.
        if req.method == 'GET' and is_success(self._get_status_int()):
            app_resp = self._process_json_resp(app_resp, req)

        start_response(self._response_status, self._response_headers,
                       self._response_exc_info)

        return app_resp

    def _process_json_resp(self, resp_iter, req):
        """
        Iterate through json body looking for symlinks and modify its content
        :return: modified json body
        """
        with closing_if_possible(resp_iter):
            resp_body = b''.join(resp_iter)
        body_json = json.loads(resp_body)
        # Path is /<version>/<account>[/...]; version and account are
        # needed to build each symlink's target path.
        swift_version, account, _junk = split_path(req.path, 2, 3, True)
        new_body = json.dumps(
            [self._extract_symlink_path_json(obj_dict, swift_version, account)
             for obj_dict in body_json]).encode('ascii')
        # Body length changed, so the Content-Length header must follow.
        self.update_content_length(len(new_body))
        return [new_body]

    def _extract_symlink_path_json(self, obj_dict, swift_version, account):
        """
        Extract the symlink info from the hash value
        :return: object dictionary with additional key:value pairs when object
                 is a symlink. i.e. new symlink_path, symlink_etag and
                 symlink_bytes keys
        """
        if 'hash' in obj_dict:
            # Symlink metadata may be embedded in the listing's 'hash'
            # value as "<etag>; key=value; ..." parameters; parse_header
            # splits them back out.
            hash_value, meta = parse_header(obj_dict['hash'])
            obj_dict['hash'] = hash_value
            target = None
            for key in meta:
                if key == 'symlink_target':
                    target = meta[key]
                elif key == 'symlink_target_account':
                    # Cross-account symlink: the target account overrides
                    # the listing's own account in the built path.
                    account = meta[key]
                elif key == 'symlink_target_etag':
                    obj_dict['symlink_etag'] = meta[key]
                elif key == 'symlink_target_bytes':
                    obj_dict['symlink_bytes'] = int(meta[key])
                else:
                    # make sure to add all other (key, values) back in place
                    obj_dict['hash'] += '; %s=%s' % (key, meta[key])
            else:
                # for/else: the loop has no break, so this always runs
                # after the loop completes.
                if target:
                    obj_dict['symlink_path'] = os.path.join(
                        '/', swift_version, account, target)
        return obj_dict
class SymlinkObjectContext(WSGIContext):
    def __init__(self, wsgi_app, logger, symloop_max):
        """
        :param wsgi_app: next WSGI application/filter in the pipeline
        :param logger: logger for this middleware
        :param symloop_max: maximum number of chained symlinks to traverse
        """
        super(SymlinkObjectContext, self).__init__(wsgi_app)
        self.symloop_max = symloop_max
        self.logger = logger
        # N.B. _loop_count and _last_target_path carry state across the
        # recursive traversal in _recursive_get_head. Hence they should
        # not be touched from other resources.
        self._loop_count = 0
        self._last_target_path = None
def handle_get_head_symlink(self, req):
"""
Handle get/head request when client sent parameter ?symlink=get
:param req: HTTP GET or HEAD object request with param ?symlink=get
:returns: Response Iterator
"""
resp = self._app_call(req.environ)
response_header_dict = HeaderKeyDict(self._response_headers)
symlink_sysmeta_to_usermeta(response_header_dict)
self._response_headers = response_header_dict.items()
return resp
def handle_get_head(self, req):
"""
Handle get/head request and in case the response is a symlink,
redirect request to target object.
:param req: HTTP GET or HEAD object request
:returns: Response Iterator
"""
update_ignore_range_header(req, TGT_OBJ_SYSMETA_SYMLINK_HDR)
try:
return self._recursive_get_head(req)
except LinkIterError:
errmsg = 'Too many levels of symbolic links, ' \
'maximum allowed is %d' % self.symloop_max
raise HTTPConflict(body=errmsg, request=req,
content_type='text/plain')
def _recursive_get_head(self, req, target_etag=None,
follow_softlinks=True, orig_req=None):
if not orig_req:
orig_req = req
resp = self._app_call(req.environ)
def build_traversal_req(symlink_target):
"""
:returns: new request for target path if it's symlink otherwise
None
"""
version, account, _junk = req.split_path(2, 3, True)
account = self._response_header_value(
TGT_ACCT_SYSMETA_SYMLINK_HDR) or wsgi_quote(account)
target_path = os.path.join(
'/', version, account,
symlink_target.lstrip('/'))
self._last_target_path = target_path
subreq_headers = dict(req.headers)
if self._response_header_value(ALLOW_RESERVED_NAMES):
# this symlink's sysmeta says it can point to reserved names,
# we're infering that some piece of middleware had previously
# authorized this request because users can't access reserved
# names directly
subreq_meth = make_pre_authed_request
subreq_headers['X-Backend-Allow-Reserved-Names'] = 'true'
else:
subreq_meth = make_subrequest
new_req = subreq_meth(orig_req.environ, path=target_path,
method=req.method, headers=subreq_headers,
swift_source='SYM')
new_req.headers.pop('X-Backend-Storage-Policy-Index', None)
return new_req
symlink_target = self._response_header_value(
TGT_OBJ_SYSMETA_SYMLINK_HDR)
resp_etag = self._response_header_value(
TGT_ETAG_SYSMETA_SYMLINK_HDR)
if symlink_target and (resp_etag or follow_softlinks):
# Should be a zero-byte object
drain_and_close(resp)
found_etag = resp_etag or self._response_header_value('etag')
if target_etag and target_etag != found_etag:
raise HTTPConflict(
body='X-Symlink-Target-Etag headers do not match',
headers={
'Content-Type': 'text/plain',
'Content-Location': self._last_target_path})
if self._loop_count >= self.symloop_max:
raise LinkIterError()
# format: /<account name>/<container name>/<object name>
new_req = build_traversal_req(symlink_target)
if not config_true_value(
self._response_header_value(SYMLOOP_EXTEND)):
self._loop_count += 1
return self._recursive_get_head(new_req, target_etag=resp_etag,
orig_req=req)
else:
final_etag = self._response_header_value('etag')
if final_etag and target_etag and target_etag != final_etag:
# do *not* drain; we don't know how big this is
close_if_possible(resp)
body = ('Object Etag %r does not match '
'X-Symlink-Target-Etag header %r')
raise HTTPConflict(
body=body % (final_etag, target_etag),
headers={
'Content-Type': 'text/plain',
'Content-Location': self._last_target_path})
if self._last_target_path:
# Content-Location will be applied only when one or more
# symlink recursion occurred.
# In this case, Content-Location is applied to show which
# object path caused the error response.
# To preserve '%2F'(= quote('/')) in X-Symlink-Target
# header value as it is, Content-Location value comes from
# TGT_OBJ_SYMLINK_HDR, not req.path
self._response_headers.extend(
[('Content-Location', self._last_target_path)])
return resp
def _validate_etag_and_update_sysmeta(self, req, symlink_target_path,
etag):
if req.environ.get('swift.symlink_override'):
req.headers[TGT_ETAG_SYSMETA_SYMLINK_HDR] = etag
req.headers[TGT_BYTES_SYSMETA_SYMLINK_HDR] = \
req.headers[TGT_BYTES_SYMLINK_HDR]
return
# next we'll make sure the E-Tag matches a real object
new_req = make_subrequest(
req.environ, path=wsgi_quote(symlink_target_path), method='HEAD',
swift_source='SYM')
if req.allow_reserved_names:
new_req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
self._last_target_path = symlink_target_path
resp = self._recursive_get_head(new_req, target_etag=etag,
follow_softlinks=False)
if self._get_status_int() == HTTP_NOT_FOUND:
raise HTTPConflict(
body='X-Symlink-Target does not exist',
request=req,
headers={
'Content-Type': 'text/plain',
'Content-Location': self._last_target_path})
if not is_success(self._get_status_int()):
drain_and_close(resp)
raise status_map[self._get_status_int()](request=req)
response_headers = HeaderKeyDict(self._response_headers)
# carry forward any etag update params (e.g. "slo_etag"), we'll append
# symlink_target_* params to this header after this method returns
override_header = get_container_update_override_key('etag')
if override_header in response_headers and \
override_header not in req.headers:
sep, params = response_headers[override_header].partition(';')[1:]
req.headers[override_header] = MD5_OF_EMPTY_STRING + sep + params
# It's troublesome that there's so much leakage with SLO
if 'X-Object-Sysmeta-Slo-Etag' in response_headers and \
override_header not in req.headers:
req.headers[override_header] = '%s; slo_etag=%s' % (
MD5_OF_EMPTY_STRING,
response_headers['X-Object-Sysmeta-Slo-Etag'])
req.headers[TGT_BYTES_SYSMETA_SYMLINK_HDR] = (
response_headers.get('x-object-sysmeta-slo-size') or
response_headers['Content-Length'])
req.headers[TGT_ETAG_SYSMETA_SYMLINK_HDR] = etag
if not req.headers.get('Content-Type'):
req.headers['Content-Type'] = response_headers['Content-Type']
def handle_put(self, req):
"""
Handle put request when it contains X-Symlink-Target header.
Symlink headers are validated and moved to sysmeta namespace.
:param req: HTTP PUT object request
:returns: Response Iterator
"""
if req.content_length is None:
has_body = (req.body_file.read(1) != b'')
else:
has_body = (req.content_length != 0)
if has_body:
raise HTTPBadRequest(
body='Symlink requests require a zero byte body',
request=req,
content_type='text/plain')
symlink_target_path, etag = _validate_and_prep_request_headers(req)
if etag:
self._validate_etag_and_update_sysmeta(
req, symlink_target_path, etag)
# N.B. TGT_ETAG_SYMLINK_HDR was converted as part of verifying it
symlink_usermeta_to_sysmeta(req.headers)
# Store info in container update that this object is a symlink.
# We have a design decision to use etag space to store symlink info for
# object listing because it's immutable unless the object is
# overwritten. This may impact the downgrade scenario that the symlink
# info can appear as the suffix in the hash value of object
# listing result for clients.
# To create override etag easily, we have a constraint that the symlink
# must be 0 byte so we can add etag of the empty string + symlink info
# here, simply (if no other override etag was provided). Note that this
# override etag may be encrypted in the container db by encryption
# middleware.
etag_override = [
req.headers.get(get_container_update_override_key('etag'),
MD5_OF_EMPTY_STRING),
'symlink_target=%s' % req.headers[TGT_OBJ_SYSMETA_SYMLINK_HDR]
]
if TGT_ACCT_SYSMETA_SYMLINK_HDR in req.headers:
etag_override.append(
'symlink_target_account=%s' %
req.headers[TGT_ACCT_SYSMETA_SYMLINK_HDR])
if TGT_ETAG_SYSMETA_SYMLINK_HDR in req.headers:
# if _validate_etag_and_update_sysmeta or a middleware sets
# TGT_ETAG_SYSMETA_SYMLINK_HDR then they need to also set
# TGT_BYTES_SYSMETA_SYMLINK_HDR. If they forget, they get a
# KeyError traceback and client gets a ServerError
etag_override.extend([
'symlink_target_etag=%s' %
req.headers[TGT_ETAG_SYSMETA_SYMLINK_HDR],
'symlink_target_bytes=%s' %
req.headers[TGT_BYTES_SYSMETA_SYMLINK_HDR],
])
req.headers[get_container_update_override_key('etag')] = \
'; '.join(etag_override)
return self._app_call(req.environ)
def handle_post(self, req):
"""
Handle post request. If POSTing to a symlink, a HTTPTemporaryRedirect
error message is returned to client.
Clients that POST to symlinks should understand that the POST is not
redirected to the target object like in a HEAD/GET request. POSTs to a
symlink will be handled just like a normal object by the object server.
It cannot reject it because it may not have symlink state when the POST
lands. The object server has no knowledge of what is a symlink object
is. On the other hand, on POST requests, the object server returns all
sysmeta of the object. This method uses that sysmeta to determine if
the stored object is a symlink or not.
:param req: HTTP POST object request
:raises: HTTPTemporaryRedirect if POSTing to a symlink.
:returns: Response Iterator
"""
if TGT_OBJ_SYMLINK_HDR in req.headers:
raise HTTPBadRequest(
body='A PUT request is required to set a symlink target',
request=req,
content_type='text/plain')
resp = self._app_call(req.environ)
if not is_success(self._get_status_int()):
return resp
tgt_co = self._response_header_value(TGT_OBJ_SYSMETA_SYMLINK_HDR)
if tgt_co:
version, account, _junk = req.split_path(2, 3, True)
target_acc = self._response_header_value(
TGT_ACCT_SYSMETA_SYMLINK_HDR) or wsgi_quote(account)
location_hdr = os.path.join(
'/', version, target_acc, tgt_co)
headers = {'location': location_hdr}
tgt_etag = self._response_header_value(
TGT_ETAG_SYSMETA_SYMLINK_HDR)
if tgt_etag:
headers[TGT_ETAG_SYMLINK_HDR] = tgt_etag
req.environ['swift.leave_relative_location'] = True
errmsg = 'The requested POST was applied to a symlink. POST ' +\
'directly to the target to apply requested metadata.'
for key, value in self._response_headers:
if key.lower().startswith('x-object-sysmeta-'):
headers[key] = value
raise HTTPTemporaryRedirect(
body=errmsg, headers=headers)
else:
return resp
def handle_object(self, req, start_response):
"""
Handle object requests.
:param req: a :class:`~swift.common.swob.Request`
:param start_response: start_response function
:returns: Response Iterator after start_response has been called
"""
if req.method in ('GET', 'HEAD'):
if req.params.get('symlink') == 'get':
resp = self.handle_get_head_symlink(req)
else:
resp = self.handle_get_head(req)
elif req.method == 'PUT' and (TGT_OBJ_SYMLINK_HDR in req.headers):
resp = self.handle_put(req)
elif req.method == 'POST':
resp = self.handle_post(req)
else:
# DELETE and OPTIONS reqs for a symlink and
# PUT reqs without X-Symlink-Target behave like any other object
resp = self._app_call(req.environ)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
class SymlinkMiddleware(object):
    """
    Middleware that implements symlinks.

    Symlinks are objects stored in Swift that contain a reference to another
    object (i.e., the target object). An important use case is to use a path in
    one container to access an object in a different container, with a
    different policy. This allows policy cost/performance trade-offs to be made
    on individual objects.
    """

    def __init__(self, app, conf, symloop_max):
        self.app = app
        self.conf = conf
        self.logger = get_logger(self.conf, log_route='symlink')
        self.symloop_max = symloop_max

    def __call__(self, env, start_response):
        req = Request(env)
        try:
            version, acc, cont, obj = req.split_path(3, 4, True)
        except ValueError:
            # Not a container or object request: pass straight through.
            return self.app(env, start_response)

        try:
            if obj:
                # object context
                ctx = SymlinkObjectContext(self.app, self.logger,
                                           self.symloop_max)
                return ctx.handle_object(req, start_response)
            # container context
            ctx = SymlinkContainerContext(self.app, self.logger)
            return ctx.handle_container(req, start_response)
        except HTTPException as err_resp:
            return err_resp(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Standard paste.deploy filter factory for the symlink middleware."""
    conf = global_conf.copy()
    conf.update(local_conf)

    symloop_max = int(conf.get('symloop_max', DEFAULT_SYMLOOP_MAX))
    if symloop_max < 1:
        # Guard against nonsensical configuration; fall back to the default.
        symloop_max = int(DEFAULT_SYMLOOP_MAX)
    register_swift_info('symlink', symloop_max=symloop_max, static_links=True)

    return lambda app: SymlinkMiddleware(app, conf, symloop_max)
| apache-2.0 |
aapav01/android_kernel_samsung_ms013g-caf | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Usage message: fixed to name this script (it previously referred to
# syscall-counts-by-pid.py, a copy/paste leftover).
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"

for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        # A numeric argument filters by pid ...
        for_pid = int(sys.argv[1])
    except ValueError:
        # ... any other string is treated as a comm (process name).
        for_comm = sys.argv[1]

# Nested autovivifying dict: comm -> pid -> syscall id -> errno -> count.
syscalls = autodict()
def trace_begin():
    # Called once by perf before events are delivered; tell the user how
    # to end a live session.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called once by perf after the last event; emit the summary.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    # perf handler for the raw_syscalls:sys_exit tracepoint; counts only
    # failed calls (negative return value), bucketed comm/pid/syscall/errno.
    # NOTE(review): a filter of pid 0 would be ignored by the truthiness
    # test below — presumably not a practical concern for this script.
    if (for_comm and common_comm != for_comm) or \
        (for_pid and common_pid != for_pid ):
        return

    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            # First hit for this bucket: autodict leaf is not yet an int.
            syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
    # Print the accumulated failed-syscall counts, most frequent errno first.
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # Sort (errno, count) pairs by descending count.
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
nolram/NewsReader-Django | Site/migrations/0001_initial.py | 1 | 4331 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated initial schema for the Site app: subscription plans,
    # login providers, and per-user favorite/history records that reference
    # Crawler.Postagens and the configured AUTH_USER_MODEL.

    dependencies = [
        ('Crawler', '0001_initial'),
        ('auth', '0006_require_contenttypes_0002'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Assinatura',
            fields=[
                ('id_usuario', models.OneToOneField(primary_key=True, to=settings.AUTH_USER_MODEL, serialize=False)),
                ('data_adicionado', models.DateTimeField(auto_now_add=True)),
                ('data_modificado', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='FavoritosNoticiasUsuario',
            fields=[
                ('id_favoritos_noticias', models.AutoField(primary_key=True, serialize=False)),
                ('data_adicionado', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='HistoricoNoticiasUsuario',
            fields=[
                ('id_historico_noticias', models.AutoField(primary_key=True, serialize=False)),
                ('data_adicionado', models.DateTimeField(auto_now_add=True)),
                ('fk_noticia', models.ForeignKey(to='Crawler.Postagens', related_name='fk_noticia_histo_usuario')),
            ],
        ),
        migrations.CreateModel(
            name='Planos',
            fields=[
                ('id_plano', models.AutoField(primary_key=True, serialize=False)),
                ('nome', models.CharField(max_length=100)),
                ('valor', models.DecimalField(max_digits=5, decimal_places=2)),
                ('data_adicionado', models.DateTimeField(auto_now_add=True)),
                ('data_modificado', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='ProvedoresDeLogin',
            fields=[
                ('id_provedor', models.AutoField(primary_key=True, serialize=False)),
                ('nome', models.CharField(db_index=True, max_length=50)),
                ('key', models.CharField(null=True, max_length=500)),
                ('secret_key', models.CharField(null=True, max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='UsuariosProvedor',
            fields=[
                ('id_usuario', models.OneToOneField(primary_key=True, to=settings.AUTH_USER_MODEL, serialize=False)),
                ('key_o_auth', models.CharField(db_index=True, max_length=700)),
                ('data_registro', models.DateTimeField(auto_now_add=True)),
                ('data_atualizacao', models.DateTimeField(auto_now=True)),
                ('fk_provedor', models.ForeignKey(to='Site.ProvedoresDeLogin', related_name='fk_provedor')),
            ],
        ),
        migrations.CreateModel(
            name='ConteudoFavoritos',
            fields=[
                ('id_conteudo_favoritos', models.OneToOneField(primary_key=True, to='Site.FavoritosNoticiasUsuario', serialize=False)),
                ('titulo', models.CharField(max_length=500)),
                ('conteudo', models.TextField()),
            ],
        ),
        # FK fields added after model creation to break circular references.
        migrations.AddField(
            model_name='historiconoticiasusuario',
            name='fk_usuario',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='fk_usua_histo'),
        ),
        migrations.AddField(
            model_name='favoritosnoticiasusuario',
            name='fk_noticia',
            field=models.ForeignKey(to='Crawler.Postagens', related_name='fk_noticia_fav_usuario'),
        ),
        migrations.AddField(
            model_name='favoritosnoticiasusuario',
            name='fk_usuario',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='fk_usua_fav'),
        ),
        migrations.AddField(
            model_name='assinatura',
            name='fk_plano',
            field=models.ForeignKey(to='Site.Planos', related_name='fk_plano_usuario'),
        ),
        migrations.AlterUniqueTogether(
            name='usuariosprovedor',
            unique_together=set([('id_usuario', 'fk_provedor')]),
        ),
    ]
| mit |
graphicore/fontbakery | snippets/check-custom.py | 3 | 3498 | import os
from fontbakery.checkrunner import Section, PASS, FAIL, WARN, ERROR, INFO, SKIP
from fontbakery.callable import condition, check, disable
from fontbakery.constants import PriorityLevel
from fontbakery.message import Message
from fontbakery.fonts_profile import profile_factory
from fontbakery.profiles.universal import UNIVERSAL_PROFILE_CHECKS
# Pull in the upstream universal profile and build a custom profile on top.
profile_imports = ("fontbakery.profiles.universal",)
profile = profile_factory(default_section=Section("Custom Checks"))

# ================================================
#
# Custom check list
#
# ================================================
# define new custom checks that are implemented in
# this source file by check ID. Format these as a
# Python list.
# The check ID here is an example that demonstrates
# the example pass check in this module. It is safe
# to remove the check ID when you remove the example
# check from this module
CUSTOM_PROFILE_CHECKS = UNIVERSAL_PROFILE_CHECKS + [
    "org.example/check/valid/testpass",
]

# ================================================
#
# Fontbakery check exclusion list
#
# ================================================
# define check IDs in the upstream `universal` profile
# that should be excluded, as a Python tuple
excluded_check_ids = (
    # "com.google.fonts/check/ftxvalidator_is_available",
    # "com.google.fonts/check/dsig",
)
# ================================================
#
# Example test implementation
#
# ================================================
# The following are example implementations of
# pass and fail checks. They are commented out
# and are not executed.
# # Test failure template
# @check(
# id="org.example/check/valid/testfail",
# rationale="""
# This is the test failure rationale.
# """,
# )
# def org_example_check_valid_test_fail():
# """A test failure example."""
# yield FAIL, "test failure message"
# # Test pass template
# @check(
# id="org.example/check/valid/testpass",
# rationale="""
# This is the test pass rationale.
# """,
# )
# def org_example_check_valid_test_pass():
# """A test pass example."""
# yield PASS, "test pass message"
# ================================================
#
# Begin check definitions
#
# ================================================
# TEST PASS EXAMPLE
# This check is a live example that is executed when
# you run this custom profile. It is safe to remove
# this code when you implement your own checks.
@check(
    id="org.example/check/valid/testpass",
    rationale="""
    This is a test pass rationale.
    """,
)
def org_example_check_valid_test_pass():
    """A test pass example."""
    # Fontbakery checks are generators yielding (status, message) pairs.
    yield PASS, "test pass message"
# ================================================
#
# End check definitions
#
# ================================================
# This function identifies the skipped checks that are
# defined in the `excluded_check_ids` tuple at the head
# of this module. Do not remove this function unless
# you do not intend to filter checks imported from the
# fontbakery universal profile test suite
def check_skip_filter(checkid, font=None, **iterargs):
    """Return (run, reason): skip any check listed in excluded_check_ids."""
    is_excluded = bool(font) and checkid in excluded_check_ids
    if is_excluded:
        return False, "Check skipped in Valid project profile"
    return True, None
# You should not need to edit the following block of code:
# wire in the skip filter, register every check defined in this module,
# and assert the profile contains exactly the expected check IDs.
profile.check_skip_filter = check_skip_filter
profile.auto_register(globals())
profile.test_expected_checks(CUSTOM_PROFILE_CHECKS, exclusive=True)
| apache-2.0 |
JamesMGreene/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/postdiffforrevert.py | 130 | 2470 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.tool.steps.abstractstep import AbstractStep
class PostDiffForRevert(AbstractStep):
    # Tool step: attach a rollout diff to the bug and queue it for the
    # commit-queue (review flag cleared, commit-queue flag set).

    def run(self, state):
        # The backslash-continued literal below intentionally keeps the
        # message as one string; continuation-line leading whitespace (if
        # any) becomes part of the comment text.
        comment_text = "Any committer can land this patch automatically by \
marking it commit-queue+.  The commit-queue will build and test \
the patch before landing to ensure that the rollout will be \
successful.  This process takes approximately 15 minutes.\n\n\
If you would like to land the rollout faster, you can use the \
following command:\n\n\
  webkit-patch land-attachment ATTACHMENT_ID\n\n\
where ATTACHMENT_ID is the ID of this attachment."
        self._tool.bugs.add_patch_to_bug(
            state["bug_id"],
            self.cached_lookup(state, "diff"),
            "%s%s" % (Attachment.rollout_preamble, state["revision"]),
            comment_text=comment_text,
            mark_for_review=False,
            mark_for_commit_queue=True)
| bsd-3-clause |
BitWriters/Zenith_project | zango/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py | 2801 | 1782 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
    # Multi-byte charset prober for the Korean CP949 encoding.

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(CP949SMModel)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should be
        # not different.
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        # Canonical encoding name reported to callers.
        return "CP949"
| mit |
joelpinheiro/safebox-smartcard-auth | Client/veclient/lib/python2.7/site-packages/pip/log.py | 344 | 9455 | """Logging
"""
import sys
import os
import logging
from pip import backwardcompat
from pip._vendor import colorama, pkg_resources
def _color_wrap(*colors):
    """Build a function that wraps its argument in the given ANSI codes
    and appends a style reset."""
    def wrapped(inp):
        pieces = list(colors)
        pieces.append(inp)
        pieces.append(colorama.Style.RESET_ALL)
        return "".join(pieces)
    return wrapped
def should_color(consumer, environ, std=(sys.stdout, sys.stderr)):
    """Decide whether output written to *consumer* should be colorized."""
    # Unwrap a colorama proxy to compare against the real streams.
    if isinstance(consumer, colorama.AnsiToWin32):
        real_consumer = consumer.wrapped
    else:
        real_consumer = consumer

    # Only stdout/stderr are ever colorized.
    if real_consumer not in std:
        return False

    # Color when writing to a tty ...
    if hasattr(real_consumer, "isatty") and real_consumer.isatty():
        return True

    # ... or when the terminal explicitly advertises ANSI support.
    return environ.get("TERM") == "ANSI"
def should_warn(current_version, removal_version):
    """Return True when *current_version* is within one minor release of
    *removal_version* (i.e. a deprecation warning should be shown)."""
    # Significant digits on versions is 2: keep only major.minor.
    current = ".".join(current_version.split(".")[:2])

    # Warning threshold is one minor version before removal.
    major, minor = removal_version.split(".")[:2]
    warn_version = "%s.%d" % (major, int(minor) - 1)

    return (pkg_resources.parse_version(current)
            < pkg_resources.parse_version(warn_version))
class Logger(object):
    """
    Logging object for use in command-line script.  Allows ranges of
    levels, to avoid some redundancy of displayed information.
    """

    VERBOSE_DEBUG = logging.DEBUG - 1
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    # Midpoint between INFO and WARN.  Floor division keeps this an int on
    # Python 3 as well (true division would yield 25.0 there).
    NOTIFY = (logging.INFO + logging.WARN) // 2
    WARN = WARNING = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL
    LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]

    # level -> callable wrapping a message in ANSI color codes.
    COLORS = {
        WARN: _color_wrap(colorama.Fore.YELLOW),
        ERROR: _color_wrap(colorama.Fore.RED),
        FATAL: _color_wrap(colorama.Fore.RED),
    }

    def __init__(self):
        # Each consumer is a (level, stream_or_callable) pair.
        self.consumers = []
        self.indent = 0
        self.explicit_levels = False
        # Progress state: message of the active progress scope, and whether
        # the last thing written was an unterminated progress line.
        self.in_progress = None
        self.in_progress_hanging = False
        self.last_message = None

    def add_consumers(self, *consumers):
        """Register (level, consumer) pairs.  On Windows, writable streams
        are wrapped with colorama so ANSI codes get translated."""
        if sys.platform.startswith("win"):
            for level, consumer in consumers:
                if hasattr(consumer, "write"):
                    self.consumers.append(
                        (level, colorama.AnsiToWin32(consumer)),
                    )
                else:
                    self.consumers.append((level, consumer))
        else:
            self.consumers.extend(consumers)

    def debug(self, msg, *args, **kw):
        self.log(self.DEBUG, msg, *args, **kw)

    def info(self, msg, *args, **kw):
        self.log(self.INFO, msg, *args, **kw)

    def notify(self, msg, *args, **kw):
        self.log(self.NOTIFY, msg, *args, **kw)

    def warn(self, msg, *args, **kw):
        self.log(self.WARN, msg, *args, **kw)

    def error(self, msg, *args, **kw):
        self.log(self.ERROR, msg, *args, **kw)

    def fatal(self, msg, *args, **kw):
        self.log(self.FATAL, msg, *args, **kw)

    def deprecated(self, removal_version, msg, *args, **kwargs):
        """
        Logs deprecation message which is log level WARN if the
        ``removal_version`` is > 1 minor release away and log level ERROR
        otherwise.

        removal_version should be the version that the deprecated feature is
        expected to be removed in, so something that will not exist in
        version 1.7, but will in 1.6 would have a removal_version of 1.7.
        """
        from pip import __version__

        if should_warn(__version__, removal_version):
            self.warn(msg, *args, **kwargs)
        else:
            self.error(msg, *args, **kwargs)

    def log(self, level, msg, *args, **kw):
        """Render ``msg % args`` (or ``msg % kw``) and dispatch it to every
        consumer whose registered level matches *level*."""
        if args:
            if kw:
                raise TypeError(
                    "You may give positional or keyword arguments, not both")
        args = args or kw

        # render
        if args:
            rendered = msg % args
        else:
            rendered = msg
        rendered = ' ' * self.indent + rendered
        if self.explicit_levels:
            ## FIXME: should this be a name, not a level number?
            rendered = '%02i %s' % (level, rendered)
        for consumer_level, consumer in self.consumers:
            if self.level_matches(level, consumer_level):
                # Terminate a hanging progress line before writing to the
                # same terminal so output doesn't get glued to the dots.
                if (self.in_progress_hanging
                        and consumer in (sys.stdout, sys.stderr)):
                    self.in_progress_hanging = False
                    sys.stdout.write('\n')
                    sys.stdout.flush()
                if hasattr(consumer, 'write'):
                    write_content = rendered + '\n'
                    if should_color(consumer, os.environ):
                        # We are printing to stdout or stderr and it supports
                        # colors so render our text colored
                        colorizer = self.COLORS.get(level, lambda x: x)
                        write_content = colorizer(write_content)
                    consumer.write(write_content)
                    if hasattr(consumer, 'flush'):
                        consumer.flush()
                else:
                    # Consumer is a callable, not a stream.
                    consumer(rendered)

    def _show_progress(self):
        """Should we display download progress?"""
        return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())

    def start_progress(self, msg):
        assert not self.in_progress, (
            "Tried to start_progress(%r) while in_progress %r"
            % (msg, self.in_progress))
        if self._show_progress():
            sys.stdout.write(' ' * self.indent + msg)
            sys.stdout.flush()
            self.in_progress_hanging = True
        else:
            self.in_progress_hanging = False
        self.in_progress = msg
        self.last_message = None

    def end_progress(self, msg='done.'):
        assert self.in_progress, (
            "Tried to end_progress without start_progress")
        if self._show_progress():
            if not self.in_progress_hanging:
                # Some message has been printed out since start_progress
                sys.stdout.write('...' + self.in_progress + msg + '\n')
                sys.stdout.flush()
            else:
                # These erase any messages shown with show_progress
                # (besides .'s).  Fixed to call self rather than the
                # module-level singleton so independent Logger instances
                # behave correctly.
                self.show_progress('')
                self.show_progress('')
                sys.stdout.write(msg + '\n')
                sys.stdout.flush()
        self.in_progress = None
        self.in_progress_hanging = False

    def show_progress(self, message=None):
        """If we are in a progress scope, and no log messages have been
        shown, write out another '.'"""
        if self.in_progress_hanging:
            if message is None:
                sys.stdout.write('.')
                sys.stdout.flush()
            else:
                # Pad with spaces so a shorter message fully overwrites
                # the previous one after the carriage return.
                if self.last_message:
                    padding = ' ' * max(0, len(self.last_message) - len(message))
                else:
                    padding = ''
                sys.stdout.write('\r%s%s%s%s' %
                                 (' ' * self.indent, self.in_progress, message, padding))
                sys.stdout.flush()
                self.last_message = message

    def stdout_level_matches(self, level):
        """Returns true if a message at this level will go to stdout"""
        return self.level_matches(level, self._stdout_level())

    def _stdout_level(self):
        """Returns the level that stdout runs at"""
        for level, consumer in self.consumers:
            if consumer is sys.stdout:
                return level
        return self.FATAL

    def level_matches(self, level, consumer_level):
        """
        >>> l = Logger()
        >>> l.level_matches(3, 4)
        False
        >>> l.level_matches(3, 2)
        True
        >>> l.level_matches(slice(None, 3), 3)
        False
        >>> l.level_matches(slice(None, 3), 2)
        True
        >>> l.level_matches(slice(1, 3), 1)
        True
        >>> l.level_matches(slice(2, 3), 1)
        False
        """
        if isinstance(level, slice):
            start, stop = level.start, level.stop
            if start is not None and start > consumer_level:
                return False
            # BUG FIX: this condition previously used `or`, which rejected
            # every level below a non-None `stop` (and evaluated
            # `None <= consumer_level` when stop was None), contradicting
            # the doctests above; `and` implements the intended half-open
            # range [start, stop).
            if stop is not None and stop <= consumer_level:
                return False
            return True
        else:
            return level >= consumer_level

    @classmethod
    def level_for_integer(cls, level):
        """Clamp an integer index into LEVELS and return the level value."""
        levels = cls.LEVELS
        if level < 0:
            return levels[0]
        if level >= len(levels):
            return levels[-1]
        return levels[level]

    def move_stdout_to_stderr(self):
        """Re-point every stdout consumer at stderr (e.g. so command output
        stays machine-readable)."""
        to_remove = []
        to_add = []
        for consumer_level, consumer in self.consumers:
            if consumer == sys.stdout:
                to_remove.append((consumer_level, consumer))
                to_add.append((consumer_level, sys.stderr))
        for item in to_remove:
            self.consumers.remove(item)
        self.consumers.extend(to_add)

# Module-level singleton used throughout pip.
logger = Logger()
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_galaxytab-cdma | tools/perf/scripts/python/syscall-counts-by-pid.py | 944 | 1744 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
    """perf hook: invoked once before any events are processed; no setup needed."""
    return None
def trace_end():
    # perf hook: invoked once after all events; emit the aggregated report.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event hook: count one syscall entry, keyed comm -> pid -> syscall id.
    # NOTE: ``id`` shadows the builtin, but the signature is dictated by perf.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First occurrence: autodict returned an empty node, so += raised;
        # seed the counter instead.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    # Report syscall counts per comm/pid, highest count first.
    # Python 2 print statements (perf's embedded interpreter); the trailing
    # commas suppress the newline that print would otherwise add.
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by count descending (Python 2 tuple-unpacking lambda).
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print "  %-38d  %10d\n" % (id, val),
| gpl-2.0 |
Grumbel/viewer | tools/blenderexp.py | 1 | 9257 | ## Blender Export Script
## Copyright (C) 2012 Ingo Ruhnke <grumbel@gmail.com>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import bpy
import sys
from mathutils import Matrix, Vector, Euler, Quaternion
from collections import namedtuple
# http://www.blender.org/forum/viewtopic.php?t=19102&highlight=batch+render
# blender -b data/mech.blend --python blenderexp.py -- extra args
print("Argv:", sys.argv)
def pm(mat):
    # Debug helper: pretty-print a 4x4 matrix.  Note the mat[column][row]
    # indexing below -- the matrix is addressed column-major.
    print("Matrix(%6.2f %6.2f %6.2f %6.2f\n"
          " %6.2f %6.2f %6.2f %6.2f\n"
          " %6.2f %6.2f %6.2f %6.2f\n"
          " %6.2f %6.2f %6.2f %6.2f)" %
          (mat[0][0], mat[1][0], mat[2][0], mat[3][0],
           mat[0][1], mat[1][1], mat[2][1], mat[3][1],
           mat[0][2], mat[1][2], mat[2][2], mat[3][2],
           mat[0][3], mat[1][3], mat[2][3], mat[3][3]))
# A triangle: three Vertex records (replaced by integer indices later on).
Face = namedtuple('Face', ['v1', 'v2', 'v3'])
# One vertex: position, normal, UV coordinates (or None) and bone weights.
Vertex = namedtuple('Vertex', ['co', 'n', 'uv', 'bones'])
# Developer toggle: the True branch remaps coordinates (x, y, z) -> (x, z, -y),
# i.e. Blender's Z-up convention to what the exporter treats as GL's Y-up.
# Flip to False to export Blender values unchanged (plain tuples).
if True:
    def b2gl_vec3(v):
        return (v.x, v.z, -v.y)
    def b2gl_vec4(v):
        return (v.x, v.z, -v.y, v.w)
    def b2gl_scale(v):
        # Scale factors are magnitudes: axes are swapped but not negated.
        return (v.x, v.z, v.y)
    def b2gl_quat(q):
        # Re-express the rotation axis in the remapped space, keep the angle.
        axis, angle = q.to_axis_angle()
        axis = (axis.x, axis.z, -axis.y)
        return Quaternion(axis, angle)
    def b2gl_uv(uv): return uv
else:
    # Identity pass-through, useful for debugging the exporter itself.
    def b2gl_vec3(v): return tuple(v)
    def b2gl_vec4(v): return tuple(v)
    def b2gl_scale(v): return tuple(v)
    def b2gl_quat(q): return tuple(q)
    def b2gl_uv(uv): return tuple(uv)
def write_mesh(obj):
    """Write one MESH/EMPTY object (name, material, transform, geometry)
    to the module-global ``outfile`` in the exporter's text format."""
    # http://wiki.blender.org/index.php/User:Pepribal/Ref/Appendices/ParentInverse
    outfile.write("o %s\n" % obj.name)
    if len(obj.material_slots) > 0:
        outfile.write("mat %s.material\n" % obj.material_slots[0].name)
    if obj.parent and (obj.parent.type == 'MESH' or obj.parent.type == 'EMPTY'):
        outfile.write("parent %s\n" % obj.parent.name)
    # Decompose the local transform into translation/rotation/scale,
    # converted through the b2gl_* coordinate mapping.
    m = obj.matrix_local
    loc = b2gl_vec3(m.to_translation())
    quat = b2gl_quat(m.to_quaternion())
    scale = b2gl_scale(m.to_scale())
    outfile.write("loc %f %f %f\n" % tuple(loc))
    outfile.write("rot %f %f %f %f\n" % tuple(quat))
    outfile.write("scale %f %f %f\n" % tuple(scale))
    if obj.type == 'MESH':
        faces = collect_faces(obj)
        faces, vertices = index_vertices(faces)
        print("vertices: %d" % len(vertices))
        print("faces: %d" % len(faces))
        for v in vertices:
            outfile.write("vn %f %f %f\n" % v.n)
            if v.uv:
                outfile.write("vt %f %f\n" % b2gl_uv(v.uv))
            if v.bones:
                # Keep the four strongest bone influences, pad with zero
                # weights, and renormalize so the weights sum to 1.
                bones = list(v.bones)
                bones.sort(key=lambda bone: bone[1], reverse=True)
                while len(bones) < 4:
                    bones.append((0, 0.0))
                while len(bones) > 4:
                    bones.pop()
                bone_index = [g for g, w in bones]
                bone_weight = [w for g, w in bones]
                bone_weight = [w / sum(bone_weight) for w in bone_weight]
                outfile.write("bi %d %d %d %d\n" % tuple(bone_index))
                outfile.write("bw %f %f %f %f\n" % tuple(bone_weight))
            outfile.write("v %f %f %f\n" % v.co)
        for f in faces:
            outfile.write("f %d %d %d\n" % (f.v1, f.v2, f.v3))
def index_vertices(faces):
    """Replace per-face Vertex records with integer indices.

    Returns ``(indexed_faces, vertex_list)`` where ``vertex_list[i]`` is
    the Vertex assigned index ``i``.  Indices follow first-occurrence
    order across the faces (v1, v2, v3 of each face in turn).
    """
    index_of = {}
    for face in faces:
        for vertex in (face.v1, face.v2, face.v3):
            if vertex not in index_of:
                index_of[vertex] = len(index_of)
    indexed_faces = [Face(index_of[face.v1],
                          index_of[face.v2],
                          index_of[face.v3])
                     for face in faces]
    return indexed_faces, list(index_of.keys())
def collect_faces(obj):
    """collect data from the given mesh and triangulate it"""
    # Map armature bone names to indices so vertex groups can reference bones
    # by number (only when the single modifier on the object is an armature).
    bone_name2idx = {}
    if len(obj.modifiers) == 1 and obj.modifiers[0].type == "ARMATURE":
        for i, bone in enumerate(obj.modifiers[0].object.data.bones):
            bone_name2idx[bone.name] = i
    # print(bone_name2idx)
    mesh = obj.data
    uv_faces = None
    # print(dir(mesh))
    # Make sure the tessellated faces (tris/quads) are up to date.
    mesh.update(calc_tessface=True)
    faces = mesh.tessfaces
    print("Faces: ", faces)
    if mesh.uv_textures.active:
        uv_faces = mesh.tessface_uv_textures.active.data
    out_faces = []
    # bpy.data.objects[0].data.vertices[1].groups -> VertexGroupElement
    # bpy.data.objects[0].data.vertices[1].groups[0] -> weight
    for face in faces:
        num_vertices = len(face.vertices)
        v = [mesh.vertices[face.vertices[i]] for i in range(0, num_vertices)]
        if uv_faces:
            uv = [tuple(uv_faces[face.index].uv[i]) for i in range(0, num_vertices)]
        else:
            uv = [None, None, None, None]
        # Per-corner (bone index, weight) tuples gathered from vertex groups.
        bones = [[], [], [], []]
        for i, vert in enumerate(v):
            for j, g in enumerate(mesh.vertices[vert.index].groups):
                bones[i].append((bone_name2idx[obj.vertex_groups[g.group].name], g.weight))
            bones[i] = tuple(bones[i])
        bones = tuple(bones)
        # First triangle of the face; a quad contributes a second triangle
        # (corners 0, 2, 3) below.
        out_faces.append(
            Face(Vertex(b2gl_vec3(v[0].co), b2gl_vec3(v[0].normal), uv[0], bones[0]),
                 Vertex(b2gl_vec3(v[1].co), b2gl_vec3(v[1].normal), uv[1], bones[1]),
                 Vertex(b2gl_vec3(v[2].co), b2gl_vec3(v[2].normal), uv[2], bones[2])))
        if num_vertices == 4:
            out_faces.append(
                Face(Vertex(b2gl_vec3(v[0].co), b2gl_vec3(v[0].normal), uv[0], bones[0]),
                     Vertex(b2gl_vec3(v[2].co), b2gl_vec3(v[2].normal), uv[2], bones[2]),
                     Vertex(b2gl_vec3(v[3].co), b2gl_vec3(v[3].normal), uv[3], bones[3])))
    return out_faces
def vec3_str(v):
    """Format a vector as three fixed-width floats in GL coordinate order."""
    gl = b2gl_vec3(v)
    return "%6.2f %6.2f %6.2f" % (gl[0], gl[1], gl[2])
def vec4_str(v):
    """Format a 4-component vector as fixed-width floats in GL order."""
    gl = b2gl_vec4(v)
    return "%6.2f %6.2f %6.2f %6.2f" % (gl[0], gl[1], gl[2], gl[3])
def mat3_str(m):
    """Format the three rows of a 3x3 matrix, space-separated."""
    return " ".join(vec3_str(m[i]) for i in range(3))
def mat4_str(m):
    """Format the four rows of a 4x4 matrix, space-separated."""
    return " ".join(vec4_str(m[i]) for i in range(4))
def write_armature(obj):
    """Dump an armature's rest skeleton and current pose to /tmp text files."""
    # Rest-pose skeleton: one record per bone.
    with open("/tmp/blender.bones", "w") as f:
        f.write("# exported by %s\n" % __file__)
        armature = obj.data
        for bone in armature.bones:
            # _local is in armature space, the other in bone space
            f.write("bone %s\n" % bone.name)
            if bone.parent:
                f.write(" parent %s\n" % bone.parent.name)
            f.write(" head %s\n" % vec3_str(bone.head))
            f.write(" tail %s\n" % vec3_str(bone.tail))
            f.write(" head_local %s\n" % vec3_str(bone.head_local))
            f.write(" tail_local %s\n" % vec3_str(bone.tail_local))
            f.write(" matrix %s\n" % mat3_str(bone.matrix)) # a 3x3 matrix
            f.write(" matrix_local %s\n" % mat4_str(bone.matrix_local)) # a 4x4 matrix
            f.write("\n")
    # Current pose: final and basis matrices per pose bone.
    with open("/tmp/blender.pose", "w") as f:
        f.write("# exported by %s\n" % __file__)
        for bone in obj.pose.bones:
            f.write("bone %s\n" % bone.name)
            f.write(" matrix %s\n" % mat4_str(bone.matrix))
            f.write(" matrix_basis %s\n" % mat4_str(bone.matrix_basis))
            f.write("\n")
# [(x.name, k) for k,v in enumerate(bpy.data.objects[0].data.bones)]
# [(v.name, v.index) for v in bpy.data.objects[1].vertex_groups]
def export_material(fout, mat):
    # NOTE(review): unfinished sketch -- nearly every statement below is a
    # bare attribute access or a comparison whose result is discarded, and
    # *fout* is never written to.  Only save_render() has a side effect
    # (it writes /tmp/out.png).  Kept as a checklist of the material fields
    # a real exporter would need; confirm intent before relying on it.
    mat.name
    mat.specular_alpha
    mat.specular_color
    mat.diffuse_color
    mat.alpha
    for tex in mat.texture_slots:
        tex.specular_factor
        tex.specular_color_factor
        tex.diffuse_factor # intensity
        if tex.use_map_color_diffuse:
            tex.diffuse_color_factor
        tex.texture_coords == 'UV'
        tex.texture.type == 'IMAGE'
        tex.texture.image
        tex.texture.image.source == 'FILE'
        tex.texture.image.save_render("/tmp/out.png")
# Script entry: export every object on layer 1 to /tmp/blender.mod.
# Armatures additionally write /tmp/blender.bones and /tmp/blender.pose
# (see write_armature); write_mesh relies on ``outfile`` being global here.
with open("/tmp/blender.mod", "w") as outfile:
    outfile.write("# exported by %s\n" % __file__)
    objects = [obj for obj in bpy.data.objects if obj.layers[0]]
    meshes = [obj for obj in objects if obj.type == 'MESH' or obj.type == 'EMPTY']
    armatures = [obj for obj in objects if obj.type == 'ARMATURE']
    for mesh in meshes:
        write_mesh(mesh)
    for armature in armatures:
        write_armature(armature)
    outfile.write("\n# EOF #\n")
print("-- export complete --")
| gpl-3.0 |
jason-weirather/Au-public | iron/code/mappability/genepred_to_mappability.py | 1 | 2569 | #!/usr/bin/python
import sys, argparse, re, os
from SequenceBasics import read_fasta_into_hash, encode_name
from GenePredBasics import GenePredEntry as GPD
from subprocess import Popen, PIPE
from StatisticsBasics import average, median
from multiprocessing import cpu_count
import gzip
null = open(os.devnull,'w')
def main():
    """Stream every k-mer of each GenePred transcript through HISAT and
    pipe the alignments into genepred_counts_to_mappability.py, which
    aggregates them into per-entry (or per-base) mappability."""
    parser = argparse.ArgumentParser(description="For every genepred entry report its alignability",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input',help="Genepred can be gzipped or - for STDIN")
    parser.add_argument('-r','--reference',required=True,help="Reference fasta")
    parser.add_argument('-k','--fragment_size',default=100,type=int,help="Fragment size to try to align")
    parser.add_argument('-x','--hisat_index',required=True,help="HISAT index base name")
    parser.add_argument('--threads',type=int,default=cpu_count(),help="number of threads")
    parser.add_argument('--type',choices=['mean','median'],default='mean',help="How to bring together overlapping reads")
    parser.add_argument('--perbase',action='store_true')
    parser.add_argument('--output','-o',help="output file or leave unset for STDOUT")
    args = parser.parse_args()
    # Accept STDIN ("-"), gzipped, or plain-text genepred input.
    if args.input=='-': args.input=sys.stdin
    elif re.search('\.gz$',args.input):
        args.input = gzip.open(args.input)
    else: args.input = open(args.input)
    # Downstream stage: consumes HISAT's SAM output and computes mappability.
    udir = os.path.dirname(os.path.realpath(__file__))
    cmd2 = udir+'/genepred_counts_to_mappability.py -'
    cmd2 += ' --threads '+str(args.threads)
    cmd2 += ' -k '+str(args.fragment_size)
    if args.perbase: cmd2 += ' --perbase'
    if args.output: cmd2 += ' --output '+args.output
    if args.type: cmd2 += ' --type '+args.type
    p2 = Popen(cmd2.split(),stdin=PIPE)
    ref = read_fasta_into_hash(args.reference)
    # HISAT reads FASTA records from our stdin pipe; --reorder keeps the
    # output in input order so the counter can trust read names.
    cmd1 = 'hisat -x '+args.hisat_index+' -U - -f --reorder -p '+str(args.threads)
    p1 = Popen(cmd1.split(),stdin=PIPE,stdout=p2.stdin,stderr=null)
    #p1 = Popen(cmd1.split(),stdin=PIPE,stdout=p2.stdin)
    line_number = 0
    for line in args.input:
        line_number +=1
        gpd = GPD(line.rstrip())
        #print gpd.entry['name']
        #print gpd.length()
        if gpd.length() < args.fragment_size: continue
        seq = gpd.get_sequence(ref)
        # Emit every k-mer of the spliced transcript; the encoded read name
        # carries name/gene/line/length/offset so hits map back to the entry.
        for i in range(0,len(seq)-args.fragment_size+1):
            info = gpd.value('name')+"\t"+gpd.value('gene_name')+"\t"+str(line_number)+"\t"+str(len(seq))+"\t"+str(i)
            einfo = encode_name(info)
            p1.stdin.write('>'+einfo+"\n")
            p1.stdin.write(seq[i:i+args.fragment_size]+"\n")
    # Close the pipeline and wait for both stages to finish.
    p1.communicate()
    p2.communicate()
# Script entry point.
if __name__=="__main__":
    main()
| apache-2.0 |
Suwings/Yeinw | src/Crypto/SelfTest/Cipher/test_Blowfish.py | 119 | 5832 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/test_Blowfish.py: Self-test for the Blowfish cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.Blowfish"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (plaintext, ciphertext, key) tuples.
test_data = [
# Test vectors from http://www.schneier.com/code/vectors.txt
('0000000000000000', '4ef997456198dd78', '0000000000000000'),
('ffffffffffffffff', '51866fd5b85ecb8a', 'ffffffffffffffff'),
('1000000000000001', '7d856f9a613063f2', '3000000000000000'),
('1111111111111111', '2466dd878b963c9d', '1111111111111111'),
('1111111111111111', '61f9c3802281b096', '0123456789abcdef'),
('0123456789abcdef', '7d0cc630afda1ec7', '1111111111111111'),
('0000000000000000', '4ef997456198dd78', '0000000000000000'),
('0123456789abcdef', '0aceab0fc6a0a28d', 'fedcba9876543210'),
('01a1d6d039776742', '59c68245eb05282b', '7ca110454a1a6e57'),
('5cd54ca83def57da', 'b1b8cc0b250f09a0', '0131d9619dc1376e'),
('0248d43806f67172', '1730e5778bea1da4', '07a1133e4a0b2686'),
('51454b582ddf440a', 'a25e7856cf2651eb', '3849674c2602319e'),
('42fd443059577fa2', '353882b109ce8f1a', '04b915ba43feb5b6'),
('059b5e0851cf143a', '48f4d0884c379918', '0113b970fd34f2ce'),
('0756d8e0774761d2', '432193b78951fc98', '0170f175468fb5e6'),
('762514b829bf486a', '13f04154d69d1ae5', '43297fad38e373fe'),
('3bdd119049372802', '2eedda93ffd39c79', '07a7137045da2a16'),
('26955f6835af609a', 'd887e0393c2da6e3', '04689104c2fd3b2f'),
('164d5e404f275232', '5f99d04f5b163969', '37d06bb516cb7546'),
('6b056e18759f5cca', '4a057a3b24d3977b', '1f08260d1ac2465e'),
('004bd6ef09176062', '452031c1e4fada8e', '584023641aba6176'),
('480d39006ee762f2', '7555ae39f59b87bd', '025816164629b007'),
('437540c8698f3cfa', '53c55f9cb49fc019', '49793ebc79b3258f'),
('072d43a077075292', '7a8e7bfa937e89a3', '4fb05e1515ab73a7'),
('02fe55778117f12a', 'cf9c5d7a4986adb5', '49e95d6d4ca229bf'),
('1d9d5c5018f728c2', 'd1abb290658bc778', '018310dc409b26d6'),
('305532286d6f295a', '55cb3774d13ef201', '1c587f1c13924fef'),
('0123456789abcdef', 'fa34ec4847b268b2', '0101010101010101'),
('0123456789abcdef', 'a790795108ea3cae', '1f1f1f1f0e0e0e0e'),
('0123456789abcdef', 'c39e072d9fac631d', 'e0fee0fef1fef1fe'),
('ffffffffffffffff', '014933e0cdaff6e4', '0000000000000000'),
('0000000000000000', 'f21e9a77b71c49bc', 'ffffffffffffffff'),
('0000000000000000', '245946885754369a', '0123456789abcdef'),
('ffffffffffffffff', '6b5c5a9c5d9e0a5a', 'fedcba9876543210'),
('fedcba9876543210', 'f9ad597c49db005e', 'f0'),
('fedcba9876543210', 'e91d21c1d961a6d6', 'f0e1'),
('fedcba9876543210', 'e9c2b70a1bc65cf3', 'f0e1d2'),
('fedcba9876543210', 'be1e639408640f05', 'f0e1d2c3'),
('fedcba9876543210', 'b39e44481bdb1e6e', 'f0e1d2c3b4'),
('fedcba9876543210', '9457aa83b1928c0d', 'f0e1d2c3b4a5'),
('fedcba9876543210', '8bb77032f960629d', 'f0e1d2c3b4a596'),
('fedcba9876543210', 'e87a244e2cc85e82', 'f0e1d2c3b4a59687'),
('fedcba9876543210', '15750e7a4f4ec577', 'f0e1d2c3b4a5968778'),
('fedcba9876543210', '122ba70b3ab64ae0', 'f0e1d2c3b4a596877869'),
('fedcba9876543210', '3a833c9affc537f6', 'f0e1d2c3b4a5968778695a'),
('fedcba9876543210', '9409da87a90f6bf2', 'f0e1d2c3b4a5968778695a4b'),
('fedcba9876543210', '884f80625060b8b4', 'f0e1d2c3b4a5968778695a4b3c'),
('fedcba9876543210', '1f85031c19e11968', 'f0e1d2c3b4a5968778695a4b3c2d'),
('fedcba9876543210', '79d9373a714ca34f', 'f0e1d2c3b4a5968778695a4b3c2d1e'),
('fedcba9876543210', '93142887ee3be15c',
'f0e1d2c3b4a5968778695a4b3c2d1e0f'),
('fedcba9876543210', '03429e838ce2d14b',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00'),
('fedcba9876543210', 'a4299e27469ff67b',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011'),
('fedcba9876543210', 'afd5aed1c1bc96a8',
'f0e1d2c3b4a5968778695a4b3c2d1e0f001122'),
('fedcba9876543210', '10851c0e3858da9f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233'),
('fedcba9876543210', 'e6f51ed79b9db21f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344'),
('fedcba9876543210', '64a6e14afd36b46f',
'f0e1d2c3b4a5968778695a4b3c2d1e0f001122334455'),
('fedcba9876543210', '80c7d7d45a5479ad',
'f0e1d2c3b4a5968778695a4b3c2d1e0f00112233445566'),
('fedcba9876543210', '05044b62fa52d080',
'f0e1d2c3b4a5968778695a4b3c2d1e0f0011223344556677'),
]
def get_tests(config={}):
    """Build and return the unittest tests for the Blowfish block cipher."""
    # Imports are deferred so merely importing this module stays cheap.
    from Crypto.Cipher import Blowfish
    from common import make_block_tests
    tests = make_block_tests(Blowfish, "Blowfish", test_data)
    return tests
# Allow running this self-test module directly: ``python test_Blowfish.py``.
if __name__ == '__main__':
    import unittest
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
| gpl-3.0 |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_2/tests/regressiontests/signals_regress/tests.py | 43 | 3760 | import sys
from StringIO import StringIO
from django.test import TestCase
from django.db import models
from regressiontests.signals_regress.models import Author, Book
signal_output = []
def pre_save_test(signal, sender, instance, **kwargs):
    """Record a pre_save notification (and whether it was a raw save)."""
    entries = ['pre_save signal, %s' % instance]
    if kwargs.get('raw'):
        entries.append('Is raw')
    signal_output.extend(entries)
def post_save_test(signal, sender, instance, **kwargs):
    """Record a post_save notification, noting created/updated and raw flags."""
    signal_output.append('post_save signal, %s' % instance)
    if 'created' in kwargs:
        signal_output.append('Is created' if kwargs['created'] else 'Is updated')
    if kwargs.get('raw'):
        signal_output.append('Is raw')
def pre_delete_test(signal, sender, instance, **kwargs):
    """Record a pre_delete notification and whether the instance has a pk."""
    # NOTE(review): the label says "pre_save" for a pre_delete handler --
    # looks like copy/paste, but other assertions may depend on this exact
    # string, so it is left unchanged.
    signal_output.append('pre_save signal, %s' % instance)
    # PEP 8 (E711): identity test against None instead of equality; the
    # result is identical for int-or-None primary keys.
    signal_output.append('instance.id is not None: %s' % (instance.id is not None))
def post_delete_test(signal, sender, instance, **kwargs):
    """Record a post_delete notification and whether the instance kept a pk."""
    signal_output.append('post_delete signal, %s' % instance)
    # PEP 8 (E711): identity test against None instead of equality; the
    # result is identical for int-or-None primary keys.
    signal_output.append('instance.id is not None: %s' % (instance.id is not None))
class SignalsRegressTests(TestCase):
    """
    Testing signals before/after saving and deleting.
    """

    def get_signal_output(self, fn, *args, **kwargs):
        # Run *fn* and return only the signal messages it produced.
        # Flush any existing signal output
        global signal_output
        signal_output = []
        fn(*args, **kwargs)
        return signal_output

    def setUp(self):
        # Save up the number of connected signals so that we can check at the end
        # that all the signals we register get properly unregistered (#9989)
        self.pre_signals = (len(models.signals.pre_save.receivers),
                            len(models.signals.post_save.receivers),
                            len(models.signals.pre_delete.receivers),
                            len(models.signals.post_delete.receivers))
        models.signals.pre_save.connect(pre_save_test)
        models.signals.post_save.connect(post_save_test)
        models.signals.pre_delete.connect(pre_delete_test)
        models.signals.post_delete.connect(post_delete_test)

    def tearDown(self):
        # Disconnect in reverse order of connection.
        models.signals.post_delete.disconnect(post_delete_test)
        models.signals.pre_delete.disconnect(pre_delete_test)
        models.signals.post_save.disconnect(post_save_test)
        models.signals.pre_save.disconnect(pre_save_test)
        # Check that all our signals got disconnected properly.
        post_signals = (len(models.signals.pre_save.receivers),
                        len(models.signals.post_save.receivers),
                        len(models.signals.pre_delete.receivers),
                        len(models.signals.post_delete.receivers))
        self.assertEquals(self.pre_signals, post_signals)

    def test_model_signals(self):
        """ Model saves should throw some signals. """
        a1 = Author(name='Neal Stephenson')
        self.assertEquals(self.get_signal_output(a1.save), [
            "pre_save signal, Neal Stephenson",
            "post_save signal, Neal Stephenson",
            "Is created"
        ])
        b1 = Book(name='Snow Crash')
        self.assertEquals(self.get_signal_output(b1.save), [
            "pre_save signal, Snow Crash",
            "post_save signal, Snow Crash",
            "Is created"
        ])

    def test_m2m_signals(self):
        """ Assigning and removing to/from m2m shouldn't generate an m2m signal """
        b1 = Book(name='Snow Crash')
        self.get_signal_output(b1.save)
        a1 = Author(name='Neal Stephenson')
        self.get_signal_output(a1.save)
        # m2m assignment must not emit pre/post save signals of its own.
        self.assertEquals(self.get_signal_output(setattr, b1, 'authors', [a1]), [])
        self.assertEquals(self.get_signal_output(setattr, b1, 'authors', []), [])
| apache-2.0 |
vh21/codezero | scripts/config/projpaths.py | 3 | 2986 | #! /usr/bin/env python2.6
# -*- mode: python; coding: utf-8; -*-
import os, sys, shelve, shutil
from os.path import join
# Way to get project root from any script importing this one :-)
PROJROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))
BUILDDIR = join(PROJROOT, 'build')
TOOLSDIR = join(PROJROOT, 'tools')
LOADERDIR = join(PROJROOT, 'loader')
KERNEL_HEADERS = join(PROJROOT, 'include')
SCRIPTS_DIR = join(PROJROOT, 'scripts')
KERNEL_ELF = join(BUILDDIR, 'kernel.elf')
FINAL_ELF = join(BUILDDIR, 'final.elf')
USERLIBS_RELDIR = 'conts/userlibs'
USERLIBS_DIR = join(PROJROOT, USERLIBS_RELDIR)
LIBL4_RELDIR = join(USERLIBS_RELDIR, 'libl4')
LIBL4_DIR = join(PROJROOT, LIBL4_RELDIR)
LIBL4_INCLUDE = join(LIBL4_DIR, 'include')
LIBL4_LIBPATH = join(BUILDDIR, LIBL4_RELDIR)
LIBC_RELDIR = join(USERLIBS_RELDIR, 'libc')
LIBC_DIR = join(PROJROOT, LIBC_RELDIR)
LIBC_LIBPATH = join(BUILDDIR, LIBC_RELDIR)
LIBC_INCLUDE = [join(LIBC_DIR, 'include')]
LIBDEV_RELDIR = join(USERLIBS_RELDIR, 'libdev')
LIBDEV_DIR = join(PROJROOT, LIBDEV_RELDIR)
LIBDEV_INCLUDE = [join(LIBDEV_DIR, 'uart/include'), join(LIBDEV_DIR, 'include')]
LIBDEV_USER_LIBPATH = join(join(BUILDDIR, LIBDEV_RELDIR), 'sys-userspace')
LIBDEV_BAREMETAL_LIBPATH = join(join(BUILDDIR, LIBDEV_RELDIR), 'sys-baremetal')
LIBMEM_RELDIR = join(USERLIBS_RELDIR, 'libmem')
LIBMEM_DIR = join(PROJROOT, LIBMEM_RELDIR)
LIBMEM_LIBPATH = join(BUILDDIR, LIBMEM_RELDIR)
LIBMEM_INCLUDE = join(LIBMEM_DIR, 'include')
CML2_CONFIG_SRCDIR = join(SCRIPTS_DIR, 'config/cml')
CML2_CONT_DEFFILE = join(CML2_CONFIG_SRCDIR, 'container_ruleset.template')
CML2TOOLSDIR = join(TOOLSDIR, 'cml2-tools')
CML2_COMPILED_RULES = join(BUILDDIR, 'rules.compiled')
CML2_CONFIG_FILE = join(BUILDDIR, 'config.cml')
CML2_CONFIG_H = join(BUILDDIR, 'config.h')
CML2_AUTOGEN_RULES = join(BUILDDIR, 'config.rules')
CONFIG_H = join(PROJROOT, 'include/l4/config.h')
CONFIG_SHELVE_DIR = join(BUILDDIR, 'configdata')
CONFIG_SHELVE_FILENAME = 'configuration'
CONFIG_SHELVE = join(CONFIG_SHELVE_DIR, CONFIG_SHELVE_FILENAME)
KERNEL_CINFO_PATH = join(PROJROOT, "src/generic/cinfo.c")
LINUXDIR = join(PROJROOT, 'conts/linux')
LINUX_KERNELDIR = join(LINUXDIR, 'kernel-2.6.34')
LINUX_ROOTFSDIR = join(LINUXDIR, 'rootfs')
LINUX_ATAGSDIR = join(LINUXDIR, 'atags')
POSIXDIR = join(PROJROOT, 'conts/posix')
POSIX_BOOTDESCDIR = join(POSIXDIR, 'bootdesc')
projpaths = {
'LINUX_ATAGSDIR' : LINUX_ATAGSDIR,
'LINUX_ROOTFSDIR' : LINUX_ROOTFSDIR,
'LINUX_KERNELDIR' : LINUX_KERNELDIR,
'LINUXDIR' : LINUXDIR,
'BUILDDIR' : BUILDDIR,
'POSIXDIR' : POSIXDIR,
'POSIX_BOOTDESCDIR' : POSIX_BOOTDESCDIR
}
def define_config_dependent_projpaths(config):
    # Extend the libc include paths with the architecture-specific directory
    # selected by the build configuration.
    # NOTE(review): this appends a *list* inside LIBC_INCLUDE (a list of
    # strings), producing a nested entry -- append(join(...)) or
    # extend([...]) was probably intended; confirm against the consumers
    # of LIBC_INCLUDE before changing.
    LIBC_INCLUDE.append([join(LIBC_DIR, 'include/arch/' + config.arch)])
    return None
| gpl-3.0 |
guibernardino/mezzanine | mezzanine/twitter/migrations/0002_auto__chg_field_query_value.py | 20 | 2320 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: widen Query.value from 50 to 140 characters
    # (the maximum tweet length at the time).

    def forwards(self, orm):
        # Changing field 'Query.value'
        db.alter_column('twitter_query', 'value', self.gf('django.db.models.fields.CharField')(max_length=140))

    def backwards(self, orm):
        # Changing field 'Query.value'
        db.alter_column('twitter_query', 'value', self.gf('django.db.models.fields.CharField')(max_length=50))

    # Frozen ORM snapshot generated by South; not meant to be edited by hand.
    models = {
        'twitter.query': {
            'Meta': {'object_name': 'Query'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'interested': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        },
        'twitter.tweet': {
            'Meta': {'object_name': 'Tweet'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'profile_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'query': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tweets'", 'to': "orm['twitter.Query']"}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'retweeter_full_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'retweeter_profile_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
            'retweeter_user_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'user_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
        }
    }

    complete_apps = ['twitter']
| bsd-2-clause |
sharkdata/sharkdata | app_ctdprofiles/ctdprofiles_core.py | 1 | 3299 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: https://sharkdata.smhi.se
# Copyright (c) 2018-present SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import datetime
import pathlib
import folium
import bokeh
# from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import file_html
from app_ctdprofiles import ctd_profile_plot
class CtdProfilesCore:
    """Render CTD profile data as HTML: a folium map of profile positions
    and a bokeh plot of an individual profile.

    ``createPlot`` and ``downloadProfile`` were byte-identical copies; they
    now share one private helper, and the parameter list (also duplicated
    twice) is a single class constant.  Public interface is unchanged.
    """

    # Columns extracted from the zipped CTD profile for plotting.
    _PARAMETER_LIST = [
        "PRES_CTD [dbar]",
        "CNDC_CTD [S/m]",
        "CNDC2_CTD [S/m]",
        "SALT_CTD [psu (PSS-78)]",
        "SALT2_CTD [psu (PSS-78)]",
        "TEMP_CTD [°C (ITS-90)]",
        "TEMP2_CTD [°C (ITS-90)]",
        "DOXY_CTD [ml/l]",
        "DOXY2_CTD [ml/l]",
        "PAR_CTD [µE/(cm2 ·sec)]",
        "CHLFLUO_CTD [mg/m3]",
        "TURB_CTD [NTU]",
        "PHYC_CTD [ppb]",
    ]

    def __init__(self):
        """ """

    def createMap(self, lat_long_desc_table=[]):
        """ Plots positions on an interactive OpenStreetMap by using the folium library.

        ``lat_long_desc_table`` rows are (latitude, longitude, description);
        the description becomes the marker popup.
        """
        # Initial view roughly centered on Scandinavia.
        m = folium.Map([60.0, 15.0], zoom_start=5)
        for lat, long, desc in lat_long_desc_table:
            folium.Marker([lat, long], popup=desc).add_to(m)
        return m.get_root().render()

    def _render_profile_html(self, path_zipfile, profile_name):
        """Build the bokeh temperature-vs-pressure plot (salinity as the
        third dimension) for one profile and return a standalone HTML page."""
        rzip = ctd_profile_plot.ReadZipFile(path_zipfile, profile_name)
        data = rzip.get_data(self._PARAMETER_LIST)
        profile = ctd_profile_plot.ProfilePlot(data, parameters=self._PARAMETER_LIST)
        plot = profile.plot(
            x="TEMP_CTD [°C (ITS-90)]",
            y="PRES_CTD [dbar]",
            z="SALT_CTD [psu (PSS-78)]",
            name=profile_name,
        )
        return file_html(plot, CDN, "my plot")

    def createPlot(self, path_zipfile, profile_name):
        """ Returns an HTML plot of the named profile from the zip archive. """
        return self._render_profile_html(path_zipfile, profile_name)

    def downloadProfile(self, path_zipfile, profile_name):
        """ Returns the same HTML document as createPlot; kept as a separate
        method because the download endpoint calls it by this name. """
        return self._render_profile_html(path_zipfile, profile_name)
| mit |
Gadal/sympy | sympy/polys/domains/polynomialring.py | 101 | 4409 | """Implementation of :class:`PolynomialRing` class. """
from __future__ import print_function, division
from sympy.polys.domains.ring import Ring
from sympy.polys.domains.compositedomain import CompositeDomain
from sympy.polys.polyerrors import CoercionFailed, GeneratorsError
from sympy.utilities import public
@public
class PolynomialRing(Ring, CompositeDomain):
    """A class for representing multivariate polynomial rings. """

    is_PolynomialRing = is_Poly = True

    has_assoc_Ring = True
    has_assoc_Field = True

    def __init__(self, domain_or_ring, symbols=None, order=None):
        # Either wrap an existing low-level PolyRing directly, or build one
        # from (ground domain, symbols, monomial order).
        from sympy.polys.rings import PolyRing

        if isinstance(domain_or_ring, PolyRing) and symbols is None and order is None:
            ring = domain_or_ring
        else:
            ring = PolyRing(symbols, domain_or_ring, order)

        self.ring = ring
        self.dtype = ring.dtype

        self.gens = ring.gens
        self.ngens = ring.ngens
        self.symbols = ring.symbols
        self.domain = ring.domain

        # TODO: remove this
        self.dom = self.domain

    def new(self, element):
        """Construct an element of this ring from ``element``. """
        return self.ring.ring_new(element)

    @property
    def zero(self):
        """Additive identity of the ring. """
        return self.ring.zero

    @property
    def one(self):
        """Multiplicative identity of the ring. """
        return self.ring.one

    @property
    def order(self):
        """Monomial ordering used by the underlying ring. """
        return self.ring.order

    def __str__(self):
        # e.g. "ZZ[x,y]"
        return str(self.domain) + '[' + ','.join(map(str, self.symbols)) + ']'

    def __hash__(self):
        return hash((self.__class__.__name__, self.dtype, self.domain, self.symbols))

    def __eq__(self, other):
        """Returns `True` if two domains are equivalent. """
        return isinstance(other, PolynomialRing) and \
            self.dtype == other.dtype and self.ring == other.ring

    def to_sympy(self, a):
        """Convert `a` to a SymPy object. """
        return a.as_expr()

    def from_sympy(self, a):
        """Convert SymPy's expression to `dtype`. """
        return self.ring.from_expr(a)

    def from_ZZ_python(K1, a, K0):
        """Convert a Python `int` object to `dtype`. """
        return K1(K1.domain.convert(a, K0))

    def from_QQ_python(K1, a, K0):
        """Convert a Python `Fraction` object to `dtype`. """
        return K1(K1.domain.convert(a, K0))

    def from_ZZ_gmpy(K1, a, K0):
        """Convert a GMPY `mpz` object to `dtype`. """
        return K1(K1.domain.convert(a, K0))

    def from_QQ_gmpy(K1, a, K0):
        """Convert a GMPY `mpq` object to `dtype`. """
        return K1(K1.domain.convert(a, K0))

    def from_RealField(K1, a, K0):
        """Convert a mpmath `mpf` object to `dtype`. """
        return K1(K1.domain.convert(a, K0))

    def from_AlgebraicField(K1, a, K0):
        """Convert an algebraic number to ``dtype``. """
        # Only possible when the ground domain equals K0; otherwise the
        # implicit None return signals that the conversion failed.
        if K1.domain == K0:
            return K1.new(a)

    def from_PolynomialRing(K1, a, K0):
        """Convert a polynomial to ``dtype``. """
        try:
            return a.set_ring(K1.ring)
        except (CoercionFailed, GeneratorsError):
            # Incompatible generators or ground domain: conversion failed.
            return None

    def from_FractionField(K1, a, K0):
        """Convert a rational function to ``dtype``. """
        # Only exact when the denominator is a ground-domain constant.
        denom = K0.denom(a)

        if denom.is_ground:
            return K1.from_PolynomialRing(K0.numer(a)/denom, K0.field.ring.to_domain())
        else:
            return None

    def get_field(self):
        """Returns a field associated with `self`. """
        return self.ring.to_field().to_domain()

    def is_positive(self, a):
        """Returns True if `LC(a)` is positive. """
        return self.domain.is_positive(a.LC)

    def is_negative(self, a):
        """Returns True if `LC(a)` is negative. """
        return self.domain.is_negative(a.LC)

    def is_nonpositive(self, a):
        """Returns True if `LC(a)` is non-positive. """
        return self.domain.is_nonpositive(a.LC)

    def is_nonnegative(self, a):
        """Returns True if `LC(a)` is non-negative. """
        return self.domain.is_nonnegative(a.LC)

    def gcdex(self, a, b):
        """Extended GCD of `a` and `b`. """
        return a.gcdex(b)

    def gcd(self, a, b):
        """Returns GCD of `a` and `b`. """
        return a.gcd(b)

    def lcm(self, a, b):
        """Returns LCM of `a` and `b`. """
        return a.lcm(b)

    def factorial(self, a):
        """Returns factorial of `a`. """
        return self.dtype(self.domain.factorial(a))
| bsd-3-clause |
sio2project/linaro-django-pagination | setup.py | 2 | 2901 | #!/usr/bin/env python
# Copyright (c) 2008, Eric Florenzano
# Copyright (c) 2010, 2011 Linaro Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages


def _read(path):
    """Return the contents of *path*, closing the handle promptly.

    The original ``open("README").read()`` leaked the file descriptor
    until garbage collection.
    """
    with open(path) as stream:
        return stream.read()


setup(
    name='linaro-django-pagination',
    # Magic version handling with versiontools
    version=":versiontools:linaro_django_pagination:__version__",
    author='Zygmunt Krynicki',
    author_email='zygmunt.krynicki@linaro.org',
    description="linaro-django-pagination",
    long_description=_read("README"),
    keywords='pagination,django',
    url='https://github.com/zyga/django-pagination',
    test_suite='linaro_django_pagination.test_project.tests.run_tests',
    license='BSD',
    packages=find_packages(),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Framework :: Django",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
    ],
    install_requires=[
        'django >= 1.2',
    ],
    tests_require=[
        'django-testproject >= 0.1',
    ],
    setup_requires=[
        'versiontools >= 1.3.1'
    ],
    include_package_data=True,
)
| bsd-3-clause |
linpan/MSpider | function/crawl.py | 11 | 1438 | #!/usr/bin/env python
# coding:utf-8
# manning 2015-1-27
import lxml.html
import urlparse
import time
import sys
sys.path.append("..")
from fetch import fetcher
from config.config import *
from node import UrlNode,HtmlNode
def timestamp():
    """Return the current local time formatted as ``YYYY-MM-DD HH:MM:SS``.

    ``time.strftime`` already returns a string, so the redundant
    ``str(...)`` wrapper from the original has been dropped.
    """
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def crawler(html_node):
    """Extract outbound links from *html_node* and wrap them as UrlNode.

    Returns [] when the node carries no HTML.  Link fragments/query-less
    normalization is done via urlunparse (the fragment slot is blanked).
    NOTE: this module is Python 2 only (``except Exception, e`` syntax).
    """
    link_list = []
    html = html_node.html
    url = html_node.url
    if html == '':
        return []
    else:
        # Collect the links contained in the page (made absolute first).
        try:
            tmp = lxml.html.document_fromstring(html)
            tmp.make_links_absolute(url)
            links = tmp.iterlinks()
            link_list = list(set([i[2] for i in links]))
        except Exception, e:
            pass
        # Filter out links whose file extension is not wanted.
        try:
            temp_list = []
            for i in link_list:
                if urlparse.urlparse(i)[2].split('.')[-1].lower() not in IGNORE_EXT:
                    temp_list.append(i)
            link_list = temp_list
        except Exception, e:
            print str(e)
    tmp_url_node = []
    for i in link_list:
        # Rebuild each URL with the fragment stripped before wrapping it.
        tmp_url_node.append(UrlNode(urlparse.urlunparse((urlparse.urlparse(i)[0],urlparse.urlparse(i)[1],urlparse.urlparse(i)[2],urlparse.urlparse(i)[3],urlparse.urlparse(i)[4],'')),url,len(html),timestamp(),'',html_node.depth))
    return tmp_url_node
if __name__ == '__main__':
pass | gpl-2.0 |
haveal/googleads-python-lib | examples/dfa/v1_20/create_campaign.py | 4 | 2699 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a campaign in a given advertiser.
To create an advertiser, run create_advertiser.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfa
# Placeholders to fill in before running this example.
ADVERTISER_ID = 'INSERT_ADVERTISER_ID_HERE'
CAMPAIGN_NAME = 'INSERT_CAMPAIGN_NAME_HERE'
URL = 'INSERT_LANDING_PAGE_URL_HERE'
LANDING_PAGE_NAME = 'INSERT_LANDING_PAGE_NAME_HERE'
# NOTE(review): the int('INSERT_..._HERE') conversions below raise
# ValueError at import time until the placeholders are replaced with
# real numbers.
START_DATE = '%(year)s-%(month)02d-%(day)02dT12:00:00' % {
    'year': 'INSERT_START_YEAR_HERE',
    'month': int('INSERT_START_MONTH_HERE'),
    'day': int('INSERT_START_DAY_HERE')}
END_DATE = '%(year)s-%(month)02d-%(day)02dT12:00:00' % {
    'year': 'INSERT_END_YEAR_HERE',
    'month': int('INSERT_END_MONTH_HERE'),
    'day': int('INSERT_END_DAY_HERE')}
def main(client, advertiser_id, campaign_name, url, landing_page_name,
         start_date, end_date):
    """Create a campaign (with a fresh default landing page) under
    *advertiser_id* using the given DFA *client*.

    This module is Python 2 (print statement below).
    """
    # Initialize appropriate service.
    campaign_service = client.GetService(
        'campaign', 'v1.20', 'https://advertisersapitest.doubleclick.net')

    # Create a default landing page for the campaign and save it.
    default_landing_page = {
        'url': url,
        'name': landing_page_name
    }
    default_landing_page_id = campaign_service.saveLandingPage(
        default_landing_page)['id']

    # Construct and save the campaign.
    campaign = {
        'name': campaign_name,
        'advertiserId': advertiser_id,
        'defaultLandingPageId': default_landing_page_id,
        'archived': 'false',
        'startDate': start_date,
        'endDate': end_date
    }

    result = campaign_service.saveCampaign(campaign)

    # Display results.
    print 'Campaign with ID \'%s\' was created.' % result['id']


if __name__ == '__main__':
    # Initialize client object from ~/googleads.yaml credentials.
    dfa_client = dfa.DfaClient.LoadFromStorage()
    main(dfa_client, ADVERTISER_ID, CAMPAIGN_NAME, URL, LANDING_PAGE_NAME,
         START_DATE, END_DATE)
| apache-2.0 |
ClearCorp-dev/odoo | addons/stock/report/report_stock.py | 376 | 2486 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.sql import drop_view_if_exists
class report_stock_lines_date(osv.osv):
    """Read-only reporting model backed by a SQL view: for every product,
    the date of its latest *done* inventory and of its latest *done*
    stock move.
    """
    _name = "report.stock.lines.date"
    _description = "Dates of Inventories and latest Moves"
    _auto = False  # no table is created; init() builds the SQL view instead
    _order = "date"
    _columns = {
        'id': fields.integer('Product Id', readonly=True),
        'product_id': fields.many2one('product.product', 'Product', readonly=True, select=True),
        'date': fields.datetime('Date of latest Inventory', readonly=True),
        'move_date': fields.datetime('Date of latest Stock Move', readonly=True),
        "active": fields.boolean("Active", readonly=True),
    }

    def init(self, cr):
        """(Re)create the backing SQL view on module install/update."""
        drop_view_if_exists(cr, 'report_stock_lines_date')
        cr.execute("""
        create or replace view report_stock_lines_date as (
            select
            p.id as id,
            p.id as product_id,
            max(s.date) as date,
            max(m.date) as move_date,
            p.active as active
            from
            product_product p
                left join (
                stock_inventory_line l
                inner join stock_inventory s on (l.inventory_id=s.id and s.state = 'done')
                ) on (p.id=l.product_id)
                left join stock_move m on (m.product_id=p.id and m.state = 'done')
            group by p.id
        )""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
testmana2/test | Helpviewer/AdBlock/AdBlockIcon.py | 2 | 6964 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2015 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the AdBlock icon for the main window status bar.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QAction, QMenu
from E5Gui.E5ClickableLabel import E5ClickableLabel
import UI.PixmapCache
class AdBlockIcon(E5ClickableLabel):
    """
    Class implementing the AdBlock icon for the main window status bar.
    """
    def __init__(self, parent):
        """
        Constructor

        @param parent reference to the parent widget (HelpWindow)
        """
        super(AdBlockIcon, self).__init__(parent)

        self.__mw = parent
        self.__menuAction = None
        self.__enabled = False

        self.setMaximumHeight(16)
        self.setCursor(Qt.PointingHandCursor)
        self.setToolTip(self.tr(
            "AdBlock lets you block unwanted content on web pages."))

        # Left-click on the status bar icon pops up the context menu.
        self.clicked.connect(self.__showMenu)

    def setEnabled(self, enabled):
        """
        Public slot to set the enabled state.

        @param enabled enabled state (boolean)
        """
        self.__enabled = enabled
        if enabled:
            # Refresh the icon for the current tab (green when excepted).
            self.currentChanged()
        else:
            self.setPixmap(
                UI.PixmapCache.getPixmap("adBlockPlusDisabled16.png"))

    def __createMenu(self, menu=None):
        """
        Private slot to create the context menu.

        @param menu parent menu (QMenu)
        """
        # When triggered by the menu's aboutToShow signal the menu itself
        # is the sender; rebuild it from scratch each time.
        if menu is None:
            menu = self.sender()
            if menu is None:
                return

        menu.clear()

        import Helpviewer.HelpWindow
        manager = Helpviewer.HelpWindow.HelpWindow.adBlockManager()
        if manager.isEnabled():
            menu.addAction(
                UI.PixmapCache.getIcon("adBlockPlusDisabled.png"),
                self.tr("Disable AdBlock"),
                self.__enableAdBlock).setData(False)
        else:
            menu.addAction(
                UI.PixmapCache.getIcon("adBlockPlus.png"),
                self.tr("Enable AdBlock"),
                self.__enableAdBlock).setData(True)
        menu.addSeparator()
        if manager.isEnabled() and \
                self.__mw.currentBrowser().page().url().host():
            if self.__isCurrentHostExcepted():
                menu.addAction(
                    UI.PixmapCache.getIcon("adBlockPlus.png"),
                    self.tr("Remove AdBlock Exception"),
                    self.__setException).setData(False)
            else:
                menu.addAction(
                    UI.PixmapCache.getIcon("adBlockPlusGreen.png"),
                    self.tr("Add AdBlock Exception"),
                    self.__setException).setData(True)
        menu.addAction(
            UI.PixmapCache.getIcon("adBlockPlusGreen.png"),
            self.tr("AdBlock Exceptions..."), manager.showExceptionsDialog)
        menu.addSeparator()
        menu.addAction(
            UI.PixmapCache.getIcon("adBlockPlus.png"),
            self.tr("AdBlock Configuration..."), manager.showDialog)
        menu.addSeparator()

        entries = self.__mw.currentBrowser().page().getAdBlockedPageEntries()
        if entries:
            menu.addAction(self.tr(
                "Blocked URL (AdBlock Rule) - click to edit rule"))\
                .setEnabled(False)
            for entry in entries:
                # Show only the trailing 55 chars of long URLs; escape '&'
                # so it is not interpreted as a menu mnemonic.
                address = entry.urlString()[-55:]
                actionText = self.tr("{0} with ({1})").format(
                    address, entry.rule.filter()).replace("&", "&&")
                act = menu.addAction(actionText, manager.showRule)
                act.setData(entry.rule)
        else:
            menu.addAction(self.tr("No content blocked")).setEnabled(False)

    def menuAction(self):
        """
        Public method to get a reference to the menu action.

        @return reference to the menu action (QAction)
        """
        if not self.__menuAction:
            # NOTE(review): the QAction and its QMenu are created without a
            # Qt parent; they are kept alive by the Python reference held in
            # self.__menuAction -- confirm ownership semantics under PyQt5.
            self.__menuAction = QAction(self.tr("AdBlock"))
            self.__menuAction.setMenu(QMenu())
            self.__menuAction.menu().aboutToShow.connect(self.__createMenu)

        if self.__enabled:
            self.__menuAction.setIcon(
                UI.PixmapCache.getIcon("adBlockPlus.png"))
        else:
            self.__menuAction.setIcon(
                UI.PixmapCache.getIcon("adBlockPlusDisabled.png"))

        return self.__menuAction

    def __showMenu(self, pos):
        """
        Private slot to show the context menu.

        @param pos position the context menu should be shown (QPoint)
        """
        menu = QMenu()
        self.__createMenu(menu)
        menu.exec_(pos)

    def __enableAdBlock(self):
        """
        Private slot to enable or disable AdBlock.
        """
        act = self.sender()
        if act is not None:
            import Helpviewer.HelpWindow
            Helpviewer.HelpWindow.HelpWindow.adBlockManager().setEnabled(
                act.data())

    def __isCurrentHostExcepted(self):
        """
        Private method to check, if the host of the current browser is
        excepted.

        @return flag indicating an exception (boolean)
        """
        browser = self.__mw.currentBrowser()
        urlHost = browser.page().url().host()
        import Helpviewer.HelpWindow
        return urlHost and \
            Helpviewer.HelpWindow.HelpWindow.adBlockManager()\
            .isHostExcepted(urlHost)

    def currentChanged(self):
        """
        Public slot to handle a change of the current browser tab.
        """
        if self.__enabled:
            if self.__isCurrentHostExcepted():
                self.setPixmap(
                    UI.PixmapCache.getPixmap("adBlockPlusGreen16.png"))
            else:
                self.setPixmap(UI.PixmapCache.getPixmap("adBlockPlus16.png"))

    def __setException(self):
        """
        Private slot to add or remove the current host from the list of
        exceptions.
        """
        act = self.sender()
        if act is not None:
            import Helpviewer.HelpWindow
            urlHost = self.__mw.currentBrowser().page().url().host()
            if act.data():
                Helpviewer.HelpWindow.HelpWindow.adBlockManager()\
                    .addException(urlHost)
            else:
                Helpviewer.HelpWindow.HelpWindow.adBlockManager()\
                    .removeException(urlHost)
            self.currentChanged()

    def sourceChanged(self, browser, url):
        """
        Public slot to handle URL changes.

        @param browser reference to the browser (HelpBrowser)
        @param url new URL (QUrl)
        """
        if browser == self.__mw.currentBrowser():
            self.currentChanged()
| gpl-3.0 |
fusionpig/ansible | v1/ansible/utils/hashing.py | 104 | 3014 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
try:
from hashlib import sha1 as sha1
except ImportError:
from sha import sha as sha1
# Backwards compat only
try:
from hashlib import md5 as _md5
except ImportError:
try:
from md5 import md5 as _md5
except ImportError:
# Assume we're running in FIPS mode here
_md5 = None
def secure_hash_s(data, hash_func=sha1):
    ''' Return a secure hash hex digest of data.

    Accepts bytes or text; text is encoded as UTF-8 before hashing.
    '''
    digest = hash_func()
    try:
        digest.update(data)
    except (UnicodeEncodeError, TypeError):
        # Python 2 raises UnicodeEncodeError for non-ASCII unicode input;
        # Python 3 raises TypeError for *any* str input (hash.update only
        # accepts bytes-like objects).  Encode to UTF-8 and retry.
        digest.update(data.encode('utf-8'))
    return digest.hexdigest()
def secure_hash(filename, hash_func=sha1):
    ''' Return a secure hash hex digest of local file, None if file is not present or a directory. '''

    if not os.path.exists(filename) or os.path.isdir(filename):
        return None
    digest = hash_func()
    blocksize = 64 * 1024
    try:
        # ``with`` guarantees the handle is closed even if read() fails
        # (the original leaked it on error).
        with open(filename, 'rb') as infile:
            block = infile.read(blocksize)
            while block:
                digest.update(block)
                block = infile.read(blocksize)
    except IOError as e:
        # The original ``raise errors.AnsibleError(...)`` referenced a name
        # never imported in this module and therefore crashed with a
        # NameError; also ``except IOError, e`` is Python-2-only syntax.
        # Re-raise an IOError carrying the intended message instead.
        raise IOError("error while accessing the file %s, error was: %s" % (filename, e))
    return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
checksum = secure_hash      # file-based checksum (sha1)
checksum_s = secure_hash_s  # string-based checksum (sha1)
# Backwards compat functions. Some modules include md5s in their return values
# Continue to support that for now. As of ansible-1.8, all of those modules
# should also return "checksum" (sha1 for now)
# Do not use md5 unless it is needed for:
# 1) Optional backwards compatibility
# 2) Compliance with a third party protocol
#
# MD5 will not work on systems which are FIPS-140-2 compliant.
def md5s(data):
    """Return the MD5 hex digest of *data* (backwards-compat helper).

    Raises ValueError when MD5 is unavailable (FIPS-140-2 mode).
    """
    if _md5 is None:
        raise ValueError('MD5 not available. Possibly running in FIPS mode')
    return secure_hash_s(data, _md5)
def md5(filename):
    """Return the MD5 hex digest of *filename* (backwards-compat helper).

    Raises ValueError when MD5 is unavailable (FIPS-140-2 mode).
    """
    if _md5 is None:
        raise ValueError('MD5 not available. Possibly running in FIPS mode')
    return secure_hash(filename, _md5)
| gpl-3.0 |
JoeLaMartina/aima-python | submissions/LaMartina/vacuum2.py | 18 | 6879 | import agents as ag
def HW2Agent() -> object:
    """Build a reflex vacuum agent for the AIMA ``agents`` framework.

    The agent first probes Left, Right, Down and Up until it has bumped
    into each wall once (recorded in the string flags ``program.left`` …
    ``program.up``), then serpentines across the room using its
    percept/action history.  State lives as attributes on ``program``
    because the framework calls ``program(percept)`` with no other
    context.  NOTE(review): the 'true'/'false' flags are string-typed,
    not booleans -- all comparisons below rely on that.
    """
    def program(percept):
        bump, status = percept
        # (commented-out experiments left by the original author)
        # if program.startTop == 'false' and bump == 'None':
        #     action = 'Up'
        #     return action
        # if bump == 'None':
        #     action = 'Up'
        #     return action
        # if bump == 'Bump':
        #     program.startTop = 'true'
        # else:
        #     if program.startTop == 'false' and bump == 'Bump':
        #         program.startTop = 'true'
        # action = 'Left'
        # else:
        if status == 'Dirty':
            action = 'Suck'
        # else:
        #     if program.startTop == 'false' and bump == 'None':
        #         action = 'Up'
        #     else:
        #         if program.startTop == 'false' and bump == 'Bump':
        #             program.startTop = 'true'
        #             action = 'Left'
        else:
            # Look two steps back to recover from dead ends.
            lastBump, lastStatus = program.oldPercepts[-1]
            lastBump2, lastStatus2 = program.oldPercepts[-2]
            lastAction = program.oldActions[-1]
            lastAction2 = program.oldActions[-2]
            # Useless: if bump == 'Bump' and lastBump == 'Bump' and lastBump2 == 'Bump':
            #     action = 'Up'
            # else: Useless
            # Works:
            # if bump == 'Bump' and lastBump == 'Bump' and (lastAction == 'Right' or lastAction == 'Left'):
            #     action = 'Down'
            # else:
            #     ... (earlier strategy kept for reference)
            # Exploration phase: hit each wall once, in L, R, D, U order.
            if program.left == 'false' and bump == 'None':
                action = 'Left'
            else:
                if program.left == 'false' and bump == 'Bump':
                    program.left = 'true'
                    action = 'Right'
                else:
                    if program.right == 'false' and bump == 'None':
                        action = 'Right'
                    else:
                        if program.right == 'false' and bump == 'Bump':
                            program.right = 'true'
                            action = "Down"
                        else:
                            if program.down == 'false' and bump == 'None':
                                action = 'Down'
                            else:
                                if program.down == 'false' and bump == 'Bump':
                                    program.down = 'true'
                                    action = 'Up'
                                else:
                                    if program.up == 'false' and bump == 'None':
                                        action = 'Up'
                                    else:
                                        if program.up == 'false' and bump == 'Bump':
                                            program.up = 'true'
                                            action = "Down"
                                        else:
                                            # Sweep phase: keep going until a bump,
                                            # then turn via switchAction(); 'Down2'
                                            # encodes "second Down of a U-turn".
                                            if bump == 'None' and lastAction != 'Suck' and lastBump == 'None':
                                                action = lastAction
                                            else:
                                                if bump == 'None' and lastAction != 'Suck' and lastBump == 'Bump' and lastAction == 'Down' and program.lastDown == 'Left':
                                                    lastAction = 'Down2'
                                                    action = switchAction(lastAction)
                                                    program.lastDown = action
                                                else:
                                                    if bump == 'None' and lastAction != 'Suck' and lastBump == 'Bump' and lastAction == 'Down':
                                                        action = switchAction(lastAction)
                                                        program.lastDown = action
                                                    else:
                                                        if bump == 'Bump' and lastAction == 'Down' and program.lastDown =='Left':
                                                            lastAction = 'Down2'
                                                            action = switchAction(lastAction)
                                                            program.lastDown = action
                                                        else:
                                                            if bump == 'Bump' and lastAction == 'Down':
                                                                action = switchAction(lastAction)
                                                                program.lastDown = action
                                                            else:
                                                                if bump == 'Bump' and lastAction != 'Suck':
                                                                    action = switchAction(lastAction)
                                                                else:
                                                                    action = lastAction2
        program.oldPercepts.append(percept)
        program.oldActions.append(action)
        return action

    # assign static variables here
    program.oldPercepts = [('None', 'Clean'),('None', 'Clean')]
    program.oldActions = ['Right', 'Right']
    program.right = 'false'   # wall flags: 'true' once that wall was hit
    program.left = 'false'
    program.up = 'false'
    program.down = 'false'
    program.lastDown = ''     # which way the last Down-turn went
    program.startTop = 'false'

    # def switchAction(action):      (earlier version, kept for reference)
    #     if action == 'Right':
    #         newAction = 'Left'
    #     if action == 'Left':
    #         newAction = 'Right'
    #     if action == 'Down':
    #         newAction = 'Up'
    #     if action == 'Up':
    #         newAction = 'Down'
    #     return newAction

    def switchAction(action):
        """Map the previous move to the next leg of the serpentine sweep.

        NOTE(review): if called with any action other than 'Down', 'Left',
        'Right' or 'Down2', ``newAction`` is unbound and this raises
        NameError -- the guards in program() appear to prevent that, but
        confirm.
        """
        if action == 'Down':
            newAction = 'Left'
        if action == 'Left':
            newAction = 'Down'
        if action == 'Right':
            newAction = 'Down'
        if action == 'Down2':
            newAction = 'Right'
        return newAction
    switchAction.newAction = ''

    agt = ag.Agent(program)
    # assign class attributes here:
    # agt.direction = ag.Direction('left')
    return agt
qitaos/robotframework-seleniumlibrary | test/unit/test_seleniumlibrary.py | 7 | 4172 | import unittest
import os
from SeleniumLibrary import (SeleniumLibrary, _server_startup_command,
_server_startup_params, FIREFOX_TEMPLATE_ARG, FIREFOX_PROFILE_DIR,
FIREFOX_DEFAULT_PROFILE, SELENIUM_SERVER_PATH)
class TestGetBrowser(unittest.TestCase):
    """Tests for SeleniumLibrary._get_browser alias resolution.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias
    (removed in Python 3.12).
    """

    def setUp(self):
        self.lib = SeleniumLibrary()

    def test_ie_aliases(self):
        # Alias matching is case-insensitive.
        for alias in ['ie', 'IE', 'Internet Explorer', 'INTernETexplOrEr']:
            self.assertEqual(self.lib._get_browser(alias), '*iexplore')

    def test_firefox_aliases(self):
        for alias in ['ff', 'FF', 'firefox', 'FireFox']:
            self.assertEqual(self.lib._get_browser(alias), '*firefox')

    def test_non_alias_is_not_modified(self):
        # Unknown names (paths, JSON configs, typos) pass through verbatim.
        for non_alias in [
                'FIREFUX',
                'i e 8',
                'C:\\Program Files\\mybrowser\\brow.exe',
                '{"username": "user", "access-key": "7A9cea40-84f7-4d3b-8748-0e94fCd4dX4f"}']:
            self.assertEqual(self.lib._get_browser(non_alias), non_alias)

    def test_patched_remote_control(self):
        # Sanity check that the bundled selenium.py carries the conn.close()
        # patch this library depends on.
        rc_path = os.path.join(os.path.dirname(__file__), '..', '..', 'src',
                               'SeleniumLibrary', 'selenium.py')
        self.assertTrue('conn.close()' in open(rc_path).read())
class TestServerArguments(unittest.TestCase):
    """Tests for Selenium server startup command/parameter handling.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias
    (removed in Python 3.12).
    """

    def test_default_jar_path_is_correctly_determined(self):
        self.assertEqual(_server_startup_command(None)[:3],
                         ['java', '-jar', SELENIUM_SERVER_PATH])

    def test_given_jar_path_is_used(self):
        self.assertEqual(_server_startup_command('/some/jar.jar')[:3],
                         ['java', '-jar', '/some/jar.jar'])

    def test_selenium_lib_default_profile_is_used_when_no_profile_given(self):
        self.assertEqual(_server_startup_params([]),
                         [FIREFOX_TEMPLATE_ARG, FIREFOX_PROFILE_DIR])

    def test_given_profile_is_not_overridden(self):
        params = _server_startup_params([FIREFOX_TEMPLATE_ARG, 'foo'])
        self.assertEqual(params, [FIREFOX_TEMPLATE_ARG, 'foo'])

    def test_real_default_profile_can_be_used(self):
        params = [FIREFOX_TEMPLATE_ARG, FIREFOX_DEFAULT_PROFILE]
        self.assertEqual(_server_startup_params(params), [])

    def test_other_options_are_preserved(self):
        params = ['-someOpt', 'value', '-otherOpt']
        self.assertEqual(_server_startup_params(params),
                         params + [FIREFOX_TEMPLATE_ARG, FIREFOX_PROFILE_DIR])

    def test_jvm_options(self):
        # jvm= options must be spliced in before -jar.
        params = ['-foo', 'bar', 'jvm=-DsysProp=bar -Dht.Tp=qx']
        expected = ['java', '-DsysProp=bar', '-Dht.Tp=qx', '-jar',
                    SELENIUM_SERVER_PATH, '-foo', 'bar']
        self.assertEqual(_server_startup_command(None, *params)[:7], expected)
class TestInitialization(unittest.TestCase):
    """Tests for server host/port resolution at library init time.

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias
    (removed in Python 3.12).
    """

    def test_host_and_port_have_default_values(self):
        self._verify_host_and_port(SeleniumLibrary(), 'localhost', 4444)

    def test_host_and_port_can_be_given_separately(self):
        lib = SeleniumLibrary(server_host='1.2.3.4', server_port='1234')
        self._verify_host_and_port(lib, '1.2.3.4', 1234)

    def test_protocol_and_path_are_ignored_in_host(self):
        for host in 'http://1.2.3.4', 'http://1.2.3.4/', 'http://1.2.3.4/path':
            lib = SeleniumLibrary(server_host=host)
            self._verify_host_and_port(lib, '1.2.3.4', 4444)

    def test_port_can_be_given_as_part_of_host(self):
        lib = SeleniumLibrary(server_host='http://1.2.3.4:8001/')
        self._verify_host_and_port(lib, '1.2.3.4', 8001)
        lib = SeleniumLibrary(server_host='127.0.0.1:1000')
        self._verify_host_and_port(lib, '127.0.0.1', 1000)

    def test_port_given_as_part_of_host_overrides_possible_port(self):
        lib = SeleniumLibrary(server_host='http://1.2.3.4:8001',
                              server_port='1234')
        self._verify_host_and_port(lib, '1.2.3.4', 8001)

    def _verify_host_and_port(self, lib, host, port):
        # Shared assertion helper for all host/port tests above.
        self.assertEqual(lib._server_host, host)
        self.assertEqual(lib._server_port, port)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
ssorgatem/pulsar | pulsar/web/routes.py | 2 | 9844 | import os
from webob import exc
from json import loads
from galaxy.util import (
copy_to_path,
copy_to_temp,
)
from pulsar.client.job_directory import verify_is_in_directory
from pulsar.web.framework import Controller
from pulsar.manager_factory import DEFAULT_MANAGER_NAME
from pulsar.manager_endpoint_util import (
submit_job,
setup_job,
status_dict,
)
from pulsar.client.action_mapper import path_type
import logging
log = logging.getLogger(__name__)
class PulsarController(Controller):
    """Web-framework glue for Pulsar routes.

    Instances of this class are used below as decorators mapping URL
    routes to handler functions; the hooks here add token-based access
    control and inject per-request application objects (manager,
    file cache, object store) into the handlers.
    """

    def __init__(self, **kwargs):
        super(PulsarController, self).__init__(**kwargs)

    def _check_access(self, req, environ, start_response):
        # Enforce the app-wide private token when one is configured;
        # returning the 401 response short-circuits the handler.
        if req.app.private_token:
            sent_private_token = req.GET.get("private_token", None)
            if not (req.app.private_token == sent_private_token):
                return exc.HTTPUnauthorized()(environ, start_response)

    def _app_args(self, args, req):
        """Build the keyword arguments injected into route handlers."""
        app = req.app
        managers = app.managers
        # Routes may address a specific manager via {manager_name}.
        manager_name = args.get('manager_name', DEFAULT_MANAGER_NAME)
        app_args = {}
        app_args['manager'] = managers[manager_name]
        app_args['file_cache'] = getattr(app, 'file_cache', None)
        app_args['object_store'] = getattr(app, 'object_store', None)
        return app_args
@PulsarController(path="/jobs", method="POST", response_type='json')
def setup(manager, job_id, tool_id=None, tool_version=None):
    """Create the job directory structure for *job_id*."""
    return __setup(manager, job_id, tool_id=tool_id, tool_version=tool_version)


def __setup(manager, job_id, tool_id, tool_version):
    response = setup_job(manager, job_id, tool_id, tool_version)
    log.debug("Setup job with configuration: %s" % response)
    return response


@PulsarController(path="/jobs/{job_id}", method="DELETE")
def clean(manager, job_id):
    """Remove all on-disk state for a finished job."""
    manager.clean(job_id)


@PulsarController(path="/jobs/{job_id}/submit", method="POST")
def submit(manager, job_id, command_line, params='{}', dependencies_description='null', setup_params='{}', remote_staging='{}', env='[]'):
    """Queue *job_id* for execution.

    All structured arguments arrive JSON-encoded in the query/body and
    are decoded here before being handed to the manager.
    """
    submit_params = loads(params)
    setup_params = loads(setup_params)
    dependencies_description = loads(dependencies_description)
    env = loads(env)
    remote_staging = loads(remote_staging)
    submit_config = dict(
        job_id=job_id,
        command_line=command_line,
        setup_params=setup_params,
        submit_params=submit_params,
        dependencies_description=dependencies_description,
        env=env,
        remote_staging=remote_staging
    )
    submit_job(manager, submit_config)


@PulsarController(path="/jobs/{job_id}/status", response_type='json')
def status(manager, job_id):
    """Return the job's status dictionary."""
    return status_dict(manager, job_id)


@PulsarController(path="/jobs/{job_id}/cancel", method="PUT")
def cancel(manager, job_id):
    """Kill the running job."""
    manager.kill(job_id)
@PulsarController(path="/jobs/{job_id}/files", method="POST", response_type='json')
def upload_file(manager, type, file_cache, job_id, name, body, cache_token=None):
    """Store an uploaded file under the job directory."""
    # Input type should be one of input, config, workdir, tool, or unstructured (see action_mapper.path_type)
    path = manager.job_directory(job_id).calculate_path(name, type)
    return _handle_upload(file_cache, path, body, cache_token=cache_token)


@PulsarController(path="/jobs/{job_id}/files/path", method="GET", response_type='json')
def path(manager, type, job_id, name):
    """Resolve the on-disk path for a named job file."""
    # Outputs may live in the outputs/working directories; every other
    # type is resolved through the job directory's type-aware lookup.
    if type in [path_type.OUTPUT, path_type.OUTPUT_WORKDIR]:
        path = _output_path(manager, job_id, name, type)
    else:
        path = manager.job_directory(job_id).calculate_path(name, type)
    return {'path': path}


@PulsarController(path="/jobs/{job_id}/files", method="GET", response_type='file')
def download_output(manager, job_id, name, type=path_type.OUTPUT):
    """Stream an output file back to the client."""
    return _output_path(manager, job_id, name, type)


def output_path(manager, job_id, name, type=path_type.OUTPUT):
    # output_type should be one of...
    #   work_dir, direct
    # Added for non-transfer downloading.
    return {"path": _output_path(manager, job_id, name, type)}


def _output_path(manager, job_id, name, output_type):
    """Resolve *name* inside the job's outputs (or working) directory,
    rejecting names that would escape that directory.
    """
    directory = manager.job_directory(job_id).outputs_directory()
    if output_type == path_type.OUTPUT_WORKDIR:  # action_mapper.path_type.OUTPUT_WORKDIR
        directory = manager.job_directory(job_id).working_directory()
    path = os.path.join(directory, name)
    # Path-traversal guard.
    verify_is_in_directory(path, directory)
    return path
@PulsarController(path="/cache/status", method="GET", response_type='json')
def file_available(file_cache, ip, path):
    """ Returns {token: <token>, ready: <bool>}
    """
    return file_cache.file_available(ip, path)


@PulsarController(path="/cache", method="PUT", response_type='json')
def cache_required(file_cache, ip, path):
    """ Returns bool indicating whether this client should
    execute cache_insert. Either way the client should follow up
    with file_available.
    """
    return file_cache.cache_required(ip, path)


@PulsarController(path="/cache", method="POST", response_type='json')
def cache_insert(file_cache, ip, path, body):
    """Spool the uploaded body to a temp file and register it in the cache."""
    temp_path = copy_to_temp(body)
    file_cache.cache_file(temp_path, ip, path)
# TODO: coerce booleans and None values into correct types - simplejson may
# do this already but need to check.
#
# The endpoints below form a thin REST facade over the object store API:
# each wraps the raw object_id in a PulsarDataset and delegates.

@PulsarController(path="/objects/{object_id}/exists", response_type='json')
def object_store_exists(object_store, object_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    obj = PulsarDataset(object_id)
    return object_store.exists(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)


@PulsarController(path="/objects/{object_id}/file_ready", response_type='json')
def object_store_file_ready(object_store, object_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    obj = PulsarDataset(object_id)
    return object_store.file_ready(obj, base_dir=base_dir, dir_only=dir_only,
                                   extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root,
                                   alt_name=alt_name)


@PulsarController(path="/objects/{object_id}", method="POST", response_type='json')
def object_store_create(object_store, object_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    obj = PulsarDataset(object_id)
    return object_store.create(obj, base_dir=base_dir, dir_only=dir_only, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)


@PulsarController(path="/objects/{object_id}/empty", response_type='json')
def object_store_empty(object_store, object_id, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    obj = PulsarDataset(object_id)
    return object_store.empty(obj, base_dir=base_dir, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)


@PulsarController(path="/objects/{object_id}/size", response_type='json')
def object_store_size(object_store, object_id, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    obj = PulsarDataset(object_id)
    return object_store.size(obj, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
@PulsarController(path="/objects/{object_id}", method="DELETE", response_type='json')
def object_store_delete(object_store, object_id, entire_dir=False, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    """Delete *object_id* from the object store.

    The original implementation hard-coded ``entire_dir=False`` and
    ``base_dir=None`` in the delegated call, silently ignoring the values
    supplied by the client; they are forwarded now.
    """
    obj = PulsarDataset(object_id)
    return object_store.delete(obj, entire_dir=entire_dir, base_dir=base_dir, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root, alt_name=alt_name)
@PulsarController(path="/objects/{object_id}", method="GET", response_type='json')
def object_store_get_data(object_store, object_id, start=0, count=-1, base_dir=None, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    """Read ``count`` bytes (-1 = to end) starting at ``start``.

    Fixes the original call, which hard-coded ``base_dir=None`` and so
    silently ignored the client-supplied value.
    """
    obj = PulsarDataset(object_id)
    # NOTE(review): ``entire_dir=False`` looks copy-pasted from the delete
    # endpoint; confirm the backing object store accepts/ignores it.
    return object_store.get_data(obj, start=int(start), count=int(count), entire_dir=False,
                                 base_dir=base_dir, extra_dir=extra_dir, extra_dir_at_root=extra_dir_at_root,
                                 alt_name=alt_name)
@PulsarController(path="/objects/{object_id}/filename", response_type='json')
def object_store_get_filename(object_store, object_id, base_dir=None, dir_only=False, extra_dir=None, extra_dir_at_root=False, alt_name=None):
    """Resolve the on-disk path for the object store entry for ``object_id``."""
    dataset = PulsarDataset(object_id)
    return object_store.get_filename(
        dataset,
        base_dir=base_dir,
        dir_only=dir_only,
        extra_dir=extra_dir,
        extra_dir_at_root=extra_dir_at_root,
        alt_name=alt_name,
    )
@PulsarController(path="/objects/{object_id}", method="PUT", response_type='json')
def object_store_update_from_file(object_store, object_id, base_dir=None, extra_dir=None, extra_dir_at_root=False,
                                  alt_name=None, file_name=None, create=False):
    """Replace (or, when ``create`` is set, create) the stored object from a local file."""
    dataset = PulsarDataset(object_id)
    return object_store.update_from_file(
        dataset,
        base_dir=base_dir,
        extra_dir=extra_dir,
        extra_dir_at_root=extra_dir_at_root,
        alt_name=alt_name,
        file_name=file_name,
        create=create,
    )
@PulsarController(path="/object_store_usage_percent", response_type='json')
def object_store_get_store_usage_percent(object_store):
    """Report the object store's usage as a percentage (delegates directly)."""
    return object_store.get_store_usage_percent()
class PulsarDataset(object):
    """Intermediary between Pulsar and objectstore.

    Minimal dataset-like object carrying just the attributes the object
    store calls in this module need (``id`` and ``object_store_id``).
    """
    def __init__(self, id):
        # ``id`` shadows the builtin but is kept for caller compatibility.
        self.id = id
        # None here; presumably assigned elsewhere by object store code —
        # TODO(review): confirm against the object store implementations.
        self.object_store_id = None
def _handle_upload(file_cache, path, body, cache_token=None):
    """Copy an uploaded payload to ``path``.

    When ``cache_token`` is supplied, the previously cached file identified
    by that token is used as the source instead of the request body.
    Returns a dict with the destination path.
    """
    if cache_token:
        cached_file = file_cache.destination(cache_token)
        stream = open(cached_file, 'rb')
        log.info("Copying cached file %s to %s" % (cached_file, path))
    else:
        stream = body
    copy_to_path(stream, path)
    return {"path": path}
| apache-2.0 |
ewels/MultiQC_OSXApp | venv/lib/python2.7/site-packages/pip/utils/ui.py | 316 | 6774 | from __future__ import absolute_import
from __future__ import division
import itertools
import sys
from signal import signal, SIGINT, default_int_handler
from pip.compat import WINDOWS
from pip.utils import format_size
from pip.utils.logging import get_indentation
from pip._vendor import six
from pip._vendor.progress.bar import Bar, IncrementalBar
from pip._vendor.progress.helpers import WritelnMixin
from pip._vendor.progress.spinner import Spinner
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
def _select_progress_class(preferred, fallback):
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", six.text_type()),
getattr(preferred, "fill", six.text_type()),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
six.text_type().join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
# Use the fancy incremental bar only when the output encoding can render its
# glyphs; otherwise fall back to the plain ASCII Bar.
_BaseBar = _select_progress_class(IncrementalBar, Bar)
class InterruptibleMixin(object):
    """Mixin guaranteeing that ``finish()`` runs on keyboard interrupt.

    Without this, an interrupted download could leave temporary terminal
    state (such as a hidden cursor) behind.

    The progress library ships a similar SigIntMixin, but (as of 1.2) it:

    1. calls sys.exit();
    2. discards the previous SIGINT handler entirely;
    3. leaves its own handler installed even after an uninterrupted finish,
       causing surprising delayed effects if the user later interrupts an
       unrelated operation.
    """

    def __init__(self, *args, **kwargs):
        """Install our SIGINT handler, remembering the previous one."""
        super(InterruptibleMixin, self).__init__(*args, **kwargs)

        self.original_handler = signal(SIGINT, self.handle_sigint)

        # signal() returns None when the previous handler was not installed
        # from Python; we cannot restore that, so fall back to Python's
        # default SIGINT handler, which simply raises KeyboardInterrupt.
        if self.original_handler is None:
            self.original_handler = default_int_handler

    def finish(self):
        """Put the previous SIGINT handler back once the display is done.

        Runs whether the display ended normally or was interrupted.
        """
        super(InterruptibleMixin, self).finish()
        signal(SIGINT, self.original_handler)

    def handle_sigint(self, signum, frame):
        """Finish the display, then defer to the saved SIGINT handler.

        This handler is only installed while the display is active.
        """
        self.finish()
        self.original_handler(signum, frame)
class DownloadProgressMixin(object):
    """Mixin adding download-oriented fields (bytes, speed, ETA) to a
    progress display."""

    def __init__(self, *args, **kwargs):
        super(DownloadProgressMixin, self).__init__(*args, **kwargs)
        # Indent the message so it lines up with pip's log output.
        indent = " " * (get_indentation() + 2)
        self.message = indent + self.message

    @property
    def downloaded(self):
        # Human-readable count of bytes received so far.
        return format_size(self.index)

    @property
    def download_speed(self):
        # self.avg is seconds per unit; guard against division by zero.
        if self.avg == 0.0:
            return "..."
        return format_size(1 / self.avg) + "/s"

    @property
    def pretty_eta(self):
        return "eta %s" % self.eta_td if self.eta else ""

    def iter(self, it, n=1):
        for item in it:
            yield item
            self.next(n)
        self.finish()
class WindowsMixin(object):
    """Mixin adapting the progress display to Windows consoles (no ANSI
    cursor codes; output optionally wrapped with colorama)."""
    def __init__(self, *args, **kwargs):
        # The Windows terminal does not support the hide/show cursor ANSI codes
        # even with colorama. So we'll ensure that hide_cursor is False on
        # Windows.
        # This call needs to go before the super() call, so that hide_cursor
        # is set in time. The base progress bar class writes the "hide cursor"
        # code to the terminal in its init, so if we don't set this soon
        # enough, we get a "hide" with no corresponding "show"...
        if WINDOWS and self.hide_cursor:
            self.hide_cursor = False
        super(WindowsMixin, self).__init__(*args, **kwargs)
        # Check if we are running on Windows and we have the colorama module,
        # if we do then wrap our file with it.
        if WINDOWS and colorama:
            self.file = colorama.AnsiToWin32(self.file)
            # The progress code expects to be able to call self.file.isatty()
            # but the colorama.AnsiToWin32() object doesn't have that, so we'll
            # add it.
            self.file.isatty = lambda: self.file.wrapped.isatty()
            # The progress code expects to be able to call self.file.flush()
            # but the colorama.AnsiToWin32() object doesn't have that, so we'll
            # add it.
            self.file.flush = lambda: self.file.wrapped.flush()
class DownloadProgressBar(WindowsMixin, InterruptibleMixin,
                          DownloadProgressMixin, _BaseBar):
    """Progress bar for downloads with a known total size, on stdout."""
    file = sys.stdout
    message = "%(percent)d%%"
    suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
                              DownloadProgressMixin, WritelnMixin, Spinner):
    """Spinner for downloads whose total size is unknown, on stdout."""
    file = sys.stdout
    suffix = "%(downloaded)s %(download_speed)s"
    def next_phase(self):
        # Lazily create a cycling iterator over the spinner glyphs.
        if not hasattr(self, "_phaser"):
            self._phaser = itertools.cycle(self.phases)
        return next(self._phaser)
    def update(self):
        # Render "<message> <phase> <suffix>", omitting separators next to
        # empty parts, and rewrite the current terminal line.
        message = self.message % self
        phase = self.next_phase()
        suffix = self.suffix % self
        line = ''.join([
            message,
            " " if message else "",
            phase,
            " " if suffix else "",
            suffix,
        ])
        self.writeln(line)
| mit |
blade2005/dosage | dosagelib/plugins/v.py | 1 | 2213 | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from re import compile
from ..scraper import _BasicScraper
from ..util import tagre
class VampireCheerleaders(_BasicScraper):
    """Scraper definition for the Vampire Cheerleaders webcomic."""
    url = 'http://www.vampirecheerleaders.net/'
    stripUrl = url + 'strips-vc/%s'
    firstStripUrl = stripUrl % 'fang_service'
    # Regexes locating the strip image and the "previous" navigation link.
    imageSearch = compile(tagre("img", "src", r'(/comics/[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(http://www\.vampirecheerleaders\.net/strips-vc/[^"]+)', before="cndprev"))
    help = 'Index format: name'
class VGCats(_BasicScraper):
    """Scraper for VGCats; also the base class for its sub-comics below."""
    url = 'http://www.vgcats.com/comics/'
    stripUrl = url + '?strip_id=%s'
    firstStripUrl = stripUrl % '0'
    # Strip images are six-digit-numbered files under images/.
    imageSearch = compile(tagre("img", "src", r'(images/\d{6}\.[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(\?strip_id=\d+)') +
                         tagre("img", "src", r"back\.gif"))
    help = 'Index format: n (unpadded)'
class VGCatsAdventure(VGCats):
    """VGCats sub-comic hosted under /ffxi/; reuses the VGCats regexes."""
    name = 'VGCats/Adventure'
    url = 'http://www.vgcats.com/ffxi/'
    stripUrl = url + '?strip_id=%s'
class VGCatsSuper(VGCats):
    """VGCats sub-comic hosted under /super/; reuses the VGCats regexes."""
    name = 'VGCats/Super'
    url = 'http://www.vgcats.com/super/'
    stripUrl = url + '?strip_id=%s'
class VictimsOfTheSystem(_BasicScraper):
    """Scraper definition for the Victims of the System webcomic."""
    url = 'http://www.votscomic.com/'
    stripUrl = url + '?id=%s.jpg'
    firstStripUrl = stripUrl % '070103-002452'
    imageSearch = compile(tagre("img", "src", r'(comicpro/strips/[^"]+)'))
    # Previous link is an anchor whose text is "Previous".
    prevSearch = compile(tagre("a", "href", r'(\?id=\d+-\d+\.jpg)') +
                         "Previous")
    help = 'Index format: nnn-nnn'
class ViiviJaWagner(_BasicScraper):
    """Scraper for Viivi & Wagner (hs.fi); Finnish-language comic."""
    url = 'http://www.hs.fi/viivijawagner/'
    # No per-strip index URL scheme for this comic.
    stripUrl = None
    imageSearch = compile(tagre("img", "src", r'(http://hs\d+\.snstatic\.fi/webkuva/sarjis/[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(/viivijawagner/[^"]+)',
                               before="prev-cm"))
    help = 'Index format: none'
    lang = 'fi'
    def namer(self, image_url, page_url):
        # Name the downloaded file after the part of the image URL
        # following the first '='.
        return image_url.split('=')[1]
| mit |
dlorenc/docker-py | tests/integration/exec_test.py | 12 | 4673 | import pytest
from .. import helpers
BUSYBOX = helpers.BUSYBOX
class ExecTest(helpers.BaseTestCase):
    """Integration tests for the exec_create/exec_start/exec_inspect API.

    Improvement: the native-driver skip and the busybox ``cat`` container
    setup were duplicated in every test; they are extracted into private
    helpers. Test method names and assertions are unchanged.
    """

    def _require_native_exec(self):
        # All exec tests require the native exec driver; otherwise skip.
        if not helpers.exec_driver_is_native():
            pytest.skip('Exec driver not native')

    def _start_cat_container(self):
        # Start a long-lived busybox container (``cat`` with an open stdin
        # keeps it running), register it for cleanup, and return its id.
        container = self.client.create_container(BUSYBOX, 'cat',
                                                 detach=True, stdin_open=True)
        cid = container['Id']
        self.client.start(cid)
        self.tmp_containers.append(cid)
        return cid

    def test_execute_command(self):
        self._require_native_exec()
        cid = self._start_cat_container()

        res = self.client.exec_create(cid, ['echo', 'hello'])
        self.assertIn('Id', res)

        exec_log = self.client.exec_start(res)
        self.assertEqual(exec_log, b'hello\n')

    def test_exec_command_string(self):
        self._require_native_exec()
        cid = self._start_cat_container()

        res = self.client.exec_create(cid, 'echo hello world')
        self.assertIn('Id', res)

        exec_log = self.client.exec_start(res)
        self.assertEqual(exec_log, b'hello world\n')

    def test_exec_command_as_user(self):
        self._require_native_exec()
        cid = self._start_cat_container()

        res = self.client.exec_create(cid, 'whoami', user='default')
        self.assertIn('Id', res)

        exec_log = self.client.exec_start(res)
        self.assertEqual(exec_log, b'default\n')

    def test_exec_command_as_root(self):
        self._require_native_exec()
        cid = self._start_cat_container()

        res = self.client.exec_create(cid, 'whoami')
        self.assertIn('Id', res)

        exec_log = self.client.exec_start(res)
        self.assertEqual(exec_log, b'root\n')

    def test_exec_command_streaming(self):
        self._require_native_exec()
        cid = self._start_cat_container()

        exec_id = self.client.exec_create(cid, ['echo', 'hello\nworld'])
        self.assertIn('Id', exec_id)

        res = b''
        for chunk in self.client.exec_start(exec_id, stream=True):
            res += chunk
        self.assertEqual(res, b'hello\nworld\n')

    def test_exec_start_socket(self):
        self._require_native_exec()
        container_id = self._start_cat_container()

        line = 'yay, interactive exec!'
        # `echo` appends CRLF, `printf` doesn't
        exec_id = self.client.exec_create(
            container_id, ['printf', line], tty=True)
        self.assertIn('Id', exec_id)

        socket = self.client.exec_start(exec_id, socket=True)
        self.addCleanup(socket.close)

        next_size = helpers.next_packet_size(socket)
        self.assertEqual(next_size, len(line))
        data = helpers.read_data(socket, next_size)
        self.assertEqual(data.decode('utf-8'), line)

    def test_exec_inspect(self):
        self._require_native_exec()
        cid = self._start_cat_container()

        exec_id = self.client.exec_create(cid, ['mkdir', '/does/not/exist'])
        self.assertIn('Id', exec_id)
        self.client.exec_start(exec_id)
        exec_info = self.client.exec_inspect(exec_id)
        self.assertIn('ExitCode', exec_info)
        self.assertNotEqual(exec_info['ExitCode'], 0)
| apache-2.0 |
hottwaj/django | tests/middleware/test_security.py | 291 | 7781 | from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import override_settings
class SecurityMiddlewareTest(SimpleTestCase):
    """Tests for django.middleware.security.SecurityMiddleware.

    Fix: four @override_settings decorators used setting names the
    middleware never reads (HSTS_SECONDS, SECURE_CONTENT_TYPE_NO_SNIFF,
    BROWSER_XSS_FILTER), making those overrides no-ops; they now use the
    correct SECURE_-prefixed names.
    """

    @property
    def middleware(self):
        from django.middleware.security import SecurityMiddleware
        return SecurityMiddleware()

    @property
    def secure_request_kwargs(self):
        return {"wsgi.url_scheme": "https"}

    def response(self, *args, **kwargs):
        headers = kwargs.pop("headers", {})
        response = HttpResponse(*args, **kwargs)
        for k, v in headers.items():
            response[k] = v
        return response

    def process_response(self, *args, **kwargs):
        request_kwargs = {}
        if kwargs.pop("secure", False):
            request_kwargs.update(self.secure_request_kwargs)
        request = (kwargs.pop("request", None) or
                   self.request.get("/some/url", **request_kwargs))
        ret = self.middleware.process_request(request)
        if ret:
            return ret
        return self.middleware.process_response(
            request, self.response(*args, **kwargs))

    request = RequestFactory()

    def process_request(self, method, *args, **kwargs):
        if kwargs.pop("secure", False):
            kwargs.update(self.secure_request_kwargs)
        req = getattr(self.request, method.lower())(*args, **kwargs)
        return self.middleware.process_request(req)

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_on(self):
        """
        With HSTS_SECONDS=3600, the middleware adds
        "strict-transport-security: max-age=3600" to the response.
        """
        self.assertEqual(
            self.process_response(secure=True)["strict-transport-security"],
            "max-age=3600")

    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_already_present(self):
        """
        The middleware will not override a "strict-transport-security" header
        already present in the response.
        """
        response = self.process_response(
            secure=True,
            headers={"strict-transport-security": "max-age=7200"})
        self.assertEqual(response["strict-transport-security"], "max-age=7200")

    # was HSTS_SECONDS=3600 — a setting the middleware never reads (no-op)
    @override_settings(SECURE_HSTS_SECONDS=3600)
    def test_sts_only_if_secure(self):
        """
        The "strict-transport-security" header is not added to responses going
        over an insecure connection.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=False))

    # was HSTS_SECONDS=0 — a setting the middleware never reads (no-op)
    @override_settings(SECURE_HSTS_SECONDS=0)
    def test_sts_off(self):
        """
        With HSTS_SECONDS of 0, the middleware does not add a
        "strict-transport-security" header to the response.
        """
        self.assertNotIn("strict-transport-security", self.process_response(secure=True))

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
    def test_sts_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
        True, the middleware adds a "strict-transport-security" header with the
        "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(
            response["strict-transport-security"],
            "max-age=600; includeSubDomains",
        )

    @override_settings(
        SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
    def test_sts_no_include_subdomains(self):
        """
        With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
        False, the middleware adds a "strict-transport-security" header without
        the "includeSubDomains" tag to the response.
        """
        response = self.process_response(secure=True)
        self.assertEqual(response["strict-transport-security"], "max-age=600")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_on(self):
        """
        With CONTENT_TYPE_NOSNIFF set to True, the middleware adds
        "x-content-type-options: nosniff" header to the response.
        """
        self.assertEqual(self.process_response()["x-content-type-options"], "nosniff")

    # was SECURE_CONTENT_TYPE_NO_SNIFF — misspelled setting name (no-op)
    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
    def test_content_type_already_present(self):
        """
        The middleware will not override an "x-content-type-options" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-content-type-options": "foo"})
        self.assertEqual(response["x-content-type-options"], "foo")

    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
    def test_content_type_off(self):
        """
        With CONTENT_TYPE_NOSNIFF False, the middleware does not add an
        "x-content-type-options" header to the response.
        """
        self.assertNotIn("x-content-type-options", self.process_response())

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_on(self):
        """
        With BROWSER_XSS_FILTER set to True, the middleware adds
        "x-xss-protection: 1; mode=block" header to the response.
        """
        self.assertEqual(
            self.process_response()["x-xss-protection"],
            "1; mode=block")

    @override_settings(SECURE_BROWSER_XSS_FILTER=True)
    def test_xss_filter_already_present(self):
        """
        The middleware will not override an "x-xss-protection" header
        already present in the response.
        """
        response = self.process_response(secure=True, headers={"x-xss-protection": "foo"})
        self.assertEqual(response["x-xss-protection"], "foo")

    # was BROWSER_XSS_FILTER=False — a setting the middleware never reads (no-op)
    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
    def test_xss_filter_off(self):
        """
        With BROWSER_XSS_FILTER set to False, the middleware does not add an
        "x-xss-protection" header to the response.
        """
        self.assertNotIn("x-xss-protection", self.process_response())

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_ssl_redirect_on(self):
        """
        With SSL_REDIRECT True, the middleware redirects any non-secure
        requests to the https:// version of the same URL.
        """
        ret = self.process_request("get", "/some/url?query=string")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(
            ret["Location"], "https://testserver/some/url?query=string")

    @override_settings(SECURE_SSL_REDIRECT=True)
    def test_no_redirect_ssl(self):
        """
        The middleware does not redirect secure requests.
        """
        ret = self.process_request("get", "/some/url", secure=True)
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
    def test_redirect_exempt(self):
        """
        The middleware does not redirect requests with URL path matching an
        exempt pattern.
        """
        ret = self.process_request("get", "/insecure/page")
        self.assertEqual(ret, None)

    @override_settings(
        SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
    def test_redirect_ssl_host(self):
        """
        The middleware redirects to SSL_HOST if given.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret.status_code, 301)
        self.assertEqual(ret["Location"], "https://secure.example.com/some/url")

    @override_settings(SECURE_SSL_REDIRECT=False)
    def test_ssl_redirect_off(self):
        """
        With SSL_REDIRECT False, the middleware does no redirect.
        """
        ret = self.process_request("get", "/some/url")
        self.assertEqual(ret, None)
| bsd-3-clause |
jhuapl-marti/marti | crits/core/management/commands/create_indexes.py | 4 | 9051 | import pymongo
from django.core.management.base import BaseCommand
from django.conf import settings
from optparse import make_option
from crits.core.mongo_tools import mongo_connector
class Command(BaseCommand):
    """
    Management command that creates (or, with --remove-indexes, drops)
    the MongoDB indexes used by CRITs.
    """

    option_list = BaseCommand.option_list + (
        make_option('--remove-indexes',
                    '-r',
                    action='store_true',
                    dest='remove',
                    default=False,
                    help='Remove all indexes. Does NOT create.'),
    )
    help = 'Creates indexes for MongoDB.'

    def handle(self, *args, **options):
        """
        Script Execution.

        Drops all indexes when --remove-indexes is given; otherwise creates
        the default index set.
        """
        remove = options.get('remove')
        if remove:
            remove_indexes()
        else:
            create_indexes()
def remove_indexes():
    """
    Removes all indexes from all collections.

    Fix: use the parenthesized print form, which is valid syntax in both
    Python 2 and Python 3 (the bare print statement is Python-2-only).
    """
    coll_list = [settings.COL_BACKDOORS,
                 settings.COL_BUCKET_LISTS,
                 settings.COL_CAMPAIGNS,
                 settings.COL_COMMENTS,
                 settings.COL_DOMAINS,
                 settings.COL_EMAIL,
                 settings.COL_EVENTS,
                 settings.COL_EXPLOITS,
                 settings.COL_INDICATORS,
                 settings.COL_IPS,
                 settings.COL_NOTIFICATIONS,
                 '%s.files' % settings.COL_OBJECTS,
                 '%s.chunks' % settings.COL_OBJECTS,
                 settings.COL_PCAPS,
                 '%s.files' % settings.COL_PCAPS,
                 '%s.chunks' % settings.COL_PCAPS,
                 settings.COL_SAMPLES,
                 '%s.files' % settings.COL_SAMPLES,
                 '%s.chunks' % settings.COL_SAMPLES,
                 settings.COL_TARGETS,
                 ]
    for coll in coll_list:
        print("Removing index for: %s" % coll)
        c = mongo_connector(coll)
        c.drop_indexes()
def create_indexes():
    """
    Creates the default set of indexes for the system. Depending on your use
    cases, as well as quantity of data, admins may wish to tweak these indexes
    to best fit their requirements.

    Improvement: the original body repeated the same
    ``ensure_index(field, background=True)`` call dozens of times; the index
    specifications are now data-driven. The set of indexes created (fields
    and options) is unchanged.
    """
    print("Creating indexes (duplicates will be ignored automatically)")

    # Fields shared by most top-level object collections.
    common = ["objects.value", "relationships.value", "campaign.name",
              "bucket_list"]
    simple_indexes = [
        (settings.COL_ANALYSIS_RESULTS,
         ["service_name", "object_type", "object_id"]),
        (settings.COL_BUCKET_LISTS, ["name"]),
        (settings.COL_BACKDOORS, ["name"]),
        (settings.COL_CAMPAIGNS,
         ["objects.value", "relationships.value", "bucket_list"]),
        (settings.COL_COMMENTS, ["obj_id", "users", "tags", "status"]),
        (settings.COL_DOMAINS, ["domain"] + common),
        (settings.COL_EMAIL, common),
        (settings.COL_EVENTS, common),
        (settings.COL_EXPLOITS, ["name"]),
        (settings.COL_INDICATORS, ["value", "lower"] + common),
        (settings.COL_IPS, ["ip"] + common),
        (settings.COL_PCAPS, ["md5"] + common),
        (settings.COL_RAW_DATA, ["link_id", "md5"] + common),
        (settings.COL_SIGNATURES, ["link_id", "md5"] + common),
        (settings.COL_SAMPLES,
         ["source.name", "md5", "sha1", "sha256", "ssdeep", "mimetype",
          "filetype", "size", "filename", "objects.value",
          "relationships.value", "campaign.name",
          "analysis.results.result", "analysis.results.md5",
          "bucket_list"]),
        (settings.COL_SCREENSHOTS, ["tags"]),
        (settings.COL_TARGETS, common),
    ]
    for coll_name, fields in simple_indexes:
        coll = mongo_connector(coll_name)
        for field in fields:
            coll.ensure_index(field, background=True)

    # GridFS-backed collections: index the file md5 and create the unique
    # compound (files_id, n) index on the chunks.
    if settings.FILE_DB == settings.GRIDFS:
        for base in (settings.COL_OBJECTS, settings.COL_PCAPS,
                     settings.COL_SAMPLES):
            files_coll = mongo_connector('%s.files' % base)
            files_coll.ensure_index("md5", background=True)
            chunks_coll = mongo_connector('%s.chunks' % base)
            chunks_coll.ensure_index([("files_id", pymongo.ASCENDING),
                                      ("n", pymongo.ASCENDING)],
                                     unique=True)

    notifications = mongo_connector(settings.COL_NOTIFICATIONS)
    notifications.ensure_index("obj_id", background=True)
    # auto-expire notifications after 30 days
    notifications.ensure_index("date", background=True,
                               expireAfterSeconds=2592000)
    notifications.ensure_index("users", background=True)
| mit |
mkieszek/odoo | addons/mail/models/mail_alias.py | 3 | 13262 | # -*- coding: utf-8 -*-
import logging
import re
import unicodedata
from openerp import _, api, fields, models, SUPERUSER_ID
from openerp.exceptions import UserError
from openerp.modules.registry import RegistryManager
from openerp.tools import ustr
from openerp.tools.safe_eval import safe_eval as eval
_logger = logging.getLogger(__name__)
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
    """Suboptimal-but-better-than-nothing way to replace accented
    latin letters by an ASCII equivalent. Will obviously change the
    meaning of input_str and work only for some cases."""
    # NFKD decomposition splits accented letters into a base character
    # followed by combining marks, which are then dropped.
    decomposed = unicodedata.normalize('NFKD', ustr(input_str))
    return u''.join(c for c in decomposed if not unicodedata.combining(c))
class Alias(models.Model):
"""A Mail Alias is a mapping of an email address with a given OpenERP Document
model. It is used by OpenERP's mail gateway when processing incoming emails
sent to the system. If the recipient address (To) of the message matches
a Mail Alias, the message will be either processed following the rules
of that alias. If the message is a reply it will be attached to the
existing discussion on the corresponding record, otherwise a new
record of the corresponding model will be created.
This is meant to be used in combination with a catch-all email configuration
on the company's mail server, so that as soon as a new mail.alias is
created, it becomes immediately usable and OpenERP will accept email for it.
"""
_name = 'mail.alias'
_description = "Email Aliases"
_rec_name = 'alias_name'
_order = 'alias_model_id, alias_name'
alias_name = fields.Char('Alias Name', help="The name of the email alias, e.g. 'jobs' if you want to catch emails for <jobs@example.odoo.com>")
alias_model_id = fields.Many2one('ir.model', 'Aliased Model', required=True, ondelete="cascade",
help="The model (Odoo Document Kind) to which this alias "
"corresponds. Any incoming email that does not reply to an "
"existing record will cause the creation of a new record "
"of this model (e.g. a Project Task)",
# hack to only allow selecting mail_thread models (we might
# (have a few false positives, though)
domain="[('field_id.name', '=', 'message_ids')]")
alias_user_id = fields.Many2one('res.users', 'Owner', defaults=lambda self: self.env.user,
help="The owner of records created upon receiving emails on this alias. "
"If this field is not set the system will attempt to find the right owner "
"based on the sender (From) address, or will use the Administrator account "
"if no system user is found for that address.")
alias_defaults = fields.Text('Default Values', required=True, default='{}',
help="A Python dictionary that will be evaluated to provide "
"default values when creating new records for this alias.")
alias_force_thread_id = fields.Integer(
'Record Thread ID',
help="Optional ID of a thread (record) to which all incoming messages will be attached, even "
"if they did not reply to it. If set, this will disable the creation of new records completely.")
alias_domain = fields.Char('Alias domain', compute='_get_alias_domain',
default=lambda self: self.env["ir.config_parameter"].get_param("mail.catchall.domain"))
alias_parent_model_id = fields.Many2one(
'ir.model', 'Parent Model',
help="Parent model holding the alias. The model holding the alias reference"
"is not necessarily the model given by alias_model_id"
"(example: project (parent_model) and task (model))")
alias_parent_thread_id = fields.Integer('Parent Record Thread ID', help="ID of the parent record holding the alias (example: project holding the task creation alias)")
alias_contact = fields.Selection([
('everyone', 'Everyone'),
('partners', 'Authenticated Partners'),
('followers', 'Followers only')], default='everyone',
string='Alias Contact Security', required=True,
help="Policy to post a message on the document using the mailgateway.\n"
"- everyone: everyone can post\n"
"- partners: only authenticated partners\n"
"- followers: only followers of the related document\n")
_sql_constraints = [
('alias_unique', 'UNIQUE(alias_name)', 'Unfortunately this email alias is already used, please choose a unique one')
]
@api.multi
def _get_alias_domain(self):
alias_domain = self.env["ir.config_parameter"].get_param("mail.catchall.domain")
for record in self:
record.alias_domain = alias_domain
@api.one
@api.constrains('alias_defaults')
def _check_alias_defaults(self):
try:
dict(eval(self.alias_defaults))
except Exception:
raise UserError(_('Invalid expression, it must be a literal python dictionary definition e.g. "{\'field\': \'value\'}"'))
@api.model
def create(self, vals):
    """Create a mail.alias, normalising the requested name and resolving
    model references from the context.

    Two adjustments are applied to ``vals``:

    * ``alias_name`` is cleaned of unsafe characters and suffixed with an
      integer if needed so that it is unique;
    * ``alias_model_id`` / ``alias_parent_model_id`` are filled in from
      the ``alias_model_name`` / ``alias_parent_model_name`` context
      keys when those are present.
    """
    requested_name = vals.get('alias_name')
    if requested_name:
        vals['alias_name'] = self._clean_and_make_unique(requested_name)

    IrModel = self.env['ir.model']
    model_name = self._context.get('alias_model_name')
    if model_name:
        vals['alias_model_id'] = IrModel.search([('model', '=', model_name)]).id
    parent_model_name = self._context.get('alias_parent_model_name')
    if parent_model_name:
        vals['alias_parent_model_id'] = IrModel.search([('model', '=', parent_model_name)]).id
    return super(Alias, self).create(vals)
@api.multi
def write(self, vals):
    """Override to keep ``alias_name`` unique: the requested name is
    cleaned and suffixed if it is already taken by another alias."""
    new_name = vals.get('alias_name')
    if new_name and self.ids:
        vals['alias_name'] = self._clean_and_make_unique(new_name, alias_ids=self.ids)
    return super(Alias, self).write(vals)
@api.multi
def name_get(self):
    """Return display names for aliases.

    ``name@domain`` when both parts are known, the bare local part when
    no catchall domain is configured, and the "Inactive Alias"
    placeholder when the alias has no name at all (e.g. `jobs@mail.odoo.com`
    or `jobs`).
    """
    result = []
    for alias in self:
        if not alias.alias_name:
            display = _("Inactive Alias")
        elif alias.alias_domain:
            display = "%s@%s" % (alias.alias_name, alias.alias_domain)
        else:
            display = "%s" % (alias.alias_name)
        result.append((alias.id, display))
    return result
@api.model
def _find_unique(self, name, alias_ids=False):
    """Return an alias name based on ``name`` that is not yet in use.

    If ``name`` is taken, probe ``name2``, ``name3``, ... until a free
    one is found.  Aliases whose ids are listed in ``alias_ids`` are
    ignored when checking for collisions (used when renaming existing
    aliases).
    """
    suffix = None
    while True:
        candidate = name if suffix is None else "%s%s" % (name, suffix)
        domain = [('alias_name', '=', candidate)]
        if alias_ids:
            domain.append(('id', 'not in', alias_ids))
        if not self.search(domain):
            return candidate
        # First collision retries with "name2", then counts upward.
        suffix = 2 if suffix is None else suffix + 1
@api.model
def _clean_and_make_unique(self, name, alias_ids=False):
    """Sanitise ``name`` into a valid alias local-part and make it unique.

    If the caller passed a full email address, only the local part is
    kept; accents are stripped, the result lower-cased, and any run of
    characters outside word chars / '+' / '.' collapsed into one dash.
    """
    local_part = remove_accents(name).lower().split('@')[0]
    sanitized = re.sub(r'[^\w+.]+', '-', local_part)
    return self._find_unique(sanitized, alias_ids=alias_ids)
def migrate_to_alias(self, cr, child_model_name, child_table_name, child_model_auto_init_fct,
                     alias_model_name, alias_id_column, alias_key, alias_prefix='', alias_force_key='',
                     alias_defaults=None, alias_generate_name=False, context=None):
    """Installation hook that creates a mail.alias for every record of a
    child model lacking one, so the NOT NULL constraint on ``alias_id``
    can be (re)installed without errors.

    :param child_model_name: model name of the child class (i.e. res.users)
    :param child_table_name: table name of the child class (i.e. res_users)
    :param child_model_auto_init_fct: pointer to the _auto_init function
        (i.e. super(res_users, self)._auto_init(cr, context=context))
    :param alias_model_name: name of the aliased model
    :param alias_id_column: alias_id column (i.e. self._columns['alias_id'])
    :param alias_key: name of the column used for the unique name (i.e. 'login')
    :param alias_prefix: prefix for the unique name (i.e. 'jobs' + ...)
    :param alias_force_key: name of the column for force_thread_id;
        if empty string, not taken into account
    :param alias_defaults: dict, keys = mail.alias columns, values = child
        model column name used for default values (i.e. {'job_id': 'id'}).
        Defaults to an empty mapping.  The default is ``None`` rather than
        ``{}`` so the same mutable dict is not shared between calls.
    :param alias_generate_name: automatically generate alias name using
        prefix / alias key; default alias_name value is False because
        since 8.0 it is not required anymore
    :return: the result of ``child_model_auto_init_fct``
    """
    if context is None:
        context = {}
    if alias_defaults is None:
        alias_defaults = {}
    # Disable the alias_id NOT NULL constraint, to avoid spurious warnings
    # during super._auto_init.  We'll reinstall it afterwards.
    alias_id_column.required = False
    # call _auto_init
    res = child_model_auto_init_fct(cr, context=context)
    registry = RegistryManager.get(cr.dbname)
    mail_alias = registry.get('mail.alias')
    child_class_model = registry[child_model_name]
    no_alias_ids = child_class_model.search(cr, SUPERUSER_ID, [('alias_id', '=', False)], context={'active_test': False})
    # Use read() not browse(), to avoid prefetching uninitialized inherited fields
    for obj_data in child_class_model.read(cr, SUPERUSER_ID, no_alias_ids, [alias_key]):
        alias_vals = {'alias_name': False}
        if alias_generate_name:
            alias_vals['alias_name'] = '%s%s' % (alias_prefix, obj_data[alias_key])
        if alias_force_key:
            alias_vals['alias_force_thread_id'] = obj_data[alias_force_key]
        # Map each alias default column to the value read from the child record.
        alias_vals['alias_defaults'] = dict((k, obj_data[v]) for k, v in alias_defaults.iteritems())
        alias_vals['alias_parent_thread_id'] = obj_data['id']
        alias_create_ctx = dict(context, alias_model_name=alias_model_name, alias_parent_model_name=child_model_name)
        alias_id = mail_alias.create(cr, SUPERUSER_ID, alias_vals, context=alias_create_ctx)
        # 'mail_notrack' avoids notifying followers while backfilling.
        child_class_model.write(cr, SUPERUSER_ID, obj_data['id'], {'alias_id': alias_id}, context={'mail_notrack': True})
        _logger.info('Mail alias created for %s %s (id %s)', child_model_name, obj_data[alias_key], obj_data['id'])
    # Finally attempt to reinstate the missing NOT NULL constraint.
    try:
        cr.execute('ALTER TABLE %s ALTER COLUMN alias_id SET NOT NULL' % (child_table_name))
    except Exception:
        _logger.warning("Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
                        "If you want to have it, you should update the records and execute manually:\n"\
                        "ALTER TABLE %s ALTER COLUMN %s SET NOT NULL",
                        child_table_name, 'alias_id', child_table_name, 'alias_id')
    # set back the unique alias_id constraint
    alias_id_column.required = True
    return res
@api.multi
def open_document(self):
    """Open the form view of the record this alias is forced to post on,
    or return False when no target model/record is configured."""
    if self.alias_model_id and self.alias_force_thread_id:
        return {
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': self.alias_model_id.model,
            'res_id': self.alias_force_thread_id,
            'type': 'ir.actions.act_window',
        }
    return False
@api.multi
def open_parent_document(self):
    """Open the form view of the parent record holding this alias, or
    return False when no parent model/record is configured."""
    if self.alias_parent_model_id and self.alias_parent_thread_id:
        return {
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': self.alias_parent_model_id.model,
            'res_id': self.alias_parent_thread_id,
            'type': 'ir.actions.act_window',
        }
    return False
| agpl-3.0 |
vv1133/home_web | build/lib.linux-armv6l-2.7/django/core/management/commands/diffsettings.py | 120 | 1648 | from optparse import make_option
from django.core.management.base import NoArgsCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    """Convert a module namespace to a dict of ``name -> repr(value)``,
    skipping names for which ``omittable(name)`` is true (by default,
    underscore-prefixed names)."""
    return {name: repr(value)
            for name, value in module.__dict__.items()
            if not omittable(name)}
class Command(NoArgsCommand):
    # Management command "diffsettings": show how the active settings
    # differ from Django's global defaults.
    help = """Displays differences between the current settings.py and Django's
    default settings. Settings that don't appear in the defaults are
    followed by "###"."""

    option_list = NoArgsCommand.option_list + (
        make_option('--all', action='store_true', dest='all', default=False,
            help='Display all settings, regardless of their value. '
                 'Default values are prefixed by "###".'),
    )

    # Printing settings needs no model validation.
    requires_model_validation = False

    def handle_noargs(self, **options):
        # Inspired by Postfix's "postconf -n".
        from django.conf import settings, global_settings

        # Because settings are imported lazily, we need to explicitly load them.
        settings._setup()

        user_settings = module_to_dict(settings._wrapped)
        default_settings = module_to_dict(global_settings)

        output = []
        for key in sorted(user_settings):
            # "###" suffix marks settings absent from the defaults;
            # unchanged defaults are only shown (prefixed) with --all.
            if key not in default_settings:
                output.append("%s = %s ###" % (key, user_settings[key]))
            elif user_settings[key] != default_settings[key]:
                output.append("%s = %s" % (key, user_settings[key]))
            elif options['all']:
                output.append("### %s = %s" % (key, user_settings[key]))
        return '\n'.join(output)
| bsd-3-clause |
talha81/TACTIC-DEV | src/pyasm/prod/biz/texture.py | 6 | 5425 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['Texture', 'TextureSource', 'ShotTexture']
from pyasm.search import *
from pyasm.biz import Project
class Texture(SObject):
    '''Texture sobject (search type prod/texture), attached to an asset.'''

    SEARCH_TYPE = "prod/texture"

    def get_relation(my, name):
        '''Return the SObject class related to textures for ``name``
        ("asset" or "texture"); raises KeyError for anything else.'''
        from asset import Asset
        relations = {}
        relations['asset'] = Asset
        relations['texture'] = Texture
        return relations[name]

    def get_icon_context(my, context=None):
        '''Context used when locating this sobject's icon snapshot.'''
        return "publish"

    # static functions
    def create(cls, asset, code=None, category=None, description=None, sobject_context=None):
        '''Create and commit a new texture attached to ``asset``.

        Optional values are written only when not None, so an explicit
        empty string is still stored.
        '''
        sobject = SearchType.create( cls.SEARCH_TYPE )
        # (removed an unused local and a stale commented duplicate here)
        sobject.set_value("asset_code", asset.get_code())
        if sobject_context != None:
            sobject.set_value("asset_context", sobject_context)
        if code != None:
            sobject.set_value("code", code)
        if category != None:
            sobject.set_value("category", category)
        if description != None:
            sobject.set_value("description", description)
        sobject.commit()
        return sobject
    create = classmethod(create)

    def get(cls, texture_code, parent_code, project_code=None, is_multi=False):
        '''Look up textures by code under the asset ``parent_code``.

        TODO: use search_type, id for the parent search
        '''
        if not project_code:
            project_code = Project.get_project_code()
        search = Search( cls.SEARCH_TYPE, project_code )
        #search.set_show_retired(True)
        if texture_code:
            search.add_filter('code', texture_code)
        search.add_filter('asset_code', parent_code)
        search_type = search.get_search_type()
        # Cache key combines search type, texture code and parent code.
        key = "%s|%s|%s" % (search_type, texture_code, parent_code)
        sobj = cls.get_by_search(search, key, is_multi=is_multi)
        return sobj
    get = classmethod(get)
class TextureSource(Texture):
    '''Source file for a texture (search type prod/texture_source).'''

    SEARCH_TYPE = "prod/texture_source"

    def create(cls, asset_code, code=None, category=None, description=None, sobject_context=None):
        '''Create and commit a texture source attached to ``asset_code``.

        Optional values are written only when not None, so an explicit
        empty string is still stored.
        '''
        source = SearchType.create( cls.SEARCH_TYPE )
        source.set_value("asset_code", asset_code)
        if sobject_context != None:
            source.set_value("asset_context", sobject_context)
        for column, value in (("code", code),
                              ("category", category),
                              ("description", description)):
            if value != None:
                source.set_value(column, value)
        source.commit()
        return source
    create = classmethod(create)
class ShotTexture(Texture):
    # Texture attached to a shot (or shot instance) rather than an asset.
    # The parent is stored generically as a (search_type, search_id) pair.

    SEARCH_TYPE = "prod/shot_texture"

    def get_shot_code(my):
        # Resolve the owning shot's code through the generic parent
        # reference; returns '' when the parent cannot be found.
        shot_code = ''
        search_type = my.get_value('search_type')
        search = Search( search_type )
        search.add_filter( 'id', my.get_value('search_id') )
        parent = search.get_sobject()
        if not parent:
            return shot_code
        # A shot_instance points at its shot; a shot is the shot itself.
        if search_type.startswith('prod/shot_instance'):
            shot_code = parent.get_value('shot_code')
        else:
            shot_code = parent.get_value('code')
        return shot_code

    # static functions
    def create(cls, sobject, code=None, category=None, description=None, sobject_context=None):
        # Create and commit a shot texture attached to ``sobject`` (a shot
        # or shot instance), recorded via search_type/search_id.
        # Optional values are written only when not None, so an explicit
        # empty string is still stored.
        texture = SearchType.create( cls.SEARCH_TYPE )
        texture.set_value("search_type", sobject.get_search_type() )
        texture.set_value("search_id", sobject.get_id())
        #texture.set_value("shot_code", shot_code)
        if sobject_context != None:
            texture.set_value("asset_context", sobject_context)
        if code != None:
            texture.set_value("code", code)
        if category != None:
            texture.set_value("category", category)
        if description != None:
            texture.set_value("description", description)
        texture.commit()
        return texture
    create = classmethod(create)

    def get(cls, texture_code, parent_code, project_code=None, is_multi=False):
        # Look up shot textures by code under a given parent shot.
        # ``parent_code`` may be a shot code string (legacy) or an sobject.
        if not project_code:
            project_code = Project.get_project_code()
        search = Search( cls.SEARCH_TYPE, project_code )
        #search.set_show_retired(True)
        if texture_code:
            search.add_filter('code', texture_code)

        # backward compatible with using shot code
        if isinstance(parent_code, basestring):
            from pyasm.prod.biz import Shot
            parent = Shot.get_by_code(parent_code)
        else:
            parent = parent_code
        if not parent:
            # No parent: empty result shaped to match is_multi.
            if is_multi:
                return []
            else:
                return None
        search.add_filter('search_type', parent.get_search_type())
        search.add_filter('search_id', parent.get_id())
        parent_key = SearchKey.get_by_sobject(parent)

        search_type = search.get_search_type()
        # Cache key combines search type, texture code and parent key.
        key = "%s|%s|%s" % (search_type, texture_code, parent_key)
        sobj = cls.get_by_search(search, key, is_multi=is_multi)
        return sobj
    get = classmethod(get)
| epl-1.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/pygments/lexers/diff.py | 31 | 4873 | # -*- coding: utf-8 -*-
"""
pygments.lexers.diff
~~~~~~~~~~~~~~~~~~~~
Lexers for diff/patch formats.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, Generic, \
Literal
__all__ = ['DiffLexer', 'DarcsPatchLexer', 'WDiffLexer']
class DiffLexer(RegexLexer):
    """
    Lexer for unified or context-style diffs or patches.
    """

    name = 'Diff'
    aliases = ['diff', 'udiff']
    filenames = ['*.diff', '*.patch']
    mimetypes = ['text/x-diff', 'text/x-patch']

    # Rule order matters: the catch-all '.*\n' must stay last.
    tokens = {
        'root': [
            (r' .*\n', Text),
            (r'\+.*\n', Generic.Inserted),
            (r'-.*\n', Generic.Deleted),
            (r'!.*\n', Generic.Strong),
            (r'@.*\n', Generic.Subheading),
            (r'([Ii]ndex|diff).*\n', Generic.Heading),
            (r'=.*\n', Generic.Heading),
            (r'.*\n', Text),
        ]
    }

    def analyse_text(text):
        # Guess confidence from how diff output usually begins.
        # str.startswith replaces the previous, error-prone slice
        # comparisons (text[:7] == 'Index: ', etc.) whose lengths had
        # to be kept in sync with the literals by hand.
        if text.startswith(('Index: ', 'diff ')):
            return True
        if text.startswith('--- '):
            return 0.9
        return None
class DarcsPatchLexer(RegexLexer):
    """
    DarcsPatchLexer is a lexer for the various versions of the darcs patch
    format. Examples of this format are derived by commands such as
    ``darcs annotate --patch`` and ``darcs send``.

    .. versionadded:: 0.10
    """

    name = 'Darcs Patch'
    aliases = ['dpatch']
    filenames = ['*.dpatch', '*.darcspatch']

    # Patch-operation keywords recognised inside a bundle body.
    DPATCH_KEYWORDS = ('hunk', 'addfile', 'adddir', 'rmfile', 'rmdir', 'move',
                       'replace')

    # NOTE: rule order within each state is significant for matching.
    tokens = {
        'root': [
            (r'<', Operator),
            (r'>', Operator),
            (r'\{', Operator),
            (r'\}', Operator),
            # Patch header: "[TAG? name\nauthor**date]" (with closing ']')
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text, Operator)),
            # Same header but followed by a long comment block.
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text), 'comment'),
            (r'New patches:', Generic.Heading),
            (r'Context:', Generic.Heading),
            (r'Patch bundle hash:', Generic.Heading),
            (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
             bygroups(Text, Keyword, Text)),
            (r'\+', Generic.Inserted, "insert"),
            (r'-', Generic.Deleted, "delete"),
            (r'.*\n', Text),
        ],
        'comment': [
            (r'[^\]].*\n', Comment),
            (r'\]', Operator, "#pop"),
        ],
        'specialText': [  # darcs add [_CODE_] special operators for clarity
            (r'\n', Text, "#pop"),  # line-based
            (r'\[_[^_]*_]', Operator),
        ],
        'insert': [
            include('specialText'),
            (r'\[', Generic.Inserted),
            (r'[^\n\[]+', Generic.Inserted),
        ],
        'delete': [
            include('specialText'),
            (r'\[', Generic.Deleted),
            (r'[^\n\[]+', Generic.Deleted),
        ],
    }
class WDiffLexer(RegexLexer):
    """
    A `wdiff <https://www.gnu.org/software/wdiff/>`_ lexer.

    Note that:

    * only to normal output (without option like -l).
    * if target files of wdiff contain "[-", "-]", "{+", "+}",
      especially they are unbalanced, this lexer will get confusing.

    .. versionadded:: 2.2
    """

    name = 'WDiff'
    aliases = ['wdiff']
    filenames = ['*.wdiff']
    mimetypes = []

    flags = re.MULTILINE | re.DOTALL

    # We can only assume "[-" after "[-" before "-]" is `nested`,
    # for instance wdiff to wdiff outputs. We have no way to
    # distinct these marker is of wdiff output from original text.
    ins_op = r"\{\+"   # insertion open marker  {+
    ins_cl = r"\+\}"   # insertion close marker +}
    del_op = r"\[\-"   # deletion open marker   [-
    del_cl = r"\-\]"   # deletion close marker  -]
    normal = r'[^{}[\]+-]+'  # for performance

    # Nested markers are handled by pushing/popping the same state.
    tokens = {
        'root': [
            (ins_op, Generic.Inserted, 'inserted'),
            (del_op, Generic.Deleted, 'deleted'),
            (normal, Text),
            (r'.', Text),
        ],
        'inserted': [
            (ins_op, Generic.Inserted, '#push'),
            (del_op, Generic.Inserted, '#push'),
            (del_cl, Generic.Inserted, '#pop'),

            (ins_cl, Generic.Inserted, '#pop'),
            (normal, Generic.Inserted),
            (r'.', Generic.Inserted),
        ],
        'deleted': [
            (del_op, Generic.Deleted, '#push'),
            (ins_op, Generic.Deleted, '#push'),
            (ins_cl, Generic.Deleted, '#pop'),

            (del_cl, Generic.Deleted, '#pop'),
            (normal, Generic.Deleted),
            (r'.', Generic.Deleted),
        ],
    }
| bsd-2-clause |
zentner-kyle/servo | python/mozlog/mozlog/structured/handlers/base.py | 46 | 3764 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from threading import Lock
import codecs
from ..structuredlog import log_levels
class BaseHandler(object):
    """A base handler providing message handling facilities to
    derived classes.

    :param inner: A handler-like callable that may receive messages
                  from a log user.
    """

    def __init__(self, inner):
        # Handlers wrapped by this one; messages are forwarded to every
        # wrapped handler that implements handle_message.
        self.wrapped = []
        if hasattr(inner, "handle_message"):
            self.wrapped.append(inner)
        self.message_handlers = {}

    def register_message_handlers(self, topic, handlers):
        """Opt in to receiving messages for ``topic`` via the mapping
        ``handlers`` of command name -> callback."""
        self.message_handlers[topic] = handlers

    def handle_message(self, topic, cmd, *args):
        """Handle a message for the given topic by calling a
        subclass-defined callback for the command.

        :param topic: The topic of the broadcasted message. Handlers opt-in
                      to receiving messages by identifying a topic when
                      calling register_message_handlers.
        :param cmd: The command to issue; a string naming a callback
                    registered for the topic.
        :param args: Arguments to pass to the identified message callback,
                     if any.
        :return: list of return values, own handler's first, then those of
                 any wrapped handlers.
        """
        results = []
        topic_map = self.message_handlers.get(topic)
        if topic_map is not None and cmd in topic_map:
            results.append(topic_map[cmd](*args))
        for wrapped_handler in self.wrapped:
            results.extend(wrapped_handler.handle_message(topic, cmd, *args))
        return results
class LogLevelFilter(BaseHandler):
    """Handler that filters out messages with action of log and a level
    lower than some specified level.

    :param inner: Handler to use for messages that pass this filter
    :param level: Minimum log level to process
    """

    def __init__(self, inner, level):
        BaseHandler.__init__(self, inner)
        self.inner = inner
        self.level = log_levels[level.upper()]

    def __call__(self, item):
        # Non-"log" items always pass through; "log" items pass only when
        # at least as severe as the threshold (lower value == more severe).
        is_log_action = item["action"] == "log"
        if is_log_action and log_levels[item["level"].upper()] > self.level:
            return None
        return self.inner(item)
class StreamHandler(BaseHandler):
    """Handler for writing to a file-like object

    :param stream: File-like object to write log messages to
    :param formatter: formatter to convert messages to string format
    """

    # Class-level lock: serialises writes across all StreamHandler
    # instances, which may share one stream (e.g. stdout).
    _lock = Lock()

    def __init__(self, stream, formatter):
        BaseHandler.__init__(self, formatter)
        assert stream is not None
        # This is a hack to deal with the case where we are passed a
        # StreamWriter (e.g. by mach for stdout). A StreamWriter requires
        # the code to handle unicode in exactly the opposite way compared
        # to a normal stream i.e. you always have to pass in a Unicode
        # object rather than a string object. Cope with that by extracting
        # the underlying raw stream.
        if isinstance(stream, codecs.StreamWriter):
            stream = stream.stream

        self.formatter = formatter
        self.stream = stream

    def __call__(self, data):
        """Write a log message.

        :param data: Structured log message dictionary."""
        formatted = self.formatter(data)
        if not formatted:
            return
        with self._lock:
            # Python 2 semantics: unicode is encoded explicitly, byte
            # strings are written as-is.  (``unicode`` does not exist on
            # Python 3 -- this module targets Python 2.)
            if isinstance(formatted, unicode):
                self.stream.write(formatted.encode("utf-8", "replace"))
            elif isinstance(formatted, str):
                self.stream.write(formatted)
            else:
                assert False, "Got output from the formatter of an unexpected type"

            self.stream.flush()
ayoubg/gem5-graphics | gem5/src/arch/x86/isa/insts/simd64/floating_point/compare/compare_and_write_mask.py | 91 | 2172 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Placeholder microcode for the 3DNow! packed-FP compare-and-write-mask
# instructions; none are implemented yet (the names below are comments
# inside the microcode string, not definitions).
microcode = '''
# PFCMPEQ
# PFCMPGT
# PFCMPGE
'''
| bsd-3-clause |
jhbsz/OSI-OS | contrib/subversion/gen-make.py | 5 | 11010 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# gen-make.py -- generate makefiles for building Subversion
#
import os
import sys
import getopt
try:
my_getopt = getopt.gnu_getopt
except AttributeError:
my_getopt = getopt.getopt
try:
# Python >=3.0
import configparser
except ImportError:
# Python <3.0
import ConfigParser as configparser
# for the generator modules
sys.path.insert(0, os.path.join('build', 'generator'))
# for getversion
sys.path.insert(1, 'build')
# Registry of supported output generators:
#   key -> (python module implementing the generator, human description)
gen_modules = {
    'make' : ('gen_make', 'Makefiles for POSIX systems'),
    'dsp' : ('gen_msvc_dsp', 'MSVC 6.x project files'),
    'vcproj' : ('gen_vcnet_vcproj', 'VC.Net project files'),
}
def main(fname, gentype, verfname=None,
         skip_depends=0, other_options=None):
    # Drive one build-file generator: load the generator module named in
    # gen_modules for ``gentype``, feed it the config file ``fname`` and
    # version header ``verfname``, and write the output files.
    if verfname is None:
        verfname = os.path.join('subversion', 'include', 'svn_version.h')

    gen_module = __import__(gen_modules[gentype][0])

    generator = gen_module.Generator(fname, verfname, other_options)

    # Header-dependency scanning is expensive; -s skips it.
    if not skip_depends:
        generator.compute_hdr_deps()

    generator.write()
    generator.write_sqlite_headers()

    # --debug: dump the dependency graph and the generator's list-valued
    # attributes for inspection by developers.
    if ('--debug', '') in other_options:
        for dep_type, target_dict in generator.graph.deps.items():
            sorted_targets = list(target_dict.keys()); sorted_targets.sort()
            for target in sorted_targets:
                print(dep_type + ": " + _objinfo(target))
                for source in target_dict[target]:
                    print(" " + _objinfo(source))
            print("=" * 72)
        gen_keys = sorted(generator.__dict__.keys())
        for name in gen_keys:
            value = generator.__dict__[name]
            if isinstance(value, list):
                print(name + ": ")
                for i in value:
                    print(" " + _objinfo(i))
                print("=" * 72)
def _objinfo(o):
    """Return a short debugging description of *o*: ``repr`` for strings,
    otherwise ``ClassName: name filename`` with '-' for missing attrs."""
    if isinstance(o, str):
        return repr(o)
    cls_name = o.__class__.__name__
    name = getattr(o, 'name', '-')
    filename = getattr(o, 'filename', '-')
    return "%s: %s %s" % (cls_name, name, filename)
def _usage_exit(err=None):
"print ERR (if any), print usage, then exit the script"
if err:
print("ERROR: %s\n" % (err))
print("USAGE: gen-make.py [options...] [conf-file]")
print(" -s skip dependency generation")
print(" --debug print lots of stuff only developers care about")
print(" --release release mode")
print(" --reload reuse all options from the previous invocation")
print(" of the script, except -s, -t, --debug and --reload")
print(" -t TYPE use the TYPE generator; can be one of:")
items = sorted(gen_modules.items())
for name, (module, desc) in items:
print(' %-12s %s' % (name, desc))
print("")
print(" The default generator type is 'make'")
print("")
print(" Makefile-specific options:")
print("")
print(" --assume-shared-libs")
print(" omit dependencies on libraries, on the assumption that")
print(" shared libraries will be built, so that it is unnecessary")
print(" to relink executables when the libraries that they depend")
print(" on change. This is an option for developers who want to")
print(" increase the speed of frequent rebuilds.")
print(" *** Do not use unless you understand the consequences. ***")
print("")
print(" UNIX-specific options:")
print("")
print(" --installed-libs")
print(" Comma-separated list of Subversion libraries to find")
print(" pre-installed instead of building (probably only")
print(" useful for packagers)")
print("")
print(" Windows-specific options:")
print("")
print(" --with-apr=DIR")
print(" the APR sources are in DIR")
print("")
print(" --with-apr-util=DIR")
print(" the APR-Util sources are in DIR")
print("")
print(" --with-apr-iconv=DIR")
print(" the APR-Iconv sources are in DIR")
print("")
print(" --with-berkeley-db=DIR")
print(" look for Berkeley DB headers and libs in")
print(" DIR")
print("")
print(" --with-serf=DIR")
print(" the Serf sources are in DIR")
print("")
print(" --with-httpd=DIR")
print(" the httpd sources and binaries required")
print(" for building mod_dav_svn are in DIR;")
print(" implies --with-apr{-util, -iconv}, but")
print(" you can override them")
print("")
print(" --with-libintl=DIR")
print(" look for GNU libintl headers and libs in DIR;")
print(" implies --enable-nls")
print("")
print(" --with-openssl=DIR")
print(" tell serf to look for OpenSSL headers")
print(" and libs in DIR")
print("")
print(" --with-zlib=DIR")
print(" tell Subversion to look for ZLib headers and")
print(" libs in DIR")
print("")
print(" --with-jdk=DIR")
print(" look for the java development kit here")
print("")
print(" --with-junit=DIR")
print(" look for the junit jar here")
print(" junit is for testing the java bindings")
print("")
print(" --with-swig=DIR")
print(" look for the swig program in DIR")
print("")
print(" --with-sqlite=DIR")
print(" look for sqlite in DIR")
print("")
print(" --with-sasl=DIR")
print(" look for the sasl headers and libs in DIR")
print("")
print(" --enable-pool-debug")
print(" turn on APR pool debugging")
print("")
print(" --enable-purify")
print(" add support for Purify instrumentation;")
print(" implies --enable-pool-debug")
print("")
print(" --enable-quantify")
print(" add support for Quantify instrumentation")
print("")
print(" --enable-nls")
print(" add support for gettext localization")
print("")
print(" --enable-bdb-in-apr-util")
print(" configure APR-Util to use Berkeley DB")
print("")
print(" --enable-ml")
print(" enable use of ML assembler with zlib")
print("")
print(" --disable-shared")
print(" only build static libraries")
print("")
print(" --with-static-apr")
print(" Use static apr and apr-util")
print("")
print(" --with-static-openssl")
print(" Use static openssl")
print("")
print(" --vsnet-version=VER")
print(" generate for VS.NET version VER (2002, 2003, 2005, 2008, 2010 or 2012)")
print(" [only valid in combination with '-t vcproj']")
print("")
print(" --with-apr_memcache=DIR")
print(" the apr_memcache sources are in DIR")
sys.exit(1)
class Options:
    """An ordered collection of (option, value) pairs where re-adding an
    option overwrites its value in place, preserving original ordering.

    ``list`` holds the (option, value) tuples in insertion order;
    ``dict`` maps each option name to its index in ``list``.
    """

    def __init__(self):
        self.list = []
        self.dict = {}

    def add(self, opt, val):
        """Record ``(opt, val)``; an existing option keeps its position
        but gets the new value."""
        position = self.dict.get(opt)
        if position is None:
            self.dict[opt] = len(self.list)
            self.list.append((opt, val))
        else:
            self.list[position] = (opt, val)
# Script entry point: parse the command line, merge with any previously
# saved options, persist them for --reload, and invoke main().
# NOTE: Python 2 only -- the "except getopt.GetoptError, e:" syntax below
# is not valid Python 3.
if __name__ == '__main__':
    try:
        opts, args = my_getopt(sys.argv[1:], 'st:',
                               ['debug',
                                'release',
                                'reload',
                                'assume-shared-libs',
                                'with-apr=',
                                'with-apr-util=',
                                'with-apr-iconv=',
                                'with-berkeley-db=',
                                'with-serf=',
                                'with-httpd=',
                                'with-libintl=',
                                'with-openssl=',
                                'with-zlib=',
                                'with-jdk=',
                                'with-junit=',
                                'with-swig=',
                                'with-sqlite=',
                                'with-sasl=',
                                'with-apr_memcache=',
                                'with-static-apr',
                                'with-static-openssl',
                                'enable-pool-debug',
                                'enable-purify',
                                'enable-quantify',
                                'enable-nls',
                                'enable-bdb-in-apr-util',
                                'enable-ml',
                                'disable-shared',
                                'installed-libs=',
                                'vsnet-version=',
                                # Keep distributions that help by adding a path
                                # working. On unix this would be filtered by
                                # configure, but on Windows gen-make.py is used
                                # directly.
                                'with-neon=',
                                'without-neon',
                                ])
        if len(args) > 1:
            _usage_exit("Too many arguments")
    except getopt.GetoptError, e:
        _usage_exit(str(e))

    conf = 'build.conf'
    skip = 0
    gentype = 'make'
    rest = Options()

    if args:
        conf = args[0]

    # First merge options with previously saved to gen-make.opts if --reload
    # options used
    for opt, val in opts:
        if opt == '--reload':
            prev_conf = configparser.ConfigParser()
            prev_conf.read('gen-make.opts')
            for opt, val in prev_conf.items('options'):
                if opt != '--debug':
                    rest.add(opt, val)
            del prev_conf
        elif opt == '--with-neon' or opt == '--without-neon':
            # Provide a warning that we ignored these arguments
            print("Ignoring no longer supported argument '%s'" % opt)
        else:
            rest.add(opt, val)

    # Parse options list
    for opt, val in rest.list:
        if opt == '-s':
            skip = 1
        elif opt == '-t':
            gentype = val
        else:
            # --with-httpd implies the three APR source locations.
            if opt == '--with-httpd':
                rest.add('--with-apr', os.path.join(val, 'srclib', 'apr'))
                rest.add('--with-apr-util', os.path.join(val, 'srclib', 'apr-util'))
                rest.add('--with-apr-iconv', os.path.join(val, 'srclib', 'apr-iconv'))

    # Remember all options so that --reload and other scripts can use them
    opt_conf = open('gen-make.opts', 'w')
    opt_conf.write('[options]\n')
    for opt, val in rest.list:
        opt_conf.write(opt + ' = ' + val + '\n')
    opt_conf.close()

    if gentype not in gen_modules.keys():
        _usage_exit("Unknown module type '%s'" % (gentype))

    main(conf, gentype, skip_depends=skip, other_options=rest.list)
| bsd-3-clause |
robbiet480/home-assistant | tests/components/cloud/test_client.py | 7 | 8311 | """Test the cloud.iot module."""
from aiohttp import web
import pytest
from homeassistant.components.cloud import DOMAIN
from homeassistant.components.cloud.client import CloudClient
from homeassistant.components.cloud.const import PREF_ENABLE_ALEXA, PREF_ENABLE_GOOGLE
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from . import mock_cloud, mock_cloud_prefs
from tests.async_mock import AsyncMock, MagicMock, patch
from tests.components.alexa import test_smart_home as test_alexa
@pytest.fixture
def mock_cloud_inst():
    """Mock cloud class."""
    # Bare MagicMock standing in for a Cloud instance whose subscription
    # is active (subscription_expired=False).
    return MagicMock(subscription_expired=False)
async def test_handler_alexa(hass):
    """Test handler Alexa."""
    # Two switches; the filter below excludes switch.test2 from discovery.
    hass.states.async_set("switch.test", "on", {"friendly_name": "Test switch"})
    hass.states.async_set("switch.test2", "on", {"friendly_name": "Test switch 2"})

    await mock_cloud(
        hass,
        {
            "alexa": {
                "filter": {"exclude_entities": "switch.test2"},
                "entity_config": {
                    "switch.test": {
                        "name": "Config name",
                        "description": "Config description",
                        "display_categories": "LIGHT",
                    }
                },
            }
        },
    )

    mock_cloud_prefs(hass)
    cloud = hass.data["cloud"]

    resp = await cloud.client.async_alexa_message(
        test_alexa.get_new_request("Alexa.Discovery", "Discover")
    )

    endpoints = resp["event"]["payload"]["endpoints"]

    # Only the non-excluded switch is discovered, and it carries the
    # per-entity config overrides (name, description, category).
    assert len(endpoints) == 1
    device = endpoints[0]

    assert device["description"] == "Config description via Home Assistant"
    assert device["friendlyName"] == "Config name"
    assert device["displayCategories"] == ["LIGHT"]
    assert device["manufacturerName"] == "Home Assistant"
async def test_handler_alexa_disabled(hass, mock_cloud_fixture):
    """Test handler Alexa when user has disabled it."""
    mock_cloud_fixture._prefs[PREF_ENABLE_ALEXA] = False
    cloud = hass.data["cloud"]
    resp = await cloud.client.async_alexa_message(
        test_alexa.get_new_request("Alexa.Discovery", "Discover")
    )
    # With Alexa disabled in prefs, the client must answer with an error
    # response instead of a device list.
    assert resp["event"]["header"]["namespace"] == "Alexa"
    assert resp["event"]["header"]["name"] == "ErrorResponse"
    assert resp["event"]["payload"]["type"] == "BRIDGE_UNREACHABLE"
async def test_handler_google_actions(hass):
    """Test handler Google Actions."""
    hass.states.async_set("switch.test", "on", {"friendly_name": "Test switch"})
    hass.states.async_set("switch.test2", "on", {"friendly_name": "Test switch 2"})
    hass.states.async_set("group.all_locks", "on", {"friendly_name": "Evil locks"})
    # switch.test2 is filtered out; group.all_locks is never exposed by the
    # Google integration, so the SYNC response should hold a single device.
    await mock_cloud(
        hass,
        {
            "google_actions": {
                "filter": {"exclude_entities": "switch.test2"},
                "entity_config": {
                    "switch.test": {
                        "name": "Config name",
                        "aliases": "Config alias",
                        "room": "living room",
                    }
                },
            }
        },
    )
    mock_cloud_prefs(hass)
    cloud = hass.data["cloud"]
    reqid = "5711642932632160983"
    data = {"requestId": reqid, "inputs": [{"intent": "action.devices.SYNC"}]}
    with patch(
        "hass_nabucasa.Cloud._decode_claims",
        return_value={"cognito:username": "myUserName"},
    ):
        # agentUserId is derived from the decoded claims patched above.
        await cloud.client.get_google_config()
        resp = await cloud.client.async_google_message(data)
    assert resp["requestId"] == reqid
    payload = resp["payload"]
    assert payload["agentUserId"] == "myUserName"
    devices = payload["devices"]
    assert len(devices) == 1
    device = devices[0]
    assert device["id"] == "switch.test"
    assert device["name"]["name"] == "Config name"
    assert device["name"]["nicknames"] == ["Config name", "Config alias"]
    assert device["type"] == "action.devices.types.SWITCH"
    assert device["roomHint"] == "living room"
async def test_handler_google_actions_disabled(hass, mock_cloud_fixture):
    """Test handler Google Actions when user has disabled it."""
    mock_cloud_fixture._prefs[PREF_ENABLE_GOOGLE] = False
    with patch("hass_nabucasa.Cloud.start"):
        assert await async_setup_component(hass, "cloud", {})
    reqid = "5711642932632160983"
    data = {"requestId": reqid, "inputs": [{"intent": "action.devices.SYNC"}]}
    cloud = hass.data["cloud"]
    resp = await cloud.client.async_google_message(data)
    assert resp["requestId"] == reqid
    # "deviceTurnedOff" is the error Google Assistant expects when the
    # integration is unavailable.
    assert resp["payload"]["errorCode"] == "deviceTurnedOff"
async def test_webhook_msg(hass, caplog):
    """Test webhook msg."""
    with patch("hass_nabucasa.Cloud.start"):
        setup = await async_setup_component(hass, "cloud", {"cloud": {}})
        assert setup
    cloud = hass.data["cloud"]
    await cloud.client.prefs.async_initialize()
    # One cloudhook maps to a registered webhook, the other to a webhook
    # that no longer exists locally.
    await cloud.client.prefs.async_update(
        cloudhooks={
            "mock-webhook-id": {
                "webhook_id": "mock-webhook-id",
                "cloudhook_id": "mock-cloud-id",
            },
            "no-longere-existing": {
                "webhook_id": "no-longere-existing",
                "cloudhook_id": "mock-nonexisting-id",
            },
        }
    )
    received = []

    async def handler(hass, webhook_id, request):
        """Handle a webhook."""
        received.append(request)
        return web.json_response({"from": "handler"})

    hass.components.webhook.async_register("test", "Test", "mock-webhook-id", handler)
    response = await cloud.client.async_webhook_message(
        {
            "cloudhook_id": "mock-cloud-id",
            "body": '{"hello": "world"}',
            "headers": {"content-type": "application/json"},
            "method": "POST",
            "query": None,
        }
    )
    # Registered hook: handler response is forwarded back to the cloud.
    assert response == {
        "status": 200,
        "body": '{"from": "handler"}',
        "headers": {"Content-Type": "application/json"},
    }
    assert len(received) == 1
    assert await received[0].json() == {"hello": "world"}
    # Non existing webhook
    caplog.clear()
    response = await cloud.client.async_webhook_message(
        {
            "cloudhook_id": "mock-nonexisting-id",
            "body": '{"nonexisting": "payload"}',
            "headers": {"content-type": "application/json"},
            "method": "POST",
            "query": None,
        }
    )
    # Unregistered hook: empty 200 response, and the event is logged with
    # the offending payload for debugging.
    assert response == {
        "status": 200,
        "body": None,
        "headers": {"Content-Type": "application/octet-stream"},
    }
    assert (
        "Received message for unregistered webhook no-longere-existing from cloud"
        in caplog.text
    )
    assert '{"nonexisting": "payload"}' in caplog.text
async def test_google_config_expose_entity(hass, mock_cloud_setup, mock_cloud_login):
    """Test Google config exposing entity method uses latest config."""
    cloud_client = hass.data[DOMAIN].client
    state = State("light.kitchen", "on")
    gconf = await cloud_client.get_google_config()
    assert gconf.should_expose(state)
    # Updating prefs must be reflected by the already-fetched config object
    # (i.e. it reads live preferences, not a snapshot).
    await cloud_client.prefs.async_update_google_entity_config(
        entity_id="light.kitchen", should_expose=False
    )
    assert not gconf.should_expose(state)
async def test_google_config_should_2fa(hass, mock_cloud_setup, mock_cloud_login):
    """Test Google config disabling 2FA method uses latest config."""
    cloud_client = hass.data[DOMAIN].client
    gconf = await cloud_client.get_google_config()
    state = State("light.kitchen", "on")
    assert gconf.should_2fa(state)
    # As with should_expose, the config object must observe pref updates
    # made after it was created.
    await cloud_client.prefs.async_update_google_entity_config(
        entity_id="light.kitchen", disable_2fa=True
    )
    assert not gconf.should_2fa(state)
async def test_set_username(hass):
    """Test we set username during login."""
    prefs = MagicMock(
        alexa_enabled=False,
        google_enabled=False,
        async_set_username=AsyncMock(return_value=None),
    )
    client = CloudClient(hass, prefs, None, {}, {})
    client.cloud = MagicMock(is_logged_in=True, username="mock-username")
    await client.logged_in()
    # logged_in() must persist the cloud username exactly once.
    assert len(prefs.async_set_username.mock_calls) == 1
    assert prefs.async_set_username.mock_calls[0][1][0] == "mock-username"
| apache-2.0 |
elijah513/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]

# Define two outlier detection tools to be compared.  The One-Class SVM's nu
# is tied to the known contamination fraction; EllipticEnvelope assumes
# Gaussian data.
classifiers = {
    "One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
                                     kernel="rbf", gamma=0.1),
    "robust covariance estimator": EllipticEnvelope(contamination=.1)}

# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0  # outliers are appended last below

# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
    np.random.seed(42)
    # Data generation: two Gaussian blobs, each with half of the inliers.
    # BUGFIX: randn requires integer dimensions; 0.5 * n_inliers is a float
    # and raises TypeError on modern NumPy, so use floor division.
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
    X = np.r_[X1, X2]
    # Add outliers drawn uniformly over the plotted square.
    X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]

    # Fit the model with the One-Class SVM
    plt.figure(figsize=(10, 5))
    # NOTE: use a distinct loop variable here; the original reused `i`,
    # shadowing the outer cluster-separation index.
    for clf_idx, (clf_name, clf) in enumerate(classifiers.items()):
        # fit the data and tag outliers
        clf.fit(X)
        y_pred = clf.decision_function(X).ravel()
        # Threshold at the known contamination percentile instead of using
        # the estimator's own predict().
        threshold = stats.scoreatpercentile(y_pred,
                                            100 * outliers_fraction)
        y_pred = y_pred > threshold
        n_errors = (y_pred != ground_truth).sum()
        # plot the levels lines and the points
        Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        subplot = plt.subplot(1, 2, clf_idx + 1)
        subplot.set_title("Outlier detection")
        subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
                         cmap=plt.cm.Blues_r)
        a = subplot.contour(xx, yy, Z, levels=[threshold],
                            linewidths=2, colors='red')
        subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
                         colors='orange')
        b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
        c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
        subplot.axis('tight')
        subplot.legend(
            [a.collections[0], b, c],
            ['learned decision function', 'true inliers', 'true outliers'],
            prop=matplotlib.font_manager.FontProperties(size=11))
        subplot.set_xlabel("%d. %s (errors: %d)" % (clf_idx + 1, clf_name, n_errors))
        subplot.set_xlim((-7, 7))
        subplot.set_ylim((-7, 7))
    plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)

plt.show()
| bsd-3-clause |
lightningkay/NoahGameFrame | Dependencies/googletest-release-1.8.0/googletest/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
# Platform detection: the streaming flag is Linux-only and death tests are
# not listed in --help on Windows.
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')
FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# Mis-spelled variants of --gtest_list_tests that should still trigger the
# help text: single dash, Windows-style slash, dashes instead of underscores.
INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG),
                           re.sub('^--', '/', LIST_TESTS_FLAG),
                           re.sub('_', '-', LIST_TESTS_FLAG)]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'
# Probe the binary once to learn whether it was built with death tests.
SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output
# The help message must match this regex.
HELP_REGEX = re.compile(
    FLAG_PREFIX + r'list_tests.*' +
    FLAG_PREFIX + r'filter=.*' +
    FLAG_PREFIX + r'also_run_disabled_tests.*' +
    FLAG_PREFIX + r'repeat=.*' +
    FLAG_PREFIX + r'shuffle.*' +
    FLAG_PREFIX + r'random_seed=.*' +
    FLAG_PREFIX + r'color=.*' +
    FLAG_PREFIX + r'print_time.*' +
    FLAG_PREFIX + r'output=.*' +
    FLAG_PREFIX + r'break_on_failure.*' +
    FLAG_PREFIX + r'throw_on_failure.*' +
    FLAG_PREFIX + r'catch_exceptions=0.*',
    re.DOTALL)
def RunWithFlag(flag):
    """Runs gtest_help_test_ with the given flag.

    Args:
      flag: the command-line flag to pass to gtest_help_test_, or None.

    Returns:
      the exit code and the text output as a tuple.
    """
    # Build the argv: bare binary when no flag was requested.
    command = [PROGRAM_PATH] if flag is None else [PROGRAM_PATH, flag]
    child = gtest_test_utils.Subprocess(command)
    return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
    """Tests the --help flag and its equivalent forms."""

    def TestHelpFlag(self, flag):
        """Verifies correct behavior when help flag is specified.

        The right message must be printed and the tests must
        skipped when the given flag is specified.

        Args:
          flag: A flag to pass to the binary or None.
        """
        exit_code, output = RunWithFlag(flag)
        self.assertEquals(0, exit_code)
        self.assert_(HELP_REGEX.search(output), output)
        # Platform-dependent flags only show up where they are supported.
        if IS_LINUX:
            self.assert_(STREAM_RESULT_TO_FLAG in output, output)
        else:
            self.assert_(STREAM_RESULT_TO_FLAG not in output, output)
        if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
            self.assert_(DEATH_TEST_STYLE_FLAG in output, output)
        else:
            self.assert_(DEATH_TEST_STYLE_FLAG not in output, output)

    def TestNonHelpFlag(self, flag):
        """Verifies correct behavior when no help flag is specified.

        Verifies that when no help flag is specified, the tests are run
        and the help message is not printed.

        Args:
          flag: A flag to pass to the binary or None.
        """
        # The test binary contains failing tests, hence a non-zero exit code
        # proves the tests actually ran.
        exit_code, output = RunWithFlag(flag)
        self.assert_(exit_code != 0)
        self.assert_(not HELP_REGEX.search(output), output)

    def testPrintsHelpWithFullFlag(self):
        self.TestHelpFlag('--help')

    def testPrintsHelpWithShortFlag(self):
        self.TestHelpFlag('-h')

    def testPrintsHelpWithQuestionFlag(self):
        self.TestHelpFlag('-?')

    def testPrintsHelpWithWindowsStyleQuestionFlag(self):
        self.TestHelpFlag('/?')

    def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
        self.TestHelpFlag(UNKNOWN_FLAG)

    def testPrintsHelpWithIncorrectFlagStyle(self):
        for incorrect_flag in INCORRECT_FLAG_VARIANTS:
            self.TestHelpFlag(incorrect_flag)

    def testRunsTestsWithoutHelpFlag(self):
        """Verifies that when no help flag is specified, the tests are run
        and the help message is not printed."""
        self.TestNonHelpFlag(None)

    def testRunsTestsWithGtestInternalFlag(self):
        """Verifies that the tests are run and no help message is printed when
        a flag starting with Google Test prefix and 'internal_' is supplied."""
        self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
# Entry point: delegates to gtest's shared test-runner harness.
if __name__ == '__main__':
    gtest_test_utils.Main()
| apache-2.0 |
sodafree/backend | tests/regressiontests/localflavor/de/tests.py | 33 | 1847 | from django.contrib.localflavor.de.forms import (DEZipCodeField, DEStateSelect,
DEIdentityCardNumberField)
from django.test import SimpleTestCase
class DELocalFlavorTests(SimpleTestCase):
    # German (DE) localflavor form-field tests.

    def test_DEStateSelect(self):
        # The widget must render all 16 German states with the passed value
        # ('TH') pre-selected.
        f = DEStateSelect()
        out = u'''<select name="states">
<option value="BW">Baden-Wuerttemberg</option>
<option value="BY">Bavaria</option>
<option value="BE">Berlin</option>
<option value="BB">Brandenburg</option>
<option value="HB">Bremen</option>
<option value="HH">Hamburg</option>
<option value="HE">Hessen</option>
<option value="MV">Mecklenburg-Western Pomerania</option>
<option value="NI">Lower Saxony</option>
<option value="NW">North Rhine-Westphalia</option>
<option value="RP">Rhineland-Palatinate</option>
<option value="SL">Saarland</option>
<option value="SN">Saxony</option>
<option value="ST">Saxony-Anhalt</option>
<option value="SH">Schleswig-Holstein</option>
<option value="TH" selected="selected">Thuringia</option>
</select>'''
        self.assertHTMLEqual(f.render('states', 'TH'), out)

    def test_DEZipCodeField(self):
        error_format = [u'Enter a zip code in the format XXXXX.']
        valid = {
            '99423': '99423',
        }
        invalid = {
            # Leading whitespace is rejected -- no normalization happens.
            ' 99423': error_format,
        }
        self.assertFieldOutput(DEZipCodeField, valid, invalid)

    def test_DEIdentityCardNumberField(self):
        error_format = [u'Enter a valid German identity card number in XXXXXXXXXXX-XXXXXXX-XXXXXXX-X format.']
        valid = {
            '7549313035D-6004103-0903042-0': '7549313035D-6004103-0903042-0',
            # Space-separated groups are normalized to the dashed format.
            '9786324830D 6104243 0910271 2': '9786324830D-6104243-0910271-2',
        }
        invalid = {
            # Correct shape but failing checksum digits.
            '0434657485D-6407276-0508137-9': error_format,
        }
        self.assertFieldOutput(DEIdentityCardNumberField, valid, invalid)
| bsd-3-clause |
nandhp/youtube-dl | youtube_dl/extractor/rutube.py | 15 | 6471 | # encoding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
determine_ext,
unified_strdate,
)
class RutubeIE(InfoExtractor):
    """Extractor for individual Rutube videos (32-char hex ids)."""
    IE_NAME = 'rutube'
    IE_DESC = 'Rutube videos'
    _VALID_URL = r'https?://rutube\.ru/(?:video|play/embed)/(?P<id>[\da-z]{32})'
    _TESTS = [{
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
        'info_dict': {
            'id': '3eac3b4561676c17df9132a9a1e62e3e',
            'ext': 'mp4',
            'title': 'Раненный кенгуру забежал в аптеку',
            'description': 'http://www.ntdtv.ru ',
            'duration': 80,
            'uploader': 'NTDRussian',
            'uploader_id': '29790',
            'upload_date': '20131016',
            'age_limit': 0,
        },
        'params': {
            # It requires ffmpeg (m3u8 download)
            'skip_download': True,
        },
    }, {
        'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Two API calls: one for video metadata, one for the format URLs.
        video = self._download_json(
            'http://rutube.ru/api/video/%s/?format=json' % video_id,
            video_id, 'Downloading video JSON')
        # Some videos don't have the author field
        author = video.get('author') or {}
        options = self._download_json(
            'http://rutube.ru/api/play/options/%s/?format=json' % video_id,
            video_id, 'Downloading options JSON')
        formats = []
        # The balancer maps format ids to URLs; dispatch on file extension
        # (HLS manifest, HDS manifest, or a direct media URL).
        for format_id, format_url in options['video_balancer'].items():
            ext = determine_ext(format_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_id, fatal=False))
            else:
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                })
        self._sort_formats(formats)
        return {
            'id': video['id'],
            'title': video['title'],
            'description': video['description'],
            'duration': video['duration'],
            'view_count': video['hits'],
            'formats': formats,
            'thumbnail': video['thumbnail_url'],
            'uploader': author.get('name'),
            # author is {} when missing, so guard before indexing 'id'.
            'uploader_id': compat_str(author['id']) if author else None,
            'upload_date': unified_strdate(video['created_ts']),
            'age_limit': 18 if video['is_adult'] else 0,
        }
class RutubeEmbedIE(InfoExtractor):
    """Extractor for embedded Rutube players (numeric embed ids).

    Resolves the embed page to its canonical video URL and delegates to
    RutubeIE.
    """
    IE_NAME = 'rutube:embed'
    IE_DESC = 'Rutube embedded videos'
    # BUGFIX: made this a raw string, consistent with the sibling extractors.
    # The plain string relied on Python passing the invalid '\.' escape
    # through unchanged (a DeprecationWarning, an error in newer Pythons).
    _VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
        'info_dict': {
            'id': 'a10e53b86e8f349080f718582ce4c661',
            'ext': 'mp4',
            'upload_date': '20131223',
            'uploader_id': '297833',
            'description': 'Видео группы ★http://vk.com/foxkidsreset★ музей Fox Kids и Jetix<br/><br/> восстановлено и сделано в шикоформате subziro89 http://vk.com/subziro89',
            'uploader': 'subziro89 ILya',
            'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89',
        },
        'params': {
            'skip_download': 'Requires ffmpeg',
        },
    }, {
        'url': 'http://rutube.ru/play/embed/8083783',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        embed_id = self._match_id(url)
        webpage = self._download_webpage(url, embed_id)
        # The embed page carries a <link rel="canonical"> pointing at the
        # regular video page, which RutubeIE knows how to handle.
        canonical_url = self._html_search_regex(
            r'<link\s+rel="canonical"\s+href="([^"]+?)"', webpage,
            'Canonical URL')
        return self.url_result(canonical_url, 'Rutube')
class RutubeChannelIE(InfoExtractor):
    """Extractor for Rutube channel (tag) playlists; also the base class for
    the movie and person playlist extractors below, which only override the
    URL pattern and page template."""
    IE_NAME = 'rutube:channel'
    IE_DESC = 'Rutube channels'
    _VALID_URL = r'https?://rutube\.ru/tags/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/tags/video/1800/',
        'info_dict': {
            'id': '1800',
        },
        'playlist_mincount': 68,
    }]
    # Filled in differently by subclasses: (channel_id, page_number).
    _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'

    def _extract_videos(self, channel_id, channel_title=None):
        entries = []
        # Walk the paginated API until an empty page or has_next is false.
        for pagenum in itertools.count(1):
            page = self._download_json(
                self._PAGE_TEMPLATE % (channel_id, pagenum),
                channel_id, 'Downloading page %s' % pagenum)
            results = page['results']
            if not results:
                break
            entries.extend(self.url_result(result['video_url'], 'Rutube') for result in results)
            if not page['has_next']:
                break
        return self.playlist_result(entries, channel_id, channel_title)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id)
class RutubeMovieIE(RutubeChannelIE):
    """Extractor for Rutube movie playlists (metainfo/tv pages)."""
    IE_NAME = 'rutube:movie'
    IE_DESC = 'Rutube movies'
    _VALID_URL = r'https?://rutube\.ru/metainfo/tv/(?P<id>\d+)'
    _TESTS = []
    # Extra endpoint for movie metadata (to get the playlist title).
    _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
    _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'

    def _real_extract(self, url):
        movie_id = self._match_id(url)
        movie = self._download_json(
            self._MOVIE_TEMPLATE % movie_id, movie_id,
            'Downloading movie JSON')
        movie_name = movie['name']
        # Pagination itself is inherited from RutubeChannelIE.
        return self._extract_videos(movie_id, movie_name)
class RutubePersonIE(RutubeChannelIE):
    """Extractor for a Rutube user's uploaded videos; only the URL pattern
    and API page template differ from RutubeChannelIE."""
    IE_NAME = 'rutube:person'
    IE_DESC = 'Rutube person videos'
    _VALID_URL = r'https?://rutube\.ru/video/person/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://rutube.ru/video/person/313878/',
        'info_dict': {
            'id': '313878',
        },
        'playlist_mincount': 37,
    }]
    _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
| unlicense |
HyperBaton/ansible | lib/ansible/modules/monitoring/statusio_maintenance.py | 10 | 16873 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: statusio_maintenance
short_description: Create maintenance windows for your status.io dashboard
description:
- Creates a maintenance window for status.io
- Deletes a maintenance window for status.io
notes:
- You can use the apiary API url (http://docs.statusio.apiary.io/) to
capture API traffic
- Use start_date and start_time with minutes to set future maintenance window
version_added: "2.2"
author: Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
options:
title:
description:
- A descriptive title for the maintenance window
default: "A new maintenance window"
desc:
description:
- Message describing the maintenance window
default: "Created by Ansible"
state:
description:
- Desired state of the package.
default: "present"
choices: ["present", "absent"]
api_id:
description:
- Your unique API ID from status.io
required: true
api_key:
description:
- Your unique API Key from status.io
required: true
statuspage:
description:
- Your unique StatusPage ID from status.io
required: true
url:
description:
- Status.io API URL. A private apiary can be used instead.
default: "https://api.status.io"
components:
description:
- The given name of your component (server name)
aliases: ['component']
containers:
description:
- The given name of your container (data center)
aliases: ['container']
all_infrastructure_affected:
description:
- If it affects all components and containers
type: bool
default: 'no'
automation:
description:
- Automatically start and end the maintenance window
type: bool
default: 'no'
maintenance_notify_now:
description:
- Notify subscribers now
type: bool
default: 'no'
maintenance_notify_72_hr:
description:
- Notify subscribers 72 hours before maintenance start time
type: bool
default: 'no'
maintenance_notify_24_hr:
description:
- Notify subscribers 24 hours before maintenance start time
type: bool
default: 'no'
maintenance_notify_1_hr:
description:
- Notify subscribers 1 hour before maintenance start time
type: bool
default: 'no'
maintenance_id:
description:
- The maintenance id number when deleting a maintenance window
minutes:
description:
- The length of time in UTC that the maintenance will run
(starting from playbook runtime)
default: 10
start_date:
description:
- Date maintenance is expected to start (Month/Day/Year) (UTC)
- End Date is worked out from start_date + minutes
start_time:
description:
- Time maintenance is expected to start (Hour:Minutes) (UTC)
- End Time is worked out from start_time + minutes
'''
EXAMPLES = '''
- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
statusio_maintenance:
title: Router Upgrade from ansible
desc: Performing a Router Upgrade
components: server1.example.com
api_id: api_id
api_key: api_key
statuspage: statuspage_id
maintenance_notify_1_hr: True
automation: True
- name: Create a maintenance window for 60 minutes on server1 and server2
statusio_maintenance:
title: Routine maintenance
desc: Some security updates
components:
- server1.example.com
- server2.example.com
minutes: 60
api_id: api_id
api_key: api_key
statuspage: statuspage_id
maintenance_notify_1_hr: True
automation: True
delegate_to: localhost
- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center
statusio_maintenance:
title: Data center downtime
desc: Performing a Upgrade to our data center
components: Primary Data Center
api_id: api_id
api_key: api_key
statuspage: statuspage_id
start_date: 01/01/2016
start_time: 12:00
minutes: 1440
- name: Delete a maintenance window
statusio_maintenance:
title: Remove a maintenance window
maintenance_id: 561f90faf74bc94a4700087b
statuspage: statuspage_id
api_id: api_id
api_key: api_key
state: absent
'''
# TODO: Add RETURN documentation.
RETURN = ''' # '''
import datetime
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
def get_api_auth_headers(api_id, api_key, url, statuspage):
    """Build status.io auth headers and validate them with a probe request.

    Hits the component-list endpoint as a credentials check.  Returns a
    (rc, headers, content, error) tuple: rc 0 with the headers and the
    decoded component listing on success, rc 1 with an error string on
    authentication or transport failure.
    """
    headers = {
        "x-api-id": api_id,
        "x-api-key": api_key,
        "Content-Type": "application/json"
    }
    try:
        response = open_url(
            url + "/v2/component/list/" + statuspage, headers=headers)
        data = json.loads(response.read())
        # The API reports bad credentials in-band rather than via HTTP status.
        if data['status']['message'] == 'Authentication failed':
            return 1, None, None, "Authentication failed: " \
                                  "Check api_id/api_key and statuspage id."
        else:
            auth_headers = headers
            auth_content = data
    except Exception as e:
        # Broad catch: any transport/JSON error is reported to the caller
        # instead of crashing the module.
        return 1, None, None, to_native(e)
    return 0, auth_headers, auth_content, None
def get_component_ids(auth_content, components):
    """Map requested component names to status.io component/container ids.

    Matching is case-insensitive against the component listing returned by
    the API.  Returns (rc, host_ids, error): rc 0 with a list of
    {"component_id", "container_id"} dicts on success, rc 1 with the list
    of names that could not be found otherwise.
    """
    remaining = [name.lower() for name in components]
    matched = []
    for entry in auth_content["result"]:
        entry_name = entry["name"].lower()
        if entry_name not in remaining:
            continue
        matched.append({
            "component_id": entry["_id"],
            "container_id": entry["containers"][0]["_id"],
        })
        # Each requested name is consumed once.
        remaining.remove(entry_name)
    if remaining:
        # Some requested components were absent from the API listing.
        return 1, None, remaining
    return 0, matched, None
def get_container_ids(auth_content, containers):
    """Map requested container (data center) names to status.io ids.

    Mirrors get_component_ids() but matches on the first container's name
    of each listed component.  Returns (rc, host_ids, error): rc 0 with a
    list of {"component_id", "container_id"} dicts on success, rc 1 with
    the list of unmatched container names otherwise.
    """
    remaining = [name.lower() for name in containers]
    matched = []
    for entry in auth_content["result"]:
        container_name = entry["containers"][0]["name"].lower()
        if container_name not in remaining:
            continue
        matched.append({
            "component_id": entry["_id"],
            "container_id": entry["containers"][0]["_id"],
        })
        # Only the first component per container name is recorded.
        remaining.remove(container_name)
    if remaining:
        return 1, None, remaining
    return 0, matched, None
def get_date_time(start_date, start_time, minutes):
    """Compute the maintenance window start/end date and time (UTC).

    When both start_date ('%m/%d/%Y') and start_time ('%H:%M') are given,
    the end is start + minutes; otherwise the window starts now (UTC).
    NOTE(review): if only one of start_date/start_time is supplied, it is
    silently ignored and "now" is used -- preserved from the original.

    Returns:
        (rc, [start_date, start_time, end_date, end_time], error) --
        rc 0 on success, rc 1 with an error message otherwise.
    """
    returned_date = []
    if start_date and start_time:
        # Validate each field separately so the error message pinpoints
        # which one is malformed.  (The original also caught NameError,
        # which strptime can never raise.)
        try:
            datetime.datetime.strptime(start_date, '%m/%d/%Y')
            returned_date.append(start_date)
        except ValueError:
            return 1, None, "Not a valid start_date format."
        try:
            datetime.datetime.strptime(start_time, '%H:%M')
            returned_date.append(start_time)
        except ValueError:
            return 1, None, "Not a valid start_time format."
        try:
            # Work out end date/time based on minutes
            date_time_start = datetime.datetime.strptime(
                start_time + start_date, '%H:%M%m/%d/%Y')
            delta = date_time_start + datetime.timedelta(minutes=minutes)
            returned_date.append(delta.strftime("%m/%d/%Y"))
            returned_date.append(delta.strftime("%H:%M"))
        except (OverflowError, ValueError):
            # OverflowError covers absurdly large `minutes` values.
            return 1, None, "Couldn't work out a valid date"
    else:
        now = datetime.datetime.utcnow()
        delta = now + datetime.timedelta(minutes=minutes)
        # start_date
        returned_date.append(now.strftime("%m/%d/%Y"))
        returned_date.append(now.strftime("%H:%M"))
        # end_date
        returned_date.append(delta.strftime("%m/%d/%Y"))
        returned_date.append(delta.strftime("%H:%M"))
    return 0, returned_date, None
def create_maintenance(auth_headers, url, statuspage, host_ids,
                       all_infrastructure_affected, automation, title, desc,
                       returned_date, maintenance_notify_now,
                       maintenance_notify_72_hr, maintenance_notify_24_hr,
                       maintenance_notify_1_hr):
    """Schedule a maintenance window via the status.io API.

    host_ids is the component/container id mapping produced by
    get_component_ids()/get_container_ids(); returned_date is the 4-item
    [start_date, start_time, end_date, end_time] list from get_date_time().
    Returns (rc, None, error): rc 0 on success, rc 1 with a message on
    failure.
    """
    # Each date/time element is wrapped in a single-item list before being
    # sent -- presumably the API expects list-valued fields here; TODO
    # confirm against the status.io API docs.
    returned_dates = [[x] for x in returned_date]
    component_id = []
    container_id = []
    for val in host_ids:
        component_id.append(val['component_id'])
        container_id.append(val['container_id'])
    try:
        # Boolean flags are sent as "0"/"1" strings, as the API expects.
        values = json.dumps({
            "statuspage_id": statuspage,
            "components": component_id,
            "containers": container_id,
            "all_infrastructure_affected": str(int(all_infrastructure_affected)),
            "automation": str(int(automation)),
            "maintenance_name": title,
            "maintenance_details": desc,
            "date_planned_start": returned_dates[0],
            "time_planned_start": returned_dates[1],
            "date_planned_end": returned_dates[2],
            "time_planned_end": returned_dates[3],
            "maintenance_notify_now": str(int(maintenance_notify_now)),
            "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
            "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
            "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
        })
        response = open_url(
            url + "/v2/maintenance/schedule", data=values,
            headers=auth_headers)
        data = json.loads(response.read())
        # API-level failures are flagged in-band with error == "yes".
        if data["status"]["error"] == "yes":
            return 1, None, data["status"]["message"]
    except Exception as e:
        return 1, None, to_native(e)
    return 0, None, None
def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
    """Cancel a scheduled maintenance window via the status.io API.

    Returns (rc, None, error): rc 0 on success, rc 1 with an error string
    when the API flags the request or the transport fails.
    """
    try:
        payload = json.dumps({
            "statuspage_id": statuspage,
            "maintenance_id": maintenance_id,
        })
        response = open_url(
            url=url + "/v2/maintenance/delete",
            data=payload,
            headers=auth_headers)
        result = json.loads(response.read())
        # The API reports failures in-band via status.error == "yes".
        if result["status"]["error"] == "yes":
            return 1, None, "Invalid maintenance_id"
    except Exception as e:
        return 1, None, to_native(e)
    return 0, None, None
def main():
    """Ansible module entry point: create or delete a status.io maintenance
    window depending on ``state`` (present/absent)."""
    module = AnsibleModule(
        argument_spec=dict(
            api_id=dict(required=True),
            api_key=dict(required=True, no_log=True),
            statuspage=dict(required=True),
            state=dict(required=False, default='present',
                       choices=['present', 'absent']),
            url=dict(default='https://api.status.io', required=False),
            components=dict(type='list', required=False, default=None,
                            aliases=['component']),
            containers=dict(type='list', required=False, default=None,
                            aliases=['container']),
            all_infrastructure_affected=dict(type='bool', default=False,
                                             required=False),
            automation=dict(type='bool', default=False, required=False),
            title=dict(required=False, default='A new maintenance window'),
            desc=dict(required=False, default='Created by Ansible'),
            minutes=dict(type='int', required=False, default=10),
            maintenance_notify_now=dict(type='bool', default=False,
                                        required=False),
            maintenance_notify_72_hr=dict(type='bool', default=False,
                                          required=False),
            maintenance_notify_24_hr=dict(type='bool', default=False,
                                          required=False),
            maintenance_notify_1_hr=dict(type='bool', default=False,
                                         required=False),
            maintenance_id=dict(required=False, default=None),
            start_date=dict(default=None, required=False),
            start_time=dict(default=None, required=False)
        ),
        supports_check_mode=True,
    )

    api_id = module.params['api_id']
    api_key = module.params['api_key']
    statuspage = module.params['statuspage']
    state = module.params['state']
    url = module.params['url']
    components = module.params['components']
    containers = module.params['containers']
    all_infrastructure_affected = module.params['all_infrastructure_affected']
    automation = module.params['automation']
    title = module.params['title']
    desc = module.params['desc']
    minutes = module.params['minutes']
    maintenance_notify_now = module.params['maintenance_notify_now']
    maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
    maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
    maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
    maintenance_id = module.params['maintenance_id']
    start_date = module.params['start_date']
    start_time = module.params['start_time']

    if state == "present":

        if api_id and api_key:
            (rc, auth_headers, auth_content, error) = \
                get_api_auth_headers(api_id, api_key, url, statuspage)
            # fail_json exits the process, so execution only continues on
            # success.
            if rc != 0:
                module.fail_json(msg="Failed to get auth keys: %s" % error)
        else:
            auth_headers = {}
            auth_content = {}

        # Operator precedence makes this `minutes or (start_time and
        # start_date)`. Since `minutes` defaults to 10 (truthy), the branch
        # normally runs. NOTE(review): if minutes=0 is passed without
        # start_time, `returned_date` stays unbound and create_maintenance
        # below would raise NameError — confirm intended.
        if minutes or start_time and start_date:
            (rc, returned_date, error) = get_date_time(
                start_date, start_time, minutes)
            if rc != 0:
                module.fail_json(msg="Failed to set date/time: %s" % error)

        # Exactly one of components/containers must be supplied.
        if not components and not containers:
            return module.fail_json(msg="A Component or Container must be "
                                        "defined")
        elif components and containers:
            return module.fail_json(msg="Components and containers cannot "
                                        "be used together")
        else:
            if components:
                (rc, host_ids, error) = get_component_ids(auth_content,
                                                          components)
                if rc != 0:
                    module.fail_json(msg="Failed to find component %s" % error)

            if containers:
                (rc, host_ids, error) = get_container_ids(auth_content,
                                                          containers)
                if rc != 0:
                    module.fail_json(msg="Failed to find container %s" % error)

            if module.check_mode:
                # Check mode: report that a change would be made, do nothing.
                module.exit_json(changed=True)
            else:
                (rc, _, error) = create_maintenance(
                    auth_headers, url, statuspage, host_ids,
                    all_infrastructure_affected, automation,
                    title, desc, returned_date, maintenance_notify_now,
                    maintenance_notify_72_hr, maintenance_notify_24_hr,
                    maintenance_notify_1_hr)
                if rc == 0:
                    module.exit_json(changed=True, result="Successfully created "
                                                          "maintenance")
                else:
                    module.fail_json(msg="Failed to create maintenance: %s"
                                         % error)

    if state == "absent":

        if api_id and api_key:
            (rc, auth_headers, auth_content, error) = \
                get_api_auth_headers(api_id, api_key, url, statuspage)
            if rc != 0:
                module.fail_json(msg="Failed to get auth keys: %s" % error)
        else:
            auth_headers = {}

        if module.check_mode:
            module.exit_json(changed=True)
        else:
            (rc, _, error) = delete_maintenance(
                auth_headers, url, statuspage, maintenance_id)
            if rc == 0:
                module.exit_json(
                    changed=True,
                    result="Successfully deleted maintenance"
                )
            else:
                module.fail_json(
                    msg="Failed to delete maintenance: %s" % error)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ehirt/odoo | addons/hr_timesheet_invoice/report/hr_timesheet_invoice_report.py | 318 | 9494 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.sql import drop_view_if_exists
class report_timesheet_line(osv.osv):
    """Read-only reporting model backed by the SQL view created in init():
    one row per (date, user, product, account, general account, invoice)
    group of analytic lines, with summed time and cost."""
    _name = "report.timesheet.line"
    _description = "Timesheet Line"
    _auto = False  # no table is created; the view below provides the data
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'user_id': fields.many2one('res.users', 'User', readonly=True),
        'date': fields.date('Date', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'cost': fields.float('Cost', readonly=True),
        'product_id': fields.many2one('product.product', 'Product',readonly=True),
        'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'general_account_id': fields.many2one('account.account', 'General Account', readonly=True),
        'invoice_id': fields.many2one('account.invoice', 'Invoiced', readonly=True),
        'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                   ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc,user_id desc'

    def init(self, cr):
        # (Re)create the backing view; aggregates analytic lines that have
        # a user set.
        drop_view_if_exists(cr, 'report_timesheet_line')
        cr.execute("""
            create or replace view report_timesheet_line as (
                select
                    min(l.id) as id,
                    l.date as date,
                    to_char(l.date,'YYYY') as name,
                    to_char(l.date,'MM') as month,
                    l.user_id,
                    to_char(l.date, 'YYYY-MM-DD') as day,
                    l.invoice_id,
                    l.product_id,
                    l.account_id,
                    l.general_account_id,
                    sum(l.unit_amount) as quantity,
                    sum(l.amount) as cost
                from
                    account_analytic_line l
                where
                    l.user_id is not null
                group by
                    l.date,
                    l.user_id,
                    l.product_id,
                    l.account_id,
                    l.general_account_id,
                    l.invoice_id
            )
            """)
class report_timesheet_user(osv.osv):
    """Read-only reporting model: time and cost per user per day, backed
    by a SQL view over account_analytic_line."""
    _name = "report_timesheet.user"
    _description = "Timesheet per day"
    _auto = False  # backed by the view created in init()
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'cost': fields.float('Cost', readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                  ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc,user_id desc'

    def init(self, cr):
        drop_view_if_exists(cr, 'report_timesheet_user')
        cr.execute("""
            create or replace view report_timesheet_user as (
                select
                    min(l.id) as id,
                    to_char(l.date,'YYYY') as name,
                    to_char(l.date,'MM') as month,
                    l.user_id,
                    sum(l.unit_amount) as quantity,
                    sum(l.amount) as cost
                from
                    account_analytic_line l
                where
                    user_id is not null
                group by l.date, to_char(l.date,'YYYY'),to_char(l.date,'MM'), l.user_id
            )
            """)
class report_timesheet_account(osv.osv):
    """Read-only reporting model: time per analytic account per user.

    NOTE(review): unlike the sibling views this one groups on create_date
    rather than date — confirm that is intentional.
    """
    _name = "report_timesheet.account"
    _description = "Timesheet per account"
    _auto = False  # backed by the view created in init()
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                  ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc,account_id desc,user_id desc'

    def init(self, cr):
        drop_view_if_exists(cr, 'report_timesheet_account')
        cr.execute("""
            create or replace view report_timesheet_account as (
                select
                    min(id) as id,
                    to_char(create_date, 'YYYY') as name,
                    to_char(create_date,'MM') as month,
                    user_id,
                    account_id,
                    sum(unit_amount) as quantity
                from
                    account_analytic_line
                group by
                    to_char(create_date, 'YYYY'),to_char(create_date, 'MM'), user_id, account_id
            )
            """)
class report_timesheet_account_date(osv.osv):
    """Read-only reporting model: time per analytic account per user,
    grouped by the analytic line date (daily granularity)."""
    _name = "report_timesheet.account.date"
    _description = "Daily timesheet per account"
    _auto = False  # backed by the view created in init()
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                  ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
    }
    _order = 'name desc,account_id desc,user_id desc'

    def init(self, cr):
        drop_view_if_exists(cr, 'report_timesheet_account_date')
        cr.execute("""
            create or replace view report_timesheet_account_date as (
                select
                    min(id) as id,
                    to_char(date,'YYYY') as name,
                    to_char(date,'MM') as month,
                    user_id,
                    account_id,
                    sum(unit_amount) as quantity
                from
                    account_analytic_line
                group by
                    to_char(date,'YYYY'),to_char(date,'MM'), user_id, account_id
            )
            """)
class report_timesheet_invoice(osv.osv):
    """Read-only reporting model: uninvoiced analytic time per user and
    project, with an estimated amount to invoice."""
    _name = "report_timesheet.invoice"
    _description = "Costs to invoice"
    _auto = False  # backed by the view created in init()
    _columns = {
        'user_id':fields.many2one('res.users', 'User', readonly=True),
        'account_id':fields.many2one('account.analytic.account', 'Project', readonly=True),
        'manager_id':fields.many2one('res.users', 'Manager', readonly=True),
        'quantity': fields.float('Time', readonly=True),
        'amount_invoice': fields.float('To invoice', readonly=True)
    }
    _rec_name = 'user_id'
    _order = 'user_id desc'

    def init(self, cr):
        # NOTE(review): product_product and product_template are joined on
        # l.to_invoice=f.id (the invoice-factor id), not on the line's
        # product — this looks like a copy-paste error and makes
        # amount_invoice use an arbitrary matching product's list_price;
        # confirm against upstream before relying on this figure.
        drop_view_if_exists(cr, 'report_timesheet_invoice')
        cr.execute("""
            create or replace view report_timesheet_invoice as (
                select
                    min(l.id) as id,
                    l.user_id as user_id,
                    l.account_id as account_id,
                    a.user_id as manager_id,
                    sum(l.unit_amount) as quantity,
                    sum(l.unit_amount * t.list_price) as amount_invoice
                from account_analytic_line l
                    left join hr_timesheet_invoice_factor f on (l.to_invoice=f.id)
                    left join account_analytic_account a on (l.account_id=a.id)
                    left join product_product p on (l.to_invoice=f.id)
                    left join product_template t on (l.to_invoice=f.id)
                where
                    l.to_invoice is not null and
                    l.invoice_id is null
                group by
                    l.user_id,
                    l.account_id,
                    a.user_id
            )
            """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lbt/spectacle | setup.py | 2 | 1459 | #!/usr/bin/env python
import os, sys
from distutils.core import setup
try:
    import setuptools
    # enable "setup.py develop", optional
except ImportError:
    pass

# Guard against direct `setup.py install` runs: the project expects
# `make install`, except when driven by make itself (MAKEFLAGS set) or an
# RPM build (RPM_BUILD_ROOT set). Python 2 script: raw_input/print stmt.
if 'install' in sys.argv and \
   'MAKEFLAGS' not in os.environ and \
   'RPM_BUILD_ROOT' not in os.environ:
    repl = raw_input('WARNING: Please use `make install` for installation, continue(y/N)? ')
    if repl != 'y':
        sys.exit(1)

# For debian based systems, '--install-layout=deb' is needed after 2.6;
# strip it on older interpreters that would choke on it.
if sys.version_info[:2] <= (2, 5) and '--install-layout=deb' in sys.argv:
    del sys.argv[sys.argv.index('--install-layout=deb')]

version_path = 'VERSION'
try:
    # first line should be the version number; mirror it into
    # spectacle/__version__.py so the package can report it at runtime
    version = open(version_path).readline().strip()
    ver_file = open('spectacle/__version__.py', 'w')
    ver_file.write("VERSION = \"%s\"\n" % version)
    ver_file.close()
except IOError:
    print 'WARNING: Cannot write version number file'

setup(name='spectacle',
      version = version,
      description='Spectacle',
      author='Anas Nashif, Jian-feng Ding',
      author_email='anas.nashif@intel.com, jian-feng.ding@intel.com',
      url='https://meego.gitorious.org/meego-developer-tools/spectacle',
      scripts=['tools/specify', 'tools/ini2spectacle', 'tools/spec2spectacle', 'tools/deb2spectacle'],
      packages=['spectacle', 'spectacle.spec', 'spectacle.dsc'],
      package_data={'spectacle': ['data/*.csv', 'data/GROUPS']},
      )
| gpl-2.0 |
autosportlabs/kivy | examples/tutorials/pong/steps/step4/main.py | 15 | 1210 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty
from kivy.vector import Vector
from kivy.clock import Clock
from random import randint
class PongBall(Widget):
    """The moving ball; its graphics are defined in the pong.kv file."""

    # Per-axis velocity components; `velocity` exposes them as a 2-vector
    # so the ball can be launched with a single assignment.
    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    velocity = ReferenceListProperty(velocity_x, velocity_y)

    def move(self):
        """Advance the ball's position by one velocity step."""
        self.pos = Vector(*self.velocity) + self.pos
class PongGame(Widget):
    """Root game widget: owns the ball and steps the simulation."""

    # Reference to the PongBall instance, wired up from the kv file.
    ball = ObjectProperty(None)

    def serve_ball(self):
        """Center the ball and launch it in a random direction."""
        self.ball.center = self.center
        self.ball.velocity = Vector(4, 0).rotate(randint(0, 360))

    def update(self, dt):
        """Per-frame step: move the ball and reflect it off the walls."""
        self.ball.move()

        # bounce off top and bottom
        if (self.ball.y < 0) or (self.ball.top > self.height):
            self.ball.velocity_y *= -1

        # bounce off left and right
        if (self.ball.x < 0) or (self.ball.right > self.width):
            self.ball.velocity_x *= -1
class PongApp(App):
    """Kivy application wrapper for the pong tutorial."""

    def build(self):
        # Create the game, serve the first ball and drive updates at 60 FPS.
        game = PongGame()
        game.serve_ball()
        Clock.schedule_interval(game.update, 1.0 / 60.0)
        return game
# Start the Kivy app when run as a script.
if __name__ == '__main__':
    PongApp().run()
| mit |
windyuuy/opera | chromium/src/tools/gyp/test/msvs/list_excluded/gyptest-all.py | 347 | 1292 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that msvs_list_excluded_files=0 doesn't list files that would
normally be in _excluded_files, and that if that flag is not set, then they
are still listed.
"""
import os
import TestGyp
test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')

# with the flag set to 0: excluded files must NOT be listed in the project
try:
  os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_list_excluded_files=0'
  test.run_gyp('hello_exclude.gyp')
finally:
  # Always restore the environment so the later runs are unaffected.
  del os.environ['GYP_GENERATOR_FLAGS']
if test.uses_msbuild:
  test.must_not_contain('hello.vcxproj', 'hello_mac')
else:
  test.must_not_contain('hello.vcproj', 'hello_mac')


# with the flag not set: listing excluded files is the default behavior
test.run_gyp('hello_exclude.gyp')
if test.uses_msbuild:
  test.must_contain('hello.vcxproj', 'hello_mac')
else:
  test.must_contain('hello.vcproj', 'hello_mac')


# with the flag explicitly set to 1: excluded files are listed
try:
  os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_list_excluded_files=1'
  test.run_gyp('hello_exclude.gyp')
finally:
  del os.environ['GYP_GENERATOR_FLAGS']
if test.uses_msbuild:
  test.must_contain('hello.vcxproj', 'hello_mac')
else:
  test.must_contain('hello.vcproj', 'hello_mac')


test.pass_test()
| bsd-3-clause |
Infixz/WeRoBot | werobot/crypto/__init__.py | 2 | 4009 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
import socket
import struct
import time
try:
from Crypto.Cipher import AES
except ImportError:
raise RuntimeError("You need to install PyCrypto.")
from . import pkcs7
from .exceptions import UnvalidEncodingAESKey, AppIdValidationError, InvalidSignature
from werobot.utils import to_text, to_binary, generate_token, byte2int, get_signature
class PrpCrypto(object):
    """
    AES-CBC encryption/decryption helper for messages received from and
    pushed to the WeChat public platform.
    """
    def __init__(self, key):
        # The first 16 bytes of the key double as the CBC IV.
        self.cipher = AES.new(key, AES.MODE_CBC, key[:16])

    def get_random_string(self):
        """
        :return: a random string of length 16
        """
        return generate_token(16)

    def encrypt(self, text, app_id):
        """
        Encrypt plain text for the platform.

        :param text: the plain text to encrypt
        :param app_id: the AppID of the WeChat public platform account
        :return: the encrypted, base64-encoded string
        """
        # Wire format: 16 random bytes | big-endian length | text | app_id,
        # then PKCS#7 padding before AES encryption.
        text = b"".join([
            self.get_random_string(),
            struct.pack(b"I", socket.htonl(len(text))),
            to_binary(text),
            to_binary(app_id)
        ])
        text = pkcs7.encode(text)

        ciphertext = to_binary(self.cipher.encrypt(text))
        return base64.b64encode(ciphertext)

    def decrypt(self, text, app_id):
        """
        Decrypt cipher text received from the platform.

        :param text: the cipher text to decrypt
        :param app_id: the AppID of the WeChat public platform account
        :return: the decrypted XML payload
        """
        text = to_binary(text)
        plain_text = self.cipher.decrypt(base64.b64decode(text))
        # Last byte is the PKCS#7 padding length.
        padding = byte2int(plain_text, -1)
        content = plain_text[16:-padding]
        xml_len = socket.ntohl(struct.unpack("I", content[:4])[0])
        xml_content = content[4:xml_len+4]
        from_appid = content[xml_len+4:]
        # Reject messages whose trailing AppID does not match ours.
        if to_text(from_appid) != app_id:
            raise AppIdValidationError(text, app_id)
        return xml_content
class MessageCrypt(object):
    """Signs/verifies and encrypts/decrypts WeChat public-platform
    messages using the account's token and EncodingAESKey."""

    # Template of the encrypted reply envelope sent back to WeChat.
    ENCRYPTED_MESSAGE_XML = """
    <xml>
    <Encrypt><![CDATA[{encrypt}]]></Encrypt>
    <MsgSignature><![CDATA[{signature}]]></MsgSignature>
    <TimeStamp>{timestamp}</TimeStamp>
    <Nonce><![CDATA[{nonce}]]></Nonce>
    </xml>
    """.strip()

    def __init__(self, token, encoding_aes_key, app_id):
        # EncodingAESKey is base64 without padding; append '=' before
        # decoding. A valid key decodes to exactly 32 bytes.
        key = base64.b64decode(to_binary(encoding_aes_key + '='))
        if len(key) != 32:
            raise UnvalidEncodingAESKey(encoding_aes_key)
        self.prp_crypto = PrpCrypto(key)

        self.token = token
        self.app_id = app_id

    def decrypt_message(self, timestamp, nonce, msg_signature, encrypt_msg):
        """
        Decrypt a message received from WeChat.

        :param timestamp: the ``timestamp`` from the request URL
        :param nonce: the ``nonce`` from the request URL
        :param msg_signature: the ``msg_signature`` from the request URL
        :param encrypt_msg: the encrypted payload (the <Encrypt> element)
        :return: the decrypted XML text
        """
        signature = get_signature(self.token, timestamp, nonce, encrypt_msg)
        if signature != msg_signature:
            raise InvalidSignature(msg_signature)
        return self.prp_crypto.decrypt(encrypt_msg, self.app_id)

    def encrypt_message(self, reply, timestamp=None, nonce=None):
        """
        Encrypt a reply before sending it back to WeChat.

        :param reply: the reply to encrypt
        :type reply: a WeChatReply or raw XML text
        :return: the encrypted reply envelope as text
        """
        if hasattr(reply, "render"):
            reply = reply.render()
        timestamp = timestamp or to_binary(int(time.time()))
        nonce = nonce or generate_token(5)

        encrypt = to_text(self.prp_crypto.encrypt(reply, self.app_id))
        signature = get_signature(self.token, timestamp, nonce, encrypt)

        return to_text(self.ENCRYPTED_MESSAGE_XML.format(
            encrypt=encrypt,
            signature=signature,
            timestamp=timestamp,
            nonce=nonce
        ))
| mit |
alsrgv/tensorflow | tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py | 13 | 13496 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test as test_lib
def _AddTest(test, op_name, testcase_name, fn):
test_name = "_".join(["test", op_name, testcase_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
def _GenerateTestData(matrix_shape, num_rhs):
  """Builds a (matrix, rhs) pair of non-trainable TF variables for benchmarks.

  Args:
    matrix_shape: tuple; trailing two dims are (rows, cols), any leading
      dims are treated as a batch shape the data is tiled into.
    num_rhs: number of right-hand-side columns.

  Returns:
    (matrix, rhs) variables: matrix is uniform in [-1, 1); rhs is all ones.
  """
  batch_shape = matrix_shape[:-2]
  matrix_shape = matrix_shape[-2:]
  m = matrix_shape[-2]
  # Fixed seed so repeated benchmark runs see identical data.
  np.random.seed(1)
  matrix = np.random.uniform(
      low=-1.0, high=1.0,
      size=np.prod(matrix_shape)).reshape(matrix_shape).astype(np.float32)
  rhs = np.ones([m, num_rhs]).astype(np.float32)
  matrix = variables.Variable(
      np.tile(matrix, batch_shape + (1, 1)), trainable=False)
  rhs = variables.Variable(np.tile(rhs, batch_shape + (1, 1)), trainable=False)
  return matrix, rhs
def _SolveWithNumpy(matrix, rhs, l2_regularizer=0):
if l2_regularizer == 0:
np_ans, _, _, _ = np.linalg.lstsq(matrix, rhs)
return np_ans
else:
rows = matrix.shape[-2]
cols = matrix.shape[-1]
if rows >= cols:
preconditioner = l2_regularizer * np.identity(cols)
gramian = np.dot(np.conj(matrix.T), matrix) + preconditioner
rhs = np.dot(np.conj(matrix.T), rhs)
return np.linalg.solve(gramian, rhs)
else:
preconditioner = l2_regularizer * np.identity(rows)
gramian = np.dot(matrix, np.conj(matrix.T)) + preconditioner
z = np.linalg.solve(gramian, rhs)
return np.dot(np.conj(matrix.T), z)
class MatrixSolveLsOpTest(test_lib.TestCase):
  """Functional tests for linalg_ops.matrix_solve_ls."""

  def _verifySolve(self,
                   x,
                   y,
                   dtype,
                   use_placeholder,
                   fast,
                   l2_regularizer,
                   batch_shape=()):
    """Solves a*ans=b with TF and checks it against the NumPy reference.

    Args:
      x: 2-D numpy array, the left-hand-side matrix.
      y: 2-D numpy array, the right-hand side(s).
      dtype: numpy dtype the problem is cast to.
      use_placeholder: feed inputs through placeholders instead of constants.
      fast: whether to use the fast (normal-equations) kernel.
      l2_regularizer: L2 regularization weight; only the fast path supports
        a nonzero value.
      batch_shape: leading batch dimensions to tile the single problem into.
    """
    if not fast and l2_regularizer != 0:
      # The slow path does not support regularization.
      return
    maxdim = np.max(x.shape)
    # Tolerances scale with problem size and dtype precision.
    if dtype == np.float32 or dtype == np.complex64:
      tol = maxdim * 5e-4
    else:
      tol = maxdim * 5e-7
    a = x.astype(dtype)
    b = y.astype(dtype)
    if dtype in [np.complex64, np.complex128]:
      a.imag = a.real
      b.imag = b.real
    # numpy.linalg.lstsq does not support batching, so we just solve a single
    # system and replicate the solution and residual norm.
    np_ans = _SolveWithNumpy(x, y, l2_regularizer=l2_regularizer)
    np_r = np.dot(np.conj(a.T), b - np.dot(a, np_ans))
    np_r_norm = np.sqrt(np.sum(np.conj(np_r) * np_r))
    # Fixed: `batch_shape is not ()` compared identity against a tuple
    # literal; it only worked via CPython's interning of the empty tuple and
    # raises a SyntaxWarning on Python >= 3.8. Use value inequality instead.
    if batch_shape != ():
      a = np.tile(a, batch_shape + (1, 1))
      b = np.tile(b, batch_shape + (1, 1))
      np_ans = np.tile(np_ans, batch_shape + (1, 1))
      np_r_norm = np.tile(np_r_norm, batch_shape)
    with self.cached_session(use_gpu=fast) as sess:
      if use_placeholder:
        a_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
        b_ph = array_ops.placeholder(dtypes.as_dtype(dtype))
        feed_dict = {a_ph: a, b_ph: b}
        tf_ans = linalg_ops.matrix_solve_ls(
            a_ph, b_ph, fast=fast, l2_regularizer=l2_regularizer)
      else:
        tf_ans = linalg_ops.matrix_solve_ls(
            a, b, fast=fast, l2_regularizer=l2_regularizer)
        feed_dict = {}
        # Static shape is only known when inputs are constants.
        self.assertEqual(np_ans.shape, tf_ans.get_shape())
      if l2_regularizer == 0:
        # The least squares solution should satisfy A^H * (b - A*x) = 0.
        tf_r = b - math_ops.matmul(a, tf_ans)
        tf_r = math_ops.matmul(a, tf_r, adjoint_a=True)
        tf_r_norm = linalg_ops.norm(tf_r, ord="fro", axis=[-2, -1])
        tf_ans_val, tf_r_norm_val = sess.run(
            [tf_ans, tf_r_norm], feed_dict=feed_dict)
        self.assertAllClose(np_r_norm, tf_r_norm_val, atol=tol, rtol=tol)
      else:
        tf_ans_val = sess.run(tf_ans, feed_dict=feed_dict)
      self.assertEqual(np_ans.shape, tf_ans_val.shape)
      self.assertAllClose(np_ans, tf_ans_val, atol=2 * tol, rtol=2 * tol)

  @test_util.run_v1_only("b/120545219")
  def testWrongDimensions(self):
    # The matrix and right-hand sides should have the same number of rows.
    with self.session(use_gpu=True):
      matrix = constant_op.constant([[1., 0.], [0., 1.]])
      rhs = constant_op.constant([[1., 0.]])
      with self.assertRaises(ValueError):
        linalg_ops.matrix_solve_ls(matrix, rhs)

  def testEmpty(self):
    """Empty matrices/rhs must yield correctly-shaped empty results."""
    full = np.array([[1., 2.], [3., 4.], [5., 6.]])
    empty0 = np.empty([3, 0])
    empty1 = np.empty([0, 2])
    for fast in [True, False]:
      with self.cached_session(use_gpu=True):
        tf_ans = self.evaluate(
            linalg_ops.matrix_solve_ls(empty0, empty0, fast=fast))
        self.assertEqual(tf_ans.shape, (0, 0))
        tf_ans = self.evaluate(
            linalg_ops.matrix_solve_ls(empty0, full, fast=fast))
        self.assertEqual(tf_ans.shape, (0, 2))
        tf_ans = self.evaluate(
            linalg_ops.matrix_solve_ls(full, empty0, fast=fast))
        self.assertEqual(tf_ans.shape, (2, 0))
        tf_ans = self.evaluate(
            linalg_ops.matrix_solve_ls(empty1, empty1, fast=fast))
        self.assertEqual(tf_ans.shape, (2, 2))

  @test_util.run_v1_only("b/120545219")
  def testBatchResultSize(self):
    # 3x3x3 matrices, 3x3x1 right-hand sides.
    matrix = np.array([1., 2., 3., 4., 5., 6., 7., 8., 9.] * 3).reshape(3, 3, 3)
    rhs = np.array([1., 2., 3.] * 3).reshape(3, 3, 1)
    answer = linalg_ops.matrix_solve(matrix, rhs)
    ls_answer = linalg_ops.matrix_solve_ls(matrix, rhs)
    self.assertEqual(ls_answer.get_shape(), [3, 3, 1])
    self.assertEqual(answer.get_shape(), [3, 3, 1])
def _GetSmallMatrixSolveLsOpTests(dtype, use_placeholder, fast, l2_regularizer):
  """Returns test methods exercising small square, overdetermined and
  underdetermined systems for one (dtype, placeholder, fast, l2) config."""

  def Square(self):
    # 2x2 matrices, 2x3 right-hand sides.
    matrix = np.array([[1., 2.], [3., 4.]])
    rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
    for batch_shape in (), (2, 3):
      self._verifySolve(
          matrix,
          rhs,
          dtype,
          use_placeholder,
          fast,
          l2_regularizer,
          batch_shape=batch_shape)

  def Overdetermined(self):
    # 3x2 matrices, 3x3 right-hand sides (more equations than unknowns).
    matrix = np.array([[1., 2.], [3., 4.], [5., 6.]])
    rhs = np.array([[1., 0., 1.], [0., 1., 1.], [1., 1., 0.]])
    for batch_shape in (), (2, 3):
      self._verifySolve(
          matrix,
          rhs,
          dtype,
          use_placeholder,
          fast,
          l2_regularizer,
          batch_shape=batch_shape)

  def Underdetermined(self):
    # 2x3 matrices, 2x3 right-hand sides (fewer equations than unknowns).
    matrix = np.array([[1., 2., 3], [4., 5., 6.]])
    rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
    for batch_shape in (), (2, 3):
      self._verifySolve(
          matrix,
          rhs,
          dtype,
          use_placeholder,
          fast,
          l2_regularizer,
          batch_shape=batch_shape)

  return (Square, Overdetermined, Underdetermined)
def _GetLargeMatrixSolveLsOpTests(dtype, use_placeholder, fast, l2_regularizer):
  """Returns test methods exercising larger batched problems (batch shape
  (16, 8)) in square, overdetermined and underdetermined variants."""

  def LargeBatchSquare(self):
    np.random.seed(1)
    num_rhs = 1
    matrix_shape = (127, 127)
    matrix = np.random.uniform(
        low=-1.0, high=1.0,
        size=np.prod(matrix_shape)).reshape(matrix_shape).astype(np.float32)
    rhs = np.ones([matrix_shape[0], num_rhs]).astype(np.float32)
    self._verifySolve(
        matrix,
        rhs,
        dtype,
        use_placeholder,
        fast,
        l2_regularizer,
        batch_shape=(16, 8))

  def LargeBatchOverdetermined(self):
    np.random.seed(1)
    num_rhs = 1
    matrix_shape = (127, 64)
    matrix = np.random.uniform(
        low=-1.0, high=1.0,
        size=np.prod(matrix_shape)).reshape(matrix_shape).astype(np.float32)
    rhs = np.ones([matrix_shape[0], num_rhs]).astype(np.float32)
    self._verifySolve(
        matrix,
        rhs,
        dtype,
        use_placeholder,
        fast,
        l2_regularizer,
        batch_shape=(16, 8))

  def LargeBatchUnderdetermined(self):
    np.random.seed(1)
    num_rhs = 1
    matrix_shape = (64, 127)
    matrix = np.random.uniform(
        low=-1.0, high=1.0,
        size=np.prod(matrix_shape)).reshape(matrix_shape).astype(np.float32)
    rhs = np.ones([matrix_shape[0], num_rhs]).astype(np.float32)
    self._verifySolve(
        matrix,
        rhs,
        dtype,
        use_placeholder,
        fast,
        l2_regularizer,
        batch_shape=(16, 8))

  return (LargeBatchSquare, LargeBatchOverdetermined, LargeBatchUnderdetermined)
class MatrixSolveLsBenchmark(test_lib.Benchmark):
  """Benchmarks matrix_solve_ls over a range of matrix/batch shapes."""

  # Trailing two dims are (rows, cols); a leading 513 is a batch dimension.
  matrix_shapes = [
      (4, 4),
      (8, 4),
      (4, 8),
      (10, 10),
      (10, 8),
      (8, 10),
      (16, 16),
      (16, 10),
      (10, 16),
      (101, 101),
      (101, 31),
      (31, 101),
      (256, 256),
      (256, 200),
      (200, 256),
      (1001, 1001),
      (1001, 501),
      (501, 1001),
      (1024, 1024),
      (1024, 128),
      (128, 1024),
      (2048, 2048),
      (2048, 64),
      (64, 2048),
      (513, 4, 4),
      (513, 4, 2),
      (513, 2, 4),
      (513, 16, 16),
      (513, 16, 10),
      (513, 10, 16),
      (513, 256, 256),
      (513, 256, 128),
      (513, 128, 256),
  ]

  def benchmarkMatrixSolveLsOp(self):
    """Times the op on CPU for every shape, and on GPU when available."""
    run_gpu_test = test_lib.is_gpu_available(True)
    regularizer = 1.0
    for matrix_shape in self.matrix_shapes:
      for num_rhs in 1, 2, matrix_shape[-1]:

        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/cpu:0"):
          matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
          x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(x),
              min_iters=25,
              store_memory_usage=False,
              name=("matrix_solve_ls_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}"
                   ).format(matrix_shape=matrix_shape, num_rhs=num_rhs))

        # Large batched shapes (leading 513) are skipped on GPU.
        if run_gpu_test and (len(matrix_shape) < 3 or matrix_shape[0] < 513):
          with ops.Graph().as_default(), \
              session.Session(config=benchmark.benchmark_config()) as sess, \
              ops.device("/gpu:0"):
            matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
            x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
            variables.global_variables_initializer().run()
            self.run_op_benchmark(
                sess,
                control_flow_ops.group(x),
                min_iters=25,
                store_memory_usage=False,
                name=("matrix_solve_ls_gpu_shape_{matrix_shape}_num_rhs_"
                      "{num_rhs}").format(
                          matrix_shape=matrix_shape, num_rhs=num_rhs))
if __name__ == "__main__":
  # Dynamically register the small test cases for every combination of
  # dtype, placeholder usage, fast/slow kernel and regularizer strength.
  for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:
    # TF2 does not support placeholders under eager so we skip it
    for use_placeholder_ in set([False, not tf2.enabled()]):
      for fast_ in [True, False]:
        l2_regularizers = [0] if dtype_ == np.complex128 else [0, 0.1]
        for l2_regularizer_ in l2_regularizers:
          for test_case in _GetSmallMatrixSolveLsOpTests(
              dtype_, use_placeholder_, fast_, l2_regularizer_):
            name = "%s_%s_placeholder_%s_fast_%s_regu_%s" % (test_case.__name__,
                                                             dtype_.__name__,
                                                             use_placeholder_,
                                                             fast_,
                                                             l2_regularizer_)
            _AddTest(MatrixSolveLsOpTest, "MatrixSolveLsOpTest", name,
                     test_case)
  # Large tests only run the fast kernel with constants and no regularizer.
  for dtype_ in [np.float32, np.float64, np.complex64, np.complex128]:
    for test_case in _GetLargeMatrixSolveLsOpTests(dtype_, False, True, 0.0):
      name = "%s_%s" % (test_case.__name__, dtype_.__name__)
      _AddTest(MatrixSolveLsOpTest, "MatrixSolveLsOpTest", name, test_case)
  test_lib.main()
| apache-2.0 |
s-tar/project_kate | modules/contacts/admin.py | 1 | 2234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'mr.S'
from kernel.module import Module
from entities.s_contacts import Contacts
from bottle import jinja2_view as view, request
module = Module(name="contacts", route="/admin/contacts", title="Контакты", is_admin=True)
@module.route('')
@view('admin/contacts/main')
def main():
    """Render the admin contacts page with all contacts grouped by type."""
    contacts = request.db(Contacts).list_all_by_type()
    return {'contacts': contacts}
@module.post('/save')
def save():
    """Persist the admin contacts form: addresses, emails, phones, feedback
    addresses, map embeds and social-network links.

    Returns False when the current user is not an admin; otherwise commits
    the changes and returns None.
    """
    if not request.user.role('admin'):
        return False
    data = request.forms
    addresses_ids = data.getall('address_id')
    addresses = data.getall('address')
    emails_ids = data.getall('email_id')
    emails = data.getall('email')
    phones_ids = data.getall('phone_id')
    phones_nums = data.getall('phone_num')
    phones_description = data.getall('phone_description')
    feedback_ids = data.getall('feedback_id')
    feedback_email = data.getall('feedback_email')
    map_ids = data.getall('map_id')
    # Renamed local from `map` to avoid shadowing the builtin.
    map_values = data.getall('map')
    sn_names = data.getall('sn.name')
    sn_ids = data.getall('sn.id')
    sn_links = data.getall('sn.link')

    save_contacts(addresses_ids, addresses, 'address')
    save_contacts(emails_ids, emails, 'email')

    # Phone rows are stored as "number|description"; a '<delete>' marker in
    # the number field is passed through so save_contacts removes the row.
    phones = [num + '|' + desc if num != '<delete>' else '<delete>'
              for num, desc in zip(phones_nums, phones_description)]
    save_contacts(phones_ids, phones, 'phone')
    save_contacts(feedback_ids, feedback_email, 'feedback')
    save_contacts(map_ids, map_values, 'map')

    # Each social network is stored under its own contact type name.
    for sn_id, sn_name, sn_link in zip(sn_ids, sn_names, sn_links):
        save_contacts([sn_id], [sn_link], sn_name)
    request.db.commit()
    return
def save_contacts(ids, values, type):
    """Upsert Contacts rows of the given type.

    ``ids`` and ``values`` are parallel lists: an empty id creates a new
    row, a value of '<delete>' deletes the existing row instead of saving.
    The caller is responsible for committing. Note: the ``type`` parameter
    shadows the builtin but is kept for interface compatibility.

    Returns False when the current user is not an admin.
    """
    if not request.user.role('admin'):
        return False
    for i, id in enumerate(ids):
        if id != '':
            contact = request.db(Contacts).get_by_id(id)
        else:
            contact = Contacts()
        contact.cnt_type = type
        contact.cnt_value = values[i]
        if contact.cnt_value == '<delete>':
            # Only rows that already exist in the database can be deleted.
            if contact.cnt_id is not None:
                request.db.delete(contact)
        else:
            request.db.add(contact)
talnoah/N5-kernel | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Usage string shown when too many command-line arguments are given.
usage = "perf script -s syscall-counts.py [comm]\n";
# Optional filter: when set, only syscalls made by this comm are counted.
for_comm = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	for_comm = sys.argv[1]
# Per-syscall-id event counters (autodict is provided by Core).
syscalls = autodict()
def trace_begin():
	# Called once by perf before event processing starts.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called once by perf after the last event; emit the summary table.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf handler for the raw_syscalls:sys_enter tracepoint: bump the
	# counter for this syscall id, honouring the optional comm filter.
	if for_comm is not None and common_comm != for_comm:
		return
	try:
		syscalls[id] += 1
	except TypeError:
		# First hit: the autodict slot does not hold a number yet.
		syscalls[id] = 1
def print_syscall_totals():
	# Print a table of syscall counts, busiest syscall first. Trailing
	# commas suppress the extra newline of the Python 2 print statement.
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events:\n\n",
	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
		"-----------"),
	# Sort by (count, id) descending so the most frequent come first.
	for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
		reverse = True):
		print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
mkieszek/odoo | addons/sale_stock/report/sale_report.py | 3 | 1213 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp import tools
class sale_report(osv.osv):
    """Extend the sale analysis report with shipping information:
    shipped flag, count of shipped lines and the source warehouse."""
    _inherit = "sale.report"
    _columns = {
        # True once the sale order has been shipped.
        'shipped': fields.boolean('Shipped', readonly=True),
        # The shipped boolean cast to integer, so it can be summed as a measure.
        'shipped_qty_1': fields.integer('# of Shipped Lines', readonly=True),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse',readonly=True),
        'state': fields.selection([
            ('draft', 'Draft Quotation'),
            ('sent', 'Quotation Sent'),
            ('waiting_date', 'Waiting Schedule'),
            ('manual', 'Sale to Invoice'),
            ('progress', 'Sale Order'),
            ('shipping_except', 'Shipping Exception'),
            ('invoice_except', 'Invoice Exception'),
            ('done', 'Done'),
            ('cancel', 'Cancelled')
            ], 'Order Status', readonly=True),
    }
    def _select(self):
        # Append the shipping columns to the base report's SELECT clause.
        return super(sale_report, self)._select() + ", s.warehouse_id as warehouse_id, s.shipped, s.shipped::integer as shipped_qty_1"
    def _group_by(self):
        # Non-aggregated extra columns must also appear in GROUP BY.
        return super(sale_report, self)._group_by() + ", s.warehouse_id, s.shipped"
| agpl-3.0 |
rust-lang/sublime-rust | tests/test_context.py | 3 | 6809 | """Tests for the context commands."""
from rust_test_common import *
class TestContext(TestBase):
    """Tests for the context-menu cargo commands (test/bench/run "here",
    "at cursor", "current file") and the rust_list_messages command.

    Each public test opens a fixture file in Sublime and delegates to a
    private ``_test_*`` callback that receives the opened view.
    """
    def test_pt_to_test_name(self):
        """`_pt_to_test_name` maps any point inside a test fn to its name."""
        self._with_open_file('tests/multi-targets/tests/test_context.rs',
            self._test_pt_to_test_name)
    def _test_pt_to_test_name(self, view):
        # (function name, (start row, col), (end row, col)) — 1-based spans
        # covering each test function in the fixture file.
        expected = [
            ('test1', (3, 1), (7, 1)),
            ('expected_panic1', (8, 1), (15, 1)),
            ('test2', (16, 1), (22, 1)),
            ('test3', (22, 2), (26, 1)),
            ('test6', (36, 1), (39, 1)),
        ]
        for fn_name, (start_row, start_col), (end_row, end_col) in expected:
            start_pt = view.text_point(start_row - 1, start_col - 1)
            end_pt = view.text_point(end_row - 1, end_col - 1)
            # Every point within the function span should resolve to it.
            for pt in range(start_pt, end_pt):
                name = plugin.cargo_build._pt_to_test_name('test', pt, view)
                self.assertEqual(name, fn_name,
                    'rowcol=%r' % (view.rowcol(pt),))
    def test_cargo_test_here(self):
        """`cargo_test_here` runs only the test under the mouse position."""
        self._with_open_file('tests/multi-targets/tests/test_context.rs',
            self._test_cargo_test_here)
    def _test_cargo_test_here(self, view):
        # The command takes the click position via the 'event' argument.
        pt = view.text_point(4, 0)
        x, y = view.text_to_window(pt)
        view.window().run_command('cargo_test_here', args={
            'event': {'x': x, 'y': y}
        })
        self._get_rust_thread().join()
        output = self._get_build_output(view.window())
        self.assertRegex(output,
            r'\[Running: cargo test --test test_context --message-format=json -- --exact test1\]')
    def test_cargo_test_at_cursor(self):
        """`cargo_test_at_cursor` runs only the test under the caret."""
        self._with_open_file('tests/multi-targets/tests/test_context.rs',
            self._test_cargo_test_at_cursor)
    def _test_cargo_test_at_cursor(self, view):
        # Place the caret inside expected_panic1 before invoking the command.
        pt = view.text_point(12, 0)
        sel = view.sel()
        sel.clear()
        sel.add(sublime.Region(pt))
        view.run_command('cargo_test_at_cursor')
        self._get_rust_thread().join()
        output = self._get_build_output(view.window())
        self.assertRegex(output,
            r'\[Running: cargo test --test test_context --message-format=json -- --exact expected_panic1\]')
    def test_cargo_test_current_file(self):
        """`cargo_test_current_file` runs the whole test target of the file."""
        self._with_open_file('tests/multi-targets/tests/test_context.rs',
            self._test_cargo_test_current_file)
    def _test_cargo_test_current_file(self, view):
        view.window().run_command('cargo_test_current_file')
        self._get_rust_thread().join()
        output = self._get_build_output(view.window())
        self.assertRegex(output,
            r'\[Running: cargo test --test test_context --message-format=json\]')
    def test_cargo_bench_here(self):
        """`cargo_bench_here` runs only the bench under the mouse position."""
        self._with_open_file('tests/multi-targets/benches/bench_context.rs',
            self._test_cargo_bench_here)
    def _test_cargo_bench_here(self, view):
        pt = view.text_point(15, 0)
        x, y = view.text_to_window(pt)
        view.window().run_command('cargo_bench_here', args={
            'event': {'x': x, 'y': y}
        })
        self._get_rust_thread().join()
        output = self._get_build_output(view.window())
        self.assertRegex(output,
            r'\[Running: cargo bench --bench bench_context --message-format=json -- --exact bench2\]')
    def test_cargo_bench_at_cursor(self):
        """`cargo_bench_at_cursor` runs only the bench under the caret."""
        self._with_open_file('tests/multi-targets/benches/bench_context.rs',
            self._test_cargo_bench_at_cursor)
    def _test_cargo_bench_at_cursor(self, view):
        pt = view.text_point(15, 0)
        sel = view.sel()
        sel.clear()
        sel.add(sublime.Region(pt))
        view.run_command('cargo_bench_at_cursor')
        self._get_rust_thread().join()
        output = self._get_build_output(view.window())
        self.assertRegex(output,
            r'\[Running: cargo bench --bench bench_context --message-format=json -- --exact bench2\]')
    def test_cargo_bench_current_file(self):
        """`cargo_bench_current_file` runs the whole bench target of the file."""
        self._with_open_file('tests/multi-targets/benches/bench_context.rs',
            self._test_cargo_bench_current_file)
    def _test_cargo_bench_current_file(self, view):
        view.window().run_command('cargo_bench_current_file')
        self._get_rust_thread().join()
        output = self._get_build_output(view.window())
        self.assertRegex(output,
            r'\[Running: cargo bench --bench bench_context --message-format=json\]')
    def test_cargo_run_current_file(self):
        """`cargo_run_current_file` runs the example that the file belongs to."""
        self._with_open_file('tests/multi-targets/examples/ex1.rs',
            self._test_cargo_run_current_file)
    def _test_cargo_run_current_file(self, view):
        view.window().run_command('cargo_run_current_file')
        self._get_rust_thread().join()
        output = self._get_build_output(view.window())
        self.assertRegex(output,
            r'\[Running: cargo run --example ex1 --message-format=json\]')
    def test_rust_list_messages(self):
        """`rust_list_messages` lists build messages and jumps to the chosen one."""
        self._with_open_file('tests/message-order/examples/ex_warning1.rs',
            self._test_rust_list_messages)
    def _test_rust_list_messages(self, view):
        window = view.window()
        self._cargo_clean(view)
        window.run_command('cargo_exec', args={'command': 'auto'})
        self._get_rust_thread().join()
        # Monkey-patch show_quick_panel so the command's panel call can be
        # intercepted and answered synchronously; restore it afterwards.
        sqp = window.__class__.show_quick_panel
        window.__class__.show_quick_panel = self._quick_panel
        try:
            self._test_rust_list_messages2(view)
        finally:
            window.__class__.show_quick_panel = sqp
    def _quick_panel(self, items, on_done, flags=0,
                     selected_index=-1, on_highlighted=None):
        # Stand-in for Window.show_quick_panel: verify the offered items
        # and immediately "select" the pre-chosen index.
        self.assertEqual(items, self.quick_panel_items)
        on_done(self.quick_panel_index)
    def _test_rust_list_messages2(self, view):
        window = view.window()
        # The entries the quick panel is expected to offer, in order.
        self.quick_panel_items = [
            ['function is never used: `unused_a`',
             os.path.join('tests', 'message-order', 'examples', 'warning1.rs') + ':1'],
            ['function is never used: `unused_b`',
             os.path.join('tests', 'message-order', 'examples', 'warning1.rs') + ':5'],
            ['function is never used: `unused_in_2`',
             os.path.join('tests', 'message-order', 'examples', 'warning2.rs') + ':82'],
        ]
        self.quick_panel_index = 2
        window.run_command('rust_list_messages')
        expected_path = os.path.normpath(
            os.path.join(plugin_path, 'tests/message-order/examples/warning2.rs'))
        # Give Sublime some time to switch views.
        for n in range(5):
            new_view = window.active_view()
            if new_view.file_name() == expected_path:
                break
            time.sleep(0.5)
        else:
            self.assertEqual(new_view.file_name(), expected_path)
        new_view.run_command('close_file')
| mit |
kevinlee12/oppia | scripts/linters/test_files/invalid_datetime_now.py | 2 | 1420 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python file with invalid syntax, used by scripts/linters/
python_linter_test. This file use datetime.datetime.now() which is not allowed.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import python_utils
# NOTE(review): deliberate lint violation — this fixture must keep the
# datetime.datetime.now() call so the linter test has something to flag.
class FakeClass(python_utils.OBJECT):
    """This is a fake docstring for invalid syntax purposes."""
    def __init__(self, fake_arg):
        self.fake_arg = fake_arg
    def fake_method(self):
        """This doesn't do anything.
        Yields:
            yields(datetime.time). Yields current time.
        """
        # Function datetime.datetime.now() is not allowed to use.
        curr_time = datetime.datetime.now()
        yield curr_time
| apache-2.0 |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/rest_framework/relations.py | 5 | 19367 | # coding: utf-8
from __future__ import unicode_literals
from collections import OrderedDict
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.utils import six
from django.utils.encoding import (
python_2_unicode_compatible, smart_text, uri_to_iri
)
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import (
NoReverseMatch, Resolver404, get_script_prefix, resolve
)
from rest_framework.fields import (
Field, empty, get_attribute, is_simple_callable, iter_options
)
from rest_framework.reverse import reverse
from rest_framework.settings import api_settings
from rest_framework.utils import html
def method_overridden(method_name, klass, instance):
    """
    Determine if a method has been overridden.
    """
    base_method = getattr(klass, method_name)
    # On Python 2, class attribute access yields an unbound method; unwrap
    # to the plain function so identity comparison works on both 2 and 3.
    base_func = getattr(base_method, '__func__', base_method)
    return base_func is not getattr(instance, method_name).__func__
class Hyperlink(six.text_type):
    """
    A string like object that additionally has an associated name.
    We use this for hyperlinked URLs that may render as a named link
    in some contexts, or render as a plain URL in others.
    """
    def __new__(self, url, obj):
        # Subclassing an immutable type: the URL becomes the string value
        # and the related object is stashed for lazy name lookup.
        ret = six.text_type.__new__(self, url)
        ret.obj = obj
        return ret
    def __getnewargs__(self):
        # Support for copy/pickle. NOTE(review): this passes `self.name`
        # (the object's string form) where __new__ expects `obj`, so a
        # reconstructed copy keeps only the name, not the original object.
        return(str(self), self.name,)
    @property
    def name(self):
        # This ensures that we only called `__str__` lazily,
        # as in some cases calling __str__ on a model instances *might*
        # involve a database lookup.
        return six.text_type(self.obj)
    # Marker used by renderers to detect hyperlink values.
    is_hyperlink = True
@python_2_unicode_compatible
class PKOnlyObject(object):
    """
    This is a mock object, used for when we only need the pk of the object
    instance, but still want to return an object with a .pk attribute,
    in order to keep the same interface as a regular model instance.
    """
    def __init__(self, pk):
        # pk: the primary-key value of the real model instance.
        self.pk = pk
    def __str__(self):
        return "%s" % self.pk
# We assume that 'validators' are intended for the child serializer,
# rather than the parent serializer.
# These are the keyword arguments that `RelatedField.many_init` forwards
# from the child relation onto the wrapping `ManyRelatedField`.
MANY_RELATION_KWARGS = (
    'read_only', 'write_only', 'required', 'default', 'initial', 'source',
    'label', 'help_text', 'style', 'error_messages', 'allow_empty'
)
class RelatedField(Field):
    """
    Base class for serializer fields that represent relationships.

    Subclasses implement `to_representation`/`to_internal_value` against a
    `queryset` (or an overridden `get_queryset`). When constructed with
    `many=True`, instantiation transparently returns a `ManyRelatedField`
    wrapping a child instance of this class.
    """
    queryset = None
    # Limits for rendering choices in the browsable API's HTML select.
    html_cutoff = None
    html_cutoff_text = None
    def __init__(self, **kwargs):
        self.queryset = kwargs.pop('queryset', self.queryset)
        self.html_cutoff = kwargs.pop(
            'html_cutoff',
            self.html_cutoff or int(api_settings.HTML_SELECT_CUTOFF)
        )
        self.html_cutoff_text = kwargs.pop(
            'html_cutoff_text',
            self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)
        )
        # A writable relation needs a queryset unless get_queryset is
        # overridden; a read-only relation must not have one.
        if not method_overridden('get_queryset', RelatedField, self):
            assert self.queryset is not None or kwargs.get('read_only', None), (
                'Relational field must provide a `queryset` argument, '
                'override `get_queryset`, or set read_only=`True`.'
            )
        assert not (self.queryset is not None and kwargs.get('read_only', None)), (
            'Relational fields should not provide a `queryset` argument, '
            'when setting read_only=`True`.'
        )
        kwargs.pop('many', None)
        kwargs.pop('allow_empty', None)
        super(RelatedField, self).__init__(**kwargs)
    def __new__(cls, *args, **kwargs):
        # We override this method in order to automagically create
        # `ManyRelatedField` classes instead when `many=True` is set.
        if kwargs.pop('many', False):
            return cls.many_init(*args, **kwargs)
        return super(RelatedField, cls).__new__(cls, *args, **kwargs)
    @classmethod
    def many_init(cls, *args, **kwargs):
        """
        This method handles creating a parent `ManyRelatedField` instance
        when the `many=True` keyword argument is passed.
        Typically you won't need to override this method.
        Note that we're over-cautious in passing most arguments to both parent
        and child classes in order to try to cover the general case. If you're
        overriding this method you'll probably want something much simpler, eg:
        @classmethod
        def many_init(cls, *args, **kwargs):
            kwargs['child'] = cls()
            return CustomManyRelatedField(*args, **kwargs)
        """
        list_kwargs = {'child_relation': cls(*args, **kwargs)}
        for key in kwargs.keys():
            if key in MANY_RELATION_KWARGS:
                list_kwargs[key] = kwargs[key]
        return ManyRelatedField(**list_kwargs)
    def run_validation(self, data=empty):
        # We force empty strings to None values for relational fields.
        if data == '':
            data = None
        return super(RelatedField, self).run_validation(data)
    def get_queryset(self):
        """Return the queryset used to look up related instances."""
        queryset = self.queryset
        if isinstance(queryset, (QuerySet, Manager)):
            # Ensure queryset is re-evaluated whenever used.
            # Note that actually a `Manager` class may also be used as the
            # queryset argument. This occurs on ModelSerializer fields,
            # as it allows us to generate a more expressive 'repr' output
            # for the field.
            # Eg: 'MyRelationship(queryset=ExampleModel.objects.all())'
            queryset = queryset.all()
        return queryset
    def use_pk_only_optimization(self):
        # Subclasses that only need the pk may return True to avoid a
        # database fetch of the full related instance.
        return False
    def get_attribute(self, instance):
        if self.use_pk_only_optimization() and self.source_attrs:
            # Optimized case, return a mock object only containing the pk attribute.
            try:
                instance = get_attribute(instance, self.source_attrs[:-1])
                value = instance.serializable_value(self.source_attrs[-1])
                if is_simple_callable(value):
                    # Handle edge case where the relationship `source` argument
                    # points to a `get_relationship()` method on the model
                    value = value().pk
                return PKOnlyObject(pk=value)
            except AttributeError:
                pass
        # Standard case, return the object instance.
        return get_attribute(instance, self.source_attrs)
    def get_choices(self, cutoff=None):
        """Map representation -> display string for each related object."""
        queryset = self.get_queryset()
        if queryset is None:
            # Ensure that field.choices returns something sensible
            # even when accessed with a read-only field.
            return {}
        if cutoff is not None:
            queryset = queryset[:cutoff]
        return OrderedDict([
            (
                self.to_representation(item),
                self.display_value(item)
            )
            for item in queryset
        ])
    @property
    def choices(self):
        return self.get_choices()
    @property
    def grouped_choices(self):
        # Relational fields have no option groups; reuse the flat choices.
        return self.choices
    def iter_options(self):
        # Used by the browsable API to render a (possibly truncated) select.
        return iter_options(
            self.get_choices(cutoff=self.html_cutoff),
            cutoff=self.html_cutoff,
            cutoff_text=self.html_cutoff_text
        )
    def display_value(self, instance):
        """Return the human-readable label for an instance in choices."""
        return six.text_type(instance)
class StringRelatedField(RelatedField):
    """
    A read only field that represents its targets using their
    plain string representation.
    """
    def __init__(self, **kwargs):
        # This field never accepts input, so force read-only regardless
        # of what the caller passed.
        kwargs['read_only'] = True
        super(StringRelatedField, self).__init__(**kwargs)
    def to_representation(self, value):
        return six.text_type(value)
class PrimaryKeyRelatedField(RelatedField):
    """
    A read-write field representing the target of the relationship by its
    primary key. An optional `pk_field` serializes/deserializes the pk value.
    """
    default_error_messages = {
        'required': _('This field is required.'),
        'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
        'incorrect_type': _('Incorrect type. Expected pk value, received {data_type}.'),
    }
    def __init__(self, **kwargs):
        # Optional field used to convert the pk value itself (e.g. UUIDField).
        self.pk_field = kwargs.pop('pk_field', None)
        super(PrimaryKeyRelatedField, self).__init__(**kwargs)
    def use_pk_only_optimization(self):
        # Only the pk is needed for output, so skip fetching the instance.
        return True
    def to_internal_value(self, data):
        if self.pk_field is not None:
            data = self.pk_field.to_internal_value(data)
        try:
            return self.get_queryset().get(pk=data)
        except ObjectDoesNotExist:
            self.fail('does_not_exist', pk_value=data)
        except (TypeError, ValueError):
            self.fail('incorrect_type', data_type=type(data).__name__)
    def to_representation(self, value):
        if self.pk_field is not None:
            return self.pk_field.to_representation(value.pk)
        return value.pk
class HyperlinkedRelatedField(RelatedField):
    """
    A read-write field representing the target of the relationship as a
    hyperlink, resolved via `view_name` and `lookup_field`/`lookup_url_kwarg`.
    """
    lookup_field = 'pk'
    view_name = None
    default_error_messages = {
        'required': _('This field is required.'),
        'no_match': _('Invalid hyperlink - No URL match.'),
        'incorrect_match': _('Invalid hyperlink - Incorrect URL match.'),
        'does_not_exist': _('Invalid hyperlink - Object does not exist.'),
        'incorrect_type': _('Incorrect type. Expected URL string, received {data_type}.'),
    }
    def __init__(self, view_name=None, **kwargs):
        if view_name is not None:
            self.view_name = view_name
        assert self.view_name is not None, 'The `view_name` argument is required.'
        self.lookup_field = kwargs.pop('lookup_field', self.lookup_field)
        # URL kwarg defaults to the model lookup field name.
        self.lookup_url_kwarg = kwargs.pop('lookup_url_kwarg', self.lookup_field)
        self.format = kwargs.pop('format', None)
        # We include this simply for dependency injection in tests.
        # We can't add it as a class attributes or it would expect an
        # implicit `self` argument to be passed.
        self.reverse = reverse
        super(HyperlinkedRelatedField, self).__init__(**kwargs)
    def use_pk_only_optimization(self):
        # Only safe when the URL is built from the pk alone.
        return self.lookup_field == 'pk'
    def get_object(self, view_name, view_args, view_kwargs):
        """
        Return the object corresponding to a matched URL.
        Takes the matched URL conf arguments, and should return an
        object instance, or raise an `ObjectDoesNotExist` exception.
        """
        lookup_value = view_kwargs[self.lookup_url_kwarg]
        lookup_kwargs = {self.lookup_field: lookup_value}
        return self.get_queryset().get(**lookup_kwargs)
    def get_url(self, obj, view_name, request, format):
        """
        Given an object, return the URL that hyperlinks to the object.
        May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
        attributes are not configured to correctly match the URL conf.
        """
        # Unsaved objects will not yet have a valid URL.
        if hasattr(obj, 'pk') and obj.pk in (None, ''):
            return None
        lookup_value = getattr(obj, self.lookup_field)
        kwargs = {self.lookup_url_kwarg: lookup_value}
        return self.reverse(view_name, kwargs=kwargs, request=request, format=format)
    def to_internal_value(self, data):
        """Resolve an incoming URL string back to a model instance."""
        request = self.context.get('request', None)
        try:
            http_prefix = data.startswith(('http:', 'https:'))
        except AttributeError:
            self.fail('incorrect_type', data_type=type(data).__name__)
        if http_prefix:
            # If needed convert absolute URLs to relative path
            data = urlparse.urlparse(data).path
            prefix = get_script_prefix()
            if data.startswith(prefix):
                data = '/' + data[len(prefix):]
        # URLs may be percent/IRI encoded; normalize before resolving.
        data = uri_to_iri(data)
        try:
            match = resolve(data)
        except Resolver404:
            self.fail('no_match')
        try:
            # Versioned APIs may rewrite the expected view name.
            expected_viewname = request.versioning_scheme.get_versioned_viewname(
                self.view_name, request
            )
        except AttributeError:
            expected_viewname = self.view_name
        if match.view_name != expected_viewname:
            self.fail('incorrect_match')
        try:
            return self.get_object(match.view_name, match.args, match.kwargs)
        except (ObjectDoesNotExist, TypeError, ValueError):
            self.fail('does_not_exist')
    def to_representation(self, value):
        assert 'request' in self.context, (
            "`%s` requires the request in the serializer"
            " context. Add `context={'request': request}` when instantiating "
            "the serializer." % self.__class__.__name__
        )
        request = self.context['request']
        format = self.context.get('format', None)
        # By default use whatever format is given for the current context
        # unless the target is a different type to the source.
        #
        # Eg. Consider a HyperlinkedIdentityField pointing from a json
        # representation to an html property of that representation...
        #
        # '/snippets/1/' should link to '/snippets/1/highlight/'
        # ...but...
        # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
        if format and self.format and self.format != format:
            format = self.format
        # Return the hyperlink, or error if incorrectly configured.
        try:
            url = self.get_url(value, self.view_name, request, format)
        except NoReverseMatch:
            msg = (
                'Could not resolve URL for hyperlinked relationship using '
                'view name "%s". You may have failed to include the related '
                'model in your API, or incorrectly configured the '
                '`lookup_field` attribute on this field.'
            )
            if value in ('', None):
                value_string = {'': 'the empty string', None: 'None'}[value]
                msg += (
                    " WARNING: The value of the field on the model instance "
                    "was %s, which may be why it didn't match any "
                    "entries in your URL conf." % value_string
                )
            raise ImproperlyConfigured(msg % self.view_name)
        if url is None:
            return None
        return Hyperlink(url, value)
class HyperlinkedIdentityField(HyperlinkedRelatedField):
    """
    A read-only field that represents the identity URL for an object, itself.
    This is in contrast to `HyperlinkedRelatedField` which represents the
    URL of relationships to other objects.
    """
    def __init__(self, view_name=None, **kwargs):
        assert view_name is not None, 'The `view_name` argument is required.'
        kwargs['read_only'] = True
        # source='*' hands the whole instance to the field rather than one attr.
        kwargs['source'] = '*'
        super(HyperlinkedIdentityField, self).__init__(view_name, **kwargs)
    def use_pk_only_optimization(self):
        # We have the complete object instance already. We don't need
        # to run the 'only get the pk for this relationship' code.
        return False
class SlugRelatedField(RelatedField):
    """
    A read-write field that represents the target of the relationship
    by a unique 'slug' attribute.
    """
    default_error_messages = {
        'does_not_exist': _('Object with {slug_name}={value} does not exist.'),
        'invalid': _('Invalid value.'),
    }
    def __init__(self, slug_field=None, **kwargs):
        assert slug_field is not None, 'The `slug_field` argument is required.'
        # Name of the (unique) model attribute used for lookups and output.
        self.slug_field = slug_field
        super(SlugRelatedField, self).__init__(**kwargs)
    def to_internal_value(self, data):
        try:
            return self.get_queryset().get(**{self.slug_field: data})
        except ObjectDoesNotExist:
            self.fail('does_not_exist', slug_name=self.slug_field, value=smart_text(data))
        except (TypeError, ValueError):
            self.fail('invalid')
    def to_representation(self, obj):
        return getattr(obj, self.slug_field)
class ManyRelatedField(Field):
    """
    Relationships with `many=True` transparently get coerced into instead being
    a ManyRelatedField with a child relationship.
    The `ManyRelatedField` class is responsible for handling iterating through
    the values and passing each one to the child relationship.
    This class is treated as private API.
    You shouldn't generally need to be using this class directly yourself,
    and should instead simply set 'many=True' on the relationship.
    """
    initial = []
    default_empty_html = []
    default_error_messages = {
        'not_a_list': _('Expected a list of items but got type "{input_type}".'),
        'empty': _('This list may not be empty.')
    }
    # Limits for rendering choices in the browsable API's HTML select.
    html_cutoff = None
    html_cutoff_text = None
    def __init__(self, child_relation=None, *args, **kwargs):
        # The wrapped single-object relation; applied per item.
        self.child_relation = child_relation
        self.allow_empty = kwargs.pop('allow_empty', True)
        self.html_cutoff = kwargs.pop(
            'html_cutoff',
            self.html_cutoff or int(api_settings.HTML_SELECT_CUTOFF)
        )
        self.html_cutoff_text = kwargs.pop(
            'html_cutoff_text',
            self.html_cutoff_text or _(api_settings.HTML_SELECT_CUTOFF_TEXT)
        )
        assert child_relation is not None, '`child_relation` is a required argument.'
        super(ManyRelatedField, self).__init__(*args, **kwargs)
        self.child_relation.bind(field_name='', parent=self)
    def get_value(self, dictionary):
        # We override the default field access in order to support
        # lists in HTML forms.
        if html.is_html_input(dictionary):
            # Don't return [] if the update is partial
            if self.field_name not in dictionary:
                if getattr(self.root, 'partial', False):
                    return empty
            return dictionary.getlist(self.field_name)
        return dictionary.get(self.field_name, empty)
    def to_internal_value(self, data):
        # Reject strings and non-iterables; both would otherwise be
        # silently iterated item-by-item or crash downstream.
        if isinstance(data, type('')) or not hasattr(data, '__iter__'):
            self.fail('not_a_list', input_type=type(data).__name__)
        if not self.allow_empty and len(data) == 0:
            self.fail('empty')
        return [
            self.child_relation.to_internal_value(item)
            for item in data
        ]
    def get_attribute(self, instance):
        # Can't have any relationships if not created
        if hasattr(instance, 'pk') and instance.pk is None:
            return []
        relationship = get_attribute(instance, self.source_attrs)
        # Managers/querysets expose .all(); plain lists are returned as-is.
        return relationship.all() if hasattr(relationship, 'all') else relationship
    def to_representation(self, iterable):
        return [
            self.child_relation.to_representation(value)
            for value in iterable
        ]
    def get_choices(self, cutoff=None):
        # Choices come from the wrapped child relation.
        return self.child_relation.get_choices(cutoff)
    @property
    def choices(self):
        return self.get_choices()
    @property
    def grouped_choices(self):
        return self.choices
    def iter_options(self):
        return iter_options(
            self.get_choices(cutoff=self.html_cutoff),
            cutoff=self.html_cutoff,
            cutoff_text=self.html_cutoff_text
        )
| mit |
jurcicek/extended-hidden-vector-state-parser | semantics-4/src/dxml2txt.py | 1 | 1204 | #!/usr/bin/env python2.4
from svc.scripting import *
import sys
import codecs
from xml.dom.minidom import parse
class DXML2TXT(Script):
    """Extract normalized transcriptions from a dialogue XML file and
    write them out, one utterance per line (UTF-8)."""
    options = {
        'infile': String,
        'outfile': String,
    }
    def transformFile(self, fr, fw):
        # Parse the whole document and, for every <text type="normalized">
        # element, join its text nodes into one whitespace-separated line.
        dom = parse(fr)
        for element in dom.getElementsByTagName("text"):
            if element.getAttribute('type') == 'normalized':
                ret = []
                for node in element.childNodes:
                    if node.nodeType == node.TEXT_NODE:
                        ret.append(node.data.strip())
                print >> fw, ' '.join(ret)
    def main(self, infile=None, outfile=None):
        # '-' (or no argument) means stdin/stdout, like a Unix filter.
        if infile == '-':
            infile = None
        if outfile == '-':
            outfile = None
        if infile is None:
            infile = sys.stdin
        else:
            infile = file(infile, 'r')
        if outfile is None:
            # Wrap stdout so unicode output is encoded as UTF-8.
            outfile = codecs.getwriter('utf-8')(sys.stdout)
        else:
            outfile = codecs.open(outfile, 'w', 'utf-8')
        self.transformFile(infile, outfile)
        infile.close()
        outfile.close()
# Command-line entry point.
if __name__ == '__main__':
    s = DXML2TXT()
    s.run()
| gpl-2.0 |
renyi533/tensorflow | tensorflow/python/data/kernel_tests/prefetch_test.py | 11 | 2998 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.prefetch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class PrefetchTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for `tf.data.Dataset.prefetch`: buffer sizes, slack, cancellation."""

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(buffer_size=[-1, None, 0, 42])))
  def testBufferSize(self, buffer_size):
    # Any of these buffer sizes (AUTOTUNE/-1, None, 0, positive) must not
    # change the elements produced.
    dataset = dataset_ops.Dataset.range(10).prefetch(buffer_size=buffer_size)
    self.assertDatasetProduces(dataset, expected_output=range(10))
  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(buffer_size=[-2, -42])))
  def testInvalidBufferSize(self, buffer_size):
    # Buffer sizes below -1 are rejected when the dataset op is evaluated.
    with self.assertRaises(errors.InvalidArgumentError):
      dataset = dataset_ops.Dataset.range(10).prefetch(buffer_size=buffer_size)
      self.evaluate(dataset._variant_tensor)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              buffer_size=[-1, None, 0, 42], slack_period=[1, 8])))
  def testPrefetchWithSlack(self, buffer_size, slack_period):
    # slack_period must not affect the produced elements either.
    dataset = dataset_ops.Dataset.range(100)
    dataset = dataset_ops.PrefetchDataset(
        dataset, buffer_size, slack_period=slack_period)
    self.assertDatasetProduces(dataset, expected_output=range(100))
  @combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
  def testPrefetchCancellation(self):
    def map_py_fn(x):
      # Deliberate infinite busy loop: the element never finishes, so the
      # prefetch thread must be cancelled when the session closes.
      while x > -1:
        x = x * 1
      return x
    dataset = dataset_ops.Dataset.range(10).map(map_py_fn).prefetch(3)
    get_next = self.getNext(dataset)
    with self.cached_session() as sess:
      thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
      thread.start()
      time.sleep(0.5)
      sess.close()
      thread.join()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
yongshengwang/builthue | desktop/libs/indexer/src/indexer/views.py | 1 | 1564 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from desktop.lib.django_util import render
from indexer.management.commands import indexer_setup
LOG = logging.getLogger(__name__)
def collections(request, is_redirect=False):
  """Render the Search collections management page."""
  return render('collections.mako', request, {})
def install_examples(request, is_redirect=False):
  """Install the example search collections (POST only).

  Returns a JSON response {'status': 0 on success / -1 on error,
  'message': error description}.
  """
  result = {'status': -1, 'message': ''}
  if request.method != 'POST':
    result['message'] = _('A POST request is required.')
  else:
    try:
      indexer_setup.Command().handle_noargs()
      result['status'] = 0
    except Exception, e:
      # Best-effort: report the failure to the client instead of a 500.
      LOG.exception(e)
      result['message'] = str(e)
  return HttpResponse(json.dumps(result), mimetype="application/json")
| apache-2.0 |
Gravecorp/Gap | Gap/Lib/encodings/uu_codec.py | 383 | 3738 | """ Python 'uu_codec' Codec - UU content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com). Some details were
adapted from uu.py which was written by Lance Ellinghouse and
modified by Jack Jansen and Fredrik Lundh.
"""
import codecs, binascii
### Codec APIs
def uu_encode(input,errors='strict',filename='<data>',mode=0666):
    """ Encodes the object input and returns a tuple (output
        object, length consumed).
        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.
    """
    assert errors == 'strict'
    from cStringIO import StringIO
    from binascii import b2a_uu
    # using str() because of cStringIO's Unicode undesired Unicode behavior.
    infile = StringIO(str(input))
    outfile = StringIO()
    read = infile.read
    write = outfile.write
    # Encode
    # Header line carries the permission bits and filename, e.g. "begin 666 <data>".
    write('begin %o %s\n' % (mode & 0777, filename))
    chunk = read(45)
    # uuencoding processes the payload in 45-byte chunks (60 output chars each).
    while chunk:
        write(b2a_uu(chunk))
        chunk = read(45)
    # Trailer: a line holding an encoded zero length, then the "end" marker.
    write(' \nend\n')
    return (outfile.getvalue(), len(input))
def uu_decode(input,errors='strict'):
    """ Decodes the object input and returns a tuple (output
        object, length consumed).
        input must be an object which provides the bf_getreadbuf
        buffer slot. Python strings, buffer objects and memory
        mapped files are examples of objects providing this slot.
        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.
        Note: filename and file mode information in the input data is
        ignored.
    """
    assert errors == 'strict'
    from cStringIO import StringIO
    from binascii import a2b_uu
    # str() guards against cStringIO's undesired Unicode behavior.
    infile = StringIO(str(input))
    outfile = StringIO()
    readline = infile.readline
    write = outfile.write
    # Find start of encoded data: skip everything up to the "begin" line.
    while 1:
        s = readline()
        if not s:
            raise ValueError, 'Missing "begin" line in input data'
        if s[:5] == 'begin':
            break
    # Decode line by line until the "end" marker (or EOF, which is an error).
    while 1:
        s = readline()
        if not s or \
           s == 'end\n':
            break
        try:
            data = a2b_uu(s)
        except binascii.Error, v:
            # Workaround for broken uuencoders by /Fredrik Lundh
            # The first character encodes the byte count of the line; trim
            # the line to the expected encoded length and retry.
            nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
            data = a2b_uu(s[:nbytes])
            #sys.stderr.write("Warning: %s\n" % str(v))
        write(data)
    if not s:
        # Loop exited on EOF rather than on the "end" line.
        raise ValueError, 'Truncated input data'
    return (outfile.getvalue(), len(input))
class Codec(codecs.Codec):
    # Stateless codec interface: delegates to the module-level helpers.
    def encode(self,input,errors='strict'):
        return uu_encode(input,errors)
    def decode(self,input,errors='strict'):
        return uu_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Not truly incremental: each call encodes its input independently.
    def encode(self, input, final=False):
        return uu_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Not truly incremental: each call decodes its input independently.
    def decode(self, input, final=False):
        return uu_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream interface; encode() is inherited from Codec.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream interface; decode() is inherited from Codec.
    pass
### encodings module API
def getregentry():
    # Registry hook used by the encodings package to look up this codec.
    return codecs.CodecInfo(
        name='uu',
        encode=uu_encode,
        decode=uu_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mpl-2.0 |
ppanczyk/ansible | lib/ansible/plugins/cliconf/__init__.py | 44 | 8340 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import signal
from abc import ABCMeta, abstractmethod
from functools import wraps
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.module_utils.six import with_metaclass
try:
from scp import SCPClient
HAS_SCP = True
except ImportError:
HAS_SCP = False
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
def enable_mode(func):
    """Decorator that only runs *func* when the CLI session is already at
    an enable-mode prompt (one ending in '#'); otherwise raises
    AnsibleError.
    """
    @wraps(func)
    def _checked(self, *args, **kwargs):
        current = str(self.get_prompt()).strip()
        if current.endswith('#'):
            return func(self, *args, **kwargs)
        raise AnsibleError('operation requires privilege escalation')
    return _checked
class CliconfBase(with_metaclass(ABCMeta, object)):
    """
    A base class for implementing cli connections
    .. note:: Unlike most of Ansible, nearly all strings in
        :class:`CliconfBase` plugins are byte strings. This is because of
        how close to the underlying platform these plugins operate. Remember
        to mark literal strings as byte string (``b"string"``) and to use
        :func:`~ansible.module_utils._text.to_bytes` and
        :func:`~ansible.module_utils._text.to_text` to avoid unexpected
        problems.
    List of supported rpc's:
        :get_config: Retrieves the specified configuration from the device
        :edit_config: Loads the specified commands into the remote device
        :get: Execute specified command on remote device
        :get_capabilities: Retrieves device information and supported rpc methods
        :commit: Load configuration from candidate to running
        :discard_changes: Discard changes to candidate datastore
    Note: List of supported rpc's for remote device can be extracted from
    output of get_capabilities()
    :returns: Returns output received from remote device as byte string
    Usage:
    from ansible.module_utils.connection import Connection
    conn = Connection()
    conn.get('show lldp neighbors detail'')
    conn.get_config('running')
    conn.edit_config(['hostname test', 'netconf ssh'])
    """
    def __init__(self, connection):
        # The only state held by this base class is the owning connection.
        self._connection = connection
    def _alarm_handler(self, signum, frame):
        # SIGALRM handler installed by send_command(): a firing alarm means
        # the command exceeded its timeout.
        raise AnsibleConnectionFailure('timeout waiting for command to complete')
    def send_command(self, command, prompt=None, answer=None, sendonly=False):
        """Executes a cli command and returns the results
        This method will execute the CLI command on the connection and return
        the results to the caller. The command output will be returned as a
        string
        """
        timeout = self._connection._play_context.timeout or 30
        signal.signal(signal.SIGALRM, self._alarm_handler)
        signal.alarm(timeout)
        display.display("command: %s" % command, log_only=True)
        resp = self._connection.send(command, prompt, answer, sendonly)
        signal.alarm(0)
        return resp
    def get_prompt(self):
        """Returns the current prompt from the device"""
        return self._connection._matched_prompt
    def get_base_rpc(self):
        """Returns list of base rpc method supported by remote device"""
        return ['get_config', 'edit_config', 'get_capabilities', 'get']
    @abstractmethod
    def get_config(self, source='running', format='text'):
        """Retrieves the specified configuration from the device
        This method will retrieve the configuration specified by source and
        return it to the caller as a string. Subsequent calls to this method
        will retrieve a new configuration from the device
        :args:
            arg[0] source: Datastore from which configuration should be retrieved eg: running/candidate/startup. (optional)
                           default is running.
            arg[1] format: Output format in which configuration is retrieved
                           Note: Specified datastore should be supported by remote device.
        :kwargs:
            Keywords supported
                :command: the command string to execute
                :source: Datastore from which configuration should be retrieved
                :format: Output format in which configuration is retrieved
        :returns: Returns output received from remote device as byte string
        """
        pass
    @abstractmethod
    def edit_config(self, commands):
        """Loads the specified commands into the remote device
        This method will load the commands into the remote device. This
        method will make sure the device is in the proper context before
        send the commands (eg config mode)
        :args:
            arg[0] command: List of configuration commands
        :kwargs:
            Keywords supported
                :command: the command string to execute
        :returns: Returns output received from remote device as byte string
        """
        pass
    @abstractmethod
    def get(self, *args, **kwargs):
        """Execute specified command on remote device
        This method will retrieve the specified data and
        return it to the caller as a string.
        :args:
            arg[0] command: command in string format to be executed on remote device
            arg[1] prompt: the expected prompt generated by executing command.
                           This can be a string or a list of strings (optional)
            arg[2] answer: the string to respond to the prompt with (optional)
            arg[3] sendonly: bool to disable waiting for response, default is false (optional)
        :kwargs:
            :command: the command string to execute
            :prompt: the expected prompt generated by executing command.
                     This can be a string or a list of strings
            :answer: the string to respond to the prompt with
            :sendonly: bool to disable waiting for response
        :returns: Returns output received from remote device as byte string
        """
        pass
    @abstractmethod
    def get_capabilities(self):
        """Retrieves device information and supported
        rpc methods by device platform and return result
        as a string
        :returns: Returns output received from remote device as byte string
        """
        pass
    def commit(self, comment=None):
        """Commit configuration changes"""
        # BUGFIX: this class never stores self._play_context (__init__ only
        # keeps self._connection), so reach through the connection as
        # send_command() does.
        return self._connection.method_not_found("commit is not supported by network_os %s" % self._connection._play_context.network_os)
    def discard_changes(self):
        "Discard changes in candidate datastore"
        # BUGFIX: same as commit() -- self._play_context does not exist here.
        return self._connection.method_not_found("discard_changes is not supported by network_os %s" % self._connection._play_context.network_os)
    def put_file(self, source, destination):
        """Copies file over scp to remote device"""
        if not HAS_SCP:
            self._connection.internal_error("Required library scp is not installed.  Please install it using `pip install scp`")
        ssh = self._connection._connect_uncached()
        with SCPClient(ssh.get_transport()) as scp:
            scp.put(source, destination)
    def fetch_file(self, source, destination):
        """Fetch file over scp from remote device"""
        if not HAS_SCP:
            self._connection.internal_error("Required library scp is not installed.  Please install it using `pip install scp`")
        ssh = self._connection._connect_uncached()
        with SCPClient(ssh.get_transport()) as scp:
            scp.get(source, destination)
| gpl-3.0 |
edubrunaldi/kivy | kivy/tests/test_invalid_lang.py | 78 | 1072 |
import unittest
class InvalidLangTestCase(unittest.TestCase):
    # Regression tests: loading kv markup that references an unknown widget
    # class ("thecursor.Cursor") must raise, not silently succeed.
    def test_invalid_childname(self):
        """Unknown child widget after valid rules must raise a parser or
        factory error."""
        from kivy.lang import Builder, ParserException
        from kivy.factory import FactoryException
        try:
            Builder.load_string('''
Widget:
    FloatLayout:
        size: self.parent.size
        Button:
            text: "text"
            size_hint:(0.1, 0.1)
            pos_hint:{'x':0.45, 'y':0.45}
    thecursor.Cursor:
''')
            self.fail('Invalid children name')
        except ParserException:
            pass
        except FactoryException:
            pass
    def test_invalid_childname_before(self):
        """Unknown child widget appearing first must raise a parser error."""
        from kivy.lang import Builder, ParserException
        try:
            Builder.load_string('''
Widget:
    thecursor.Cursor:
    FloatLayout:
        size: self.parent.size
        Button:
            text: "text"
            size_hint:(0.1, 0.1)
            pos_hint:{'x':0.45, 'y':0.45}
''')
            self.fail('Invalid children name')
        except ParserException:
            pass
| mit |
elahejalalpour/ELRyu | ryu/log.py | 36 | 3413 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ryu import cfg
import inspect
import platform
import logging
import logging.config
import logging.handlers
import os
import sys
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
CONF = cfg.CONF
# Command-line options controlling log level, destinations (stderr, syslog,
# file) and log file permissions; consumed by init_log() below.
CONF.register_cli_opts([
    cfg.IntOpt('default-log-level', default=None, help='default log level'),
    cfg.BoolOpt('verbose', default=False, help='show debug output'),
    cfg.BoolOpt('use-stderr', default=True, help='log to standard error'),
    cfg.BoolOpt('use-syslog', default=False, help='output to syslog'),
    cfg.StrOpt('log-dir', default=None, help='log file directory'),
    cfg.StrOpt('log-file', default=None, help='log file name'),
    cfg.StrOpt('log-file-mode', default='0644',
               help='default log file permission'),
    cfg.StrOpt('log-config-file', default=None,
               help='Path to a logging config file to use')
])
# Temporary stderr handler installed by early_init_log() and removed again
# by init_log() once real logging is configured.
_EARLY_LOG_HANDLER = None
def early_init_log(level=None):
    """Attach a temporary stderr handler to the root logger.

    Intended for use before option parsing has run; init_log() removes
    the handler again once the configured handlers are in place.
    """
    global _EARLY_LOG_HANDLER
    handler = logging.StreamHandler(sys.stderr)
    _EARLY_LOG_HANDLER = handler
    root = logging.getLogger()
    root.addHandler(handler)
    if level is not None:
        root.setLevel(level)
def _get_log_file():
    """Return the log file path from options, or None if file logging is off.

    --log-file wins outright; otherwise --log-dir yields
    <log_dir>/<entry-script basename>.log.
    """
    if CONF.log_file:
        return CONF.log_file
    if not CONF.log_dir:
        return None
    # inspect.stack()[-1][1] is the file name of the outermost frame,
    # i.e. the script that was executed.
    script_name = os.path.basename(inspect.stack()[-1][1])
    return os.path.join(CONF.log_dir, script_name) + '.log'
def init_log():
    """Configure the root logger from the registered CONF options.

    A --log-config-file takes over completely (fileConfig); otherwise
    stderr/syslog/file handlers are attached per the individual options
    and the early stderr handler from early_init_log() is removed.
    """
    global _EARLY_LOG_HANDLER
    log = logging.getLogger()
    if CONF.log_config_file:
        # Full logging config file supersedes all the other log options.
        try:
            logging.config.fileConfig(CONF.log_config_file,
                                      disable_existing_loggers=False)
        except ConfigParser.Error as e:
            print('Failed to parse %s: %s' % (CONF.log_config_file, e),
                  file=sys.stderr)
            sys.exit(2)
        return
    if CONF.use_stderr:
        log.addHandler(logging.StreamHandler(sys.stderr))
    # Drop the temporary handler installed by early_init_log(), if any.
    if _EARLY_LOG_HANDLER is not None:
        log.removeHandler(_EARLY_LOG_HANDLER)
        _EARLY_LOG_HANDLER = None
    if CONF.use_syslog:
        # macOS exposes the syslog socket at a different path.
        if platform.system() == 'Darwin':
            address = '/var/run/syslog'
        else:
            address = '/dev/log'
        syslog = logging.handlers.SysLogHandler(address=address)
        log.addHandler(syslog)
    log_file = _get_log_file()
    if log_file is not None:
        log.addHandler(logging.handlers.WatchedFileHandler(log_file))
        # log-file-mode is given in octal notation, e.g. '0644'.
        mode = int(CONF.log_file_mode, 8)
        os.chmod(log_file, mode)
    # Level precedence: explicit default-log-level > --verbose > INFO.
    if CONF.default_log_level is not None:
        log.setLevel(CONF.default_log_level)
    elif CONF.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
| apache-2.0 |
huggingface/transformers | examples/research_projects/bert-loses-patience/run_glue_with_pabee.py | 1 | 30507 | # coding=utf-8
# Copyright 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and Microsoft Corporation.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Training and inference using the library models for sequence classification on GLUE (Bert, Albert) with PABEE."""
import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import transformers
from pabee.modeling_pabee_albert import AlbertForSequenceClassificationWithPabee
from pabee.modeling_pabee_bert import BertForSequenceClassificationWithPabee
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertTokenizer,
BertConfig,
BertTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
from transformers.trainer_utils import is_main_process
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# Maps --model_type values to their (config, PABEE model, tokenizer) classes.
MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassificationWithPabee, BertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassificationWithPabee, AlbertTokenizer),
}
def set_seed(args):
    """Seed python, numpy and torch RNGs from args.seed for reproducibility."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def train(args, train_dataset, model, tokenizer):
    """Train the model.

    Runs the GLUE fine-tuning loop: optimizer/scheduler setup with weight
    decay groups, optional apex fp16 and multi-GPU/distributed wrapping,
    resume from a checkpoint path, gradient accumulation, periodic
    logging/evaluation and checkpoint saving.

    Returns (global_step, average training loss per step).
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total optimization steps: fixed by --max_steps (epoch count derived),
    # otherwise epochs * optimizer-steps-per-epoch.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay);
    # biases and LayerNorm weights are excluded from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model,
            device_ids=[args.local_rank],
            output_device=args.local_rank,
            find_unused_parameters=True,
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        # set global_step to gobal_step of last saved checkpoint from model path
        # (checkpoint dirs are named "checkpoint-<step>")
        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
        logger.info(" Continuing training from checkpoint, will skip to saved global_step")
        logger.info(" Continuing training from epoch %d", epochs_trained)
        logger.info(" Continuing training from global step %d", global_step)
        logger.info(
            " Will skip the first %d steps in the first epoch",
            steps_trained_in_current_epoch,
        )
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained,
        int(args.num_train_epochs),
        desc="Epoch",
        disable=args.local_rank not in [-1, 0],
    )
    set_seed(args)  # Added here for reproductibility
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                "labels": batch[3],
            }
            inputs["token_type_ids"] = batch[2]
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer once every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logs = {}
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            eval_key = "eval_{}".format(key)
                            logs[eval_key] = value
                    loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs["learning_rate"] = learning_rate_scalar
                    logs["loss"] = loss_scalar
                    logging_loss = tr_loss
                    for key, value in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    print(json.dumps({**logs, **{"step": global_step}}))
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix="", patience=0):
    """Evaluate the model on the task's dev set(s) with PABEE inference.

    *patience* configures PABEE early exit on the underlying encoder;
    patience=0 disables it. MNLI is evaluated on both the matched and
    mismatched sets. Returns a dict of metric name -> value.
    """
    # Configure the PABEE patience/regression threshold on the encoder.
    if args.model_type == "albert":
        model.albert.set_regression_threshold(args.regression_threshold)
        model.albert.set_patience(patience)
        model.albert.reset_stats()
    elif args.model_type == "bert":
        model.bert.set_regression_threshold(args.regression_threshold)
        model.bert.set_patience(patience)
        model.bert.reset_stats()
    else:
        raise NotImplementedError()
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, nn.DataParallel):
            model = nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {
                    "input_ids": batch[0],
                    "attention_mask": batch[1],
                    "labels": batch[3],
                }
                inputs["token_type_ids"] = batch[2]
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and gold labels across batches.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif args.output_mode == "regression":
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                print(" %s = %s" % (key, str(result[key])))
                writer.write("%s = %s\n" % (key, str(result[key])))
    # Report PABEE early-exit statistics when checkpoint sweeping with patience on.
    if args.eval_all_checkpoints and patience != 0:
        if args.model_type == "albert":
            model.albert.log_stats()
        elif args.model_type == "bert":
            model.bert.log_stats()
        else:
            raise NotImplementedError()
    return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    """Build (or load from cache) the TensorDataset for a GLUE task.

    In distributed mode only rank 0 featurizes the data; the other ranks
    wait on a barrier and read the cached features file.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = processors[task]()
    output_mode = output_modes[task]
    # Load data features from cache or dataset file
    cached_features_file = os.path.join(
        args.data_dir,
        "cached_{}_{}_{}_{}".format(
            "dev" if evaluate else "train",
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            str(args.max_seq_length),
            str(task),
        ),
    )
    if os.path.exists(cached_features_file) and not args.overwrite_cache:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args.data_dir)
        label_list = processor.get_labels()
        if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta", "xlmroberta"]:
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        examples = (
            processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
        )
        features = convert_examples_to_features(
            examples,
            tokenizer,
            label_list=label_list,
            max_length=args.max_seq_length,
            output_mode=output_mode,
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save(features, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name.",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--patience",
default="0",
type=str,
required=False,
)
parser.add_argument(
"--regression_threshold",
default=0,
type=float,
required=False,
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from huggingface.co",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=1,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
if args.patience != "0" and args.per_gpu_eval_batch_size != 1:
raise ValueError("The eval batch size must be 1 with PABEE inference on.")
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
print("Total Model Parameters:", sum(param.numel() for param in model.parameters()))
output_layers_param_num = sum(param.numel() for param in model.classifiers.parameters())
print("Output Layers Parameters:", output_layers_param_num)
single_output_layer_param_num = sum(param.numel() for param in model.classifiers[0].parameters())
print(
"Added Output Layers Parameters:",
output_layers_param_num - single_output_layer_param_num,
)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
patience_list = [int(x) for x in args.patience.split(",")]
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
print(f"Evaluation for checkpoint {prefix}")
for patience in patience_list:
result = evaluate(args, model, tokenizer, prefix=prefix, patience=patience)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| apache-2.0 |
centaurialpha/edis | tests/test_code_analizer.py | 1 | 1175 | # -*- coding: utf-8 -*-
# EDIS - a simple cross-platform IDE for C
#
# This file is part of Edis
# Copyright 2014-2015 - Gabriel Acosta <acostadariogabriel at gmail>
# License: GPLv3 (see http://www.gnu.org/licenses/gpl.html)
import unittest
import os
from src.tools.ctags import ctags
class CodeAnalizerTestCase(unittest.TestCase):
    """Tests for ctags-based symbol extraction on a sample C file."""

    def setUp(self):
        # Sample C source shipped alongside the test suite.
        base_dir = os.path.dirname(__file__)
        self.filename = os.path.join(base_dir, "c_files", "for_ctags.c")

    def test_parse_symbols(self):
        """The symbol table maps each category to line/name pairs."""
        parsed, _combo = ctags.get_symbols(self.filename)
        expected = {
            'functions': {'7': 'main'},
            'structs': {'3': 'ufo'},
            'members': {'name': ('4', 'ufo')},
            'globals': {'UFO': '5'},
        }
        self.assertEqual(parsed, expected)

    def test_parse_symbols_combo(self):
        """The combo view maps line numbers to (signature, kind) tuples."""
        _symbols, combo = ctags.get_symbols(self.filename)
        expected = {
            7: ('main( int argc, char** argv )', 'function'),
            3: ('ufo', 'struct'),
        }
        self.assertEqual(combo, expected)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
therealbnut/swift | utils/swift_build_support/swift_build_support/which.py | 39 | 1478 | # swift_build_support/which.py - shutil.which() for Python 2.7 -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# A naive reimplementation of shutil.which() for Python 2.7. This can be
# removed if shutil.which() is backported, or if the Swift build toolchain
# migrates completely to Python 3.3+.
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import
from . import cache_util
from . import shell
@cache_util.cached
def which(cmd):
    """
    Return the path to the executable that would run for the given cmd,
    or None when no such executable exists on PATH.

    Python 3.3+ offers this via shutil.which(); see
    https://docs.python.org/3.3/library/shutil.html#shutil.which
    We keep our own implementation because shutil.which() was never
    backported to Python 2.7, which we still support.
    """
    # Delegate to the system `which` binary; optional=True makes the
    # capture return None instead of raising when cmd is not found.
    found = shell.capture(['which', cmd],
                          dry_run=False, echo=False, optional=True)
    # `which` prints the path with a trailing newline; strip it.
    return None if found is None else found.rstrip()
| apache-2.0 |
ZhangXinNan/tensorflow | tensorflow/python/kernel_tests/batch_gather_op_test.py | 2 | 4708 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# Dtypes exercised by every GatherTest case below; complex dtypes get an
# index-dependent imaginary part in _buildParams so wrong gathers are visible.
_TEST_TYPES = (dtypes.int64, dtypes.float32,
               dtypes.complex64, dtypes.complex128)
class GatherTest(test.TestCase):
  """Unit tests for array_ops.batch_gather across dtypes, ranks and shapes."""
  def _buildParams(self, data, dtype):
    """Cast `data` to `dtype`; complex dtypes get a value-dependent imaginary part."""
    data = data.astype(dtype.as_numpy_dtype)
    # For complex types, add an index-dependent imaginary component so we can
    # tell we got the right value.
    if dtype.is_complex:
      return data + 10j * data
    return data
  def testSimpleGather(self):
    """Gather scalars from a 1-D params tensor with a 1-D index list."""
    data = np.array([0, 1, 2, 3, 7, 5, 8, 9, 10, 11, 15, 13])
    indices = [3, 4]
    with self.test_session(use_gpu=True):
      for dtype in _TEST_TYPES:
        params_np = self._buildParams(data, dtype)
        params = constant_op.constant(params_np)
        indices_tf = constant_op.constant(indices)
        gather_t = array_ops.batch_gather(params, indices_tf)
        expected_result = np.array([3, 7])
        np_val = self._buildParams(expected_result, dtype)
        gather_val = gather_t.eval()
        self.assertAllEqual(np_val, gather_val)
        self.assertEqual(np_val.shape, gather_t.get_shape())
  def test2DArray(self):
    """Batched gather: one index per batch row of a 2-D params tensor."""
    data = np.array([[0, 1, 2, 3, 7, 5], [8, 9, 10, 11, 15, 13]])
    indices = [[3], [4]]
    with self.test_session(use_gpu=True):
      for dtype in _TEST_TYPES:
        params_np = self._buildParams(data, dtype)
        params = constant_op.constant(params_np)
        indices_tf = constant_op.constant(indices)
        gather_t = array_ops.batch_gather(params, indices_tf)
        expected_result = np.array([[3], [15]])
        np_val = self._buildParams(expected_result, dtype)
        gather_val = gather_t.eval()
        self.assertAllEqual(np_val, gather_val)
        self.assertEqual(np_val.shape, gather_t.get_shape())
  def testHigherRank(self):
    """Rank-3 params gathered with rank-3 indices along the last axis."""
    data = np.array([[[0, 1, 2], [3, 7, 5]], [[8, 9, 10], [11, 15, 13]]])
    indices = [[[2, 0], [1, 2]], [[2, 0], [0, 1]]]
    with self.test_session(use_gpu=True):
      for dtype in _TEST_TYPES:
        params_np = self._buildParams(data, dtype)
        params = constant_op.constant(params_np)
        indices_tf = constant_op.constant(indices)
        gather_t = array_ops.batch_gather(params, indices_tf)
        gather_val = gather_t.eval()
        expected_result = np.array([[[2, 0], [7, 5]], [[10, 8], [11, 15]]])
        np_val = self._buildParams(expected_result, dtype)
        self.assertAllEqual(np_val, gather_val)
        self.assertEqual(np_val.shape, gather_t.get_shape())
  def testString(self):
    """batch_gather also works on string tensors."""
    params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
    with self.test_session():
      indices_tf = constant_op.constant([1])
      self.assertAllEqual([[b"qwer", b"uiop"]],
                          array_ops.batch_gather(params, indices_tf).eval())
  def testUnknownIndices(self):
    """Static shape inference with a fully-unknown indices placeholder."""
    params = constant_op.constant([[0, 1, 2]])
    indices = array_ops.placeholder(dtypes.int32, shape=[None, None])
    gather_t = array_ops.batch_gather(params, indices)
    self.assertEqual([1, None], gather_t.get_shape().as_list())
  def testBadIndicesCPU(self):
    """Out-of-range indices raise an op error on CPU."""
    with self.test_session(use_gpu=False):
      params = [[0, 1, 2], [3, 4, 5]]
      with self.assertRaisesOpError(r"indices\[0\] = 7 is not in \[0, 2\)"):
        array_ops.batch_gather(params, [7]).eval()
  def testEmptySlices(self):
    """Gathering from params with zero-sized trailing dims yields empty slices."""
    with self.test_session(use_gpu=True):
      for dtype in _TEST_TYPES:
        for itype in np.int32, np.int64:
          params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
          indices = np.array([3, 4], dtype=itype)
          gather = array_ops.batch_gather(params, indices)
          self.assertAllEqual(gather.eval(), np.zeros((2, 0, 0)))
if __name__ == "__main__":
test.main()
| apache-2.0 |
versatica/mediasoup | worker/deps/gyp/test/variables/filelist/gyptest-filelist-golden.py | 12 | 1625 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<|(list.txt ...)' syntax commands.
"""
from __future__ import print_function
import os
import sys
import TestGyp
# Generate gypd output and compare it (plus the produced files) against
# checked-in golden copies.
test = TestGyp.TestGyp(format='gypd')
expect = test.read('filelist.gyp.stdout')
if sys.platform == 'win32':
  # Windows paths and line endings differ; normalise the expectation.
  expect = expect.replace('/', r'\\').replace('\r\n', '\n')
test.run_gyp('src/filelist.gyp',
             '--debug', 'variables',
             stdout=expect, ignore_line_numbers=True)
# Verify the filelist.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('src/filelist.gypd').replace(
    '\r', '').replace('\\\\', '/')
expect = test.read('filelist.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
  print("Unexpected contents of `src/filelist.gypd'")
  test.diff(expect, contents, 'src/filelist.gypd ')
  test.fail_test()
# The <|(list.txt ...) expansion should also have written this file.
contents = test.read('src/names.txt')
expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
if not test.match(contents, expect):
  print("Unexpected contents of `src/names.txt'")
  test.diff(expect, contents, 'src/names.txt ')
  test.fail_test()
test.pass_test()
| isc |
woutdenolf/spectrocrunch | scraps/test.sage.py | 1 | 2169 | # -*- coding: utf-8 -*-
from sage.all_cmdline import * # import sage library
# NOTE: this file is the output of the Sage preparser run on a .sage
# script -- the _sage_const_* bindings and __tmp__ assignments are
# machine-generated and intentionally left as-is.
_sage_const_2 = Integer(2)
_sage_const_1 = Integer(1)
_sage_const_0 = Integer(0)
_sage_const_4 = Integer(4) #!/usr/bin/env sage
from sage.all import *
# Number of layers
var("m")
assume(m, "integer")
assume(m > _sage_const_0)
# Number of interactions
var("n")
assume(n, "integer")
assume(n > _sage_const_0)
# Iterate over layers
var("l")
assume(l, "integer")
assume(l > _sage_const_0)
assume(l <= m)
# Abstract functions
mu = function("mu")
muF = function("muF")
rho = function("rho")
c = function("c")
__tmp__ = var("x,y")
# Sign helper: +1 when y >= x, -1 otherwise.
s = symbolic_expression(_sage_const_1 if y >= x else -_sage_const_1).function(x, y)
w = function("w")
En = function("En")
d = function("d")
# Interaction cross-section
__tmp__ = var("i,E,theta,phi")
muint = symbolic_expression(muF(i) / (_sage_const_4 * pi)).function(i, E, theta, phi)
# Transmission
__tmp__ = var("x,y,a,b,E")
A = symbolic_expression(
    mu(a, E) * rho(a) * s(x, y) * (x - c(a))
    - mu(b, E) * rho(b) * s(x, y) * (y - c(b))
    - sum(mu(l, E) * rho(l) * d(l), l, a + _sage_const_1, b - _sage_const_1)
).function(x, y, a, b, E)
__tmp__ = var("x,y,a,b,alpha,E")
T = symbolic_expression(exp(-A(x, y, a, b, E) / cos(alpha))).function(
    x, y, a, b, alpha, E
)
# Interaction probability
__tmp__ = var("j,E,z,alpha,theta,phi")
P = symbolic_expression(
    w(j, z) * rho(z) / cos(alpha) * muint(j, E, theta, phi)
).function(j, E, z, alpha, theta, phi)
# Number of interactions
n = _sage_const_1
J = [None] * (n + _sage_const_2)
J[_sage_const_0] = var("J_0")
E = list(var("E_%d" % i) for i in (ellipsis_iter(_sage_const_0, Ellipsis, n)))
z = list(var("z_%d" % i) for i in (ellipsis_iter(_sage_const_0, Ellipsis, n)))
z_0 = _sage_const_0
alpha = list(
    var("alpha_%d%d" % (i, i + _sage_const_1))
    for i in (ellipsis_iter(_sage_const_0, Ellipsis, n))
)
# First-order interaction term: attenuation on the way in times the
# interaction probability at depth x.
J[_sage_const_1] = (
    J[_sage_const_0]
    * T(x, y, alpha[_sage_const_0], E[_sage_const_0])
    * P(j, E[_sage_const_0], x, alpha[_sage_const_0], theta, phi)
).function(j, x, y, theta, phi)
print(J[_sage_const_1](j, z[_sage_const_0], z[_sage_const_1], theta, phi))
print(z_0)
| mit |
brainpower/arch-installer | main.py | 2 | 1346 | #!/usr/bin/env python3
# Copyright (C) 2013 anonymous <brainpower@gulli.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#TODO:
# - make every page of installergui a own class
# - noconfirm when installing additional pkgs?
import sys, os, threading
from gi.repository import Gtk, Vte, GLib, Gdk, GObject
from archinstallergui import ArchInstallerGui
def feed_command(cmd):
    """Write *cmd* to the Vte terminal and return the write result.

    NOTE(review): `term` is not defined anywhere in this module -- as
    written, calling this raises NameError. Presumably a module-global
    Vte terminal was intended; verify against callers.
    """
    return term.feed_child(cmd, len(cmd))
def feed_archey(bt):
    """GTK callback: run `archey` in the terminal via feed_command.

    `bt` is the emitting widget (unused), per the GTK signal signature.
    """
    feed_command("archey\n")
def main():
    """Start the GTK installer GUI and block until it is closed."""
    # GLib/GDK threading must be initialised before GTK is used from
    # worker threads.
    GObject.threads_init()
    Gdk.threads_init()
    win = ArchInstallerGui()
    win.resize(600,700)
    # Minimum window size; the assistant layout needs at least this much.
    win.set_size_request(1024,756)
    # Quit the main loop whichever way the assistant is dismissed.
    win.connect("cancel", Gtk.main_quit)
    win.connect("close", Gtk.main_quit)
    win.show_all()
    sys.exit(Gtk.main())
if __name__ == "__main__":
main()
| gpl-3.0 |
mlcommons/inference | language/bert/run.py | 1 | 4483 | # coding=utf-8
# Copyright 2021 Arm Limited and affiliates.
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.getcwd())
import argparse
import mlperf_loadgen as lg
import subprocess
def get_args(argv=None):
    """Parse command-line options for the BERT benchmark runner.

    Args:
        argv: Optional list of argument strings to parse. Defaults to
            None, in which case ``sys.argv[1:]`` is used -- so existing
            callers (``get_args()``) behave exactly as before. Passing a
            list makes the parser usable from tests and other programs.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--backend", choices=["tf","pytorch","onnxruntime","tf_estimator"], default="tf", help="Backend")
    parser.add_argument("--scenario", choices=["SingleStream", "Offline", "Server", "MultiStream"], default="Offline", help="Scenario")
    parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
    parser.add_argument("--quantized", action="store_true", help="use quantized model (only valid for onnxruntime backend)")
    parser.add_argument("--profile", action="store_true", help="enable profiling (only valid for onnxruntime backend)")
    parser.add_argument("--mlperf_conf", default="build/mlperf.conf", help="mlperf rules config")
    parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")
    parser.add_argument("--max_examples", type=int, help="Maximum number of examples to consider (not limited by default)")
    args = parser.parse_args(argv)
    return args
# Map CLI scenario names to LoadGen TestScenario enum values.
scenario_map = {
    "SingleStream": lg.TestScenario.SingleStream,
    "Offline": lg.TestScenario.Offline,
    "Server": lg.TestScenario.Server,
    "MultiStream": lg.TestScenario.MultiStream
}
def main():
    """Build the SUT for the chosen backend and run the LoadGen test."""
    args = get_args()
    # Backend selection: quantization and profiling are onnxruntime-only
    # features, so reject them for the other backends up front.
    if args.backend == "pytorch":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from pytorch_SUT import get_pytorch_sut
        sut = get_pytorch_sut(args)
    elif args.backend == "tf":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from tf_SUT import get_tf_sut
        sut = get_tf_sut(args)
    elif args.backend == "tf_estimator":
        assert not args.quantized, "Quantized model is only supported by onnxruntime backend!"
        assert not args.profile, "Profiling is only supported by onnxruntime backend!"
        from tf_estimator_SUT import get_tf_estimator_sut
        sut = get_tf_estimator_sut()
    elif args.backend == "onnxruntime":
        from onnxruntime_SUT import get_onnxruntime_sut
        sut = get_onnxruntime_sut(args)
    else:
        raise ValueError("Unknown backend: {:}".format(args.backend))
    # LoadGen settings: base rules from mlperf_conf, user overrides after.
    settings = lg.TestSettings()
    settings.scenario = scenario_map[args.scenario]
    settings.FromConfig(args.mlperf_conf, "bert", args.scenario)
    settings.FromConfig(args.user_conf, "bert", args.scenario)
    if args.accuracy:
        settings.mode = lg.TestMode.AccuracyOnly
    else:
        settings.mode = lg.TestMode.PerformanceOnly
    log_path = "build/logs"
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_output_settings = lg.LogOutputSettings()
    log_output_settings.outdir = log_path
    log_output_settings.copy_summary_to_stdout = True
    log_settings = lg.LogSettings()
    log_settings.log_output = log_output_settings
    log_settings.enable_trace = True
    print("Running LoadGen test...")
    lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)
    # In accuracy mode, post-process the LoadGen log into a SQuAD score.
    if args.accuracy:
        cmd = "python3 {:}/accuracy-squad.py {}".format(os.path.dirname(os.path.abspath(__file__)), '--max_examples={}'.format(args.max_examples) if args.max_examples else '')
        subprocess.check_call(cmd, shell=True)
    print("Done!")
    print("Destroying SUT...")
    lg.DestroySUT(sut.sut)
    print("Destroying QSL...")
    lg.DestroyQSL(sut.qsl.qsl)
if __name__ == "__main__":
main()
| apache-2.0 |
runningwolf666/bcloud | bcloud/decoder.py | 10 | 1642 |
# Copyright (C) 2014-2015 LiuLang <gsushzhsosgsu@gmail.com>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html
import base64
import traceback
from bcloud.log import logger
def decode_flashget(link):
    """Decode a flashget:// link to its plain URL.

    The base64 payload sits between the 11-char ``flashget://`` prefix
    and a 7-char trailer; the decoded text wraps the URL in 10-char
    ``[FLASHGET]`` markers on both sides.
    """
    try:
        # base64.decodestring() was removed in Python 3.9; decodebytes()
        # (available since 3.1) is the drop-in replacement.
        text = base64.decodebytes(link[11:len(link)-7].encode()).decode()
    except ValueError:
        logger.warn(traceback.format_exc())
        # Fall back to GBK for links produced by legacy Chinese clients.
        text = base64.decodebytes(link[11:len(link)-7].encode()).decode('gbk')
    return text[10:len(text)-10]
def decode_thunder(link):
    """Decode a thunder:// link to its plain URL.

    The payload after the 10-char ``thunder://`` prefix is base64 of
    ``'AA' + url + 'ZZ'``.  Links pointing at 127.0.0.1 are rejected by
    returning ''.
    """
    # 'QUFodHRwOi8vMTI3LjAuMC4' is base64 for 'AAhttp://127.0.0.' -- a
    # localhost link we refuse to decode.  The original compared the
    # full link (which always starts with 'thunder://') against this
    # bare-base64 prefix, so the filter could never match; compare the
    # payload instead.
    if link[10:].startswith('QUFodHRwOi8vMTI3LjAuMC4'):
        return ''
    try:
        # base64.decodestring() was removed in Python 3.9; decodebytes()
        # is the drop-in replacement.  GBK first: these links commonly
        # come from Chinese clients.
        text = base64.decodebytes(link[10:].encode()).decode('gbk')
    except ValueError:
        logger.warn(traceback.format_exc())
        text = base64.decodebytes(link[10:].encode()).decode()
    return text[2:-2]
def decode_qqdl(link):
    """Decode a qqdl:// link: the payload after the 7-char prefix is
    base64 of the plain URL."""
    try:
        # base64.decodestring() was removed in Python 3.9; decodebytes()
        # is the drop-in replacement.
        return base64.decodebytes(link[7:].encode()).decode()
    except ValueError:
        logger.warn(traceback.format_exc())
        # Fall back to GBK for links produced by legacy Chinese clients.
        return base64.decodebytes(link[7:].encode()).decode('gbk')
# Dispatch table: first 7 characters of the link (lower-cased) -> decoder.
# 'flashge' is deliberately truncated to 7 chars to match the link[:7]
# key computed in decode().
_router = {
    'flashge': decode_flashget,
    'thunder': decode_thunder,
    'qqdl://': decode_qqdl,
}
def decode(link):
    """Dispatch *link* to the matching protocol decoder.

    Returns the decoded plain URL, or '' when the link is malformed,
    uses an unknown protocol, or fails to decode.
    """
    if not isinstance(link, str) or len(link) < 10:
        logger.error('unknown link: %s' % link)
        return ''
    prefix = link[:7].lower()
    handler = _router.get(prefix)
    if handler is None:
        logger.warn('unknown protocol: %s' % link)
        return ''
    try:
        return handler(link)
    except ValueError:
        logger.error(traceback.format_exc())
        return ''
| gpl-3.0 |
resalisbury/angular | scripts/ci/travis_after_all.py | 179 | 3535 | import os
import json
import time
import logging
try:
import urllib.request as urllib2
except ImportError:
import urllib2
log = logging.getLogger("travis.leader")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
TRAVIS_JOB_NUMBER = 'TRAVIS_JOB_NUMBER'
TRAVIS_BUILD_ID = 'TRAVIS_BUILD_ID'
POLLING_INTERVAL = 'LEADER_POLLING_INTERVAL'
build_id = os.getenv(TRAVIS_BUILD_ID)
polling_interval = int(os.getenv(POLLING_INTERVAL, '5'))
#assume, first job is the leader
is_leader = lambda job_number: job_number.endswith('.1')
if not os.getenv(TRAVIS_JOB_NUMBER):
# seems even for builds with only one job, this won't get here
log.fatal("Don't use defining leader for build without matrix")
exit(1)
elif is_leader(os.getenv(TRAVIS_JOB_NUMBER)):
log.info("This is a leader")
else:
#since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_MINION=YES")
log.info("This is a minion")
exit(0)
class MatrixElement(object):
    """One job of the Travis build matrix, summarised from the API JSON."""

    def __init__(self, json_raw):
        self.number = json_raw['number']
        self.allow_failure = json_raw['allow_failure']
        # The API reports a finish timestamp once the job has completed.
        self.is_finished = json_raw['finished_at'] is not None
        # Travis uses result == 0 for success.
        self.is_succeeded = json_raw['result'] == 0
        self.is_leader = is_leader(self.number)
def matrix_snapshot():
    """
    Fetch the current build state from the Travis API and wrap every
    matrix job in a MatrixElement.
    :return: Matrix List
    """
    response = urllib2.build_opener().open("https://api.travis-ci.org/builds/{0}".format(build_id)).read()
    raw_json = json.loads(response)
    # NOTE: despite the name, this list still contains the leader job.
    matrix_without_leader = [MatrixElement(element) for element in raw_json["matrix"]]
    return matrix_without_leader
def wait_others_to_finish():
    """Poll the build matrix until every non-leader, non-allow-failure
    job has finished.

    NOTE(review): relies on the Python 2 `reduce` builtin (NameError on
    Python 3 without `from functools import reduce`); the script is
    Python-2-era throughout.
    """
    def others_finished():
        """
        Dumps others to finish
        Leader cannot finish, it is working now
        :return: tuple(True or False, List of not finished jobs)
        """
        snapshot = matrix_snapshot()
        finished = [el.is_finished for el in snapshot if not (el.is_leader or el.allow_failure)]
        return reduce(lambda a, b: a and b, finished), [el.number for el in snapshot if
                                                        not el.is_leader and not el.is_finished]
    while True:
        finished, waiting_list = others_finished()
        if finished: break
        log.info("Leader waits for minions {0}...".format(waiting_list)) # just in case do not get "silence timeout"
        time.sleep(polling_interval)
# Leader-only tail: wait for the minions, then aggregate their results
# into BUILD_AGGREGATE_STATUS and export it back to the calling shell.
try:
    wait_others_to_finish()
    final_snapshot = matrix_snapshot()
    log.info("Final Results: {0}".format([(e.number, e.is_succeeded, e.allow_failure) for e in final_snapshot]))
    BUILD_AGGREGATE_STATUS = 'BUILD_AGGREGATE_STATUS'
    others_snapshot = [el for el in final_snapshot if not (el.is_leader or el.allow_failure)]
    # NOTE(review): `reduce` here is the Python 2 builtin.
    if reduce(lambda a, b: a and b, [e.is_succeeded for e in others_snapshot]):
        os.environ[BUILD_AGGREGATE_STATUS] = "others_succeeded"
    elif reduce(lambda a, b: a and b, [not e.is_succeeded for e in others_snapshot]):
        log.error("Others Failed")
        os.environ[BUILD_AGGREGATE_STATUS] = "others_failed"
    else:
        log.warn("Others Unknown")
        os.environ[BUILD_AGGREGATE_STATUS] = "unknown"
    #since python is subprocess, env variables are exported back via file
    with open(".to_export_back", "w") as export_var:
        export_var.write("BUILD_LEADER=YES {0}={1}".format(BUILD_AGGREGATE_STATUS, os.environ[BUILD_AGGREGATE_STATUS]))
except Exception as e:
    log.fatal(e)
| apache-2.0 |
kamalx/edx-platform | common/test/acceptance/accessibility/test_lms_dashboard_axs.py | 68 | 1293 | """
Accessibility tests for LMS dashboard page.
Run just this test with:
SELENIUM_BROWSER=phantomjs paver test_bokchoy -d accessibility -t test_lms_dashboard_axs.py
"""
from ..tests.lms.test_lms_dashboard import BaseLmsDashboardTest
class LmsDashboardAxsTest(BaseLmsDashboardTest):
    """
    Accessibility (axs) audit tests for the LMS student dashboard.
    """
    def test_dashboard_course_listings_axs(self):
        """
        Audit the course listings section of the dashboard.
        """
        listings = self.dashboard_page.get_course_listings()
        self.assertEqual(len(listings), 1)
        audit_results = self.dashboard_page.do_axs_audit()
        # Exactly one page was audited in this session.
        self.assertEqual(1, len(audit_results))
        page_result = audit_results[0]
        # The page must be free of accessibility errors.
        self.assertEqual(0, len(page_result.errors))
        # The page currently carries exactly two known warnings...
        self.assertEqual(2, len(page_result.warnings))
        # ...and both must be of the expected focus/color categories.
        expected_prefixes = ('Warning: AX_FOCUS_01', 'Warning: AX_COLOR_01',)
        for message in page_result.warnings:
            self.assertTrue(
                message.startswith(expected_prefixes),
                msg="Unexpected warning: {}".format(message))
| agpl-3.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.8/resnet-tpuv2-8/code/resnet/model/models/official/mnist/dataset.py | 7 | 4116 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tf.data.Dataset interface to the MNIST dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import shutil
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read32(bytestream):
  """Read 4 bytes from bytestream as an unsigned 32-bit integer."""
  # The MNIST file format stores integers big-endian.
  raw = bytestream.read(4)
  big_endian_uint32 = np.dtype(np.uint32).newbyteorder('>')
  return np.frombuffer(raw, dtype=big_endian_uint32)[0]
def check_image_file_header(filename):
  """Validate that filename corresponds to images for the MNIST dataset."""
  with tf.gfile.Open(filename, 'rb') as f:
    # Header layout: magic, image count, rows, cols (all big-endian u32).
    magic = read32(f)
    read32(f)  # image count -- read to advance the stream, not validated
    rows = read32(f)
    cols = read32(f)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST file %s' % (magic, f.name))
    if rows != 28 or cols != 28:
      raise ValueError(
          'Invalid MNIST file %s: Expected 28x28 images, found %dx%d'
          % (f.name, rows, cols))
def check_labels_file_header(filename):
  """Validate that filename corresponds to labels for the MNIST dataset."""
  with tf.gfile.Open(filename, 'rb') as f:
    # Header layout: magic then item count (both big-endian u32).
    magic = read32(f)
    read32(f)  # item count -- read to advance the stream, not validated
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST file %s' % (magic, f.name))
def download(directory, filename):
  """Download (and unzip) a file from the MNIST dataset if not already done."""
  filepath = os.path.join(directory, filename)
  # Already downloaded and decompressed: nothing to do.
  if tf.gfile.Exists(filepath):
    return filepath
  if not tf.gfile.Exists(directory):
    tf.gfile.MakeDirs(directory)
  # CVDF mirror of http://yann.lecun.com/exdb/mnist/
  url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
  # Download to a temp file first, then decompress into the target path.
  _, zipped_filepath = tempfile.mkstemp(suffix='.gz')
  print('Downloading %s to %s' % (url, zipped_filepath))
  urllib.request.urlretrieve(url, zipped_filepath)
  with gzip.open(zipped_filepath, 'rb') as f_in, \
      tf.gfile.Open(filepath, 'wb') as f_out:
    shutil.copyfileobj(f_in, f_out)
  os.remove(zipped_filepath)
  return filepath
def dataset(directory, images_file, labels_file):
  """Download and parse MNIST dataset.

  Returns a tf.data.Dataset of (image, label) pairs, where each image is
  a flat [784] float32 tensor in [0, 1] and each label an int32 scalar.
  """
  images_file = download(directory, images_file)
  labels_file = download(directory, labels_file)
  # Fail fast on corrupt or mismatched files.
  check_image_file_header(images_file)
  check_labels_file_header(labels_file)
  def decode_image(image):
    # Normalize from [0, 255] to [0.0, 1.0]
    image = tf.decode_raw(image, tf.uint8)
    image = tf.cast(image, tf.float32)
    image = tf.reshape(image, [784])
    return image / 255.0
  def decode_label(label):
    label = tf.decode_raw(label, tf.uint8)  # tf.string -> [tf.uint8]
    label = tf.reshape(label, [])  # label is a scalar
    return tf.to_int32(label)
  # header_bytes skips the file headers validated above.
  images = tf.data.FixedLengthRecordDataset(
      images_file, 28 * 28, header_bytes=16).map(decode_image)
  labels = tf.data.FixedLengthRecordDataset(
      labels_file, 1, header_bytes=8).map(decode_label)
  return tf.data.Dataset.zip((images, labels))
def train(directory):
  """Return a tf.data.Dataset over the MNIST training split."""
  images = 'train-images-idx3-ubyte'
  labels = 'train-labels-idx1-ubyte'
  return dataset(directory, images, labels)
def test(directory):
  """Return a tf.data.Dataset over the MNIST test split."""
  images = 't10k-images-idx3-ubyte'
  labels = 't10k-labels-idx1-ubyte'
  return dataset(directory, images, labels)
| apache-2.0 |
bzennn/blog_flask | python/lib/python2.7/site-packages/setuptools/namespaces.py | 196 | 3199 | import os
from distutils import log
import itertools
from setuptools.extern.six.moves import map
flatten = itertools.chain.from_iterable
class Installer:
    """Mixin for install commands that writes (and removes) the
    ``-nspkg.pth`` file used to bootstrap declared namespace packages
    when the interpreter starts up."""

    # Suffix appended to the egg/dist basename to form the .pth filename.
    nspkg_ext = '-nspkg.pth'

    def install_namespaces(self):
        """Write a -nspkg.pth file with one bootstrap line per namespace
        package declared by the distribution; no-op if none are declared."""
        nsp = self._get_all_ns_packages()
        if not nsp:
            return
        filename, ext = os.path.splitext(self._get_target())
        filename += self.nspkg_ext
        self.outputs.append(filename)
        log.info("Installing %s", filename)
        lines = map(self._gen_nspkg_line, nsp)
        if self.dry_run:
            # always generate the lines, even in dry run
            # (six.moves.map is lazy, so force evaluation without writing)
            list(lines)
            return
        with open(filename, 'wt') as f:
            f.writelines(lines)

    def uninstall_namespaces(self):
        """Remove the previously installed -nspkg.pth file, if present."""
        filename, ext = os.path.splitext(self._get_target())
        filename += self.nspkg_ext
        if not os.path.exists(filename):
            return
        log.info("Removing %s", filename)
        os.remove(filename)

    def _get_target(self):
        # Base path (extension stripped by callers) used to derive the
        # .pth filename; subclasses may override (see DevelopInstaller).
        return self.target

    # %-template rendered by _gen_nspkg_line into one line of the .pth file.
    # Each string is a statement; they are joined with ';' at render time.
    _nspkg_tmpl = (
        "import sys, types, os",
        "has_mfs = sys.version_info > (3, 5)",
        "p = os.path.join(%(root)s, *%(pth)r)",
        "importlib = has_mfs and __import__('importlib.util')",
        "has_mfs and __import__('importlib.machinery')",
        "m = has_mfs and "
        "sys.modules.setdefault(%(pkg)r, "
        "importlib.util.module_from_spec("
        "importlib.machinery.PathFinder.find_spec(%(pkg)r, "
        "[os.path.dirname(p)])))",
        "m = m or "
        "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
        "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
        "(p not in mp) and mp.append(p)",
    )
    "lines for the namespace installer"

    _nspkg_tmpl_multi = (
        'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
    )
    "additional line(s) when a parent package is indicated"

    def _get_root(self):
        # Expression evaluated *inside* the generated .pth file; it resolves
        # to the site directory the .pth is being processed from.
        return "sys._getframe(1).f_locals['sitedir']"

    def _gen_nspkg_line(self, pkg):
        """Render one semicolon-joined bootstrap line for package ``pkg``."""
        # ensure pkg is not a unicode string under Python 2.7
        pkg = str(pkg)
        pth = tuple(pkg.split('.'))
        root = self._get_root()
        tmpl_lines = self._nspkg_tmpl
        parent, sep, child = pkg.rpartition('.')
        if parent:
            tmpl_lines += self._nspkg_tmpl_multi
        # locals() supplies root/pth/pkg/parent/child to the %-template.
        return ';'.join(tmpl_lines) % locals() + '\n'

    def _get_all_ns_packages(self):
        """Return sorted list of all package namespaces"""
        pkgs = self.distribution.namespace_packages or []
        return sorted(flatten(map(self._pkg_names, pkgs)))

    @staticmethod
    def _pkg_names(pkg):
        """
        Given a namespace package, yield the components of that
        package.
        >>> names = Installer._pkg_names('a.b.c')
        >>> set(names) == set(['a', 'a.b', 'a.b.c'])
        True
        """
        parts = pkg.split('.')
        while parts:
            yield '.'.join(parts)
            parts.pop()
class DevelopInstaller(Installer):
    """Installer variant used by the ``develop`` command: the .pth file is
    derived from the egg-link, and the root is the literal egg path."""

    def _get_root(self):
        path = str(self.egg_path)
        return repr(path)

    def _get_target(self):
        return self.egg_link
| gpl-3.0 |
akaihola/django | django/contrib/gis/tests/geo3d/tests.py | 94 | 11317 | from __future__ import absolute_import
import os
import re
from django.utils.unittest import TestCase
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, Point, Polygon
from django.contrib.gis.utils import LayerMapping, LayerMapError
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
# Absolute paths to the fixture data shipped with the GIS test suite.
data_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')

# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
    ('Houston', (-95.363151, 29.763374, 18)),
    ('Dallas', (-96.801611, 32.782057, 147)),
    ('Oklahoma City', (-97.521157, 34.464642, 380)),
    ('Wellington', (174.783117, -41.315268, 14)),
    ('Pueblo', (-104.609252, 38.255001, 1433)),
    ('Lawrence', (-95.235060, 38.971823, 251)),
    ('Chicago', (-87.650175, 41.850385, 181)),
    ('Victoria', (-123.305196, 48.462611, 15)),
)

# Reference mapping of city name to its altitude (Z value).
city_dict = dict((name, coords) for name, coords in city_data)

# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
# Each entry: (name, 3D LINESTRING WKT, expected per-vertex Z values).
interstate_data = (
    ('I-45',
     'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
     ( 11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
       15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16 ,
       15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
       15.435),
     ),
    )

# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_wkt = 'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,942051.75 4208366.38,941527.97 4225693.20))'
bbox_z = (21.71, 13.21, 9.12, 16.40, 21.71)
def gen_bbox():
    """Return the Houston bounding box as a (2D polygon, 3D polygon) pair,
    built from the module-level bbox_wkt / bbox_z fixtures (SRID 32140)."""
    flat = GEOSGeometry(bbox_wkt, srid=32140)
    ring = []
    for (x, y), z in zip(flat[0].coords, bbox_z):
        ring.append((x, y, z))
    solid = Polygon(tuple(ring), srid=32140)
    return flat, solid
class Geo3DTest(TestCase):
    """
    Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
    tries to test the features that can handle 3D and that are also
    available within GeoDjango. For more information, see the PostGIS docs
    on the routines that support 3D:
    http://postgis.refractions.net/documentation/manual-1.4/ch08.html#PostGIS_3D_Functions

    NOTE: test01_3d populates the tables the later test methods query, so
    tests here depend on execution order (hence the numbered names).
    """
    def test01_3d(self):
        "Test the creation of 3D models."
        # 3D models for the rest of the tests will be populated in here.
        # For each 3D data set create model (and 2D version if necessary),
        # retrieve, and assert geometry is in 3D and contains the expected
        # 3D values.
        for name, pnt_data in city_data:
            x, y, z = pnt_data
            pnt = Point(x, y, z, srid=4326)
            City3D.objects.create(name=name, point=pnt)
            city = City3D.objects.get(name=name)
            self.assertTrue(city.point.hasz)
            self.assertEqual(z, city.point.z)
        # Interstate (2D / 3D and Geographic/Projected variants)
        for name, line, exp_z in interstate_data:
            line_3d = GEOSGeometry(line, srid=4269)
            # Using `hex` attribute because it omits 3D.
            line_2d = GEOSGeometry(line_3d.hex, srid=4269)
            # Creating a geographic and projected version of the
            # interstate in both 2D and 3D.
            Interstate3D.objects.create(name=name, line=line_3d)
            InterstateProj3D.objects.create(name=name, line=line_3d)
            Interstate2D.objects.create(name=name, line=line_2d)
            InterstateProj2D.objects.create(name=name, line=line_2d)
            # Retrieving and making sure it's 3D and has expected
            # Z values -- shouldn't change because of coordinate system.
            interstate = Interstate3D.objects.get(name=name)
            interstate_proj = InterstateProj3D.objects.get(name=name)
            for i in [interstate, interstate_proj]:
                self.assertTrue(i.line.hasz)
                self.assertEqual(exp_z, tuple(i.line.z))
        # Creating 3D Polygon.
        bbox2d, bbox3d = gen_bbox()
        Polygon2D.objects.create(name='2D BBox', poly=bbox2d)
        Polygon3D.objects.create(name='3D BBox', poly=bbox3d)
        p3d = Polygon3D.objects.get(name='3D BBox')
        self.assertTrue(p3d.poly.hasz)
        self.assertEqual(bbox3d, p3d.poly)
    def test01a_3d_layermapping(self):
        "Testing LayerMapping on 3D models."
        from .models import Point2D, Point3D
        point_mapping = {'point' : 'POINT'}
        mpoint_mapping = {'mpoint' : 'MULTIPOINT'}
        # The VRT is 3D, but should still be able to map sans the Z.
        lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point2D.objects.count())
        # The city shapefile is 2D, and won't be able to fill the coordinates
        # in the 3D model -- thus, a LayerMapError is raised.
        self.assertRaises(LayerMapError, LayerMapping,
                          Point3D, city_file, point_mapping, transform=False)
        # 3D model should take 3D data just fine.
        lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point3D.objects.count())
        # Making sure LayerMapping.make_multi works right, by converting
        # a Point25D into a MultiPoint25D.
        lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
        lm.save()
        self.assertEqual(3, MultiPoint3D.objects.count())
    def test02a_kml(self):
        "Test GeoQuerySet.kml() with Z values."
        h = City3D.objects.kml(precision=6).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))
    def test02b_geojson(self):
        "Test GeoQuerySet.geojson() with Z values."
        h = City3D.objects.geojson(precision=6).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))
    def test03a_union(self):
        "Testing the Union aggregate of 3D models."
        # PostGIS query that returned the reference EWKT for this test:
        # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
        ref_ewkt = 'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
        ref_union = GEOSGeometry(ref_ewkt)
        union = City3D.objects.aggregate(Union('point'))['point__union']
        self.assertTrue(union.hasz)
        self.assertEqual(ref_union, union)
    def test03b_extent(self):
        "Testing the Extent3D aggregate for 3D models."
        # `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
        ref_extent3d = (-123.305196, -41.315268, 14,174.783117, 48.462611, 1433)
        extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
        extent2 = City3D.objects.extent3d()
        # Compare componentwise with tolerance; both the aggregate and the
        # GeoQuerySet method should produce the same 3D extent.
        def check_extent3d(extent3d, tol=6):
            for ref_val, ext_val in zip(ref_extent3d, extent3d):
                self.assertAlmostEqual(ref_val, ext_val, tol)
        for e3d in [extent1, extent2]:
            check_extent3d(e3d)
    def test04_perimeter(self):
        "Testing GeoQuerySet.perimeter() on 3D fields."
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        self.assertAlmostEqual(ref_perim_2d,
                               Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
                               tol)
        self.assertAlmostEqual(ref_perim_3d,
                               Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
                               tol)
    def test05_length(self):
        "Testing GeoQuerySet.length() on 3D fields."
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        #    FROM geo3d_interstate[2d|3d];`
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        self.assertAlmostEqual(ref_length_2d,
                               Interstate2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               Interstate3D.objects.length().get(name='I-45').length.m,
                               tol)
        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        self.assertAlmostEqual(ref_length_2d,
                               InterstateProj2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               InterstateProj3D.objects.length().get(name='I-45').length.m,
                               tol)
    def test06_scale(self):
        "Testing GeoQuerySet.scale() on Z values."
        # Mapping of City name to reference Z values.
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.scale(1.0, 1.0, zscale):
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
    def test07_translate(self):
        "Testing GeoQuerySet.translate() on Z values."
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.translate(0, 0, ztrans):
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
| bsd-3-clause |
sheepray/laikaboss | laikaboss/dispatch.py | 17 | 20562 | # Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
import Queue
from util import get_scanObjectUID, listToSSV, yara_on_demand, \
log_module, log_module_error, getObjectHash, \
uniqueList, get_module_arguments, getRootObject
from objectmodel import ScanObject, QuitScanException, GlobalScanTimeoutError
from laikaboss import modules
import sys
import traceback
from laikaboss import config
from contextlib import contextmanager
from interruptingcow import timeout
# Cache of instantiated scan modules keyed by upper-cased module name;
# populated lazily by _run_module() and torn down by close_modules().
module_pointers = {}
def _run_module(sm, scanObject, result, depth, args, earlyQuitTime=0):
    '''
    Description: The Dispatch function uses this private method to run a specific module against
                 an object. This method recursively calls the Dispatch method for any items
                 returned by a module.
    Arguments:
    - sm: a string containing the name of a scan module
    - scanObject: the current object being scanned
    - result: collects scan results of all objects being scanned in a dictionary
    - depth: every time dispatch is called, depth is increased by 1. may be used to limit recursion.
    - args: a dictionary containing arguments for a module provided by the Dispatcher
    - earlyQuitTime: epoch deadline after which GlobalScanTimeoutError is raised
                     once the module finishes (0 disables the check)
    '''
    logging.debug("si_dispatch - Attempting to run module %s against - uid: %s, filename %s with args %s" %
                 (sm,get_scanObjectUID(scanObject),scanObject.filename,repr(args)))
    # Attempt the acquire the module given the name provided by the Dispatcher
    # (module classes are instantiated once and cached in module_pointers)
    sm = sm.upper()
    if sm not in module_pointers:
        if hasattr(modules, sm):
            try:
                module_pointers[sm] = getattr(modules, sm)()
            except QuitScanException:
                raise
            # NOTE: bare except is deliberate here -- a single broken module
            # must not abort the whole scan; the failure is logged instead.
            except:
                error = traceback.format_exc().splitlines()[-1]
                errorText = "Module could not be initialized: %s Error: %s" % (sm, error)
                log_module_error("si_dispatch",
                                 scanObject,
                                 result,
                                 errorText)
                logging.debug(errorText)
                return
        else:
            logging.debug("module doesn't exist: %s" % sm)
            log_module_error("si_dispatch",
                            scanObject,
                            result,
                            "module not found: %s" % sm)
            return
    newscan = module_pointers[sm]
    # Add the current scan module to the list of scan modules run against this object
    scanObject.scanModules.append(sm)
    # Run the module
    moduleResult = newscan.run(scanObject, result, depth, args)
    if earlyQuitTime and earlyQuitTime < time.time():
        raise GlobalScanTimeoutError()
    # Perform a recursive scan for each item returned by the module (may be none)
    for moduleObject in moduleResult:
        # Propagate lineage/context onto the child before dispatching it.
        moduleObject.externalVars.source = result.source
        moduleObject.externalVars.parent = get_scanObjectUID(scanObject)
        moduleObject.externalVars.parentModules = scanObject.scanModules
        moduleObject.externalVars.sourceModule = sm
        moduleObject.externalVars.flags = scanObject.flags
        Dispatch(moduleObject.buffer, result, depth, externalVars=moduleObject.externalVars)
def _conditional_scan(scanObject, externalVars, result, depth):
    '''
    Description: This function performs a second pass scan of an object based on the results of a
                 previous scan only. The yara rules look at the flags applied to this object and
                 determine any additional scanning that may need to be performed based on these
                 flags.
    Arguments:
    - scanObject: the current object being scanned
    - externalVars: variables passed in from the caller or other modules
    - result: collects scan results of all objects being scanned in a dictionary
    - depth: every time dispatch is called, depth is increased by 1. may be used to limit recursion.
    '''
    # Attempt to disposition based on flags from the first scan
    try:
        logging.debug("attempting conditional disposition on %s with %s uID: %s parent: %s" % (scanObject.filename, listToSSV(scanObject.flags), get_scanObjectUID(scanObject), scanObject.parent))
        # External variables exposed to the yara conditional ruleset; 'NONE'
        # stands in for empty values so rules can match on absence.
        externals = {
            'ext_parentModules': listToSSV(externalVars.parentModules) or 'NONE',
            'ext_sourceModule': externalVars.sourceModule or 'NONE',
            'ext_contentType': listToSSV(scanObject.contentType) or 'NONE',
            'ext_fileType': listToSSV(scanObject.fileType) or 'NONE',
            'ext_filename': externalVars.filename or 'NONE',
            'ext_timestamp': externalVars.timestamp or 'NONE',
            'ext_source': externalVars.source or 'NONE',
            'ext_size': scanObject.objectSize,
            'ext_depth': depth or 0
        }
        # The "buffer" scanned here is the object's flag list, not its bytes.
        yresults = yara_on_demand(config.yaraconditionalrules, listToSSV(scanObject.flags), externals)
        moduleQueue = _get_module_queue(yresults, result, scanObject, "Conditional Rules")
    except (QuitScanException, GlobalScanTimeoutError):
        raise
    except Exception:
        logging.exception("si_dispatch: ERROR occured on conditional_scan on UID:%s Check your configuration!", get_scanObjectUID(scanObject))
        log_module_error("si_dispatch", scanObject, result, "error during conditional_scan: %s" % traceback.format_exc())
        return
    # Recursively call the Dispatcher if any conditional scans need to be performed.
    if not moduleQueue.empty():
        Dispatch(scanObject.buffer, result, depth,
                 scanObject=scanObject,
                 extScanModules=moduleQueue,
                 conditional=True)
def _addExtMetadata(scanObject, data):
'''
Description: Wrapper function around util function to facilitate adding external metadata.
Arguments:
- scanObject: the current object being scanned
- data: Data to be appended to the scanObject
'''
# If the data is a string or list, add it as a single value to the 'data' key
if isinstance(data, str) or isinstance(data, list):
scanObject.addMetadata("EXTERNAL", "data", data)
# If the data is a dict, loop through the dictionary and add each key, value
elif isinstance(data, dict):
for key, value in data.iteritems():
scanObject.addMetadata("EXTERNAL", key, value)
# If it is none of these, then add the repr() string to the 'object' key
else:
scanObject.addMetadata("EXTERNAL", "object", repr(data))
def _gather_metadata(buffer, externalVars, result, depth, maxBytes):
    '''
    Description: Helper function to set up a scanObject from various metadata sources.
    Arguments:
    - buffer: the binary contents of the current object
    - externalVars: variables passed in from the caller or other modules
    - result: collects scan results of all objects being scanned in a dictionary
    - depth: every time dispatch is called, depth is increased by 1. may be used to limit recursion.
    - maxBytes: dispatch byte limit computed by the caller
    Returns:
    - the newly created ScanObject, already registered in result.files
    '''
    # NOTE(review): maxBytes is accepted but never used in this function --
    # confirm whether buffer truncation was intended here.
    # Set up the object.
    contentType = externalVars.contentType if externalVars.contentType else []
    scanObject = ScanObject(parent=externalVars.parent,
                            buffer=buffer,
                            objectHash=getObjectHash(buffer),
                            objectSize=len(buffer),
                            filename=externalVars.filename,
                            contentType=contentType,
                            fileType=[],
                            uniqID=externalVars.uniqID,
                            ephID=externalVars.ephID,
                            origRootUID=externalVars.origRootUID,
                            sourceModule=externalVars.sourceModule,
                            source=result.source,
                            level=result.level,
                            depth=depth,
                            order=len(result.files))
    # Add the object to the scan result
    uid = get_scanObjectUID(scanObject)
    # Check to see if rootUID has been set, if it hasn't, then assume this is the root
    if not result.rootUID:
        result.rootUID = get_scanObjectUID(scanObject)
    # In order to ensure that all objects have a rootUID, set this after we have set the result.rootUID
    scanObject.rootUID = result.rootUID
    result.files[uid] = scanObject
    # If client provided metadata, append it to the scanObject
    if externalVars.extMetaData is not None and externalVars.extMetaData:
        _addExtMetadata(scanObject, externalVars.extMetaData)
    return scanObject
def _get_module_queue(yresults, result, scanObject, metaLabel):
    '''
    Description: Takes the results from a dispatch yara scan and creates a priority queue from them.
                 The function also adds dispatch flags if they exist in the rule.
    Arguments:
    - yresults: rule matches produced by the yara dispatch/conditional scan
    - result: collects scan results of all objects being scanned in a dictionary
    - scanObject: the current object being scanned
    - metaLabel: metadata label under which matching rule names are recorded
    Returns:
    - a Queue.PriorityQueue of (priority, [module spec, ...]) tuples
    '''
    moduleQueue = Queue.PriorityQueue()
    dispatchFlags = []
    parentDispatchFlags = []
    for yr in yresults:
        if 'scan_modules' in yr.meta:
            # Check to see if the rule has a priority, if not use the default
            if 'priority' in yr.meta:
                priority = int(yr.meta['priority'])
                logging.debug("Rule %s set priority %i" % (yr, priority))
            else:
                priority = int(config.defaultmodulepriority)
            scanObject.addMetadata("DISPATCH", metaLabel, "%s (%i)" % (str(yr), priority))
            moduleQueue.put((priority, uniqueList(yr.meta['scan_modules'].split())))
        if 'flags' in yr.meta:
            dispatchFlags.extend(yr.meta['flags'].split())
        if 'parent_flags' in yr.meta:
            parentDispatchFlags.extend(yr.meta['parent_flags'].split())
        if 'file_type' in yr.meta:
            scanObject.fileType.append(yr.meta['file_type'])
    # De-duplicate, then record each rule-supplied flag on this object.
    dispatchFlags = set(dispatchFlags)
    for df in dispatchFlags:
        scanObject.addFlag("dispatch::%s" % (df))
    # parent_flags are applied to the parent object, if it is in this result.
    if scanObject.parent in result.files:
        for pdf in parentDispatchFlags:
            result.files[scanObject.parent].addFlag("dispatch::%s" % (pdf))
    return moduleQueue
def _process_module_queue(moduleQueue, result, depth, scanObject, earlyQuitTime=0):
    '''
    Description: Takes a priority module queue and runs each module in the appropriate order.
                 Each module is tracked for uniqueness to prevent redundancy.
    Arguments:
    - moduleQueue: Queue.PriorityQueue of (priority, [module spec, ...]) tuples
    - result: collects scan results of all objects being scanned in a dictionary
    - depth: current recursion depth of the dispatcher
    - scanObject: the current object being scanned
    - earlyQuitTime: epoch deadline forwarded to _run_module (0 disables)
    '''
    MAXDEPTH = 0
    if hasattr(config, 'maxdepth'):
        # Negative configured values are treated as "no limit".
        MAXDEPTH = int(config.maxdepth)
        if MAXDEPTH < 0:
            MAXDEPTH = 0
    moduleSeen = []
    while True:
        if MAXDEPTH and depth > MAXDEPTH:
            errorText = "Depth has been exceeded. Only the dispatcher will be run on this object."
            logging.debug(errorText)
            log_module_error("si_dispatch",
                            scanObject,
                            result,
                            errorText)
            scanObject.addFlag("dispatch:nfo:max_depth_exceeded")
            break
        # Read until the queue is empty
        if moduleQueue.empty():
            logging.debug("Module run queue is empty")
            break
        # PriorityQueue entries are (priority, module list); take the list.
        scanModules = moduleQueue.get()[1]
        for sm in scanModules:
            if sm in moduleSeen:
                logging.debug("Already ran %s, continuing to the next module" % (sm))
                continue
            module, args = get_module_arguments(sm)
            _run_module(module, scanObject, result, depth, args, earlyQuitTime)
            moduleSeen.append(sm)
def close_modules():
    """
    Description: Invoke the close() callback on every cached scan module so
                 each one can tear down its resources safely.
    """
    for instance in module_pointers.values():
        instance.close()
@contextmanager
def _with_true():
yield True
def _with_conditional(condition):
if condition:
return _with_true()
return False
def Dispatch(buffer, result, depth, externalVars=None,
             scanObject=None,
             extScanModules=None,
             conditional=False ):
    """
    Description: By default, this function uses yara to disposition a buffer and determine what scan modules
                 should be run against it. The function may be called recursively if a scan module returns
                 additional buffers to scan. The function collects all results into the original result object
                 passed in by the caller for easy retrieval.
    Arguments: (* denotes OPTIONAL parameters):
    - buffer: the binary contents of the current object
    - result: collects scan results of all objects being scanned in a dictionary
    - depth: every time dispatch is called, depth is increased by 1. may be used to limit recursion.
    - *externalVars: variables passed in from the caller or other modules
    - *scanObject: pre-built scan object (supplied on conditional re-scans)
    - *extScanModules: this function may be called with predefined scan modules set (string, space delimited)
    - *conditional: determines whether this function has been called as the result of a conditional scan
    """
    # Only the outermost Dispatch call installs the global timeout; nested
    # calls inherit it (skip_timeout stays True for them).
    skip_timeout = True
    if depth == 0 or (externalVars is not None and int(externalVars.depth) > 0):
        skip_timeout = False
    global_scan_timeout = 3600
    if hasattr(config, 'global_scan_timeout'):
        global_scan_timeout = int(config.global_scan_timeout)
    global_scan_timeout_endtime = result.startTime + global_scan_timeout
    if externalVars is not None and externalVars.depth:
        depth = externalVars.depth
    starttime = time.time()
    MAXBYTES = 0
    if hasattr(config, 'dispatchmaxbytes'):
        # Cap on how many bytes yara dispatch rules scan; <= 0 means no cap.
        MAXBYTES= int(config.dispatchmaxbytes)
        if MAXBYTES < 0:
            MAXBYTES = 0
        logging.debug('setting dispatch byte limit to %i' % (MAXBYTES))
    #
    # This branch is designed for first-pass scanning where file type and scan modules are unknown
    # Yara is used to disposition the file and determine which modules should be run against it
    # Using the result of each module, it is determined (using a separate yara scan on the flags)
    # whether or not a conditional scan needs to be run.
    if extScanModules is None:
        # Generate the scan object from the parameters
        scanObject = _gather_metadata(buffer, externalVars, result, depth, MAXBYTES)
        # Increase the depth only if it is the first time scanning an object
        depth += 1
        logging.debug("si_dispatch - Attempting to dispatch - uid: %s, filename: %s, \
source module: %s" % (get_scanObjectUID(scanObject),
                      externalVars.filename,
                      externalVars.sourceModule))
        # check to see if this object has a parent, get the modules run against the parent if it exists
        #
        externals = {
            'ext_parentModules': listToSSV(externalVars.parentModules) or 'NONE',
            'ext_sourceModule': externalVars.sourceModule or 'NONE',
            'ext_contentType': listToSSV(scanObject.contentType) or 'NONE',
            'ext_filename': externalVars.filename or 'NONE',
            'ext_timestamp': externalVars.timestamp or 'NONE',
            'ext_source': externalVars.source or 'NONE',
            'ext_flags': listToSSV(externalVars.flags) or 'NONE',
            'ext_size': scanObject.objectSize,
            'ext_depth': int(depth) or 0
        }
        dispatch_rule_start = time.time()
        yresults = yara_on_demand(config.yaradispatchrules, buffer, externals, MAXBYTES)
        if config.modulelogging:
            log_module("MSG", 'si_dispatch', time.time() - dispatch_rule_start, scanObject, result, "")
        moduleQueue = _get_module_queue(yresults, result, scanObject, "Rules")
        # Either a pass-through context (timeout already armed upstream) or
        # an interruptingcow timeout guarding the whole module run.
        with _with_conditional(skip_timeout) or timeout(global_scan_timeout, exception=GlobalScanTimeoutError):
            try:
                _process_module_queue(moduleQueue, result, depth, scanObject, global_scan_timeout_endtime)
                _conditional_scan(scanObject, externalVars, result, depth)
            except GlobalScanTimeoutError:
                # If the scan times out, add a flag and continue as a normal error
                scanObject.addFlag("dispatch:err:scan_timeout")
                # If not the root object, raise the exception to halt the parent scan
                if depth > 0 and (externalVars is None or depth > int(externalVars.depth)):
                    raise
                exc_type, exc_value, exc_traceback = sys.exc_info()
                logging.exception("error on %s. exception details below: " % \
                                  (get_scanObjectUID(getRootObject(result))))
                log_module_error("dispatch:",
                                 scanObject,
                                 result,
                                 repr(traceback.format_exception(exc_type,
                                                                 exc_value,
                                                                 exc_traceback)))
    #
    # This branch is designed for externally specified scan modules to be run against a buffer.
    # It is not necessary to disposition the file type with yara because we are trusting the caller.
    # This branch differs from a conditional scan in that there is no metadata about the buffer already,
    # so it must be gathered before beginning the scan. It is also subject to conditional scanning.
    elif extScanModules is not None and not conditional:
        scanObject = _gather_metadata(buffer, externalVars, result, depth, MAXBYTES)
        with _with_conditional(skip_timeout) or timeout(global_scan_timeout, exception=GlobalScanTimeoutError):
            try:
                for sm in extScanModules:
                    module, args = get_module_arguments(sm)
                    _run_module(module, scanObject, result, depth, args)
                # Disable conditional scan
                #_conditional_scan(scanObject, externalVars, result, depth)
            except GlobalScanTimeoutError:
                # If the scan times out, add a flag and continue as a normal error
                scanObject.addFlag("dispatch:err:scan_timeout")
                # If not the root object, raise the exception to halt the parent scan
                if depth > 0 and (externalVars is None or depth > int(externalVars.depth)):
                    raise
                exc_type, exc_value, exc_traceback = sys.exc_info()
                logging.exception("error on %s. exception details below: " % \
                                  (get_scanObjectUID(getRootObject(result))))
                log_module_error("dispatch",
                                 scanObject,
                                 result,
                                 repr(traceback.format_exception(exc_type,
                                                                 exc_value,
                                                                 exc_traceback)))
    #
    # This branch is specifically for conditional scans kicked off by this function. Metadata about
    # the object has already been collected and all that needs to occur is scans by the specified modules.
    else:
        _process_module_queue(extScanModules, result, depth, scanObject)
    logging.debug("si_dispatch - depth: %s, time: %s" % (depth, time.time() - starttime))
| apache-2.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/security_rule.py | 1 | 7517 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
    """Network security rule.

    All required parameters must be populated in order to send to Azure.

    :param id: Resource ID.
    :type id: str
    :param description: A description for this rule. Restricted to 140 chars.
    :type description: str
    :param protocol: Required. Network protocol this rule applies to. Possible
     values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp',
     '*'
    :type protocol: str or
     ~azure.mgmt.network.v2018_01_01.models.SecurityRuleProtocol
    :param source_port_range: The source port or range. Integer or range
     between 0 and 65535. Asterisk '*' can also be used to match all ports.
    :type source_port_range: str
    :param destination_port_range: The destination port or range. Integer or
     range between 0 and 65535. Asterisk '*' can also be used to match all
     ports.
    :type destination_port_range: str
    :param source_address_prefix: The CIDR or source IP range. Asterisk '*' can
     also be used to match all source IPs. Default tags such as
     'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
     this is an ingress rule, specifies where network traffic originates from.
    :type source_address_prefix: str
    :param source_address_prefixes: The CIDR or source IP ranges.
    :type source_address_prefixes: list[str]
    :param source_application_security_groups: The application security group
     specified as source.
    :type source_application_security_groups:
     list[~azure.mgmt.network.v2018_01_01.models.ApplicationSecurityGroup]
    :param destination_address_prefix: The destination address prefix. CIDR or
     destination IP range. Asterisk '*' can also be used to match all source
     IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
     'Internet' can also be used.
    :type destination_address_prefix: str
    :param destination_address_prefixes: The destination address prefixes.
     CIDR or destination IP ranges.
    :type destination_address_prefixes: list[str]
    :param destination_application_security_groups: The application security
     group specified as destination.
    :type destination_application_security_groups:
     list[~azure.mgmt.network.v2018_01_01.models.ApplicationSecurityGroup]
    :param source_port_ranges: The source port ranges.
    :type source_port_ranges: list[str]
    :param destination_port_ranges: The destination port ranges.
    :type destination_port_ranges: list[str]
    :param access: Required. The network traffic is allowed or denied.
     Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow',
     'Deny'
    :type access: str or
     ~azure.mgmt.network.v2018_01_01.models.SecurityRuleAccess
    :param priority: The priority of the rule. The value can be between 100
     and 4096. The priority number must be unique for each rule in the
     collection. The lower the priority number, the higher the priority of the
     rule.
    :type priority: int
    :param direction: Required. The direction of the rule. The direction
     specifies if rule will be evaluated on incoming or outgoing traffic.
     Possible values are: 'Inbound' and 'Outbound'. Possible values include:
     'Inbound', 'Outbound'
    :type direction: str or
     ~azure.mgmt.network.v2018_01_01.models.SecurityRuleDirection
    :param provisioning_state: The provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Properties the service requires on every rule.
    _validation = {
        'protocol': {'required': True},
        'access': {'required': True},
        'direction': {'required': True},
    }

    # Maps Python attribute names to the wire (JSON) paths and types used by
    # the msrest serializer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
        'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
        'source_address_prefixes': {'key': 'properties.sourceAddressPrefixes', 'type': '[str]'},
        'source_application_security_groups': {'key': 'properties.sourceApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
        'destination_address_prefixes': {'key': 'properties.destinationAddressPrefixes', 'type': '[str]'},
        'destination_application_security_groups': {'key': 'properties.destinationApplicationSecurityGroups', 'type': '[ApplicationSecurityGroup]'},
        'source_port_ranges': {'key': 'properties.sourcePortRanges', 'type': '[str]'},
        'destination_port_ranges': {'key': 'properties.destinationPortRanges', 'type': '[str]'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'direction': {'key': 'properties.direction', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SecurityRule, self).__init__(**kwargs)
        self.description = kwargs.get('description', None)
        self.protocol = kwargs.get('protocol', None)
        self.source_port_range = kwargs.get('source_port_range', None)
        self.destination_port_range = kwargs.get('destination_port_range', None)
        self.source_address_prefix = kwargs.get('source_address_prefix', None)
        self.source_address_prefixes = kwargs.get('source_address_prefixes', None)
        self.source_application_security_groups = kwargs.get('source_application_security_groups', None)
        self.destination_address_prefix = kwargs.get('destination_address_prefix', None)
        self.destination_address_prefixes = kwargs.get('destination_address_prefixes', None)
        self.destination_application_security_groups = kwargs.get('destination_application_security_groups', None)
        self.source_port_ranges = kwargs.get('source_port_ranges', None)
        self.destination_port_ranges = kwargs.get('destination_port_ranges', None)
        self.access = kwargs.get('access', None)
        self.priority = kwargs.get('priority', None)
        self.direction = kwargs.get('direction', None)
        self.provisioning_state = kwargs.get('provisioning_state', None)
        self.name = kwargs.get('name', None)
        self.etag = kwargs.get('etag', None)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.