repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
jaytlennon/StarvationTraits | bin/TrimMovingAverage.py | 3 | 1396 | ###############################################################################
# Python Script to Trim Based on Moving Average
###############################################################################
# Written by Mario Muscarella
# Last Update 2 May 2013
# Directions:
# use the following command: > python TrimMovingAverage.py
from Bio import SeqIO
import sys
import glob
import numpy as numpy
#import MovingAverage as MovingAverage
window_size = 10
qual_cutoff = 35
def movingaverage(data_in, window_size):
window = numpy.ones(int(window_size))/float(window_size)
return numpy.convolve(data_in, window, 'same')
files = glob.glob("*.qual")
for x in files:
sample_qual = SeqIO.read(x, "qual")
sample_seq = SeqIO.read(sample_qual.id+".fasta", "fasta")
sample_qual_score = sample_qual.letter_annotations["phred_quality"]
sample_qual_MA = numpy.array(movingaverage(sample_qual_score, window_size))
qual_above = list(numpy.where(sample_qual_MA > qual_cutoff))[0]
sample_qual_min = numpy.min(qual_above)
sample_qual_max = numpy.max(qual_above)
sample_qual_trim = sample_qual[sample_qual_min:sample_qual_max]
sample_seq_trim = sample_seq[sample_qual_min:sample_qual_max]
SeqIO.write(sample_qual_trim, sample_qual.id+".trim.qual", "qual")
SeqIO.write(sample_seq_trim, sample_seq.id+".trim.fasta", "fasta")
print "trimmed fasta and qual file created for %s" % sample_qual.id
| gpl-3.0 |
mitocw/edx-platform | lms/djangoapps/verify_student/management/commands/tests/test_backfill_sso_verifications_for_old_account_links.py | 3 | 3101 | """
Tests for management command backfill_sso_verifications_for_old_account_links
"""
from mock import patch
from django.core.management import call_command
from django.core.management.base import CommandError
from lms.djangoapps.program_enrollments.management.commands.tests.utils import UserSocialAuthFactory
from lms.djangoapps.verify_student.models import SSOVerification
from lms.djangoapps.verify_student.tests.factories import SSOVerificationFactory
from third_party_auth.tests.testutil import TestCase
class TestBackfillSSOVerificationsCommand(TestCase):
    """
    Tests for management command for backfilling SSO verification records
    """
    # Slug of the SAML provider configured in setUp(); also passed to the
    # UserSocialAuthFactory so the social-auth link matches the provider.
    slug = 'test'
    def setUp(self):
        """Enable SAML, create one provider and one linked social-auth user."""
        super(TestBackfillSSOVerificationsCommand, self).setUp()
        self.enable_saml()
        # enable_sso_id_verification=True is what makes the backfill command
        # consider account links for this provider.
        self.provider = self.configure_saml_provider(
            name="Test",
            slug=self.slug,
            enabled=True,
            enable_sso_id_verification=True,
        )
        self.user_social_auth1 = UserSocialAuthFactory(slug=self.slug, provider=self.provider.backend_name)
        self.user_social_auth1.save()
        self.user1 = self.user_social_auth1.user
    def test_fails_without_required_param(self):
        """Omitting the required --provider-slug option raises CommandError."""
        with self.assertRaises(CommandError):
            call_command('backfill_sso_verifications_for_old_account_links')
    def test_fails_without_named_provider_config(self):
        """A provider slug with no matching provider config raises CommandError."""
        with self.assertRaises(CommandError):
            call_command('backfill_sso_verifications_for_old_account_links', '--provider-slug', 'gatech')
    def test_sso_updated_single_user(self):
        """Running the command creates an SSOVerification for the linked user."""
        self.assertTrue(SSOVerification.objects.count() == 0)
        call_command('backfill_sso_verifications_for_old_account_links', '--provider-slug', self.provider.provider_id)
        self.assertTrue(SSOVerification.objects.count() > 0)
        self.assertEqual(SSOVerification.objects.get().user.id, self.user1.id)
    def test_performance(self):
        """Placeholder: should assert query counts once expectations are known."""
        # TODO
        #self.assertNumQueries(1)
        call_command('backfill_sso_verifications_for_old_account_links', '--provider-slug', self.provider.provider_id)
        #self.assertNumQueries(100)
    def test_signal_called(self):
        """The LEARNER_NOW_VERIFIED signal fires exactly once for the backfill."""
        with patch('openedx.core.djangoapps.signals.signals.LEARNER_NOW_VERIFIED.send_robust') as mock_signal:
            call_command('backfill_sso_verifications_for_old_account_links', '--provider-slug', self.provider.provider_id)
        self.assertEqual(mock_signal.call_count, 1)
    def test_fine_with_multiple_verification_records(self):
        """
        Testing there are no issues with excluding learners with multiple sso verifications
        """
        SSOVerificationFactory(
            status='approved',
            user=self.user1,
        )
        SSOVerificationFactory(
            status='approved',
            user=self.user1,
        )
        self.assertEqual(SSOVerification.objects.count(), 2)
        # The command must leave already-verified learners untouched:
        # no new records created, none removed.
        call_command('backfill_sso_verifications_for_old_account_links', '--provider-slug', self.provider.provider_id)
        self.assertEqual(SSOVerification.objects.count(), 2)
| agpl-3.0 |
emilroz/openmicroscopy | examples/ScriptingService/adminWorkflow.py | 3 | 15532 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
examples/ScriptingService/adminWorkflow.py
-----------------------------------------------------------------------------
Copyright (C) 2006-2014 University of Dundee. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
------------------------------------------------------------------------------
This script demonstrates how a server Admin might upload an "Offical" trusted
script and run it.
@author Donald MacDonald
<a href="mailto:donald@lifesci.dundee.ac.uk">donald@lifesci.dundee.ac.uk</a>
@author Will Moore
<a href="mailto:will@lifesci.dundee.ac.uk">will@lifesci.dundee.ac.uk</a>
@version 3.0
<small>
(<b>Internal version:</b> $Revision: $Date: $)
</small>
@since 3.0-Beta4.2
"""
import omero
import omero.scripts
import omero.util.script_utils as scriptUtil
import getopt, sys, os, subprocess
import omero_api_IScript_ice
import omero_SharedResources_ice
from omero.rtypes import rstring, RListI, robject, rint, rlong, rlist, unwrap
import getpass
import time
startTime = 0
def printDuration(reset=False):
""" Method for timing the running of scripts. For performance testing only """
global startTime
if startTime == 0 or reset:
startTime = time.time()
print "timer = %s secs" % (time.time() - startTime)
def uploadScript(session, scriptService, scriptPath):
"""
Tries to upload the specified script as an official script.
*WARNING*
If the script has already been uploaded, then it will be replaced with
the new script.
If uploaded scripts are not valid, they will not be returned by getScripts().
"""
file = open(scriptPath)
scriptText = file.read()
file.close()
#print scriptText
# first check if the script has already been uploaded
scriptId = scriptService.getScriptID(scriptPath)
if scriptId == None or scriptId < 0:
print "Uploading script:", scriptPath
# try upload new script
scriptId = scriptService.uploadOfficialScript(scriptPath, scriptText)
print "Script uploaded with ID:", scriptId
else:
print "Editing script:", scriptPath
# if it has, edit the existing script
scriptFile = session.getQueryService().get("OriginalFile", scriptId)
scriptService.editScript(scriptFile, scriptText)
print "Script ID: %s was edited" % scriptFile.id.val
return scriptId
def listScripts(scriptService):
"""
Prints out the available Official Scripts returned by getScripts()
and User Scripts returned by getUserScripts()
"""
print "--OFFICIAL SCRIPTS--"
scripts = scriptService.getScripts()
for s in scripts:
print s.id.val, s.path.val + s.name.val
print "--USER SCRIPTS--"
userGroups = [] # gives me available scripts for default group
scripts = scriptService.getUserScripts(userGroups)
for s in scripts:
print s.id.val, s.path.val + s.name.val
def getParams(scriptService, scriptPath):
"""
This simply queries the parameters of the script and prints them out.
Script can be specified by it's path or ID.
Prints various parameter attributes such as descriptions, default values etc.
Useful for checking that parameters are being defined correctly.
"""
try:
scriptId = long(scriptPath)
except:
scriptId = scriptService.getScriptID(scriptPath)
params = scriptService.getParams(scriptId)
print "\nScript Name:", params.name
print "Authors:", ", ".join([a for a in params.authors])
print "Contact:", params.contact
print "Version:", params.version
print "Institutions:", ", ".join([i for i in params.institutions])
print "Inputs:"
for key, param in params.inputs.items():
print " ", key
if not param.optional: print " * Required"
if param.description: print " ", param.description
if param.min: print " min:", param.min.getValue()
if param.max: print " max:", param.max.getValue()
if param.values: print " ", ", ".join([v.getValue() for v in param.values.getValue()])
if param.useDefault: print " default:", param.prototype.val
def runScript(session, scriptService, scriptPath):
"""
This will attempt to run the script (scriptPath can be path or ID), asking the user
for inputs for all the script parameters, and printing out the
results when the script completes.
"""
try:
scriptId = long(scriptPath)
except:
scriptId = scriptService.getScriptID(scriptPath)
# Identify the script we want to run: Get all 'official' scripts and filter by path.
print "Running script: %s with ID: %s" % (scriptPath, scriptId)
# make a map of all the parameters we want to pass to the script
# keys are strings. Values must be omero.rtypes such as rlong, rbool, rlist.
map = {}
params = scriptService.getParams(scriptId)
for key, param in params.inputs.items():
print ""
print key
if param.description: print param.description
if not param.optional: print " * Required"
if param.values: print "Options:", ", ".join(unwrap(param.values))
if param.min: print "Min:", param.min.getValue()
if param.max: print "Max:", param.max.getValue()
prototype = param.prototype
prompt = ": "
default = None
if param.useDefault:
default = param.prototype.val
prompt = "[%s]: " % default
pclass = prototype.__class__
if pclass == omero.rtypes.RListI:
valueList = []
listClass = omero.rtypes.rstring
l = prototype.val # list
if len(l) > 0: # check if a value type has been set (first item of prototype list)
listClass = l[0].getValue().__class__
if listClass == int(1).__class__:
listClass = omero.rtypes.rint
if listClass == long(1).__class__:
listClass = omero.rtypes.rlong
print "List:"
while(True):
value = raw_input(prompt)
if value == "": break
try:
obj = listClass(value)
except:
print "Invalid entry"
continue
if isinstance(obj, omero.model.IObject):
valueList.append(omero.rtypes.robject(obj))
else:
valueList.append(obj)
if len(valueList) > 0:
map[key] = omero.rtypes.rlist(valueList)
elif pclass == omero.rtypes.RMapI:
print "MAP!"
valueMap = {}
m = prototype.val # check if a value type has been set for the map
print m
else:
value = raw_input(prompt)
while(True):
if value == "":
if default: map[key] = param.prototype
break
try:
map[key] = pclass(value)
break
except:
print "Invalid entry"
print map
# The last parameter is how long to wait as an RInt
proc = scriptService.runScript(scriptId, map, None)
try:
cb = omero.scripts.ProcessCallbackI(client, proc)
while not cb.block(1000): # ms.
pass
cb.close()
results = proc.getResults(0) # ms
finally:
proc.close(False)
# handle any results from the script
#print results.keys()
if 'Message' in results:
print "\nRESULTS:", results['Message'].getValue()
for result in results.keys():
if result not in ["Message", "stdout", "stderr"]:
print "\n", result, results[result].getValue().__class__
printOutErr = True
if printOutErr:
rawFileService = session.createRawFileStore()
queryService = session.getQueryService()
if 'stdout' in results:
origFile = results['stdout'].getValue()
fileId = origFile.getId().getValue()
print "\n******** Script generated StdOut in file:%s *******" % fileId
print scriptUtil.readFromOriginalFile(rawFileService, queryService, fileId)
if 'stderr' in results:
origFile = results['stderr'].getValue()
fileId = origFile.getId().getValue()
print "\n******** Script generated StdErr in file:%s *******" % fileId
print scriptUtil.readFromOriginalFile(rawFileService, queryService, fileId)
rawFileService.close()
def disableScript(session, scriptId):
"""
This will simply stop a script, defined by ID, from being returned by getScripts()
by editing it's mime-type to 'text/plain'
"""
updateService = session.getUpdateService()
scriptFile = session.getQueryService().get("OriginalFile", long(scriptId))
print "Disabling script:", scriptFile.id.val, scriptFile.path.val + scriptFile.name.val
scriptFile.setMimetype(rstring("text/plain"))
updateService.saveObject(scriptFile)
def cleanUpScriptFiles(session, scriptService):
"""
In the case where official script files have been manually deleted (from /lib/scripts/ )
they will not be returned by getScripts(), but they are still in the OriginalFiles table in DB
which means that uploadScript(path, text) will fail.
This can be fixed by setting the mimetype to 'text/x-python' for all scripts that are still in the
OriginalFiles table, but not returned by getScripts() or getUserScripts() so that they become disabled,
allowing uploadScript(path, text) to work again.
"""
queryService = session.getQueryService()
updateService = session.getUpdateService()
scriptIds = []
scripts = scriptService.getScripts()
print "\n Scripts: "
for s in scripts:
scriptIds.append(s.id.val)
print s.id.val, s.path.val + s.name.val
userScripts = scriptService.getScripts()
print "\n User Scripts: "
for s in userScripts:
scriptIds.append(s.id.val)
print s.id.val, s.path.val + s.name.val
# get all script files in the DB
query_string = "select o from OriginalFile o where o.mimetype='text/x-python'"
scriptFiles = queryService.findAllByQuery(query_string, None)
print "\n DISABLING invalid scripts.... "
for s in scriptFiles:
#print s.id.val, s.path.val + s.name.val
if s.id.val not in scriptIds:
print s.id.val, s.path.val + s.name.val
s.setMimetype(rstring("text/plain"))
updateService.saveObject(s)
def usage():
print "USAGE: python adminWorkflow.py -s server -u username -f file [options]"
def printHelp(args):
print ""
usage()
print "\nThe -f argument to specifiy a script file (by path or ID) is only required for some options below"
print "Admin permissions are required for upload, disable and clean options"
print "\nOPTIONS:"
print "\n list"
print listScripts.__doc__
print "\n upload"
print uploadScript.__doc__
print "\n params"
print getParams.__doc__
print "\n run"
print runScript.__doc__
print "\n clean"
print cleanUpScriptFiles.__doc__
print "\n disable"
print disableScript.__doc__
def readCommandArgs():
"""
Read the arguments from the command line and put them in a map
@return A map of the command args, with keys: "host", "username", "password", "scriptId"
"""
host = ""
username = ""
password = ""
script = ""
try:
opts, args = getopt.getopt(sys.argv[1:] ,"s:u:p:f:", ["server=", "username=", "password=", "file="])
except getopt.GetoptError, err:
usage()
sys.exit(2)
returnMap = {}
for opt, arg in opts:
if opt in ("-s","--server"):
returnMap["host"] = arg
elif opt in ("-u","--username"):
returnMap["username"] = arg
elif opt in ("-p","--password"):
returnMap["password"] = arg
elif opt in ("-f","--file"):
returnMap["script"] = arg
return returnMap, args
def doWorkflow(client, commandArgs):
    """
    The main workflow is performed here, creating a connection to the server,
    processing the user commands and calling the appropriate methods.

    @param client: an omero.client already pointed at the server
    @param commandArgs: map with "username", "password" and (for most actions) "script"

    NOTE(review): this function reads the module-level 'args' list produced by
    readCommandArgs() in __main__ rather than receiving it as a parameter, so
    it only works when called from this script's entry point -- confirm before
    reusing elsewhere.
    """
    session = client.createSession(commandArgs["username"], commandArgs["password"])
    scriptService = session.getScriptService()
    print "got session..."
    if len(args) == 0: print "Choose from these options by adding argument: help, list, upload, params, run, disable, clean"
    # list scripts
    if "list" in args:
        listScripts(scriptService)
    # upload script, storing the new script ID back into commandArgs so a
    # following "run"/"params" action uses it without a fresh lookup
    if "upload" in args:
        commandArgs["script"] = uploadScript(session, scriptService, commandArgs["script"])
    # get params of script
    if "params" in args:
        getParams(scriptService, commandArgs["script"])
    # run script
    if "run" in args:
        runScript(session, scriptService, commandArgs["script"])
    # disables script by changing the OriginalFile mimetype, from 'text/x-python' to 'text/plain'
    if "disable" in args:
        disableScript(session, commandArgs["script"])
    if "clean" in args:
        cleanUpScriptFiles(session, scriptService)
if __name__ == "__main__":
    # Parse -s/-u/-p/-f options; the remaining positional args select actions
    # ("list", "upload", "run", ...). NOTE: 'args' stays module-level because
    # doWorkflow() reads it as a global.
    commandArgs, args = readCommandArgs()
    if "help" in args:
        printHelp(args)
    elif "host" not in commandArgs:
        print "No server specified. Use -s serverName"
        print "For more info, use: python adminWorkflow help"
    elif "username" not in commandArgs:
        print "No user specified. Use -u userName"
        print "For more info, use: python adminWorkflow help"
    else:
        client = omero.client(commandArgs["host"])
        try:
            # log on to the server, create client and session and scripting service
            if "password" not in commandArgs:
                # prompt interactively so the password never appears in shell history
                print "NB: you can also run script with -p yourPassword"
                commandArgs["password"] = getpass.getpass()
            doWorkflow(client, commandArgs)
        except:
            raise
        finally:
            # always release the server-side session
            client.closeSession()
| gpl-2.0 |
natanlailari/PennApps2015-Heartmates | venv/lib/python2.7/site-packages/werkzeug/wsgi.py | 146 | 37745 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
    """Marks a function as responder.  Decorate a function with it and it
    will automatically call the return value as WSGI application.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def wrapped(*args):
        # call f, then invoke its return value as a WSGI app with the last
        # two positional args (environ, start_response)
        return f(*args)(*args[-2:])
    return update_wrapper(wrapped, f)
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False, trusted_hosts=None):
    """A handy helper function that recreates the full URL as IRI for the
    current request or parts of it.  Here an example:

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    This optionally verifies that the host is in a list of trusted hosts;
    if not, a :exc:`~werkzeug.exceptions.SecurityError` is raised.

    Note that the string returned might contain unicode characters as the
    representation is an IRI not an URI.  If you need an ASCII only
    representation you can use the :func:`~werkzeug.urls.iri_to_uri`
    function.

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    parts = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
    if host_only:
        return uri_to_iri(''.join(parts) + '/')
    parts.append(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
    parts.append('/')
    if not root_only:
        parts.append(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
        if not strip_querystring:
            qs = get_query_string(environ)
            if qs:
                parts.append('?' + qs)
    return uri_to_iri(''.join(parts))
def host_is_trusted(hostname, trusted_list):
    """Checks if a host is trusted against a list.  This also takes care
    of port normalization.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames to check against.  If a
                         hostname starts with a dot it will match against
                         all subdomains as well.
    """
    if not hostname:
        return False

    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _normalize(host):
        # drop an optional port before IDNA-encoding
        if ':' in host:
            host = host.rsplit(':', 1)[0]
        return _encode_idna(host)

    hostname = _normalize(hostname)
    for ref in trusted_list:
        # a leading dot means "this domain and all its subdomains"
        suffix_match = ref.startswith('.')
        if suffix_match:
            ref = ref[1:]
        ref = _normalize(ref)
        if ref == hostname:
            return True
        if suffix_match and hostname.endswith('.' + ref):
            return True
    return False
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment.  This takes care
    of the `X-Forwarded-Host` header.  Optionally it verifies that the host
    is in a list of trusted hosts.  If the host is not in there it will raise
    a :exc:`~werkzeug.exceptions.SecurityError`.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    if 'HTTP_X_FORWARDED_HOST' in environ:
        # proxies may append hosts; the first entry is the original one
        host = environ['HTTP_X_FORWARDED_HOST'].split(',')[0].strip()
    elif 'HTTP_HOST' in environ:
        host = environ['HTTP_HOST']
    else:
        host = environ['SERVER_NAME']
        # only append the port when it is not the scheme's default
        scheme_and_port = (environ['wsgi.url_scheme'], environ['SERVER_PORT'])
        if scheme_and_port not in (('https', '443'), ('http', '80')):
            host += ':' + environ['SERVER_PORT']
    if trusted_hosts is not None:
        if not host_is_trusted(host, trusted_hosts):
            from werkzeug.exceptions import SecurityError
            raise SecurityError('Host "%s" is not trusted' % host)
    return host
def get_content_length(environ):
    """Returns the content length from the WSGI environment as
    integer.  If it's not available `None` is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    try:
        # clamp negative values to 0; non-numeric values fall through to None
        return max(0, int(environ['CONTENT_LENGTH']))
    except (KeyError, ValueError, TypeError):
        return None
def get_input_stream(environ, safe_fallback=True):
    """Returns the input stream from the WSGI environment and wraps it
    in the most sensible way possible.  The stream returned is not the
    raw WSGI stream in most cases but one that is safe to read from
    without taking into account the content length.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: indicates whether the function should use an empty
                          stream as safe fallback or just return the original
                          WSGI input stream if it can't wrap it safely.  The
                          default is to return an empty stream in those cases.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)

    # 'wsgi.input_terminated' is a server extension promising the stream
    # ends by itself, so it is safe to read to EOF unchanged.
    if environ.get('wsgi.input_terminated'):
        return stream

    # Without a content length the read cannot be bounded: hand back an
    # empty stream (safe) or the raw stream (unsafe, caller's choice).
    if content_length is None:
        if safe_fallback:
            return _empty_stream
        return stream

    # Otherwise limit the stream to the content length.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Returns the `QUERY_STRING` from the WSGI environment.  This also takes
    care about the WSGI decoding dance on Python 3 environments as a
    native string.  The string returned will be restricted to ASCII
    characters.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string from.
    """
    raw_qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
    # Some browsers (notably IE) send non-ASCII bytes in the query string;
    # percent-quote anything outside the allowed set so the result is ASCII.
    return try_coerce_native(url_quote(raw_qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the `PATH_INFO` from the WSGI environment and properly
    decodes it.  This also takes care about the WSGI decoding dance
    on Python 3 environments.  If the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
    return to_unicode(raw_path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Returns the `SCRIPT_NAME` from the WSGI environment and properly
    decodes it.  This also takes care about the WSGI decoding dance
    on Python 3 environments.  If the `charset` is set to `None` a
    bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    script_name = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
    return to_unicode(script_name, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.
    If the `charset` is set to `None` a bytestring is returned.

    If there are empty segments (``'/foo//bar``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None

    script_name = environ.get('SCRIPT_NAME', '')

    # Shift any leading slashes from PATH_INFO over to SCRIPT_NAME so
    # empty segments are preserved there.
    stripped = path.lstrip('/')
    if stripped != path:
        script_name += '/' * (len(path) - len(stripped))
    path = stripped

    if '/' not in path:
        # last segment: PATH_INFO becomes empty
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + path
        segment_bytes = wsgi_get_bytes(path)
    else:
        segment, remainder = path.split('/', 1)
        environ['PATH_INFO'] = '/' + remainder
        environ['SCRIPT_NAME'] = script_name + segment
        segment_bytes = wsgi_get_bytes(segment)

    return to_unicode(segment_bytes, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none.  Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` a bytestring is returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    # only the first segment is needed; maxsplit=1 avoids splitting the rest
    segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
    if not segments:
        return None
    return to_unicode(wsgi_get_bytes(segments[0]),
                      charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path. The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment. The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    .. versionadded:: 0.6

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    def _normalize_netloc(scheme, netloc):
        # strip credentials ("user:pass@") and drop the default port for
        # the scheme so equivalent netlocs compare equal
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    # resolve a relative path against the base before comparing
    cur_scheme, cur_netloc, cur_path, = \
        url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        # http and https are treated as the same server
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        # schemes must match exactly (and still be http/https)
        if not (base_scheme in (u'http', u'https') and
                base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None

    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):
"""A WSGI middleware that provides static content for development
environments or simple server setups. Usage is quite simple::
import os
from werkzeug.wsgi import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/shared': os.path.join(os.path.dirname(__file__), 'shared')
})
The contents of the folder ``./shared`` will now be available on
``http://example.com/shared/``. This is pretty useful during development
because a standalone media server is not required. One can also mount
files on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/shared': ('myapplication', 'shared_files')
})
This will then serve the ``shared_files`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
Currently the middleware does not support non ASCII filenames. If the
encoding on the file system happens to be the encoding of the URI it may
work but this could also be by accident. We strongly suggest using ASCII
only file names for static files.
The middleware will guess the mimetype using the Python `mimetype`
module. If it's unable to figure out the charset it will fall back
to `fallback_mimetype`.
.. versionchanged:: 0.5
The cache timeout is configurable now.
.. versionadded:: 0.6
The `fallback_mimetype` parameter was added.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
:Param cache_timeout: the cache timeout in seconds for the headers.
"""
def __init__(self, app, exports, disallow=None, cache=True,
             cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
    self.app = app
    self.exports = {}
    self.cache = cache
    self.cache_timeout = cache_timeout
    for key, value in iteritems(exports):
        if isinstance(value, tuple):
            # ('package', 'resource/path') -> serve package data.
            loader = self.get_package_loader(*value)
        elif isinstance(value, string_types):
            # A plain path: a single file or a whole directory.
            if os.path.isfile(value):
                loader = self.get_file_loader(value)
            else:
                loader = self.get_directory_loader(value)
        else:
            raise TypeError('unknown def %r' % value)
        self.exports[key] = loader
    if disallow is not None:
        from fnmatch import fnmatch
        # The class docstring advertises a *list* of fnmatch rules; a
        # bare pattern string is also accepted for convenience.
        # (Previously a list would have crashed inside fnmatch().)
        if isinstance(disallow, string_types):
            disallow = [disallow]
        self.is_allowed = lambda x: not any(
            fnmatch(x, pattern) for pattern in disallow)
    self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename):
    """Decide whether *filename* may be served.

    Subclasses may override this to restrict access.  Note that passing
    `disallow` to the constructor replaces this method on the instance
    with an fnmatch-based check.
    """
    return True
def _opener(self, filename):
    # Deferred opener: returns a zero-argument callable producing
    # (binary file object, mtime as naive UTC datetime, size in bytes),
    # so the file is only touched when it is actually served.
    return lambda: (
        open(filename, 'rb'),
        datetime.utcfromtimestamp(os.path.getmtime(filename)),
        int(os.path.getsize(filename))
    )
def get_file_loader(self, filename):
    """Return a loader that always serves the single file *filename*,
    regardless of the requested sub-path.
    """
    def loader(ignored):
        return os.path.basename(filename), self._opener(filename)
    return loader
def get_package_loader(self, package, package_path):
    # Serve files from package data via pkg_resources.  For packages
    # living on a real filesystem the normal file opener is used;
    # otherwise (e.g. zipimport) the resource stream is returned with
    # the middleware's start-up time as mtime and a size of 0.
    from pkg_resources import DefaultProvider, ResourceManager, \
        get_provider
    loadtime = datetime.utcnow()
    provider = get_provider(package)
    manager = ResourceManager()
    filesystem_bound = isinstance(provider, DefaultProvider)

    def loader(path):
        # `None` means "the mount point itself" which a package export
        # cannot serve.
        if path is None:
            return None, None
        path = posixpath.join(package_path, path)
        if not provider.has_resource(path):
            return None, None
        basename = posixpath.basename(path)
        if filesystem_bound:
            return basename, self._opener(
                provider.get_resource_filename(manager, path))
        # Non-filesystem provider: real mtime/size are unavailable, so
        # report import time and a size of 0.
        return basename, lambda: (
            provider.get_resource_stream(manager, path),
            loadtime,
            0
        )
    return loader
def get_directory_loader(self, directory):
    """Return a loader that serves files below *directory*.

    A request for the mount point itself (``path is None``) maps to the
    directory path, which is only served if it happens to be a regular
    file; anything that is not a file yields ``(None, None)``.
    """
    def loader(path):
        candidate = directory if path is None else os.path.join(directory, path)
        if os.path.isfile(candidate):
            return os.path.basename(candidate), self._opener(candidate)
        return None, None
    return loader
def generate_etag(self, mtime, file_size, real_filename):
    """Build a content identifier from the modification time, file size
    and an adler32 checksum of the (filesystem-encoded) filename.
    """
    if not isinstance(real_filename, bytes):
        real_filename = real_filename.encode(sys.getfilesystemencoding())
    checksum = adler32(real_filename) & 0xffffffff
    return 'wzsdm-%d-%s-%s' % (mktime(mtime.timetuple()), file_size, checksum)
def __call__(self, environ, start_response):
    # Normalize the request path: decode (bytes on Python 2), strip
    # surrounding slashes, fold platform separators to '/', and drop
    # empty and '..' segments so a request cannot escape an export root.
    cleaned_path = get_path_info(environ)
    if PY2:
        cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
    # sanitize the path for non unix systems
    cleaned_path = cleaned_path.strip('/')
    for sep in os.sep, os.altsep:
        if sep and sep != '/':
            cleaned_path = cleaned_path.replace(sep, '/')
    path = '/'.join([''] + [x for x in cleaned_path.split('/')
                            if x and x != '..'])
    # Find the first export matching the path: either an exact match on
    # the mount point (loader called with None) or a prefix match
    # (loader called with the remainder below the mount point).
    file_loader = None
    for search_path, loader in iteritems(self.exports):
        if search_path == path:
            real_filename, file_loader = loader(None)
            if file_loader is not None:
                break
        if not search_path.endswith('/'):
            search_path += '/'
        if path.startswith(search_path):
            real_filename, file_loader = loader(path[len(search_path):])
            if file_loader is not None:
                break
    # No export matched, or access is denied: fall through to the
    # wrapped application.
    if file_loader is None or not self.is_allowed(real_filename):
        return self.app(environ, start_response)
    guessed_type = mimetypes.guess_type(real_filename)
    mime_type = guessed_type[0] or self.fallback_mimetype
    f, mtime, file_size = file_loader()
    headers = [('Date', http_date())]
    if self.cache:
        timeout = self.cache_timeout
        etag = self.generate_etag(mtime, file_size, real_filename)
        headers += [
            ('Etag', '"%s"' % etag),
            ('Cache-Control', 'max-age=%d, public' % timeout)
        ]
        # Conditional request: answer 304 without a body and close the
        # file that was already opened by the loader.
        if not is_resource_modified(environ, etag, last_modified=mtime):
            f.close()
            start_response('304 Not Modified', headers)
            return []
        headers.append(('Expires', http_date(time() + timeout)))
    else:
        headers.append(('Cache-Control', 'public'))
    headers.extend((
        ('Content-Type', mime_type),
        ('Content-Length', str(file_size)),
        ('Last-Modified', http_date(mtime))
    ))
    start_response('200 OK', headers)
    # Stream the file via the server's native file wrapper if available.
    return wrap_file(environ, f)
class DispatcherMiddleware(object):
    """Dispatches to multiple WSGI applications mounted at URL prefixes.

    This is useful to combine several WSGI applications::

        app = DispatcherMiddleware(app, {
            '/app2':        app2,
            '/app3':        app3
        })

    Requests whose path does not start with any mounted prefix fall
    through to the wrapped default application.
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts if mounts else {}

    def __call__(self, environ, start_response):
        # Peel segments off the right end of PATH_INFO until the
        # remainder matches a mount point (or nothing is left).
        remaining = environ.get('PATH_INFO', '')
        trailing = ''
        while '/' in remaining:
            if remaining in self.mounts:
                application = self.mounts[remaining]
                break
            remaining, _, segment = remaining.rpartition('/')
            trailing = '/%s%s' % (segment, trailing)
        else:
            application = self.mounts.get(remaining, self.app)
        environ['SCRIPT_NAME'] = environ.get('SCRIPT_NAME', '') + remaining
        environ['PATH_INFO'] = trailing
        return application(environ, start_response)
@implements_iterator
class ClosingIterator(object):
    """The WSGI specification requires that all middlewares and gateways
    respect the `close` callback of an iterator.  Because it is useful to add
    another close action to a returned iterator and adding a custom iterator
    is a boring task this class can be used for that::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    If there is just one close function it can be passed instead of the list.

    A closing iterator is not needed if the application uses response objects
    and finishes the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(self, iterable, callbacks=None):
        iterator = iter(iterable)
        self._next = partial(next, iterator)
        # Normalize `callbacks`: None -> [], a single callable -> [cb],
        # any other iterable -> its own list copy (so we can mutate it).
        if callbacks is None:
            callbacks = []
        elif callable(callbacks):
            callbacks = [callbacks]
        else:
            callbacks = list(callbacks)
        # The wrapped iterator's own close() (if present) must run
        # before any of the extra callbacks.
        iterable_close = getattr(iterator, 'close', None)
        if iterable_close:
            callbacks.insert(0, iterable_close)
        self._callbacks = callbacks

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        # Invoke every registered callback in registration order.
        for callback in self._callbacks:
            callback()
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
More information about file wrappers are available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
@implements_iterator
class FileWrapper(object):
    """Iterable adapter for :class:`file`-like objects.

    Yields blocks of `buffer_size` bytes until the wrapped file has been
    fully consumed.  You should not use this class directly but rather
    the :func:`wrap_file` function, which uses the WSGI server's native
    file wrapper when one is available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`BaseResponse` you
    have to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        if hasattr(self.file, 'close'):
            self.file.close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
def _make_chunk_iter(stream, limit, buffer_size):
    """Helper for the line and chunk iter functions."""
    # Strings and byte objects are iterable too, but iterating them
    # item-wise is almost certainly a caller bug, so reject explicitly.
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError('Passed a string or byte object instead of '
                        'true iterator or stream.')
    # Plain iterables are passed through, skipping falsy (empty) items.
    if not hasattr(stream, 'read'):
        for item in stream:
            if item:
                yield item
        return
    # File-like objects are capped at `limit` bytes and read in
    # `buffer_size` chunks until exhausted.
    if not isinstance(stream, LimitedStream) and limit is not None:
        stream = LimitedStream(stream, limit)
    _read = stream.read
    while 1:
        item = _read(buffer_size)
        if not item:
            break
        yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.

    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.

    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.

    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek at the first chunk to learn whether we deal with text or
    # bytes, then push it back onto the iterator.
    first_item = next(_iter, '')
    if not first_item:
        return

    s = make_literal_wrapper(first_item)
    empty = s('')
    cr = s('\r')
    lf = s('\n')
    crlf = s('\r\n')

    _iter = chain((first_item,), _iter)

    def _iter_basic_lines():
        # Yield lines as delimited by splitlines(); a line spanning
        # several chunks is accumulated in `buffer` until its newline
        # arrives.
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, '')
            if not new_data:
                break
            new_buf = []
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
            buffer = new_buf
        if buffer:
            yield _join(buffer)

    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)

    # Peek at the first chunk to decide between text and bytes mode and
    # build the matching separator splitter.
    first_item = next(_iter, '')
    if not first_item:
        return

    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        for item in chain(buffer, chunks):
            if item == separator:
                # A complete chunk ends here; flush everything
                # accumulated before the separator.
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    # Trailing data without a final separator is still a chunk.
    if buffer:
        yield _join(buffer)
@implements_iterator
class LimitedStream(object):
    """Wraps a stream so that it doesn't read more than n bytes.  If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string.  The return value of that function is forwarded
    to the reader function.  So if it returns an empty string
    :meth:`read` will return an empty string as well.

    The limit however must never be higher than what the stream can
    output.  Otherwise :meth:`readlines` will try to read past the
    limit.

    .. admonition:: Note on WSGI compliance

       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """

    def __init__(self, stream, limit):
        # Only the bound read/readline methods are kept; the position
        # is tracked locally because the wrapped stream may not support
        # tell()/seek().
        self._read = stream.read
        self._readline = stream.readline
        self._pos = 0
        self.limit = limit

    def __iter__(self):
        return self

    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit

    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)

    def on_disconnect(self):
        """What should happen if a disconnect is detected?  The return
        value of this function is returned from read functions in case
        the client went away.  By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from werkzeug.exceptions import ClientDisconnected
        raise ClientDisconnected()

    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream.  This consumes all the data left until the
        limit is reached.

        :param chunk_size: the size for a chunk.  It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            # Never read more than what is still owed to the limit.
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk

    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.

        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        # A short read before the limit means the client went away.
        if to_read and len(read) != to_read:
            return self.on_disconnect()
        self._pos += len(read)
        return read

    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        # Always cap the readline size at the remaining budget.
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        # An empty line while bytes were still expected: disconnect.
        if size and not line:
            return self.on_disconnect()
        self._pos += len(line)
        return line

    def readlines(self, size=None):
        """Reads a file into a list of strings.  It calls :meth:`readline`
        until the file is read to the end.  It does support the optional
        `size` argument if the underlaying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # NOTE(review): `last_pos - self._pos` is never positive
                # here (last_pos is refreshed right after each readline),
                # so `size` effectively stays constant and acts as a
                # per-line cap rather than a running total.  The operands
                # look swapped -- behavior kept as-is; verify intent.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result

    def tell(self):
        """Returns the position of the stream.

        .. versionadded:: 0.9
        """
        return self._pos

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
| apache-2.0 |
MattAllmendinger/coala | coalib/misc/BuildManPage.py | 16 | 7292 | import argparse
import datetime
from distutils.core import Command
from distutils.errors import DistutilsOptionError
class BuildManPage(Command):
    """
    Add a ``build_manpage`` command to your setup.py.
    To use this Command class add a command to call this class::

        # For setuptools
        setup(
            entry_points={
                "distutils.commands": [
                    "build_manpage = coalib.misc.BuildManPage:BuildManPage"
                ]
            }
        )

        # For distutils
        from coalib.misc.BuildManPage import BuildManPage

        setup(
            cmdclass={'build_manpage': BuildManPage}
        )

    You can then use the following setup command to produce a man page::

        $ python setup.py build_manpage --output=coala.1 \
            --parser=coalib.parsing.DefaultArgParser:default_arg_parser

    If you want to build the man page automatically on every build, add
    the following to your ``setup.cfg``::

        [build_manpage]
        output = <appname>.1
        parser = <path_to_your_parser>
    """
    user_options = [
        ('output=', 'O', 'output file'),
        # Fixed help text: spaces at the string-concatenation joints
        # were missing and "arparse" was a typo for "argparse".
        ('parser=', None, 'module path to an ArgumentParser instance '
         '(e.g. mymod:func, where func is a method or function which '
         'returns an argparse.ArgumentParser instance).'),
    ]

    def initialize_options(self):
        # distutils hook: declare every command option before parsing.
        self.output = None
        self.parser = None

    def finalize_options(self):
        # Both options are mandatory; fail early with a distutils error.
        if self.output is None:
            raise DistutilsOptionError('\'output\' option is required')
        if self.parser is None:
            raise DistutilsOptionError('\'parser\' option is required')
        # `parser` has the form "package.module:callable"; import the
        # module and call the factory with our man-page formatter class.
        mod_name, func_name = self.parser.split(':')
        fromlist = mod_name.split('.')
        mod = __import__(mod_name, fromlist=fromlist)
        self._parser = (
            getattr(mod, func_name)(formatter_class=ManPageFormatter))

        self.announce('Writing man page %s' % self.output)
        self._today = datetime.date.today()

    def run(self):
        # Collect metadata from the distribution, render the man page
        # and write it to the configured output file.
        dist = self.distribution
        homepage = dist.get_url()
        maintainer = dist.get_maintainer()
        _license = dist.get_license()

        appname = self._parser.prog

        sections = {"see also": ("Online documentation: {}".format(homepage)),
                    "maintainer(s)": maintainer,
                    "license": _license}

        mpf = ManPageFormatter(appname,
                               desc=dist.get_description(),
                               long_desc=dist.get_long_description(),
                               ext_sections=sections,
                               parser=self._parser)

        formatted_man_page = mpf.format_man_page()

        with open(self.output, 'w') as man_file:
            man_file.write(formatted_man_page)
class ManPageFormatter(argparse.HelpFormatter):
    """Renders an :class:`argparse.ArgumentParser` as a troff man page.

    The page is assembled from a title header, NAME, SYNOPSIS,
    DESCRIPTION and OPTIONS sections plus arbitrary extra footer
    sections supplied via ``ext_sections``.
    """

    def __init__(self,
                 prog,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 desc=None,
                 long_desc=None,
                 ext_sections=None,
                 parser=None):
        argparse.HelpFormatter.__init__(self, prog)

        self._prog = prog
        self._section = 1  # man page section number used in .TH
        self._today = datetime.date.today().strftime('%Y\\-%m\\-%d')
        self._desc = desc
        self._long_desc = long_desc
        self._ext_sections = ext_sections
        self._parser = parser

    def _format_action_invocation(self, action):
        # Positionals render as their metavar; optionals in bold with
        # underlined argument placeholders.
        if not action.option_strings:
            metavar, = self._metavar_formatter(action, action.dest)(1)
            return metavar

        else:
            # if the Optional doesn't take a value, format is:
            #    -s, --long
            if action.nargs == 0:
                parts = [ManPageFormatter._bold(action_str)
                         for action_str in action.option_strings]

            # if the Optional takes a value, format is:
            #    -s ARGS, --long ARGS
            else:
                default = ManPageFormatter._underline(action.dest.upper())
                args_string = self._format_args(action, default)
                parts = ['%s %s' % (self._bold(option_string), args_string)
                         for option_string in action.option_strings]

            return ', '.join(parts)

    @staticmethod
    def _markup(string):
        # Escape hyphens so troff does not interpret them as dashes.
        return string.replace('-', '\\-')

    @staticmethod
    def _add_format(string, front, back):
        # Wrap *string* in the given troff escapes unless it already
        # starts/ends with them.
        if not string.strip().startswith(front):
            string = front + string
        if not string.strip().endswith(back):
            string = string + back
        return string

    @staticmethod
    def _underline(string):
        # Troff italic/underline: \fI ... \fR
        return ManPageFormatter._add_format(string, "\\fI", "\\fR")

    @staticmethod
    def _bold(string):
        # Troff bold: \fB ... \fR
        return ManPageFormatter._add_format(string, "\\fB", "\\fR")

    def _mk_title(self):
        # .TH <name> <section> <date>
        return '.TH {0} {1} {2}\n'.format(self._prog,
                                          self._section,
                                          self._today)

    def _mk_name(self):
        # NAME section containing just the program name.
        return '.SH NAME\n%s\n' % (self._parser.prog)

    def _mk_synopsis(self):
        # Reuse argparse's usage formatter, then strip the leading
        # program name (it is rendered separately in bold).
        self.add_usage(self._parser.usage,
                       self._parser._actions,
                       self._parser._mutually_exclusive_groups,
                       prefix='')
        usage = self._format_usage(None,
                                   self._parser._actions,
                                   self._parser._mutually_exclusive_groups,
                                   '')

        usage = usage.replace('%s ' % self._prog, '')
        usage = ('.SH SYNOPSIS\n \\fB%s\\fR %s\n'
                 % (ManPageFormatter._markup(self._prog), usage))
        return usage

    def _mk_description(self):
        # DESCRIPTION built from the distribution's long description;
        # newlines are turned into troff line breaks.
        if self._long_desc:
            long_desc = self._long_desc.replace('\n', '\n.br\n')
            return '.SH DESCRIPTION\n%s\n' % self._markup(long_desc)
        else:
            return ''

    def _mk_options(self):
        # Format all argument groups through a fresh argparse formatter.
        formatter = self._parser._get_formatter()

        # positionals, optionals and user-defined groups
        for action_group in self._parser._action_groups:
            formatter.start_section(None)
            formatter.add_text(None)
            formatter.add_arguments(action_group._group_actions)
            formatter.end_section()

        # epilog
        formatter.add_text(self._parser.epilog)

        # determine help from format above
        return '.SH OPTIONS\n' + formatter.format_help()

    def _mk_footer(self):
        # Extra sections (e.g. SEE ALSO, LICENSE) emitted in sorted
        # key order; anything non-iterable is silently skipped.
        sections = self._ext_sections
        if not hasattr(sections, '__iter__'):
            return ''

        footer = []
        for section in sorted(sections.keys()):
            part = ".SH {}\n {}".format(section.upper(), sections[section])
            footer.append(part)

        return '\n'.join(footer)

    def format_man_page(self):
        """Assemble and return the complete man page as one string."""
        page = []
        page.append(self._mk_title())
        page.append(self._mk_name())
        page.append(self._mk_synopsis())
        page.append(self._mk_description())
        page.append(self._mk_options())
        page.append(self._mk_footer())

        return ''.join(page)
| agpl-3.0 |
shravan-achar/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_handshake_hybi.py | 413 | 22552 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket.handshake._base import AbortedByUserException
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import VersionException
from mod_pywebsocket.handshake.hybi import Handshaker
import mock
class RequestDefinition(object):
    """Bundle of method, URI and headers describing an opening handshake
    request; used to construct mock requests for the handshake
    processor tests.
    """

    def __init__(self, method, uri, headers):
        self.method, self.uri, self.headers = method, uri, headers
def _create_good_request_def():
    """Return a RequestDefinition for a well-formed RFC 6455 opening
    handshake (version 13, the sample key from the RFC).
    """
    headers = {
        'Host': 'server.example.com',
        'Upgrade': 'websocket',
        'Connection': 'Upgrade',
        'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
        'Sec-WebSocket-Version': '13',
        'Origin': 'http://example.com',
    }
    return RequestDefinition('GET', '/demo', headers)
def _create_request(request_def):
    """Build a MockRequest (with an empty MockConn) from *request_def*."""
    return mock.MockRequest(
        method=request_def.method,
        uri=request_def.uri,
        headers_in=request_def.headers,
        connection=mock.MockConn(''))
def _create_handshaker(request):
    """Wrap *request* in a Handshaker driven by a no-op mock dispatcher."""
    return Handshaker(request, mock.MockDispatcher())
class SubprotocolChoosingDispatcher(object):
    """A dispatcher for testing.  Selects the subprotocol at position
    ``index`` of the client's requested list as ws_protocol; a negative
    ``index`` makes it fall back to ``default_value`` instead.
    """

    def __init__(self, index, default_value=None):
        self.index = index
        self.default_value = default_value

    def do_extra_handshake(self, conn_context):
        if self.index < 0:
            conn_context.ws_protocol = self.default_value
        else:
            requested = conn_context.ws_requested_protocols
            conn_context.ws_protocol = requested[self.index]

    def transfer_data(self, conn_context):
        pass
class HandshakeAbortedException(Exception):
    """Raised by AbortingDispatcher to simulate a rejected handshake."""
class AbortingDispatcher(object):
    """A dispatcher for testing whose extra-handshake hook always raises
    HandshakeAbortedException, rejecting every request.
    """

    def do_extra_handshake(self, conn_context):
        message = 'An exception to reject the request'
        raise HandshakeAbortedException(message)

    def transfer_data(self, conn_context):
        pass
class AbortedByUserDispatcher(object):
    """A dispatcher for testing whose extra-handshake hook raises an
    AbortedByUserException, rejecting every request.
    """

    def do_extra_handshake(self, conn_context):
        message = ('An AbortedByUserException to reject the '
                   'request')
        raise AbortedByUserException(message)

    def transfer_data(self, conn_context):
        pass
# Canonical success response for the request built by
# _create_good_request_def(): the accept token is the SHA-1/base64
# digest of the RFC 6455 sample key 'dGhlIHNhbXBsZSBub25jZQ=='.
_EXPECTED_RESPONSE = (
    'HTTP/1.1 101 Switching Protocols\r\n'
    'Upgrade: websocket\r\n'
    'Connection: Upgrade\r\n'
    'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n\r\n')
class HandshakerTest(unittest.TestCase):
"""A unittest for draft-ietf-hybi-thewebsocketprotocol-06 and later
handshake processor.
"""
def test_do_handshake(self):
request = _create_request(_create_good_request_def())
dispatcher = mock.MockDispatcher()
handshaker = Handshaker(request, dispatcher)
handshaker.do_handshake()
self.assertTrue(dispatcher.do_extra_handshake_called)
self.assertEqual(
_EXPECTED_RESPONSE, request.connection.written_data())
self.assertEqual('/demo', request.ws_resource)
self.assertEqual('http://example.com', request.ws_origin)
self.assertEqual(None, request.ws_protocol)
self.assertEqual(None, request.ws_extensions)
self.assertEqual(common.VERSION_HYBI_LATEST, request.ws_version)
def test_do_handshake_with_extra_headers(self):
request_def = _create_good_request_def()
# Add headers not related to WebSocket opening handshake.
request_def.headers['FooKey'] = 'BarValue'
request_def.headers['EmptyKey'] = ''
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(
_EXPECTED_RESPONSE, request.connection.written_data())
def test_do_handshake_with_capitalized_value(self):
request_def = _create_good_request_def()
request_def.headers['upgrade'] = 'WEBSOCKET'
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(
_EXPECTED_RESPONSE, request.connection.written_data())
request_def = _create_good_request_def()
request_def.headers['Connection'] = 'UPGRADE'
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(
_EXPECTED_RESPONSE, request.connection.written_data())
def test_do_handshake_with_multiple_connection_values(self):
request_def = _create_good_request_def()
request_def.headers['Connection'] = 'Upgrade, keep-alive, , '
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(
_EXPECTED_RESPONSE, request.connection.written_data())
def test_aborting_handshake(self):
handshaker = Handshaker(
_create_request(_create_good_request_def()),
AbortingDispatcher())
# do_extra_handshake raises an exception. Check that it's not caught by
# do_handshake.
self.assertRaises(HandshakeAbortedException, handshaker.do_handshake)
def test_do_handshake_with_protocol(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat'
request = _create_request(request_def)
handshaker = Handshaker(request, SubprotocolChoosingDispatcher(0))
handshaker.do_handshake()
EXPECTED_RESPONSE = (
'HTTP/1.1 101 Switching Protocols\r\n'
'Upgrade: websocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n'
'Sec-WebSocket-Protocol: chat\r\n\r\n')
self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data())
self.assertEqual('chat', request.ws_protocol)
def test_do_handshake_protocol_not_in_request_but_in_response(self):
request_def = _create_good_request_def()
request = _create_request(request_def)
handshaker = Handshaker(
request, SubprotocolChoosingDispatcher(-1, 'foobar'))
# No request has been made but ws_protocol is set. HandshakeException
# must be raised.
self.assertRaises(HandshakeException, handshaker.do_handshake)
def test_do_handshake_with_protocol_no_protocol_selection(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Protocol'] = 'chat, superchat'
request = _create_request(request_def)
handshaker = _create_handshaker(request)
# ws_protocol is not set. HandshakeException must be raised.
self.assertRaises(HandshakeException, handshaker.do_handshake)
def test_do_handshake_with_extensions(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Extensions'] = (
'permessage-compress; method=deflate, unknown')
EXPECTED_RESPONSE = (
'HTTP/1.1 101 Switching Protocols\r\n'
'Upgrade: websocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=\r\n'
'Sec-WebSocket-Extensions: permessage-compress; method=deflate\r\n'
'\r\n')
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(EXPECTED_RESPONSE, request.connection.written_data())
self.assertEqual(1, len(request.ws_extensions))
extension = request.ws_extensions[0]
self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
extension.name())
self.assertEqual(['method'], extension.get_parameter_names())
self.assertEqual('deflate', extension.get_parameter_value('method'))
self.assertEqual(1, len(request.ws_extension_processors))
self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
request.ws_extension_processors[0].name())
def test_do_handshake_with_permessage_compress(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Extensions'] = (
'permessage-compress; method=deflate')
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(1, len(request.ws_extensions))
self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
request.ws_extensions[0].name())
self.assertEqual(1, len(request.ws_extension_processors))
self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
request.ws_extension_processors[0].name())
def test_do_handshake_with_quoted_extensions(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Extensions'] = (
'permessage-compress; method=deflate, , '
'unknown; e = "mc^2"; ma="\r\n \\\rf "; pv=nrt')
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(2, len(request.ws_requested_extensions))
first_extension = request.ws_requested_extensions[0]
self.assertEqual('permessage-compress', first_extension.name())
self.assertEqual(['method'], first_extension.get_parameter_names())
self.assertEqual('deflate',
first_extension.get_parameter_value('method'))
second_extension = request.ws_requested_extensions[1]
self.assertEqual('unknown', second_extension.name())
self.assertEqual(
['e', 'ma', 'pv'], second_extension.get_parameter_names())
self.assertEqual('mc^2', second_extension.get_parameter_value('e'))
self.assertEqual(' \rf ', second_extension.get_parameter_value('ma'))
self.assertEqual('nrt', second_extension.get_parameter_value('pv'))
def test_do_handshake_with_optional_headers(self):
request_def = _create_good_request_def()
request_def.headers['EmptyValue'] = ''
request_def.headers['AKey'] = 'AValue'
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(
'AValue', request.headers_in['AKey'])
self.assertEqual(
'', request.headers_in['EmptyValue'])
    def test_abort_extra_handshake(self):
        # AbortedByUserDispatcher's do_extra_handshake() raises
        # AbortedByUserException, simulating a user handler that refuses
        # the connection.
        handshaker = Handshaker(
            _create_request(_create_good_request_def()),
            AbortedByUserDispatcher())
        # do_extra_handshake raises an AbortedByUserException. Check that it's
        # not caught by do_handshake.
        self.assertRaises(AbortedByUserException, handshaker.do_handshake)
def test_do_handshake_with_mux_and_deflate_frame(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % (
common.MUX_EXTENSION,
common.DEFLATE_FRAME_EXTENSION))
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
# mux should be rejected.
self.assertEqual(1, len(request.ws_extensions))
self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
request.ws_extensions[0].name())
self.assertEqual(2, len(request.ws_extension_processors))
self.assertEqual(common.MUX_EXTENSION,
request.ws_extension_processors[0].name())
self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
request.ws_extension_processors[1].name())
self.assertFalse(hasattr(request, 'mux_processor'))
def test_do_handshake_with_deflate_frame_and_mux(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Extensions'] = ('%s, %s' % (
common.DEFLATE_FRAME_EXTENSION,
common.MUX_EXTENSION))
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
# mux should be rejected.
self.assertEqual(1, len(request.ws_extensions))
first_extension = request.ws_extensions[0]
self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
first_extension.name())
self.assertEqual(2, len(request.ws_extension_processors))
self.assertEqual(common.DEFLATE_FRAME_EXTENSION,
request.ws_extension_processors[0].name())
self.assertEqual(common.MUX_EXTENSION,
request.ws_extension_processors[1].name())
self.assertFalse(hasattr(request, 'mux'))
def test_do_handshake_with_permessage_compress_and_mux(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Extensions'] = (
'%s; method=deflate, %s' % (
common.PERMESSAGE_COMPRESSION_EXTENSION,
common.MUX_EXTENSION))
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
self.assertEqual(1, len(request.ws_extensions))
self.assertEqual(common.MUX_EXTENSION,
request.ws_extensions[0].name())
self.assertEqual(2, len(request.ws_extension_processors))
self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
request.ws_extension_processors[0].name())
self.assertEqual(common.MUX_EXTENSION,
request.ws_extension_processors[1].name())
self.assertTrue(hasattr(request, 'mux_processor'))
self.assertTrue(request.mux_processor.is_active())
mux_extensions = request.mux_processor.extensions()
self.assertEqual(1, len(mux_extensions))
self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
mux_extensions[0].name())
def test_do_handshake_with_mux_and_permessage_compress(self):
request_def = _create_good_request_def()
request_def.headers['Sec-WebSocket-Extensions'] = (
'%s, %s; method=deflate' % (
common.MUX_EXTENSION,
common.PERMESSAGE_COMPRESSION_EXTENSION))
request = _create_request(request_def)
handshaker = _create_handshaker(request)
handshaker.do_handshake()
# mux should be rejected.
self.assertEqual(1, len(request.ws_extensions))
first_extension = request.ws_extensions[0]
self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
first_extension.name())
self.assertEqual(2, len(request.ws_extension_processors))
self.assertEqual(common.MUX_EXTENSION,
request.ws_extension_processors[0].name())
self.assertEqual(common.PERMESSAGE_COMPRESSION_EXTENSION,
request.ws_extension_processors[1].name())
self.assertFalse(hasattr(request, 'mux_processor'))
    def test_bad_requests(self):
        """Exhaustively check requests that must be rejected.

        Each case is (case name, request definition, expected status
        carried by the HandshakeException or None, and whether a
        HandshakeException (True) rather than a VersionException (False)
        is expected).
        """
        bad_cases = [
            ('HTTP request',
             RequestDefinition(
                 'GET', '/demo',
                 {'Host': 'www.google.com',
                  'User-Agent':
                  'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5;'
                  ' en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
                  ' GTB6 GTBA',
                  'Accept':
                  'text/html,application/xhtml+xml,application/xml;q=0.9,'
                  '*/*;q=0.8',
                  'Accept-Language': 'en-us,en;q=0.5',
                  'Accept-Encoding': 'gzip,deflate',
                  'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
                  'Keep-Alive': '300',
                  'Connection': 'keep-alive'}), None, True)]

        # Malformed request line / required header problems.
        request_def = _create_good_request_def()
        request_def.method = 'POST'
        bad_cases.append(('Wrong method', request_def, None, True))

        request_def = _create_good_request_def()
        del request_def.headers['Host']
        bad_cases.append(('Missing Host', request_def, None, True))

        request_def = _create_good_request_def()
        del request_def.headers['Upgrade']
        bad_cases.append(('Missing Upgrade', request_def, None, True))

        request_def = _create_good_request_def()
        request_def.headers['Upgrade'] = 'nonwebsocket'
        bad_cases.append(('Wrong Upgrade', request_def, None, True))

        request_def = _create_good_request_def()
        del request_def.headers['Connection']
        bad_cases.append(('Missing Connection', request_def, None, True))

        request_def = _create_good_request_def()
        request_def.headers['Connection'] = 'Downgrade'
        bad_cases.append(('Wrong Connection', request_def, None, True))

        # Sec-WebSocket-Key problems are rejected with HTTP status 400.
        request_def = _create_good_request_def()
        del request_def.headers['Sec-WebSocket-Key']
        bad_cases.append(('Missing Sec-WebSocket-Key', request_def, 400, True))

        request_def = _create_good_request_def()
        request_def.headers['Sec-WebSocket-Key'] = (
            'dGhlIHNhbXBsZSBub25jZQ==garbage')
        bad_cases.append(('Wrong Sec-WebSocket-Key (with garbage on the tail)',
                          request_def, 400, True))

        request_def = _create_good_request_def()
        request_def.headers['Sec-WebSocket-Key'] = 'YQ=='  # BASE64 of 'a'
        bad_cases.append(
            ('Wrong Sec-WebSocket-Key (decoded value is not 16 octets long)',
             request_def, 400, True))

        request_def = _create_good_request_def()
        # The last character right before == must be any of A, Q, w and g.
        request_def.headers['Sec-WebSocket-Key'] = (
            'AQIDBAUGBwgJCgsMDQ4PEC==')
        bad_cases.append(
            ('Wrong Sec-WebSocket-Key (padding bits are not zero)',
             request_def, 400, True))

        request_def = _create_good_request_def()
        request_def.headers['Sec-WebSocket-Key'] = (
            'dGhlIHNhbXBsZSBub25jZQ==,dGhlIHNhbXBsZSBub25jZQ==')
        bad_cases.append(
            ('Wrong Sec-WebSocket-Key (multiple values)',
             request_def, 400, True))

        request_def = _create_good_request_def()
        del request_def.headers['Sec-WebSocket-Version']
        bad_cases.append(('Missing Sec-WebSocket-Version', request_def, None,
                          True))

        # An unsupported version must yield a VersionException rather than
        # a HandshakeException (fourth tuple element is False).
        request_def = _create_good_request_def()
        request_def.headers['Sec-WebSocket-Version'] = '3'
        bad_cases.append(('Wrong Sec-WebSocket-Version', request_def, None,
                          False))

        request_def = _create_good_request_def()
        request_def.headers['Sec-WebSocket-Version'] = '13, 13'
        bad_cases.append(('Wrong Sec-WebSocket-Version (multiple values)',
                          request_def, 400, True))

        request_def = _create_good_request_def()
        request_def.headers['Sec-WebSocket-Protocol'] = 'illegal\x09protocol'
        bad_cases.append(('Illegal Sec-WebSocket-Protocol',
                          request_def, 400, True))

        request_def = _create_good_request_def()
        request_def.headers['Sec-WebSocket-Protocol'] = ''
        bad_cases.append(('Empty Sec-WebSocket-Protocol',
                          request_def, 400, True))

        # Run every case and verify the exception type and status.
        for (case_name, request_def, expected_status,
             expect_handshake_exception) in bad_cases:
            request = _create_request(request_def)
            handshaker = Handshaker(request, mock.MockDispatcher())
            try:
                handshaker.do_handshake()
                self.fail('No exception thrown for \'%s\' case' % case_name)
            except HandshakeException, e:
                self.assertTrue(expect_handshake_exception)
                self.assertEqual(expected_status, e.status)
            except VersionException, e:
                self.assertFalse(expect_handshake_exception)
if __name__ == '__main__':
    # Run this module's test suite when executed directly.
    unittest.main()


# vi:sts=4 sw=4 et
| mpl-2.0 |
Beeblio/django | tests/utils_tests/test_encoding.py | 39 | 1980 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import datetime
from django.utils import six
from django.utils.encoding import (force_bytes, force_text, filepath_to_uri,
python_2_unicode_compatible)
class TestEncodingUtils(unittest.TestCase):
    """Tests for django.utils.encoding helpers on both Python 2 and 3."""

    def test_force_text_exception(self):
        """
        Check that broken __unicode__/__str__ actually raises an error.
        """
        class MyString(object):
            def __str__(self):
                return b'\xc3\xb6\xc3\xa4\xc3\xbc'

            __unicode__ = __str__

        # str(s) raises a TypeError on python 3 if the result is not a text
        # type. python 2 fails when it tries converting from str to unicode
        # (via ASCII).
        expected = TypeError if six.PY3 else UnicodeError
        with self.assertRaises(expected):
            force_text(MyString())

    def test_force_bytes_exception(self):
        """
        Test that force_bytes knows how to convert to bytes an exception
        containing non-ASCII characters in its args.
        """
        message = "This is an exception, voilà"
        converted = force_bytes(ValueError(message))
        self.assertEqual(converted, message.encode('utf-8'))

    def test_force_bytes_strings_only(self):
        """With strings_only=True, non-string objects pass through as-is."""
        today = datetime.date.today()
        self.assertEqual(force_bytes(today, strings_only=True), today)

    def test_filepath_to_uri(self):
        """Text and UTF-8 byte paths percent-encode identically."""
        expected = 'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4'
        path = 'upload\\чубака.mp4'
        self.assertEqual(filepath_to_uri(path), expected)
        self.assertEqual(filepath_to_uri(path.encode('utf-8')), expected)

    @unittest.skipIf(six.PY3, "tests a class not defining __str__ under Python 2")
    def test_decorated_class_without_str(self):
        """The decorator rejects classes that lack a __str__ method."""
        with self.assertRaises(ValueError):
            @python_2_unicode_compatible
            class NoStr(object):
                pass
| bsd-3-clause |
nhippenmeyer/django | django/utils/six.py | 408 | 30194 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.9.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Version-specific aliases for the basic types whose names changed between
# Python 2 and 3, plus MAXSIZE (the largest container index).
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe by asking len() for a value that only fits in 64 bits:
        # on a 32-bit Py_ssize_t, len(X()) raises OverflowError.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
    """Add documentation to a function.

    Used below to attach a single shared docstring to the PY2 and PY3
    variants of a compatibility function.
    """
    func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot.

    The ``__import__`` builtin returns the top-level package for a dotted
    name, so the fully qualified module is looked up in ``sys.modules``
    after the import.
    """
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves a value lazily on first attribute access.

    Subclasses implement ``_resolve()``. The resolved object is stored on
    the accessed instance and the descriptor removes itself from the
    class, so resolution runs at most once per attribute.
    """

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy reference to a module that was renamed between Python 2 and 3.

    ``old`` is the Python 2 module name; ``new`` is the Python 3 name and
    defaults to the attribute name itself.
    """

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # Resolve the real module, fetch the attribute, and cache it on
        # this proxy so later lookups bypass __getattr__.
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
    """Module whose attributes are lazy descriptors.

    Subclasses populate ``_moved_attributes``; ``__dir__`` lists them so
    introspection works before anything has been resolved.
    """

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy reference to an attribute that moved between Python 2 and 3.

    ``old_mod``/``old_attr`` name the Python 2 location and
    ``new_mod``/``new_attr`` the Python 3 one; unspecified attribute
    names default to ``name``.
    """

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Fall back: new_attr -> old_attr -> the move's own name.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        # Import the owning module, then pull the attribute off it.
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):

    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """

    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps fully qualified names ("<six>.moves.x") to the registered
        # module object or MovedModule placeholder.
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        """Register *mod* under one or more names below the six package."""
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        """Return the object registered as ``<six>.<fullname>``."""
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        # PEP 302 finder: only claim modules registered via _add_module.
        if fullname in self.known_modules:
            return self
        return None

    def __get_module(self, fullname):
        # Lookup by already-fully-qualified name; translate a miss into
        # the ImportError the import machinery expects.
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        # PEP 302 loader.
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Placeholder: swap in the real (renamed) module.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None

    get_source = get_code  # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    *move* is a MovedAttribute or MovedModule; it becomes reachable as
    ``six.moves.<move.name>``.
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves.

    Looks first on the _MovedItems class, then on the moves module
    instance; raises AttributeError if the name exists in neither.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Call the version-appropriate assertCountEqual on *self*."""
    method = getattr(self, _assertCountEqual)
    return method(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    """Call the version-appropriate assertRaisesRegex on *self*."""
    method = getattr(self, _assertRaisesRegex)
    return method(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    """Call the version-appropriate assertRegex on *self*."""
    method = getattr(self, _assertRegex)
    return method(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
# print_(): a function version of ``print``. Prefer the real builtin when it
# exists; otherwise (Python 2.4/2.5) emulate it.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            # Coerce non-strings, then write to the target file object.
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        # Track whether any argument forces unicode output for sep/end.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    # Wrap print_ to add the ``flush`` keyword introduced in Python 3.3.
    _print = print_
    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
    # functools.wraps before Python 3.4 does not set __wrapped__ on the
    # wrapper; layer it on here so introspection works uniformly.
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    ``class C(with_metaclass(Meta, Base)): ...`` yields ``C`` with
    metaclass *meta* and bases *bases* on both Python 2 and Python 3.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            # Invoked when the real class body executes: build the final
            # class with the intended metaclass/bases, dropping the dummy.
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    Rebuilds the decorated class via ``metaclass(name, bases, namespace)``,
    taking care not to carry over the implicitly created __dict__/__weakref__
    descriptors or stale slot descriptors.
    """
    def wrapper(cls):
        namespace = dict(cls.__dict__)
        slots = namespace.get('__slots__')
        if slots is not None:
            # A bare string means a single slot; normalize to a list.
            slot_names = [slots] if isinstance(slots, str) else slots
            for name in slot_names:
                # Drop the old slot descriptors; the metaclass call recreates
                # them from the surviving __slots__ entry.
                namespace.pop(name)
        for implicit in ('__dict__', '__weakref__'):
            namespace.pop(implicit, None)
        return metaclass(cls.__name__, cls.__bases__, namespace)
    return wrapper
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        # On Python 2, __str__ must return bytes: alias the text-returning
        # method to __unicode__ and make __str__ encode it as UTF-8.
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package so the lazy "moves" submodules resolve.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
### Additional customizations for Django ###
# Provide a uniform ``memoryview`` name and the tuple of acceptable binary
# buffer types across interpreters.
if PY3:
    memoryview = memoryview
    buffer_types = (bytes, bytearray, memoryview)
else:
    # memoryview and buffer are not strictly equivalent, but should be fine for
    # django core usage (mainly BinaryField). However, Jython doesn't support
    # buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
    if sys.platform.startswith('java'):
        memoryview = memoryview
    else:
        memoryview = buffer
    buffer_types = (bytearray, memoryview)
| bsd-3-clause |
mjfarmer/scada_py | env/lib/python2.7/site-packages/twisted/conch/test/test_agent.py | 2 | 13008 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.ssh.agent}.
"""
import struct
from twisted.trial import unittest
from twisted.test import iosim
try:
import cryptography
except ImportError:
cryptography = None
try:
import pyasn1
except ImportError:
pyasn1 = None
if cryptography and pyasn1:
from twisted.conch.ssh import keys, agent
else:
keys = agent = None
from twisted.conch.test import keydata
from twisted.conch.error import ConchError, MissingKeyStoreError
class StubFactory(object):
    """
    Minimal stand-in for a protocol factory: it supplies only the C{keys}
    mapping that the SSH agent server protocol uses to store its state.
    """
    def __init__(self):
        # Maps key blobs to their associated (key, comment) entries; starts
        # out empty for every fresh factory.
        self.keys = {}
class AgentTestBase(unittest.TestCase):
    """
    Tests for SSHAgentServer/Client.

    Sets up an in-memory client/server pair connected through an iosim pump
    plus one RSA and one DSA key pair for the subclasses to exercise.
    """
    if iosim is None:
        skip = "iosim requires SSL, but SSL is not available"
    elif agent is None or keys is None:
        skip = "Cannot run without cryptography or PyASN1"
    def setUp(self):
        # wire up our client <-> server
        self.client, self.server, self.pump = iosim.connectedServerAndClient(
            agent.SSHAgentServer, agent.SSHAgentClient)
        # the server's end of the protocol is stateful and we store it on the
        # factory, for which we only need a mock
        self.server.factory = StubFactory()
        # pub/priv keys of each kind
        self.rsaPrivate = keys.Key.fromString(keydata.privateRSA_openssh)
        self.dsaPrivate = keys.Key.fromString(keydata.privateDSA_openssh)
        self.rsaPublic = keys.Key.fromString(keydata.publicRSA_openssh)
        self.dsaPublic = keys.Key.fromString(keydata.publicDSA_openssh)
class ServerProtocolContractWithFactoryTests(AgentTestBase):
    """
    The server protocol is stateful and so uses its factory to track state
    across requests. This test asserts that the protocol raises if its factory
    doesn't provide the necessary storage for that state.
    """
    def test_factorySuppliesKeyStorageForServerProtocol(self):
        # need a message to send into the server
        msg = struct.pack('!LB',1, agent.AGENTC_REQUEST_IDENTITIES)
        # Remove the keys attribute so the protocol finds no storage.
        del self.server.factory.__dict__['keys']
        self.assertRaises(MissingKeyStoreError,
                          self.server.dataReceived, msg)
class UnimplementedVersionOneServerTests(AgentTestBase):
    """
    Tests for methods with no-op implementations on the server. We need these
    for clients, such as openssh, that try v1 methods before going to v2.
    Because the client doesn't expose these operations with nice method names,
    we invoke sendRequest directly with an op code.
    """
    def test_agentc_REQUEST_RSA_IDENTITIES(self):
        """
        assert that we get the correct op code for an RSA identities request
        """
        d = self.client.sendRequest(agent.AGENTC_REQUEST_RSA_IDENTITIES, '')
        # pump.flush() synchronously ferries bytes between the in-memory
        # client and server transports.
        self.pump.flush()
        def _cb(packet):
            self.assertEqual(
                agent.AGENT_RSA_IDENTITIES_ANSWER, ord(packet[0]))
        return d.addCallback(_cb)
    def test_agentc_REMOVE_RSA_IDENTITY(self):
        """
        assert that we get the correct op code for an RSA remove identity request
        """
        d = self.client.sendRequest(agent.AGENTC_REMOVE_RSA_IDENTITY, '')
        self.pump.flush()
        return d.addCallback(self.assertEqual, '')
    def test_agentc_REMOVE_ALL_RSA_IDENTITIES(self):
        """
        assert that we get the correct op code for an RSA remove all identities
        request.
        """
        d = self.client.sendRequest(agent.AGENTC_REMOVE_ALL_RSA_IDENTITIES, '')
        self.pump.flush()
        return d.addCallback(self.assertEqual, '')
if agent is not None:
    class CorruptServer(agent.SSHAgentServer):
        """
        A misbehaving server that returns bogus response op codes so that we can
        verify that our callbacks that deal with these op codes handle such
        miscreants.
        """
        def agentc_REQUEST_IDENTITIES(self, data):
            # 254 is not a valid response op code for this request.
            self.sendResponse(254, '')
        def agentc_SIGN_REQUEST(self, data):
            # 254 is not a valid response op code for this request.
            self.sendResponse(254, '')
class ClientWithBrokenServerTests(AgentTestBase):
    """
    verify error handling code in the client using a misbehaving server
    """
    def setUp(self):
        AgentTestBase.setUp(self)
        # Replace the well-behaved server with CorruptServer, which answers
        # every request with a bogus op code.
        self.client, self.server, self.pump = iosim.connectedServerAndClient(
            CorruptServer, agent.SSHAgentClient)
        # the server's end of the protocol is stateful and we store it on the
        # factory, for which we only need a mock
        self.server.factory = StubFactory()
    def test_signDataCallbackErrorHandling(self):
        """
        Assert that L{SSHAgentClient.signData} raises a ConchError
        if we get a response from the server whose opcode doesn't match
        the protocol for data signing requests.
        """
        d = self.client.signData(self.rsaPublic.blob(), "John Hancock")
        self.pump.flush()
        return self.assertFailure(d, ConchError)
    def test_requestIdentitiesCallbackErrorHandling(self):
        """
        Assert that L{SSHAgentClient.requestIdentities} raises a ConchError
        if we get a response from the server whose opcode doesn't match
        the protocol for identity requests.
        """
        d = self.client.requestIdentities()
        self.pump.flush()
        return self.assertFailure(d, ConchError)
class AgentKeyAdditionTests(AgentTestBase):
    """
    Test adding different flavors of keys to an agent.

    The server stores each added key in its factory's C{keys} dict, keyed by
    public-key blob, as a (key, comment) tuple.
    """
    def test_addRSAIdentityNoComment(self):
        """
        L{SSHAgentClient.addIdentity} adds the private key it is called
        with to the SSH agent server to which it is connected, associating
        it with the comment it is called with.
        This test asserts that omitting the comment produces an
        empty string for the comment on the server.
        """
        d = self.client.addIdentity(self.rsaPrivate.privateBlob())
        self.pump.flush()
        def _check(ignored):
            serverKey = self.server.factory.keys[self.rsaPrivate.blob()]
            self.assertEqual(self.rsaPrivate, serverKey[0])
            self.assertEqual('', serverKey[1])
        return d.addCallback(_check)
    def test_addDSAIdentityNoComment(self):
        """
        L{SSHAgentClient.addIdentity} adds the private key it is called
        with to the SSH agent server to which it is connected, associating
        it with the comment it is called with.
        This test asserts that omitting the comment produces an
        empty string for the comment on the server.
        """
        d = self.client.addIdentity(self.dsaPrivate.privateBlob())
        self.pump.flush()
        def _check(ignored):
            serverKey = self.server.factory.keys[self.dsaPrivate.blob()]
            self.assertEqual(self.dsaPrivate, serverKey[0])
            self.assertEqual('', serverKey[1])
        return d.addCallback(_check)
    def test_addRSAIdentityWithComment(self):
        """
        L{SSHAgentClient.addIdentity} adds the private key it is called
        with to the SSH agent server to which it is connected, associating
        it with the comment it is called with.
        This test asserts that the server receives/stores the comment
        as sent by the client.
        """
        d = self.client.addIdentity(
            self.rsaPrivate.privateBlob(), comment='My special key')
        self.pump.flush()
        def _check(ignored):
            serverKey = self.server.factory.keys[self.rsaPrivate.blob()]
            self.assertEqual(self.rsaPrivate, serverKey[0])
            self.assertEqual('My special key', serverKey[1])
        return d.addCallback(_check)
    def test_addDSAIdentityWithComment(self):
        """
        L{SSHAgentClient.addIdentity} adds the private key it is called
        with to the SSH agent server to which it is connected, associating
        it with the comment it is called with.
        This test asserts that the server receives/stores the comment
        as sent by the client.
        """
        d = self.client.addIdentity(
            self.dsaPrivate.privateBlob(), comment='My special key')
        self.pump.flush()
        def _check(ignored):
            serverKey = self.server.factory.keys[self.dsaPrivate.blob()]
            self.assertEqual(self.dsaPrivate, serverKey[0])
            self.assertEqual('My special key', serverKey[1])
        return d.addCallback(_check)
class AgentClientFailureTests(AgentTestBase):
    """
    Verify the client's handling of an explicit AGENT_FAILURE response.
    """
    def test_agentFailure(self):
        """
        verify that the client raises ConchError on AGENT_FAILURE
        """
        # 254 is an op code the server does not implement, so it answers
        # with AGENT_FAILURE.
        d = self.client.sendRequest(254, '')
        self.pump.flush()
        return self.assertFailure(d, ConchError)
class AgentIdentityRequestsTests(AgentTestBase):
    """
    Test operations against a server with identities already loaded.
    """
    def setUp(self):
        AgentTestBase.setUp(self)
        # Preload the server with both key pairs; blob() of a private key is
        # its public blob, so lookups by public blob succeed below.
        self.server.factory.keys[self.dsaPrivate.blob()] = (
            self.dsaPrivate, 'a comment')
        self.server.factory.keys[self.rsaPrivate.blob()] = (
            self.rsaPrivate, 'another comment')
    def test_signDataRSA(self):
        """
        Sign data with an RSA private key and then verify it with the public
        key.
        """
        d = self.client.signData(self.rsaPublic.blob(), "John Hancock")
        self.pump.flush()
        signature = self.successResultOf(d)
        # RSA signing is deterministic, so the exact bytes can be compared.
        expected = self.rsaPrivate.sign("John Hancock")
        self.assertEqual(expected, signature)
        self.assertTrue(self.rsaPublic.verify(signature, "John Hancock"))
    def test_signDataDSA(self):
        """
        Sign data with a DSA private key and then verify it with the public
        key.
        """
        d = self.client.signData(self.dsaPublic.blob(), "John Hancock")
        self.pump.flush()
        def _check(sig):
            # Cannot do this b/c DSA uses random numbers when signing
            #   expected = self.dsaPrivate.sign("John Hancock")
            #   self.assertEqual(expected, sig)
            self.assertTrue(self.dsaPublic.verify(sig, "John Hancock"))
        return d.addCallback(_check)
    def test_signDataRSAErrbackOnUnknownBlob(self):
        """
        Assert that we get an errback if we try to sign data using a key that
        wasn't added.
        """
        del self.server.factory.keys[self.rsaPublic.blob()]
        d = self.client.signData(self.rsaPublic.blob(), "John Hancock")
        self.pump.flush()
        return self.assertFailure(d, ConchError)
    def test_requestIdentities(self):
        """
        Assert that we get all of the keys/comments that we add when we issue a
        request for all identities.
        """
        d = self.client.requestIdentities()
        self.pump.flush()
        def _check(keyt):
            expected = {}
            expected[self.dsaPublic.blob()] = 'a comment'
            expected[self.rsaPublic.blob()] = 'another comment'
            received = {}
            for k in keyt:
                received[keys.Key.fromString(k[0], type='blob').blob()] = k[1]
            self.assertEqual(expected, received)
        return d.addCallback(_check)
class AgentKeyRemovalTests(AgentTestBase):
    """
    Test support for removing keys in a remote server.
    """
    def setUp(self):
        AgentTestBase.setUp(self)
        # Preload the server with two identities so removals are observable.
        self.server.factory.keys[self.dsaPrivate.blob()] = (
            self.dsaPrivate, 'a comment')
        self.server.factory.keys[self.rsaPrivate.blob()] = (
            self.rsaPrivate, 'another comment')
    def test_removeRSAIdentity(self):
        """
        Assert that we can remove an RSA identity.
        """
        # only need public key for this
        d = self.client.removeIdentity(self.rsaPrivate.blob())
        self.pump.flush()
        def _check(ignored):
            self.assertEqual(1, len(self.server.factory.keys))
            self.assertIn(self.dsaPrivate.blob(), self.server.factory.keys)
            self.assertNotIn(self.rsaPrivate.blob(), self.server.factory.keys)
        return d.addCallback(_check)
    def test_removeDSAIdentity(self):
        """
        Assert that we can remove a DSA identity.
        """
        # only need public key for this
        d = self.client.removeIdentity(self.dsaPrivate.blob())
        self.pump.flush()
        def _check(ignored):
            self.assertEqual(1, len(self.server.factory.keys))
            self.assertIn(self.rsaPrivate.blob(), self.server.factory.keys)
        return d.addCallback(_check)
    def test_removeAllIdentities(self):
        """
        Assert that we can remove all identities.
        """
        d = self.client.removeAllIdentities()
        self.pump.flush()
        def _check(ignored):
            self.assertEqual(0, len(self.server.factory.keys))
        return d.addCallback(_check)
| gpl-3.0 |
iut-ibk/DynaMind-ToolBox | DynaMind-Performance-Assessment/3rdparty/CD3Waterbalance/Module/helped to develop other modules/Groundwater.py | 2 | 3090 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 02 08:41:08 2014
@author: Acer
"""
import sys
import pycd3
class NodeFactory(pycd3.INodeFactory):
    """Generic CD3 node factory wrapping a Python node class.

    The print statements are debug traces left in by the original author.
    """
    def __init__(self, node):
        pycd3.INodeFactory.__init__(self)
        # The node class (not instance) this factory will instantiate.
        self.node = node
        print "NodeFactory.__init__"
    def getNodeName(self):
        print "NodeFactory.getName"
        return self.node.__name__
    def createNode(self):
        print "NodeFactory.createNode"
        n = self.node()
        # Hand ownership of the new node to the C++ side (SWIG).
        n.__disown__()
        print "NodeFactory.disowned"
        return n
    def getSource(self):
        print "NodeFactory.getSource"
        # NOTE(review): returns "Practice.py" although this file is
        # Groundwater.py — looks like a copy-paste leftover; confirm.
        return "Practice.py"
class Groundwater(pycd3.Node):
    """Simple groundwater storage node for the CD3 water-balance model.

    Per time step it adds inflow, subtracts evapotranspiration, and reports
    a storage deficit (as negative "outdoor use") or caps storage at 0.1.
    Units are presumably metres of water depth per 6-minute step (see the
    evapotr parameter label) — TODO confirm against the model documentation.
    """
    def __init__(self):
        pycd3.Node.__init__(self)
        # Inflow into the groundwater store.
        self.groundw_in = pycd3.Flow()
        #self.groundw_evapo = pycd3.Flow()
        # Water demanded from/supplied to outdoor use (negative = deficit).
        self.outdoor_use = pycd3.Flow()
        #self.pF_Value = pycd3.Double()
        # Mirrors the current storage volume for inspection downstream.
        self.check_storage = pycd3.Flow()
        #dir (self.inf)
        print "init node"
        self.addInPort("groundw_in", self.groundw_in)
        #self.addInPort("groundw_evapo", self.groundw_evapo)
        #self.addOutPort("pF-Value", self.pF_Value)
        self.addOutPort("outdoor_use", self.outdoor_use)
        self.addOutPort("check_storage", self.check_storage)
        # Scaling factor applied to a storage deficit (property area?).
        self.area_property = pycd3.Double(1.0)
        self.addParameter("area_property", self.area_property)
        # Constant evapotranspiration loss per time step.
        self.evapotr = pycd3.Double(0.00002113)
        self.addParameter("evapotr [m/(6 min)]", self.evapotr)
        #self.water_content = pycd3.Double(0.4)
        #self.addParameter("initial_w_content", self.water_content)
        #self.soil_type = pycd3.String()
        #self.addParameter("soil_type", self.soil_type)
        # Current stored volume; starts empty.
        self.current_Volume= 0.0
        #total_volume = 15.5*10**6*10
        #self.addOutPort("gw", self.gw)
        #self.addInPort("in", self.inf)
    def init(self, start, stop, dt):
        # Debug traces of the simulation window; always reports success.
        print start
        print stop
        print dt
        return True
    def f(self, current, dt):
        # Water balance: inflow minus evapotranspiration for this step.
        self.current_Volume += self.groundw_in[0]-self.evapotr
        if self.current_Volume < 0.0:
            # Deficit: export the (negative) shortfall scaled by area and
            # empty the store.
            self.outdoor_use[0] = self.current_Volume*self.area_property
            self.current_Volume = 0.0
            self.check_storage[0] = self.current_Volume
        elif self.current_Volume < 0.1:
            self.outdoor_use[0] = 0.0
            self.check_storage[0] = self.current_Volume
        else:
            # Cap storage at 0.1; any overflow above the cap is discarded
            # silently — presumably intentional, TODO confirm.
            self.current_Volume = 0.1
            self.outdoor_use[0] = 0.0
            self.check_storage[0] = self.current_Volume
        return dt
    def getClassName(self):
        #print "getClassName"
        return "Groundwater"
def register(nr):
    """Register a NodeFactory with *nr* for every pycd3.Node subclass."""
    for node_class in pycd3.Node.__subclasses__():
        factory = NodeFactory(node_class)
        # Transfer ownership of the factory to the C++ registry (SWIG).
        factory.__disown__()
        nr.addNodeFactory(factory)
# def test():
# nr = pycd3.NodeRegistry()
# nf = NodeFactory(Household).__disown__()
# nr.addNodeFactory(nf)
# node = nr.createNode("Household")
#test()
| gpl-2.0 |
Abhinav117/pymtl | proc/parc_fl/ParcProcFL_test.py | 4 | 35139 | #=========================================================================
# ParcProcFL_test
#=========================================================================
from __future__ import print_function
import pytest
import pisa
import struct
from pymtl import *
from pclib.test import TestSource, TestSink
from pclib.ifcs import MemMsg
from ParcProcFL import ParcProcFL
from GenericXcelFL import GenericXcelFL
from pclib.test.TestMemoryFuture import TestMemory
from pisa.pisa_inst_test_utils import asm_test
class TestHarness (Model):
  """Composes source/sink, processor, memory, and accelerator for testing.

  Test messages flow mngr2proc from the source, results flow proc2mngr to
  the sink; the test passes when both streams drain.
  """
  #-----------------------------------------------------------------------
  # constructor
  #-----------------------------------------------------------------------
  def __init__( s ):
    # Instantiate models
    s.src = TestSource ( 32, [], 0 )
    s.sink = TestSink ( 32, [], 0 )
    s.proc = ParcProcFL ()
    s.mem = TestMemory ( MemMsg(32,32), 3 )
    s.xcel = GenericXcelFL ()
  #-----------------------------------------------------------------------
  # elaborate
  #-----------------------------------------------------------------------
  def elaborate_logic( s ):
    # Processor <-> Proc/Mngr
    s.connect( s.proc.mngr2proc, s.src.out )
    s.connect( s.proc.proc2mngr, s.sink.in_ )
    # Processor <-> Memory (port 0: instructions, port 1: data)
    s.connect( s.proc.imemreq, s.mem.reqs[0] )
    s.connect( s.proc.imemresp, s.mem.resps[0] )
    s.connect( s.proc.dmemreq, s.mem.reqs[1] )
    s.connect( s.proc.dmemresp, s.mem.resps[1] )
    # Processor <-> Accelerator
    s.connect( s.proc.xcelreq, s.xcel.xcelreq )
    s.connect( s.proc.xcelresp, s.xcel.xcelresp )
    # Accelerator <-> Memory
    # s.connect( s.mvmult.memreq, s.mem.reqs[2] )
    # s.connect( s.mvmult.memresp, s.mem.resps[2] )
  #-----------------------------------------------------------------------
  # load
  #-----------------------------------------------------------------------
  def load( self, mem_image ):
    """Load a SparseMemoryImage: special sections feed src/sink, the rest
    is copied into test memory."""
    # Iterate over the sections
    sections = mem_image.get_sections()
    for section in sections:
      # For .mngr2proc sections, copy section into mngr2proc src
      if section.name == ".mngr2proc":
        for i in xrange(0,len(section.data),4):
          bits = struct.unpack_from("<I",buffer(section.data,i,4))[0]
          self.src.src.msgs.append( Bits(32,bits) )
      # For .proc2mngr sections, copy section into proc2mngr_ref src
      elif section.name == ".proc2mngr":
        for i in xrange(0,len(section.data),4):
          bits = struct.unpack_from("<I",buffer(section.data,i,4))[0]
          self.sink.sink.msgs.append( Bits(32,bits) )
      # For all other sections, simply copy them into the memory
      else:
        start_addr = section.addr
        stop_addr = section.addr + len(section.data)
        self.mem.mem[start_addr:stop_addr] = section.data
  #-----------------------------------------------------------------------
  # cleanup
  #-----------------------------------------------------------------------
  def cleanup( s ):
    # Drop the (potentially large) memory contents between tests.
    del s.mem.mem[:]
  #-----------------------------------------------------------------------
  # done
  #-----------------------------------------------------------------------
  def done( s ):
    return s.src.done and s.sink.done
  #-----------------------------------------------------------------------
  # line_trace
  #-----------------------------------------------------------------------
  def line_trace( s ):
    return s.src.line_trace() + " > " + \
           s.proc.line_trace() + "|" + \
           s.xcel.line_trace() + " " + \
           s.mem.line_trace() + " > " + \
           s.sink.line_trace()
#-------------------------------------------------------------------------
# run_test
#-------------------------------------------------------------------------
# Assemble the program produced by gen_test(), load it into a fresh
# TestHarness, and simulate until the source and sink both drain.
def run_test( gen_test ):
  # Instantiate and elaborate the model
  model = TestHarness()
  model.elaborate()
  # Assemble the test program
  mem_image = pisa.pisa_encoding.assemble( gen_test() )
  # Load the program into the model
  model.load( mem_image )
  # Create a simulator using the simulation tool
  sim = SimulationTool( model )
  # Run the simulation
  print()
  sim.reset()
  while not model.done():
    sim.print_line_trace()
    sim.cycle()
  # Add a couple extra ticks so that the VCD dump is nicer
  sim.cycle()
  sim.cycle()
  sim.cycle()
  model.cleanup()
# Each section below imports the assembly-test generators for one PISA
# instruction and feeds them through run_test() via pytest parametrization.
#-------------------------------------------------------------------------
# mngr
#-------------------------------------------------------------------------
import pisa.pisa_inst_mngr_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_mngr_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_mngr_test.gen_bypass_test ),
  asm_test( pisa.pisa_inst_mngr_test.gen_value_test ),
])
def test_mngr( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# addu
#-------------------------------------------------------------------------
import pisa.pisa_inst_addu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_addu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_addu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_addu_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_addu_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_addu_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_addu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_addu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_addu_test.gen_random_test ),
])
def test_addu( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# subu
#-------------------------------------------------------------------------
import pisa.pisa_inst_subu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_subu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_subu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_subu_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_subu_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_subu_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_subu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_subu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_subu_test.gen_random_test ),
])
def test_subu( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# and
#-------------------------------------------------------------------------
import pisa.pisa_inst_and_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_and_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_and_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_and_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_and_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_and_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_and_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_and_test.gen_value_test ),
  asm_test( pisa.pisa_inst_and_test.gen_random_test ),
])
def test_and( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# or
#-------------------------------------------------------------------------
import pisa.pisa_inst_or_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_or_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_or_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_or_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_or_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_or_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_or_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_or_test.gen_value_test ),
  asm_test( pisa.pisa_inst_or_test.gen_random_test ),
])
def test_or( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# xor
#-------------------------------------------------------------------------
import pisa.pisa_inst_xor_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_xor_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_xor_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_xor_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_xor_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_xor_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_xor_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_xor_test.gen_value_test ),
  asm_test( pisa.pisa_inst_xor_test.gen_random_test ),
])
def test_xor( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# nor
#-------------------------------------------------------------------------
import pisa.pisa_inst_nor_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_nor_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_nor_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_nor_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_nor_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_nor_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_nor_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_nor_test.gen_value_test ),
  asm_test( pisa.pisa_inst_nor_test.gen_random_test ),
])
def test_nor( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# slt
#-------------------------------------------------------------------------
import pisa.pisa_inst_slt_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_slt_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_slt_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_slt_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_slt_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_slt_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_slt_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_slt_test.gen_value_test ),
  asm_test( pisa.pisa_inst_slt_test.gen_random_test ),
])
def test_slt( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# sltu
#-------------------------------------------------------------------------
import pisa.pisa_inst_sltu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_sltu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_sltu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_sltu_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_sltu_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_sltu_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_sltu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_sltu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_sltu_test.gen_random_test ),
])
def test_sltu( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# addiu
#-------------------------------------------------------------------------
import pisa.pisa_inst_addiu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_addiu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_addiu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_addiu_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_addiu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_addiu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_addiu_test.gen_random_test ),
])
def test_addiu( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# andi
#-------------------------------------------------------------------------
import pisa.pisa_inst_andi_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_andi_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_andi_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_andi_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_andi_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_andi_test.gen_value_test ),
  asm_test( pisa.pisa_inst_andi_test.gen_random_test ),
])
def test_andi( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# ori
#-------------------------------------------------------------------------
import pisa.pisa_inst_ori_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_ori_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_ori_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_ori_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_ori_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_ori_test.gen_value_test ),
  asm_test( pisa.pisa_inst_ori_test.gen_random_test ),
])
def test_ori( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# xori
#-------------------------------------------------------------------------
import pisa.pisa_inst_xori_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_xori_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_xori_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_xori_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_xori_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_xori_test.gen_value_test ),
  asm_test( pisa.pisa_inst_xori_test.gen_random_test ),
])
def test_xori( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# slti
#-------------------------------------------------------------------------
import pisa.pisa_inst_slti_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_slti_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_slti_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_slti_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_slti_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_slti_test.gen_value_test ),
  asm_test( pisa.pisa_inst_slti_test.gen_random_test ),
])
def test_slti( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# sltiu
#-------------------------------------------------------------------------
import pisa.pisa_inst_sltiu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_sltiu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_sltiu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_sltiu_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_sltiu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_sltiu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_sltiu_test.gen_random_test ),
])
def test_sltiu( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# sll
#-------------------------------------------------------------------------
import pisa.pisa_inst_sll_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_sll_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_sll_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_sll_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_sll_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_sll_test.gen_value_test ),
  asm_test( pisa.pisa_inst_sll_test.gen_random_test ),
])
def test_sll( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# srl
#-------------------------------------------------------------------------
import pisa.pisa_inst_srl_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_srl_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_srl_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_srl_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_srl_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_srl_test.gen_value_test ),
  asm_test( pisa.pisa_inst_srl_test.gen_random_test ),
])
def test_srl( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# sra
#-------------------------------------------------------------------------
import pisa.pisa_inst_sra_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_sra_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_sra_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_sra_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_sra_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_sra_test.gen_value_test ),
  asm_test( pisa.pisa_inst_sra_test.gen_random_test ),
])
def test_sra( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# sllv
#-------------------------------------------------------------------------
import pisa.pisa_inst_sllv_test
@pytest.mark.parametrize( "name,test", [
asm_test( pisa.pisa_inst_sllv_test.gen_basic_test ),
asm_test( pisa.pisa_inst_sllv_test.gen_dest_byp_test ),
asm_test( pisa.pisa_inst_sllv_test.gen_src0_byp_test ),
asm_test( pisa.pisa_inst_sllv_test.gen_src1_byp_test ),
asm_test( pisa.pisa_inst_sllv_test.gen_srcs_byp_test ),
asm_test( pisa.pisa_inst_sllv_test.gen_srcs_dest_test ),
asm_test( pisa.pisa_inst_sllv_test.gen_value_test ),
asm_test( pisa.pisa_inst_sllv_test.gen_random_test ),
])
def test_sllv( name, test ):
run_test( test )
#-------------------------------------------------------------------------
# srlv
#-------------------------------------------------------------------------
import pisa.pisa_inst_srlv_test
@pytest.mark.parametrize( "name,test", [
asm_test( pisa.pisa_inst_srlv_test.gen_basic_test ),
asm_test( pisa.pisa_inst_srlv_test.gen_dest_byp_test ),
asm_test( pisa.pisa_inst_srlv_test.gen_src0_byp_test ),
asm_test( pisa.pisa_inst_srlv_test.gen_src1_byp_test ),
asm_test( pisa.pisa_inst_srlv_test.gen_srcs_byp_test ),
asm_test( pisa.pisa_inst_srlv_test.gen_srcs_dest_test ),
asm_test( pisa.pisa_inst_srlv_test.gen_value_test ),
asm_test( pisa.pisa_inst_srlv_test.gen_random_test ),
])
def test_srlv( name, test ):
run_test( test )
#-------------------------------------------------------------------------
# srav
#-------------------------------------------------------------------------
import pisa.pisa_inst_srav_test
@pytest.mark.parametrize( "name,test", [
asm_test( pisa.pisa_inst_srav_test.gen_basic_test ),
asm_test( pisa.pisa_inst_srav_test.gen_dest_byp_test ),
asm_test( pisa.pisa_inst_srav_test.gen_src0_byp_test ),
asm_test( pisa.pisa_inst_srav_test.gen_src1_byp_test ),
asm_test( pisa.pisa_inst_srav_test.gen_srcs_byp_test ),
asm_test( pisa.pisa_inst_srav_test.gen_srcs_dest_test ),
asm_test( pisa.pisa_inst_srav_test.gen_value_test ),
asm_test( pisa.pisa_inst_srav_test.gen_random_test ),
])
def test_srav( name, test ):
run_test( test )
#-------------------------------------------------------------------------
# lui
#-------------------------------------------------------------------------
import pisa.pisa_inst_lui_test
@pytest.mark.parametrize( "name,test", [
asm_test( pisa.pisa_inst_lui_test.gen_basic_test ),
asm_test( pisa.pisa_inst_lui_test.gen_dest_byp_test ),
asm_test( pisa.pisa_inst_lui_test.gen_value_test ),
asm_test( pisa.pisa_inst_lui_test.gen_random_test ),
])
def test_lui( name, test ):
run_test( test )
#-------------------------------------------------------------------------
# mul
#-------------------------------------------------------------------------
# Multiply/divide/remainder instructions: each registers the full set of
# basic / bypass / value / random generators with pytest; run_test()
# (defined earlier in this file) assembles and executes each program.

import pisa.pisa_inst_mul_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_mul_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_mul_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_mul_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_mul_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_mul_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_mul_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_mul_test.gen_value_test ),
  asm_test( pisa.pisa_inst_mul_test.gen_random_test ),
])
def test_mul( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# div
#-------------------------------------------------------------------------

import pisa.pisa_inst_div_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_div_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_div_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_div_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_div_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_div_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_div_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_div_test.gen_value_test ),
  asm_test( pisa.pisa_inst_div_test.gen_random_test ),
])
def test_div( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# divu
#-------------------------------------------------------------------------

import pisa.pisa_inst_divu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_divu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_divu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_divu_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_divu_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_divu_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_divu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_divu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_divu_test.gen_random_test ),
])
def test_divu( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# rem
#-------------------------------------------------------------------------

import pisa.pisa_inst_rem_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_rem_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_rem_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_rem_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_rem_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_rem_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_rem_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_rem_test.gen_value_test ),
  asm_test( pisa.pisa_inst_rem_test.gen_random_test ),
])
def test_rem( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# remu
#-------------------------------------------------------------------------

import pisa.pisa_inst_remu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_remu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_remu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_remu_test.gen_src0_byp_test ),
  asm_test( pisa.pisa_inst_remu_test.gen_src1_byp_test ),
  asm_test( pisa.pisa_inst_remu_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_remu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_remu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_remu_test.gen_random_test ),
])
def test_remu( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# lw
#-------------------------------------------------------------------------
# Memory instructions.  Loads exercise base-address bypassing
# (gen_base_byp_test); stores additionally exercise bypassing of the
# store-data source register (gen_src_byp_test / gen_srcs_byp_test).

import pisa.pisa_inst_lw_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_lw_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_lw_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_lw_test.gen_base_byp_test ),
  asm_test( pisa.pisa_inst_lw_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_lw_test.gen_value_test ),
  asm_test( pisa.pisa_inst_lw_test.gen_random_test ),
])
def test_lw( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# lh
#-------------------------------------------------------------------------

import pisa.pisa_inst_lh_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_lh_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_lh_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_lh_test.gen_base_byp_test ),
  asm_test( pisa.pisa_inst_lh_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_lh_test.gen_value_test ),
  asm_test( pisa.pisa_inst_lh_test.gen_random_test ),
])
def test_lh( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# lhu
#-------------------------------------------------------------------------

import pisa.pisa_inst_lhu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_lhu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_lhu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_lhu_test.gen_base_byp_test ),
  asm_test( pisa.pisa_inst_lhu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_lhu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_lhu_test.gen_random_test ),
])
def test_lhu( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# lb
#-------------------------------------------------------------------------

import pisa.pisa_inst_lb_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_lb_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_lb_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_lb_test.gen_base_byp_test ),
  asm_test( pisa.pisa_inst_lb_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_lb_test.gen_value_test ),
  asm_test( pisa.pisa_inst_lb_test.gen_random_test ),
])
def test_lb( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# lbu
#-------------------------------------------------------------------------

import pisa.pisa_inst_lbu_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_lbu_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_lbu_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_lbu_test.gen_base_byp_test ),
  asm_test( pisa.pisa_inst_lbu_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_lbu_test.gen_value_test ),
  asm_test( pisa.pisa_inst_lbu_test.gen_random_test ),
])
def test_lbu( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# sw
#-------------------------------------------------------------------------

import pisa.pisa_inst_sw_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_sw_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_sw_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_sw_test.gen_base_byp_test ),
  asm_test( pisa.pisa_inst_sw_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_sw_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_sw_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_sw_test.gen_value_test ),
  asm_test( pisa.pisa_inst_sw_test.gen_random_test ),
])
def test_sw( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# sh
#-------------------------------------------------------------------------

import pisa.pisa_inst_sh_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_sh_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_sh_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_sh_test.gen_base_byp_test ),
  asm_test( pisa.pisa_inst_sh_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_sh_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_sh_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_sh_test.gen_value_test ),
  asm_test( pisa.pisa_inst_sh_test.gen_random_test ),
])
def test_sh( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# sb
#-------------------------------------------------------------------------

import pisa.pisa_inst_sb_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_sb_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_sb_test.gen_dest_byp_test ),
  asm_test( pisa.pisa_inst_sb_test.gen_base_byp_test ),
  asm_test( pisa.pisa_inst_sb_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_sb_test.gen_srcs_byp_test ),
  asm_test( pisa.pisa_inst_sb_test.gen_srcs_dest_test ),
  asm_test( pisa.pisa_inst_sb_test.gen_value_test ),
  asm_test( pisa.pisa_inst_sb_test.gen_random_test ),
])
def test_sb( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# j
#-------------------------------------------------------------------------
# Unconditional jumps.  Link-writing jumps (jal/jalr) additionally test
# bypassing of the link register; register jumps (jr/jalr) additionally
# test bypassing of the jump-target source register.

import pisa.pisa_inst_j_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_j_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_j_test.gen_jump_test ),
])
def test_j( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# jal
#-------------------------------------------------------------------------

import pisa.pisa_inst_jal_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_jal_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_jal_test.gen_link_byp_test ),
  asm_test( pisa.pisa_inst_jal_test.gen_jump_test ),
])
def test_jal( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# jr
#-------------------------------------------------------------------------

import pisa.pisa_inst_jr_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_jr_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_jr_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_jr_test.gen_jump_test ),
])
def test_jr( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# jalr
#-------------------------------------------------------------------------

import pisa.pisa_inst_jalr_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_jalr_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_jalr_test.gen_link_byp_test ),
  asm_test( pisa.pisa_inst_jalr_test.gen_src_byp_test ),
  asm_test( pisa.pisa_inst_jalr_test.gen_jump_test ),
])
def test_jalr( name, test ):
  run_test( test )
#-------------------------------------------------------------------------
# beq
#-------------------------------------------------------------------------
# Conditional branches.  Two-source branches (beq/bne) get taken and
# not-taken bypass variants for each source plus a src0==src1 aliasing
# test; single-source compare-against-zero branches get the smaller set.

import pisa.pisa_inst_beq_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_beq_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_src0_byp_taken_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_src0_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_src1_byp_taken_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_src1_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_srcs_byp_taken_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_srcs_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_src0_eq_src1_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_value_test ),
  asm_test( pisa.pisa_inst_beq_test.gen_random_test ),
])
def test_beq( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# bne
#-------------------------------------------------------------------------

import pisa.pisa_inst_bne_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_bne_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_src0_byp_taken_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_src0_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_src1_byp_taken_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_src1_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_srcs_byp_taken_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_srcs_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_src0_eq_src1_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_value_test ),
  asm_test( pisa.pisa_inst_bne_test.gen_random_test ),
])
def test_bne( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# blez
#-------------------------------------------------------------------------

import pisa.pisa_inst_blez_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_blez_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_blez_test.gen_src_byp_taken_test ),
  asm_test( pisa.pisa_inst_blez_test.gen_src_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_blez_test.gen_value_test ),
  asm_test( pisa.pisa_inst_blez_test.gen_random_test ),
])
def test_blez( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# bgtz
#-------------------------------------------------------------------------

import pisa.pisa_inst_bgtz_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_bgtz_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_bgtz_test.gen_src_byp_taken_test ),
  asm_test( pisa.pisa_inst_bgtz_test.gen_src_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_bgtz_test.gen_value_test ),
  asm_test( pisa.pisa_inst_bgtz_test.gen_random_test ),
])
def test_bgtz( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# bltz
#-------------------------------------------------------------------------

import pisa.pisa_inst_bltz_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_bltz_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_bltz_test.gen_src_byp_taken_test ),
  asm_test( pisa.pisa_inst_bltz_test.gen_src_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_bltz_test.gen_value_test ),
  asm_test( pisa.pisa_inst_bltz_test.gen_random_test ),
])
def test_bltz( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# bgez
#-------------------------------------------------------------------------

import pisa.pisa_inst_bgez_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_bgez_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_bgez_test.gen_src_byp_taken_test ),
  asm_test( pisa.pisa_inst_bgez_test.gen_src_byp_nottaken_test ),
  asm_test( pisa.pisa_inst_bgez_test.gen_value_test ),
  asm_test( pisa.pisa_inst_bgez_test.gen_random_test ),
])
def test_bgez( name, test ):
  run_test( test )

#-------------------------------------------------------------------------
# mtx/mfx
#-------------------------------------------------------------------------
# The mtx and mfx accelerator-interface instructions share one test case
# (hence the single test_mtx entry point covering both).

import pisa.pisa_inst_xcel_test
@pytest.mark.parametrize( "name,test", [
  asm_test( pisa.pisa_inst_xcel_test.gen_basic_test ),
  asm_test( pisa.pisa_inst_xcel_test.gen_bypass_mtx_test ),
  asm_test( pisa.pisa_inst_xcel_test.gen_bypass_mfx_test ),
  asm_test( pisa.pisa_inst_xcel_test.gen_bypass_test ),
])
def test_mtx( name, test ):
  run_test( test )
| bsd-3-clause |
ofer43211/unisubs | apps/webdriver_testing/pages/web/google.py | 6 | 1668 | import imaplib
import time
import re
class Google(Page):
    """Page object for Google's sign-in and OAuth approval screens."""

    # CSS locators for the sign-in / approval form elements.
    _GOOGLE_PAGE = "div.google-header-bar"
    _APPROVE = "input#approve_button"
    _EMAIL = "input#Email"
    _PASSWORD = "input#Passwd"
    _SUBMIT = "input#signIn.g-button"

    def activate_mc_user_account(self, email, password, url):
        """Activate a new Miro Community user's gmail account.

        Searches the gmail inbox over IMAP for the signup email and
        returns the activation URL found in its body, or None if no
        line starting with *url* is present.

        :param email: gmail address (also used as the IMAP login user)
        :param password: gmail password
        :param url: expected prefix of the activation link
        """
        print("Checking email for activation link")
        mailUser = email
        mailPassword = password
        mail = imaplib.IMAP4_SSL('imap.gmail.com', 993)
        try:
            mail.login(mailUser, mailPassword)
            mail.select('Inbox')
            result, data = mail.uid('search', None, '(HEADER Subject "Finish Signing Up at")')
            latest_email_uid = data[0].split()[-1]
            result, data = mail.uid('fetch', latest_email_uid, '(RFC822)')
            raw_email = data[0][1]
        finally:
            # Always release the IMAP connection, even when the login,
            # search, or fetch fails (the original leaked it).
            try:
                mail.logout()
            except Exception:
                pass
        for line in raw_email.split('\n'):
            if line.startswith(url):
                return line
        # Previously this fell through to `return activationURL` with the
        # name unbound, raising NameError; report and return None instead.
        print('failed to find link')
        return None

    def google_login(self, user, passw, **kwargs):
        """Fill in and submit the Google sign-in form if displayed, then
        click the OAuth approval button if one is shown."""
        if self.is_element_present(self._GOOGLE_PAGE):
            if self.is_element_present(self._EMAIL):
                self.type_by_css(self._EMAIL, user)
                self.type_by_css(self._PASSWORD, passw)
                self.click_by_css(self._SUBMIT)
            if self.is_element_present(self._APPROVE):
                # Signed-in account, just needs approval to continue.
                self.click_by_css(self._APPROVE)
| agpl-3.0 |
jeremydane/Info3180-Project3 | server/lib/flask/globals.py | 783 | 1137 | # -*- coding: utf-8 -*-
"""
flask.globals
~~~~~~~~~~~~~
Defines all the global objects that are proxies to the current
active context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import partial
from werkzeug.local import LocalStack, LocalProxy
def _lookup_req_object(name):
    """Return attribute *name* of the topmost request context.

    Raises RuntimeError when no request context has been pushed.
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        raise RuntimeError('working outside of request context')
    return getattr(ctx, name)
def _lookup_app_object(name):
    """Return attribute *name* of the topmost application context.

    Raises RuntimeError when no application context has been pushed.
    """
    ctx = _app_ctx_stack.top
    if ctx is None:
        raise RuntimeError('working outside of application context')
    return getattr(ctx, name)
def _find_app():
    """Return the application bound to the topmost application context.

    Raises RuntimeError when no application context has been pushed.
    """
    ctx = _app_ctx_stack.top
    if ctx is None:
        raise RuntimeError('working outside of application context')
    return ctx.app
# context locals
# Two stacks hold the pushed application and request contexts; the
# proxies below re-resolve the topmost entry on every attribute access,
# which is what makes importing these names at module load time safe.
_request_ctx_stack = LocalStack()
_app_ctx_stack = LocalStack()
current_app = LocalProxy(_find_app)                           # app of the active app context
request = LocalProxy(partial(_lookup_req_object, 'request'))  # 'request' of the active request context
session = LocalProxy(partial(_lookup_req_object, 'session'))  # 'session' of the active request context
g = LocalProxy(partial(_lookup_app_object, 'g'))              # 'g' of the active app context
| apache-2.0 |
martydill/url_shortener | code/venv/lib/python2.7/site-packages/wheel/signatures/ed25519py.py | 565 | 1695 | # -*- coding: utf-8 -*-
import warnings
import os
from collections import namedtuple
from . import djbec
__all__ = ['crypto_sign', 'crypto_sign_open', 'crypto_sign_keypair', 'Keypair',
           'PUBLICKEYBYTES', 'SECRETKEYBYTES', 'SIGNATUREBYTES']

# Ed25519 object sizes, in bytes.  The 64-byte secret key is the 32-byte
# seed followed by the 32-byte public key (see crypto_sign_keypair below).
PUBLICKEYBYTES=32
SECRETKEYBYTES=64
SIGNATUREBYTES=64

Keypair = namedtuple('Keypair', ('vk', 'sk')) # verifying key, secret key
def crypto_sign_keypair(seed=None):
    """Return (verifying, secret) key from a given seed, or os.urandom(32)"""
    if seed is not None:
        # Caller-supplied seeds are discouraged; warn but honour them.
        warnings.warn("ed25519ll should choose random seed.",
                      RuntimeWarning)
    else:
        seed = os.urandom(PUBLICKEYBYTES)
    if len(seed) != 32:
        raise ValueError("seed must be 32 random bytes or None.")
    secret = seed
    verifying = djbec.publickey(secret)
    # The stored secret key is seed || public key.
    return Keypair(verifying, secret + verifying)
def crypto_sign(msg, sk):
    """Return signature+message given message and secret key.

    The signature is the first SIGNATUREBYTES bytes of the return value.
    A copy of msg is in the remainder."""
    if len(sk) != SECRETKEYBYTES:
        raise ValueError("Bad signing key length %d" % len(sk))
    # Secret key layout is seed || public key.
    seed, verifying = sk[:PUBLICKEYBYTES], sk[PUBLICKEYBYTES:]
    return djbec.signature(msg, seed, verifying) + msg
def crypto_sign_open(signed, vk):
    """Return message given signature+message and the verifying key."""
    if len(vk) != PUBLICKEYBYTES:
        raise ValueError("Bad verifying key length %d" % len(vk))
    sig, msg = signed[:SIGNATUREBYTES], signed[SIGNATUREBYTES:]
    ok = djbec.checkvalid(sig, msg, vk)
    if not ok:
        raise ValueError("rc != True", ok)
    return msg
| mit |
KohlsTechnology/ansible | test/units/modules/network/iosxr/test_iosxr_user.py | 49 | 3742 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.iosxr import iosxr_user
from units.modules.utils import set_module_args
from .iosxr_module import TestIosxrModule, load_fixture
class TestIosxrUserModule(TestIosxrModule):
    """Unit tests for the iosxr_user Ansible module, driven through the
    shared TestIosxrModule harness with the device-facing helpers
    (get_config / load_config / is_cliconf) patched out."""

    module = iosxr_user

    def setUp(self):
        super(TestIosxrUserModule, self).setUp()

        # Patch the config helpers so no device connection is needed;
        # the corresponding .stop() calls live in tearDown below.
        self.mock_get_config = patch('ansible.modules.network.iosxr.iosxr_user.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.iosxr.iosxr_user.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_is_cliconf = patch('ansible.modules.network.iosxr.iosxr_user.is_cliconf')
        self.is_cliconf = self.mock_is_cliconf.start()

    def tearDown(self):
        super(TestIosxrUserModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_is_cliconf.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # Canned device state used by every test: an existing config with
        # an 'ansible' user, and a load_config that reports no diff.
        self.get_config.return_value = load_fixture('iosxr_user_config.cfg')
        self.load_config.return_value = dict(diff=None, session='session')
        self.is_cliconf.return_value = True

    def test_iosxr_user_delete(self):
        set_module_args(dict(name='ansible', state='absent'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['no username ansible'])

    def test_iosxr_user_password(self):
        set_module_args(dict(name='ansible', configured_password='test'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['username ansible secret test'])

    def test_iosxr_user_purge(self):
        # purge removes every user not named in the task (here: 'ansible').
        set_module_args(dict(purge=True))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['no username ansible'])

    def test_iosxr_user_group(self):
        set_module_args(dict(name='ansible', group='sysadmin'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['username ansible group sysadmin'])

    def test_iosxr_user_update_password_changed(self):
        # New user ('test' is not in the fixture), so on_create still sets
        # the password.
        set_module_args(dict(name='test', configured_password='test', update_password='on_create'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'],
                         ['username test', 'username test secret test'])

    def test_iosxr_user_update_password_on_create_ok(self):
        # Existing user with on_create: no password command is generated.
        set_module_args(dict(name='ansible', configured_password='test', update_password='on_create'))
        self.execute_module()

    def test_iosxr_user_update_password_always(self):
        set_module_args(dict(name='ansible', configured_password='test', update_password='always'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['username ansible secret test'])
| gpl-3.0 |
hashem78/namebench | nb_third_party/dns/rdtypes/ANY/GPOS.py | 248 | 5304 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
def _validate_float_string(what):
    """Validate that *what* is a decimal number in string form.

    Accepts an optional leading sign, then either all digits or exactly
    one '.' with digits on at least one side of it.  Raises
    dns.exception.FormError for anything else.
    """
    if what[0] == '-' or what[0] == '+':
        what = what[1:]
    if what.isdigit():
        return
    try:
        (left, right) = what.split('.')
    except ValueError:
        # Zero dots (e.g. "abc") or more than one (e.g. "1.2.3"): the
        # original code let the tuple-unpacking ValueError escape here
        # instead of raising the FormError callers expect.
        raise dns.exception.FormError
    if left == '' and right == '':
        raise dns.exception.FormError
    if not left == '' and not left.isdigit():
        raise dns.exception.FormError
    if not right == '' and not right.isdigit():
        raise dns.exception.FormError
class GPOS(dns.rdata.Rdata):
    """GPOS record

    @ivar latitude: latitude
    @type latitude: string
    @ivar longitude: longitude
    @type longitude: string
    @ivar altitude: altitude
    @type altitude: string
    @see: RFC 1712"""

    # NOTE(review): this module uses Python 2 idioms throughout (`long`,
    # `cmp`, `ord()` on one-character string slices, `chr()` written to a
    # binary stream) and is not Python 3 compatible as written.

    __slots__ = ['latitude', 'longitude', 'altitude']

    def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
        super(GPOS, self).__init__(rdclass, rdtype)
        # Accept numeric values for convenience and normalise each field
        # to its string form before validating it.
        if isinstance(latitude, float) or \
           isinstance(latitude, int) or \
           isinstance(latitude, long):
            latitude = str(latitude)
        if isinstance(longitude, float) or \
           isinstance(longitude, int) or \
           isinstance(longitude, long):
            longitude = str(longitude)
        if isinstance(altitude, float) or \
           isinstance(altitude, int) or \
           isinstance(altitude, long):
            altitude = str(altitude)
        _validate_float_string(latitude)
        _validate_float_string(longitude)
        _validate_float_string(altitude)
        self.latitude = latitude
        self.longitude = longitude
        self.altitude = altitude

    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation format is simply the three fields space-separated.
        return '%s %s %s' % (self.latitude, self.longitude, self.altitude)

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse three whitespace-separated strings from the tokenizer.
        latitude = tok.get_string()
        longitude = tok.get_string()
        altitude = tok.get_string()
        tok.get_eol()
        return cls(rdclass, rdtype, latitude, longitude, altitude)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Wire format is three length-prefixed character strings
        # (<length byte><text>); each field must therefore be < 256 bytes.
        l = len(self.latitude)
        assert l < 256
        byte = chr(l)
        file.write(byte)
        file.write(self.latitude)
        l = len(self.longitude)
        assert l < 256
        byte = chr(l)
        file.write(byte)
        file.write(self.longitude)
        l = len(self.altitude)
        assert l < 256
        byte = chr(l)
        file.write(byte)
        file.write(self.altitude)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        # Walk the three length-prefixed strings, raising FormError if a
        # length byte would run past the rdata, or (for the final field)
        # does not consume the remaining rdata exactly.
        l = ord(wire[current])
        current += 1
        rdlen -= 1
        if l > rdlen:
            raise dns.exception.FormError
        latitude = wire[current : current + l]
        current += l
        rdlen -= l
        l = ord(wire[current])
        current += 1
        rdlen -= 1
        if l > rdlen:
            raise dns.exception.FormError
        longitude = wire[current : current + l]
        current += l
        rdlen -= l
        l = ord(wire[current])
        current += 1
        rdlen -= 1
        if l != rdlen:
            raise dns.exception.FormError
        altitude = wire[current : current + l]
        return cls(rdclass, rdtype, latitude, longitude, altitude)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Field-by-field string comparison, latitude first.
        v = cmp(self.latitude, other.latitude)
        if v == 0:
            v = cmp(self.longitude, other.longitude)
            if v == 0:
                v = cmp(self.altitude, other.altitude)
        return v

    def _get_float_latitude(self):
        return float(self.latitude)

    def _set_float_latitude(self, value):
        self.latitude = str(value)

    float_latitude = property(_get_float_latitude, _set_float_latitude,
                              doc="latitude as a floating point value")

    def _get_float_longitude(self):
        return float(self.longitude)

    def _set_float_longitude(self, value):
        self.longitude = str(value)

    float_longitude = property(_get_float_longitude, _set_float_longitude,
                               doc="longitude as a floating point value")

    def _get_float_altitude(self):
        return float(self.altitude)

    def _set_float_altitude(self, value):
        self.altitude = str(value)

    float_altitude = property(_get_float_altitude, _set_float_altitude,
                              doc="altitude as a floating point value")
| apache-2.0 |
meiordac/ecommerce | catalog/migrations/0001_initial.py | 1 | 3187 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-13 18:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial catalog schema.

    Auto-generated by Django 1.9: creates the ``categories`` and ``products``
    tables and the implicit M2M join table linking products to categories.
    Do not edit operations by hand unless you also keep the migration graph
    consistent.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Product category; `slug` is the unique URL component.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('slug', models.SlugField(help_text='Unique value for product page URL, created from name.', unique=True)),
                ('description', models.TextField()),
                ('is_active', models.BooleanField(default=True)),
                ('meta_keywords', models.CharField(help_text='Comma-delimited set of SEO keywords for meta tag', max_length=255, verbose_name='Meta keywords')),
                ('meta_description', models.CharField(help_text='Content of description meta tag', max_length=255, verbose_name='Meta Description')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['name'],
                'db_table': 'categories',
                'verbose_name_plural': 'Categories',
            },
        ),
        # Catalog item; may belong to several categories (M2M below).
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('slug', models.SlugField(help_text='Unique value for product page URL, created from name.', max_length=255, unique=True)),
                ('brand', models.CharField(max_length=50)),
                ('sku', models.CharField(max_length=50)),
                ('price', models.DecimalField(decimal_places=2, max_digits=15)),
                ('old_price', models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=15)),
                ('image', models.CharField(max_length=50)),
                ('is_active', models.BooleanField(default=True)),
                ('is_bestseller', models.BooleanField(default=False)),
                ('is_featured', models.BooleanField(default=False)),
                ('quantity', models.IntegerField()),
                ('description', models.TextField()),
                ('meta_keywords', models.CharField(help_text='Comma-delimited set of SEO keywords for meta tag', max_length=255, verbose_name='Meta keywords')),
                ('meta_description', models.CharField(help_text='Content of description meta tag', max_length=255, verbose_name='Meta Description')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('categories', models.ManyToManyField(to='catalog.Category')),
            ],
            options={
                'ordering': ['-created_at'],
                'db_table': 'products',
            },
        ),
    ]
| mit |
morelab/labmanager | alembic/versions/46a3d679666e_initial_database.py | 5 | 7635 | """Initial database
Revision ID: 46a3d679666e
Revises: None
Create Date: 2013-10-08 01:34:51.501561
"""
# revision identifiers, used by Alembic.
revision = '46a3d679666e'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial LabManager schema.

    Tables are created parents-first so that every foreign key target exists
    before it is referenced: RLMSs, learning tools and users, then courses /
    laboratories, then the permission link tables.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('rlmss',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('kind', sa.Unicode(length=50), nullable=False),
    sa.Column('location', sa.Unicode(length=50), nullable=False),
    sa.Column('url', sa.Unicode(length=300), nullable=False),
    sa.Column('version', sa.Unicode(length=50), nullable=False),
    sa.Column('configuration', sa.Unicode(length=10240), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('learning_tools',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Unicode(length=50), nullable=False),
    sa.Column('full_name', sa.Unicode(length=50), nullable=False),
    sa.Column('url', sa.Unicode(length=300), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('full_name'),
    sa.UniqueConstraint('name')
    )
    op.create_table('labmanager_users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('login', sa.Unicode(length=50), nullable=True),
    sa.Column('name', sa.Unicode(length=50), nullable=False),
    sa.Column('password', sa.Unicode(length=50), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('login')
    )
    op.create_table('lt_users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('login', sa.Unicode(length=50), nullable=False),
    sa.Column('full_name', sa.Unicode(length=50), nullable=False),
    sa.Column('password', sa.Unicode(length=128), nullable=False),
    sa.Column('access_level', sa.Unicode(length=50), nullable=False),
    sa.Column('lt_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['lt_id'], ['learning_tools.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('login','lt_id')
    )
    op.create_table('basic_http_credentials',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('lt_id', sa.Integer(), nullable=False),
    sa.Column('lt_login', sa.Unicode(length=50), nullable=False),
    sa.Column('lt_password', sa.Unicode(length=50), nullable=False),
    sa.Column('lt_url', sa.Unicode(length=300), nullable=True),
    sa.Column('labmanager_login', sa.Unicode(length=50), nullable=True),
    sa.Column('labmanager_password', sa.Unicode(length=50), nullable=True),
    sa.ForeignKeyConstraint(['lt_id'], ['learning_tools.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('lt_id'),
    sa.UniqueConstraint('lt_login')
    )
    op.create_table('laboratories',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Unicode(length=50), nullable=False),
    sa.Column('laboratory_id', sa.Unicode(length=50), nullable=False),
    sa.Column('rlms_id', sa.Integer(), nullable=False),
    sa.Column('visibility', sa.Unicode(length=50), nullable=False),
    sa.Column('available', sa.Boolean(), nullable=False),
    sa.Column('default_local_identifier', sa.Unicode(length=50), nullable=False),
    sa.ForeignKeyConstraint(['rlms_id'], ['rlmss.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('laboratory_id','rlms_id')
    )
    op.create_table('courses',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('lt_id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Unicode(length=50), nullable=False),
    sa.Column('context_id', sa.Unicode(length=50), nullable=False),
    sa.ForeignKeyConstraint(['lt_id'], ['learning_tools.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('lt_id','context_id')
    )
    op.create_table('shindig_credentials',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('lt_id', sa.Integer(), nullable=False),
    sa.Column('shindig_url', sa.Unicode(length=50), nullable=False),
    sa.ForeignKeyConstraint(['lt_id'], ['learning_tools.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('lt_id')
    )
    op.create_table('request_permissions_lt',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('local_identifier', sa.Unicode(length=100), nullable=False),
    sa.Column('laboratory_id', sa.Integer(), nullable=False),
    sa.Column('lt_id', sa.Integer(), nullable=False),
    sa.Column('configuration', sa.Unicode(length=10240), nullable=True),
    sa.Column('accessible', sa.Boolean(), nullable=False),
    sa.ForeignKeyConstraint(['laboratory_id'], ['laboratories.id'], ),
    sa.ForeignKeyConstraint(['lt_id'], ['learning_tools.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('laboratory_id','lt_id'),
    sa.UniqueConstraint('local_identifier','lt_id')
    )
    op.create_table('users2courses',
    sa.Column('course_id', sa.Integer(), nullable=True),
    sa.Column('lt_user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['course_id'], ['courses.id'], ),
    sa.ForeignKeyConstraint(['lt_user_id'], ['lt_users.id'], ),
    sa.PrimaryKeyConstraint()
    )
    op.create_table('permissions2lt',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('local_identifier', sa.Unicode(length=100), nullable=False),
    sa.Column('laboratory_id', sa.Integer(), nullable=False),
    sa.Column('lt_id', sa.Integer(), nullable=False),
    sa.Column('configuration', sa.Unicode(length=10240), nullable=True),
    sa.Column('accessible', sa.Boolean(), nullable=False),
    sa.ForeignKeyConstraint(['laboratory_id'], ['laboratories.id'], ),
    sa.ForeignKeyConstraint(['lt_id'], ['learning_tools.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('laboratory_id','lt_id'),
    sa.UniqueConstraint('local_identifier','lt_id')
    )
    op.create_table('permissions2course',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('configuration', sa.Unicode(length=10240), nullable=True),
    sa.Column('permission_to_lt_id', sa.Integer(), nullable=False),
    sa.Column('course_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['course_id'], ['courses.id'], ),
    sa.ForeignKeyConstraint(['permission_to_lt_id'], ['permissions2lt.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('permission_to_lt_id','course_id')
    )
    op.create_table('permissions2ltuser',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('permission_to_lt_id', sa.Integer(), nullable=False),
    sa.Column('lt_user_id', sa.Integer(), nullable=False),
    sa.Column('key', sa.Unicode(length=100), nullable=False),
    sa.Column('secret', sa.Unicode(length=100), nullable=False),
    sa.ForeignKeyConstraint(['lt_user_id'], ['lt_users.id'], ),
    sa.ForeignKeyConstraint(['permission_to_lt_id'], ['permissions2lt.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('key'),
    sa.UniqueConstraint('permission_to_lt_id','lt_user_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Revert the initial schema: drop every table created by upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    # Children first, parents last (reverse of the creation order), so no
    # foreign-key target is dropped while still referenced.
    for table_name in (
        'permissions2ltuser',
        'permissions2course',
        'permissions2lt',
        'users2courses',
        'request_permissions_lt',
        'shindig_credentials',
        'courses',
        'laboratories',
        'basic_http_credentials',
        'lt_users',
        'labmanager_users',
        'learning_tools',
        'rlmss',
    ):
        op.drop_table(table_name)
    ### end Alembic commands ###
| bsd-2-clause |
team-xue/xue | xue/cms/models/pluginmodel.py | 1 | 14306 | # -*- coding: utf-8 -*-
from cms.exceptions import DontUsePageAttributeWarning
from cms.models.placeholdermodel import Placeholder
from cms.plugin_rendering import PluginContext, PluginRenderer
from cms.utils.helpers import reversion_register
from cms.utils.placeholder import get_page_from_placeholder_if_exists
from datetime import datetime, date
from django.conf import settings
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
from django.db.models.base import ModelBase, model_unpickle, \
simple_class_factory
from django.db.models.query_utils import DeferredAttribute
from django.utils.translation import ugettext_lazy as _
from os.path import join
from publisher.mptt_support import Mptt, install_mptt
import warnings
class PluginModelBase(ModelBase):
    """
    Metaclass for all plugins.

    Responsibilities:
    - turn an inner ``RenderMeta`` class into a ``_render_meta`` instance
      attribute on the model class;
    - run the MPTT installation hook on the attribute dict;
    - for subclasses of ``CMSPlugin``, rewrite the db table name from the
      default ``<app_label>_<model>`` to ``cmsplugin_<model>``.
    """
    def __new__(cls, name, bases, attrs):
        render_meta = attrs.pop('RenderMeta', None)
        if render_meta is not None:
            # Instantiate so every class gets its own mutable render state.
            attrs['_render_meta'] = render_meta()
        attrs = install_mptt(cls, name, bases, attrs)
        new_class = super(PluginModelBase, cls).__new__(cls, name, bases, attrs)
        # Walk up the (single-inheritance) base chain looking for CMSPlugin.
        found = False
        bbases = bases
        while bbases:
            bcls = bbases[0]
            if bcls.__name__ == "CMSPlugin":
                found = True
                bbases = False
            else:
                bbases = bcls.__bases__
        if found:
            # Replace the "<app_label>_" table prefix with "cmsplugin_".
            if new_class._meta.db_table.startswith("%s_" % new_class._meta.app_label):
                table = "cmsplugin_" + new_class._meta.db_table.split("%s_" % new_class._meta.app_label, 1)[1]
                new_class._meta.db_table = table
        return new_class
class CMSPlugin(Mptt):
    '''
    The base class for a CMS plugin model. When defining a new custom plugin, you should
    store plugin-instance specific information on a subclass of this class.
    An example for this would be to store the number of pictures to display in a galery.
    Two restrictions apply when subclassing this to use in your own models:
    1. Subclasses of CMSPlugin *cannot be further subclassed*
    2. Subclasses of CMSPlugin cannot define a "text" field.
    '''
    __metaclass__ = PluginModelBase
    # Placement / MPTT bookkeeping. All fields are non-editable because they
    # are managed by the placeholder machinery, not by end users.
    placeholder = models.ForeignKey(Placeholder, editable=False, null=True)
    parent = models.ForeignKey('self', blank=True, null=True, editable=False)
    position = models.PositiveSmallIntegerField(_("position"), blank=True, null=True, editable=False)
    language = models.CharField(_("language"), max_length=15, blank=False, db_index=True, editable=False)
    plugin_type = models.CharField(_("plugin_name"), max_length=50, db_index=True, editable=False)
    creation_date = models.DateTimeField(_("creation date"), editable=False, default=datetime.now)
    level = models.PositiveIntegerField(db_index=True, editable=False)
    lft = models.PositiveIntegerField(db_index=True, editable=False)
    rght = models.PositiveIntegerField(db_index=True, editable=False)
    tree_id = models.PositiveIntegerField(db_index=True, editable=False)
    class Meta:
        app_label = 'cms'
    class RenderMeta:
        # Per-render state consumed by PluginModelBase (see _render_meta):
        # position of this plugin among its siblings and the sibling count.
        index = 0
        total = 1
        text_enabled = False
    def __reduce__(self):
        """
        Provide pickling support. Normally, this just dispatches to Python's
        standard handling. However, for models with deferred field loading, we
        need to do things manually, as they're dynamically created classes and
        only module-level classes can be pickled by the default path.
        """
        data = self.__dict__
        model = self.__class__
        # The obvious thing to do here is to invoke super().__reduce__()
        # for the non-deferred case. Don't do that.
        # On Python 2.4, there is something wierd with __reduce__,
        # and as a result, the super call will cause an infinite recursion.
        # See #10547 and #12121.
        defers = []
        pk_val = None
        if self._deferred:
            factory = deferred_class_factory
            for field in self._meta.fields:
                if isinstance(self.__class__.__dict__.get(field.attname),
                              DeferredAttribute):
                    defers.append(field.attname)
                    if pk_val is None:
                        # The pk_val and model values are the same for all
                        # DeferredAttribute classes, so we only need to do this
                        # once.
                        obj = self.__class__.__dict__[field.attname]
                        model = obj.model_ref()
        else:
            factory = simple_class_factory
        return (model_unpickle, (model, defers, factory), data)
    def __unicode__(self):
        return unicode(self.id)
    def get_plugin_name(self):
        # Local import avoids a circular import at module load time.
        from cms.plugin_pool import plugin_pool
        return plugin_pool.get_plugin(self.plugin_type).name
    def get_short_description(self):
        # Human-readable label for admin lists; falls back when the concrete
        # plugin instance no longer exists.
        instance = self.get_plugin_instance()[0]
        if instance:
            return instance.__unicode__()
        else:
            return _("<Empty>")
    def get_plugin_class(self):
        from cms.plugin_pool import plugin_pool
        return plugin_pool.get_plugin(self.plugin_type)
    def get_plugin_instance(self, admin=None):
        """Return (concrete plugin instance or None, plugin class instance)."""
        from cms.plugin_pool import plugin_pool
        plugin_class = plugin_pool.get_plugin(self.plugin_type)
        plugin = plugin_class(plugin_class.model, admin)# needed so we have the same signature as the original ModelAdmin
        if plugin.model != self.__class__: # and self.__class__ == CMSPlugin:
            # (if self is actually a subclass, getattr below would break)
            try:
                instance = getattr(self, plugin.model.__name__.lower())
                # could alternatively be achieved with:
                # instance = plugin_class.model.objects.get(cmsplugin_ptr=self)
                instance._render_meta = self._render_meta
            except (AttributeError, ObjectDoesNotExist):
                instance = None
        else:
            instance = self
        return instance, plugin
    def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
        """Render this plugin to HTML; returns "" when nothing is renderable."""
        instance, plugin = self.get_plugin_instance()
        if instance and not (admin and not plugin.admin_preview):
            if isinstance(placeholder, Placeholder):
                placeholder_slot = placeholder.slot
            else:
                # `placeholder` may be a slot name string or None.
                placeholder_slot = placeholder or instance.placeholder.slot
                placeholder = instance.placeholder
            context = PluginContext(context, instance, placeholder)
            context = plugin.render(context, instance, placeholder_slot)
            if plugin.render_plugin:
                # Instance-level template overrides the plugin-class template.
                template = hasattr(instance, 'render_template') and instance.render_template or plugin.render_template
                if not template:
                    raise ValidationError("plugin has no render_template: %s" % plugin.__class__)
            else:
                template = None
            renderer = PluginRenderer(context, instance, placeholder, template, processors)
            return renderer.content
        return ""
    def get_media_path(self, filename):
        pages = self.placeholder.page_set.all()
        if pages.count():
            return pages[0].get_media_path(filename)
        else: # django 1.0.2 compatibility
            today = date.today()
            return join(settings.CMS_PAGE_MEDIA_PATH, str(today.year), str(today.month), str(today.day), filename)
    @property
    def page(self):
        # Deprecated accessor; kept for backwards compatibility only.
        warnings.warn(
            "Don't use the page attribute on CMSPlugins! CMSPlugins are not "
            "guaranteed to have a page associated with them!",
            DontUsePageAttributeWarning)
        return get_page_from_placeholder_if_exists(self.placeholder)
    def get_instance_icon_src(self):
        """
        Get src URL for instance's icon
        """
        instance, plugin = self.get_plugin_instance()
        if instance:
            return plugin.icon_src(instance)
        else:
            return u''
    def get_instance_icon_alt(self):
        """
        Get alt text for instance's icon
        """
        instance, plugin = self.get_plugin_instance()
        if instance:
            return unicode(plugin.icon_alt(instance))
        else:
            return u''
    def save(self, no_signals=False, *args, **kwargs):
        # NOTE(review): *args/**kwargs are accepted but not forwarded to the
        # superclass save; callers relying on e.g. `using=` will be ignored.
        if no_signals:# ugly hack because of mptt
            super(CMSPlugin, self).save_base(cls=self.__class__)
        else:
            super(CMSPlugin, self).save()
    def set_base_attr(self, plugin):
        # Copy the placement/tree attributes of this plugin onto `plugin`.
        for attr in ['parent_id', 'placeholder', 'language', 'plugin_type', 'creation_date', 'level', 'lft', 'rght', 'position', 'tree_id']:
            setattr(plugin, attr, getattr(self, attr))
    def copy_plugin(self, target_placeholder, target_language, plugin_tree):
        """
        Copy this plugin and return the new plugin.
        """
        try:
            plugin_instance, cls = self.get_plugin_instance()
        except KeyError: #plugin type not found anymore
            return
        new_plugin = CMSPlugin()
        new_plugin.placeholder = target_placeholder
        # Tree fields are reset so MPTT re-inserts the node on save().
        new_plugin.tree_id = None
        new_plugin.lft = None
        new_plugin.rght = None
        if self.parent:
            # `plugin_tree` tracks the ancestor chain of the copy in progress;
            # trim it back when this plugin is shallower than the last copied one.
            pdif = self.level - plugin_tree[-1].level
            if pdif < 0:
                plugin_tree[:] = plugin_tree[:pdif-1]
            new_plugin.parent = plugin_tree[-1]
            if pdif != 0:
                plugin_tree.append(new_plugin)
        else:
            plugin_tree[:] = [new_plugin]
        new_plugin.level = None
        new_plugin.language = target_language
        new_plugin.plugin_type = self.plugin_type
        new_plugin.position = self.position
        new_plugin.save()
        if plugin_instance:
            # Re-point the concrete (subclass) row at the freshly saved base row.
            plugin_instance.pk = new_plugin.pk
            plugin_instance.id = new_plugin.pk
            plugin_instance.placeholder = target_placeholder
            plugin_instance.tree_id = new_plugin.tree_id
            plugin_instance.lft = new_plugin.lft
            plugin_instance.rght = new_plugin.rght
            plugin_instance.level = new_plugin.level
            plugin_instance.cmsplugin_ptr = new_plugin
            plugin_instance.language = target_language
            plugin_instance.position = new_plugin.position # added to retain the position when creating a public copy of a plugin
            plugin_instance.save()
            old_instance = plugin_instance.__class__.objects.get(pk=self.pk)
            plugin_instance.copy_relations(old_instance)
        return new_plugin
    def post_copy(self, old_instance, new_old_ziplist):
        """
        Handle more advanced cases (eg Text Plugins) after the original is
        copied
        """
        pass
    def copy_relations(self, old_instance):
        """
        Handle copying of any relations attached to this plugin. Custom plugins
        have to do this themselves!
        """
        pass
    def delete_with_public(self):
        """
        Delete the public copy of this plugin if it exists,
        then delete the draft
        """
        position = self.position
        slot = self.placeholder.slot
        page = get_page_from_placeholder_if_exists(self.placeholder)
        if page and getattr(page, 'publisher_public'):
            try:
                placeholder = Placeholder.objects.get(page=page.publisher_public, slot=slot)
            except Placeholder.DoesNotExist:
                pass
            else:
                # The public twin is matched by (placeholder, position).
                public_plugin = CMSPlugin.objects.filter(placeholder=placeholder, position=position)
                public_plugin.delete()
        self.placeholder = None
        self.delete()
    def has_change_permission(self, request):
        # Delegate to the page, the placeholder or the parent plugin, in that
        # order of preference.
        page = get_page_from_placeholder_if_exists(self.placeholder)
        if page:
            return page.has_change_permission(request)
        elif self.placeholder:
            return self.placeholder.has_change_permission(request)
        elif self.parent:
            return self.parent.has_change_permission(request)
        return False
    def is_first_in_placeholder(self):
        return self.position == 0
    def is_last_in_placeholder(self):
        """
        WARNING: this is a rather expensive call compared to is_first_in_placeholder!
        """
        return self.placeholder.cmsplugin_set.all().order_by('-position')[0].pk == self.pk
    def get_position_in_placeholder(self):
        """
        1 based position!
        """
        return self.position + 1
reversion_register(CMSPlugin)
def deferred_class_factory(model, attrs):
    """
    Returns a class object that is a copy of "model" with the specified "attrs"
    being replaced with DeferredAttribute objects. The "pk_value" ties the
    deferred attributes to a particular instance of the model.
    """
    class Meta:
        pass
    setattr(Meta, "proxy", True)
    setattr(Meta, "app_label", model._meta.app_label)
    # Mirror the source model's render metadata onto the deferred class so
    # PluginModelBase reconstructs an equivalent _render_meta.
    class RenderMeta:
        pass
    setattr(RenderMeta, "index", model._render_meta.index)
    setattr(RenderMeta, "total", model._render_meta.total)
    setattr(RenderMeta, "text_enabled", model._render_meta.text_enabled)
    # The app_cache wants a unique name for each model, otherwise the new class
    # won't be created (we get an old one back). Therefore, we generate the
    # name using the passed in attrs. It's OK to reuse an old case if the attrs
    # are identical.
    name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
    overrides = dict([(attr, DeferredAttribute(attr, model))
                      for attr in attrs])
    overrides["Meta"] = RenderMeta
    overrides["RenderMeta"] = RenderMeta
    overrides["__module__"] = model.__module__
    overrides["_deferred"] = True
    return type(name, (model,), overrides)
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
| bsd-3-clause |
hsaputra/tensorflow | tensorflow/python/framework/random_seed.py | 19 | 5811 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
DEFAULT_GRAPH_SEED = 87654321
_MAXINT32 = 2**31 - 1
def _truncate_seed(seed):
return seed % _MAXINT32 # Truncate to fit into 32-bit integer
def get_seed(op_seed):
  """Returns the local seeds an operation should use given an op-specific seed.
  Given operation-specific seed, `op_seed`, this helper function returns two
  seeds derived from graph-level and op-level seeds. Many random operations
  internally use the two seeds to allow user to change the seed globally for a
  graph, or for only specific operations.
  For details on how the graph-level seed interacts with op seeds, see
  @{tf.set_random_seed}.
  Args:
    op_seed: integer.
  Returns:
    A tuple of two integers that should be used for the local seed of this
    operation.
  """
  # Graph mode stores the global seed on the graph; eager mode stores it on
  # the context.
  is_graph_mode = context.in_graph_mode()
  if is_graph_mode:
    global_seed = ops.get_default_graph().seed
  else:
    global_seed = context.global_seed()
  if global_seed is not None:
    if op_seed is None:
      # No op seed given: derive a deterministic, per-op seed so each op gets
      # a distinct sequence under the same graph seed.
      # pylint: disable=protected-access
      if is_graph_mode:
        op_seed = ops.get_default_graph()._last_id
      else:
        op_seed = context.internal_operation_seed()
    seeds = _truncate_seed(global_seed), _truncate_seed(op_seed)
  else:
    if op_seed is not None:
      seeds = DEFAULT_GRAPH_SEED, _truncate_seed(op_seed)
    else:
      seeds = None, None
  # Avoid (0, 0) as the C++ ops interpret it as nondeterminism, which would
  # be unexpected since Python docs say nondeterminism is (None, None).
  if seeds == (0, 0):
    return (0, _MAXINT32)
  return seeds
def set_random_seed(seed):
  """Sets the graph-level random seed.
  Operations that rely on a random seed actually derive it from two seeds:
  the graph-level and operation-level seeds. This sets the graph-level seed.
  Its interactions with operation-level seeds is as follows:
  1. If neither the graph-level nor the operation seed is set:
    A random seed is used for this op.
  2. If the graph-level seed is set, but the operation seed is not:
    The system deterministically picks an operation seed in conjunction
    with the graph-level seed so that it gets a unique random sequence.
  3. If the graph-level seed is not set, but the operation seed is set:
    A default graph-level seed and the specified operation seed are used to
    determine the random sequence.
  4. If both the graph-level and the operation seed are set:
    Both seeds are used in conjunction to determine the random sequence.
  To illustrate the user-visible effects, consider these examples:
  To generate different sequences across sessions, set neither
  graph-level nor op-level seeds:
  ```python
  a = tf.random_uniform([1])
  b = tf.random_normal([1])
  print("Session 1")
  with tf.Session() as sess1:
    print(sess1.run(a))  # generates 'A1'
    print(sess1.run(a))  # generates 'A2'
    print(sess1.run(b))  # generates 'B1'
    print(sess1.run(b))  # generates 'B2'
  print("Session 2")
  with tf.Session() as sess2:
    print(sess2.run(a))  # generates 'A3'
    print(sess2.run(a))  # generates 'A4'
    print(sess2.run(b))  # generates 'B3'
    print(sess2.run(b))  # generates 'B4'
  ```
  To generate the same repeatable sequence for an op across sessions, set the
  seed for the op:
  ```python
  a = tf.random_uniform([1], seed=1)
  b = tf.random_normal([1])
  # Repeatedly running this block with the same graph will generate the same
  # sequence of values for 'a', but different sequences of values for 'b'.
  print("Session 1")
  with tf.Session() as sess1:
    print(sess1.run(a))  # generates 'A1'
    print(sess1.run(a))  # generates 'A2'
    print(sess1.run(b))  # generates 'B1'
    print(sess1.run(b))  # generates 'B2'
  print("Session 2")
  with tf.Session() as sess2:
    print(sess2.run(a))  # generates 'A1'
    print(sess2.run(a))  # generates 'A2'
    print(sess2.run(b))  # generates 'B3'
    print(sess2.run(b))  # generates 'B4'
  ```
  To make the random sequences generated by all ops be repeatable across
  sessions, set a graph-level seed:
  ```python
  tf.set_random_seed(1234)
  a = tf.random_uniform([1])
  b = tf.random_normal([1])
  # Repeatedly running this block with the same graph will generate the same
  # sequences of 'a' and 'b'.
  print("Session 1")
  with tf.Session() as sess1:
    print(sess1.run(a))  # generates 'A1'
    print(sess1.run(a))  # generates 'A2'
    print(sess1.run(b))  # generates 'B1'
    print(sess1.run(b))  # generates 'B2'
  print("Session 2")
  with tf.Session() as sess2:
    print(sess2.run(a))  # generates 'A1'
    print(sess2.run(a))  # generates 'A2'
    print(sess2.run(b))  # generates 'B1'
    print(sess2.run(b))  # generates 'B2'
  ```
  Args:
    seed: integer.
  """
  # The seed lives on the default graph in graph mode, and on the eager
  # context otherwise (mirrors the lookup in get_seed above).
  if context.in_graph_mode():
    ops.get_default_graph().seed = seed
  else:
    context.set_global_seed(seed)
| apache-2.0 |
robbi/pyload | module/plugins/hoster/SendspaceCom.py | 8 | 2509 | # -*- coding: utf-8 -*-
import re
from ..internal.SimpleHoster import SimpleHoster
class SendspaceCom(SimpleHoster):
    __name__ = "SendspaceCom"
    __type__ = "hoster"
    __version__ = "0.23"
    __status__ = "testing"

    __pattern__ = r'https?://(?:www\.)?sendspace\.com/file/\w+'
    __config__ = [("activated", "bool", "Activated", True),
                  ("use_premium", "bool", "Use premium account if available", True),
                  ("fallback", "bool",
                   "Fallback to free download if premium fails", True),
                  ("chk_filesize", "bool", "Check file size", True),
                  ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10)]

    __description__ = """Sendspace.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]

    NAME_PATTERN = r'<h2 class="bgray">\s*<(?:b|strong)>(?P<N>.+?)</'
    SIZE_PATTERN = r'<div class="file_description reverse margin_center">\s*<b>File Size:</b>\s*(?P<S>[\d.,]+)(?P<U>[\w^_]+)\s*</div>'
    OFFLINE_PATTERN = r'<div class="msg error" style="cursor: default">Sorry, the file you requested is not available.</div>'

    LINK_FREE_PATTERN = r'<a id="download_button" class="download_page_button button1" href="(.+?)"'

    #: Fix: the '?' before the query string must be escaped ('\?'), otherwise
    #: it acts as a regex quantifier on the preceding 'p' and the pattern can
    #: never match the literal "captcha.php?..." URL in the page.
    #: Group 1 is the full captcha path, group 2 the captcha hash.
    CAPTCHA_PATTERN = r'<td><img src="(/captchas/captcha\.php\?captcha=(.+?))"></td>'
    #: Fix: the original pattern had an unbalanced parenthesis ('(.+?))'),
    #: which raises re.error at match time, plus the same unescaped '?'.
    #: Mirrors CAPTCHA_PATTERN: group 1 = full path (used to build the URL),
    #: group 2 = user hash (submitted as 'captcha_hash').
    USER_CAPTCHA_PATTERN = r'<td><img src="(/captchas/captcha\.php\?user=(.+?))"></td>'

    def handle_free(self, pyfile):
        """Resolve the free-download link, solving the captcha if presented."""
        m = re.search(self.LINK_FREE_PATTERN, self.data)
        if m is not None:
            # Direct link already on the page: no captcha round-trip needed.
            self.link = m.group(1)
        else:
            m = re.search(self.CAPTCHA_PATTERN, self.data)
            if m is None:
                params = {'download': "Regular Download"}
            else:
                captcha_url1 = "http://www.sendspace.com/" + m.group(1)
                # NOTE(review): assumes the user-captcha image is always present
                # alongside the captcha image; a missing match would raise here.
                m = re.search(self.USER_CAPTCHA_PATTERN, self.data)
                captcha_url2 = "http://www.sendspace.com/" + m.group(1)
                params = {'captcha_hash': m.group(2),
                          'captcha_submit': 'Verify',
                          'captcha_answer': self.captcha.decrypt(captcha_url1) + " " + self.captcha.decrypt(captcha_url2)}

            self.log_debug(params)
            self.data = self.load(pyfile.url, post=params)

            m = re.search(self.LINK_FREE_PATTERN, self.data)
            if m is None:
                self.retry_captcha()
            else:
                self.link = m.group(1)
| gpl-3.0 |
slevenhagen/odoo | addons/stock/procurement.py | 227 | 22183 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
from openerp import SUPERUSER_ID
from dateutil.relativedelta import relativedelta
from datetime import datetime
from psycopg2 import OperationalError
import openerp
class procurement_group(osv.osv):
    """Extend procurement.group with the partner the group is destined to."""
    _inherit = 'procurement.group'
    _columns = {
        'partner_id': fields.many2one('res.partner', 'Partner')
    }
class procurement_rule(osv.osv):
    """Stock-specific extension of procurement rules: adds the 'move' action
    and the routing / warehouse fields used to generate stock moves."""
    _inherit = 'procurement.rule'
    def _get_action(self, cr, uid, context=None):
        # Register the stock-specific rule action alongside the base ones.
        result = super(procurement_rule, self)._get_action(cr, uid, context=context)
        return result + [('move', _('Move From Another Location'))]
    def _get_rules(self, cr, uid, ids, context=None):
        # Collect the pull rules of the given routes (store trigger for
        # route_sequence below).
        res = []
        for route in self.browse(cr, uid, ids):
            res += [x.id for x in route.pull_ids]
        return res
    _columns = {
        'location_id': fields.many2one('stock.location', 'Procurement Location'),
        'location_src_id': fields.many2one('stock.location', 'Source Location',
            help="Source location is action=move"),
        'route_id': fields.many2one('stock.location.route', 'Route',
            help="If route_id is False, the rule is global"),
        'procure_method': fields.selection([('make_to_stock', 'Take From Stock'), ('make_to_order', 'Create Procurement')], 'Move Supply Method', required=True,
                                           help="""Determines the procurement method of the stock move that will be generated: whether it will need to 'take from the available stock' in its source location or needs to ignore its stock and create a procurement over there."""),
        'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
            store={
                'stock.location.route': (_get_rules, ['sequence'], 10),
                'procurement.rule': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
        }),
        'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type',
            help="Picking Type determines the way the picking should be shown in the view, reports, ..."),
        'delay': fields.integer('Number of Days'),
        'partner_address_id': fields.many2one('res.partner', 'Partner Address'),
        'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this move will too'),
        'warehouse_id': fields.many2one('stock.warehouse', 'Served Warehouse', help='The warehouse this rule is for'),
        'propagate_warehouse_id': fields.many2one('stock.warehouse', 'Warehouse to Propagate', help="The warehouse to propagate on the created move/procurement, which can be different of the warehouse this rule is for (e.g for resupplying rules from another warehouse)"),
    }
    _defaults = {
        'procure_method': 'make_to_stock',
        'propagate': True,
        'delay': 0,
    }
class procurement_order(osv.osv):
    """Stock-aware extension of procurement orders.

    Adds the 'move' action: a procurement with a rule of action 'move'
    is satisfied by creating (and confirming) a stock move between the
    rule's source location and the procurement location.  Also hosts the
    minimum-stock-rule (orderpoint) scheduler.
    """
    _inherit = "procurement.order"
    _columns = {
        'location_id': fields.many2one('stock.location', 'Procurement Location'),  # not required because task may create procurements that aren't linked to a location with sale_service
        'partner_dest_id': fields.many2one('res.partner', 'Customer Address', help="In case of dropshipping, we need to know the destination address more precisely"),
        'move_ids': fields.one2many('stock.move', 'procurement_id', 'Moves', help="Moves created by the procurement"),
        'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Move which caused (created) the procurement"),
        'route_ids': fields.many2many('stock.location.route', 'stock_location_route_procurement', 'procurement_id', 'route_id', 'Preferred Routes', help="Preferred route to be followed by the procurement order. Usually copied from the generating document (SO) but could be set up manually."),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Warehouse to consider for the route selection"),
        'orderpoint_id': fields.many2one('stock.warehouse.orderpoint', 'Minimum Stock Rule'),
    }
    def propagate_cancel(self, cr, uid, procurement, context=None):
        # Cancel the stock moves generated by a 'move' procurement when the
        # procurement itself is cancelled.
        if procurement.rule_id.action == 'move' and procurement.move_ids:
            self.pool.get('stock.move').action_cancel(cr, uid, [m.id for m in procurement.move_ids], context=context)
    def cancel(self, cr, uid, ids, context=None):
        # Cancel the procurements and propagate the cancellation to their moves.
        if context is None:
            context = {}
        to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
        ctx = context.copy()
        #set the context for the propagation of the procurement cancelation
        ctx['cancel_procurement'] = True
        for procurement in self.browse(cr, uid, to_cancel_ids, context=ctx):
            self.propagate_cancel(cr, uid, procurement, context=ctx)
        return super(procurement_order, self).cancel(cr, uid, to_cancel_ids, context=ctx)
    def _find_parent_locations(self, cr, uid, procurement, context=None):
        # Return the procurement location id plus the ids of all its parents,
        # innermost first.
        location = procurement.location_id
        res = [location.id]
        while location.location_id:
            location = location.location_id
            res.append(location.id)
        return res
    def change_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
        # onchange helper: default the procurement location to the selected
        # warehouse's stock location.
        if warehouse_id:
            warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
            return {'value': {'location_id': warehouse.lot_stock_id.id}}
        return {}
    def _search_suitable_rule(self, cr, uid, procurement, domain, context=None):
        '''we try to first find a rule among the ones defined on the procurement order group and if none is found, we try on the routes defined for the product, and finally we fallback on the default behavior'''
        pull_obj = self.pool.get('procurement.rule')
        warehouse_route_ids = []
        if procurement.warehouse_id:
            domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)]
            warehouse_route_ids = [x.id for x in procurement.warehouse_id.route_ids]
        product_route_ids = [x.id for x in procurement.product_id.route_ids + procurement.product_id.categ_id.total_route_ids]
        procurement_route_ids = [x.id for x in procurement.route_ids]
        # Priority order: procurement routes > product/category routes >
        # warehouse routes > global rules (no route).
        res = pull_obj.search(cr, uid, domain + [('route_id', 'in', procurement_route_ids)], order='route_sequence, sequence', context=context)
        if not res:
            res = pull_obj.search(cr, uid, domain + [('route_id', 'in', product_route_ids)], order='route_sequence, sequence', context=context)
            if not res:
                res = warehouse_route_ids and pull_obj.search(cr, uid, domain + [('route_id', 'in', warehouse_route_ids)], order='route_sequence, sequence', context=context) or []
                if not res:
                    res = pull_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
        return res
    def _find_suitable_rule(self, cr, uid, procurement, context=None):
        # Fall back on location-based rule matching when the generic lookup
        # found nothing.
        rule_id = super(procurement_order, self)._find_suitable_rule(cr, uid, procurement, context=context)
        if not rule_id:
            #a rule defined on 'Stock' is suitable for a procurement in 'Stock\Bin A'
            all_parent_location_ids = self._find_parent_locations(cr, uid, procurement, context=context)
            rule_id = self._search_suitable_rule(cr, uid, procurement, [('location_id', 'in', all_parent_location_ids)], context=context)
            rule_id = rule_id and rule_id[0] or False
        return rule_id
    def _run_move_create(self, cr, uid, procurement, context=None):
        ''' Returns a dictionary of values that will be used to create a stock move from a procurement.
        This function assumes that the given procurement has a rule (action == 'move') set on it.

        :param procurement: browse record
        :rtype: dictionary
        '''
        # Schedule the move earlier than the procurement date by the rule's delay.
        newdate = (datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.rule_id.delay or 0)).strftime('%Y-%m-%d %H:%M:%S')
        group_id = False
        if procurement.rule_id.group_propagation_option == 'propagate':
            group_id = procurement.group_id and procurement.group_id.id or False
        elif procurement.rule_id.group_propagation_option == 'fixed':
            group_id = procurement.rule_id.group_id and procurement.rule_id.group_id.id or False
        #it is possible that we've already got some move done, so check for the done qty and create
        #a new move with the correct qty
        already_done_qty = 0
        already_done_qty_uos = 0
        for move in procurement.move_ids:
            already_done_qty += move.product_uom_qty if move.state == 'done' else 0
            already_done_qty_uos += move.product_uos_qty if move.state == 'done' else 0
        qty_left = max(procurement.product_qty - already_done_qty, 0)
        qty_uos_left = max(procurement.product_uos_qty - already_done_qty_uos, 0)
        vals = {
            'name': procurement.name,
            'company_id': procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id or procurement.company_id.id,
            'product_id': procurement.product_id.id,
            'product_uom': procurement.product_uom.id,
            'product_uom_qty': qty_left,
            'product_uos_qty': (procurement.product_uos and qty_uos_left) or qty_left,
            'product_uos': (procurement.product_uos and procurement.product_uos.id) or procurement.product_uom.id,
            'partner_id': procurement.rule_id.partner_address_id.id or (procurement.group_id and procurement.group_id.partner_id.id) or False,
            'location_id': procurement.rule_id.location_src_id.id,
            'location_dest_id': procurement.location_id.id,
            'move_dest_id': procurement.move_dest_id and procurement.move_dest_id.id or False,
            'procurement_id': procurement.id,
            'rule_id': procurement.rule_id.id,
            'procure_method': procurement.rule_id.procure_method,
            'origin': procurement.origin,
            'picking_type_id': procurement.rule_id.picking_type_id.id,
            'group_id': group_id,
            'route_ids': [(4, x.id) for x in procurement.route_ids],
            'warehouse_id': procurement.rule_id.propagate_warehouse_id.id or procurement.rule_id.warehouse_id.id,
            'date': newdate,
            'date_expected': newdate,
            'propagate': procurement.rule_id.propagate,
            'priority': procurement.priority,
        }
        return vals
    def _run(self, cr, uid, procurement, context=None):
        # Satisfy 'move' procurements by creating the corresponding stock move;
        # delegate every other action to the parent implementation.
        if procurement.rule_id and procurement.rule_id.action == 'move':
            if not procurement.rule_id.location_src_id:
                self.message_post(cr, uid, [procurement.id], body=_('No source location defined!'), context=context)
                return False
            move_obj = self.pool.get('stock.move')
            move_dict = self._run_move_create(cr, uid, procurement, context=context)
            #create the move as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)
            move_obj.create(cr, SUPERUSER_ID, move_dict, context=context)
            return True
        return super(procurement_order, self)._run(cr, uid, procurement, context=context)
    def run(self, cr, uid, ids, autocommit=False, context=None):
        # Run only procurements that are not already finished/running, then
        # confirm the draft moves they produced in a single batch.
        new_ids = [x.id for x in self.browse(cr, uid, ids, context=context) if x.state not in ('running', 'done', 'cancel')]
        res = super(procurement_order, self).run(cr, uid, new_ids, autocommit=autocommit, context=context)
        #after all the procurements are run, check if some created a draft stock move that needs to be confirmed
        #(we do that in batch because it fasts the picking assignation and the picking state computation)
        move_to_confirm_ids = []
        for procurement in self.browse(cr, uid, new_ids, context=context):
            if procurement.state == "running" and procurement.rule_id and procurement.rule_id.action == "move":
                move_to_confirm_ids += [m.id for m in procurement.move_ids if m.state == 'draft']
        if move_to_confirm_ids:
            self.pool.get('stock.move').action_confirm(cr, uid, move_to_confirm_ids, context=context)
        return res
    def _check(self, cr, uid, procurement, context=None):
        ''' Implement the procurement checking for rules of type 'move'. The procurement will be satisfied only if all related
            moves are done/cancel and if the requested quantity is moved.
        '''
        if procurement.rule_id and procurement.rule_id.action == 'move':
            uom_obj = self.pool.get('product.uom')
            # In case Phantom BoM splits only into procurements
            if not procurement.move_ids:
                return True
            cancel_test_list = [x.state == 'cancel' for x in procurement.move_ids]
            done_cancel_test_list = [x.state in ('done', 'cancel') for x in procurement.move_ids]
            at_least_one_cancel = any(cancel_test_list)
            all_done_or_cancel = all(done_cancel_test_list)
            all_cancel = all(cancel_test_list)
            if not all_done_or_cancel:
                return False
            elif all_done_or_cancel and not all_cancel:
                return True
            elif all_cancel:
                # Every move was cancelled: the procurement can never be
                # satisfied, so cancel it as well.
                self.message_post(cr, uid, [procurement.id], body=_('All stock moves have been cancelled for this procurement.'), context=context)
            self.write(cr, uid, [procurement.id], {'state': 'cancel'}, context=context)
            return False
        return super(procurement_order, self)._check(cr, uid, procurement, context)
    def do_view_pickings(self, cr, uid, ids, context=None):
        '''
        This function returns an action that display the pickings of the procurements belonging
        to the same procurement group of given ids.
        '''
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        result = mod_obj.get_object_reference(cr, uid, 'stock', 'do_view_pickings')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
        result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
        return result
    def run_scheduler(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
        '''
        Call the scheduler in order to check the running procurements (super method), to check the minimum stock rules
        and the availability of moves. This function is intended to be run for all the companies at the same time, so
        we run functions as SUPERUSER to avoid intercompanies and access rights issues.

        @param self: The object pointer
        @param cr: The current row, from the database cursor,
        @param uid: The current user ID for security checks
        @param ids: List of selected IDs
        @param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
            This is appropriate for batch jobs only.
        @param context: A standard dictionary for contextual values
        @return:  Dictionary of values
        '''
        super(procurement_order, self).run_scheduler(cr, uid, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
        if context is None:
            context = {}
        try:
            if use_new_cursor:
                cr = openerp.registry(cr.dbname).cursor()
            move_obj = self.pool.get('stock.move')
            #Minimum stock rules
            self._procure_orderpoint_confirm(cr, SUPERUSER_ID, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
            #Search all confirmed stock_moves and try to assign them
            confirmed_ids = move_obj.search(cr, uid, [('state', '=', 'confirmed')], limit=None, order='priority desc, date_expected asc', context=context)
            # Assign in slices of 100 to keep transactions small.
            for x in xrange(0, len(confirmed_ids), 100):
                move_obj.action_assign(cr, uid, confirmed_ids[x:x + 100], context=context)
                if use_new_cursor:
                    cr.commit()
            if use_new_cursor:
                cr.commit()
        finally:
            if use_new_cursor:
                try:
                    cr.close()
                except Exception:
                    pass
        return {}
    def _get_orderpoint_date_planned(self, cr, uid, orderpoint, start_date, context=None):
        # Planned date = start date shifted by the supplier lead time of the product.
        date_planned = start_date + relativedelta(days=orderpoint.product_id.seller_delay or 0.0)
        return date_planned.strftime(DEFAULT_SERVER_DATE_FORMAT)
    def _prepare_orderpoint_procurement(self, cr, uid, orderpoint, product_qty, context=None):
        # Values for the procurement generated by a minimum stock rule.
        return {
            'name': orderpoint.name,
            'date_planned': self._get_orderpoint_date_planned(cr, uid, orderpoint, datetime.today(), context=context),
            'product_id': orderpoint.product_id.id,
            'product_qty': product_qty,
            'company_id': orderpoint.company_id.id,
            'product_uom': orderpoint.product_uom.id,
            'location_id': orderpoint.location_id.id,
            'origin': orderpoint.name,
            'warehouse_id': orderpoint.warehouse_id.id,
            'orderpoint_id': orderpoint.id,
            'group_id': orderpoint.group_id.id,
        }
    def _product_virtual_get(self, cr, uid, order_point):
        # Forecast (virtual) quantity of the orderpoint's product at its location.
        product_obj = self.pool.get('product.product')
        return product_obj._product_available(cr, uid,
                [order_point.product_id.id],
                context={'location': order_point.location_id.id})[order_point.product_id.id]['virtual_available']
    def _procure_orderpoint_confirm(self, cr, uid, use_new_cursor=False, company_id = False, context=None):
        '''
        Create procurement based on Orderpoint

        :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
            This is appropriate for batch jobs only.
        '''
        if context is None:
            context = {}
        if use_new_cursor:
            cr = openerp.registry(cr.dbname).cursor()
        orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
        procurement_obj = self.pool.get('procurement.order')
        dom = company_id and [('company_id', '=', company_id)] or []
        orderpoint_ids = orderpoint_obj.search(cr, uid, dom)
        prev_ids = []
        # Process orderpoints in batches of 100; orderpoints hit by a
        # concurrency error are re-queued at the end of the list.
        while orderpoint_ids:
            ids = orderpoint_ids[:100]
            del orderpoint_ids[:100]
            for op in orderpoint_obj.browse(cr, uid, ids, context=context):
                try:
                    prods = self._product_virtual_get(cr, uid, op)
                    if prods is None:
                        continue
                    if float_compare(prods, op.product_min_qty, precision_rounding=op.product_uom.rounding) < 0:
                        # Refill up to the max quantity, rounded up to the
                        # next multiple of qty_multiple.
                        qty = max(op.product_min_qty, op.product_max_qty) - prods
                        reste = op.qty_multiple > 0 and qty % op.qty_multiple or 0.0
                        if float_compare(reste, 0.0, precision_rounding=op.product_uom.rounding) > 0:
                            qty += op.qty_multiple - reste
                        if float_compare(qty, 0.0, precision_rounding=op.product_uom.rounding) <= 0:
                            continue
                        qty -= orderpoint_obj.subtract_procurements(cr, uid, op, context=context)
                        qty_rounded = float_round(qty, precision_rounding=op.product_uom.rounding)
                        if qty_rounded > 0:
                            proc_id = procurement_obj.create(cr, uid,
                                                             self._prepare_orderpoint_procurement(cr, uid, op, qty_rounded, context=context),
                                                             context=context)
                            self.check(cr, uid, [proc_id])
                            self.run(cr, uid, [proc_id])
                        if use_new_cursor:
                            cr.commit()
                except OperationalError:
                    if use_new_cursor:
                        orderpoint_ids.append(op.id)
                        cr.rollback()
                        continue
                    else:
                        raise
            if use_new_cursor:
                cr.commit()
            if prev_ids == ids:
                break
            else:
                prev_ids = ids
        if use_new_cursor:
            cr.commit()
            cr.close()
        return {}
| agpl-3.0 |
scheibler/serieSandSubs | src/subtitle_manager.py | 1 | 5447 | import sleekxmpp
import magic # find out encoding of text files
import logging
import codecs
import sys
import time
class SubtitleManager(sleekxmpp.ClientXMPP):
    """
    An XMPP (Jabber) client that loads an .srt subtitle file and can send the
    subtitle matching the current video position to one or more recipients.
    """

    # Parsed subtitles: list of [start_seconds, end_seconds, text] entries.
    subtitles = []

    def __init__(self, jid, password, subtitle_filename):
        # make a jabber client instance
        sleekxmpp.ClientXMPP.__init__(self, jid, password)
        # load the subtitle file and parse it's contents
        self.subtitles = self.parse_subtitle_file(subtitle_filename)
        # The session_start event will be triggered when
        # the bot establishes its connection with the server
        # and the XML streams are ready for use. We want to
        # listen for this event so that we can intialize
        # our roster.
        self.add_event_handler("session_start", self.start)
        # load a few sleekxmpp plugins
        self.registerPlugin('xep_0030')
        self.registerPlugin('xep_0199')
        if self.connect():
            self.process()
            time.sleep(1)
        else:
            logging.critical("Can't connect to the jabber server")
            # NOTE(review): `helper` is not imported in this module, so this
            # call raises NameError if ever reached -- confirm which module
            # clean_and_exit is expected to come from.
            helper.clean_and_exit(3)

    def start(self, event):
        """
        Process the session_start event.

        Arguments:
            event -- An empty dictionary. The session_start
                     event does not provide any additional
                     data.
        """
        self.getRoster()
        self.sendPresence()

    def parse_subtitle_file(self, sub_filename):
        """
        parse the entries in the subtitle file into an array
        at the moment, the method can convert .srt files with the following structure:
        1 (subtitle index)
        00:00:01,234 --> 00:12,345
        An example subtitle
        over more than one line
        which is separated by an empty line

        Returns a list of [start_seconds, end_seconds, text] entries.
        """
        sub_array = []
        found_time = False
        i = 0
        # Guess the file encoding from libmagic's textual description.
        encoding = magic.from_file(sub_filename)
        if encoding.find("ISO-8859") >= 0:
            encoding = "ISO-8859-1"
        else:
            encoding = encoding.split(" ")[0]
        try:
            sub_file = codecs.open(sub_filename, "r", encoding)
        except IOError as e:
            logging.critical("Can't open the subtitle file\n" + str(e))
            # NOTE(review): `helper` is not imported in this module (see __init__).
            helper.clean_and_exit(3)
        except LookupError as e:
            logging.warning("Encoding of the subtitle file could not be detected\nTry to open it with UTF-8 encoding")
            sub_file = codecs.open(sub_filename, "r", "UTF-8")
        line = sub_file.readline()
        error_counter = 0
        while line:
            line = line.strip()
            if found_time:
                if line == "":
                    # blank line terminates the current subtitle's text block
                    found_time = False
                    i = i + 1
                else:
                    # continuation line: append to the current subtitle's text
                    sub_array[i][2] += line + " "
            # a timing line looks like "00:00:01,234 --> 00:00:12,345"
            if line.count(":") == 4 and line.find("-->") > 0:
                times = line.strip().split(" --> ")
                try:
                    start = self.transform_time(times[0])
                    end = self.transform_time(times[1])
                except Exception:
                    logging.warning("Could not parse the subtitle file " + sub_filename + "\nError in the line " + str(line) + " (subtitle " + str(i+1) + ")\n")
                    if error_counter >= 10:
                        logging.critical("Too many errors in the subtitle file")
                        helper.clean_and_exit(3)
                    error_counter += 1
                else:
                    # BUGFIX: only record the subtitle when both timestamps
                    # parsed; previously the append ran even after a failure,
                    # storing stale values (or raising NameError on the first
                    # timing line).
                    sub_array.append([start, end, ""])
                    found_time = True
            line = sub_file.readline()
        sub_file.close()
        return sub_array

    def transform_time(self, time):
        """
        Helper function to convert time string into seconds
        example for input = '00:12:09,210'
        returns float
        """
        # BUGFIX: the previous wrapper used `except e: raise`, which raised
        # NameError instead of propagating the real parsing error; letting
        # the exception propagate naturally is equivalent and correct.
        t = time.split(":")
        hour = int(t[0])
        min = int(t[1])
        t = t[2].split(",")
        sec = int(t[0])
        # only the first fractional digit is used, i.e. the result has a
        # resolution of 0.1 seconds
        milli = int(t[1][0])
        return hour*3600 + min*60 + sec + milli*0.1

    def get_current_subtitle(self, current_pos):
        """
        This method returns the current subtitle from the subtitle array
        current_pos is a float and contains the current video position in seconds
        if no subtitle exists, an empty string is returned
        """
        i = 0
        while i < len(self.subtitles):
            if i > 0:
                # between the previous subtitle's end and this one's start
                if current_pos >= self.subtitles[i-1][1] and current_pos < self.subtitles[i][0]:
                    return ""
            if current_pos >= self.subtitles[i][0] and current_pos < self.subtitles[i][1]:
                return self.subtitles[i][2]
            i = i + 1
        return ""

    def send_msg(self, recipients, message):
        """
        sends the given message
        if recipients is a list, every the message will be send to every entry
        otherwise recipients only contains one recipient (string)
        """
        if isinstance(recipients, list):
            for r in recipients:
                if r != "":
                    self.send_message(mto=r, mbody=message, mtype='chat')
        else:
            self.send_message(mto=recipients, mbody=message, mtype='chat')

    def end(self):
        """Close the XMPP connection, waiting for the send queue to drain."""
        self.disconnect(wait=True)
| gpl-3.0 |
luniv/servo | components/script/dom/bindings/codegen/parser/tests/test_callback_interface.py | 134 | 1154 | import WebIDL
def WebIDLTest(parser, harness):
    """Verify callback-interface rules: a callback interface parses and
    reports isCallback(), and callback and non-callback interfaces must not
    inherit from each other."""
    parser.parse("""
        callback interface TestCallbackInterface {
          attribute boolean bool;
        };
    """)
    results = parser.finish()
    iface = results[0]
    harness.ok(iface.isCallback(), "Interface should be a callback")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface {
            };
            callback interface TestCallbackInterface : TestInterface {
              attribute boolean bool;
            };
        """)
        results = parser.finish()
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still abort the test run instead of being recorded as a "throw"
        threw = True
    harness.ok(threw, "Should not allow non-callback parent of callback interface")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface TestInterface : TestCallbackInterface {
            };
            callback interface TestCallbackInterface {
              attribute boolean bool;
            };
        """)
        results = parser.finish()
    except Exception:
        threw = True
    harness.ok(threw, "Should not allow callback parent of non-callback interface")
| mpl-2.0 |
tangyiyong/odoo | openerp/tools/float_utils.py | 312 | 10296 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None, rounding_method='HALF-UP'):
"""Return ``value`` rounded to ``precision_digits`` decimal digits,
minimizing IEEE-754 floating point representation errors, and applying
the tie-breaking rule selected with ``rounding_method``, by default
HALF-UP (away from zero).
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param rounding_method: the rounding method used: 'HALF-UP' or 'UP', the first
one rounding up to the closest number with the rule that number>=0.5 is
rounded up to 1, and the latest one always rounding up.
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP (for normal rounding)
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
# Due to IEE754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
# the order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
if rounding_method == 'HALF-UP':
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
# TIE-BREAKING: UP (for ceiling operations)
# When rounding the value up, we instead subtract the epsilon value
# as the the approximation of the real value may be slightly *above* the
# tie limit, this would result in incorrectly rounding up to the next number
# The math.ceil operation is applied on the absolute value in order to
# round "away from zero" and not "towards infinity", then the sign is
# restored.
elif rounding_method == 'UP':
sign = cmp(normalized_value, 0)
normalized_value -= sign*epsilon
rounded_value = math.ceil(abs(normalized_value))*sign # ceil to integer
result = rounded_value * rounding_factor # de-normalize
return result
def float_is_zero(value, precision_digits=None, precision_rounding=None):
    """Return True when ``value`` is negligible at the given precision.

    The precision (``10**-precision_digits`` or ``precision_rounding``) acts
    as the zero *epsilon*: the value is rounded at that precision first, and
    anything whose absolute rounded value is below the epsilon counts as zero.
    Exactly one of ``precision_digits`` / ``precision_rounding`` must be given.

    Warning: ``float_is_zero(value1-value2)`` is not equivalent to
    ``float_compare(value1, value2) == 0``: the former rounds *after* taking
    the difference, the latter rounds each operand first (e.g. 0.006 vs 0.002
    at 2 digits).

    :param int precision_digits: number of fractional digits to round to.
    :param float precision_rounding: minimum non-zero value at the desired
        precision (e.g. 0.01 for 2 digits).
    :param float value: value to compare with the precision's zero
    :return: True if ``value`` is considered zero
    """
    epsilon = _float_check_precision(precision_digits=precision_digits,
                                     precision_rounding=precision_rounding)
    rounded = float_round(value, precision_rounding=epsilon)
    return abs(rounded) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
    """Three-way compare of two floats at a given precision.

    Both values are rounded at the requested precision *before* comparing, so
    two values are "equal" when their rounded forms coincide -- not when their
    difference is small (e.g. 0.006 vs 0.002 at 2 digits round to 0.01 and
    0.0 and therefore differ, even though 0.004 would round to zero).
    Exactly one of ``precision_digits`` / ``precision_rounding`` must be given.

    Warning: ``float_is_zero(value1-value2)`` is not equivalent to
    ``float_compare(value1, value2) == 0`` -- see float_is_zero.

    :param int precision_digits: number of fractional digits to round to.
    :param float precision_rounding: minimum non-zero value at the desired
        precision (e.g. 0.01 for 2 digits).
    :param float value1: first value to compare
    :param float value2: second value to compare
    :return: -1, 0 or 1 when ``value1`` is lower than, equal to, or greater
        than ``value2`` at the given precision.
    """
    rounding_factor = _float_check_precision(precision_digits=precision_digits,
                                             precision_rounding=precision_rounding)
    first = float_round(value1, precision_rounding=rounding_factor)
    second = float_round(value2, precision_rounding=rounding_factor)
    delta = first - second
    if float_is_zero(delta, precision_rounding=rounding_factor):
        return 0
    return -1 if delta < 0.0 else 1
def float_repr(value, precision_digits):
    """Return a string representation of ``value`` with exactly
    ``precision_digits`` fractional digits.

    This is only a display helper; rounding itself is done via
    :meth:`~.float_round`.

    :param int precision_digits: number of fractional digits to include
    """
    # str() cannot be used here: it rounds to 12 significant digits and
    # silently loses precision, e.g. str(123456789.1234) == str(123456789.123).
    # "%.*f" takes the precision as an argument, avoiding the two-step
    # format-string construction.
    return "%.*f" % (precision_digits, value)
# Self-test / micro-benchmark harness (Python 2 only: print statements, xrange).
if __name__ == "__main__":
    import time
    start = time.time()
    count = 0
    errors = 0
    def try_round(amount, expected, precision_digits=3):
        # Round `amount`, format it, and record a mismatch against `expected`.
        global count, errors; count += 1
        result = float_repr(float_round(amount, precision_digits=precision_digits),
                            precision_digits=precision_digits)
        if result != expected:
            errors += 1
            print '###!!! Rounding error: got %s , expected %s' % (result, expected)
    # Extended float range test, inspired by Cloves Almeida's test on bug #882036.
    fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
    expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
    precisions = [2, 2, 2, 2, 2, 2, 3, 4]
    # Sweep 7 orders of magnitude, both signs, stepping through integer parts.
    for magnitude in range(7):
        for i in xrange(len(fractions)):
            frac, exp, prec = fractions[i], expecteds[i], precisions[i]
            for sign in [-1,1]:
                for x in xrange(0,10000,97):
                    n = x * 10**magnitude
                    f = sign * (n + frac)
                    f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
                    try_round(f, f_exp, precision_digits=prec)
    stop = time.time()
    # Micro-bench results:
    # 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
    # with decimal:
    # 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
    print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
| agpl-3.0 |
rjschwei/azure-sdk-for-python | azure-mgmt-redis/azure/mgmt/redis/models/schedule_entry.py | 3 | 1724 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ScheduleEntry(Model):
"""Patch schedule entry for a Premium Redis Cache.
:param day_of_week: Day of the week when a cache can be patched. Possible
values include: 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday', 'Sunday', 'Everyday', 'Weekend'
:type day_of_week: str or :class:`DayOfWeek
<azure.mgmt.redis.models.DayOfWeek>`
:param start_hour_utc: Start hour after which cache patching can start.
:type start_hour_utc: int
:param maintenance_window: ISO8601 timespan specifying how much time cache
patching can take.
:type maintenance_window: timedelta
"""
_validation = {
'day_of_week': {'required': True},
'start_hour_utc': {'required': True},
}
_attribute_map = {
'day_of_week': {'key': 'dayOfWeek', 'type': 'DayOfWeek'},
'start_hour_utc': {'key': 'startHourUtc', 'type': 'int'},
'maintenance_window': {'key': 'maintenanceWindow', 'type': 'duration'},
}
def __init__(self, day_of_week, start_hour_utc, maintenance_window=None):
self.day_of_week = day_of_week
self.start_hour_utc = start_hour_utc
self.maintenance_window = maintenance_window
| mit |
akolobov/ardupilot | Tools/autotest/pysim/util.py | 7 | 19226 | from __future__ import print_function
import atexit
import math
import os
import random
import re
import shlex
import subprocess
import sys
import tempfile
import time
from math import acos, atan2, cos, pi, sqrt
from subprocess import PIPE, Popen, call, check_call
import pexpect
from . rotmat import Matrix3, Vector3
# pexpect needs an explicit encoding on Python 3; on Python 2 the default
# (byte strings) is kept by passing None.
if (sys.version_info[0] >= 3):
    ENCODING = 'ascii'
else:
    ENCODING = None

RADIUS_OF_EARTH = 6378100.0  # in meters
# Conversion constants shared by the unit helpers below.
METERS_PER_FOOT = 0.3048
MPS_PER_KNOT = 0.514444444


def m2ft(x):
    """Convert meters to feet."""
    return float(x) / METERS_PER_FOOT


def ft2m(x):
    """Convert feet to meters."""
    return float(x) * METERS_PER_FOOT


def kt2mps(x):
    """Convert knots to meters per second."""
    return x * MPS_PER_KNOT


def mps2kt(x):
    """Convert meters per second to knots."""
    return x / MPS_PER_KNOT
def topdir():
    """Return top of git tree where autotest is running from.

    Walks up from this file's directory, asserting the expected
    Tools/autotest/pysim layout along the way.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    for expected in ('pysim', 'autotest', 'Tools'):
        assert(os.path.basename(here) == expected)
        here = os.path.dirname(here)
    return here
def reltopdir(path):
    """Return a path relative to topdir(), normalized."""
    joined = os.path.join(topdir(), path)
    return os.path.normpath(joined)
def run_cmd(cmd, directory=".", show=True, output=False, checkfail=True):
    """Run a shell command.

    A list *cmd* is executed directly; a string is run through the shell.
    With output=True the command's stdout is captured and returned; with
    checkfail=True a non-zero exit raises, otherwise the exit status is
    returned.
    """
    use_shell = not isinstance(cmd, list)
    if use_shell:
        cmd = [cmd]
    if show:
        print("Running: (%s) in (%s)" % (cmd_as_shell(cmd), directory,))
    if output:
        return Popen(cmd, shell=use_shell, stdout=PIPE, cwd=directory).communicate()[0]
    if checkfail:
        return check_call(cmd, shell=use_shell, cwd=directory)
    return call(cmd, shell=use_shell, cwd=directory)
def rmfile(path):
    """Remove *path* if it exists; a missing file is not an error.

    Only filesystem errors (OSError, which includes FileNotFoundError)
    are swallowed — the previous blanket ``except Exception`` also hid
    programming errors such as passing a non-path argument.
    """
    try:
        os.unlink(path)
    except OSError:
        pass
def deltree(path):
    """Delete a tree of files.

    The path is passed to ``rm`` as a separate argv element (run_cmd
    executes a list without a shell), so paths containing spaces or
    shell metacharacters are handled safely — the previous
    ``'rm -rf %s' % path`` string form would have been word-split and
    glob-expanded by the shell.
    """
    run_cmd(['rm', '-rf', path])
def relwaf():
    """Path of the waf-light entry point, relative to topdir()."""
    return "./modules/waf/waf-light"
def waf_configure(board, j=None, debug=False, extra_args=[]):
    # Run 'waf configure' for the given board at the tree root.
    # NOTE: extra_args uses a mutable default; it is only iterated, never
    # mutated, so the shared default list is safe here.
    cmd_configure = [relwaf(), "configure", "--board", board]
    if debug:
        cmd_configure.append('--debug')
    if j is not None:
        cmd_configure.extend(['-j', str(j)])
    # Each extra arg may itself contain several shell words; split them so
    # they become separate argv entries.
    pieces = [shlex.split(x) for x in extra_args]
    for piece in pieces:
        cmd_configure.extend(piece)
    run_cmd(cmd_configure, directory=topdir(), checkfail=True)


def waf_clean():
    # Remove waf build products from the tree root.
    run_cmd([relwaf(), "clean"], directory=topdir(), checkfail=True)


def build_SITL(build_target, j=None, debug=False, board='sitl', clean=True, configure=True, extra_configure_args=[]):
    """Build desktop SITL.

    Configure (optional), clean (optional), then build the requested waf
    target.  j limits build parallelism.  Returns True on success; a build
    failure raises via run_cmd(checkfail=True).
    """
    # first configure
    if configure:
        waf_configure(board, j=j, debug=debug, extra_args=extra_configure_args)
    # then clean
    if clean:
        waf_clean()
    # then build
    cmd_make = [relwaf(), "build", "--target", build_target]
    if j is not None:
        cmd_make.extend(['-j', str(j)])
    run_cmd(cmd_make, directory=topdir(), checkfail=True, show=True)
    return True


def build_examples(board, j=None, debug=False, clean=False):
    # Configure, optionally clean, then build the waf 'examples' target.
    # first configure
    waf_configure(board, j=j, debug=debug)
    # then clean
    if clean:
        waf_clean()
    # then build
    cmd_make = [relwaf(), "examples"]
    run_cmd(cmd_make, directory=topdir(), checkfail=True, show=True)
    return True


def build_tests(board, j=None, debug=False, clean=False):
    # Configure, optionally clean, then build the waf 'tests' target.
    # first configure
    waf_configure(board, j=j, debug=debug)
    # then clean
    if clean:
        waf_clean()
    # then build
    run_cmd([relwaf(), "tests"], directory=topdir(), checkfail=True, show=True)
    return True
# Registry of pexpect children that should be closed on exit.
close_list = []


def pexpect_autoclose(p):
    """Register *p* so pexpect_close_all() will shut it down."""
    close_list.append(p)


def pexpect_close(p):
    """Close one pexpect child and drop it from the registry.

    Both a polite close() and a forced close are attempted; errors from
    either are ignored so teardown never fails.
    """
    for attempt in (lambda: p.close(), lambda: p.close(force=True)):
        try:
            attempt()
        except Exception:
            pass
    if p in close_list:
        close_list.remove(p)


def pexpect_close_all():
    """Close every child registered via pexpect_autoclose()."""
    for child in list(close_list):
        pexpect_close(child)
def pexpect_drain(p):
    """Discard any pending input buffered on *p* without blocking.

    Errors — including the pexpect TIMEOUT raised when nothing is
    buffered — are deliberately ignored.

    The previous body started with an unused ``import pexpect``, which
    made the function raise ImportError on hosts without pexpect even
    though the module was never referenced; it has been removed.
    """
    try:
        p.read_nonblocking(1000, timeout=0)
    except Exception:
        pass
def cmd_as_shell(cmd):
    """Render an argv list as a double-quoted, space-separated string."""
    quoted = ['"%s"' % part for part in cmd]
    return " ".join(quoted)
def make_safe_filename(text):
    """Return a version of *text* safe for use as a filename.

    '/' becomes '-'; every other character outside [a-zA-Z0-9_.+-] is
    replaced by '%' followed by its uppercased hex ordinal (e.g. a space
    becomes '%0X20').
    """
    # Bug fix: str.replace returns a new string (strings are immutable);
    # the original discarded the result, so '/' was percent-encoded
    # instead of being turned into '-'.
    text = text.replace('/', '-')
    unsafe = re.compile("([^a-zA-Z0-9_.+-])")
    return unsafe.sub(lambda m: "%" + str(hex(ord(str(m.group(1))))).upper(), text)
def valgrind_log_filepath(binary, model):
    """Return the sanitised valgrind log filename for a binary/model pair."""
    raw = '%s-%s-valgrind.log' % (os.path.basename(binary), model,)
    return make_safe_filename(raw)
def kill_screen_gdb():
    """Ask the 'ardupilot-gdb' screen session to terminate (fire-and-forget)."""
    subprocess.Popen(["screen", "-X", "-S", "ardupilot-gdb", "quit"])
def start_SITL(binary,
               valgrind=False,
               gdb=False,
               wipe=False,
               synthetic_clock=True,
               home=None,
               model=None,
               speedup=1,
               defaults_file=None,
               unhide_parameters=False,
               gdbserver=False,
               breakpoints=[],
               vicon=False):
    """Launch a SITL instance.

    binary: path to the ArduPilot SITL executable.
    valgrind/gdb/gdbserver: wrap the binary in the corresponding debugger.
    wipe: wipe stored parameters (-w); synthetic_clock: use simulated time (-S).
    home/model/speedup/defaults_file/unhide_parameters/vicon map directly to
    the matching SITL command-line options.
    breakpoints: gdb breakpoints to set before running.

    Returns a pexpect child wrapping the process (a dummy child in the
    headless-gdb case — see below).  The argument build order below is
    significant: debugger wrappers must precede the binary in argv.
    """
    cmd = []
    if valgrind and os.path.exists('/usr/bin/valgrind'):
        # we specify a prefix for vgdb-pipe because on Vagrant virtual
        # machines the pipes are created on the mountpoint for the
        # shared directory with the host machine. mmap's,
        # unsurprisingly, fail on files created on that mountpoint.
        vgdb_prefix = os.path.join(tempfile.gettempdir(), "vgdb-pipe")
        log_file = valgrind_log_filepath(binary=binary, model=model)
        cmd.extend([
            'valgrind',
            # adding this option allows valgrind to cope with the overload
            # of operator new
            "--soname-synonyms=somalloc=nouserintercepts",
            '--vgdb-prefix=%s' % vgdb_prefix,
            '-q',
            '--log-file=%s' % log_file])
    if gdbserver:
        cmd.extend(['gdbserver', 'localhost:3333'])
        if gdb:
            # attach gdb to the gdbserver:
            f = open("/tmp/x.gdb", "w")
            f.write("target extended-remote localhost:3333\nc\n")
            for breakpoint in breakpoints:
                f.write("b %s\n" % (breakpoint,))
            f.close()
            run_cmd('screen -d -m -S ardupilot-gdbserver '
                    'bash -c "gdb -x /tmp/x.gdb"')
    elif gdb:
        # Plain gdb (no gdbserver): write a startup script that sets the
        # requested breakpoints and then runs the target.
        f = open("/tmp/x.gdb", "w")
        for breakpoint in breakpoints:
            f.write("b %s\n" % (breakpoint,))
        f.write("r\n")
        f.close()
        if os.environ.get('DISPLAY'):
            cmd.extend(['xterm', '-e', 'gdb', '-x', '/tmp/x.gdb', '--args'])
        else:
            cmd.extend(['screen',
                        '-L', '-Logfile', 'gdb.log',
                        '-d',
                        '-m',
                        '-S', 'ardupilot-gdb',
                        'gdb', '-x', '/tmp/x.gdb', binary, '--args'])
    cmd.append(binary)
    if wipe:
        cmd.append('-w')
    if synthetic_clock:
        cmd.append('-S')
    if home is not None:
        cmd.extend(['--home', home])
    if model is not None:
        cmd.extend(['--model', model])
    if speedup != 1:
        cmd.extend(['--speedup', str(speedup)])
    if defaults_file is not None:
        cmd.extend(['--defaults', defaults_file])
    if unhide_parameters:
        cmd.extend(['--unhide-groups'])
    if vicon:
        cmd.extend(["--uartF=sim:vicon:"])
    if gdb and not os.getenv('DISPLAY'):
        # Headless gdb runs detached under screen; the Popen handle is
        # intentionally not tracked further (screen owns the session).
        p = subprocess.Popen(cmd)
        atexit.register(kill_screen_gdb)
        # we are expected to return a pexpect wrapped around the
        # stdout of the ArduPilot binary. Not going to happen until
        # AP gets a redirect-stdout-to-filehandle option. So, in the
        # meantime, return a dummy:
        return pexpect.spawn("true", ["true"],
                             logfile=sys.stdout,
                             encoding=ENCODING,
                             timeout=5)
    print("Running: %s" % cmd_as_shell(cmd))
    first = cmd[0]
    rest = cmd[1:]
    child = pexpect.spawn(first, rest, logfile=sys.stdout, encoding=ENCODING, timeout=5)
    pexpect_autoclose(child)
    # give time for parameters to properly setup
    time.sleep(3)
    if gdb:
        # if we run GDB we do so in an xterm.  "Waiting for
        # connection" is never going to appear on xterm's output.
        # ... so let's give it another magic second.
        time.sleep(1)
        # TODO: have a SITL-compiled ardupilot able to have its
        # console on an output fd.
    else:
        child.expect('Waiting for connection', timeout=300)
    return child
def start_MAVProxy_SITL(atype, aircraft=None, setup=False, master='tcp:127.0.0.1:5760',
                        options=[], logfile=sys.stdout):
    """Launch mavproxy connected to a SITL instance.

    atype: vehicle type, used to derive the default --aircraft name.
    aircraft: explicit --aircraft value (default 'test.<atype>').
    setup: pass --setup to mavproxy.
    master: connection string for the SITL master port.
    options: extra command-line options appended verbatim (the mutable
        default is safe: the list is only read, never mutated).
    logfile: where pexpect mirrors mavproxy's output.

    Returns the pexpect child, registered for automatic close on exit.

    Cleanup: the redundant function-local ``import pexpect`` (pexpect is
    already imported at module level) and the no-op ``global close_list``
    declaration (nothing is assigned to it here) have been removed.
    """
    MAVPROXY = os.getenv('MAVPROXY_CMD', 'mavproxy.py')
    cmd = MAVPROXY + ' --master=%s --out=127.0.0.1:14550' % master
    if setup:
        cmd += ' --setup'
    if aircraft is None:
        aircraft = 'test.%s' % atype
    cmd += ' --aircraft=%s' % aircraft
    cmd += ' ' + ' '.join(options)
    ret = pexpect.spawn(cmd, logfile=logfile, encoding=ENCODING, timeout=60)
    ret.delaybeforesend = 0
    pexpect_autoclose(ret)
    return ret
def expect_setup_callback(e, callback):
    """Setup a callback that is called once a second while waiting for
       patterns.

    Monkey-patches the pexpect child *e*: the original expect() is saved
    as e.expect_saved and replaced by a wrapper that retries in 1-second
    slices, invoking callback(e) after each slice that times out.  The
    overall timeout still applies; on expiry the wrapper prints a message
    and re-raises pexpect.TIMEOUT.
    """
    import pexpect

    def _expect_callback(pattern, timeout=e.timeout):
        # NOTE: the default timeout is captured from e.timeout at patch
        # time, not per call.
        tstart = time.time()
        while time.time() < tstart + timeout:
            try:
                ret = e.expect_saved(pattern, timeout=1)
                return ret
            except pexpect.TIMEOUT:
                # One-second slice expired without a match: give the
                # caller a chance to do periodic work, then retry.
                e.expect_user_callback(e)
                pass
        print("Timed out looking for %s" % pattern)
        raise pexpect.TIMEOUT(timeout)

    e.expect_user_callback = callback
    e.expect_saved = e.expect
    e.expect = _expect_callback
def mkdir_p(directory):
    """Create *directory* and any missing parents (like ``mkdir -p``).

    Empty paths and already-existing directories are silently accepted.
    """
    directory = directory.rstrip("/")
    if not directory or os.path.isdir(directory):
        return
    mkdir_p(os.path.dirname(directory))
    os.mkdir(directory)
def loadfile(fname):
    """Read *fname* in text mode and return its contents as a string."""
    with open(fname, mode='r') as fh:
        return fh.read()
def lock_file(fname):
    """Open *fname* for writing and take an exclusive non-blocking lock.

    Returns the open file object on success (the caller keeps the lock
    alive by holding the object), or None if the lock is already held.
    """
    import fcntl
    handle = open(fname, mode='w')
    try:
        fcntl.lockf(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
        return None
    return handle
def check_parent(parent_pid=None):
    """Check that the parent process is still alive; exit(1) otherwise.

    With no argument the current parent pid is probed; if it cannot be
    determined the check is skipped.  Signal 0 is an existence probe
    only — no signal is actually delivered.
    """
    if parent_pid is None:
        try:
            parent_pid = os.getppid()
        except Exception:
            pass
    if parent_pid is None:
        return
    try:
        os.kill(parent_pid, 0)
    except Exception:
        print("Parent had finished - exiting")
        sys.exit(1)
def EarthRatesToBodyRates(dcm, earth_rates):
    """Convert the angular velocities from earth frame to
    body frame. Thanks to James Goppert for the formula

    all inputs and outputs are in radians

    returns a gyro vector in body frame, in rad/s .

    dcm: rotation matrix providing to_euler() -> (roll, pitch, yaw).
    earth_rates: Vector3 of euler-angle rates (phiDot, thetaDot, psiDot).
    """
    from math import sin, cos

    # Euler angles of the current attitude (roll=phi, pitch=theta, yaw=psi).
    (phi, theta, psi) = dcm.to_euler()
    phiDot = earth_rates.x
    thetaDot = earth_rates.y
    psiDot = earth_rates.z

    # Standard euler-rate -> body-rate (p, q, r) transform; psi itself does
    # not appear because body rates are independent of heading.
    p = phiDot - psiDot * sin(theta)
    q = cos(phi) * thetaDot + sin(phi) * psiDot * cos(theta)
    r = cos(phi) * psiDot * cos(theta) - sin(phi) * thetaDot
    return Vector3(p, q, r)
def BodyRatesToEarthRates(dcm, gyro):
    """Convert the angular velocities from body frame to
    earth frame.

    all inputs and outputs are in radians/s

    returns a earth rate vector.

    dcm: rotation matrix providing to_euler() -> (roll, pitch, yaw).
    gyro: Vector3 of body rates (p, q, r).
    """
    from math import sin, cos, tan, fabs

    p = gyro.x
    q = gyro.y
    r = gyro.z

    (phi, theta, psi) = dcm.to_euler()

    # Inverse of the euler-rate transform used in EarthRatesToBodyRates.
    phiDot = p + tan(theta) * (q * sin(phi) + r * cos(phi))
    thetaDot = q * cos(phi) - r * sin(phi)
    # Guard against the gimbal singularity at theta = +/-pi/2: nudge theta
    # so cos(theta) below is non-zero.  NOTE(review): the nudge is applied
    # after phiDot/thetaDot were computed with the original theta — it only
    # protects the psiDot division; confirm this ordering is intentional.
    if fabs(cos(theta)) < 1.0e-20:
        theta += 1.0e-10
    psiDot = (q * sin(phi) + r * cos(phi)) / cos(theta)
    return Vector3(phiDot, thetaDot, psiDot)
def gps_newpos(lat, lon, bearing, distance):
    """Offset (lat, lon) by *distance* metres along *bearing* degrees.

    Great-circle formula from
    http://www.movable-type.co.uk/scripts/latlong.html
    Returns a (lat, lon) tuple in degrees.
    """
    from math import sin, asin, cos, atan2, radians, degrees

    phi1 = radians(lat)
    lam1 = radians(lon)
    theta = radians(bearing)
    delta = distance / RADIUS_OF_EARTH  # angular distance, radians

    phi2 = asin(sin(phi1) * cos(delta) +
                cos(phi1) * sin(delta) * cos(theta))
    lam2 = lam1 + atan2(sin(theta) * sin(delta) * cos(phi1),
                        cos(delta) - sin(phi1) * sin(phi2))
    return (degrees(phi2), degrees(lam2))
def gps_distance(lat1, lon1, lat2, lon2):
    """Haversine distance in metres between two lat/lon points (degrees).

    Formula from http://www.movable-type.co.uk/scripts/latlong.html
    """
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    half_dphi = 0.5 * (phi2 - phi1)
    half_dlam = 0.5 * math.radians(lon2 - lon1)

    a = (math.sin(half_dphi) ** 2 +
         math.sin(half_dlam) ** 2 * math.cos(phi1) * math.cos(phi2))
    central_angle = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0 - a))
    return RADIUS_OF_EARTH * central_angle
def gps_bearing(lat1, lon1, lat2, lon2):
    """Initial bearing from point 1 to point 2, in degrees [0, 360).

    Formula from http://www.movable-type.co.uk/scripts/latlong.html
    """
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    dlam = math.radians(lon2) - math.radians(lon1)

    y = math.sin(dlam) * math.cos(phi2)
    x = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(dlam)
    deg = math.degrees(math.atan2(y, x))
    return deg + 360.0 if deg < 0 else deg
class Wind(object):
    """A wind generation object.

    Parses a "speed,direction,turbulance" string and produces a
    time-varying wind via a random walk on a turbulence multiplier, plus
    the resulting drag acceleration on a moving body.
    """
    def __init__(self, windstring, cross_section=0.1):
        a = windstring.split(',')
        if len(a) != 3:
            raise RuntimeError("Expected wind in speed,direction,turbulance form, not %s" % windstring)
        self.speed = float(a[0])  # m/s
        self.direction = float(a[1])  # direction the wind is going in
        self.turbulance = float(a[2])  # turbulance factor (standard deviation)

        # the cross-section of the aircraft to wind. This is multiplied by the
        # difference in the wind and the velocity of the aircraft to give the acceleration
        self.cross_section = cross_section

        # the time constant for the turbulance - the average period of the
        # changes over time
        self.turbulance_time_constant = 5.0

        # wind time record
        self.tlast = time.time()

        # initial turbulance multiplier
        self.turbulance_mul = 1.0

    def current(self, deltat=None):
        """Return current wind speed and direction as a tuple
        speed is in m/s, direction in degrees.

        With deltat=None wall-clock time since the last call is used (and
        the internal clock advanced); passing deltat makes the update
        deterministic in step size but still random in magnitude.
        """
        if deltat is None:
            tnow = time.time()
            deltat = tnow - self.tlast
            self.tlast = tnow

        # update turbulance random walk: a gaussian step scaled by
        # sqrt(dt), pulled back toward 1.0 with the configured time constant.
        w_delta = math.sqrt(deltat) * (1.0 - random.gauss(1.0, self.turbulance))
        w_delta -= (self.turbulance_mul - 1.0) * (deltat / self.turbulance_time_constant)
        self.turbulance_mul += w_delta
        # fabs keeps the speed non-negative even if the walk crosses zero.
        speed = self.speed * math.fabs(self.turbulance_mul)
        return (speed, self.direction)

    # Calculate drag.
    def drag(self, velocity, deltat=None):
        """Return current wind force in Earth frame.  The velocity parameter is
           a Vector3 of the current velocity of the aircraft in earth frame, m/s ."""
        from math import radians

        # (m/s, degrees) : wind vector as a magnitude and angle.
        (speed, direction) = self.current(deltat=deltat)
        # speed = self.speed
        # direction = self.direction

        # Get the wind vector.
        w = toVec(speed, radians(direction))

        obj_speed = velocity.length()

        # Compute the angle between the object vector and wind vector by taking
        # the dot product and dividing by the magnitudes.
        d = w.length() * obj_speed
        if d == 0:
            alpha = 0
        else:
            alpha = acos((w * velocity) / d)

        # Get the relative wind speed and angle from the object.  Note that the
        # relative wind speed includes the velocity of the object; i.e., there
        # is a headwind equivalent to the object's speed even if there is no
        # absolute wind.
        (rel_speed, beta) = apparent_wind(speed, obj_speed, alpha)

        # Return the vector of the relative wind, relative to the coordinate
        # system.
        relWindVec = toVec(rel_speed, beta + atan2(velocity.y, velocity.x))

        # Combine them to get the acceleration vector.
        return Vector3(acc(relWindVec.x, drag_force(self, relWindVec.x)), acc(relWindVec.y, drag_force(self, relWindVec.y)), 0)
def apparent_wind(wind_sp, obj_speed, alpha):
    """Combine true wind and the object's own motion into apparent wind.

    See http://en.wikipedia.org/wiki/Apparent_wind

    alpha is the angle (radians) between the object and the true wind:
    0 is a headwind, pi a tailwind.  Speeds should always be positive.
    Returns (apparent wind speed, angle of apparent wind from object).
    """
    along = wind_sp * cos(alpha)  # wind component along the object's axis
    rel_speed = sqrt(wind_sp ** 2 + obj_speed ** 2 + 2 * obj_speed * along)
    if rel_speed == 0:
        return (rel_speed, pi)
    return (rel_speed, acos((along + obj_speed) / rel_speed))
def drag_force(wind, sp):
    """Drag-force magnitude for relative wind speed *sp*.

    See http://en.wikipedia.org/wiki/Drag_equation
    F = cl * p/2 * v^2 * a with cl ~ 0.2 and air density p ~ 1 kg/m^3
    folded into the 0.1 constant; the area a is wind.cross_section.
    """
    return 0.1 * wind.cross_section * sp ** 2.0
def acc(val, mag):
    """ Function to make the force vector.  relWindVec is the direction the
    apparent wind comes *from*; the acceleration must point the way the wind
    blows *to*, so the returned magnitude carries the opposite sign of *val*.
    val == 0 leaves *mag* unchanged."""
    if val == 0:
        return mag
    return -mag if val > 0 else mag
def toVec(magnitude, angle):
    """Converts a magnitude and angle (radians) to a vector in the xy plane.

    Builds a unit-x vector scaled by magnitude and rotates it about z by
    *angle* via Matrix3.  NOTE(review): the rotation sense depends on the
    rotmat module's euler/transpose conventions — confirm against rotmat
    before relying on the sign of the y component.
    """
    v = Vector3(magnitude, 0, 0)
    m = Matrix3()
    m.from_euler(0, 0, angle)
    return m.transposed() * v
def constrain(value, minv, maxv):
    """Clamp *value* into the inclusive range [minv, maxv]."""
    return min(max(value, minv), maxv)
# When run as a script, execute any doctests embedded in this module.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| gpl-3.0 |
franga2000/django-machina | machina/apps/forum_permission/viewmixins.py | 1 | 4372 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import Iterable
from django.conf import settings
from django.contrib.auth.decorators import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url
from django.utils.http import urlquote
from django.utils.six import string_types
class PermissionRequiredMixin(object):
    """
    This view mixin verifies if the current user has the permissions specified by the
    'permission_required' attribute. This 'permissions check' behavior can be updated
    in the 'perform_permissions_check()' method.

    It provides the following workflow:

        The mixin tries to see if the view and the current user pass the permission check.

        If the permission check fails and if the user isn't logged in, redirect to
        settings.LOGIN_URL passing the current absolute path in the query string. Example:
        /accounts/login/?next=/forum/3/

        If the permission check fails and if the user is logged in, return a 403 error
        page.

    The permissions will be tested against a specific instance provided either by a
    `get_object()` method or by an `object` attribute. In the case where the permissions
    should be checked against an instance that is not the one associated with a specific
    DetailView, it is possible to write a `get_controlled_object` method to which it will
    be given priority over the methods and attributes mentioned previously.
    """
    login_url = settings.LOGIN_URL
    permission_required = None
    redirect_field_name = REDIRECT_FIELD_NAME

    def get_required_permissions(self, request):
        """
        Returns the list of permission codenames required to access the
        considered object.  ``permission_required`` may be a single string
        or any iterable of strings; anything else raises
        ImproperlyConfigured.
        """
        perms = []
        if not self.permission_required:
            return perms
        if isinstance(self.permission_required, string_types):
            perms = [self.permission_required, ]
        elif isinstance(self.permission_required, Iterable):
            perms = [perm for perm in self.permission_required]
        else:
            raise ImproperlyConfigured(
                '\'PermissionRequiredMixin\' requires \'permission_required\' '
                'attribute to be set to \'<app_label>.<permission codename>\' but is set to {} '
                'instead'.format(self.permission_required)
            )
        return perms

    def perform_permissions_check(self, user, obj, perms):
        """
        Performs a permissions check in order to tell if the passed user
        can access the current view for the given object.

        By default, this method checks whether the given user has all the
        considered permissions in order to grant access. This behavior can
        be overridden in any subclass.
        """
        # Initializes a permission checker
        checker = self.request.forum_permission_handler._get_checker(user)
        # Check permissions
        return all(checker.has_perm(perm, obj) for perm in perms)

    def check_permissions(self, request):
        """
        Retrieves the controlled object and performs the permissions check.
        Returns a redirect response for anonymous users failing the check,
        raises PermissionDenied for authenticated users failing it, and
        returns None when access is granted.
        """
        # NOTE(review): this and/or chain falls through to the next candidate
        # if get_controlled_object()/get_object() returns a falsy value; all
        # current uses return model instances, which are truthy.
        obj = (hasattr(self, 'get_controlled_object') and self.get_controlled_object() or
               hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None))
        user = request.user

        # Get the permissions to check.
        # Bug fix: forward the request — the previous code passed `self`
        # (the view) as the `request` argument.  The default implementation
        # ignores the argument, so existing behavior is unchanged, but
        # subclasses overriding get_required_permissions() now receive the
        # object its signature promises.
        perms = self.get_required_permissions(request)

        # Check permissions
        has_permissions = self.perform_permissions_check(user, obj, perms)

        if not has_permissions and not user.is_authenticated:
            return HttpResponseRedirect('{}?{}={}'.format(
                resolve_url(self.login_url),
                self.redirect_field_name,
                urlquote(request.get_full_path())
            ))
        elif not has_permissions:
            raise PermissionDenied

    def dispatch(self, request, *args, **kwargs):
        # Store the usual CBV attributes before the check, since
        # perform_permissions_check() reads self.request.
        self.request = request
        self.args = args
        self.kwargs = kwargs
        response = self.check_permissions(request)
        if response:
            return response
        return super(PermissionRequiredMixin, self).dispatch(request, *args, **kwargs)
| bsd-3-clause |
mightbejosh/dj-braintree | djbraintree/event_handlers.py | 1 | 2946 | # -*- coding: utf-8 -*-
"""
.. module:: djbraintree.event_handlers
:synopsis: dj-braintree - webhook event handlers for the various models
.. moduleauthor:: Bill Huneke (@wahuneke)
Implement webhook event handlers for all the models that need to respond to webhook events.
"""
from django.utils import timezone
from . import webhooks
from . import settings as djbraintree_settings
import stripe
from .models import Customer, CurrentSubscription, Charge, Transfer, Invoice
# ---------------------------
# Customer model events
# ---------------------------
@webhooks.handler_all
def customer_event_attach(event, event_data, event_type, event_subtype):
    # Runs for every incoming webhook event: tries to attach the local
    # Customer record to the event before type-specific handlers fire.
    stripe_customer_crud_events = ["created", "updated", "deleted"]
    skip_events = ["plan", "transfer"]

    if event_type in skip_events:
        # These event types carry no customer reference.
        return
    elif event_type == "customer" and event_subtype in stripe_customer_crud_events:
        # For customer CRUD events the customer id is the object id itself.
        stripe_customer_id = event_data["object"]["id"]
    else:
        # Every other payload references the customer indirectly (or not at all).
        stripe_customer_id = event_data["object"].get("customer", None)

    if stripe_customer_id:
        try:
            event.customer = Customer.objects.get(stripe_id=stripe_customer_id)
        except Customer.DoesNotExist:
            # The event may reference a customer we have not mirrored locally.
            pass


@webhooks.handler(['customer'])
def customer_webhook_handler(event, event_data, event_type, event_subtype):
    # React to customer.* events for a customer we know locally; events
    # without an attached customer are ignored.
    customer = event.customer
    if customer:
        if event_subtype == "subscription.deleted":
            # Mark the local subscription cancelled rather than deleting it.
            customer.current_subscription.status = CurrentSubscription.STATUS_CANCELLED
            customer.current_subscription.canceled_at = timezone.now()
            customer.current_subscription.save()
        elif event_subtype.startswith("subscription."):
            customer.sync_current_subscription()
        elif event_subtype == "deleted":
            customer.purge()


# ---------------------------
# Transfer model events
# ---------------------------
@webhooks.handler(["transfer"])
def transfer_webhook_handler(event, event_data, event_type, event_subtype):
    # TODO: re-retrieve this transfer object so we have it in proper API version
    Transfer.process_transfer(event, event_data["object"])


# ---------------------------
# Invoice model events
# ---------------------------
@webhooks.handler(['invoice'])
def invoice_webhook_handler(event, event_data, event_type, event_subtype):
    # Re-fetch the invoice from the API so the sync uses the canonical,
    # current-API-version representation rather than the webhook payload.
    # NOTE(review): despite the dj-braintree module name this code talks to
    # the Stripe API — presumably inherited from dj-stripe; confirm upstream.
    if event_subtype in ["payment_failed", "payment_succeeded", "created"]:
        invoice_data = event_data["object"]
        stripe_invoice = stripe.Invoice.retrieve(invoice_data["id"])
        Invoice.sync_from_stripe_data(stripe_invoice, send_receipt=djbraintree_settings.SEND_INVOICE_RECEIPT_EMAILS)


# ---------------------------
# Charge model events
# ---------------------------
@webhooks.handler(['charge'])
def charge_webhook_handler(event, event_data, event_type, event_subtype):
    # Always re-retrieve the charge before syncing (same rationale as above).
    event_data = stripe.Charge.retrieve(event_data["object"]["id"])
    return Charge.sync_from_stripe_data(event_data)
| bsd-3-clause |
jgoclawski/django | tests/mail/tests.py | 116 | 52412 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import asyncore
import mimetypes
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from email.mime.text import MIMEText
from smtplib import SMTP, SMTPException
from ssl import SSLError
from django.core import mail
from django.core.mail import (
EmailMessage, EmailMultiAlternatives, mail_admins, mail_managers,
send_mail, send_mass_mail,
)
from django.core.mail.backends import console, dummy, filebased, locmem, smtp
from django.core.mail.message import BadHeaderError
from django.test import SimpleTestCase, override_settings
from django.utils._os import upath
from django.utils.encoding import force_bytes, force_text
from django.utils.six import PY3, StringIO, binary_type
from django.utils.translation import ugettext_lazy
if PY3:
from email.utils import parseaddr
from email import message_from_bytes, message_from_binary_file
else:
from email.Utils import parseaddr
from email import (message_from_string as message_from_bytes,
message_from_file as message_from_binary_file)
class HeadersCheckMixin(object):

    def assertMessageHasHeaders(self, message, headers):
        """
        Check that :param message: has all :param headers: headers.

        :param message: can be an instance of an email.Message subclass or a
        string with the contents of an email message.
        :param headers: should be a set of (header-name, header-value) tuples.
        """
        parsed = message
        if isinstance(parsed, binary_type):
            parsed = message_from_bytes(parsed)
        present = set(parsed.items())
        missing = headers - present
        self.assertTrue(
            headers.issubset(present),
            msg='Message is missing the following headers: %s' % (missing,),
        )
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def test_ascii(self):
    # Plain-ASCII subject/body/addresses survive message() unchanged.
    email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
    message = email.message()
    self.assertEqual(message['Subject'], 'Subject')
    self.assertEqual(message.get_payload(), 'Content')
    self.assertEqual(message['From'], 'from@example.com')
    self.assertEqual(message['To'], 'to@example.com')

def test_multiple_recipients(self):
    # Multiple 'to' addresses are comma-joined in the To header.
    email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'])
    message = email.message()
    self.assertEqual(message['Subject'], 'Subject')
    self.assertEqual(message.get_payload(), 'Content')
    self.assertEqual(message['From'], 'from@example.com')
    self.assertEqual(message['To'], 'to@example.com, other@example.com')

def test_cc(self):
    """Regression test for #7722"""
    email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
    message = email.message()
    self.assertEqual(message['Cc'], 'cc@example.com')
    self.assertEqual(email.recipients(), ['to@example.com', 'cc@example.com'])

    # Test multiple CC with multiple To
    email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'])
    message = email.message()
    self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
    self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com'])

    # Testing with Bcc
    email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'], bcc=['bcc@example.com'])
    message = email.message()
    self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
    self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])

def test_reply_to(self):
    # reply_to entries are rendered as a comma-joined Reply-To header.
    email = EmailMessage(
        'Subject', 'Content', 'from@example.com', ['to@example.com'],
        reply_to=['reply_to@example.com'],
    )
    message = email.message()
    self.assertEqual(message['Reply-To'], 'reply_to@example.com')

    email = EmailMessage(
        'Subject', 'Content', 'from@example.com', ['to@example.com'],
        reply_to=['reply_to1@example.com', 'reply_to2@example.com']
    )
    message = email.message()
    self.assertEqual(message['Reply-To'], 'reply_to1@example.com, reply_to2@example.com')

def test_recipients_as_tuple(self):
    # Tuples are accepted anywhere a recipient list is accepted.
    email = EmailMessage('Subject', 'Content', 'from@example.com', ('to@example.com', 'other@example.com'), cc=('cc@example.com', 'cc.other@example.com'), bcc=('bcc@example.com',))
    message = email.message()
    self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
    self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])

def test_recipients_as_string(self):
    # A bare string is rejected with TypeError for every recipient kwarg.
    with self.assertRaisesMessage(TypeError, '"to" argument must be a list or tuple'):
        EmailMessage(to='foo@example.com')
    with self.assertRaisesMessage(TypeError, '"cc" argument must be a list or tuple'):
        EmailMessage(cc='foo@example.com')
    with self.assertRaisesMessage(TypeError, '"bcc" argument must be a list or tuple'):
        EmailMessage(bcc='foo@example.com')
    with self.assertRaisesMessage(TypeError, '"reply_to" argument must be a list or tuple'):
        EmailMessage(reply_to='reply_to@example.com')
def test_header_injection(self):
    # Newlines in the subject must raise BadHeaderError (header injection),
    # for both plain and lazily-translated strings.
    email = EmailMessage('Subject\nInjection Test', 'Content', 'from@example.com', ['to@example.com'])
    self.assertRaises(BadHeaderError, email.message)
    email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', 'from@example.com', ['to@example.com'])
    self.assertRaises(BadHeaderError, email.message)

def test_space_continuation(self):
    """
    Test for space continuation character in long (ASCII) subject headers (#7747)
    """
    email = EmailMessage('Long subject lines that get wrapped should contain a space continuation character to get expected behavior in Outlook and Thunderbird', 'Content', 'from@example.com', ['to@example.com'])
    message = email.message()
    # Note that in Python 3, maximum line length has increased from 76 to 78
    self.assertEqual(message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n character to get expected behavior in Outlook and Thunderbird')

def test_message_header_overrides(self):
    """
    Specifying dates or message-ids in the extra headers overrides the
    default values (#9233)
    """
    headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
    email = EmailMessage('subject', 'content', 'from@example.com', ['to@example.com'], headers=headers)

    self.assertMessageHasHeaders(email.message(), {
        ('Content-Transfer-Encoding', '7bit'),
        ('Content-Type', 'text/plain; charset="utf-8"'),
        ('From', 'from@example.com'),
        ('MIME-Version', '1.0'),
        ('Message-ID', 'foo'),
        ('Subject', 'subject'),
        ('To', 'to@example.com'),
        ('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
    })
def test_from_header(self):
    """
    Make sure we can manually set the From header (#9214)
    """
    email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
    message = email.message()
    self.assertEqual(message['From'], 'from@example.com')

def test_to_header(self):
    """
    Make sure we can manually set the To header (#17444)
    """
    email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                         ['list-subscriber@example.com', 'list-subscriber2@example.com'],
                         headers={'To': 'mailing-list@example.com'})
    message = email.message()
    self.assertEqual(message['To'], 'mailing-list@example.com')
    self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])

    # If we don't set the To header manually, it should default to the `to` argument to the constructor
    email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                         ['list-subscriber@example.com', 'list-subscriber2@example.com'])
    message = email.message()
    self.assertEqual(message['To'], 'list-subscriber@example.com, list-subscriber2@example.com')
    self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])

def test_reply_to_header(self):
    """
    Specifying 'Reply-To' in headers should override reply_to.
    """
    email = EmailMessage(
        'Subject', 'Content', 'bounce@example.com', ['to@example.com'],
        reply_to=['foo@example.com'], headers={'Reply-To': 'override@example.com'},
    )
    message = email.message()
    self.assertEqual(message['Reply-To'], 'override@example.com')

def test_multiple_message_call(self):
    """
    Regression for #13259 - Make sure that headers are not changed when
    calling EmailMessage.message()
    """
    email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
    message = email.message()
    self.assertEqual(message['From'], 'from@example.com')
    message = email.message()
    self.assertEqual(message['From'], 'from@example.com')
def test_unicode_address_header(self):
    """
    Regression for #11144 - When a to/from/cc header contains unicode,
    make sure the email addresses are parsed correctly (especially with
    regards to commas)
    """
    email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Firstname Sürname" <to@example.com>', 'other@example.com'])
    self.assertEqual(email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <to@example.com>, other@example.com')
    email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Sürname, Firstname" <to@example.com>', 'other@example.com'])
    self.assertEqual(email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <to@example.com>, other@example.com')

def test_unicode_headers(self):
    # Non-ASCII values in the subject and in arbitrary extra headers are
    # RFC 2047 encoded (base64 or quoted-printable as appropriate).
    email = EmailMessage("Gżegżółka", "Content", "from@example.com", ["to@example.com"],
                         headers={"Sender": '"Firstname Sürname" <sender@example.com>',
                                  "Comments": 'My Sürname is non-ASCII'})
    message = email.message()
    self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
    self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <sender@example.com>')
    self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')

def test_safe_mime_multipart(self):
    """
    Make sure headers can be set with a different encoding than utf-8 in
    SafeMIMEMultipart as well
    """
    headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
    from_email, to = 'from@example.com', '"Sürname, Firstname" <to@example.com>'
    text_content = 'This is an important message.'
    html_content = '<p>This is an <strong>important</strong> message.</p>'
    msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
    msg.attach_alternative(html_content, "text/html")
    msg.encoding = 'iso-8859-1'
    self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <to@example.com>')
    self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
    def test_encoding(self):
        """
        Regression for #12791 - Encode body correctly with other encodings
        than utf-8
        """
        email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', 'from@example.com', ['other@example.com'])
        email.encoding = 'iso-8859-1'
        message = email.message()
        # The single-part body must be declared as latin-1 and transferred
        # as quoted-printable.
        self.assertMessageHasHeaders(message, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'other@example.com')})
        # 'ü' becomes =FC in quoted-printable latin-1.
        self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
        # Make sure MIME attachments also works correctly with other encodings than utf-8
        text_content = 'Firstname Sürname is a great guy.'
        html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
        msg = EmailMultiAlternatives('Subject', text_content, 'from@example.com', ['to@example.com'])
        msg.encoding = 'iso-8859-1'
        msg.attach_alternative(html_content, "text/html")
        # First alternative: the plain-text part.
        payload0 = msg.message().get_payload(0)
        self.assertMessageHasHeaders(payload0, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable')})
        self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
        # Second alternative: the HTML part, encoded the same way.
        payload1 = msg.message().get_payload(1)
        self.assertMessageHasHeaders(payload1, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/html; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable')})
        self.assertTrue(payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>'))
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
text_content = 'This is an important message.'
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), 'multipart/mixed')
self.assertEqual(message.get_default_type(), 'text/plain')
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
self.assertEqual(payload[1].get_content_type(), 'application/pdf')
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
content = 'This is the message.'
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
def test_attach_file(self):
"""
Test attaching a file against different mimetypes and make sure that
a file will be attached and sent properly even if an invalid mimetype
is specified.
"""
files = (
# filename, actual mimetype
('file.txt', 'text/plain'),
('file.png', 'image/png'),
('file_txt', None),
('file_png', None),
('file_txt.png', 'image/png'),
('file_png.txt', 'text/plain'),
)
test_mimetypes = ['text/plain', 'image/png', None]
for basename, real_mimetype in files:
for mimetype in test_mimetypes:
email = EmailMessage('subject', 'body', 'from@example.com', ['to@example.com'])
self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype)
self.assertEqual(email.attachments, [])
file_path = os.path.join(os.path.dirname(upath(__file__)), 'attachments', basename)
email.attach_file(file_path, mimetype=mimetype)
self.assertEqual(len(email.attachments), 1)
self.assertIn(basename, email.attachments[0])
msgs_sent_num = email.send()
self.assertEqual(msgs_sent_num, 1)
def test_dummy_backend(self):
"""
Make sure that dummy backends returns correct number of sent messages
"""
connection = dummy.EmailBackend()
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
Make sure that get_connection() accepts arbitrary keyword that might be
used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo='bar')
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection('mail.custombackend.EmailBackend')
self.assertTrue(hasattr(conn, 'test_outbox'))
email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
self.assertIsInstance(mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend)
tmp_dir = tempfile.mkdtemp()
try:
self.assertIsInstance(mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir), filebased.EmailBackend)
finally:
shutil.rmtree(tmp_dir)
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
    @override_settings(
        EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
        ADMINS=[('nobody', 'nobody@example.com')],
        MANAGERS=[('nobody', 'nobody@example.com')])
    def test_connection_arg(self):
        """Test connection argument to send_mail(), et. al."""
        mail.outbox = []
        # Send using non-default connection
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
        # Default (locmem) outbox stays empty; the custom backend captured it.
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, 'Subject')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        send_mass_mail([
            ('Subject1', 'Content1', 'from1@example.com', ['to1@example.com']),
            ('Subject2', 'Content2', 'from2@example.com', ['to2@example.com']),
        ], connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 2)
        self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
        self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        mail_admins('Admin message', 'Content', connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        # mail_admins/mail_managers add the '[Django] ' subject prefix.
        self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        mail_managers('Manager message', 'Content', connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage('Subject', 'From the future', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
self.assertNotIn(b'>From the future', email.message().as_bytes())
    def test_dont_base64_encode(self):
        """
        Text bodies must use 7bit/8bit transfer encoding, never base64 or
        quoted-printable (tickets #3472 and #11212).
        """
        # Ticket #3472
        # Shouldn't use Base64 encoding at all
        msg = EmailMessage('Subject', 'UTF-8 encoded body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        self.assertNotIn(b'Content-Transfer-Encoding: base64', msg.message().as_bytes())
        # Ticket #11212
        # Shouldn't use quoted printable, should detect it can represent content with 7 bit data
        msg = EmailMessage('Subject', 'Body with only ASCII characters.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        s = msg.message().as_bytes()
        self.assertNotIn(b'Content-Transfer-Encoding: quoted-printable', s)
        self.assertIn(b'Content-Transfer-Encoding: 7bit', s)
        # Shouldn't use quoted printable, should detect it can represent content with 8 bit data
        msg = EmailMessage('Subject', 'Body with latin characters: àáä.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        s = msg.message().as_bytes()
        self.assertNotIn(b'Content-Transfer-Encoding: quoted-printable', s)
        self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
        # Same expectation for non-latin (cyrillic) 8-bit content.
        msg = EmailMessage('Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        s = msg.message().as_bytes()
        self.assertNotIn(b'Content-Transfer-Encoding: quoted-printable', s)
        self.assertIn(b'Content-Transfer-Encoding: 8bit', s)
def test_dont_base64_encode_message_rfc822(self):
# Ticket #18967
# Shouldn't use base64 encoding for a child EmailMessage attachment.
# Create a child message first
child_msg = EmailMessage('Child Subject', 'Some body of child message', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
child_s = child_msg.message().as_string()
# Now create a parent
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
# Attach to parent as a string
parent_msg.attach(content=child_s, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertIn(str('Child Subject'), parent_s)
# Feature test: try attaching email.Message object directly to the mail.
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
parent_msg.attach(content=child_msg.message(), mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertIn(str('Child Subject'), parent_s)
# Feature test: try attaching Django's EmailMessage object directly to the mail.
parent_msg = EmailMessage('Parent Subject', 'Some parent body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
parent_msg.attach(content=child_msg, mimetype='message/rfc822')
parent_s = parent_msg.message().as_string()
# Verify that the child message header is not base64 encoded
self.assertIn(str('Child Subject'), parent_s)
class PythonGlobalState(SimpleTestCase):
    """
    Tests for #12422 -- Django's special handling of utf-8 text parts
    (#2472/#11212) must not pollute the global charset registry of the
    Python ``email`` package when django.mail.message is imported: a plain
    MIMEText built directly still gets the stock base64 behaviour.
    """
    def test_utf8(self):
        """Non-ASCII utf-8 text is base64 encoded by stock MIMEText."""
        part = MIMEText('UTF-8 encoded body', 'plain', 'utf-8')
        self.assertIn('Content-Transfer-Encoding: base64', part.as_string())
    def test_7bit(self):
        """Even pure-ASCII utf-8 text keeps the stock base64 encoding."""
        part = MIMEText('Body with only ASCII characters.', 'plain', 'utf-8')
        self.assertIn('Content-Transfer-Encoding: base64', part.as_string())
    def test_8bit_latin(self):
        """Latin 8-bit content keeps the stock base64 encoding."""
        part = MIMEText('Body with latin characters: àáä.', 'plain', 'utf-8')
        self.assertIn(str('Content-Transfer-Encoding: base64'), part.as_string())
    def test_8bit_non_latin(self):
        """Cyrillic 8-bit content keeps the stock base64 encoding."""
        part = MIMEText('Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'plain', 'utf-8')
        self.assertIn(str('Content-Transfer-Encoding: base64'), part.as_string())
class BaseEmailBackendTests(HeadersCheckMixin, object):
    """
    Mixin exercising behaviour common to all email backends. Concrete
    subclasses set ``email_backend`` to the backend's dotted path and
    implement mailbox access via get_mailbox_content()/flush_mailbox().
    """
    # Dotted path of the backend under test; set by subclasses.
    email_backend = None
    def setUp(self):
        self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
        self.settings_override.enable()
    def tearDown(self):
        self.settings_override.disable()
    def assertStartsWith(self, first, second):
        # Compare only the leading slice so the failure message stays readable.
        if not first.startswith(second):
            self.longMessage = True
            self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
    def get_mailbox_content(self):
        # Subclass hook: return the messages captured by the backend.
        raise NotImplementedError('subclasses of BaseEmailBackendTests must provide a get_mailbox_content() method')
    def flush_mailbox(self):
        # Subclass hook: empty the captured-message store.
        raise NotImplementedError('subclasses of BaseEmailBackendTests may require a flush_mailbox() method')
    def get_the_message(self):
        # Return the single captured message, failing loudly otherwise.
        mailbox = self.get_mailbox_content()
        self.assertEqual(len(mailbox), 1,
            "Expected exactly one message, got %d.\n%r" % (len(mailbox), [
                m.as_string() for m in mailbox]))
        return mailbox[0]
    def test_send(self):
        """A single message round-trips with its basic headers intact."""
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
        num_sent = mail.get_connection().send_messages([email])
        self.assertEqual(num_sent, 1)
        message = self.get_the_message()
        self.assertEqual(message["subject"], "Subject")
        self.assertEqual(message.get_payload(), "Content")
        self.assertEqual(message["from"], "from@example.com")
        self.assertEqual(message.get_all("to"), ["to@example.com"])
    def test_send_unicode(self):
        """Non-ASCII subject and body survive sending."""
        email = EmailMessage('Chère maman', 'Je t\'aime très fort', 'from@example.com', ['to@example.com'])
        num_sent = mail.get_connection().send_messages([email])
        self.assertEqual(num_sent, 1)
        message = self.get_the_message()
        self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
        self.assertEqual(force_text(message.get_payload(decode=True)), 'Je t\'aime très fort')
    def test_send_many(self):
        """send_messages() delivers every message in the batch, in order."""
        email1 = EmailMessage('Subject', 'Content1', 'from@example.com', ['to@example.com'])
        email2 = EmailMessage('Subject', 'Content2', 'from@example.com', ['to@example.com'])
        num_sent = mail.get_connection().send_messages([email1, email2])
        self.assertEqual(num_sent, 2)
        messages = self.get_mailbox_content()
        self.assertEqual(len(messages), 2)
        self.assertEqual(messages[0].get_payload(), "Content1")
        self.assertEqual(messages[1].get_payload(), "Content2")
    def test_send_verbose_name(self):
        """A non-ASCII display name in the From address is RFC 2047 encoded."""
        email = EmailMessage("Subject", "Content", '"Firstname Sürname" <from@example.com>',
                             ["to@example.com"])
        email.send()
        message = self.get_the_message()
        self.assertEqual(message["subject"], "Subject")
        self.assertEqual(message.get_payload(), "Content")
        self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <from@example.com>")
    def test_plaintext_send_mail(self):
        """
        Test send_mail without the html_message
        regression test for adding html_message parameter to send_mail()
        """
        send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'])
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get_all('to'), ['nobody@example.com'])
        self.assertFalse(message.is_multipart())
        self.assertEqual(message.get_payload(), 'Content')
        self.assertEqual(message.get_content_type(), 'text/plain')
    def test_html_send_mail(self):
        """Test html_message argument to send_mail"""
        send_mail('Subject', 'Content', 'sender@example.com', ['nobody@example.com'], html_message='HTML Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get_all('to'), ['nobody@example.com'])
        # A multipart/alternative with the plain part first, HTML second.
        self.assertTrue(message.is_multipart())
        self.assertEqual(len(message.get_payload()), 2)
        self.assertEqual(message.get_payload(0).get_payload(), 'Content')
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
    @override_settings(MANAGERS=[('nobody', 'nobody@example.com')])
    def test_html_mail_managers(self):
        """Test html_message argument to mail_managers"""
        mail_managers('Subject', 'Content', html_message='HTML Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), '[Django] Subject')
        self.assertEqual(message.get_all('to'), ['nobody@example.com'])
        self.assertTrue(message.is_multipart())
        self.assertEqual(len(message.get_payload()), 2)
        self.assertEqual(message.get_payload(0).get_payload(), 'Content')
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
    @override_settings(ADMINS=[('nobody', 'nobody@example.com')])
    def test_html_mail_admins(self):
        """Test html_message argument to mail_admins """
        mail_admins('Subject', 'Content', html_message='HTML Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), '[Django] Subject')
        self.assertEqual(message.get_all('to'), ['nobody@example.com'])
        self.assertTrue(message.is_multipart())
        self.assertEqual(len(message.get_payload()), 2)
        self.assertEqual(message.get_payload(0).get_payload(), 'Content')
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
    @override_settings(
        ADMINS=[('nobody', 'nobody+admin@example.com')],
        MANAGERS=[('nobody', 'nobody+manager@example.com')])
    def test_manager_and_admin_mail_prefix(self):
        """
        String prefix + lazy translated subject = bad output
        Regression for #13494
        """
        mail_managers(ugettext_lazy('Subject'), 'Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), '[Django] Subject')
        self.flush_mailbox()
        mail_admins(ugettext_lazy('Subject'), 'Content')
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), '[Django] Subject')
    @override_settings(ADMINS=[], MANAGERS=[])
    def test_empty_admins(self):
        """
        Test that mail_admins/mail_managers doesn't connect to the mail server
        if there are no recipients (#9383)
        """
        mail_admins('hi', 'there')
        self.assertEqual(self.get_mailbox_content(), [])
        mail_managers('hi', 'there')
        self.assertEqual(self.get_mailbox_content(), [])
    def test_message_cc_header(self):
        """
        Regression test for #7722
        """
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
        mail.get_connection().send_messages([email])
        message = self.get_the_message()
        self.assertMessageHasHeaders(message, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('Content-Transfer-Encoding', '7bit'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'to@example.com'),
            ('Cc', 'cc@example.com')})
        # A Date header must be set automatically.
        self.assertIn('\nDate: ', message.as_string())
    def test_idn_send(self):
        """
        Regression test for #14301
        """
        # Non-ASCII domains are converted to IDNA (punycode) form.
        self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
        self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
        self.flush_mailbox()
        m = EmailMessage('Subject', 'Content', 'from@öäü.com',
                         ['to@öäü.com'], cc=['cc@öäü.com'])
        m.send()
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), 'from@xn--4ca9at.com')
        self.assertEqual(message.get('to'), 'to@xn--4ca9at.com')
        self.assertEqual(message.get('cc'), 'cc@xn--4ca9at.com')
    def test_recipient_without_domain(self):
        """
        Regression test for #15042
        """
        self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
        message = self.get_the_message()
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), "tester")
        self.assertEqual(message.get('to'), "django")
    def test_lazy_addresses(self):
        """
        Email sending should support lazy email addresses (#24416).
        """
        _ = ugettext_lazy
        self.assertTrue(send_mail('Subject', 'Content', _('tester'), [_('django')]))
        message = self.get_the_message()
        self.assertEqual(message.get('from'), 'tester')
        self.assertEqual(message.get('to'), 'django')
        self.flush_mailbox()
        m = EmailMessage(
            'Subject', 'Content', _('tester'), [_('to1'), _('to2')],
            cc=[_('cc1'), _('cc2')],
            bcc=[_('bcc')],
            reply_to=[_('reply')],
        )
        self.assertEqual(m.recipients(), ['to1', 'to2', 'cc1', 'cc2', 'bcc'])
        m.send()
        message = self.get_the_message()
        self.assertEqual(message.get('from'), 'tester')
        self.assertEqual(message.get('to'), 'to1, to2')
        self.assertEqual(message.get('cc'), 'cc1, cc2')
        self.assertEqual(message.get('Reply-To'), 'reply')
    def test_close_connection(self):
        """
        Test that connection can be closed (even when not explicitly opened)
        """
        conn = mail.get_connection(username='', password='')
        conn.close()
    def test_use_as_contextmanager(self):
        """
        Test that the connection can be used as a contextmanager.
        """
        # Single-element lists let the stub closures mutate outer state
        # (no `nonlocal` available on Python 2).
        opened = [False]
        closed = [False]
        conn = mail.get_connection(username='', password='')
        def open():
            opened[0] = True
        conn.open = open
        def close():
            closed[0] = True
        conn.close = close
        with conn as same_conn:
            self.assertTrue(opened[0])
            self.assertIs(same_conn, conn)
            self.assertFalse(closed[0])
        self.assertTrue(closed[0])
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
    """Backend tests run against the in-memory (locmem) backend."""
    email_backend = 'django.core.mail.backends.locmem.EmailBackend'
    def get_mailbox_content(self):
        # Parse each captured EmailMessage into an email.Message object.
        return [m.message() for m in mail.outbox]
    def flush_mailbox(self):
        mail.outbox = []
    def tearDown(self):
        super(LocmemBackendTests, self).tearDown()
        mail.outbox = []
    def test_locmem_shared_messages(self):
        """
        Make sure that the locmem backend populates the outbox.
        """
        connection = locmem.EmailBackend()
        connection2 = locmem.EmailBackend()
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        connection.send_messages([email])
        connection2.send_messages([email])
        # Both backend instances write to the single module-level mail.outbox.
        self.assertEqual(len(mail.outbox), 2)
    def test_validate_multiline_headers(self):
        # Ticket #18861 - Validate emails when using the locmem backend
        with self.assertRaises(BadHeaderError):
            send_mail('Subject\nMultiline', 'Content', 'from@example.com', ['to@example.com'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
    """Backend tests run against the file-based backend, writing into a temp dir."""
    email_backend = 'django.core.mail.backends.filebased.EmailBackend'
    def setUp(self):
        super(FileBackendTests, self).setUp()
        self.tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
        self._settings_override.enable()
    def tearDown(self):
        self._settings_override.disable()
        super(FileBackendTests, self).tearDown()
    def flush_mailbox(self):
        for filename in os.listdir(self.tmp_dir):
            os.unlink(os.path.join(self.tmp_dir, filename))
    def get_mailbox_content(self):
        messages = []
        for filename in os.listdir(self.tmp_dir):
            with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
                # Messages within one session file are separated by a line of
                # 79 dashes.
                session = fp.read().split(force_bytes('\n' + ('-' * 79) + '\n', encoding='ascii'))
            messages.extend(message_from_bytes(m) for m in session if m)
        return messages
    def test_file_sessions(self):
        """Make sure opening a connection creates a new file"""
        msg = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        connection = mail.get_connection()
        connection.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
        with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
            message = message_from_binary_file(fp)
        self.assertEqual(message.get_content_type(), 'text/plain')
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), 'from@example.com')
        self.assertEqual(message.get('to'), 'to@example.com')
        # A second connection writes a second file...
        connection2 = mail.get_connection()
        connection2.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
        # ...while reusing the first connection appends to its existing file.
        connection.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
        # msg.send() with a fresh connection creates a third file; a repeat
        # send on that same connection reuses it.
        msg.connection = mail.get_connection()
        self.assertTrue(connection.open())
        msg.send()
        self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
        msg.send()
        self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
        connection.close()
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
    """Backend tests run against the console backend, capturing sys.stdout."""
    email_backend = 'django.core.mail.backends.console.EmailBackend'
    def setUp(self):
        super(ConsoleBackendTests, self).setUp()
        # Swap sys.stdout for a StringIO so the backend's output is captured.
        self.__stdout = sys.stdout
        self.stream = sys.stdout = StringIO()
    def tearDown(self):
        del self.stream
        sys.stdout = self.__stdout
        del self.__stdout
        super(ConsoleBackendTests, self).tearDown()
    def flush_mailbox(self):
        self.stream = sys.stdout = StringIO()
    def get_mailbox_content(self):
        # The console backend separates messages with a line of 79 dashes.
        messages = self.stream.getvalue().split(str('\n' + ('-' * 79) + '\n'))
        return [message_from_bytes(force_bytes(m)) for m in messages if m]
    def test_console_stream_kwarg(self):
        """
        Test that the console backend can be pointed at an arbitrary stream.
        """
        s = StringIO()
        connection = mail.get_connection('django.core.mail.backends.console.EmailBackend', stream=s)
        send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
        message = force_bytes(s.getvalue().split('\n' + ('-' * 79) + '\n')[0])
        self.assertMessageHasHeaders(message, {
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('Content-Transfer-Encoding', '7bit'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'to@example.com')})
        self.assertIn(b'\nDate: ', message)
class FakeSMTPChannel(smtpd.SMTPChannel):
    """SMTPChannel variant tolerating undecodable (e.g. TLS handshake) bytes."""
    def collect_incoming_data(self, data):
        try:
            super(FakeSMTPChannel, self).collect_incoming_data(data)
        except UnicodeDecodeError:
            # ignore decode error in SSL/TLS connection tests as we only care
            # whether the connection attempt was made
            pass
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
    """
    Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
    http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
    """
    channel_class = FakeSMTPChannel
    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self)
        # New kwarg added in Python 3.5; default switching to False in 3.6.
        if sys.version_info >= (3, 5):
            kwargs['decode_data'] = True
        smtpd.SMTPServer.__init__(self, *args, **kwargs)
        # Received messages; guarded by sink_lock.
        self._sink = []
        self.active = False
        self.active_lock = threading.Lock()
        self.sink_lock = threading.Lock()
    def process_message(self, peer, mailfrom, rcpttos, data):
        # Reject (SMTP 553) when the envelope sender doesn't match the
        # From header; otherwise store the parsed message in the sink.
        if PY3:
            data = data.encode('utf-8')
        m = message_from_bytes(data)
        maddr = parseaddr(m.get('from'))[1]
        if mailfrom != maddr:
            return "553 '%s' != '%s'" % (mailfrom, maddr)
        with self.sink_lock:
            self._sink.append(m)
    def get_sink(self):
        # Return a copy of the received messages.
        with self.sink_lock:
            return self._sink[:]
    def flush_sink(self):
        with self.sink_lock:
            self._sink[:] = []
    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until run() signals the loop is up.
        self.__flag.wait()
    def run(self):
        self.active = True
        self.__flag.set()
        while self.active and asyncore.socket_map:
            with self.active_lock:
                asyncore.loop(timeout=0.1, count=1)
        asyncore.close_all()
    def stop(self):
        if self.active:
            self.active = False
            self.join()
class SMTPBackendTestsBase(SimpleTestCase):
    """Starts a FakeSMTPServer for the whole class and points EMAIL_HOST/PORT at it."""
    @classmethod
    def setUpClass(cls):
        super(SMTPBackendTestsBase, cls).setUpClass()
        # Port 0 lets the OS pick a free port; it is read back below.
        cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
        cls._settings_override = override_settings(
            EMAIL_HOST="127.0.0.1",
            EMAIL_PORT=cls.server.socket.getsockname()[1])
        cls._settings_override.enable()
        cls.server.start()
    @classmethod
    def tearDownClass(cls):
        cls._settings_override.disable()
        cls.server.stop()
        super(SMTPBackendTestsBase, cls).tearDownClass()
class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase):
email_backend = 'django.core.mail.backends.smtp.EmailBackend'
def setUp(self):
super(SMTPBackendTests, self).setUp()
self.server.flush_sink()
def tearDown(self):
self.server.flush_sink()
super(SMTPBackendTests, self).tearDown()
def flush_mailbox(self):
self.server.flush_sink()
def get_mailbox_content(self):
return self.server.get_sink()
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, 'not empty username')
self.assertEqual(backend.password, 'not empty password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username='username', password='password')
self.assertEqual(backend.username, 'username')
self.assertEqual(backend.password, 'password')
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password")
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username='', password='')
self.assertEqual(backend.username, '')
self.assertEqual(backend.password, '')
def test_auth_attempted(self):
"""
Test that opening the backend with non empty username/password tries
to authenticate against the SMTP server.
"""
backend = smtp.EmailBackend(
username='not empty username', password='not empty password')
try:
self.assertRaisesMessage(SMTPException,
'SMTP AUTH extension not supported by server.', backend.open)
finally:
backend.close()
def test_server_open(self):
"""
Test that open() tells us whether it opened a connection.
"""
backend = smtp.EmailBackend(username='', password='')
self.assertFalse(backend.connection)
opened = backend.open()
backend.close()
self.assertTrue(opened)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_override_settings(self):
backend = smtp.EmailBackend(use_tls=False)
self.assertFalse(backend.use_tls)
def test_email_tls_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_tls)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_override_settings(self):
backend = smtp.EmailBackend(use_ssl=False)
self.assertFalse(backend.use_ssl)
def test_email_ssl_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_ssl)
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_certfile, 'foo')
@override_settings(EMAIL_SSL_CERTFILE='foo')
def test_email_ssl_certfile_override_settings(self):
backend = smtp.EmailBackend(ssl_certfile='bar')
self.assertEqual(backend.ssl_certfile, 'bar')
def test_email_ssl_certfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_certfile, None)
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_keyfile, 'foo')
@override_settings(EMAIL_SSL_KEYFILE='foo')
def test_email_ssl_keyfile_override_settings(self):
backend = smtp.EmailBackend(ssl_keyfile='bar')
self.assertEqual(backend.ssl_keyfile, 'bar')
def test_email_ssl_keyfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_keyfile, None)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_attempts_starttls(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
try:
self.assertRaisesMessage(SMTPException,
'STARTTLS extension not supported by server.', backend.open)
finally:
backend.close()
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_attempts_ssl_connection(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
try:
self.assertRaises(SSLError, backend.open)
finally:
backend.close()
def test_connection_timeout_default(self):
"""Test that the connection's timeout value is None by default."""
connection = mail.get_connection('django.core.mail.backends.smtp.EmailBackend')
self.assertEqual(connection.timeout, None)
def test_connection_timeout_custom(self):
"""Test that the timeout parameter can be customized."""
class MyEmailBackend(smtp.EmailBackend):
def __init__(self, *args, **kwargs):
kwargs.setdefault('timeout', 42)
super(MyEmailBackend, self).__init__(*args, **kwargs)
myemailbackend = MyEmailBackend()
myemailbackend.open()
self.assertEqual(myemailbackend.timeout, 42)
self.assertEqual(myemailbackend.connection.timeout, 42)
myemailbackend.close()
@override_settings(EMAIL_TIMEOUT=10)
def test_email_timeout_override_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.timeout, 10)
    def test_email_msg_uses_crlf(self):
        """#23063 -- Test that RFC-compliant messages are sent over SMTP."""
        # Monkeypatch smtplib.SMTP.send to capture the raw payload on the
        # wire; the original method is restored in the finally block below.
        send = SMTP.send
        try:
            smtp_messages = []
            def mock_send(self, s):
                smtp_messages.append(s)
                return send(self, s)
            SMTP.send = mock_send
            email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
            mail.get_connection().send_messages([email])
            # Find the actual message
            # (the payload immediately following the SMTP "data" command).
            msg = None
            for i, m in enumerate(smtp_messages):
                if m[:4] == 'data':
                    msg = smtp_messages[i + 1]
                    break
            self.assertTrue(msg)
            if PY3:
                msg = msg.decode('utf-8')
            # Ensure that the message only contains CRLF and not combinations of CRLF, LF, and CR.
            msg = msg.replace('\r\n', '')
            self.assertNotIn('\r', msg)
            self.assertNotIn('\n', msg)
        finally:
            SMTP.send = send
class SMTPBackendStoppedServerTest(SMTPBackendTestsBase):
    """
    This test requires a separate class, because it shuts down the
    FakeSMTPServer started in setUpClass(). It cannot be restarted
    ("RuntimeError: threads can only be started once").
    """
    def test_server_stopped(self):
        """
        Test that closing the backend while the SMTP server is stopped doesn't
        raise an exception.
        """
        backend = smtp.EmailBackend(username='', password='')
        backend.open()
        # Stop the shared server *after* opening so close() hits a dead socket.
        self.server.stop()
        backend.close()
| bsd-3-clause |
Theer108/invenio | invenio/modules/deposit/testsuite/test_deposit_models.py | 12 | 2304 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the Deposit models."""
from flask_registry import RegistryError
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
class DepositionTest(InvenioTestCase):
    """Deposition.create must honour the registered default and explicit types."""
    def setUp(self):
        # Deferred imports: the Flask app/registries must exist before the
        # deposit modules are touched.
        from invenio.modules.deposit.models import DepositionType
        from invenio.modules.deposit.registry import deposit_types, \
            deposit_default_type
        # Unregister any default types
        # (RegistryError means nothing was registered yet -- that is fine).
        try:
            deposit_default_type.unregister()
        except RegistryError:
            pass
        # Create some test types.
        class DefaultType(DepositionType):
            pass
        class AnotherType(DepositionType):
            pass
        # Register types
        self.DefaultType = DefaultType
        self.AnotherType = AnotherType
        deposit_types.register(DefaultType)
        deposit_types.register(AnotherType)
        deposit_default_type.register(DefaultType)
    def test_create(self):
        """Created depositions carry the default type unless one is given."""
        from invenio.ext.login.legacy_user import UserInfo
        from invenio.modules.deposit.models import Deposition
        user = UserInfo(uid=1)
        d = Deposition.create(user)
        assert d.type == self.DefaultType
        # Round-trip through storage must preserve the type.
        assert Deposition.get(d.id).type == self.DefaultType
        d = Deposition.create(user, type=self.AnotherType)
        assert d.type == self.AnotherType
        assert Deposition.get(d.id).type == self.AnotherType
TEST_SUITE = make_test_suite(DepositionTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
abstract-open-solutions/OCB | addons/auth_openid/utils.py | 428 | 1589 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# NOTE(review): presumably the byte length used for generated OpenID
# association keys/nonces -- confirm at the call sites.
KEY_LENGTH = 16
# Map OpenID Simple Registration (SREG) field names to their Attribute
# Exchange (AX) schema URIs.
SREG2AX = { # from http://www.axschema.org/types/#sreg
 'nickname': 'http://axschema.org/namePerson/friendly',
 'email': 'http://axschema.org/contact/email',
 'fullname': 'http://axschema.org/namePerson',
 'dob': 'http://axschema.org/birthDate',
 'gender': 'http://axschema.org/person/gender',
 'postcode': 'http://axschema.org/contact/postalCode/home',
 'country': 'http://axschema.org/contact/country/home',
 'language': 'http://axschema.org/pref/language',
 'timezone': 'http://axschema.org/pref/timezone',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
liqi328/rjrepaircompany | django/utils/unittest/loader.py | 353 | 13437 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import unittest
from fnmatch import fnmatch
from django.utils.unittest import case, suite
try:
from os.path import relpath
except ImportError:
from django.utils.unittest.compatibility import relpath
__unittest = True
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
    """Return a suite whose single synthetic test re-raises the import
    failure for module *name*."""
    message = 'Failed to import test module: %s' % name
    # Python 2.3 compatibility: format_exc may be missing. When present it
    # also includes two frames of discover.py itself.
    if hasattr(traceback, 'format_exc'):
        message = '%s\n%s' % (message, traceback.format_exc())
    return _make_failed_test('ModuleImportFailure', name,
                             ImportError(message), suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
    # Wrap a failing ``load_tests`` call as a synthetic failing test so the
    # error surfaces in the run instead of aborting discovery.
    return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
    """Synthesize a TestCase subclass named *classname* whose only test
    method (*methodname*) raises *exception*, wrapped in a suite."""
    def testFailure(self):
        raise exception
    TestClass = type(classname, (case.TestCase,), {methodname: testFailure})
    return suiteClass((TestClass(methodname),))
class TestLoader(unittest.TestLoader):
    """
    This class is responsible for loading tests according to various criteria
    and returning them wrapped in a TestSuite
    """
    # Methods whose names start with this prefix are collected as tests.
    testMethodPrefix = 'test'
    # Python 2 ``cmp`` by default; adapted via _CmpToKey for key-based sorts.
    sortTestMethodsUsing = cmp
    suiteClass = suite.TestSuite
    # Cached top-level directory shared by discover()/_get_name_from_path().
    _top_level_dir = None
    def loadTestsFromTestCase(self, testCaseClass):
        """Return a suite of all tests cases contained in testCaseClass"""
        if issubclass(testCaseClass, suite.TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite."
                            " Maybe you meant to derive from TestCase?")
        testCaseNames = self.getTestCaseNames(testCaseClass)
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            testCaseNames = ['runTest']
        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
        return loaded_suite
    def loadTestsFromModule(self, module, use_load_tests=True):
        """Return a suite of all tests cases contained in the given module"""
        tests = []
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
                tests.append(self.loadTestsFromTestCase(obj))
        load_tests = getattr(module, 'load_tests', None)
        tests = self.suiteClass(tests)
        if use_load_tests and load_tests is not None:
            try:
                # load_tests protocol: the module may post-process its tests.
                return load_tests(self, tests, None)
            except Exception, e:
                # Surface the failure as a synthetic failing test.
                return _make_failed_load_tests(module.__name__, e,
                                               self.suiteClass)
        return tests
    def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.
        The name may resolve either to a module, a test case class, a
        test method within a test case class, or a callable object which
        returns a TestCase or TestSuite instance.
        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        if module is None:
            parts_copy = parts[:]
            # Import the longest importable dotted prefix of ``name``; the
            # remaining parts are resolved with getattr below.
            while parts_copy:
                try:
                    module = __import__('.'.join(parts_copy))
                    break
                except ImportError:
                    del parts_copy[-1]
                    if not parts_copy:
                        raise
            parts = parts[1:]
        obj = module
        for part in parts:
            parent, obj = obj, getattr(obj, part)
        if isinstance(obj, types.ModuleType):
            return self.loadTestsFromModule(obj)
        elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
            return self.loadTestsFromTestCase(obj)
        elif (isinstance(obj, types.UnboundMethodType) and
              isinstance(parent, type) and
              issubclass(parent, case.TestCase)):
            # A single test method: wrap it as a one-test suite.
            return self.suiteClass([parent(obj.__name__)])
        elif isinstance(obj, unittest.TestSuite):
            return obj
        elif hasattr(obj, '__call__'):
            test = obj()
            if isinstance(test, unittest.TestSuite):
                return test
            elif isinstance(test, unittest.TestCase):
                return self.suiteClass([test])
            else:
                raise TypeError("calling %s returned %s, not a test" %
                                (obj, test))
        else:
            raise TypeError("don't know how to make test from: %s" % obj)
    def loadTestsFromNames(self, names, module=None):
        """Return a suite of all tests cases found using the given sequence
        of string specifiers. See 'loadTestsFromName()'.
        """
        suites = [self.loadTestsFromName(name, module) for name in names]
        return self.suiteClass(suites)
    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        def isTestMethod(attrname, testCaseClass=testCaseClass,
                         prefix=self.testMethodPrefix):
            return attrname.startswith(prefix) and \
                hasattr(getattr(testCaseClass, attrname), '__call__')
        testFnNames = filter(isTestMethod, dir(testCaseClass))
        if self.sortTestMethodsUsing:
            testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
        return testFnNames
    def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
        """Find and return all test modules from the specified start
        directory, recursing into subdirectories to find them. Only test files
        that match the pattern will be loaded. (Using shell style pattern
        matching.)
        All test modules must be importable from the top level of the project.
        If the start directory is not the top level directory then the top
        level directory must be specified separately.
        If a test package name (directory with '__init__.py') matches the
        pattern then the package will be checked for a 'load_tests' function. If
        this exists then it will be called with loader, tests, pattern.
        If load_tests exists then discovery does *not* recurse into the package,
        load_tests is responsible for loading all tests in the package.
        The pattern is deliberately not stored as a loader attribute so that
        packages can continue discovery themselves. top_level_dir is stored so
        load_tests does not need to pass this argument in to loader.discover().
        """
        set_implicit_top = False
        if top_level_dir is None and self._top_level_dir is not None:
            # make top_level_dir optional if called from load_tests in a package
            top_level_dir = self._top_level_dir
        elif top_level_dir is None:
            set_implicit_top = True
            top_level_dir = start_dir
        top_level_dir = os.path.abspath(top_level_dir)
        if not top_level_dir in sys.path:
            # all test modules must be importable from the top level directory
            # should we *unconditionally* put the start directory in first
            # in sys.path to minimise likelihood of conflicts between installed
            # modules and development versions?
            sys.path.insert(0, top_level_dir)
        self._top_level_dir = top_level_dir
        is_not_importable = False
        if os.path.isdir(os.path.abspath(start_dir)):
            start_dir = os.path.abspath(start_dir)
            if start_dir != top_level_dir:
                is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
        else:
            # support for discovery from dotted module names
            try:
                __import__(start_dir)
            except ImportError:
                is_not_importable = True
            else:
                the_module = sys.modules[start_dir]
                top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
                if set_implicit_top:
                    # Infer the top level from the package's location on disk.
                    self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
                    sys.path.remove(top_level_dir)
        if is_not_importable:
            raise ImportError('Start directory is not importable: %r' % start_dir)
        tests = list(self._find_tests(start_dir, pattern))
        return self.suiteClass(tests)
    def _get_name_from_path(self, path):
        # Convert a filesystem path under the top-level dir into a dotted
        # module name.
        path = os.path.splitext(os.path.normpath(path))[0]
        _relpath = relpath(path, self._top_level_dir)
        assert not os.path.isabs(_relpath), "Path must be within the project"
        assert not _relpath.startswith('..'), "Path must be within the project"
        name = _relpath.replace(os.path.sep, '.')
        return name
    def _get_module_from_name(self, name):
        # Import by dotted name and return the leaf module object.
        __import__(name)
        return sys.modules[name]
    def _match_path(self, path, full_path, pattern):
        # override this method to use alternative matching strategy
        return fnmatch(path, pattern)
    def _find_tests(self, start_dir, pattern):
        """Used by discovery. Yields test suites it loads."""
        paths = os.listdir(start_dir)
        for path in paths:
            full_path = os.path.join(start_dir, path)
            if os.path.isfile(full_path):
                if not VALID_MODULE_NAME.match(path):
                    # valid Python identifiers only
                    continue
                if not self._match_path(path, full_path, pattern):
                    continue
                # if the test file matches, load it
                name = self._get_name_from_path(full_path)
                try:
                    module = self._get_module_from_name(name)
                except:
                    yield _make_failed_import_test(name, self.suiteClass)
                else:
                    # Guard against importing a same-named module from a
                    # different location (e.g. a globally installed copy).
                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
                    realpath = os.path.splitext(mod_file)[0]
                    fullpath_noext = os.path.splitext(full_path)[0]
                    if realpath.lower() != fullpath_noext.lower():
                        module_dir = os.path.dirname(realpath)
                        mod_name = os.path.splitext(os.path.basename(full_path))[0]
                        expected_dir = os.path.dirname(full_path)
                        msg = ("%r module incorrectly imported from %r. Expected %r. "
                               "Is this module globally installed?")
                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
                    yield self.loadTestsFromModule(module)
            elif os.path.isdir(full_path):
                if not os.path.isfile(os.path.join(full_path, '__init__.py')):
                    continue
                load_tests = None
                tests = None
                if fnmatch(path, pattern):
                    # only check load_tests if the package directory itself matches the filter
                    name = self._get_name_from_path(full_path)
                    package = self._get_module_from_name(name)
                    load_tests = getattr(package, 'load_tests', None)
                    tests = self.loadTestsFromModule(package, use_load_tests=False)
                if load_tests is None:
                    if tests is not None:
                        # tests loaded from package file
                        yield tests
                    # recurse into the package
                    for test in self._find_tests(full_path, pattern):
                        yield test
                else:
                    try:
                        yield load_tests(self, tests, pattern)
                    except Exception, e:
                        yield _make_failed_load_tests(package.__name__, e,
                                                      self.suiteClass)
# Shared ready-to-use loader instance (mirrors unittest.defaultTestLoader).
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
    # Build a TestLoader configured with the given prefix/sort/suite class.
    loader = TestLoader()
    loader.sortTestMethodsUsing = sortUsing
    loader.testMethodPrefix = prefix
    if suiteClass:
        loader.suiteClass = suiteClass
    return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
    # Module-level convenience wrapper around TestLoader.getTestCaseNames.
    return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
              suiteClass=suite.TestSuite):
    # Legacy helper: build a suite from a single TestCase class.
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
                  suiteClass=suite.TestSuite):
    # Legacy helper: build a suite from every TestCase found in *module*.
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
| bsd-3-clause |
ErinCall/sync-engine | migrations/versions/143_add_reply_to_message_id.py | 9 | 1802 | """add reply_to_message_id
Revision ID: 1d7a72222b7c
Revises:2d8a350b4885
Create Date: 2015-02-18 21:40:50.082303
"""
# revision identifiers, used by Alembic.
revision = '1d7a72222b7c'
down_revision = '2d8a350b4885'
from alembic import op
def upgrade():
    """Rename message.resolved_message_id to reply_to_message_id (MySQL 5.5)."""
    conn = op.get_bind()
    # This rigamarole is only necessary in MySQL 5.5. In MySQL 5.6 you can just
    # change the column name.
    # The constraint name might be `message_ibfk_2` or `message_ibfk_3` or
    # whatever, so figure out which it is first.
    constraint_name = conn.execute(
        '''SELECT constraint_name FROM information_schema.key_column_usage
        WHERE table_name='message' AND referenced_table_name='message'
        AND constraint_schema=DATABASE()''').fetchone()[0]
    # Drop the self-referential FK, rename the column, then re-add the FK
    # under the same constraint name.
    conn.execute('ALTER TABLE message DROP FOREIGN KEY {}'.format(constraint_name))
    conn.execute('ALTER TABLE message CHANGE resolved_message_id reply_to_message_id INT(11)')
    conn.execute('ALTER TABLE message ADD CONSTRAINT {} FOREIGN KEY (reply_to_message_id) REFERENCES message(id)'.
                 format(constraint_name))
def downgrade():
    """Rename message.reply_to_message_id back to resolved_message_id."""
    conn = op.get_bind()
    # Look up the current self-referential FK name, as in upgrade().
    constraint_name = conn.execute(
        '''SELECT constraint_name FROM information_schema.key_column_usage
        WHERE table_name='message' AND referenced_table_name='message'
        AND constraint_schema=DATABASE()''').fetchone()[0]
    conn.execute('ALTER TABLE message DROP FOREIGN KEY {}'.format(constraint_name))
    # NOTE(review): this second DROP uses a hard-coded constraint name and has
    # no counterpart in upgrade(); it will error when `message_ibfk_3` does not
    # exist (or was the FK just dropped above). Confirm it is intentional.
    conn.execute('ALTER TABLE message DROP FOREIGN KEY message_ibfk_3')
    conn.execute('ALTER TABLE message CHANGE reply_to_message_id resolved_message_id INT(11)')
    conn.execute('ALTER TABLE message ADD CONSTRAINT {} FOREIGN KEY (resolved_message_id) REFERENCES message(id)'.
                 format(constraint_name))
| agpl-3.0 |
RAtechntukan/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/roxwel.py | 180 | 1965 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import unified_strdate, determine_ext
class RoxwelIE(InfoExtractor):
    """Extractor for roxwel.com player pages (RTMP-delivered videos)."""
    _VALID_URL = r'https?://www\.roxwel\.com/player/(?P<filename>.+?)(\.|\?|$)'
    _TEST = {
        'url': 'http://www.roxwel.com/player/passionpittakeawalklive.html',
        'info_dict': {
            'id': 'passionpittakeawalklive',
            'ext': 'flv',
            'title': 'Take A Walk (live)',
            'uploader': 'Passion Pit',
            'uploader_id': 'passionpit',
            'upload_date': '20120928',
            'description': 'Passion Pit performs "Take A Walk\" live at The Backyard in Austin, Texas. ',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        filename = mobj.group('filename')
        info_url = 'http://www.roxwel.com/api/videos/%s' % filename
        info = self._download_json(info_url, filename)
        # Pick the highest 'flv_<rate>' quality advertised by the API.
        rtmp_rates = sorted([int(r.replace('flv_', '')) for r in info['media_rates'] if r.startswith('flv_')])
        best_rate = rtmp_rates[-1]
        url_page_url = 'http://roxwel.com/pl_one_time.php?filename=%s&quality=%s' % (filename, best_rate)
        rtmp_url = self._download_webpage(url_page_url, filename, 'Downloading video url')
        ext = determine_ext(rtmp_url)
        if ext == 'f4v':
            # f4v streams need the 'mp4:' playpath prefix for rtmpdump.
            rtmp_url = rtmp_url.replace(filename, 'mp4:%s' % filename)
        return {
            'id': filename,
            'title': info['title'],
            'url': rtmp_url,
            'ext': 'flv',
            'description': info['description'],
            'thumbnail': info.get('player_image_url') or info.get('image_url_large'),
            'uploader': info['artist'],
            'uploader_id': info['artistname'],
            'upload_date': unified_strdate(info['dbdate']),
        }
| gpl-3.0 |
d-plaindoux/rapido | src/main/resources/python/services.py | 1 | 3161 | #
# This file has been generated / Do not modify it
#
@[|------------------------------------------------------------------------------------------
Higher attributes for object construction
------------------------------------------------------------------------------------------|]
@DEFINE::Attributes
[|@OR
[|@VAL::object[|[@REP(, )::attributes[|'@VAL::name'|]]|]|]
[|[]|]|]
@DEFINE::Virtuals
[|@OR
[|@VAL::object[|[@REP(, )::virtual[|"@VAL::name"|]]|]|]
[|[]|]|]
@[|------------------------------------------------------------------------------------------
Service and method parameters
------------------------------------------------------------------------------------------|]
@DEFINE::ParameterNames
[|@REP::params[|, @VAL::name|]|]
@[|------------------------------------------------------------------------------------------
Path transformed using string interpolation and Path variables
------------------------------------------------------------------------------------------|]
@DEFINE::PathAsString
[|"/@REP::values[|@OR[|@VAL::name|][|%s|]|]"|]
@DEFINE::PathVariable
[|@REP(, )::values[|@OPT[|['@VAL::object'@REP::fields[|, '@VAL'|]]|]|]|]
@DEFINE::PathVariables
[|[@USE::PathVariable]|]
@[|------------------------------------------------------------------------------------------
Main for services generation
------------------------------------------------------------------------------------------|]
"""
Services:@REP(, )::services[|@VAL::name|]
"""
from @OPT[|@USE::package.|]core import services
@OPT[|from @USE::package |]import types
@REP::services[|
class __@VAL::name(services.BasicService):
#
# Constructor
#
def __init__(self, protocol, url@VAL::route[|@USE::ParameterNames|]):
@VAL::route[|services.BasicService.__init__(self, protocol, url)
self.implicit_data = self.merge_data([@REP(, )::params[|@VAL::name|]])
self.path = @VAL::path[|self.get_path(self.implicit_data, @USE::PathAsString, @USE::PathVariables)|]|]
#
# Public behaviors
#
@REP( )::entries[|def @VAL::name(self@VAL::signature::inputs[|@REP[|, @VAL::name|])|]:
data = self.merge_data([self.implicit_data@VAL::signature::inputs[|@REP[|, @VAL::name|]|]])
result = self.http_request(
path=@OR[|@VAL::path[|self.get_path(data, @USE::PathAsString, @USE::PathVariables)|]|][|""|],
operation="@VAL::operation",
params=@OR[|@VAL::params[|self.get_object(types.@VAL::name(data).to_dict(), @USE::Attributes + @USE::Virtuals)|]|][|{}|],
body=@OR[|@VAL::body[|self.get_object(types.@VAL::name(data).to_dict(), @USE::Attributes + @USE::Virtuals)|]|][|{}|],
header=@OR[|@VAL::header[|self.get_object(types.@VAL::name(data).to_dict(), @USE::Attributes + @USE::Virtuals)|]|][|{}|]
)
return @VAL::signature::output[|types.@VAL::name(result)|]
|]|]
#
# Service factories
#
@REP::services[|
def @VAL::name(proto, url):
return lambda@VAL::route[|@REP(, )::params[| @VAL::name|]|]: __@VAL::name(proto, url@VAL::route[|@USE::ParameterNames|])
|]
| lgpl-2.1 |
Intel-Corporation/tensorflow | tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py | 10 | 2109 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `ShuffleAndRepeatFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ShuffleAndRepeatFusionTest(test_base.DatasetTestBase):
  """Exercises the ShuffleAndRepeat fusion graph rewrite."""
  def testShuffleAndRepeatFusion(self):
    # assert_next fails iteration unless the next op is the fused kernel.
    dataset = dataset_ops.Dataset.range(10).apply(
        optimization.assert_next(["ShuffleAndRepeat"])).shuffle(10).repeat(2)
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.shuffle_and_repeat_fusion = True
    dataset = dataset.with_options(options)
    get_next = self.getNext(dataset)
    # Two repeats of a 10-element shuffle: each epoch must be a
    # permutation of 0..9.
    for _ in range(2):
      results = []
      for _ in range(10):
        results.append(self.evaluate(get_next()))
      self.assertAllEqual([x for x in range(10)], sorted(results))
    # After both epochs the iterator is exhausted (checked twice).
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
| apache-2.0 |
marc-sensenich/ansible | test/units/modules/packaging/language/test_pip.py | 84 | 1432 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import json
import pytest
from ansible.modules.packaging.language import pip
pytestmark = pytest.mark.usefixtures('patch_ansible_module')
@pytest.mark.parametrize('patch_ansible_module', [{'name': 'six'}], indirect=['patch_ansible_module'])
def test_failure_when_pip_absent(mocker, capfd):
    """The module must fail cleanly when no pip executable can be found."""
    get_bin_path = mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
    get_bin_path.return_value = None
    # AnsibleModule.fail_json exits the process, hence the SystemExit.
    with pytest.raises(SystemExit):
        pip.main()
    out, err = capfd.readouterr()
    results = json.loads(out)
    assert results['failed']
    assert 'pip needs to be installed' in results['msg']
@pytest.mark.parametrize('patch_ansible_module, test_input, expected', [
    [None, ['django>1.11.1', '<1.11.2', 'ipaddress', 'simpleproject<2.0.0', '>1.1.0'],
     ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']],
    [None, ['django>1.11.1,<1.11.2,ipaddress', 'simpleproject<2.0.0,>1.1.0'],
     ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']],
    [None, ['django>1.11.1', '<1.11.2', 'ipaddress,simpleproject<2.0.0,>1.1.0'],
     ['django>1.11.1,<1.11.2', 'ipaddress', 'simpleproject<2.0.0,>1.1.0']]])
def test_recover_package_name(test_input, expected):
    # Bare version specifiers (e.g. '<1.11.2') must be re-attached to the
    # preceding package name.
    assert pip._recover_package_name(test_input) == expected
| gpl-3.0 |
trustedanalytics/space-shuttle-demo | client/space_shuttle_client.py | 1 | 2720 | #
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import thread
import time
import argparse
from twisted.python import log
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
from client_config import Config
class DataStreamProtocol(WebSocketClientProtocol):
    """
    Protocol class which specifies the behaviour of the client.
    """
    def onOpen(self):
        """
        Open connection handling.
        """
        log.msg("Connection established")
        def run():
            log.msg("Sending data...")
            # Replay the sample file forever; _send_data reopens it on EOF.
            while True:
                self._send_data()
                log.msg("End of file. Reopening...")
            # NOTE(review): unreachable -- the loop above never exits.
            log.msg("Thread terminating...")
        thread.start_new_thread(run, ())
    def onClose(self, wasClean, code, reason):
        # Invoked by Autobahn when the websocket drops, cleanly or not.
        log.msg("Websocket connection closed: {0}".format(reason))
    def _send_data(self):
        # `config` is the module-level Config instance created in __main__.
        with open(config.file_path, "r") as data:
            for line in data:
                log.msg(line)
                # Server expects a JSON array; each file line is one record.
                self.sendMessage("[" + line + "]")
                # Throttle to roughly ten records per second.
                time.sleep(0.1)
def parse_arguments(argv=None):
    """Parse command line options for the Space Shuttle client.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse falls back to ``sys.argv[1:]`` -- existing callers
            keep working, while tests can pass an explicit list.

    Returns:
        argparse.Namespace with ``gateway_url`` (str or None) and ``https``
        (bool, True when ``--use-https`` was given).
    """
    parser = argparse.ArgumentParser(
        description='Deployment script for Space Shuttle client')
    parser.add_argument('--gateway-url', type=str,
                        help='gateway api url, '
                             'e.g. gateway-479613d7.demotrustedanalytics.com')
    parser.add_argument('--use-https', dest='https', action='store_true',
                        help='set of flag cause use of `https_proxy` env '
                             'instead of default `http_proxy`')
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Log to stdout so console capture picks up everything.
    log.startLogging(sys.stdout)
    args = parse_arguments()
    config = Config(external_gateway=args.gateway_url,
                    use_https=args.https)
    uri = config.uri
    proxy_config = config.http_proxy_config
    factory = WebSocketClientFactory(uri, proxy=proxy_config)
    factory.protocol = DataStreamProtocol
    connectWS(factory)
    # Blocks until the reactor is stopped (e.g. Ctrl-C).
    reactor.run()
| apache-2.0 |
ChristopherHogan/numpy | numpy/core/tests/test_numeric.py | 46 | 83454 | from __future__ import division, absolute_import, print_function
import sys
import warnings
import itertools
import platform
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_almost_equal, assert_array_almost_equal, dec
)
class TestResize(TestCase):
    """np.resize behaviour: tiling into larger shapes and zero-size output."""
    def test_copies(self):
        src = np.array([[1, 2], [3, 4]])
        # Enlarging repeats the flattened input in C order.
        assert_equal(np.resize(src, (2, 4)),
                     np.array([[1, 2, 3, 4], [1, 2, 3, 4]]))
        assert_equal(np.resize(src, (4, 2)),
                     np.array([[1, 2], [3, 4], [1, 2], [3, 4]]))
        assert_equal(np.resize(src, (4, 3)),
                     np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]))
    def test_zeroresize(self):
        # Resizing to a zero-length shape yields an empty array.
        src = np.array([[1, 2], [3, 4]])
        assert_equal(np.resize(src, (0,)), np.array([]))
class TestNonarrayArgs(TestCase):
    # check that non-array arguments to functions wrap them in arrays
    def test_squeeze(self):
        A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
        assert_(np.squeeze(A).shape == (3, 3))
    def test_cumproduct(self):
        # NOTE(review): np.cumproduct is a legacy spelling -- presumably an
        # alias of np.cumprod; confirm before modernizing.
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
    def test_size(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.size(A) == 6)
        assert_(np.size(A, 0) == 2)
        assert_(np.size(A, 1) == 3)
    def test_mean(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_(np.mean(A) == 3.5)
        assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
        assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
        # Reducing an empty list yields nan and must emit a RuntimeWarning.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.mean([])))
            assert_(w[0].category is RuntimeWarning)
    def test_std(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(np.std(A), 1.707825127659933)
        assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
        assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.std([])))
            assert_(w[0].category is RuntimeWarning)
    def test_var(self):
        A = [[1, 2, 3], [4, 5, 6]]
        assert_almost_equal(np.var(A), 2.9166666666666665)
        assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
        assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', RuntimeWarning)
            assert_(np.isnan(np.var([])))
            assert_(w[0].category is RuntimeWarning)
class TestBoolScalar(TestCase):
    """np.True_ / np.False_ are singletons; logical and bitwise operations
    must hand back those very singleton objects (checked with ``is``)."""
    def test_logical(self):
        false, true = np.False_, np.True_
        word = "xyz"
        # Python's short-circuit ``and`` returns one of its operands.
        self.assertTrue((true and word) is word)
        self.assertTrue((false and word) is false)
    def test_bitwise_or(self):
        false, true = np.False_, np.True_
        for left, right, expected in [(true, true, true),
                                      (false, true, true),
                                      (true, false, true),
                                      (false, false, false)]:
            self.assertTrue((left | right) is expected)
    def test_bitwise_and(self):
        false, true = np.False_, np.True_
        for left, right, expected in [(true, true, true),
                                      (false, true, false),
                                      (true, false, false),
                                      (false, false, false)]:
            self.assertTrue((left & right) is expected)
    def test_bitwise_xor(self):
        false, true = np.False_, np.True_
        for left, right, expected in [(true, true, false),
                                      (false, true, true),
                                      (true, false, true),
                                      (false, false, false)]:
            self.assertTrue((left ^ right) is expected)
class TestBoolArray(TestCase):
    """Element-wise logical operations on boolean arrays.

    Array lengths and slice offsets are chosen to exercise both the
    unaligned prologue and the vectorized (SIMD) inner loops.
    NOTE(review): `np.bool` is the deprecated alias of the builtin bool;
    confirm against the numpy version this suite targets.
    """

    def setUp(self):
        # offset for simd tests
        self.t = np.array([True] * 41, dtype=np.bool)[1::]
        self.f = np.array([False] * 41, dtype=np.bool)[1::]
        self.o = np.array([False] * 42, dtype=np.bool)[2::]
        # nm/im: mostly-False / mostly-True arrays with two entries flipped
        self.nm = self.f.copy()
        self.im = self.t.copy()
        self.nm[3] = True
        self.nm[-2] = True
        self.im[3] = False
        self.im[-2] = False

    def test_all_any(self):
        """all()/any() on uniform and nearly-uniform boolean arrays."""
        self.assertTrue(self.t.all())
        self.assertTrue(self.t.any())
        self.assertFalse(self.f.all())
        self.assertFalse(self.f.any())
        self.assertTrue(self.nm.any())
        self.assertTrue(self.im.any())
        self.assertFalse(self.nm.all())
        self.assertFalse(self.im.all())
        # check bad element in all positions
        for i in range(256 - 7):
            d = np.array([False] * 256, dtype=np.bool)[7::]
            d[i] = True
            self.assertTrue(np.any(d))
            e = np.array([True] * 256, dtype=np.bool)[7::]
            e[i] = False
            self.assertFalse(np.all(e))
            assert_array_equal(e, ~d)
        # big array test for blocked libc loops
        for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
            d = np.array([False] * 100043, dtype=np.bool)
            d[i] = True
            self.assertTrue(np.any(d), msg="%r" % i)
            e = np.array([True] * 100043, dtype=np.bool)
            e[i] = False
            self.assertFalse(np.all(e), msg="%r" % i)

    def test_logical_not_abs(self):
        """~ and abs() on bool arrays, including writes into an out array."""
        assert_array_equal(~self.t, self.f)
        assert_array_equal(np.abs(~self.t), self.f)
        assert_array_equal(np.abs(~self.f), self.t)
        assert_array_equal(np.abs(self.f), self.f)
        assert_array_equal(~np.abs(self.f), self.t)
        assert_array_equal(~np.abs(self.t), self.f)
        assert_array_equal(np.abs(~self.nm), self.im)
        # out= variants reuse the preallocated offset array self.o
        np.logical_not(self.t, out=self.o)
        assert_array_equal(self.o, self.f)
        np.abs(self.t, out=self.o)
        assert_array_equal(self.o, self.t)

    def test_logical_and_or_xor(self):
        """&, |, ^ and their ufunc forms, with array and scalar operands."""
        assert_array_equal(self.t | self.t, self.t)
        assert_array_equal(self.f | self.f, self.f)
        assert_array_equal(self.t | self.f, self.t)
        assert_array_equal(self.f | self.t, self.t)
        np.logical_or(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.t)
        assert_array_equal(self.t & self.t, self.t)
        assert_array_equal(self.f & self.f, self.f)
        assert_array_equal(self.t & self.f, self.f)
        assert_array_equal(self.f & self.t, self.f)
        np.logical_and(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.t)
        assert_array_equal(self.t ^ self.t, self.f)
        assert_array_equal(self.f ^ self.f, self.f)
        assert_array_equal(self.t ^ self.f, self.t)
        assert_array_equal(self.f ^ self.t, self.t)
        np.logical_xor(self.t, self.t, out=self.o)
        assert_array_equal(self.o, self.f)
        # mixed (partially-set) operands and Python-bool scalar operands
        assert_array_equal(self.nm & self.t, self.nm)
        assert_array_equal(self.im & self.f, False)
        assert_array_equal(self.nm & True, self.nm)
        assert_array_equal(self.im & False, self.f)
        assert_array_equal(self.nm | self.t, self.t)
        assert_array_equal(self.im | self.f, self.im)
        assert_array_equal(self.nm | True, self.t)
        assert_array_equal(self.im | False, self.im)
        assert_array_equal(self.nm ^ self.t, self.im)
        assert_array_equal(self.im ^ self.f, self.im)
        assert_array_equal(self.nm ^ True, self.im)
        assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(TestCase):
    """Boolean results of float comparisons against zero.

    The fixtures enumerate every on/off bit pattern over 8 (float32) and
    4 (float64) consecutive lanes so all SIMD comparison branches run.
    """

    def setUp(self):
        self.f = np.ones(256, dtype=np.float32)
        self.ef = np.ones(self.f.size, dtype=np.bool)
        self.d = np.ones(128, dtype=np.float64)
        self.ed = np.ones(self.d.size, dtype=np.bool)
        # generate values for all permutation of 256bit simd vectors
        s = 0
        for i in range(32):
            self.f[s:s+8] = [i & 2**x for x in range(8)]
            self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
            s += 8
        s = 0
        for i in range(16):
            self.d[s:s+4] = [i & 2**x for x in range(4)]
            self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
            s += 4
        # NaN variants: every nonzero slot becomes NaN for the isnan checks
        self.nf = self.f.copy()
        self.nd = self.d.copy()
        self.nf[self.ef] = np.nan
        self.nd[self.ed] = np.nan

    def test_float(self):
        """float32 comparisons against zero at several alignment offsets."""
        # offset for alignment test
        for i in range(4):
            assert_array_equal(self.f[i:] > 0, self.ef[i:])
            assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
            assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
            assert_array_equal(-self.f[i:] < 0, self.ef[i:])
            assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
            r = self.f[i:] != 0
            assert_array_equal(r, self.ef[i:])
            r2 = self.f[i:] != np.zeros_like(self.f[i:])
            r3 = 0 != self.f[i:]
            assert_array_equal(r, r2)
            assert_array_equal(r, r3)
            # check bool == 0x1
            assert_array_equal(r.view(np.int8), r.astype(np.int8))
            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
            # isnan on amd64 takes the same codepath
            assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])

    def test_double(self):
        """float64 comparisons against zero at several alignment offsets."""
        # offset for alignment test
        for i in range(2):
            assert_array_equal(self.d[i:] > 0, self.ed[i:])
            assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
            assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
            assert_array_equal(-self.d[i:] < 0, self.ed[i:])
            assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
            r = self.d[i:] != 0
            assert_array_equal(r, self.ed[i:])
            r2 = self.d[i:] != np.zeros_like(self.d[i:])
            r3 = 0 != self.d[i:]
            assert_array_equal(r, r2)
            assert_array_equal(r, r3)
            # check bool == 0x1
            assert_array_equal(r.view(np.int8), r.astype(np.int8))
            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
            # isnan on amd64 takes the same codepath
            assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
class TestSeterr(TestCase):
    """np.geterr/np.seterr/np.seterrobj floating-point error-state handling."""

    def test_default(self):
        """The default state warns on everything except underflow."""
        err = np.geterr()
        self.assertEqual(err, dict(
            divide='warn',
            invalid='warn',
            over='warn',
            under='ignore',
        ))

    def test_set(self):
        """seterr returns the prior state and only updates the given keys."""
        with np.errstate():
            err = np.seterr()
            old = np.seterr(divide='print')
            self.assertTrue(err == old)
            new = np.seterr()
            self.assertTrue(new['divide'] == 'print')
            np.seterr(over='raise')
            self.assertTrue(np.geterr()['over'] == 'raise')
            # setting 'over' must not disturb the 'divide' setting
            self.assertTrue(new['divide'] == 'print')
            np.seterr(**old)
            self.assertTrue(np.geterr() == old)

    @dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
    def test_divide_err(self):
        """divide='raise' turns 1/0 into FloatingPointError; 'ignore' silences it."""
        with np.errstate(divide='raise'):
            try:
                np.array([1.]) / np.array([0.])
            except FloatingPointError:
                pass
            else:
                self.fail()
            np.seterr(divide='ignore')
            np.array([1.]) / np.array([0.])

    def test_errobj(self):
        """seterrobj callback (log mode) fires once per offending operation."""
        olderrobj = np.geterrobj()
        self.called = 0
        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                with np.errstate(divide='warn'):
                    np.seterrobj([20000, 1, None])
                    np.array([1.]) / np.array([0.])
                self.assertEqual(len(w), 1)

            def log_err(*args):
                self.called += 1
                extobj_err = args
                assert (len(extobj_err) == 2)
                assert ("divide" in extobj_err[0])

            with np.errstate(divide='ignore'):
                np.seterrobj([20000, 3, log_err])
                np.array([1.]) / np.array([0.])
            self.assertEqual(self.called, 1)
            np.seterrobj(olderrobj)
            # the extobj= keyword overrides the global error object
            with np.errstate(divide='ignore'):
                np.divide(1., 0., extobj=[20000, 3, log_err])
            self.assertEqual(self.called, 2)
        finally:
            np.seterrobj(olderrobj)
            del self.called

    def test_errobj_noerrmask(self):
        # errmask = 0 has a special code path for the default
        olderrobj = np.geterrobj()
        try:
            # set errobj to something non default
            np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
                          umath.ERR_DEFAULT + 1, None])
            #call a ufunc
            np.isnan(np.array([6]))
            # same with the default, lots of times to get rid of possible
            # pre-existing stack in the code
            for i in range(10000):
                np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
                              None])
                np.isnan(np.array([6]))
        finally:
            np.seterrobj(olderrobj)
class TestFloatExceptions(TestCase):
    """Floating-point exception raising under np.errstate(all='raise')."""

    def assert_raises_fpe(self, fpeerr, flop, x, y):
        """Assert flop(x, y) raises FloatingPointError containing `fpeerr`."""
        ftype = type(x)
        try:
            flop(x, y)
            assert_(False,
                    "Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
        except FloatingPointError as exc:
            assert_(str(exc).find(fpeerr) >= 0,
                    "Type %s raised wrong fpe error '%s'." % (ftype, exc))

    def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
        # Check that fpe exception is raised.
        #
        # Given a floating operation `flop` and two scalar values, check that
        # the operation raises the floating point exception specified by
        #`fpeerr`. Tests all variants with 0-d array scalars as well.
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])

    @dec.knownfailureif(True, "See ticket #2350")
    def test_floating_exceptions(self):
        # Test basic arithmetic function errors
        with np.errstate(all='raise'):
            # Test for all real and complex float types
            for typecode in np.typecodes['AllFloat']:
                ftype = np.obj2sctype(typecode)
                if np.dtype(ftype).kind == 'f':
                    # Get some extreme values for the type
                    fi = np.finfo(ftype)
                    ft_tiny = fi.tiny
                    ft_max = fi.max
                    ft_eps = fi.eps
                    underflow = 'underflow'
                    divbyzero = 'divide by zero'
                else:
                    # 'c', complex, corresponding real dtype
                    rtype = type(ftype(0).real)
                    fi = np.finfo(rtype)
                    ft_tiny = ftype(fi.tiny)
                    ft_max = ftype(fi.max)
                    ft_eps = ftype(fi.eps)
                    # The complex types raise different exceptions
                    underflow = ''
                    divbyzero = ''
                overflow = 'overflow'
                invalid = 'invalid'
                self.assert_raises_fpe(underflow,
                                       lambda a, b:a/b, ft_tiny, ft_max)
                self.assert_raises_fpe(underflow,
                                       lambda a, b:a*b, ft_tiny, ft_tiny)
                self.assert_raises_fpe(overflow,
                                       lambda a, b:a*b, ft_max, ftype(2))
                self.assert_raises_fpe(overflow,
                                       lambda a, b:a/b, ft_max, ftype(0.5))
                self.assert_raises_fpe(overflow,
                                       lambda a, b:a+b, ft_max, ft_max*ft_eps)
                self.assert_raises_fpe(overflow,
                                       lambda a, b:a-b, -ft_max, ft_max*ft_eps)
                self.assert_raises_fpe(overflow,
                                       np.power, ftype(2), ftype(2**fi.nexp))
                self.assert_raises_fpe(divbyzero,
                                       lambda a, b:a/b, ftype(1), ftype(0))
                self.assert_raises_fpe(invalid,
                                       lambda a, b:a/b, ftype(np.inf), ftype(np.inf))
                self.assert_raises_fpe(invalid,
                                       lambda a, b:a/b, ftype(0), ftype(0))
                self.assert_raises_fpe(invalid,
                                       lambda a, b:a-b, ftype(np.inf), ftype(np.inf))
                self.assert_raises_fpe(invalid,
                                       lambda a, b:a+b, ftype(np.inf), ftype(-np.inf))
                self.assert_raises_fpe(invalid,
                                       lambda a, b:a*b, ftype(0), ftype(np.inf))

    def test_warnings(self):
        # test warning code path
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with np.errstate(all="warn"):
                np.divide(1, 0.)
                self.assertEqual(len(w), 1)
                self.assertTrue("divide by zero" in str(w[0].message))
                np.array(1e300) * np.array(1e300)
                self.assertEqual(len(w), 2)
                self.assertTrue("overflow" in str(w[-1].message))
                np.array(np.inf) - np.array(np.inf)
                self.assertEqual(len(w), 3)
                self.assertTrue("invalid value" in str(w[-1].message))
                np.array(1e-300) * np.array(1e-300)
                self.assertEqual(len(w), 4)
                self.assertTrue("underflow" in str(w[-1].message))
class TestTypes(TestCase):
    """Type promotion rules: result_type, promote_types, can_cast.

    The expected dtypes encode this numpy version's promotion tables;
    restructuring these tables would change what is being tested.
    """

    def check_promotion_cases(self, promote_func):
        #Tests that the scalars get coerced correctly.
        b = np.bool_(0)
        i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
        u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
        f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
        c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
        # coercion within the same kind
        assert_equal(promote_func(i8, i16), np.dtype(np.int16))
        assert_equal(promote_func(i32, i8), np.dtype(np.int32))
        assert_equal(promote_func(i16, i64), np.dtype(np.int64))
        assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
        assert_equal(promote_func(f32, f64), np.dtype(np.float64))
        assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
        assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
        assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
        assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
        assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
        # coercion between kinds
        assert_equal(promote_func(b, i32), np.dtype(np.int32))
        assert_equal(promote_func(b, u8), np.dtype(np.uint8))
        assert_equal(promote_func(i8, u8), np.dtype(np.int16))
        assert_equal(promote_func(u8, i32), np.dtype(np.int32))
        assert_equal(promote_func(i64, u32), np.dtype(np.int64))
        assert_equal(promote_func(u64, i32), np.dtype(np.float64))
        assert_equal(promote_func(i32, f32), np.dtype(np.float64))
        assert_equal(promote_func(i64, f32), np.dtype(np.float64))
        assert_equal(promote_func(f32, i16), np.dtype(np.float32))
        assert_equal(promote_func(f32, u32), np.dtype(np.float64))
        assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
        assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
        assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
        # coercion between scalars and 1-D arrays
        assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
        assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
        assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
        assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
        assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
        assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
        assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
        assert_equal(promote_func(np.int32(-1), np.array([u64])),
                     np.dtype(np.float64))
        assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
        assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
        assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
        assert_equal(promote_func(fld, np.array([c64])),
                     np.dtype(np.complex64))
        assert_equal(promote_func(c64, np.array([f64])),
                     np.dtype(np.complex128))
        assert_equal(promote_func(np.complex64(3j), np.array([f64])),
                     np.dtype(np.complex128))
        # coercion between scalars and 1-D arrays, where
        # the scalar has greater kind than the array
        assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
        assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
        assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
        assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
        assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
        # uint and int are treated as the same "kind" for
        # the purposes of array-scalar promotion.
        assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
        # float and complex are treated as the same "kind" for
        # the purposes of array-scalar promotion, so that you can do
        # (0j + float32array) to get a complex64 array instead of
        # a complex128 array.
        assert_equal(promote_func(np.array([f32]), c128),
                     np.dtype(np.complex64))

    def test_coercion(self):
        """Promotion realized through actual ufunc arithmetic (np.add)."""
        def res_type(a, b):
            return np.add(a, b).dtype
        self.check_promotion_cases(res_type)
        # Use-case: float/complex scalar * bool/int8 array
        # shouldn't narrow the float/complex type
        for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
            b = 1.234 * a
            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
            b = np.longdouble(1.234) * a
            assert_equal(b.dtype, np.dtype(np.longdouble),
                         "array type %s" % a.dtype)
            b = np.float64(1.234) * a
            assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
            b = np.float32(1.234) * a
            assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
            b = np.float16(1.234) * a
            assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
            b = 1.234j * a
            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
            b = np.clongdouble(1.234j) * a
            assert_equal(b.dtype, np.dtype(np.clongdouble),
                         "array type %s" % a.dtype)
            b = np.complex128(1.234j) * a
            assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
            b = np.complex64(1.234j) * a
            assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
        # The following use-case is problematic, and to resolve its
        # tricky side-effects requires more changes.
        #
        ## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
        ## a float32, shouldn't promote to float64
        #a = np.array([1.0, 1.5], dtype=np.float32)
        #t = np.array([True, False])
        #b = t*a
        #assert_equal(b, [1.0, 0.0])
        #assert_equal(b.dtype, np.dtype('f4'))
        #b = (1-t)*a
        #assert_equal(b, [0.0, 1.5])
        #assert_equal(b.dtype, np.dtype('f4'))
        ## Probably ~t (bitwise negation) is more proper to use here,
        ## but this is arguably less intuitive to understand at a glance, and
        ## would fail if 't' is actually an integer array instead of boolean:
        #b = (~t)*a
        #assert_equal(b, [0.0, 1.5])
        #assert_equal(b.dtype, np.dtype('f4'))

    def test_result_type(self):
        """Promotion realized through np.result_type itself."""
        self.check_promotion_cases(np.result_type)
        assert_(np.result_type(None) == np.dtype(None))

    def test_promote_types_endian(self):
        # promote_types should always return native-endian types
        assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
        assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
        assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
        assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
        assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
        assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
        assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
        assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
        assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
        assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
        assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
        assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
        assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
        assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
        assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
        assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))

    def test_promote_types_strings(self):
        """String widths grow to fit the widest repr of the numeric type."""
        assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
        assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
        assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
        assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
        assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
        assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
        assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
        assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
        assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
        assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
        assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
        assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
        assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
        assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
        assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
        assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
        assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
        assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
        assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
        assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
        assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
        assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
        assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
        assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
        assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
        assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
        assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
        assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))

    def test_can_cast(self):
        """np.can_cast under the 'no'/'equiv'/'safe'/'same_kind'/'unsafe' rules."""
        assert_(np.can_cast(np.int32, np.int64))
        assert_(np.can_cast(np.float64, np.complex))
        assert_(not np.can_cast(np.complex, np.float))
        assert_(np.can_cast('i8', 'f8'))
        assert_(not np.can_cast('i8', 'f4'))
        assert_(np.can_cast('i4', 'S11'))
        assert_(np.can_cast('i8', 'i8', 'no'))
        assert_(not np.can_cast('<i8', '>i8', 'no'))
        assert_(np.can_cast('<i8', '>i8', 'equiv'))
        assert_(not np.can_cast('<i4', '>i8', 'equiv'))
        assert_(np.can_cast('<i4', '>i8', 'safe'))
        assert_(not np.can_cast('<i8', '>i4', 'safe'))
        assert_(np.can_cast('<i8', '>i4', 'same_kind'))
        assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
        assert_(np.can_cast('<i8', '>u4', 'unsafe'))
        assert_(np.can_cast('bool', 'S5'))
        assert_(not np.can_cast('bool', 'S4'))
        assert_(np.can_cast('b', 'S4'))
        assert_(not np.can_cast('b', 'S3'))
        assert_(np.can_cast('u1', 'S3'))
        assert_(not np.can_cast('u1', 'S2'))
        assert_(np.can_cast('u2', 'S5'))
        assert_(not np.can_cast('u2', 'S4'))
        assert_(np.can_cast('u4', 'S10'))
        assert_(not np.can_cast('u4', 'S9'))
        assert_(np.can_cast('u8', 'S20'))
        assert_(not np.can_cast('u8', 'S19'))
        assert_(np.can_cast('i1', 'S4'))
        assert_(not np.can_cast('i1', 'S3'))
        assert_(np.can_cast('i2', 'S6'))
        assert_(not np.can_cast('i2', 'S5'))
        assert_(np.can_cast('i4', 'S11'))
        assert_(not np.can_cast('i4', 'S10'))
        assert_(np.can_cast('i8', 'S21'))
        assert_(not np.can_cast('i8', 'S20'))
        assert_(np.can_cast('bool', 'S5'))
        assert_(not np.can_cast('bool', 'S4'))
        assert_(np.can_cast('b', 'U4'))
        assert_(not np.can_cast('b', 'U3'))
        assert_(np.can_cast('u1', 'U3'))
        assert_(not np.can_cast('u1', 'U2'))
        assert_(np.can_cast('u2', 'U5'))
        assert_(not np.can_cast('u2', 'U4'))
        assert_(np.can_cast('u4', 'U10'))
        assert_(not np.can_cast('u4', 'U9'))
        assert_(np.can_cast('u8', 'U20'))
        assert_(not np.can_cast('u8', 'U19'))
        assert_(np.can_cast('i1', 'U4'))
        assert_(not np.can_cast('i1', 'U3'))
        assert_(np.can_cast('i2', 'U6'))
        assert_(not np.can_cast('i2', 'U5'))
        assert_(np.can_cast('i4', 'U11'))
        assert_(not np.can_cast('i4', 'U10'))
        assert_(np.can_cast('i8', 'U21'))
        assert_(not np.can_cast('i8', 'U20'))
        # non-dtype arguments are rejected
        assert_raises(TypeError, np.can_cast, 'i4', None)
        assert_raises(TypeError, np.can_cast, None, 'i4')
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
    """Raised by the iterators in TestFromiter's error-propagation tests."""
    pass
class TestFromiter(TestCase):
def makegen(self):
for x in range(24):
yield x**2
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
self.assertTrue(ai32.dtype == np.dtype(np.int32))
self.assertTrue(ai64.dtype == np.dtype(np.int64))
self.assertTrue(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
self.assertTrue(len(a) == len(expected))
self.assertTrue(len(a20) == 20)
self.assertRaises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
self.assertTrue(np.alltrue(a == expected, axis=0))
self.assertTrue(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
# Raise an exception at the desired index in the iterator.
for e in range(n):
if e == eindex:
raise NIterError('error at index %s' % eindex)
yield e
def test_2592(self):
# Test iteration exceptions are correctly raised.
count, eindex = 10, 5
self.assertRaises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
def test_2592_edge(self):
# Test iter. exceptions, edge case (exception at end of iterator).
count = 10
eindex = count-1
self.assertRaises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
class TestNonzero(TestCase):
    """np.nonzero / np.count_nonzero on scalars, 1-D, 2-D and sparse data."""

    def test_nonzero_trivial(self):
        """Empty arrays and 0-d scalars."""
        assert_equal(np.count_nonzero(np.array([])), 0)
        assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
        assert_equal(np.nonzero(np.array([])), ([],))
        assert_equal(np.count_nonzero(np.array(0)), 0)
        assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
        assert_equal(np.nonzero(np.array(0)), ([],))
        assert_equal(np.count_nonzero(np.array(1)), 1)
        assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
        assert_equal(np.nonzero(np.array(1)), ([0],))

    def test_nonzero_onedim(self):
        """1-D plain and structured (per-field) arrays."""
        x = np.array([1, 0, 2, -1, 0, 0, 8])
        assert_equal(np.count_nonzero(x), 4)
        assert_equal(np.count_nonzero(x), 4)
        assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
        x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
                     dtype=[('a', 'i4'), ('b', 'i2')])
        assert_equal(np.count_nonzero(x['a']), 3)
        assert_equal(np.count_nonzero(x['b']), 4)
        assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
        assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))

    def test_nonzero_twodim(self):
        """2-D arrays, including unaligned field views and transposes."""
        x = np.array([[0, 1, 0], [2, 0, 3]])
        assert_equal(np.count_nonzero(x), 3)
        assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
        x = np.eye(3)
        assert_equal(np.count_nonzero(x), 3)
        assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
        x = np.array([[(0, 1), (0, 0), (1, 11)],
                   [(1, 1), (1, 0), (0, 0)],
                   [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
        assert_equal(np.count_nonzero(x['a']), 4)
        assert_equal(np.count_nonzero(x['b']), 5)
        assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
        assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
        # NOTE(review): the 'b' field view of the transpose is expected to be
        # unaligned here — this depends on the structured dtype layout above.
        assert_(not x['a'].T.flags.aligned)
        assert_equal(np.count_nonzero(x['a'].T), 4)
        assert_equal(np.count_nonzero(x['b'].T), 5)
        assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
        assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))

    def test_sparse(self):
        # test special sparse condition boolean code path
        for i in range(20):
            c = np.zeros(200, dtype=np.bool)
            c[i::20] = True
            assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
            c = np.zeros(400, dtype=np.bool)
            c[10 + i:20 + i] = True
            c[20 + i*2] = True
            assert_equal(np.nonzero(c)[0],
                         np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))

    def test_return_type(self):
        """nonzero always returns writeable base-class ndarrays, even for
        subclass inputs."""
        class C(np.ndarray):
            pass

        for view in (C, np.ndarray):
            for nd in range(1, 4):
                shape = tuple(range(2, 2+nd))
                x = np.arange(np.prod(shape)).reshape(shape).view(view)
                for nzx in (np.nonzero(x), x.nonzero()):
                    for nzx_i in nzx:
                        assert_(type(nzx_i) is np.ndarray)
                        assert_(nzx_i.flags.writeable)
class TestIndex(TestCase):
    """Boolean (mask) indexing checks."""

    def test_boolean(self):
        arr = rand(3, 5, 8)
        mask_src = rand(5, 8)
        rows = randint(0, 5, size=15)
        cols = randint(0, 8, size=15)
        # Flip the sign at random positions so the mask is nontrivial.
        mask_src[rows, cols] = -mask_src[rows, cols]
        # Masking each leading slice must agree with masking the whole array.
        assert_((np.array([arr[0][mask_src > 0],
                           arr[1][mask_src > 0],
                           arr[2][mask_src > 0]]) == arr[:, mask_src > 0]).all())

    def test_boolean_edgecase(self):
        # An empty boolean mask over an empty array selects nothing but
        # must preserve the dtype.
        data = np.array([], dtype='int32')
        mask = np.array([], dtype='bool')
        picked = data[mask]
        assert_equal(picked, [])
        assert_equal(picked.dtype, np.dtype('int32'))
class TestBinaryRepr(TestCase):
    """Checks for np.binary_repr."""

    def test_zero(self):
        assert_equal(np.binary_repr(0), '0')

    def test_large(self):
        # Round-trip check for a multi-byte positive value.
        assert_equal(np.binary_repr(10736848), '101000111101010011010000')

    def test_negative(self):
        # No width: sign-prefixed binary; explicit width: two's complement.
        for args, kwargs, expected in [((-1,), {}, '-1'),
                                       ((-1,), {'width': 8}, '11111111')]:
            assert_equal(np.binary_repr(*args, **kwargs), expected)
class TestBaseRepr(TestCase):
    """Checks for np.base_repr in assorted bases."""

    def test_base3(self):
        # 3**5 in base 3 is a one followed by five zeros.
        assert_equal(np.base_repr(3**5, 3), '100000')

    def test_positive(self):
        cases = [((12, 10), '12'),
                 ((12, 10, 4), '000012'),
                 ((12, 4), '30'),
                 ((3731624803700888, 36), '10QR0ROFCEW')]
        for args, expected in cases:
            assert_equal(np.base_repr(*args), expected)

    def test_negative(self):
        cases = [((-12, 10), '-12'),
                 ((-12, 10, 4), '-000012'),
                 ((-12, 4), '-30')]
        for args, expected in cases:
            assert_equal(np.base_repr(*args), expected)
class TestArrayComparisons(TestCase):
    """np.array_equal / np.array_equiv must return plain Python bools."""

    def _check_bool(self, res, expected):
        # The result must be a builtin bool (not np.bool_) of the right value.
        assert_(res if expected else not res)
        assert_(type(res) is bool)

    def test_array_equal(self):
        cases = [
            (np.array([1, 2]), np.array([1, 2]), True),
            (np.array([1, 2]), np.array([1, 2, 3]), False),
            (np.array([1, 2]), np.array([3, 4]), False),
            (np.array([1, 2]), np.array([1, 3]), False),
            (np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'), True),
            (np.array([('a', 1)], dtype='S1,u4'),
             np.array([('a', 1)], dtype='S1,u4'), True),
        ]
        for a, b, expected in cases:
            self._check_bool(np.array_equal(a, b), expected)

    def test_array_equiv(self):
        # array_equiv additionally allows the operands to broadcast.
        cases = [
            (np.array([1, 2]), np.array([1, 2]), True),
            (np.array([1, 2]), np.array([1, 2, 3]), False),
            (np.array([1, 2]), np.array([3, 4]), False),
            (np.array([1, 2]), np.array([1, 3]), False),
            (np.array([1, 1]), np.array([1]), True),
            (np.array([1, 1]), np.array([[1], [1]]), True),
            (np.array([1, 2]), np.array([2]), False),
            (np.array([1, 2]), np.array([[1], [2]]), False),
            (np.array([1, 2]),
             np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), False),
        ]
        for a, b, expected in cases:
            self._check_bool(np.array_equiv(a, b), expected)
def assert_array_strict_equal(x, y):
    """Assert x and y agree in values, memory flags, and byte order.

    Stricter than assert_array_equal: also compares the ndarray flags
    (contiguity, ownership, writeability) and dtype nativeness.
    """
    assert_array_equal(x, y)
    # Check flags, 32 bit arches typically don't provide 16 byte alignment
    if ((x.dtype.alignment <= 8 or
            np.intp().dtype.itemsize != 4) and
            sys.platform != 'win32'):
        assert_(x.flags == y.flags)
    else:
        assert_(x.flags.owndata == y.flags.owndata)
        assert_(x.flags.writeable == y.flags.writeable)
        assert_(x.flags.c_contiguous == y.flags.c_contiguous)
        assert_(x.flags.f_contiguous == y.flags.f_contiguous)
        # 'updateifcopy' was renamed to 'writebackifcopy' (numpy 1.14) and
        # later removed; probe both names so this helper works across
        # numpy versions instead of raising AttributeError.
        for flag_name in ('updateifcopy', 'writebackifcopy'):
            x_flag = getattr(x.flags, flag_name, None)
            if x_flag is not None:
                assert_(x_flag == getattr(y.flags, flag_name))
                break
    # check endianness
    assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip(TestCase):
def setUp(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None):
if out is None:
return a.clip(m, M)
else:
return a.clip(m, M, out)
def clip(self, a, m, M, out=None):
# use slow-clip
selector = np.less(a, m) + 2*np.greater(a, M)
return selector.choose((a, m, M), out=out)
# Handy functions
    def _generate_data(self, n, m):
        """Return an (n, m) array of standard-normal doubles."""
        return randn(n, m)
    def _generate_data_complex(self, n, m):
        """Return an (n, m) complex array with random real/imag parts."""
        return randn(n, m) + 1.j * rand(n, m)
    def _generate_flt_data(self, n, m):
        """Return an (n, m) standard-normal array downcast to float32."""
        return (randn(n, m)).astype(np.float32)
    def _neg_byteorder(self, a):
        """Return `a` converted to the non-native byte order."""
        a = np.asarray(a)
        if sys.byteorder == 'little':
            a = a.astype(a.dtype.newbyteorder('>'))
        else:
            a = a.astype(a.dtype.newbyteorder('<'))
        return a
    def _generate_non_native_data(self, n, m):
        """Return an (n, m) normal array stored in non-native byte order."""
        data = randn(n, m)
        data = self._neg_byteorder(data)
        assert_(not data.dtype.isnative)
        return data
    def _generate_int_data(self, n, m):
        """Return an (n, m) int64 array of values in [0, 10)."""
        return (10 * rand(n, m)).astype(np.int64)
    def _generate_int32_data(self, n, m):
        """Return an (n, m) int32 array of values in [0, 10)."""
        return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
def test_simple_double(self):
#Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_int(self):
#Test native int input with scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_array_double(self):
#Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = np.zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
#Test non native double input with scalar min/max.
#Test native double input with non native double scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
#Test native double input with non native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
#Test native complex input with native double scalar min/max.
#Test native input with complex double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
#Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_complex(self):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
a = np.ones(10, dtype=np.complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
aM = self.fastclip(a, None, M)
assert_array_strict_equal(am, a)
assert_array_strict_equal(aM, a)
def test_clip_non_contig(self):
#Test clip for non contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
    # Native double input with scalar min/max, writing into out arrays.
    a = self._generate_data(self.nr, self.nc)
    m = -0.5
    M = 0.6
    ac = np.zeros(a.shape)
    act = np.zeros(a.shape)
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_simple_int32_inout(self):
    # Native int32 input with double min/max and int32 out (downcasting).
    a = self._generate_int32_data(self.nr, self.nc)
    m = np.float64(0)
    M = np.float64(2)
    ac = np.zeros(a.shape, dtype=np.int32)
    act = ac.copy()
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
    # Native int32 input with int32 scalar min/max and int64 out (upcasting).
    a = self._generate_int32_data(self.nr, self.nc)
    m = np.int32(-1)
    M = np.int32(1)
    ac = np.zeros(a.shape, dtype=np.int64)
    act = ac.copy()
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
    # Native int32 input with double array min/max and int32 out.
    # NOTE(review): despite "int64" in the method name, the out array is
    # int32 -- looks like a historical misnomer; confirm before renaming.
    a = self._generate_int32_data(self.nr, self.nc)
    m = np.zeros(a.shape, np.float64)
    M = np.float64(1)
    ac = np.zeros(a.shape, dtype=np.int32)
    act = ac.copy()
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
    # Native double input with scalar min/max and int32 out (downcasting).
    a = self._generate_data(self.nr, self.nc)
    m = -1.0
    M = 2.0
    ac = np.zeros(a.shape, dtype=np.int32)
    act = ac.copy()
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
    # Native double input clipped in place (out is the input itself) with
    # array min / scalar max.
    a = self._generate_data(self.nr, self.nc)
    ac = a.copy()
    m = np.zeros(a.shape)
    M = 1.0
    self.fastclip(a, m, M, a)
    self.clip(a, m, M, ac)
    assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
    # Native double input clipped in place with scalar min/max.
    a = self._generate_data(self.nr, self.nc)
    ac = a.copy()
    m = -0.5
    M = 0.6
    self.fastclip(a, m, M, a)
    self.clip(a, m, M, ac)
    assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
    # Non-contiguous double input with double scalar min/max, in place.
    a = self._generate_data(self.nr * 2, self.nc * 3)
    a = a[::2, ::3]
    assert_(not a.flags['F_CONTIGUOUS'])
    assert_(not a.flags['C_CONTIGUOUS'])
    ac = a.copy()
    m = -0.5
    M = 0.6
    self.fastclip(a, m, M, a)
    self.clip(a, m, M, ac)
    assert_array_equal(a, ac)
def test_type_cast_01(self):
    # Native double input with scalar min/max.
    a = self._generate_data(self.nr, self.nc)
    m = -0.5
    M = 0.6
    ac = self.fastclip(a, m, M)
    act = self.clip(a, m, M)
    assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
    # Native int32 input with int32 scalar min/max.
    a = self._generate_int_data(self.nr, self.nc)
    a = a.astype(np.int32)
    m = -2
    M = 4
    ac = self.fastclip(a, m, M)
    act = self.clip(a, m, M)
    assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
    # Native int32 input with float64 scalar min/max.
    a = self._generate_int32_data(self.nr, self.nc)
    m = -2
    M = 4
    ac = self.fastclip(a, np.float64(m), np.float64(M))
    act = self.clip(a, np.float64(m), np.float64(M))
    assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
    # Native int32 input with float32 scalar min/max.
    # (Note: here act holds the fastclip result and ac the reference.)
    a = self._generate_int32_data(self.nr, self.nc)
    m = np.float32(-2)
    M = np.float32(4)
    act = self.fastclip(a, m, M)
    ac = self.clip(a, m, M)
    assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
    # Native int input with a double array min and double scalar max.
    a = self._generate_int_data(self.nr, self.nc)
    m = -0.5
    M = 1.
    ac = self.fastclip(a, m * np.zeros(a.shape), M)
    act = self.clip(a, m * np.zeros(a.shape), M)
    assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
    # Native input with a non-native byte order scalar min.
    a = self._generate_data(self.nr, self.nc)
    m = 0.5
    m_s = self._neg_byteorder(m)
    M = 1.
    act = self.clip(a, m_s, M)
    ac = self.fastclip(a, m_s, M)
    assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
    # Non-native byte order input with native array min / scalar max.
    a = self._generate_data(self.nr, self.nc)
    m = -0.5 * np.ones(a.shape)
    M = 1.
    a_s = self._neg_byteorder(a)
    assert_(not a_s.dtype.isnative)
    act = a_s.clip(m, M)
    ac = self.fastclip(a_s, m, M)
    assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
    # Non-native byte order input with native scalar min/max.
    a = self._generate_data(self.nr, self.nc)
    m = -0.5
    M = 1.
    a_s = self._neg_byteorder(a)
    assert_(not a_s.dtype.isnative)
    ac = self.fastclip(a_s, m, M)
    act = a_s.clip(m, M)
    assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
    # Native input with a non-native byte order array min.
    a = self._generate_data(self.nr, self.nc)
    m = -0.5 * np.ones(a.shape)
    M = 1.
    m_s = self._neg_byteorder(m)
    assert_(not m_s.dtype.isnative)
    ac = self.fastclip(a, m_s, M)
    act = self.clip(a, m_s, M)
    assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
    # Native int input with float32 min/max and a float32 out argument.
    a = self._generate_int_data(self.nr, self.nc)
    b = np.zeros(a.shape, dtype=np.float32)
    m = np.float32(-0.5)
    M = np.float32(1)
    act = self.clip(a, m, M, out=b)
    ac = self.fastclip(a, m, M, out=b)
    assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
    # Non-native input with native scalar min/max and a non-native out.
    a = self._generate_non_native_data(self.nr, self.nc)
    b = a.copy()
    b = b.astype(b.dtype.newbyteorder('>'))
    bt = b.copy()
    m = -0.5
    M = 1.
    self.fastclip(a, m, M, out=b)
    self.clip(a, m, M, out=bt)
    assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
    # Native int input with int32 min/max and a float32 out argument.
    a = self._generate_int_data(self.nr, self.nc)
    b = np.zeros(a.shape, dtype=np.float32)
    m = np.int32(0)
    M = np.int32(1)
    act = self.clip(a, m, M, out=b)
    ac = self.fastclip(a, m, M, out=b)
    assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
    # Native double input with scalar min/max, results written to out.
    a = self._generate_data(self.nr, self.nc)
    m = -0.5
    M = 0.6
    ac = np.zeros(a.shape)
    act = np.zeros(a.shape)
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
    # Native int32 input with double min/max and int32 out.
    a = self._generate_int32_data(self.nr, self.nc)
    m = np.float64(0)
    M = np.float64(2)
    ac = np.zeros(a.shape, dtype=np.int32)
    act = ac.copy()
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
    # Native int32 input with int32 scalar min/max and int64 out.
    a = self._generate_int32_data(self.nr, self.nc)
    m = np.int32(-1)
    M = np.int32(1)
    ac = np.zeros(a.shape, dtype=np.int64)
    act = ac.copy()
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
    # Native int32 input with double array min/max and int32 out.
    a = self._generate_int32_data(self.nr, self.nc)
    m = np.zeros(a.shape, np.float64)
    M = np.float64(1)
    ac = np.zeros(a.shape, dtype=np.int32)
    act = ac.copy()
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
    # Native double input with scalar min/max and int32 out.
    a = self._generate_data(self.nr, self.nc)
    m = -1.0
    M = 2.0
    ac = np.zeros(a.shape, dtype=np.int32)
    act = ac.copy()
    self.fastclip(a, m, M, ac)
    self.clip(a, m, M, act)
    assert_array_strict_equal(ac, act)
def test_clip_inplace_array(self):
    # Native double input clipped in place with array min / scalar max.
    a = self._generate_data(self.nr, self.nc)
    ac = a.copy()
    m = np.zeros(a.shape)
    M = 1.0
    self.fastclip(a, m, M, a)
    self.clip(a, m, M, ac)
    assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
    # Native double input clipped in place with scalar min/max.
    a = self._generate_data(self.nr, self.nc)
    ac = a.copy()
    m = -0.5
    M = 0.6
    self.fastclip(a, m, M, a)
    self.clip(a, m, M, ac)
    assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
    # np.clip() must accept an out= argument and return that same array.
    a = self._generate_data(self.nr, self.nc)
    ac = a.copy()
    m = -0.5
    M = 0.6
    a2 = np.clip(a, m, M, out=a)
    self.clip(a, m, M, ac)
    assert_array_strict_equal(a2, ac)
    self.assertTrue(a2 is a)
def test_clip_nan(self):
    # NaN bounds leave the data unchanged (every comparison against NaN
    # is false, so no element is clipped).
    d = np.arange(7.)
    assert_equal(d.clip(min=np.nan), d)
    assert_equal(d.clip(max=np.nan), d)
    assert_equal(d.clip(min=np.nan, max=np.nan), d)
    assert_equal(d.clip(min=-2, max=np.nan), d)
    assert_equal(d.clip(min=np.nan, max=10), d)
class TestAllclose(object):
    """Parametric (nose-style yield) tests for np.allclose."""
    rtol = 1e-5
    atol = 1e-8

    def setUp(self):
        # allclose on inf/nan triggers invalid-value FP warnings; ignore
        # them for the duration of each test.
        self.olderr = np.seterr(invalid='ignore')

    def tearDown(self):
        np.seterr(**self.olderr)

    def tst_allclose(self, x, y):
        assert_(np.allclose(x, y), "%s and %s not close" % (x, y))

    def tst_not_allclose(self, x, y):
        assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))

    def test_ip_allclose(self):
        # Parametric test factory: pairs that must compare as close.
        arr = np.array([100, 1000])
        aran = np.arange(125).reshape((5, 5, 5))
        atol = self.atol
        rtol = self.rtol
        data = [([1, 0], [1, 0]),
                ([atol], [0]),
                ([1], [1+rtol+atol]),
                (arr, arr + arr*rtol),
                (arr, arr + arr*rtol + atol*2),
                (aran, aran + aran*rtol),
                (np.inf, np.inf),
                (np.inf, [np.inf])]
        for (x, y) in data:
            yield (self.tst_allclose, x, y)

    def test_ip_not_allclose(self):
        # Parametric test factory: pairs that must NOT compare as close.
        aran = np.arange(125).reshape((5, 5, 5))
        atol = self.atol
        rtol = self.rtol
        data = [([np.inf, 0], [1, np.inf]),
                ([np.inf, 0], [1, 0]),
                ([np.inf, np.inf], [1, np.inf]),
                ([np.inf, np.inf], [1, 0]),
                ([-np.inf, 0], [np.inf, 0]),
                ([np.nan, 0], [np.nan, 0]),
                ([atol*2], [0]),
                ([1], [1+rtol+atol*2]),
                (aran, aran + aran*atol + atol*2),
                (np.array([np.inf, 1]), np.array([0, np.inf]))]
        for (x, y) in data:
            yield (self.tst_not_allclose, x, y)

    def test_no_parameter_modification(self):
        # allclose must not mutate its inputs.
        x = np.array([np.inf, 1])
        y = np.array([0, np.inf])
        np.allclose(x, y)
        assert_array_equal(x, np.array([np.inf, 1]))
        assert_array_equal(y, np.array([0, np.inf]))

    def test_min_int(self):
        # Could make problems because of abs(min_int) == min_int
        min_int = np.iinfo(np.int_).min
        a = np.array([min_int], dtype=np.int_)
        assert_(np.allclose(a, a))

    def test_equalnan(self):
        x = np.array([1.0, np.nan])
        assert_(np.allclose(x, x, equal_nan=True))
class TestIsclose(object):
    """Tests for np.isclose, including its agreement with np.allclose."""
    rtol = 1e-5
    atol = 1e-8

    def setup(self):
        # nose-style setup; also invoked explicitly by the test methods.
        atol = self.atol
        rtol = self.rtol
        arr = np.array([100, 1000])
        aran = np.arange(125).reshape((5, 5, 5))
        # Pairs where every element compares close.
        self.all_close_tests = [
            ([1, 0], [1, 0]),
            ([atol], [0]),
            ([1], [1 + rtol + atol]),
            (arr, arr + arr*rtol),
            (arr, arr + arr*rtol + atol),
            (aran, aran + aran*rtol),
            (np.inf, np.inf),
            (np.inf, [np.inf]),
            ([np.inf, -np.inf], [np.inf, -np.inf]),
        ]
        # Pairs where no element compares close.
        self.none_close_tests = [
            ([np.inf, 0], [1, np.inf]),
            ([np.inf, -np.inf], [1, 0]),
            ([np.inf, np.inf], [1, -np.inf]),
            ([np.inf, np.inf], [1, 0]),
            ([np.nan, 0], [np.nan, -np.inf]),
            ([atol*2], [0]),
            ([1], [1 + rtol + atol*2]),
            (aran, aran + rtol*1.1*aran + atol*1.1),
            (np.array([np.inf, 1]), np.array([0, np.inf])),
        ]
        # Pairs with a mix of close and non-close elements, plus the
        # element-wise expected results (parallel list below).
        self.some_close_tests = [
            ([np.inf, 0], [np.inf, atol*2]),
            ([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
            (np.arange(3), [0, 1, 2.1]),
            (np.nan, [np.nan, np.nan, np.nan]),
            ([0], [atol, np.inf, -np.inf, np.nan]),
            (0, [atol, np.inf, -np.inf, np.nan]),
        ]
        self.some_close_results = [
            [True, False],
            [True, False, False],
            [True, True, False],
            [False, False, False],
            [True, False, False, False],
            [True, False, False, False],
        ]

    def test_ip_isclose(self):
        self.setup()
        tests = self.some_close_tests
        results = self.some_close_results
        for (x, y), result in zip(tests, results):
            yield (assert_array_equal, np.isclose(x, y), result)

    def tst_all_isclose(self, x, y):
        assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))

    def tst_none_isclose(self, x, y):
        msg = "%s and %s shouldn't be close"
        assert_(not np.any(np.isclose(x, y)), msg % (x, y))

    def tst_isclose_allclose(self, x, y):
        msg = "isclose.all() and allclose aren't same for %s and %s"
        assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))

    def test_ip_all_isclose(self):
        self.setup()
        for (x, y) in self.all_close_tests:
            yield (self.tst_all_isclose, x, y)

    def test_ip_none_isclose(self):
        self.setup()
        for (x, y) in self.none_close_tests:
            yield (self.tst_none_isclose, x, y)

    def test_ip_isclose_allclose(self):
        self.setup()
        tests = (self.all_close_tests + self.none_close_tests +
                 self.some_close_tests)
        for (x, y) in tests:
            yield (self.tst_isclose_allclose, x, y)

    def test_equal_nan(self):
        assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
        arr = np.array([1.0, np.nan])
        assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])

    def test_masked_arrays(self):
        # Make sure to test the output type when arguments are interchanged.
        x = np.ma.masked_where([True, True, False], np.arange(3))
        assert_(type(x) is type(np.isclose(2, x)))
        assert_(type(x) is type(np.isclose(x, 2)))
        x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
        assert_(type(x) is type(np.isclose(np.inf, x)))
        assert_(type(x) is type(np.isclose(x, np.inf)))
        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
        y = np.isclose(np.nan, x, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)
        y = np.isclose(x, np.nan, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)
        x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
        y = np.isclose(x, x, equal_nan=True)
        assert_(type(x) is type(y))
        # Ensure that the mask isn't modified...
        assert_array_equal([True, True, False], y.mask)

    def test_scalar_return(self):
        assert_(np.isscalar(np.isclose(1, 1)))

    def test_no_parameter_modification(self):
        # isclose must not mutate its inputs.
        x = np.array([np.inf, 1])
        y = np.array([0, np.inf])
        np.isclose(x, y)
        assert_array_equal(x, np.array([np.inf, 1]))
        assert_array_equal(y, np.array([0, np.inf]))
class TestStdVar(TestCase):
    """Tests for np.var / np.std, including ddof and out= handling."""

    def setUp(self):
        # Alternating +/-1 sample: zero mean, population variance exactly 1.
        self.A = np.array([1, -1, 1, -1])
        self.real_var = 1

    def test_basic(self):
        assert_almost_equal(np.var(self.A), self.real_var)
        assert_almost_equal(np.std(self.A)**2, self.real_var)

    def test_scalars(self):
        # A single scalar has zero spread.
        assert_equal(np.var(1), 0)
        assert_equal(np.std(1), 0)

    def test_ddof1(self):
        # ddof=1 rescales by n/(n-1) relative to the population variance.
        assert_almost_equal(np.var(self.A, ddof=1),
                            self.real_var*len(self.A)/float(len(self.A)-1))
        assert_almost_equal(np.std(self.A, ddof=1)**2,
                            self.real_var*len(self.A)/float(len(self.A)-1))

    def test_ddof2(self):
        assert_almost_equal(np.var(self.A, ddof=2),
                            self.real_var*len(self.A)/float(len(self.A)-2))
        assert_almost_equal(np.std(self.A, ddof=2)**2,
                            self.real_var*len(self.A)/float(len(self.A)-2))

    def test_out_scalar(self):
        # A 0-d out array must be both filled and returned.
        d = np.arange(10)
        out = np.array(0.)
        r = np.std(d, out=out)
        assert_(r is out)
        assert_array_equal(r, out)
        r = np.var(d, out=out)
        assert_(r is out)
        assert_array_equal(r, out)
        r = np.mean(d, out=out)
        assert_(r is out)
        assert_array_equal(r, out)
class TestStdVarComplex(TestCase):
    """Variance and standard deviation of complex-valued data."""

    def test_basic(self):
        # Four unit-magnitude samples with zero mean have unit variance.
        samples = np.array([1, 1.j, -1, -1.j])
        expected_var = 1
        assert_almost_equal(np.var(samples), expected_var)
        assert_almost_equal(np.std(samples)**2, expected_var)

    def test_scalars(self):
        # A lone complex scalar has zero spread.
        assert_equal(np.var(1j), 0)
        assert_equal(np.std(1j), 0)
class TestCreationFuncs(TestCase):
    """Test np.ones, np.zeros, np.empty and np.full across dtype kinds,
    itemsizes, memory orders, sizes and dimensionalities."""

    def setUp(self):
        self.dtypes = ('b', 'i', 'u', 'f', 'c', 'S', 'a', 'U', 'V')
        self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
        self.ndims = 10

    def check_function(self, func, fill_value=None):
        # Sweep size x ndim x order x dtype kind x itemsize combinations.
        par = (
            (0, 1, 2),
            range(self.ndims),
            self.orders,
            self.dtypes,
            2**np.arange(9)
        )
        fill_kwarg = {}
        if fill_value is not None:
            fill_kwarg = {'fill_value': fill_value}
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            for size, ndims, order, type, bytes in itertools.product(*par):
                shape = ndims * [size]
                try:
                    dtype = np.dtype('{0}{1}'.format(type, bytes))
                except TypeError:  # dtype combination does not exist
                    continue
                else:
                    # do not fill void type
                    if fill_value is not None and type in 'V':
                        continue
                    arr = func(shape, order=order, dtype=dtype,
                               **fill_kwarg)
                    assert_(arr.dtype == dtype)
                    assert_(getattr(arr.flags, self.orders[order]))
                    if fill_value is not None:
                        if dtype.str.startswith('|S'):
                            val = str(fill_value)
                        else:
                            val = fill_value
                        assert_equal(arr, dtype.type(val))

    def test_zeros(self):
        self.check_function(np.zeros)

    def test_ones(self):
        # BUG FIX: this previously exercised np.zeros again, so np.ones
        # was never actually tested.
        self.check_function(np.ones)

    def test_empty(self):
        self.check_function(np.empty)

    def test_filled(self):
        self.check_function(np.full, 0)
        self.check_function(np.full, 1)

    def test_for_reference_leak(self):
        # The refcount of objects used as shape elements must be unchanged
        # by array creation.
        dim = 1
        beg = sys.getrefcount(dim)
        np.zeros([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.ones([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.empty([dim]*10)
        assert_(sys.getrefcount(dim) == beg)
        np.full([dim]*10, 0)
        assert_(sys.getrefcount(dim) == beg)
class TestLikeFuncs(TestCase):
    '''Test ones_like, zeros_like, empty_like and full_like'''

    def setUp(self):
        # (prototype array, dtype override) pairs covering scalars and
        # 1-D/2-D/3-D arrays in C, F and non-contiguous layouts.
        self.data = [
                # Array scalars
                (np.array(3.), None),
                (np.array(3), 'f8'),
                # 1D arrays
                (np.arange(6, dtype='f4'), None),
                (np.arange(6), 'c16'),
                # 2D C-layout arrays
                (np.arange(6).reshape(2, 3), None),
                (np.arange(6).reshape(3, 2), 'i1'),
                # 2D F-layout arrays
                (np.arange(6).reshape((2, 3), order='F'), None),
                (np.arange(6).reshape((3, 2), order='F'), 'i1'),
                # 3D C-layout arrays
                (np.arange(24).reshape(2, 3, 4), None),
                (np.arange(24).reshape(4, 3, 2), 'f4'),
                # 3D F-layout arrays
                (np.arange(24).reshape((2, 3, 4), order='F'), None),
                (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
                # 3D non-C/F-layout arrays
                (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
                (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
                ]

    def compare_array_value(self, dz, value, fill_value):
        # Check that dz is uniformly `value` (cast through dz.dtype when
        # fill_value is set; overflowing casts are skipped).
        if value is not None:
            if fill_value:
                try:
                    z = dz.dtype.type(value)
                except OverflowError:
                    pass
                else:
                    assert_(np.all(dz == z))
            else:
                assert_(np.all(dz == value))

    def check_like_function(self, like_function, value, fill_value=False):
        # Exercise like_function over self.data with each order mode.
        if fill_value:
            fill_kwarg = {'fill_value': value}
        else:
            fill_kwarg = {}
        for d, dtype in self.data:
            # default (K) order, dtype
            dz = like_function(d, dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_equal(np.array(dz.strides)*d.dtype.itemsize,
                         np.array(d.strides)*dz.dtype.itemsize)
            assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
            assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)
            # C order, default dtype
            dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)
            # F order, default dtype
            dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            assert_(dz.flags.f_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)
            # A order
            dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
            assert_equal(dz.shape, d.shape)
            if d.flags.f_contiguous:
                assert_(dz.flags.f_contiguous)
            else:
                assert_(dz.flags.c_contiguous)
            if dtype is None:
                assert_equal(dz.dtype, d.dtype)
            else:
                assert_equal(dz.dtype, np.dtype(dtype))
            self.compare_array_value(dz, value, fill_value)
        # Test the 'subok' parameter
        a = np.matrix([[1, 2], [3, 4]])
        b = like_function(a, **fill_kwarg)
        assert_(type(b) is np.matrix)
        b = like_function(a, subok=False, **fill_kwarg)
        assert_(type(b) is not np.matrix)

    def test_ones_like(self):
        self.check_like_function(np.ones_like, 1)

    def test_zeros_like(self):
        self.check_like_function(np.zeros_like, 0)

    def test_empty_like(self):
        self.check_like_function(np.empty_like, None)

    def test_filled_like(self):
        self.check_like_function(np.full_like, 0, True)
        self.check_like_function(np.full_like, 1, True)
        self.check_like_function(np.full_like, 1000, True)
        self.check_like_function(np.full_like, 123.456, True)
        self.check_like_function(np.full_like, np.inf, True)
class TestCorrelate(TestCase):
    """Tests for np.correlate in 'full' mode: argument order, reversed
    inputs, strided inputs, object dtype and complex dtype."""

    def _setup(self, dt):
        # Fixed inputs and their expected full correlations for dtype dt.
        self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
        self.xs = np.arange(1, 20)[::3]
        self.y = np.array([-1, -2, -3], dtype=dt)
        self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
        self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
        self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
        self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
        self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
        self.zs = np.array([-3., -14., -30., -48., -66., -84.,
                            -102., -54., -19.], dtype=dt)

    def test_float(self):
        # FIX: np.float64 rather than the builtin alias np.float, which was
        # deprecated in NumPy 1.20 and removed in 1.24.
        self._setup(np.float64)
        z = np.correlate(self.x, self.y, 'full')
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.x, self.y[:-1], 'full')
        assert_array_almost_equal(z, self.z1_4)
        z = np.correlate(self.y, self.x, 'full')
        assert_array_almost_equal(z, self.z2)
        z = np.correlate(self.x[::-1], self.y, 'full')
        assert_array_almost_equal(z, self.z1r)
        z = np.correlate(self.y, self.x[::-1], 'full')
        assert_array_almost_equal(z, self.z2r)
        z = np.correlate(self.xs, self.y, 'full')
        assert_array_almost_equal(z, self.zs)

    def test_object(self):
        self._setup(Decimal)
        z = np.correlate(self.x, self.y, 'full')
        assert_array_almost_equal(z, self.z1)
        z = np.correlate(self.y, self.x, 'full')
        assert_array_almost_equal(z, self.z2)

    def test_no_overwrite(self):
        # correlate must not modify its inputs.
        d = np.ones(100)
        k = np.ones(3)
        np.correlate(d, k)
        assert_array_equal(d, np.ones(100))
        assert_array_equal(k, np.ones(3))

    def test_complex(self):
        # FIX: np.complex128 rather than the removed builtin alias np.complex.
        x = np.array([1, 2, 3, 4+1j], dtype=np.complex128)
        y = np.array([-1, -2j, 3+1j], dtype=np.complex128)
        r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex128)
        # correlate conjugates the second argument; expected result is the
        # reversed conjugate of the forward correlation.
        r_z = r_z[::-1].conjugate()
        z = np.correlate(y, x, mode='full')
        assert_array_almost_equal(z, r_z)
class TestConvolve(TestCase):
    """Tests for np.convolve."""

    def test_object(self):
        # Convolving 100 ones with a 3-tap kernel of ones gives a plateau
        # of 3s away from the edges.
        signal = [1.] * 100
        kernel = [1.] * 3
        assert_array_almost_equal(np.convolve(signal, kernel)[2:-2], np.full(98, 3))

    def test_no_overwrite(self):
        # convolve must leave both of its inputs untouched.
        signal = np.ones(100)
        kernel = np.ones(3)
        np.convolve(signal, kernel)
        assert_array_equal(signal, np.ones(100))
        assert_array_equal(kernel, np.ones(3))
class TestArgwhere(object):
    """Tests for np.argwhere."""

    def test_2D(self):
        # Row/column coordinates of every entry greater than 1.
        grid = np.arange(6).reshape((2, 3))
        expected = [[0, 2],
                    [1, 0],
                    [1, 1],
                    [1, 2]]
        assert_array_equal(np.argwhere(grid > 1), expected)

    def test_list(self):
        # Plain Python lists are accepted; zero entries are filtered out.
        assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction(object):
    """Tests for np.set_string_function overriding repr() and str()."""
    # NOTE(review): np.set_string_function was removed in NumPy 2.0 --
    # confirm the NumPy version this test suite targets.

    def test_set_string_function(self):
        a = np.array([1])
        # Override repr, then restore the default by passing None.
        np.set_string_function(lambda x: "FOO", repr=True)
        assert_equal(repr(a), "FOO")
        np.set_string_function(None, repr=True)
        assert_equal(repr(a), "array([1])")
        # Same round-trip for str().
        np.set_string_function(lambda x: "FOO", repr=False)
        assert_equal(str(a), "FOO")
        np.set_string_function(None, repr=False)
        assert_equal(str(a), "[1]")
class TestRoll(TestCase):
    """Tests for np.roll on 1-D, 2-D and empty arrays."""

    def test_roll1d(self):
        rolled = np.roll(np.arange(10), 2)
        assert_equal(rolled, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))

    def test_roll2d(self):
        mat = np.reshape(np.arange(10), (2, 5))
        # Without an axis the array is rolled as if flattened.
        assert_equal(np.roll(mat, 1),
                     np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
        # Per-axis rolls.
        assert_equal(np.roll(mat, 1, axis=0),
                     np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
        assert_equal(np.roll(mat, 1, axis=1),
                     np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))

    def test_roll_empty(self):
        # Rolling an empty array is a no-op.
        assert_equal(np.roll(np.array([]), 1), np.array([]))
class TestRollaxis(TestCase):
    """Tests for np.rollaxis on a (1, 2, 3, 4) array."""

    # expected shape indexed by (axis, start) for array of
    # shape (1, 2, 3, 4)
    tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
                (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
                (0, 4): (2, 3, 4, 1),
                (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
                (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
                (1, 4): (1, 3, 4, 2),
                (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
                (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
                (2, 4): (1, 2, 4, 3),
                (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
                (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
                (3, 4): (1, 2, 3, 4)}

    def test_exceptions(self):
        # Out-of-bounds axis/start values must raise ValueError.
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
        assert_raises(ValueError, np.rollaxis, a, -5, 0)
        assert_raises(ValueError, np.rollaxis, a, 0, -5)
        assert_raises(ValueError, np.rollaxis, a, 4, 0)
        assert_raises(ValueError, np.rollaxis, a, 0, 5)

    def test_results(self):
        # For every (axis, start) pair, check the rolled shape against
        # tgtshape and that the result is a view with permuted data.
        a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
        aind = np.indices(a.shape)
        assert_(a.flags['OWNDATA'])
        for (i, j) in self.tgtshape:
            # positive axis, positive start
            res = np.rollaxis(a, axis=i, start=j)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
            assert_(not res.flags['OWNDATA'])
            # negative axis, positive start
            ip = i + 1
            res = np.rollaxis(a, axis=-ip, start=j)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, j)])
            assert_(not res.flags['OWNDATA'])
            # positive axis, negative start
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=i, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(i, 4 - jp)])
            assert_(not res.flags['OWNDATA'])
            # negative axis, negative start
            ip = i + 1
            jp = j + 1 if j < 4 else j
            res = np.rollaxis(a, axis=-ip, start=-jp)
            i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
            assert_(np.all(res[i0, i1, i2, i3] == a))
            assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
            assert_(not res.flags['OWNDATA'])
class TestCross(TestCase):
    """Tests for np.cross with 2- and 3-vectors, including broadcasting
    and axis keyword handling."""

    def test_2x2(self):
        # 2-vector cross product: the scalar z-component.
        u = [1, 2]
        v = [3, 4]
        z = -2
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)

    def test_2x3(self):
        # Mixed 2-vector / 3-vector inputs.
        u = [1, 2]
        v = [3, 4, 5]
        z = np.array([10, -5, -2])
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)

    def test_3x3(self):
        u = [1, 2, 3]
        v = [4, 5, 6]
        z = np.array([-3, 6, -3])
        cp = np.cross(u, v)
        assert_equal(cp, z)
        cp = np.cross(v, u)
        assert_equal(cp, -z)

    def test_broadcasting(self):
        # Ticket #2624 (Trac #2032)
        u = np.tile([1, 2], (11, 1))
        v = np.tile([3, 4], (11, 1))
        z = -2
        assert_equal(np.cross(u, v), z)
        assert_equal(np.cross(v, u), -z)
        assert_equal(np.cross(u, u), 0)
        u = np.tile([1, 2], (11, 1)).T
        v = np.tile([3, 4, 5], (11, 1))
        z = np.tile([10, -5, -2], (11, 1))
        assert_equal(np.cross(u, v, axisa=0), z)
        assert_equal(np.cross(v, u.T), -z)
        assert_equal(np.cross(v, v), 0)
        u = np.tile([1, 2, 3], (11, 1)).T
        v = np.tile([3, 4], (11, 1)).T
        z = np.tile([-12, 9, -2], (11, 1))
        assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
        assert_equal(np.cross(v.T, u.T), -z)
        assert_equal(np.cross(u.T, u.T), 0)
        u = np.tile([1, 2, 3], (5, 1))
        v = np.tile([4, 5, 6], (5, 1)).T
        z = np.tile([-3, 6, -3], (5, 1))
        assert_equal(np.cross(u, v, axisb=0), z)
        assert_equal(np.cross(v.T, u), -z)
        assert_equal(np.cross(u, u), 0)

    def test_broadcasting_shapes(self):
        # Result shapes under broadcasting, and ValueError for axis values
        # outside the operands' dimensions.
        u = np.ones((2, 1, 3))
        v = np.ones((5, 3))
        assert_equal(np.cross(u, v).shape, (2, 5, 3))
        u = np.ones((10, 3, 5))
        v = np.ones((2, 5))
        assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
        assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=2)
        assert_raises(ValueError, np.cross, u, v, axisa=3, axisb=0)
        u = np.ones((10, 3, 5, 7))
        v = np.ones((5, 7, 2))
        assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
        assert_raises(ValueError, np.cross, u, v, axisa=-5, axisb=2)
        assert_raises(ValueError, np.cross, u, v, axisa=1, axisb=-4)
        # gh-5885
        u = np.ones((3, 4, 2))
        for axisc in range(-2, 2):
            assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
def test_outer_out_param():
    """np.outer must write into and return the provided out= array."""
    ones5 = np.ones((5,))
    ones2 = np.ones((2,))
    grid = np.linspace(-2, 2, 5)
    out_a = np.ndarray(shape=(5, 5))
    out_b = np.ndarray(shape=(2, 5))
    returned = np.outer(ones5, grid, out_a)
    assert_equal(returned, out_a)
    assert_equal(np.outer(ones2, grid, out_b), out_b)
class TestRequire(object):
    """Tests for np.require flag handling (nose-style yield tests)."""
    flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
                  'F', 'F_CONTIGUOUS', 'FORTRAN',
                  'A', 'ALIGNED',
                  'W', 'WRITEABLE',
                  'O', 'OWNDATA']

    def generate_all_false(self, dtype):
        # Build a field view whose C/F/O/W/A flags are all False, so any
        # flag set afterwards must come from np.require.
        arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
        arr.setflags(write=False)
        a = arr['a']
        assert_(not a.flags['C'])
        assert_(not a.flags['F'])
        assert_(not a.flags['O'])
        assert_(not a.flags['W'])
        assert_(not a.flags['A'])
        return a

    def set_and_check_flag(self, flag, dtype, arr):
        if dtype is None:
            dtype = arr.dtype
        b = np.require(arr, dtype, [flag])
        assert_(b.flags[flag])
        assert_(b.dtype == dtype)
        # a further call to np.require ought to return the same array
        # unless OWNDATA is specified.
        c = np.require(b, None, [flag])
        if flag[0] != 'O':
            assert_(c is b)
        else:
            assert_(c.flags[flag])

    def test_require_each(self):
        # All combinations of input dtype, requested dtype and flag.
        id = ['f8', 'i4']
        fd = [None, 'f8', 'c16']
        for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
            a = self.generate_all_false(idtype)
            yield self.set_and_check_flag, flag, fdtype, a

    def test_unknown_requirement(self):
        a = self.generate_all_false('f8')
        assert_raises(KeyError, np.require, a, None, 'Q')

    def test_non_array_input(self):
        a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
        assert_(a.flags['O'])
        assert_(a.flags['C'])
        assert_(a.flags['A'])
        assert_(a.dtype == 'i4')
        assert_equal(a, [1, 2, 3, 4])

    def test_C_and_F_simul(self):
        # Requesting both C and F contiguity at once is contradictory.
        a = self.generate_all_false('f8')
        assert_raises(ValueError, np.require, a, None, ['C', 'F'])

    def test_ensure_array(self):
        # 'E' (ENSUREARRAY) must drop the subclass.
        class ArraySubclass(np.ndarray):
            pass
        a = ArraySubclass((2,2))
        b = np.require(a, None, ['E'])
        assert_(type(b) is np.ndarray)

    def test_preserve_subtype(self):
        # Without 'E', subclasses survive every flag request.
        class ArraySubclass(np.ndarray):
            pass
        for flag in self.flag_names:
            a = ArraySubclass((2,2))
            yield self.set_and_check_flag, flag, None, a
class TestBroadcast(TestCase):
    """Tests for the np.broadcast object."""

    def test_broadcast_in_args(self):
        # gh-5881: np.broadcast objects may themselves be passed as
        # arguments; all nestings must broadcast to the same result.
        arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
                np.empty((5, 1, 7))]
        mits = [np.broadcast(*arrs),
                np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
                np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
        for mit in mits:
            assert_equal(mit.shape, (5, 6, 7))
            assert_equal(mit.nd, 3)
            assert_equal(mit.numiter, 4)
            for a, ia in zip(arrs, mit.iters):
                assert_(a is ia.base)

    def test_number_of_arguments(self):
        # Fewer than 2 or more than 32 operands must raise ValueError.
        arr = np.empty((5,))
        for j in range(35):
            arrs = [arr] * j
            if j < 2 or j > 32:
                assert_raises(ValueError, np.broadcast, *arrs)
            else:
                mit = np.broadcast(*arrs)
                assert_equal(mit.numiter, j)
if __name__ == "__main__":
    # Allow running this test module directly via the nose-based runner.
    run_module_suite()
| bsd-3-clause |
securestate/termineter | lib/termineter/modules/get_security_info.py | 1 | 4961 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# termineter/modules/get_security_info.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import unicode_literals
from c1218.errors import C1218ReadTableError
from c1219.access.security import C1219SecurityAccess
from c1219.constants import C1219_TABLES, C1219_PROCEDURE_NAMES
from termineter.module import TermineterModuleOptical
class Module(TermineterModuleOptical):
	"""Read the C12.19 security tables and report the meter's access control.

	Gathers password constraints, stored passwords/keys and the per-table /
	per-procedure world-read/write permission flags via C1219SecurityAccess
	and prints them through the framework's status output.
	"""
	def __init__(self, *args, **kwargs):
		TermineterModuleOptical.__init__(self, *args, **kwargs)
		self.author = ['Spencer McIntyre']
		self.description = 'Get Information About The Meter\'s Access Control'
		self.detailed_description = 'This module reads various tables from 40 to gather information regarding access control. Password constraints, and access permissions to procedures and tables can be gathered with this module.'
	def run(self):
		"""Query the security tables over the serial connection and print a report."""
		conn = self.frmwk.serial_connection
		try:
			security_ctl = C1219SecurityAccess(conn)
		except C1218ReadTableError:
			self.frmwk.print_error('Could not read necessary tables')
			return
		# Summary counters taken straight from the decoded security tables.
		security_info = {}
		security_info['Number of Passwords'] = security_ctl.nbr_passwords
		security_info['Max Password Length'] = security_ctl.password_len
		security_info['Number of Keys'] = security_ctl.nbr_keys
		security_info['Number of Permissions'] = security_ctl.nbr_perm_used
		self.frmwk.print_status('Security Information:')
		fmt_string = " {0:.<38}.{1}"
		# sorted() keeps deterministic output and, unlike list.sort() on
		# dict.keys(), also works on Python 3 dict views.
		for key in sorted(security_info.keys()):
			self.frmwk.print_status(fmt_string.format(key, security_info[key]))
		self.frmwk.print_status('Passwords and Permissions:')
		fmt_string = " {0:<5} {1:<40} {2}"
		self.frmwk.print_status(fmt_string.format('Index', 'Password (In Hex)', 'Group Flags'))
		self.frmwk.print_status(fmt_string.format('-----', '-----------------', '-----------'))
		for idx, entry in security_ctl.passwords.items():
			self.frmwk.print_status(fmt_string.format(idx, entry['password'].encode('hex'), entry['groups']))
		self.frmwk.print_status('Table Permissions:')
		fmt_string = " {0:<64} {1:<14} {2:<14}"
		self.frmwk.print_status(fmt_string.format('Table Number', 'World Readable', 'World Writable'))
		self.frmwk.print_status(fmt_string.format('------------', '--------------', '--------------'))
		fmt_string = " {0:.<64} {1:<14} {2:<14}"
		for idx, entry in security_ctl.table_permissions.items():
			self.frmwk.print_status(fmt_string.format('#' + str(idx) + ' ' + (C1219_TABLES.get(idx) or 'Unknown'), str(entry['anyread']), str(entry['anywrite'])))
		self.frmwk.print_status('Procedure Permissions:')
		fmt_string = " {0:<64} {1:<14} {2:<16}"
		self.frmwk.print_status(fmt_string.format('Procedure Number', 'World Readable', 'World Executable'))
		self.frmwk.print_status(fmt_string.format('----------------', '--------------', '----------------'))
		fmt_string = " {0:.<64} {1:<14} {2:<16}"
		for idx, entry in security_ctl.procedure_permissions.items():
			self.frmwk.print_status(fmt_string.format('#' + str(idx) + ' ' + (C1219_PROCEDURE_NAMES.get(idx) or 'Unknown'), str(entry['anyread']), str(entry['anywrite'])))
		if len(security_ctl.keys):
			self.frmwk.print_status('Stored Keys:')
			fmt_string = " {0:<5} {1}"
			self.frmwk.print_status(fmt_string.format('Index', 'Hex Value'))
			self.frmwk.print_status(fmt_string.format('-----', '---------'))
			for idx, entry in security_ctl.keys.items():
				self.frmwk.print_status(fmt_string.format(idx, entry.encode('hex')))
		return
| bsd-3-clause |
ndtran/l10n-switzerland | l10n_ch_fds_upload_dd/wizard/fds_inherit_post_dd_export_upload_wizard.py | 1 | 9710 | # -*- coding: utf-8 -*-
##############################################################################
#
# Swiss Postfinance File Delivery Services module for Odoo
# Copyright (C) 2014 Compassion CH
# @author: Nicolas Tran
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions
import logging
import base64
import tempfile
import shutil
import pysftp
import os
_logger = logging.getLogger(__name__)
class fds_inherit_post_dd_export_upload_wizard(models.TransientModel):
    ''' This addon allows you to upload the Direct Debit generated file to
    your FDS Postfinance.
    This addon will add upload button to the DD wizard.
    This Class inherit from l10n_ch_lsv_dd.post_dd_export_wizard
    '''
    _inherit = 'post.dd.export.wizard'
    fds_account_directory = fields.Many2one(
        comodel_name='fds.postfinance.files.directory',
        string='FDS files directory',
        help='select one upload directory or config your upload directory'
    )
    state = fields.Selection(
        selection=[('create', 'Create'),
                   ('finish', 'finish'),
                   ('upload', 'upload'),
                   ('confirm', 'confirm')],
        readonly=True,
        default='create',
        help='[Info] keep state of the wizard'
    )
    fds_account = fields.Many2one(
        comodel_name='fds.postfinance.account',
        string='FDS account',
        help='select one FDS account or create a FDS account'
    )
    ##################################
    #         Button action          #
    ##################################
    @api.multi
    def upload_export_button(self):
        ''' change the view to the wizard or directly upload the file if
            only one FDS account and only one upload directory selected.
            Called by pressing upload button.

            :returns action: configuration for the next wizard's view
        '''
        self.ensure_one()
        # check if only one fds account
        existing_account_ids = self.fds_account.search([]).ids
        if len(existing_account_ids) != 1:
            self._state_upload_on()
            return self._do_populate_tasks()
        self.fds_account = existing_account_ids[0]
        # check if default upload directory exist
        if not self.fds_account.upload_dd_directory:
            self._state_upload_on()
            return self._do_populate_tasks()
        # check if default upload directory is allowed
        dir_name = self.fds_account.upload_dd_directory.name
        dir = self.fds_account_directory.search([('name', '=', dir_name)])
        if not dir.allow_upload_file:
            self._state_upload_on()
            return self._do_populate_tasks()
        self.fds_account_directory = self.fds_account.upload_dd_directory
        return self.send_export_button()
    @api.multi
    def send_export_button(self):
        ''' upload pain_001 file to the FDS Postfinance by sftp

            :returns action: configuration for the next wizard's view
            :raises Warning:
                - If no fds account and directory selected
                - if current user do not have key
                - if connection to sftp cannot
        '''
        self.ensure_one()
        if not self.fds_account:
            raise exceptions.Warning('Select a FDS account')
        if not self.fds_account_directory:
            raise exceptions.Warning('Select a directory')
        # check key of active user
        fds_authentication_key_obj = self.env['fds.authentication.keys']
        key = fds_authentication_key_obj.search([
            ['user_id', '=', self.env.user.id],
            ['fds_account_id', '=', self.fds_account.id]])
        if not key:
            raise exceptions.Warning('You don\'t have key')
        if not key.active_key:
            raise exceptions.Warning('Key not active')
        # Initialise before the try block so the cleanup code in ``finally``
        # can safely test which temporary resources were actually created
        # (otherwise an early failure would raise NameError during cleanup).
        tmp_key = tmp_f = tmp_d = None
        try:
            # create tmp file
            tmp_d = tempfile.mkdtemp()
            tmp_key = self._create_tmp_file(key.private_key_crypted, tmp_d)[0]
            tmp_f = self._create_tmp_file(self.file, tmp_d)[0]
            old_path_f = os.path.join(tmp_d, tmp_f.name)
            new_path_f = os.path.join(tmp_d, self.filename)
            shutil.move(old_path_f, new_path_f)
            key_pass = fds_authentication_key_obj.config()
            # upload to sftp
            with pysftp.Connection(self.fds_account.hostname,
                                   username=self.fds_account.username,
                                   private_key=tmp_key.name,
                                   private_key_pass=key_pass) as sftp:
                with sftp.cd(self.fds_account_directory.name):
                    sftp.put(new_path_f)
                    _logger.info("[OK] upload file (%s) to sftp",
                                 (self.filename))
            # change to initial name file (mandatory because of the close)
            shutil.move(new_path_f, old_path_f)
            self._state_confirm_on()
            self._add2historical()
        except Exception as e:
            _logger.error("Unable to connect to the sftp: %s", e)
            raise exceptions.Warning('Unable to connect to the sftp')
        finally:
            # Best-effort cleanup. Catch Exception (not a bare except) so
            # KeyboardInterrupt/SystemExit are not silently swallowed.
            if tmp_key is not None:
                try:
                    tmp_key.close()
                except Exception:
                    _logger.error("remove tmp_key file failed")
            if tmp_f is not None:
                try:
                    tmp_f.close()
                except Exception:
                    _logger.error("remove tmp_f file failed")
            if tmp_d is not None:
                try:
                    shutil.rmtree(tmp_d)
                except Exception:
                    _logger.error("remove tmp directory failed")
        return self._do_populate_tasks()
    @api.multi
    def back_button(self):
        ''' Go back to the finish view.
            Called by pressing back button.

            :returns action: configuration for the next wizard's view
        '''
        self.ensure_one()
        self._state_finish_on()
        return self._do_populate_tasks()
    @api.multi
    def close_button(self):
        ''' Confirm the export and leave the wizard.
            Called by pressing close button.
        '''
        self.ensure_one()
        self._state_finish_on()
        self.confirm_export()
    ##############################
    #          function          #
    ##############################
    @api.multi
    def _create_tmp_file(self, data, tmp_directory=None):
        ''' private function that write data to a tmp file and if no tmp
            directory use, create one.

            :param str data: data in base64 format
            :param str tmp_directory: path of the directory
            :returns (obj file, str directory): obj of type tempfile
        '''
        self.ensure_one()
        try:
            if not tmp_directory:
                tmp_directory = tempfile.mkdtemp()
            tmp_file = tempfile.NamedTemporaryFile(dir=tmp_directory)
            tmp_file.write(base64.b64decode(data))
            tmp_file.flush()
            return (tmp_file, tmp_directory)
        except Exception as e:
            # NOTE(review): on failure this logs and implicitly returns None,
            # which the callers then subscript — confirm intended behavior.
            _logger.error("Bad handling tmp in fds_inherit_sepa_wizard: %s", e)
    @api.multi
    def _add2historical(self):
        ''' private function that add the upload file to historic

            :returns: None
        '''
        self.ensure_one()
        values = {
            'banking_export_id': self.banking_export_ch_dd_id.id,
            'fds_account_id': self.fds_account.id,
            'filename': self.filename,
            'directory_id': self.fds_account_directory.id,
            'state': 'uploaded'}
        historical_dd_obj = self.env['fds.postfinance.historical.dd']
        historical_dd_obj.create(values)
    @api.multi
    def _state_finish_on(self):
        ''' private function that change state to finish

            :returns: None
        '''
        self.ensure_one()
        self.write({'state': 'finish'})
    @api.multi
    def _state_upload_on(self):
        ''' private function that change state to upload

            :returns: None
        '''
        self.ensure_one()
        self.write({'state': 'upload'})
    @api.multi
    def _state_confirm_on(self):
        ''' private function that change state to confirm

            :returns: None
        '''
        self.ensure_one()
        self.write({'state': 'confirm'})
    @api.multi
    def _do_populate_tasks(self):
        ''' private function that continue with the same wizard.

            :returns action: configuration for the next wizard's view
        '''
        self.ensure_one()
        action = {
            'type': 'ir.actions.act_window',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': self._name,
            'res_id': self.id,
            'target': 'new',
        }
        return action
| agpl-3.0 |
PersianWikipedia/pywikibot-core | tests/ui_tests.py | 2 | 29535 | # -*- coding: utf-8 -*-
"""Tests for the user interface."""
#
# (C) Pywikibot team, 2008-2019
#
# Distributed under the terms of the MIT license.
#
# NOTE FOR RUNNING WINDOWS UI TESTS
#
# Windows UI tests have to be run using the tests\ui_tests.bat helper script.
# This will set PYTHONPATH and PYWIKIBOT_DIR, and then run the tests. Do not
# touch mouse or keyboard while the tests are running, as this might disturb
# the interaction tests.
#
# The Windows tests were developed on a Dutch Windows 7 OS. You might need to
# adapt the helper functions in TestWindowsTerminalUnicode for other versions.
#
# For the Windows-based tests, you need the following packages installed:
# - pywin32, for clipboard access, which can be downloaded here:
# http://sourceforge.net/projects/pywin32/files/pywin32/Build%20218/
# make sure to download the package for the correct python version!
#
# - pywinauto, to send keys to the terminal, which can be installed using:
# pip install -U pywinauto
#
#
from __future__ import absolute_import, division, unicode_literals
import inspect
import io
import logging
import os
import subprocess
import sys
import time
import warnings
import pywikibot
from pywikibot.bot import (
ui, DEBUG, VERBOSE, INFO, STDOUT, INPUT, WARNING, ERROR, CRITICAL
)
from pywikibot.tools import (
PY2,
UnicodeType as unicode,
)
from pywikibot.userinterfaces import (
terminal_interface_win32, terminal_interface_base, terminal_interface_unix,
)
from tests.aspects import TestCase, TestCaseBase
from tests.utils import unittest, FakeModule
if os.name == 'nt':
from multiprocessing.managers import BaseManager
import threading
try:
import win32api
except ImportError:
win32api = None
try:
import pywinauto
except ImportError:
pywinauto = None
try:
import win32clipboard
except ImportError:
win32clipboard = None
class Stream(object):
    """Handler for a StringIO or BytesIO instance able to patch itself."""
    def __init__(self, name, patched_streams):
        """
        Create a new stream with a StringIO or BytesIO instance.

        @param name: The part after 'std' (e.g. 'err').
        @type name: str
        @param patched_streams: A mapping which maps the original stream to
            the patched stream.
        @type patched_streams: dict
        """
        buffer_factory = io.BytesIO if PY2 else io.StringIO
        self._stream = buffer_factory()
        self._name = 'std' + name
        self._original = getattr(sys, self._name)
        # Register the replacement so patched_print can redirect output.
        patched_streams[self._original] = self._stream
    def __repr__(self):
        return '<patched {0} {1!r} wrapping {2!r}>'.format(
            self._name, self._stream, self._original)
    def reset(self):
        """Reset own stream."""
        self._stream.seek(0)
        self._stream.truncate(0)
if os.name == 'nt':
    # On Windows the tests drive a *second* Python interpreter (the "slave")
    # running in its own console window, and control it via a
    # multiprocessing BaseManager proxy, so that real console I/O is
    # exercised end to end.
    class pywikibotWrapper(object):
        """pywikibot wrapper class."""
        def init(self):
            pywikibot.version._get_program_dir()
        def output(self, *args, **kwargs):
            return pywikibot.output(*args, **kwargs)
        def request_input(self, *args, **kwargs):
            # Run pywikibot.input() in a background thread so this RPC call
            # returns immediately; the blocking result is collected later
            # via get_input().
            self.input = None
            def threadedinput():
                self.input = pywikibot.input(*args, **kwargs)
            self.inputthread = threading.Thread(target=threadedinput)
            self.inputthread.start()
        def get_input(self):
            # Wait for the pending request_input() thread and return its result.
            self.inputthread.join()
            return self.input
        def set_config(self, key, value):
            setattr(pywikibot.config, key, value)
        def set_ui(self, key, value):
            setattr(pywikibot.ui, key, value)
        def cls(self):
            # Clear the slave interpreter's console window.
            os.system('cls')
    class pywikibotManager(BaseManager):
        """pywikibot manager class."""
        pass
    pywikibotManager.register(str('pywikibot'), pywikibotWrapper)
    _manager = pywikibotManager(
        address=('127.0.0.1', 47228),
        authkey=b'4DJSchgwy5L5JxueZEWbxyeG')
    # When launched with this flag (see TestWindowsTerminalUnicode), this
    # module acts as the slave interpreter and serves requests forever.
    if len(sys.argv) > 1 and sys.argv[1] == '--run-as-slave-interpreter':
        s = _manager.get_server()
        s.serve_forever()
def patched_print(text, target_stream):
    """Redirect a ui._print call to the in-memory replacement stream."""
    try:
        stream = patched_streams[target_stream]
    except KeyError:
        # On Windows the std streams are wrapped by win32_unicode's
        # UnicodeOutput; unwrap it to find the registered original stream.
        assert isinstance(target_stream,
                          pywikibot.userinterfaces.win32_unicode.UnicodeOutput)
        assert target_stream._stream
        stream = patched_streams[target_stream._stream]
    org_print(text, stream)
def patched_input():
    """Return the next pre-seeded line from the fake stdin, whitespace-stripped."""
    return strin._stream.readline().strip()
# Module-level fake std streams used by the terminal tests.
# patched_streams maps each original std stream to its in-memory replacement.
patched_streams = {}
strout = Stream('out', patched_streams)
strerr = Stream('err', patched_streams)
strin = Stream('in', {})
# Shortcuts to the underlying StringIO/BytesIO buffers used in assertions.
newstdout = strout._stream
newstderr = strerr._stream
newstdin = strin._stream
if PY2:
    # In Python 2 the sys.std* streams use bytes instead of unicode
    # But this module is using unicode_literals so '…' will generate unicode
    # So it'll convert those back into bytes
    original_write = newstdin.write
    def encoded_write(text):
        if isinstance(text, unicode):
            text = text.encode('utf8')
        original_write(text)
    newstdin.write = encoded_write
# Keep references to the real UI hooks so they can be restored in unpatch().
org_print = ui._print
org_input = ui._raw_input
def patch():
    """Patch standard terminal files."""
    # Empty the fake buffers so every test starts from a clean slate.
    strout.reset()
    strerr.reset()
    strin.reset()
    ui._print = patched_print
    ui._raw_input = patched_input
def unpatch():
    """Un-patch standard terminal files."""
    ui._print = org_print
    ui._raw_input = org_input
# Logger and LogRecord context used to emit test messages through the
# 'pywiki' logging pipeline exactly as bot code would.
logger = logging.getLogger('pywiki')
loggingcontext = {'caller_name': 'ui_tests',
                  'caller_file': 'ui_tests',
                  'caller_line': 0,
                  'newline': '\n'}
class UITestCase(TestCaseBase):
    """UI tests."""
    net = False
    def setUp(self):
        """Patch the terminal streams and reset UI-related configuration."""
        super(UITestCase, self).setUp()
        patch()
        pywikibot.config.colorized_output = True
        pywikibot.config.transliterate = False
        pywikibot.ui.transliteration_target = None
        pywikibot.ui.encoding = 'utf-8'
    def tearDown(self):
        """Restore the original terminal streams."""
        super(UITestCase, self).tearDown()
        unpatch()
    def _encode(self, string, encoding='utf-8'):
        """Encode the string on Python 2; return it unchanged on Python 3."""
        if not PY2:
            return string
        else:
            return string.encode(encoding)
class TestTerminalOutput(UITestCase):
    """Terminal output tests.

    Each test emits one message through the logging/output API and asserts
    which of the patched stdout/stderr buffers received it: only STDOUT
    level goes to stdout, user-visible levels go to stderr, and levels below
    INFO are filtered out entirely.
    """
    def testOutputLevels_logging_debug(self):
        logger.log(DEBUG, 'debug', extra=loggingcontext)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), '')
    def testOutputLevels_logging_verbose(self):
        logger.log(VERBOSE, 'verbose', extra=loggingcontext)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), '')
    def testOutputLevels_logging_info(self):
        logger.log(INFO, 'info', extra=loggingcontext)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'info\n')
    def testOutputLevels_logging_stdout(self):
        logger.log(STDOUT, 'stdout', extra=loggingcontext)
        self.assertEqual(newstdout.getvalue(), 'stdout\n')
        self.assertEqual(newstderr.getvalue(), '')
    def testOutputLevels_logging_input(self):
        logger.log(INPUT, 'input', extra=loggingcontext)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'input\n')
    def testOutputLevels_logging_WARNING(self):
        logger.log(WARNING, 'WARNING', extra=loggingcontext)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'WARNING: WARNING\n')
    def testOutputLevels_logging_ERROR(self):
        logger.log(ERROR, 'ERROR', extra=loggingcontext)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'ERROR: ERROR\n')
    def testOutputLevels_logging_CRITICAL(self):
        logger.log(CRITICAL, 'CRITICAL', extra=loggingcontext)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'CRITICAL: CRITICAL\n')
    def test_output(self):
        pywikibot.output('output', toStdout=False)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'output\n')
    def test_output_stdout(self):
        # toStdout=True still works but must raise a DeprecationWarning
        # pointing the caller to pywikibot.stdout().
        with warnings.catch_warnings(record=True) as w:
            pywikibot.output('output', toStdout=True)
        self.assertEqual(newstdout.getvalue(), 'output\n')
        self.assertLength(w, 1)
        self.assertEqual(w[0].category, DeprecationWarning)
        message = str(w[0].message)
        self.assertStringMethod(str.startswith, message,
                                '"toStdout" parameter is deprecated')
        self.assertStringMethod(str.endswith, message,
                                'use pywikibot.stdout() instead.')
    def test_stdout(self):
        pywikibot.stdout('output')
        self.assertEqual(newstdout.getvalue(), 'output\n')
        self.assertEqual(newstderr.getvalue(), '')
    def test_warning(self):
        pywikibot.warning('warning')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'WARNING: warning\n')
    def test_error(self):
        pywikibot.error('error')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'ERROR: error\n')
    def test_log(self):
        pywikibot.log('log')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), '')
    def test_critical(self):
        pywikibot.critical('critical')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'CRITICAL: critical\n')
    def test_debug(self):
        pywikibot.debug('debug', 'test')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), '')
    def test_exception(self):
        class TestException(Exception):
            """Test exception."""
        try:
            raise TestException('Testing Exception')
        except TestException:
            pywikibot.exception('exception')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(),
                         'ERROR: TestException: Testing Exception\n')
    def test_exception_tb(self):
        # With tb=True the traceback must follow the error line on stderr.
        class TestException(Exception):
            """Test exception."""
        try:
            raise TestException('Testing Exception')
        except TestException:
            pywikibot.exception('exception', tb=True)
        self.assertEqual(newstdout.getvalue(), '')
        stderrlines = newstderr.getvalue().split('\n')
        self.assertEqual(stderrlines[0],
                         'ERROR: TestException: Testing Exception')
        self.assertEqual(stderrlines[1], 'Traceback (most recent call last):')
        self.assertEqual(stderrlines[3],
                         " raise TestException('Testing Exception')")
        self.assertTrue(stderrlines[4].endswith(': Testing Exception'))
        self.assertNotEqual(stderrlines[-1], '\n')
class TestTerminalInput(UITestCase):
    """Terminal input tests.

    Answers are pre-seeded into the fake stdin buffer; pywikibot.input /
    input_choice then consume them via the patched raw-input hook.
    """
    # Prompt that input_choice is expected to print for the three options.
    input_choice_output = 'question ([A]nswer 1, a[n]swer 2, an[s]wer 3): '
    def testInput(self):
        newstdin.write('input to read\n')
        newstdin.seek(0)
        returned = pywikibot.input('question')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(newstderr.getvalue(), 'question: ')
        self.assertIsInstance(returned, unicode)
        self.assertEqual(returned, 'input to read')
    def _call_input_choice(self):
        # Helper: run input_choice with three fixed options, default 'A'.
        rv = pywikibot.input_choice(
            'question',
            (('answer 1', 'A'),
             ('answer 2', 'N'),
             ('answer 3', 'S')),
            'A',
            automatic_quit=False)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertIsInstance(rv, unicode)
        return rv
    def testInputChoiceDefault(self):
        # An empty answer selects the default and is returned lowercased.
        newstdin.write('\n')
        newstdin.seek(0)
        returned = self._call_input_choice()
        self.assertEqual(returned, 'a')
    def testInputChoiceCapital(self):
        newstdin.write('N\n')
        newstdin.seek(0)
        returned = self._call_input_choice()
        self.assertEqual(newstderr.getvalue(), self.input_choice_output)
        self.assertEqual(returned, 'n')
    def testInputChoiceNonCapital(self):
        newstdin.write('n\n')
        newstdin.seek(0)
        returned = self._call_input_choice()
        self.assertEqual(newstderr.getvalue(), self.input_choice_output)
        self.assertEqual(returned, 'n')
    def testInputChoiceIncorrectAnswer(self):
        # An invalid answer causes the prompt to be repeated once.
        newstdin.write('X\nN\n')
        newstdin.seek(0)
        returned = self._call_input_choice()
        self.assertEqual(newstderr.getvalue(),
                         self.input_choice_output * 2)
        self.assertEqual(returned, 'n')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTerminalOutputColorUnix(UITestCase):
    """Terminal output color tests."""
    # \03{...} is pywikibot's color markup; on Unix it maps to ANSI escapes.
    str1 = 'text \03{lightpurple}light purple text\03{default} text'
    def testOutputColorizedText(self):
        pywikibot.output(self.str1)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            'text \x1b[95mlight purple text\x1b[0m text\n')
    def testOutputNoncolorizedText(self):
        # With colorized_output disabled the markup is dropped and the
        # ' ***' suffix marks that color information was removed.
        pywikibot.config.colorized_output = False
        pywikibot.output(self.str1)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            'text light purple text text ***\n')
    str2 = ('normal text \03{lightpurple} light purple '
            '\03{lightblue} light blue \03{previous} light purple '
            '\03{default} normal text')
    def testOutputColorCascade_incorrect(self):
        """Test incorrect behavior of testOutputColorCascade."""
        pywikibot.output(self.str2)
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            'normal text \x1b[95m light purple '
            '\x1b[94m light blue \x1b[95m light purple '
            '\x1b[0m normal text\n')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTerminalUnicodeUnix(UITestCase):
    """Terminal output tests for unix."""
    def testOutputUnicodeText(self):
        # Non-ASCII output is written UTF-8 encoded (bytes on Python 2).
        pywikibot.output('Заглавная_страница')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            self._encode('Заглавная_страница\n', 'utf-8'))
    def testInputUnicodeText(self):
        # Non-ASCII input read from stdin must come back as unicode text.
        newstdin.write(self._encode('Заглавная_страница\n', 'utf-8'))
        newstdin.seek(0)
        returned = pywikibot.input('Википедию? ')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            self._encode('Википедию? ', 'utf-8'))
        self.assertIsInstance(returned, unicode)
        self.assertEqual(returned, 'Заглавная_страница')
@unittest.skipUnless(os.name == 'posix', 'requires Unix console')
class TestTransliterationUnix(UITestCase):
    """Terminal output transliteration tests."""
    def testOutputTransliteratedUnicodeText(self):
        # With a latin-1 terminal, characters outside that encoding are
        # transliterated and each substituted character is highlighted
        # (\x1b[93m ... \x1b[0m) so the user can tell it was replaced.
        pywikibot.ui.encoding = 'latin-1'
        pywikibot.config.transliterate = True
        pywikibot.output('abcd АБГД αβγδ あいうえお')
        self.assertEqual(newstdout.getvalue(), '')
        self.assertEqual(
            newstderr.getvalue(),
            'abcd \x1b[93mA\x1b[0m\x1b[93mB\x1b[0m\x1b[93mG\x1b[0m'
            '\x1b[93mD\x1b[0m \x1b[93ma\x1b[0m\x1b[93mb\x1b[0m\x1b[93mg'
            '\x1b[0m\x1b[93md\x1b[0m \x1b[93ma\x1b[0m\x1b[93mi\x1b[0m'
            '\x1b[93mu\x1b[0m\x1b[93me\x1b[0m\x1b[93mo\x1b[0m\n')
@unittest.skipUnless(os.name == 'nt', 'requires Windows console')
class WindowsTerminalTestCase(UITestCase):
    """MS Windows terminal tests.

    Drives a real console window via pywinauto keystrokes and exchanges
    data with it through the Windows clipboard (win32clipboard).
    """
    @classmethod
    def setUpClass(cls):
        # Skip (rather than fail) when any of the Windows-only helper
        # packages is unavailable.
        if os.name != 'nt':
            raise unittest.SkipTest('requires Windows console')
        if not win32api:
            raise unittest.SkipTest('requires Windows package pywin32')
        if not win32clipboard:
            raise unittest.SkipTest('requires Windows package win32clipboard')
        if not pywinauto:
            raise unittest.SkipTest('requires Windows package pywinauto')
        try:
            # pywinauto 0.5.0
            cls._app = pywinauto.Application()
        except AttributeError as e1:
            # older pywinauto exposes Application under .application
            try:
                cls._app = pywinauto.application.Application()
            except AttributeError as e2:
                raise unittest.SkipTest('pywinauto Application failed: {}\n{}'
                                        .format(e1, e2))
        super(WindowsTerminalTestCase, cls).setUpClass()
    @classmethod
    def setUpProcess(cls, command):
        """Start *command* in a new console and attach pywinauto to it."""
        si = subprocess.STARTUPINFO()
        si.dwFlags = subprocess.STARTF_USESTDHANDLES
        cls._process = subprocess.Popen(
            command, creationflags=subprocess.CREATE_NEW_CONSOLE)
        cls._app.connect_(process=cls._process.pid)
        # set truetype font (Lucida Console, hopefully)
        try:
            window = cls._app.window_()
        except Exception as e:
            cls.tearDownProcess()
            raise unittest.SkipTest(
                'Windows package pywinauto could not locate window: {!r}'
                .format(e))
        try:
            window.TypeKeys('% {UP}{ENTER}%L{HOME}L{ENTER}', with_spaces=True)
        except Exception as e:
            cls.tearDownProcess()
            raise unittest.SkipTest(
                'Windows package pywinauto could not use window TypeKeys: {!r}'
                .format(e))
    @classmethod
    def tearDownProcess(cls):
        """Kill the console process started by setUpProcess."""
        cls._process.kill()
    def setUp(self):
        super(WindowsTerminalTestCase, self).setUp()
        self.setclip('')
    def waitForWindow(self):
        # Busy-wait until the console window accepts keystrokes again.
        while not self._app.window_().IsEnabled():
            time.sleep(0.01)
    def getstdouterr(self):
        """Return the console's visible output via select-all + copy."""
        sentinel = '~~~~SENTINEL~~~~cedcfc9f-7eed-44e2-a176-d8c73136c185'
        # NOTE(review): the loop below waits until the clipboard differs from
        # the sentinel, but the sentinel is never placed in the clipboard
        # first — presumably a setclip(sentinel) call is intended; verify.
        # select all and copy to clipboard
        self._app.window_().SetFocus()
        self.waitForWindow()
        self._app.window_().TypeKeys(
            '% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{DOWN}{ENTER}{ENTER}',
            with_spaces=True)
        while True:
            data = self.getclip()
            if data != sentinel:
                return data
            time.sleep(0.01)
    def setclip(self, text):
        """Put *text* on the Windows clipboard as unicode."""
        win32clipboard.OpenClipboard()
        win32clipboard.SetClipboardData(win32clipboard.CF_UNICODETEXT,
                                        unicode(text))
        win32clipboard.CloseClipboard()
    def getclip(self):
        """Read the clipboard, normalising NULs and CRLF line endings."""
        win32clipboard.OpenClipboard()
        data = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)
        win32clipboard.CloseClipboard()
        data = data.split('\x00')[0]
        data = data.replace('\r\n', '\n')
        return data
    def sendstdin(self, text):
        """Paste *text* into the console as if typed on stdin."""
        self.setclip(text.replace('\n', '\r\n'))
        self._app.window_().SetFocus()
        self.waitForWindow()
        self._app.window_().TypeKeys(
            '% {UP}{UP}{UP}{RIGHT}{DOWN}{DOWN}{ENTER}',
            with_spaces=True)
class TestWindowsTerminalUnicode(WindowsTerminalTestCase):
    """MS Windows terminal unicode tests.

    Runs this very module as a slave interpreter (see the
    '--run-as-slave-interpreter' branch above) and controls it over the
    multiprocessing manager proxy stored in ``self.pywikibot``.
    """
    @classmethod
    def setUpClass(cls):
        super(TestWindowsTerminalUnicode, cls).setUpClass()
        fn = inspect.getfile(inspect.currentframe())
        cls.setUpProcess(['python', 'pwb.py', fn,
                          '--run-as-slave-interpreter'])
        _manager.connect()
        cls.pywikibot = _manager.pywikibot()
    @classmethod
    def tearDownClass(cls):
        del cls.pywikibot
        cls.tearDownProcess()
    def setUp(self):
        # Reset the slave's UI configuration and clear its console.
        super(TestWindowsTerminalUnicode, self).setUp()
        self.pywikibot.set_config('colorized_output', True)
        self.pywikibot.set_config('transliterate', False)
        self.pywikibot.set_config('console_encoding', 'utf-8')
        self.pywikibot.set_ui('transliteration_target', None)
        self.pywikibot.set_ui('encoding', 'utf-8')
        self.pywikibot.cls()
    def testOutputUnicodeText_no_transliterate(self):
        self.pywikibot.output('Заглавная_страница')
        self.assertEqual(self.getstdouterr(), 'Заглавная_страница\n')
    def testOutputUnicodeText_transliterate(self):
        self.pywikibot.set_config('transliterate', True)
        self.pywikibot.set_ui('transliteration_target', 'latin-1')
        self.pywikibot.output('Заглавная_страница')
        self.assertEqual(self.getstdouterr(), 'Zaglavnaya_stranica\n')
    def testInputUnicodeText(self):
        self.pywikibot.set_config('transliterate', True)
        # request_input is asynchronous on the slave; the answer is pasted
        # into its console and then collected with get_input().
        self.pywikibot.request_input('Википедию? ')
        self.assertEqual(self.getstdouterr(), 'Википедию?')
        self.sendstdin('Заглавная_страница\n')
        returned = self.pywikibot.get_input()
        self.assertEqual(returned, 'Заглавная_страница')
class TestWindowsTerminalUnicodeArguments(WindowsTerminalTestCase):
    """MS Windows terminal unicode argument tests.

    Types a ``python -c`` one-liner into a cmd.exe console and checks that
    non-ASCII command line arguments survive the round trip.
    """
    @classmethod
    def setUpClass(cls):
        super(TestWindowsTerminalUnicodeArguments, cls).setUpClass()
        cls.setUpProcess(['cmd', '/k', 'echo off'])
    @classmethod
    def tearDownClass(cls):
        cls.tearDownProcess()
    def testOutputUnicodeText_no_transliterate(self):
        self.sendstdin(
            "python -c \"import os, pywikibot; os.system('cls'); "
            "pywikibot.output('\\n'.join(pywikibot.handleArgs()))\" "
            'Alpha Bετα Гамма دلتا\n')
        lines = []
        for i in range(3):
            lines = self.getstdouterr().split('\n')
            # Fix: the break condition previously read "'Alpha' not in
            # lines", which contradicts the comment below and made the
            # retry loop break on *stale* clipboard content instead of on
            # the expected output.
            if len(lines) >= 4 and 'Alpha' in lines:
                # if len(lines) < 4, we assume not all lines had been output
                # yet, and retry. We check at least one of the lines contains
                # "Alpha" to prevent using older clipboard content. We limit
                # the number of retries to 3 so that the test will finish even
                # if neither of these requirements are met.
                break
            time.sleep(1)
        # empty line is the new command line
        self.assertEqual(lines, ['Alpha', 'Bετα', 'Гамма', 'دلتا', ''])
# TODO: add tests for background colors.
class FakeUITest(TestCase):
    """Test case to allow doing uncolorized general UI tests.

    Each test sets ``self._colors`` to the expected sequence of
    (color name, number of characters printed since the previous color
    change) pairs; the patched encounter_color hook of the subclasses
    verifies them in order.
    """
    net = False
    expected = 'Hello world you! ***'
    expect_color = False
    ui_class = terminal_interface_base.UI
    def setUp(self):
        """Create dummy instances for the test and patch encounter_color."""
        super(FakeUITest, self).setUp()
        if PY2:
            self.stream = io.BytesIO()
        else:
            self.stream = io.StringIO()
        self.ui_obj = self.ui_class()
        self._orig_encounter_color = self.ui_obj.encounter_color
        self.ui_obj.encounter_color = self._encounter_color
        self._index = 0
    def tearDown(self):
        """Unpatch the encounter_color method."""
        self.ui_obj.encounter_color = self._orig_encounter_color
        super(FakeUITest, self).tearDown()
        # All expected color changes (and no more) must have happened.
        self.assertEqual(self._index,
                         len(self._colors) if self.expect_color else 0)
    def _getvalue(self):
        """Get the value of the stream and also decode it on Python 2."""
        value = self.stream.getvalue()
        if PY2:
            value = value.decode(self.ui_obj.encoding)
        return value
    def _encounter_color(self, color, target_stream):
        """Patched encounter_color method."""
        # The base (uncolorized) UI must never emit color changes.
        assert False, 'This method should not be invoked'
    def test_no_color(self):
        """Test a string without any colors."""
        self._colors = ()
        self.ui_obj._print('Hello world you!', self.stream)
        self.assertEqual(self._getvalue(), 'Hello world you!')
    def test_one_color(self):
        """Test a string using one color."""
        self._colors = (('red', 6), ('default', 10))
        self.ui_obj._print('Hello \03{red}world you!', self.stream)
        self.assertEqual(self._getvalue(), self.expected)
    def test_flat_color(self):
        """Test using colors with defaulting in between."""
        self._colors = (('red', 6), ('default', 6), ('yellow', 3),
                        ('default', 1))
        self.ui_obj._print('Hello \03{red}world \03{default}you\03{yellow}!',
                           self.stream)
        self.assertEqual(self._getvalue(), self.expected)
    def test_stack_with_pop_color(self):
        """Test using stacked colors and just popping the latest color."""
        self._colors = (('red', 6), ('yellow', 6), ('red', 3), ('default', 1))
        self.ui_obj._print('Hello \03{red}world \03{yellow}you\03{previous}!',
                           self.stream)
        self.assertEqual(self._getvalue(), self.expected)
    def test_stack_implicit_color(self):
        """Test using stacked colors without popping any."""
        self._colors = (('red', 6), ('yellow', 6), ('default', 4))
        self.ui_obj._print('Hello \03{red}world \03{yellow}you!', self.stream)
        self.assertEqual(self._getvalue(), self.expected)
    def test_one_color_newline(self):
        """Test with trailing new line and one color."""
        self._colors = (('red', 6), ('default', 11))
        self.ui_obj._print('Hello \03{red}world you!\n', self.stream)
        self.assertEqual(self._getvalue(), self.expected + '\n')
class FakeUIColorizedTestBase(TestCase):
    """Base class for test cases requiring that colorized output is active."""
    expect_color = True
    def setUp(self):
        """Force colorized_output to True."""
        super(FakeUIColorizedTestBase, self).setUp()
        # Remember the user's setting so tearDown can restore it.
        self._old_config = pywikibot.config2.colorized_output
        pywikibot.config2.colorized_output = True
    def tearDown(self):
        """Undo colorized_output configuration."""
        pywikibot.config2.colorized_output = self._old_config
        super(FakeUIColorizedTestBase, self).tearDown()
class FakeUnixTest(FakeUIColorizedTestBase, FakeUITest):
    """Test case to allow doing colorized Unix tests in any environment."""

    net = False
    expected = 'Hello world you!'
    ui_class = terminal_interface_unix.UnixUI

    def _encounter_color(self, color, target_stream):
        """Verify that the written data, color and stream are correct."""
        self.assertIs(target_stream, self.stream)
        # Colors must be requested in exactly the order given in _colors.
        expected_color = self._colors[self._index][0]
        self._index += 1
        self.assertEqual(color, expected_color)
        # At each color switch the stream must hold exactly the characters
        # emitted so far: the sum of segment lengths up to this switch.
        self.assertLength(self.stream.getvalue(),
                          sum(e[1] for e in self._colors[:self._index]))
class FakeWin32Test(FakeUIColorizedTestBase, FakeUITest):

    """
    Test case to allow doing colorized Win32 tests in any environment.

    This only patches the ctypes import in the terminal_interface_win32
    module. As the Win32CtypesUI is using the std-streams from another
    import these will be unpatched.
    """

    net = False
    expected = 'Hello world you!'
    ui_class = terminal_interface_win32.Win32UI

    def setUp(self):
        """Patch the ctypes import and initialize a stream and UI instance."""
        super(FakeWin32Test, self).setUp()
        self._orig_ctypes = terminal_interface_win32.ctypes
        # Build a fake 'ctypes.windll.kernel32' module hierarchy whose
        # SetConsoleTextAttribute records each color change via
        # _handle_setattr instead of touching a real console.
        ctypes = FakeModule.create_dotted('ctypes.windll.kernel32')
        ctypes.windll.kernel32.SetConsoleTextAttribute = self._handle_setattr
        terminal_interface_win32.ctypes = ctypes
        # A non-None console handle makes the UI take the colorized path.
        self.stream._hConsole = object()

    def tearDown(self):
        """Unpatch the ctypes import and check that all colors were used."""
        terminal_interface_win32.ctypes = self._orig_ctypes
        super(FakeWin32Test, self).tearDown()

    def _encounter_color(self, color, target_stream):
        """Call the original method."""
        self._orig_encounter_color(color, target_stream)

    def _handle_setattr(self, handle, attribute):
        """Dummy method to handle SetConsoleTextAttribute."""
        # The UI must always use the console handle created in setUp.
        self.assertIs(handle, self.stream._hConsole)
        color = self._colors[self._index][0]
        self._index += 1
        # Translate the symbolic color into the Win32 attribute value.
        color = terminal_interface_win32.windowsColors[color]
        self.assertEqual(attribute, color)
        # The stream must contain exactly the characters written so far.
        self.assertLength(self.stream.getvalue(),
                          sum(e[1] for e in self._colors[:self._index]))
class FakeWin32UncolorizedTest(FakeWin32Test):

    """Test case to allow doing uncolorized Win32 tests in any environment."""

    net = False
    # NOTE(review): the trailing ' ***' suggests the UI appends a marker
    # when colors are requested but unavailable -- confirm against the UI
    # implementation.
    expected = 'Hello world you! ***'
    expect_color = False

    def setUp(self):
        """Change the local stream's console to None to disable colors."""
        super(FakeWin32UncolorizedTest, self).setUp()
        self.stream._hConsole = None
if __name__ == '__main__':  # pragma: no cover
    try:
        try:
            unittest.main()
        except SystemExit:
            # unittest.main() exits via SystemExit; swallow it so the
            # cleanup in the finally clause below still runs.
            pass
    finally:
        # Always undo the UI patching, even if the test run failed.
        unpatch()
| mit |
kid143/shadowsocks | shadowsocks/crypto/rc4_md5.py | 1042 | 1339 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
__all__ = ['ciphers']
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
                  i=1, padding=1):
    """Create an RC4 cipher keyed with MD5(key || iv) -- the "rc4-md5" scheme.

    Only *key*, *iv* and *op* are used; the remaining parameters exist to
    match the common cipher-factory signature used by the registry.
    """
    # Derive the per-stream RC4 session key from the pre-shared key and IV.
    session_key = hashlib.md5(key + iv).digest()
    return openssl.OpenSSLCrypto(b'rc4', session_key, b'', op)
# Cipher registry consumed by the crypto loader:
# method name -> (key length in bytes, IV length in bytes, factory function).
ciphers = {
    'rc4-md5': (16, 16, create_cipher),
}
def test():
    """Round-trip sample data through an encrypt/decrypt cipher pair."""
    from shadowsocks.crypto import util

    encryptor = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1)
    decryptor = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(encryptor, decryptor)
if __name__ == '__main__':
    # Allow running this module directly as a smoke test.
    test()
| apache-2.0 |
fijal/vmprof | vmprof/main.py | 1 | 2696 | # -*- coding: utf-8 -*-
import hashlib
import json
import zlib
from base64 import b64decode
from django.conf.urls import url, include
from django.contrib.staticfiles import views
from rest_framework import routers
from rest_framework.response import Response
from rest_framework import viewsets, serializers
from vmprof.process.addrspace import Profiles
from .models import Log
def present_function_items(log, func=None):
    """Decompress a stored profile log and summarize it per function.

    ``log.data`` holds zlib-compressed JSON with ``profiles`` (samples whose
    first element is a list of addresses) and ``addresses`` (address ->
    symbolic name).  Returns a list of dicts with file/name/line and the
    percentage of samples attributed to each function.  If *func* is given,
    the report is restricted via ``Profiles.generate_per_function``.
    """
    payload = json.loads(zlib.decompress(log.data))

    # JSON object keys are strings; the profiler uses integer addresses.
    addresses = {int(addr): symbol
                 for addr, symbol in payload['addresses'].iteritems()}

    raw_profiles = payload['profiles']
    for prof in raw_profiles:
        # Resolve each sampled address to its symbolic name in place.
        prof[0] = [addresses[addr] for addr in prof[0]]
    profiles = Profiles(raw_profiles)

    if func:
        counts, total = profiles.generate_per_function(func)
        ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    else:
        ranked = sorted(profiles.functions.items(),
                        key=lambda kv: kv[1], reverse=True)
        total = len(profiles.profiles)

    functions = []
    for name, count in ranked:
        # Names are ':'-separated; index 1 is the file, 2 the function
        # name, 3 the line number (index 0 is unused here).
        parts = name.split(":")
        functions.append({
            "id": name,
            "file": parts[1],
            "name": parts[2],
            "line": parts[3],
            "time": int(float(count) / total * 100),
        })
    return functions
class LogSerializer(serializers.ModelSerializer):
    """Serialize a Log as its checksum plus per-function profile data."""

    # Computed by get_functions() below rather than stored on the model.
    functions = serializers.SerializerMethodField()

    class Meta:
        model = Log
        fields = ('checksum', 'functions')

    def get_functions(self, obj):
        # An optional ?function=<id> query parameter narrows the report
        # to a single function's breakdown.
        function = self.context['request'].GET.get('function')
        return present_function_items(obj, function)
class LogViewSet(viewsets.ModelViewSet):
    """Upload (create) and fetch (retrieve) profiling logs keyed by checksum."""

    queryset = Log.objects.all()
    serializer_class = LogSerializer

    def create(self, request):
        # Clients POST the raw (compressed) profile base64-encoded in 'data'.
        data = b64decode(request.POST['data'])
        checksum = hashlib.md5(data).hexdigest()
        try:
            # De-duplicate uploads: identical payloads share a single row.
            log = self.queryset.get(checksum=checksum)
        except Log.DoesNotExist:
            log = self.queryset.create(data=data, checksum=checksum)
        # Return the checksum so the client can build the report URL.
        return Response(log.checksum)

    def retrieve(self, request, pk=None):
        try:
            log = self.queryset.get(pk=pk)
            # Pass the request through so the serializer can read query
            # parameters (see LogSerializer.get_functions).
            return Response(self.serializer_class(
                log, context={'request': request}
            ).data)
        except Log.DoesNotExist:
            return Response(status=404)
# REST API routing: exposes /api/log/ through LogViewSet.
router = routers.DefaultRouter()
router.register(r'log', LogViewSet)

urlpatterns = [
    url(r'^api/', include(router.urls)),
    # Serve the single-page frontend at the site root.
    url(r'^$', views.serve, {'path': 'index.html', 'insecure': True}),
]
| mit |
materia-coin/materia | qa/rpc-tests/p2p-compactblocks.py | 2 | 43214 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import MateriaTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.siphash import siphash256
from test_framework.script import CScript, OP_TRUE
'''
CompactBlocksTest -- test compact blocks (BIP 152)
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
'''
# TestNode: A peer we use to send messages to materiad, and store responses.
class TestNode(SingleNodeConnCB):
    """P2P peer that records the last message of each relevant type.

    Message handlers stash incoming messages on the instance so tests can
    inspect them under mininode_lock; block announcements (inv, headers,
    cmpctblock) additionally set block_announced and collect the announced
    hashes for synchronization.
    """

    def __init__(self):
        SingleNodeConnCB.__init__(self)
        self.last_sendcmpct = []
        self.last_headers = None
        self.last_inv = None
        self.last_cmpctblock = None
        self.block_announced = False
        self.last_getdata = None
        self.last_getblocktxn = None
        self.last_block = None
        self.last_blocktxn = None
        # Store the hashes of blocks we've seen announced.
        # This is for synchronizing the p2p message traffic,
        # so we can eg wait until a particular block is announced.
        self.set_announced_blockhashes = set()

    def on_sendcmpct(self, conn, message):
        # Append rather than overwrite: tests inspect all announced versions.
        self.last_sendcmpct.append(message)

    def on_block(self, conn, message):
        self.last_block = message

    def on_cmpctblock(self, conn, message):
        self.last_cmpctblock = message
        self.block_announced = True
        self.last_cmpctblock.header_and_shortids.header.calc_sha256()
        self.set_announced_blockhashes.add(self.last_cmpctblock.header_and_shortids.header.sha256)

    def on_headers(self, conn, message):
        self.last_headers = message
        self.block_announced = True
        for x in self.last_headers.headers:
            x.calc_sha256()
            self.set_announced_blockhashes.add(x.sha256)

    def on_inv(self, conn, message):
        self.last_inv = message
        for x in self.last_inv.inv:
            # Type 2 == MSG_BLOCK: only block invs count as announcements.
            if x.type == 2:
                self.block_announced = True
                self.set_announced_blockhashes.add(x.hash)

    def on_getdata(self, conn, message):
        self.last_getdata = message

    def on_getblocktxn(self, conn, message):
        self.last_getblocktxn = message

    def on_blocktxn(self, conn, message):
        self.last_blocktxn = message

    # Requires caller to hold mininode_lock
    def received_block_announcement(self):
        return self.block_announced

    def clear_block_announcement(self):
        """Reset announcement state before waiting for the next block."""
        with mininode_lock:
            self.block_announced = False
            self.last_inv = None
            self.last_headers = None
            self.last_cmpctblock = None

    def get_headers(self, locator, hashstop):
        """Send a getheaders request with the given locator and stop hash."""
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
        """Announce new_blocks to the node via a headers message."""
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def request_headers_and_sync(self, locator, hashstop=0):
        """Request headers and block until an announcement is received."""
        self.clear_block_announcement()
        self.get_headers(locator, hashstop)
        assert(wait_until(self.received_block_announcement, timeout=30))
        assert(self.received_block_announcement())
        self.clear_block_announcement()

    # Block until a block announcement for a particular block hash is
    # received.
    def wait_for_block_announcement(self, block_hash, timeout=30):
        def received_hash():
            return (block_hash in self.set_announced_blockhashes)
        return wait_until(received_hash, timeout=timeout)
class CompactBlocksTest(MateriaTestFramework):
def __init__(self):
    super().__init__()
    self.setup_clean_chain = True
    # Node0 = pre-segwit, node1 = segwit-aware
    self.num_nodes = 2
    # Anyone-can-spend outputs created by make_utxos(); each entry is
    # [txid-as-int, vout index, value].
    self.utxos = []
def setup_network(self):
    """Start one pre-segwit node and one segwit-aware node, connected."""
    self.nodes = []

    # Start up node0 to be a version 1, pre-segwit node.
    self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                             [["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"],
                              ["-debug", "-logtimemicros", "-txindex"]])
    connect_nodes(self.nodes[0], 1)
def build_block_on_tip(self, node, segwit=False):
    """Create and solve a new block extending *node*'s current tip."""
    height = node.getblockcount()
    tip = node.getbestblockhash()
    # The block timestamp must exceed the tip's median-time-past.
    mtp = node.getblockheader(tip)['mediantime']
    block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
    block.nVersion = 4
    if segwit:
        add_witness_commitment(block)
    block.solve()
    return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
    """Mine a coinbase to maturity and split it into 10 spendable utxos."""
    # Doesn't matter which node we use, just use node0.
    block = self.build_block_on_tip(self.nodes[0])
    self.test_node.send_and_ping(msg_block(block))
    assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
    # Mature the coinbase (100 confirmations) so it can be spent.
    self.nodes[0].generate(100)

    total_value = block.vtx[0].vout[0].nValue
    out_value = total_value // 10
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
    for i in range(10):
        tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
    tx.rehash()

    block2 = self.build_block_on_tip(self.nodes[0])
    block2.vtx.append(tx)
    block2.hashMerkleRoot = block2.calc_merkle_root()
    block2.solve()
    self.test_node.send_and_ping(msg_block(block2))
    assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
    self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
    return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
got_message = wait_until(received_sendcmpct, timeout=30)
assert(received_sendcmpct())
assert(got_message)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
assert(got_message)
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_cmpctblock, peer.last_inv))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version-1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version-1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: p.last_cmpctblock is not None)
# This test actually causes materiad to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
    """Send a compact block with an out-of-range prefilled index."""
    self.nodes[0].generate(101)
    block = self.build_block_on_tip(self.nodes[0])

    cmpct_block = P2PHeaderAndShortIDs()
    cmpct_block.header = CBlockHeader(block)
    cmpct_block.prefilled_txn_length = 1
    # This index will be too high
    prefilled_txn = PrefilledTransaction(1, block.vtx[0])
    cmpct_block.prefilled_txn = [prefilled_txn]
    self.test_node.send_and_ping(msg_cmpctblock(cmpct_block))
    # The malformed compact block must not advance the tip.
    assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# materiad's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
    """Mine a block full of transactions and validate the announced compact
    block against BIP 152: header hash, prefilled coinbase, witness
    stripping (version 1), and every shortid recomputed from the announced
    siphash keys."""
    # Generate a bunch of transactions.
    node.generate(101)
    num_transactions = 25
    address = node.getnewaddress()
    if use_witness_address:
        # Want at least one segwit spend, so move all funds to
        # a witness address.
        address = node.addwitnessaddress(address)
        value_to_send = node.getbalance()
        node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
        node.generate(1)

    segwit_tx_generated = False
    for i in range(num_transactions):
        txid = node.sendtoaddress(address, 0.1)
        hex_tx = node.gettransaction(txid)["hex"]
        tx = FromHex(CTransaction(), hex_tx)
        if not tx.wit.is_null():
            segwit_tx_generated = True

    if use_witness_address:
        assert(segwit_tx_generated)  # check that our test is not broken

    # Wait until we've seen the block announcement for the resulting tip
    tip = int(node.getbestblockhash(), 16)
    assert(test_node.wait_for_block_announcement(tip))

    # Now mine a block, and look at the resulting compact block.
    test_node.clear_block_announcement()
    block_hash = int(node.generate(1)[0], 16)

    # Store the raw block in our internal format.
    # BUGFIX: use %064x so the hash is zero-padded to the 64 hex digits
    # the getblock RPC requires; the previous "%02x" dropped leading zero
    # nibbles and made this call fail intermittently.
    block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
    [tx.calc_sha256() for tx in block.vtx]
    block.rehash()

    # Don't care which type of announcement came back for this test; just
    # request the compact block if we didn't get one yet.
    wait_until(test_node.received_block_announcement, timeout=30)
    assert(test_node.received_block_announcement())

    with mininode_lock:
        if test_node.last_cmpctblock is None:
            test_node.clear_block_announcement()
            inv = CInv(4, block_hash)  # 4 == "CompactBlock"
            test_node.send_message(msg_getdata([inv]))

    wait_until(test_node.received_block_announcement, timeout=30)
    assert(test_node.received_block_announcement())

    # Now we should have the compactblock
    header_and_shortids = None
    with mininode_lock:
        assert(test_node.last_cmpctblock is not None)
        # Convert the on-the-wire representation to absolute indexes
        header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)

    # Check that we got the right block!
    header_and_shortids.header.calc_sha256()
    assert_equal(header_and_shortids.header.sha256, block_hash)

    # Make sure the prefilled_txn appears to have included the coinbase
    assert(len(header_and_shortids.prefilled_txn) >= 1)
    assert_equal(header_and_shortids.prefilled_txn[0].index, 0)

    # Check that all prefilled_txn entries match what's in the block.
    for entry in header_and_shortids.prefilled_txn:
        entry.tx.calc_sha256()
        # This checks the non-witness parts of the tx agree
        assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)

        # And this checks the witness
        wtxid = entry.tx.calc_sha256(True)
        if version == 2:
            assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
        else:
            # Shouldn't have received a witness
            assert(entry.tx.wit.is_null())

    # Check that the cmpctblock message announced all the transactions.
    assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))

    # And now check that all the shortids are as expected as well.
    # Determine the siphash keys to use.
    [k0, k1] = header_and_shortids.get_siphash_keys()

    index = 0
    while index < len(block.vtx):
        if (len(header_and_shortids.prefilled_txn) > 0 and
                header_and_shortids.prefilled_txn[0].index == index):
            # Already checked prefilled transactions above
            header_and_shortids.prefilled_txn.pop(0)
        else:
            # Version 2 compact blocks use wtxids for shortids (BIP 152).
            tx_hash = block.vtx[index].sha256
            if version == 2:
                tx_hash = block.vtx[index].calc_sha256(True)
            shortid = calculate_shortid(k0, k1, tx_hash)
            assert_equal(shortid, header_and_shortids.shortids[0])
            header_and_shortids.shortids.pop(0)
        index += 1
# Test that materiad requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers.  Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
    """Announce blocks, expect compact-block getdata, then serve the
    missing coinbase via blocktxn and verify the tip advances."""
    # Try announcing a block with an inv or header, expect a compactblock
    # request
    for announce in ["inv", "header"]:
        block = self.build_block_on_tip(node, segwit=segwit)
        with mininode_lock:
            test_node.last_getdata = None

        if announce == "inv":
            test_node.send_message(msg_inv([CInv(2, block.sha256)]))
        else:
            test_node.send_header_for_blocks([block])
        success = wait_until(lambda: test_node.last_getdata is not None, timeout=30)
        assert(success)
        assert_equal(len(test_node.last_getdata.inv), 1)
        # Inv type 4 == compact block.
        assert_equal(test_node.last_getdata.inv[0].type, 4)
        assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)

        # Send back a compactblock message that omits the coinbase
        comp_block = HeaderAndShortIDs()
        comp_block.header = CBlockHeader(block)
        comp_block.nonce = 0
        [k0, k1] = comp_block.get_siphash_keys()
        coinbase_hash = block.vtx[0].sha256
        if version == 2:
            coinbase_hash = block.vtx[0].calc_sha256(True)
        comp_block.shortids = [
            calculate_shortid(k0, k1, coinbase_hash)]
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        # Reconstruction is impossible yet, so the tip must not advance.
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
        # Expect a getblocktxn message.
        with mininode_lock:
            assert(test_node.last_getblocktxn is not None)
            absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
        assert_equal(absolute_indexes, [0])  # should be a coinbase request

        # Send the coinbase, and verify that the tip advances.
        if version == 2:
            msg = msg_witness_blocktxn()
        else:
            msg = msg_blocktxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = [block.vtx[0]]
        test_node.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
    """Build a solved block with num_transactions chained spends of utxo."""
    block = self.build_block_on_tip(node)

    for i in range(num_transactions):
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
        # Anyone-can-spend output; subtract a small amount as fee.
        tx.vout.append(CTxOut(utxo[2] - 100000, CScript([OP_TRUE])))
        tx.rehash()
        # Each transaction spends the previous transaction's output.
        utxo = [tx.sha256, 0, tx.vout[0].nValue]
        block.vtx.append(tx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
    """Verify the node only requests transactions it is missing."""
    with_witness = (version == 2)

    def test_getblocktxn_response(compact_block, peer, expected_result):
        # Send a compact block and check which indexes the node requests.
        msg = msg_cmpctblock(compact_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert(peer.last_getblocktxn is not None)
            absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute()
        assert_equal(absolute_indexes, expected_result)

    def test_tip_after_message(node, peer, msg, tip):
        peer.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), tip)

    # First try announcing compactblocks that won't reconstruct, and verify
    # that we receive getblocktxn messages back.
    utxo = self.utxos.pop(0)

    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, use_witness=with_witness)

    test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])

    msg_bt = msg_blocktxn()
    if with_witness:
        msg_bt = msg_witness_blocktxn()  # serialize with witnesses
    msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])

    # Now try interspersing the prefilled transactions
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
    test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
    msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    # Now try giving one transaction ahead of time.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    test_node.send_and_ping(msg_tx(block.vtx[1]))
    assert(block.vtx[1].hash in node.getrawmempool())

    # Prefill 4 out of the 6 transactions, and verify that only the one
    # that was not in the mempool is requested.
    comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
    test_getblocktxn_response(comp_block, test_node, [5])

    msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    # Now provide all transactions to the node before the block is
    # announced and verify reconstruction happens immediately.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    for tx in block.vtx[1:]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert(tx.hash in mempool)

    # Clear out last request.
    with mininode_lock:
        test_node.last_getblocktxn = None

    # Send compact block
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
    test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
    with mininode_lock:
        # Shouldn't have gotten a request for any transaction
        assert(test_node.last_getblocktxn is None)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
    """Send a wrong blocktxn reply, then deliver the full block and check
    the node still accepts it (block not marked permanently invalid)."""
    if (len(self.utxos) == 0):
        self.make_utxos()
    utxo = self.utxos.pop(0)

    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Relay the first 5 transactions from the block in advance
    for tx in block.vtx[1:6]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:6]:
        assert(tx.hash in mempool)

    # Send compact block
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
    absolute_indexes = []
    with mininode_lock:
        assert(test_node.last_getblocktxn is not None)
        absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
    assert_equal(absolute_indexes, [6, 7, 8, 9, 10])

    # Now give an incorrect response.
    # Note that it's possible for materiad to be smart enough to know we're
    # lying, since it could check to see if the shortid matches what we're
    # sending, and eg disconnect us for misbehavior.  If that behavior
    # change were made, we could just modify this test by having a
    # different peer provide the block further down, so that we're still
    # verifying that the block isn't marked bad permanently. This is good
    # enough for now.
    msg = msg_blocktxn()
    if version == 2:
        msg = msg_witness_blocktxn()
    # Deliberately wrong: repeats tx 5 and skips tx 6.
    msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
    test_node.send_and_ping(msg)

    # Tip should not have updated
    assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)

    # We should receive a getdata request
    success = wait_until(lambda: test_node.last_getdata is not None, timeout=10)
    assert(success)
    assert_equal(len(test_node.last_getdata.inv), 1)
    # Type 2 == block; the witness flag may be set on upgraded nodes.
    assert(test_node.last_getdata.inv[0].type == 2 or test_node.last_getdata.inv[0].type == 2 | MSG_WITNESS_FLAG)
    assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)

    # Deliver the block
    if version == 2:
        test_node.send_and_ping(msg_witness_block(block))
    else:
        test_node.send_and_ping(msg_block(block))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
    """Check blocktxn responses for recent blocks and the full-block
    fallback for blocks deeper than the allowed depth."""
    # materiad will not send blocktxn responses for blocks whose height is
    # more than 10 blocks deep.
    MAX_GETBLOCKTXN_DEPTH = 10
    chain_height = node.getblockcount()
    current_height = chain_height
    while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
        block_hash = node.getblockhash(current_height)
        block = FromHex(CBlock(), node.getblock(block_hash, False))

        # Request a random subset of the block's transactions.
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
        num_to_request = random.randint(1, len(block.vtx))
        msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
        test_node.send_message(msg)
        success = wait_until(lambda: test_node.last_blocktxn is not None, timeout=10)
        assert(success)

        [tx.calc_sha256() for tx in block.vtx]
        with mininode_lock:
            assert_equal(test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
            all_indices = msg.block_txn_request.to_absolute()
            for index in all_indices:
                tx = test_node.last_blocktxn.block_transactions.transactions.pop(0)
                tx.calc_sha256()
                assert_equal(tx.sha256, block.vtx[index].sha256)
                if version == 1:
                    # Witnesses should have been stripped
                    assert(tx.wit.is_null())
                else:
                    # Check that the witness matches
                    assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
            test_node.last_blocktxn = None
        current_height -= 1

    # Next request should send a full block response, as we're past the
    # allowed depth for a blocktxn response.
    block_hash = node.getblockhash(current_height)
    msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
    with mininode_lock:
        test_node.last_block = None
        test_node.last_blocktxn = None
    test_node.send_and_ping(msg)
    with mininode_lock:
        test_node.last_block.block.calc_sha256()
        assert_equal(test_node.last_block.block.sha256, int(block_hash, 16))
        assert_equal(test_node.last_blocktxn, None)
def test_compactblocks_not_at_tip(self, node, test_node):
    """Verify behavior for compact blocks away from the tip: old blocks
    are served as full blocks, and stale compact blocks are not accepted."""
    # Test that requesting old compactblocks doesn't work.
    MAX_CMPCTBLOCK_DEPTH = 5
    new_blocks = []
    for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
        test_node.clear_block_announcement()
        new_blocks.append(node.generate(1)[0])
        wait_until(test_node.received_block_announcement, timeout=30)

    # The first block is still within the serving depth: expect cmpctblock.
    test_node.clear_block_announcement()
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    success = wait_until(lambda: test_node.last_cmpctblock is not None, timeout=30)
    assert(success)

    test_node.clear_block_announcement()
    node.generate(1)
    wait_until(test_node.received_block_announcement, timeout=30)
    test_node.clear_block_announcement()
    with mininode_lock:
        test_node.last_block = None
    # Now the first block is too deep: the node falls back to a full block.
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    success = wait_until(lambda: test_node.last_block is not None, timeout=30)
    assert(success)
    with mininode_lock:
        test_node.last_block.block.calc_sha256()
        assert_equal(test_node.last_block.block.sha256, int(new_blocks[0], 16))

    # Generate an old compactblock, and verify that it's not accepted.
    cur_height = node.getblockcount()
    hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
    block = self.build_block_on_tip(node)
    block.hashPrevBlock = hashPrevBlock
    block.solve()

    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block)
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

    tips = node.getchaintips()
    found = False
    for x in tips:
        if x["hash"] == block.hash:
            # The header was accepted but the block body was not fetched.
            assert_equal(x["status"], "headers-only")
            found = True
            break
    assert(found)

    # Requesting this block via getblocktxn should silently fail
    # (to avoid fingerprinting attacks).
    msg = msg_getblocktxn()
    msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
    with mininode_lock:
        test_node.last_blocktxn = None
    test_node.send_and_ping(msg)
    with mininode_lock:
        assert(test_node.last_blocktxn is None)
def activate_segwit(self, node):
    """Mine 144*3 blocks on `node` and assert segwit reports 'active'.

    NOTE(review): 144 is presumably the regtest BIP9 period, so three
    periods cover signalling, lock-in and activation — confirm against
    the framework's chain parameters.
    """
    node.generate(144*3)
    assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
def test_end_to_end_block_relay(self, node, listeners):
    """Submit a block to `node` and check that every peer in `listeners`
    receives a cmpctblock announcement whose header hash matches it."""
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    # Reset announcement state on all listening peers before submitting.
    [l.clear_block_announcement() for l in listeners]
    # ToHex() won't serialize with witness, but this block has no witnesses
    # anyway. TODO: repeat this test with witness tx's to a segwit node.
    node.submitblock(ToHex(block))
    for l in listeners:
        wait_until(lambda: l.received_block_announcement(), timeout=30)
    with mininode_lock:
        for l in listeners:
            assert(l.last_cmpctblock is not None)
            l.last_cmpctblock.header_and_shortids.header.calc_sha256()
            assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
    """Relay a compact block with a valid header but an invalid tx list.

    One transaction is removed and the merkle root recomputed over the
    truncated list, so the header commits to an invalid block
    (presumably a later chained tx now spends a missing output —
    confirm against build_block_with_transactions).  The node must
    neither disconnect us nor advance its tip to this block.
    """
    assert(len(self.utxos))
    utxo = self.utxos[0]
    block = self.build_block_with_transactions(node, utxo, 5)
    del block.vtx[3]
    block.hashMerkleRoot = block.calc_merkle_root()
    if use_segwit:
        # If we're testing with segwit, also drop the coinbase witness,
        # but include the witness commitment.
        add_witness_commitment(block)
        block.vtx[0].wit.vtxinwit = []
    block.solve()
    # Now send the compact block with all transactions prefilled, and
    # verify that we don't get disconnected.
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
    msg = msg_cmpctblock(comp_block.to_p2p())
    test_node.send_and_ping(msg)
    # Check that the tip didn't advance.
    # BUGFIX: the original used `is not`, which compares object identity;
    # two equal large ints are distinct objects, so that assertion could
    # never fail.  Compare by value instead (matching the `!=` used by
    # the analogous check in the multi-peer reconstruction test).
    assert(int(node.getbestblockhash(), 16) != block.sha256)
    test_node.sync_with_ping()
# Helper for enabling compact-block announcements on a peer:
# sync headers to the node's tip, then send a sendcmpct request.
def request_cb_announcements(self, peer, node, version):
    """Make `peer` opt in to cmpctblock announcements of `version` from `node`."""
    # Bring the peer's header chain up to the node's best block first.
    best_hash = node.getbestblockhash()
    peer.get_headers(locator=[int(best_hash, 16)], hashstop=0)
    # Then request high-bandwidth compact-block announcements.
    sendcmpct = msg_sendcmpct()
    sendcmpct.announce = True
    sendcmpct.version = version
    peer.send_and_ping(sendcmpct)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
    """Reconstruct compact blocks when announcements and transaction
    deliveries come from different peers, including recovery after one
    peer delivers an invalid compact block."""
    assert(len(self.utxos))
    def announce_cmpct_block(node, peer):
        # Build a fresh block and announce it with no prefilled txs so
        # the node is forced to send getblocktxn back to `peer`.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        cmpct_block = HeaderAndShortIDs()
        cmpct_block.initialize_from_block(block)
        msg = msg_cmpctblock(cmpct_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert(peer.last_getblocktxn is not None)
        return block, cmpct_block
    # First round: fill the mempool via delivery_peer, then re-announce;
    # the node should reconstruct the block from mempool alone.
    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert(tx.hash in mempool)
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    # Recycle the last output so later tests still have a UTXO.
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Now test that delivering an invalid compact block won't break relay
    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    # Corrupt the prefilled coinbase with a bogus witness so this
    # particular delivery fails validation.
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
    cmpct_block.use_witness = True
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert(int(node.getbestblockhash(), 16) != block.sha256)
    # The stalling peer can still complete the block via blocktxn.
    msg = msg_blocktxn()
    msg.block_transactions.blockhash = block.sha256
    msg.block_transactions.transactions = block.vtx[1:]
    stalling_peer.send_and_ping(msg)
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Drive every compact-block sub-test, first before and then after
    segwit activation, against node0 (plain) and node1 (segwit) via
    three mininode peers: test_node (v1 <-> node0), segwit_node
    (v2 <-> node1) and old_node (v1 <-> node1)."""
    # Setup the p2p connections and start up the network thread.
    self.test_node = TestNode()
    self.segwit_node = TestNode()
    self.old_node = TestNode()  # version 1 peer <--> segwit node
    connections = []
    connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                                self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                                self.old_node, services=NODE_NETWORK))
    self.test_node.add_connection(connections[0])
    self.segwit_node.add_connection(connections[1])
    self.old_node.add_connection(connections[2])
    NetworkThread().start()  # Start up network handling in another thread
    # Test logic begins here
    self.test_node.wait_for_verack()
    # We will need UTXOs to construct transactions in later tests.
    self.make_utxos()
    print("Running tests, pre-segwit activation:")
    print("\tTesting SENDCMPCT p2p message... ")
    self.test_sendcmpct(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
    sync_blocks(self.nodes)
    print("\tTesting compactblock construction...")
    self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)
    print("\tTesting compactblock requests... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)
    print("\tTesting getblocktxn requests...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    print("\tTesting getblocktxn handler...")
    self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    sync_blocks(self.nodes)
    print("\tTesting compactblock requests/announcements not at chain tip...")
    self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
    sync_blocks(self.nodes)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
    sync_blocks(self.nodes)
    print("\tTesting handling of incorrect blocktxn responses...")
    self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    # End-to-end block relay tests
    print("\tTesting end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
    print("\tTesting handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
    print("\tTesting reconstructing compact blocks from all peers...")
    self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
    sync_blocks(self.nodes)
    # Advance to segwit activation
    print ("\nAdvancing to segwit activation\n")
    self.activate_segwit(self.nodes[1])
    print ("Running tests, post-segwit activation...")
    print("\tTesting compactblock construction...")
    self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
    sync_blocks(self.nodes)
    print("\tTesting compactblock requests (unupgraded node)... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
    print("\tTesting getblocktxn requests (unupgraded node)...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    # Need to manually sync node0 and node1, because post-segwit activation,
    # node1 will not download blocks from node0.
    print("\tSyncing nodes...")
    assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
    while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
        block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
        self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
    assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
    print("\tTesting compactblock requests (segwit node)... ")
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
    print("\tTesting getblocktxn requests (segwit node)...")
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)
    print("\tTesting getblocktxn handler (segwit node should return witnesses)...")
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    # Test that if we submitblock to node1, we'll get a compact block
    # announcement to all peers.
    # (Post-segwit activation, blocks won't propagate from node0 to node1
    # automatically, so don't bother testing a block announced to node0.)
    print("\tTesting end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
    print("\tTesting handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
    print("\tTesting invalid index in cmpctblock message...")
    self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
    # Entry point; main() is presumably provided by the test framework
    # base class and handles node setup/teardown — confirm upstream.
    CompactBlocksTest().main()
| mit |
potatolondon/django-nonrel-1-4 | tests/modeltests/delete/tests.py | 40 | 8876 | from __future__ import absolute_import
from django.db import models, IntegrityError
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from .models import (R, RChild, S, T, U, A, M, MR, MRNull,
create_a, get_default_r, User, Avatar, HiddenUser, HiddenUserProfile)
class OnDeleteTests(TestCase):
    """Exercise each ForeignKey on_delete behavior via model A.

    Each field on A is presumably named after its on_delete option
    (auto/CASCADE default, SET(), SET_NULL, SET_DEFAULT, PROTECT,
    DO_NOTHING) — confirm against the companion models module.
    create_a(name) builds an A instance wired to fresh related rows.
    """

    def setUp(self):
        # The R row used as the SET()/SET_DEFAULT replacement target.
        self.DEFAULT = get_default_r()

    def test_auto(self):
        # Deleting the target of the default (CASCADE) FK removes A.
        a = create_a('auto')
        a.auto.delete()
        self.assertFalse(A.objects.filter(name='auto').exists())

    def test_auto_nullable(self):
        a = create_a('auto_nullable')
        a.auto_nullable.delete()
        self.assertFalse(A.objects.filter(name='auto_nullable').exists())

    def test_setvalue(self):
        # SET(<value>): FK is rewritten to the default R row.
        a = create_a('setvalue')
        a.setvalue.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setvalue)

    def test_setnull(self):
        # SET_NULL: FK becomes NULL, A survives.
        a = create_a('setnull')
        a.setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(None, a.setnull)

    def test_setdefault(self):
        # SET_DEFAULT with a non-null field default.
        a = create_a('setdefault')
        a.setdefault.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(self.DEFAULT, a.setdefault)

    def test_setdefault_none(self):
        # SET_DEFAULT where the field default is None.
        a = create_a('setdefault_none')
        a.setdefault_none.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(None, a.setdefault_none)

    def test_cascade(self):
        a = create_a('cascade')
        a.cascade.delete()
        self.assertFalse(A.objects.filter(name='cascade').exists())

    def test_cascade_nullable(self):
        a = create_a('cascade_nullable')
        a.cascade_nullable.delete()
        self.assertFalse(A.objects.filter(name='cascade_nullable').exists())

    def test_protect(self):
        # PROTECT: deleting a referenced row raises IntegrityError.
        a = create_a('protect')
        self.assertRaises(IntegrityError, a.protect.delete)

    def test_do_nothing(self):
        # Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
        # so we connect to pre_delete and set the fk to a known value.
        replacement_r = R.objects.create()
        def check_do_nothing(sender, **kwargs):
            obj = kwargs['instance']
            obj.donothing_set.update(donothing=replacement_r)
        models.signals.pre_delete.connect(check_do_nothing)
        a = create_a('do_nothing')
        a.donothing.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(replacement_r, a.donothing)
        models.signals.pre_delete.disconnect(check_do_nothing)

    def test_inheritance_cascade_up(self):
        # Deleting a child (multi-table inheritance) removes the parent row.
        child = RChild.objects.create()
        child.delete()
        self.assertFalse(R.objects.filter(pk=child.pk).exists())

    def test_inheritance_cascade_down(self):
        # Deleting the parent removes the child as well.
        child = RChild.objects.create()
        parent = child.r_ptr
        parent.delete()
        self.assertFalse(RChild.objects.filter(pk=child.pk).exists())

    def test_cascade_from_child(self):
        a = create_a('child')
        a.child.delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(R.objects.filter(pk=a.child_id).exists())

    def test_cascade_from_parent(self):
        a = create_a('child')
        R.objects.get(pk=a.child_id).delete()
        self.assertFalse(A.objects.filter(name='child').exists())
        self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())

    def test_setnull_from_child(self):
        a = create_a('child_setnull')
        a.child_setnull.delete()
        self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertEqual(None, a.child_setnull)

    def test_setnull_from_parent(self):
        a = create_a('child_setnull')
        R.objects.get(pk=a.child_setnull_id).delete()
        self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
        a = A.objects.get(pk=a.pk)
        self.assertEqual(None, a.child_setnull)

    def test_o2o_setnull(self):
        # SET_NULL on a OneToOneField.
        a = create_a('o2o_setnull')
        a.o2o_setnull.delete()
        a = A.objects.get(pk=a.pk)
        self.assertEqual(None, a.o2o_setnull)
class DeletionTests(TestCase):
    """Tests of the deletion machinery itself: m2m through-table
    cleanup, batched bulk deletes, pre-delete instance state, signal
    ordering, and FK-constraint deferral during delete."""

    def test_m2m(self):
        # Deleting either end of an explicit through model removes the
        # through rows.
        m = M.objects.create()
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        r.delete()
        self.assertFalse(MR.objects.exists())
        r = R.objects.create()
        MR.objects.create(m=m, r=r)
        m.delete()
        self.assertFalse(MR.objects.exists())
        # Same for an auto-created through table.
        m = M.objects.create()
        r = R.objects.create()
        m.m2m.add(r)
        r.delete()
        through = M._meta.get_field('m2m').rel.through
        self.assertFalse(through.objects.exists())
        r = R.objects.create()
        m.m2m.add(r)
        m.delete()
        self.assertFalse(through.objects.exists())
        # A nullable through row survives deletion of the related object
        # (presumably its FK is nulled — confirm against MRNull's
        # on_delete), but no longer appears via the relation.
        m = M.objects.create()
        r = R.objects.create()
        MRNull.objects.create(m=m, r=r)
        r.delete()
        # Idiom fix: was `assertFalse(not MRNull.objects.exists())`;
        # assertTrue of the same truth value is equivalent and clearer.
        self.assertTrue(MRNull.objects.exists())
        self.assertFalse(m.m2m_through_null.exists())

    def test_bulk(self):
        # Related objects are deleted in GET_ITERATOR_CHUNK_SIZE batches;
        # prove it by counting queries.
        from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
        s = S.objects.create(r=R.objects.create())
        for i in xrange(2*GET_ITERATOR_CHUNK_SIZE):
            T.objects.create(s=s)
        # 1 (select related `T` instances)
        # + 1 (select related `U` instances)
        # + 2 (delete `T` instances in batches)
        # + 1 (delete `s`)
        self.assertNumQueries(5, s.delete)
        self.assertFalse(S.objects.exists())

    def test_instance_update(self):
        # During pre_delete, instances must already have pk=None and
        # SET_NULL relations must be observable as nulled afterwards.
        deleted = []
        related_setnull_sets = []
        def pre_delete(sender, **kwargs):
            obj = kwargs['instance']
            deleted.append(obj)
            if isinstance(obj, R):
                related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
        models.signals.pre_delete.connect(pre_delete)
        a = create_a('update_setnull')
        a.setnull.delete()
        a = create_a('update_cascade')
        a.cascade.delete()
        for obj in deleted:
            self.assertEqual(None, obj.pk)
        for pk_list in related_setnull_sets:
            for a in A.objects.filter(id__in=pk_list):
                self.assertEqual(None, a.setnull)
        models.signals.pre_delete.disconnect(pre_delete)

    def test_deletion_order(self):
        # Verify signal firing order during a cascading delete:
        # children (T) before parents (S) before the root (R).
        # NOTE(review): the handler/list names look crossed (the
        # post_delete receiver appends to pre_delete_order and vice
        # versa).  The wiring and expected values are preserved exactly,
        # since they encode the observed behavior — confirm and rename
        # consistently in a follow-up.
        pre_delete_order = []
        post_delete_order = []
        def log_post_delete(sender, **kwargs):
            pre_delete_order.append((sender, kwargs['instance'].pk))
        def log_pre_delete(sender, **kwargs):
            post_delete_order.append((sender, kwargs['instance'].pk))
        models.signals.post_delete.connect(log_post_delete)
        models.signals.pre_delete.connect(log_pre_delete)
        r = R.objects.create(pk=1)
        s1 = S.objects.create(pk=1, r=r)
        s2 = S.objects.create(pk=2, r=r)
        t1 = T.objects.create(pk=1, s=s1)
        t2 = T.objects.create(pk=2, s=s2)
        r.delete()
        self.assertEqual(
            pre_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
        )
        self.assertEqual(
            post_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
        )
        models.signals.post_delete.disconnect(log_post_delete)
        # BUGFIX: the original disconnected log_pre_delete from
        # post_delete (where it was never connected), leaking the
        # pre_delete receiver into every subsequent test.
        models.signals.pre_delete.disconnect(log_pre_delete)

    @skipUnlessDBFeature("can_defer_constraint_checks")
    def test_can_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to delete the avatar
        # The important thing is that when we can defer constraint checks there
        # is no need to do an UPDATE on User.avatar to null it out.
        self.assertNumQueries(3, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())

    @skipIfDBFeature("can_defer_constraint_checks")
    def test_cannot_defer_constraint_checks(self):
        u = User.objects.create(
            avatar=Avatar.objects.create()
        )
        a = Avatar.objects.get(pk=u.avatar_id)
        # 1 query to find the users for the avatar.
        # 1 query to delete the user
        # 1 query to null out user.avatar, because we can't defer the constraint
        # 1 query to delete the avatar
        self.assertNumQueries(4, a.delete)
        self.assertFalse(User.objects.exists())
        self.assertFalse(Avatar.objects.exists())

    def test_hidden_related(self):
        # Relations with a hidden (related_name='+') accessor must still
        # be collected and cascaded.
        r = R.objects.create()
        h = HiddenUser.objects.create(r=r)
        p = HiddenUserProfile.objects.create(user=h)
        r.delete()
        self.assertEqual(HiddenUserProfile.objects.count(), 0)
| bsd-3-clause |
nicjhan/MOM5 | test/model_test_setup.py | 4 | 5716 |
from __future__ import print_function
import os
import sys
import subprocess as sp
import shlex
import shutil
import tempfile
import time
import platform as plat
class ModelTestSetup(object):
    """Helpers for building MOM and running experiments, either directly
    or through a PBS ``qsub`` submission.

    NOTE(review): written for Python 2 — get_qsub_output compares
    os.read() results against '' (bytes vs str under Python 3), and
    os.write is given str; confirm before porting.
    """

    def __init__(self):
        # All paths are derived from this file's location in the repo.
        self.my_dir = os.path.dirname(os.path.realpath(__file__))
        self.exp_dir = os.path.join(self.my_dir, '../', 'exp')
        self.data_dir = os.path.join(self.my_dir, '../data')
        self.archive_dir = os.path.join(self.data_dir, 'archives')
        self.work_dir = os.path.join(self.my_dir, '../', 'work')

    def download_input_data(self, exp):
        """
        Download the experiment input data.
        This needs to be done before submitting the MOM_run.sh script because
        the compute nodes may not have Internet access.

        Returns 0 on success, a non-zero subprocess return code otherwise.
        """
        filename = '{}.input.tar.gz'.format(exp)
        input = os.path.join(self.archive_dir, filename)
        ret = 0
        if not os.path.exists(input):
            cmd = '{} {}'.format(os.path.join(self.data_dir, 'get_exp_data.py'),
                                 filename)
            ret = sp.call(shlex.split(cmd))
            if ret != 0:
                return ret
        assert(os.path.exists(input))
        # Unzip into work directory.
        if not os.path.exists(self.work_dir):
            os.mkdir(self.work_dir)
        if not os.path.exists(os.path.join(self.work_dir, filename)):
            shutil.copy(input, self.work_dir)
        if not os.path.exists(os.path.join(self.work_dir, exp)):
            cmd = '/bin/tar -C {} -xvf {}'.format(self.work_dir, input)
            ret += sp.call(shlex.split(cmd))
        return ret

    def get_qsub_output(self, fo, fe):
        """Drain the stdout/stderr descriptors of a finished qsub job.

        Returns (stdout, stderr) as accumulated strings.
        """
        # The command has finished. Read output and write stdout.
        # We don't know when output has stopped so just keep trying
        # until it is all gone.
        empty_reads = 0
        stderr = ''
        stdout = ''
        while True:
            so = os.read(fo, 1024*1024)
            se = os.read(fe, 1024*1024)
            if so == '' and se == '':
                empty_reads += 1
            else:
                stdout += so
                stderr += se
                empty_reads = 0
            # Give up after ~10 consecutive empty reads (2s apart).
            if empty_reads > 10:
                break
            time.sleep(2)
        return (stdout, stderr)

    def get_platform(self):
        # We need to get the node/platform - see if Jenkins has this set.
        platform = 'nci'
        try:
            platform = os.environ['label']
        except KeyError:
            pass
        return platform

    def run(self, model_type, exp, walltime='01:00:00', ncpus='32',
            npes=None, mem='64Gb', qsub=True, download_input_data=True,
            valgrind=False):
        """
        ncpus is for requested cpus, npes is for how many mom uses.

        Returns (return_code, stdout, stderr).  When qsub is False the
        generated run script is executed directly and blocks until done.
        """
        if download_input_data:
            ret = self.download_input_data(exp)
            if ret != 0:
                print('Error: could not download input data.',
                      file=sys.stderr)
                return (ret, None, None)
        os.chdir(self.exp_dir)
        run_name = "CI_%s" % exp
        # -N value is a maximum of 15 chars.
        run_name = run_name[0:15]
        if npes != None:
            npes = '--npes %s' % npes
        else:
            npes = ''
        if valgrind:
            valgrind = '--valgrind'
        else:
            valgrind = ''
        # Get temporary file names for the stdout, stderr.
        fo, stdout_file = tempfile.mkstemp(dir=self.exp_dir)
        fe, stderr_file = tempfile.mkstemp(dir=self.exp_dir)
        # Write script out as a file.
        run_script = plat.run_scripts[self.get_platform()]
        run_script = run_script.format(walltime=walltime, ncpus=ncpus,
                                       mem=mem, stdout_file=stdout_file,
                                       stderr_file=stderr_file,
                                       run_name=run_name,
                                       type=model_type, exp=exp, npes=npes,
                                       valgrind=valgrind)
        # Write out run script
        frun, run_file = tempfile.mkstemp(dir=self.exp_dir)
        os.write(frun, run_script)
        os.close(frun)
        os.chmod(run_file, 0o755)
        # Submit the experiment. This will block until it has finished.
        ret = 0
        stdout = ''
        stderr = ''
        if qsub:
            ret = sp.call(['qsub', run_file])
            stdout, stderr = self.get_qsub_output(fo, fe)
        else:
            try:
                stdout = sp.check_output([run_file], stderr=sp.STDOUT)
            except sp.CalledProcessError as e:
                ret = e.returncode
                stdout = e.output
            # Mirror the captured output into the temp files so they end
            # up in fms.out/fms.err below (stderr is folded into stdout).
            os.write(fo, stdout)
            os.write(fe, stderr)
        # Move temporary files to experiment directory.
        shutil.move(stdout_file, os.path.join(self.work_dir, exp, 'fms.out'))
        shutil.move(stderr_file, os.path.join(self.work_dir, exp, 'fms.err'))
        shutil.move(run_file, os.path.join(self.work_dir, exp, 'run.sh'))
        # Change back to test dir.
        os.chdir(self.my_dir)
        return (ret, stdout, stderr)

    def build(self, model_type, unit_testing=True):
        """Build the model for `model_type`; returns the compiler's exit code."""
        os.chdir(self.exp_dir)
        if unit_testing:
            unit_testing = '--unit_testing'
        else:
            unit_testing = ''
        platform = self.get_platform()
        build_cmd = plat.build_cmd.format(type=model_type, platform=platform,
                                          unit_testing=unit_testing)
        # Build the model.
        ret = sp.call(shlex.split(build_cmd))
        os.chdir(self.my_dir)
        return ret
| gpl-2.0 |
holmes/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/gdal/tests/test_driver.py | 330 | 1207 | import os, os.path, unittest
from django.contrib.gis.gdal import Driver, OGRException
# Driver names OGR should accept verbatim.
valid_drivers = ('ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN',
                 'Memory', 'CSV', 'GML', 'KML')
# Names the Driver constructor should reject with OGRException.
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp')
# Case-insensitive aliases that should resolve to canonical driver names.
aliases = {'eSrI' : 'ESRI Shapefile',
           'TigER/linE' : 'TIGER',
           'SHAPE' : 'ESRI Shapefile',
           'sHp' : 'ESRI Shapefile',
          }
class DriverTest(unittest.TestCase):
    """Checks for OGR Driver name resolution: valid names round-trip,
    invalid names raise, and aliases map to canonical names."""

    def test01_valid_driver(self):
        "Testing valid OGR Data Source Drivers."
        for name in valid_drivers:
            driver = Driver(name)
            self.assertEqual(name, str(driver))

    def test02_invalid_driver(self):
        "Testing invalid OGR Data Source Drivers."
        for bad_name in invalid_drivers:
            self.assertRaises(OGRException, Driver, bad_name)

    def test03_aliases(self):
        "Testing driver aliases."
        for alias, canonical in aliases.items():
            self.assertEqual(canonical, str(Driver(alias)))
def suite():
    "Return a TestSuite holding every driver test."
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(DriverTest))
    return tests
def run(verbosity=2):
    "Run the driver suite through a text test runner."
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| apache-2.0 |
manojgudi/sandhi | modules/gr36/gr-filter/examples/decimate.py | 13 | 5841 | #!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
    """Flowgraph: three summed sine sources feed a PFB decimator, with
    the summed input and the PFB output captured in vector sinks.  A
    plain FIR decimator is also built from the same taps but left
    unconnected (kept for comparison)."""
    def __init__(self):
        gr.top_block.__init__(self)
        self._N = 10000000 # number of samples to use
        self._fs = 10000 # initial sampling rate
        self._decim = 20 # Decimation rate
        # Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
        self._taps = filter.firdes.low_pass_2(1, self._fs,
                                              200, 150,
                                              attenuation_dB=120,
                                              window=filter.firdes.WIN_BLACKMAN_hARRIS)
        # Calculate the number of taps per channel for our own information
        tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
        print "Number of taps: ", len(self._taps)
        print "Number of filters: ", self._decim
        print "Taps per channel: ", tpc
        # Build the input signal source
        # We create a list of freqs, and a sine wave is generated and added to the source
        # for each one of these frequencies.
        self.signals = list()
        self.add = gr.add_cc()
        freqs = [10, 20, 2040]
        for i in xrange(len(freqs)):
            self.signals.append(gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freqs[i], 1))
            self.connect(self.signals[i], (self.add,i))
        # Limit the stream to N samples so run() terminates.
        self.head = gr.head(gr.sizeof_gr_complex, self._N)
        # Construct a PFB decimator filter
        self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
        # Construct a standard FIR decimating filter
        self.dec = filter.fir_filter_ccf(self._decim, self._taps)
        self.snk_i = gr.vector_sink_c()
        # Connect the blocks
        self.connect(self.add, self.head, self.pfb)
        self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
        self.snk = gr.vector_sink_c()
        self.connect(self.pfb, self.snk)
def main():
    """Run the flowgraph, report wall-clock run time, then plot input
    and decimated output in both frequency (PSD) and time domains."""
    tb = pfb_top_block()
    tstart = time.time()
    tb.run()
    tend = time.time()
    print "Run time: %f" % (tend - tstart)
    if 1:
        fig1 = pylab.figure(1, figsize=(16,9))
        fig2 = pylab.figure(2, figsize=(16,9))
        # Plot a window of Ne samples starting Ns samples in, to skip
        # the filter's transient.
        Ns = 10000
        Ne = 10000
        fftlen = 8192
        winfunc = scipy.blackman
        fs = tb._fs
        # Plot the input to the decimator
        d = tb.snk_i.data()[Ns:Ns+Ne]
        sp1_f = fig1.add_subplot(2, 1, 1)
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
        p1_f = sp1_f.plot(f_in, X_in, "b")
        sp1_f.set_xlim([min(f_in), max(f_in)+1])
        sp1_f.set_ylim([-200.0, 50.0])
        sp1_f.set_title("Input Signal", weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")
        Ts = 1.0/fs
        Tmax = len(d)*Ts
        t_in = scipy.arange(0, Tmax, Ts)
        x_in = scipy.array(d)
        sp1_t = fig1.add_subplot(2, 1, 2)
        p1_t = sp1_t.plot(t_in, x_in.real, "b")
        p1_t = sp1_t.plot(t_in, x_in.imag, "r")
        sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
        sp1_t.set_xlabel("Time (s)")
        sp1_t.set_ylabel("Amplitude")
        # Plot the output of the decimator
        fs_o = tb._fs / tb._decim
        sp2_f = fig2.add_subplot(2, 1, 1)
        d = tb.snk.data()[Ns:Ns+Ne]
        X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
                          window = lambda d: d*winfunc(fftlen),
                          scale_by_freq=True)
        X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
        f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
        p2_f = sp2_f.plot(f_o, X_o, "b")
        sp2_f.set_xlim([min(f_o), max(f_o)+1])
        sp2_f.set_ylim([-200.0, 50.0])
        sp2_f.set_title("PFB Decimated Signal", weight="bold")
        sp2_f.set_xlabel("Frequency (Hz)")
        sp2_f.set_ylabel("Power (dBW)")
        Ts_o = 1.0/fs_o
        Tmax_o = len(d)*Ts_o
        x_o = scipy.array(d)
        t_o = scipy.arange(0, Tmax_o, Ts_o)
        sp2_t = fig2.add_subplot(2, 1, 2)
        p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
        p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
        sp2_t.set_ylim([-2.5, 2.5])
        sp2_t.set_xlabel("Time (s)")
        sp2_t.set_ylabel("Amplitude")
        pylab.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Let Ctrl-C close the flowgraph/plots without a traceback.
        pass
| gpl-3.0 |
duttashi/Data-Analysis-Visualization | scripts/general/chiSquareTest.py | 1 | 3347 | __author__ = 'Ashoo'
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats
import warnings
sns.set(color_codes=True)
# Reading the data where low_memory=False increases the program efficiency
data= pd.read_csv("gapminder.csv", low_memory=False)
# setting variables that you will be working with to numeric
data['breastcancerper100th']= data['breastcancerper100th'].convert_objects(convert_numeric=True)
data['femaleemployrate']= data['femaleemployrate'].convert_objects(convert_numeric=True)
data['alcconsumption']= data['alcconsumption'].convert_objects(convert_numeric=True)
#print "Showing missing data coulmn-wise"
#print data.isnull().sum()
# Create a copy of the original dataset as sub5 by using the copy() method
sub5=data.copy()
# Since the data is all continuous variables therefore the use the mean() for missing value imputation
sub5.fillna(sub5['breastcancerper100th'].mean(), inplace=True)
sub5.fillna(sub5['femaleemployrate'].mean(), inplace=True)
sub5.fillna(sub5['alcconsumption'].mean(), inplace=True)
# Showing the count of null values after imputation
#print sub5.isnull().sum()
# categorize quantitative variable based on customized splits using the cut function
sub5['alco']=pd.qcut(sub5.alcconsumption,6,labels=["0","1-4","5-9","10-14","15-19","20-24"])
sub5['brst']=pd.qcut(sub5.breastcancerper100th,5,labels=["1-20","21-40","41-60","61-80","81-90"])
# Converting response variable to categorical
sub5['brst']=sub5['brst'].astype('category')
# Cross tabulating the response variable with explantory variable
ct1=pd.crosstab(sub5['brst'],sub5['alco'])
#ct1=pd.crosstab(sub5['alco'],sub5['brst'])
print "Contigency Table"
print ct1
print "\n\n"
# the axis=0 statement tells python to sum all the values in each column in python
colsum=ct1.sum(axis=0)
colpct=ct1/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs1=scipy.stats.chi2_contingency(ct1)
print(cs1)
sub5['brst']=sub5['brst'].astype('category')
sub5['alco']=sub5['alco'].convert_objects(convert_numeric=True)
#sns.factorplot(x='alcconsumption', y='breastcancerper100th', data=sub5, kind="bar", ci=None)
sns.factorplot(x='alco', y='brst', data=sub5, kind="bar",ci=None)
plt.xlabel("Alcohol consumption in Liters")
plt.ylabel("Breast Cancer cases per 100th women")
# ====================================================
# POST HOC COMPARISON TEST
recode2={1-20:1,21-40:2}
sub5['COMP1v2']=sub5['brst'].map(recode2)
ct2=pd.crosstab(sub5['brst'],sub5['COMP1v2'])
print "Contigency Table -2\n"
print ct2
print "\n\n"
# the axis=0 statement tells python to sum all the values in each column in python
colsum=ct2.sum(axis=0)
colpct=ct2/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs2=scipy.stats.chi2_contingency(ct2)
print(cs2)
#######################################################
recode3={41-60:3,61-80:4}
sub5['COMP1v3']=sub5['alco'].map(recode3)
ct3=pd.crosstab(sub5['brst'],sub5['COMP1v3'])
print "Contigency Table - 3\n"
print ct3
print "\n\n"
# the axis=0 statement tells python to sum all the values in each column in python
colsum=ct3.sum(axis=0)
colpct=ct3/colsum
print(colpct)
# Chi-Square
print('\n\nChi-square value, p value, expected counts')
cs3=scipy.stats.chi2_contingency(ct3)
print(cs3)
| mit |
GrimDerp/numpy | numpy/lib/_datasource.py | 148 | 21266 | """A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations
when dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os
module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only
gzip and bz2 are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import shutil
_open = open
# Using a class instead of a module-level dictionary
# to reduce the inital 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way
that an instance of `_FileOpeners` itself can be indexed with the keys
of that dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.BZ2File
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
methods.
"""
self._load()
return list(self._file_openers.keys())
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()

def open(path, mode='r', destpath=os.curdir):
    """
    Open `path` with `mode` and return the file object.

    When ``path`` is an URL the file is first downloaded into the
    `DataSource` directory given by `destpath`, then opened from there.

    Parameters
    ----------
    path : str
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path` with: 'r' to read, 'w' to write, 'a' to
        append.  Which modes are available depends on the kind of object
        `path` refers to.  Default is 'r'.
    destpath : str, optional
        Directory where a downloaded source file is stored.  ``None``
        means a temporary directory is created.  Defaults to the current
        directory.

    Returns
    -------
    out : file object
        The opened file.

    Notes
    -----
    Convenience wrapper: builds a `DataSource` and delegates to its
    ``open`` method.

    """
    return DataSource(destpath).open(path, mode)
class DataSource (object):
    """
    DataSource(destpath='.')

    A generic data source file (file, http, ftp, ...).

    DataSources can be local files or remote files/URLs.  The files may
    also be compressed or uncompressed.  DataSource hides some of the
    low-level details of downloading the file, allowing you to simply pass
    in a valid file path (or URL) and obtain a file object.

    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::

        >>> repos = DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True

    Temporary directories are deleted when the DataSource is deleted.

    Examples
    --------
    ::

        >>> ds = DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/index.html'
        >>> gfile = ds.open('http://www.google.com/index.html')  # remote file
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/site/index.html'

        >>> ds = DataSource(None)  # use with temporary file
        >>> ds.open('/home/guido/foobar.txt')
        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/tmpy4pgsP/home/guido/foobar.txt'

    """

    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True

    def __del__(self):
        # Remove temp directories
        if self._istmpdest:
            shutil.rmtree(self._destpath)

    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file extension.

        """
        fname, ext = os.path.splitext(filename)
        return ext in _file_openers.keys()

    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing."""
        # Currently only used to test the bz2 files.
        _writemodes = ("w", "+")
        for c in mode:
            if c in _writemodes:
                return True
        return False

    def _splitzipext(self, filename):
        """Split zip extension from filename and return filename.

        Returns
        -------
        base, zip_ext : {tuple}
            ``zip_ext`` keeps its leading dot (e.g. ``'.bz2'``) or is None
            for uncompressed files.

        """
        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None

    def _possible_names(self, filename):
        """Return a tuple containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename+zipext)
        return names

    def _isurl(self, path):
        """Test if path is a net location.  Tests the scheme and netloc."""
        # We do this here to reduce the 'import numpy' initial import time.
        if sys.version_info[0] >= 3:
            from urllib.parse import urlparse
        else:
            from urlparse import urlparse

        # BUG : URLs require a scheme string ('http://') to be used.
        #       www.google.com will fail.
        #       Should we prepend the scheme for those that don't have it and
        #       test that also?  Similar to the way we append .gz and test for
        #       for compressed versions of files.

        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)

    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache.

        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            try:
                openedurl = urlopen(path)
                f = _open(upath, 'wb')
                try:
                    shutil.copyfileobj(openedurl, f)
                finally:
                    f.close()
                    openedurl.close()
            except URLError:
                raise URLError("URL not found: %s" % path)
        else:
            shutil.copyfile(path, upath)
        return upath

    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.

        If path is an URL, _findfile will cache a local copy and return the
        path to the cached file.  If path is a local file, _findfile will
        return a path to that local file.

        The search will include possible compressed versions of the file
        and return the first occurence found.

        """
        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)

        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None

    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        Notes
        -----
        The functionality is based on `os.path.abspath`.

        """
        # We do this here to reduce the 'import numpy' initial import time.
        if sys.version_info[0] >= 3:
            from urllib.parse import urlparse
        else:
            from urlparse import urlparse

        # TODO: This should be more robust.  Handles case where path includes
        #       the destpath, but not other sub-paths.  Failing case:
        #       path = /home/guido/datafile.txt
        #       destpath = /home/alex/
        #       upath = self.abspath(path)
        #       upath == '/home/alex/home/guido/datafile.txt'

        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)

    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which

        os.path.abspath(os.path.join(base, path)).startswith(base)

        """
        last = None
        path = os.path.normpath(path)
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path

    def exists(self, path):
        """
        Test if path exists.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        # We import this here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError

        # Test local path
        if os.path.exists(path):
            return True

        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True

        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                netfile.close()
                del netfile
                return True
            except URLError:
                return False
        return False

    def open(self, path, mode='r'):
        """
        Open and return file-like object.

        If `path` is an URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append.  Available modes depend on the type of object
            specified by `path`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        """
        # TODO: There is no support for opening a file for writing which
        #       doesn't exist yet (creating a file).  Should there be?

        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        #       used to store URLs in self._destpath.

        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")

        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # BUG FIX: _splitzipext returns the extension with its
                # leading dot ('.bz2'), so the old comparison against
                # 'bz2' never matched; also str.replace returns a new
                # string, so its result must be assigned.  bz2.BZ2File
                # does not accept '+' in its mode argument.
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode)
        else:
            raise IOError("%s not found." % path)
class Repository (DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository where multiple DataSource's share a base
    URL/directory.

    `Repository` extends `DataSource` by prepending a base URL (or
    directory) to all the files it handles.  Use `Repository` when you will
    be working with multiple files from one base URL.  Initialize
    `Repository` with the base URL, then refer to each file by its filename
    only.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """

    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository with a shared url or directory of baseurl."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        # Delegate cleanup (temp-dir removal) to DataSource.
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return complete path for path.  Prepends baseurl if necessary."""
        # str.split returns a single-element list when baseurl does not
        # occur in path; only then is baseurl prepended.
        splitpath = path.split(self._baseurl, 2)
        if len(splitpath) == 1:
            result = os.path.join(self._baseurl, path)
        else:
            result = path    # path contains baseurl already
        return result

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.  This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists prepending Repository base URL to path.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.  This may, but does not
            have to, include the `baseurl` with which the `Repository` was
            initialized.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r'):
        """
        Open and return file-like object prepending Repository base URL.

        If `path` is an URL, it will be downloaded, stored in the
        DataSource directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.  This may, but does not have to,
            include the `baseurl` with which the `Repository` was
            initialized.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append.  Available modes depend on the type of object
            specified by `path`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        """
        return DataSource.open(self, self._fullpath(path), mode)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                "Directory listing of URLs, not supported yet.")
        else:
            return os.listdir(self._baseurl)
| bsd-3-clause |
d3trax/asuswrt-merlin | release/src/router/samba36/source4/scripting/python/samba/tests/hostconfig.py | 20 | 2204 | #!/usr/bin/env python
# Unix SMB/CIFS implementation. Tests for shares
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.hostconfig."""
from samba.hostconfig import SharesContainer
from samba.tests import TestCase
class MockService(object):
    """Test double for a single smb.conf service section, backed by a dict."""

    def __init__(self, data):
        self.data = data

    def __getitem__(self, name):
        # Parameter lookup by name, e.g. service["path"].
        return self.data[name]
class MockLoadParm(object):
    """Test double for a loadparm context: maps section name -> MockService."""

    def __init__(self, data):
        self.data = data

    def __getitem__(self, name):
        return MockService(self.data[name])

    def __len__(self):
        # Number of sections, including "global" if present.
        return len(self.data)

    def services(self):
        # Section names, mirroring the real loadparm services() accessor.
        return self.data.keys()
class ShareTests(TestCase):
    """Tests for SharesContainer backed by mock loadparm objects.

    The "global" section is configuration, not a share, so it must never
    be counted, iterated or retrievable.
    """

    def _get_shares(self, conf):
        # Wrap a plain config dict in the mock loadparm machinery.
        return SharesContainer(MockLoadParm(conf))

    def test_len_no_global(self):
        shares = self._get_shares({})
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual throughout.
        self.assertEqual(0, len(shares))

    def test_iter(self):
        self.assertEqual([], list(self._get_shares({})))
        self.assertEqual([], list(self._get_shares({"global": {}})))
        self.assertEqual(["bla"],
                         list(self._get_shares({"global": {}, "bla": {}})))

    def test_len(self):
        shares = self._get_shares({"global": {}})
        self.assertEqual(0, len(shares))

    def test_getitem_nonexistant(self):
        shares = self._get_shares({"global": {}})
        self.assertRaises(KeyError, shares.__getitem__, "bla")

    def test_getitem_global(self):
        # "global" is deliberately hidden from share lookup.
        shares = self._get_shares({"global": {}})
        self.assertRaises(KeyError, shares.__getitem__, "global")
| gpl-2.0 |
Helias/Telegram-DMI-Bot | telegrambot.py | 1 | 2658 | # -*- coding: utf-8 -*-
from utilities import *
# Python 2 only: reload(sys) re-exposes sys.setdefaultencoding (hidden
# after interpreter startup) so the process-wide default codec can be
# forced to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
# TOKEN, telegram and Updater presumably come from the star-import above
# -- confirm in utilities.py.
bot= telegram.Bot(TOKEN)
updater = Updater(TOKEN)
# Get the dispatcher to register handlers
dp = updater.dispatcher
def main():
    """Register every command handler on the dispatcher and start polling."""
    dp.add_handler(RegexHandler('^(/help|/Help|/HELP)$',help))
    dp.add_handler(RegexHandler('^(/rappresentanti|/Rappresentanti|/RAPPRESENTANTI)$',rappresentanti))
    dp.add_handler(RegexHandler('^(/rappresentanti_dmi|/Rappresentanti_dmi|/RAPPRESENTANTI_DMI)$',rappresentanti_dmi))
    # BUG FIX: the second alternative of the next two patterns duplicated
    # the lowercase form, so the capitalized commands
    # (/Rappresentanti_informatica, /Rappresentanti_matematica) never
    # matched.  Every sibling handler follows the lower|Capitalized|UPPER
    # pattern.
    dp.add_handler(RegexHandler('^(/rappresentanti_informatica|/Rappresentanti_informatica|/RAPPRESENTANTI_INFORMATICA)$',rappresentanti_info))
    dp.add_handler(RegexHandler('^(/rappresentanti_matematica|/Rappresentanti_matematica|/RAPPRESENTANTI_MATEMATICA)$',rappresentanti_mate))
    dp.add_handler(RegexHandler('/sdidattica',sdidattica))
    dp.add_handler(RegexHandler('/sstudenti',sstudenti))
    dp.add_handler(RegexHandler('/cea',cea))
    dp.add_handler(RegexHandler('^(/ersu|/Ersu|/ERSU)$',ersu))
    dp.add_handler(RegexHandler('^(/ufficioersu|/Ufficioersu|/UFFICIOERSU)$',ufficioersu))
    dp.add_handler(RegexHandler('^(/urp|/Urp|/URP)$',urp))
    dp.add_handler(RegexHandler('/prof',prof))
    dp.add_handler(RegexHandler('^(/esami|/Esami|/ESAMI)$',esami))
    dp.add_handler(RegexHandler('^(/mesami|/Mesami|/MESAMI)$',mesami))
    dp.add_handler(RegexHandler('^(/aulario|/Aulario|/AULARIO)$',aulario))
    dp.add_handler(RegexHandler('^(/mensa|/Mensa|/MENSA)$',mensa))
    dp.add_handler(RegexHandler('^(/biblioteca|/Biblioteca|/BIBLIOTECA)$',biblioteca))
    dp.add_handler(RegexHandler('^(/cus|/Cus|/CUS)$',cus))
    dp.add_handler(RegexHandler('^(/smonta_portoni|/Smonta_portoni|/SMONTA_PORTONI)$',smonta_portoni))
    dp.add_handler(RegexHandler('^(/santino|/Santino|/SANTINO)$',santino)) #NN VA
    dp.add_handler(RegexHandler('^(/liste|/Liste|/LISTE)$',liste))
    dp.add_handler(RegexHandler('^(/contributors|/Contributors|/CONTRIBUTORS)$',contributors))
    dp.add_handler(RegexHandler('/forum',forum_bot))
    dp.add_handler(RegexHandler('/news',news_))
    dp.add_handler(RegexHandler('^(/spamnews|/Spamnews|/SPAMNEWS)$',spamnews))
    dp.add_handler(RegexHandler('^(/disablenews|/Disablenews|/DISABLENEWS)$',disablenews))
    dp.add_handler(RegexHandler('^(/enablenews|/Enablenews|/ENABLENEWS)$',enablenews))
    dp.add_handler(RegexHandler('^(/drive|/Drive|/DRIVE)$',drive))
    dp.add_handler(RegexHandler('/adddb',adddb))
    dp.add_handler(RegexHandler('/request',request))
    dp.add_handler(RegexHandler('^(/stats)',stats))
    dp.add_handler(RegexHandler('^(/statsT)$',statsTot))
    dp.add_handler(CallbackQueryHandler(callback))

    # Start the long-polling loop and block until interrupted.
    updater.start_polling()
    updater.idle()

if __name__ == '__main__':
    main()
| gpl-3.0 |
abtink/openthread | tests/scripts/thread-cert/network_diag.py | 7 | 8897 | #!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import struct
from enum import IntEnum
from typing import List
import common
import ipaddress
import mle
class TlvType(IntEnum):
    """Network Diagnostic TLV type numbers carried in diagnostic messages."""
    EXT_ADDRESS = 0
    ADDRESS16 = 1
    MODE = 2
    POLLING_PERIOD = 3
    CONNECTIVITY = 4
    ROUTE64 = 5
    LEADER_DATA = 6
    NETWORK_DATA = 7
    IPV6_ADDRESS_LIST = 8
    MAC_COUNTERS = 9
    # Values 10-13 are intentionally absent from this enum.
    BATTERY_LEVEL = 14
    SUPPLY_VOLTAGE = 15
    CHILD_TABLE = 16
    CHANNEL_PAGES = 17
    TYPE_LIST = 18
    MAX_CHILD_TIMEOUT = 19
class Ipv6AddressList:
    """Decoded IPv6 Address List TLV value."""

    def __init__(self, addresses: List[ipaddress.IPv6Address]):
        self._addresses = addresses

    @property
    def addresses(self):
        return self._addresses

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self.addresses == other.addresses

    def __repr__(self):
        return 'Ipv6AddressList({})'.format(self._addresses)


class Ipv6AddressListFactory:
    """Parses a run of packed 16-byte IPv6 addresses."""

    def parse(self, data, message_info):
        # message_info.length is the TLV payload size in bytes.
        parsed = []
        while data.tell() < message_info.length:
            parsed.append(ipaddress.IPv6Address(data.read(16)))
        return Ipv6AddressList(parsed)
class MacCounters:
    """Decoded MAC Counters TLV: nine interface packet/error counters."""

    # repr field order matches the counter layout below.
    _NAMES = ('if_in_unknown_protos', 'if_in_errors', 'if_out_errors',
              'if_in_ucast_pkts', 'if_in_broadcast_pkts', 'if_in_discards',
              'if_out_ucast_pkts', 'if_out_broadcast_pkts',
              'if_out_discards')

    def __init__(self, counters: List[int]):
        self._counters = counters

    @property
    def if_in_unknown_protos(self):
        return self._counters[0]

    @property
    def if_in_errors(self):
        return self._counters[1]

    @property
    def if_out_errors(self):
        return self._counters[2]

    @property
    def if_in_ucast_pkts(self):
        return self._counters[3]

    @property
    def if_in_broadcast_pkts(self):
        return self._counters[4]

    @property
    def if_in_discards(self):
        return self._counters[5]

    @property
    def if_out_ucast_pkts(self):
        return self._counters[6]

    @property
    def if_out_broadcast_pkts(self):
        return self._counters[7]

    @property
    def if_out_discards(self):
        return self._counters[8]

    @property
    def counters(self):
        return self._counters

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self.counters == other.counters

    def __repr__(self):
        described = ', '.join(
            '{}={}'.format(name, getattr(self, name)) for name in self._NAMES)
        return 'MacCounters(' + described + ')'


class MacCountersFactory:
    """Builds MacCounters from nine consecutive big-endian uint32 values."""

    def parse(self, data, message_info):
        return MacCounters(struct.unpack('>9I', data.read(4 * 9)))
class BatteryLevel:
    """Decoded Battery Level TLV (single octet)."""

    def __init__(self, battery_level: int):
        self._battery_level = battery_level

    @property
    def battery_level(self):
        return self._battery_level

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self.battery_level == other.battery_level

    def __repr__(self):
        return 'BatteryLevel(battery_level={})'.format(self._battery_level)


class BatteryLevelFactory:
    """Builds BatteryLevel from one unsigned byte."""

    def parse(self, data, message_info):
        (level,) = struct.unpack('>B', data.read(1))
        return BatteryLevel(level)
class SupplyVoltage:
    """Decoded Supply Voltage TLV (big-endian uint16)."""

    def __init__(self, supply_voltage: int):
        self._supply_voltage = supply_voltage

    @property
    def supply_voltage(self):
        return self._supply_voltage

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self.supply_voltage == other.supply_voltage

    def __repr__(self):
        return 'SupplyVoltage(supply_voltage={})'.format(self._supply_voltage)


class SupplyVoltageFactory:
    """Builds SupplyVoltage from one big-endian uint16."""

    def parse(self, data, message_info):
        (voltage,) = struct.unpack('>H', data.read(2))
        return SupplyVoltage(voltage)
class ChildTableEntry:
    """One Child Table TLV record: timeout exponent, child id and mode."""

    def __init__(self, timeout: int, child_id: int, mode: mle.Mode):
        self._timeout = timeout
        self._child_id = child_id
        self._mode = mode

    @property
    def timeout(self):
        return self._timeout

    @property
    def child_id(self):
        return self._child_id

    @property
    def mode(self):
        return self._mode

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return (self.timeout == other.timeout and
                self.child_id == other.child_id and
                self.mode == other.mode)

    def __repr__(self):
        return f'ChildTableEntry(timeout={self.timeout}, child_id={self.child_id}, mode={self.mode})'


class ChildTable:
    """Child Table TLV value; entries are kept sorted by child id."""

    def __init__(self, children: List[ChildTableEntry]):
        self._children = sorted(children, key=lambda entry: entry.child_id)

    @property
    def children(self):
        return self._children

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self.children == other.children

    def __repr__(self):
        return f'ChildTable({self.children})'
class ChildTableFactory:
    """Parses a Child Table TLV payload into a ChildTable."""

    def parse(self, data, message_info):
        children = []
        # Consume (timeout/child-id, mode) records until the TLV payload
        # (message_info.length bytes) is exhausted; length is decremented
        # in place as bytes are read.
        while message_info.length > 0:
            timeout_and_id = struct.unpack('>H', data.read(2))[0]
            message_info.length -= 2

            # Top 5 bits hold the timeout exponent, low 13 bits the child id.
            # NOTE(review): the two masks overlap in bits 11-12 (5 + 13 > 16);
            # confirm against the Thread spec whether the child id field is
            # really 13 bits wide here.
            timeout = (timeout_and_id & 0xf800) >> 11
            child_id = timeout_and_id & 0x1fff

            mode = mle.ModeFactory().parse(data, message_info)
            message_info.length -= 1

            children.append(ChildTableEntry(timeout, child_id, mode))
        return ChildTable(children)
class ChannelPages:
    """Decoded Channel Pages TLV (raw page bytes)."""

    def __init__(self, channel_pages: bytes):
        self._channel_pages = channel_pages

    @property
    def channel_pages(self):
        return self._channel_pages

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self.channel_pages == other.channel_pages

    def __repr__(self):
        return 'ChannelPages(channel_pages={})'.format(self._channel_pages)


class ChannelPagesFactory:
    """Builds ChannelPages from the entire remaining payload."""

    def parse(self, data, message_info):
        return ChannelPages(data.getvalue())
class TypeList:
    """Decoded Type List TLV: the list of requested TLV type numbers."""

    def __init__(self, tlv_types: List[int]):
        self._tlv_types = tlv_types

    @property
    def tlv_types(self):
        return self._tlv_types

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self.tlv_types == other.tlv_types

    def __repr__(self):
        return f'TypeList(tlv_types={self.tlv_types})'


class TypeListFactory:
    """Builds a TypeList from a payload of one-byte TLV type numbers."""

    def parse(self, data, message_info):
        # BUG FIX: iterating a py3 ``bytes`` buffer yields ints, and
        # ord(int) raises TypeError.  Normalize both bytes (ints) and str
        # (1-char strings) payloads to a list of ints.
        return TypeList(
            [t if isinstance(t, int) else ord(t) for t in data.getvalue()])
class MaxChildTimeout:
    """Decoded Max Child Timeout TLV (big-endian uint32)."""

    def __init__(self, max_child_timeout: int):
        self._max_child_timeout = max_child_timeout

    @property
    def max_child_timeout(self):
        return self._max_child_timeout

    def __eq__(self, other):
        common.expect_the_same_class(self, other)
        return self.max_child_timeout == other.max_child_timeout

    def __repr__(self):
        return 'MaxChildTimeout(max_child_timeout={})'.format(
            self._max_child_timeout)


class MaxChildTimeoutFactory:
    """Builds MaxChildTimeout from one big-endian uint32."""

    def parse(self, data, message_info):
        (timeout,) = struct.unpack('>I', data.read(4))
        return MaxChildTimeout(timeout)
| bsd-3-clause |
TheMutley/openpilot | common/dbc.py | 2 | 6618 | import re
import os
import struct
import bitstring
import sys
import numbers
from collections import namedtuple
def int_or_float(s):
  """Parse *s* as an int when possible, otherwise fall back to float."""
  try:
    value = int(s)
  except ValueError:
    value = float(s)
  return value

# One decoded signal definition from a DBC file: bit layout, endianness,
# signedness, scaling (factor/offset), value range and units.
DBCSignal = namedtuple(
  "DBCSignal", ["name", "start_bit", "size", "is_little_endian", "is_signed",
                "factor", "offset", "tmin", "tmax", "units"])
class dbc(object):
def __init__(self, fn):
self.name, _ = os.path.splitext(os.path.basename(fn))
with open(fn) as f:
self.txt = f.read().split("\n")
self._warned_addresses = set()
# regexps from https://github.com/ebroecker/canmatrix/blob/master/canmatrix/importdbc.py
bo_regexp = re.compile(r"^BO\_ (\w+) (\w+) *: (\w+) (\w+)")
sg_regexp = re.compile(r"^SG\_ (\w+) : (\d+)\|(\d+)@(\d+)([\+|\-]) \(([0-9.+\-eE]+),([0-9.+\-eE]+)\) \[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] \"(.*)\" (.*)")
sgm_regexp = re.compile(r"^SG\_ (\w+) (\w+) *: (\d+)\|(\d+)@(\d+)([\+|\-]) \(([0-9.+\-eE]+),([0-9.+\-eE]+)\) \[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] \"(.*)\" (.*)")
# A dictionary which maps message ids to tuples ((name, size), signals).
# name is the ASCII name of the message.
# size is the size of the message in bytes.
# signals is a list signals contained in the message.
# signals is a list of DBCSignal in order of increasing start_bit.
self.msgs = {}
# lookup to bit reverse each byte
self.bits_index = [(i & ~0b111) + ((-i-1) & 0b111) for i in xrange(64)]
for l in self.txt:
l = l.strip()
if l.startswith("BO_ "):
# new group
dat = bo_regexp.match(l)
if dat is None:
print "bad BO", l
name = dat.group(2)
size = int(dat.group(3))
ids = int(dat.group(1), 0) # could be hex
if ids in self.msgs:
sys.exit("Duplicate address detected %d %s" % (ids, self.name))
self.msgs[ids] = ((name, size), [])
if l.startswith("SG_ "):
# new signal
dat = sg_regexp.match(l)
go = 0
if dat is None:
dat = sgm_regexp.match(l)
go = 1
if dat is None:
print "bad SG", l
sgname = dat.group(1)
start_bit = int(dat.group(go+2))
signal_size = int(dat.group(go+3))
is_little_endian = int(dat.group(go+4))==1
is_signed = dat.group(go+5)=='-'
factor = int_or_float(dat.group(go+6))
offset = int_or_float(dat.group(go+7))
tmin = int_or_float(dat.group(go+8))
tmax = int_or_float(dat.group(go+9))
units = dat.group(go+10)
self.msgs[ids][1].append(
DBCSignal(sgname, start_bit, signal_size, is_little_endian,
is_signed, factor, offset, tmin, tmax, units))
for msg in self.msgs.viewvalues():
msg[1].sort(key=lambda x: x.start_bit)
self.msg_name_to_address = {}
for address, m in self.msgs.items():
name = m[0][0]
self.msg_name_to_address[name] = address
def lookup_msg_id(self, msg_id):
if not isinstance(msg_id, numbers.Number):
msg_id = self.msg_name_to_address[msg_id]
return msg_id
def encode(self, msg_id, dd):
"""Encode a CAN message using the dbc.
Inputs:
msg_id: The message ID.
dd: A dictionary mapping signal name to signal data.
"""
msg_id = self.lookup_msg_id(msg_id)
# TODO: Stop using bitstring, which is super slow.
msg_def = self.msgs[msg_id]
size = msg_def[0][1]
bsf = bitstring.Bits(hex="00"*size)
for s in msg_def[1]:
ival = dd.get(s.name)
if ival is not None:
ival = (ival / s.factor) - s.offset
ival = int(round(ival))
# should pack this
if s.is_little_endian:
ss = s.start_bit
else:
ss = self.bits_index[s.start_bit]
if s.is_signed:
tbs = bitstring.Bits(int=ival, length=s.size)
else:
tbs = bitstring.Bits(uint=ival, length=s.size)
lpad = bitstring.Bits(bin="0b"+"0"*ss)
rpad = bitstring.Bits(bin="0b"+"0"*(8*size-(ss+s.size)))
tbs = lpad+tbs+rpad
bsf |= tbs
return bsf.tobytes()
def decode(self, x, arr=None, debug=False):
  """Decode a CAN message using the dbc.

  Inputs:
   x: A collection with elements (address, time, data), where address is
      the CAN address, time is the bus time, and data is the CAN data as a
      hex string.
   arr: Optional list of signals which should be decoded and returned.
   debug: True to print debugging statements.

  Returns:
   A tuple (name, data), where name is the name of the CAN message and data
   is the decoded result. If arr is None, data is a dict of properties.
   Otherwise data is a list of the same length as arr.

   Returns (None, None) if the message could not be decoded.
  """
  if arr is None:
    out = {}
  else:
    out = [None]*len(arr)

  msg = self.msgs.get(x[0])
  if msg is None:
    # Warn only once per unknown address to avoid log spam.
    if x[0] not in self._warned_addresses:
      #print("WARNING: Unknown message address {}".format(x[0]))
      self._warned_addresses.add(x[0])
    return None, None

  name = msg[0][0]
  if debug:
    print name

  blen = 8*len(x[2])

  # Left-pad the payload to 8 bytes so it can be unpacked as one uint64.
  st = x[2].rjust(8, '\x00')
  le, be = None, None  # lazily-unpacked little/big-endian views of st

  # Each signal s indexes like the tuple built in __init__:
  # s[0]=name, s[1]=start_bit, s[2]=size, s[3]=is_little_endian,
  # s[4]=is_signed, s[5]=factor, s[6]=offset.
  for s in msg[1]:
    if arr is not None and s[0] not in arr:
      continue

    # big or little endian?
    # see http://vi-firmware.openxcplatform.com/en/master/config/bit-numbering.html
    if s[3] is False:
      ss = self.bits_index[s[1]]
      if be is None:
        be = struct.unpack(">Q", st)[0]
      x2_int = be
      data_bit_pos = (blen - (ss + s[2]))
    else:
      if le is None:
        le = struct.unpack("<Q", st)[0]
      x2_int = le
      ss = s[1]
      data_bit_pos = ss

    # Field would start before the (short) payload; skip it.
    if data_bit_pos < 0:
      continue

    # Extract the raw field bits, then sign-extend two's complement values.
    ival = (x2_int >> data_bit_pos) & ((1 << (s[2])) - 1)
    if s[4] and (ival & (1<<(s[2]-1))): # signed
      ival -= (1<<s[2])

    # Scale raw value to physical units: phys = raw * factor + offset.
    ival = (ival * s[5]) + s[6]

    #if debug:
    #  print "%40s %2d %2d %7.2f %s" % (s[0], s[1], s[2], ival, s[-1])

    if arr is None:
      out[s[0]] = ival
    else:
      out[arr.index(s[0])] = ival
  return name, out
def get_signals(self, msg):
  """Return the names of all signals defined for message *msg*.

  *msg* may be a numeric CAN address or a message name.
  """
  address = self.lookup_msg_id(msg)
  names = []
  for sig in self.msgs[address][1]:
    names.append(sig.name)
  return names
if __name__ == "__main__":
  # Smoke test: load the DBC file named on the command line from the
  # opendbc directory and dump the signal names of message 0xe4.
  from opendbc import DBC_PATH

  dbc_test = dbc(os.path.join(DBC_PATH, sys.argv[1]))
  print dbc_test.get_signals(0xe4)
| mit |
kharts/kastodi | resources/lib/google/protobuf/internal/message_test.py | 15 | 72124 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests python protocol buffers against the golden message.
Note that the golden messages exercise every known field type, thus this
test ends up exercising and verifying nearly all of the parsing and
serialization code in the whole library.
TODO(kenton): Merge with wire_format_test? It doesn't make a whole lot of
sense to call this a test of the "message" module, which only declares an
abstract interface.
"""
__author__ = 'gps@google.com (Gregory P. Smith)'
import collections
import copy
import math
import operator
import pickle
import six
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from google.protobuf.internal import _parameterized
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import any_test_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import packed_field_test_pb2
from google.protobuf.internal import test_util
from google.protobuf import message
if six.PY3:
  # Python 3 dropped the separate long type; alias it so the tests below
  # can construct long-valued slice indices on either major version.
  long = int
# NaN/infinity helpers. The historical hand-rolled tricks (val != val,
# isnan(val * 0)) predate math.isnan/math.isinf being a safe assumption;
# both have been in the math module since Python 2.6, which this file
# already requires, so delegate to the stdlib.
def isnan(val):
  """Return True if val is NaN (not a number)."""
  return math.isnan(val)


def isinf(val):
  """Return True if val is positive or negative infinity."""
  return math.isinf(val)
def IsPosInf(val):
  """Return True if val is positive infinity."""
  # math.isinf is True for both infinities and False for NaN, so the sign
  # check alone distinguishes +inf.
  return math.isinf(val) and (val > 0)


def IsNegInf(val):
  """Return True if val is negative infinity."""
  return math.isinf(val) and (val < 0)
@_parameterized.Parameters(
(unittest_pb2),
(unittest_proto3_arena_pb2))
class MessageTest(unittest.TestCase):
def testBadUtf8String(self, message_module):
  """Malformed UTF-8 in a string field raises UnicodeDecodeError on parse."""
  if api_implementation.Type() != 'python':
    self.skipTest("Skipping testBadUtf8String, currently only the python "
                  "api implementation raises UnicodeDecodeError when a "
                  "string field contains bad utf-8.")
  bad_utf8_data = test_util.GoldenFileData('bad_utf8_string')
  with self.assertRaises(UnicodeDecodeError) as context:
    message_module.TestAllTypes.FromString(bad_utf8_data)
  self.assertIn('TestAllTypes.optional_string', str(context.exception))

def testGoldenMessage(self, message_module):
  """Golden wire data parses, and re-serializes byte-for-byte."""
  # Proto3 doesn't have the "default_foo" members or foreign enums,
  # and doesn't preserve unknown fields, so for proto3 we use a golden
  # message that doesn't have these fields set.
  if message_module is unittest_pb2:
    golden_data = test_util.GoldenFileData(
        'golden_message_oneof_implemented')
  else:
    golden_data = test_util.GoldenFileData('golden_message_proto3')

  golden_message = message_module.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  if message_module is unittest_pb2:
    test_util.ExpectAllFieldsSet(self, golden_message)
  self.assertEqual(golden_data, golden_message.SerializeToString())
  # A deep copy must serialize identically to the original.
  golden_copy = copy.deepcopy(golden_message)
  self.assertEqual(golden_data, golden_copy.SerializeToString())

def testGoldenPackedMessage(self, message_module):
  """Golden packed-field wire data parses and re-serializes losslessly."""
  golden_data = test_util.GoldenFileData('golden_packed_fields_message')
  golden_message = message_module.TestPackedTypes()
  golden_message.ParseFromString(golden_data)
  all_set = message_module.TestPackedTypes()
  test_util.SetAllPackedFields(all_set)
  self.assertEqual(all_set, golden_message)
  self.assertEqual(golden_data, all_set.SerializeToString())
  golden_copy = copy.deepcopy(golden_message)
  self.assertEqual(golden_data, golden_copy.SerializeToString())

def testPickleSupport(self, message_module):
  """Messages survive a pickle/unpickle round trip with equality."""
  golden_data = test_util.GoldenFileData('golden_message')
  golden_message = message_module.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  pickled_message = pickle.dumps(golden_message)
  unpickled_message = pickle.loads(pickled_message)
  self.assertEqual(unpickled_message, golden_message)
def testPositiveInfinity(self, message_module):
  """+inf float/double values parse and re-serialize byte-for-byte."""
  if message_module is unittest_pb2:
    golden_data = (b'\x5D\x00\x00\x80\x7F'
                   b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
                   b'\xCD\x02\x00\x00\x80\x7F'
                   b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
  else:
    # Proto3 packs repeated scalars by default, hence different wire bytes.
    golden_data = (b'\x5D\x00\x00\x80\x7F'
                   b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
                   b'\xCA\x02\x04\x00\x00\x80\x7F'
                   b'\xD2\x02\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
  golden_message = message_module.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(IsPosInf(golden_message.optional_float))
  self.assertTrue(IsPosInf(golden_message.optional_double))
  self.assertTrue(IsPosInf(golden_message.repeated_float[0]))
  self.assertTrue(IsPosInf(golden_message.repeated_double[0]))
  self.assertEqual(golden_data, golden_message.SerializeToString())

def testNegativeInfinity(self, message_module):
  """-inf float/double values parse and re-serialize byte-for-byte."""
  if message_module is unittest_pb2:
    golden_data = (b'\x5D\x00\x00\x80\xFF'
                   b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
                   b'\xCD\x02\x00\x00\x80\xFF'
                   b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
  else:
    # Proto3 packs repeated scalars by default, hence different wire bytes.
    golden_data = (b'\x5D\x00\x00\x80\xFF'
                   b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
                   b'\xCA\x02\x04\x00\x00\x80\xFF'
                   b'\xD2\x02\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
  golden_message = message_module.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(IsNegInf(golden_message.optional_float))
  self.assertTrue(IsNegInf(golden_message.optional_double))
  self.assertTrue(IsNegInf(golden_message.repeated_float[0]))
  self.assertTrue(IsNegInf(golden_message.repeated_double[0]))
  self.assertEqual(golden_data, golden_message.SerializeToString())

def testNotANumber(self, message_module):
  """NaN float/double values parse and survive a round trip."""
  golden_data = (b'\x5D\x00\x00\xC0\x7F'
                 b'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
                 b'\xCD\x02\x00\x00\xC0\x7F'
                 b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
  golden_message = message_module.TestAllTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(isnan(golden_message.optional_float))
  self.assertTrue(isnan(golden_message.optional_double))
  self.assertTrue(isnan(golden_message.repeated_float[0]))
  self.assertTrue(isnan(golden_message.repeated_double[0]))

  # The protocol buffer may serialize to any one of multiple different
  # representations of a NaN.  Rather than verify a specific representation,
  # verify the serialized string can be converted into a correctly
  # behaving protocol buffer.
  serialized = golden_message.SerializeToString()
  message = message_module.TestAllTypes()
  message.ParseFromString(serialized)
  self.assertTrue(isnan(message.optional_float))
  self.assertTrue(isnan(message.optional_double))
  self.assertTrue(isnan(message.repeated_float[0]))
  self.assertTrue(isnan(message.repeated_double[0]))

def testPositiveInfinityPacked(self, message_module):
  """+inf values in packed repeated fields round-trip byte-for-byte."""
  golden_data = (b'\xA2\x06\x04\x00\x00\x80\x7F'
                 b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
  golden_message = message_module.TestPackedTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(IsPosInf(golden_message.packed_float[0]))
  self.assertTrue(IsPosInf(golden_message.packed_double[0]))
  self.assertEqual(golden_data, golden_message.SerializeToString())

def testNegativeInfinityPacked(self, message_module):
  """-inf values in packed repeated fields round-trip byte-for-byte."""
  golden_data = (b'\xA2\x06\x04\x00\x00\x80\xFF'
                 b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
  golden_message = message_module.TestPackedTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(IsNegInf(golden_message.packed_float[0]))
  self.assertTrue(IsNegInf(golden_message.packed_double[0]))
  self.assertEqual(golden_data, golden_message.SerializeToString())

def testNotANumberPacked(self, message_module):
  """NaN values in packed repeated fields survive a round trip."""
  golden_data = (b'\xA2\x06\x04\x00\x00\xC0\x7F'
                 b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
  golden_message = message_module.TestPackedTypes()
  golden_message.ParseFromString(golden_data)
  self.assertTrue(isnan(golden_message.packed_float[0]))
  self.assertTrue(isnan(golden_message.packed_double[0]))

  # NaN has multiple wire representations; verify behavior, not bytes.
  serialized = golden_message.SerializeToString()
  message = message_module.TestPackedTypes()
  message.ParseFromString(serialized)
  self.assertTrue(isnan(message.packed_float[0]))
  self.assertTrue(isnan(message.packed_double[0]))
def testExtremeFloatValues(self, message_module):
  """Boundary single-precision values survive serialize/parse round trips."""
  message = message_module.TestAllTypes()

  # Most positive exponent, no significand bits set.
  kMostPosExponentNoSigBits = math.pow(2, 127)
  message.optional_float = kMostPosExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)

  # Most positive exponent, one significand bit set.
  kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
  message.optional_float = kMostPosExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)

  # Repeat last two cases with values of same magnitude, but negative.
  message.optional_float = -kMostPosExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)

  message.optional_float = -kMostPosExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)

  # Most negative exponent, no significand bits set.
  kMostNegExponentNoSigBits = math.pow(2, -127)
  message.optional_float = kMostNegExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)

  # Most negative exponent, one significand bit set.
  kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
  message.optional_float = kMostNegExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)

  # Repeat last two cases with values of the same magnitude, but negative.
  message.optional_float = -kMostNegExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)

  message.optional_float = -kMostNegExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)

def testExtremeDoubleValues(self, message_module):
  """Boundary double-precision values survive serialize/parse round trips."""
  message = message_module.TestAllTypes()

  # Most positive exponent, no significand bits set.
  kMostPosExponentNoSigBits = math.pow(2, 1023)
  message.optional_double = kMostPosExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)

  # Most positive exponent, one significand bit set.
  kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
  message.optional_double = kMostPosExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)

  # Repeat last two cases with values of same magnitude, but negative.
  message.optional_double = -kMostPosExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)

  message.optional_double = -kMostPosExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)

  # Most negative exponent, no significand bits set.
  kMostNegExponentNoSigBits = math.pow(2, -1023)
  message.optional_double = kMostNegExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)

  # Most negative exponent, one significand bit set.
  kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
  message.optional_double = kMostNegExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)

  # Repeat last two cases with values of the same magnitude, but negative.
  message.optional_double = -kMostNegExponentNoSigBits
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)

  message.optional_double = -kMostNegExponentOneSigBit
  message.ParseFromString(message.SerializeToString())
  self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testFloatPrinting(self, message_module):
  """str() renders a set float field as text format."""
  message = message_module.TestAllTypes()
  message.optional_float = 2.0
  self.assertEqual(str(message), 'optional_float: 2.0\n')

def testHighPrecisionFloatPrinting(self, message_module):
  """Doubles print with full precision on PY3, repr-style precision on PY2."""
  message = message_module.TestAllTypes()
  message.optional_double = 0.12345678912345678
  if sys.version_info >= (3,):
    self.assertEqual(str(message), 'optional_double: 0.12345678912345678\n')
  else:
    self.assertEqual(str(message), 'optional_double: 0.123456789123\n')

def testUnknownFieldPrinting(self, message_module):
  """Unknown fields are omitted from str() output."""
  populated = message_module.TestAllTypes()
  test_util.SetAllNonLazyFields(populated)
  # Parsing TestAllTypes bytes as an empty message leaves everything unknown.
  empty = message_module.TestEmptyMessage()
  empty.ParseFromString(populated.SerializeToString())
  self.assertEqual(str(empty), '')

def testRepeatedNestedFieldIteration(self, message_module):
  """Repeated composite fields support forward, reversed, and slice iteration."""
  msg = message_module.TestAllTypes()
  msg.repeated_nested_message.add(bb=1)
  msg.repeated_nested_message.add(bb=2)
  msg.repeated_nested_message.add(bb=3)
  msg.repeated_nested_message.add(bb=4)

  self.assertEqual([1, 2, 3, 4],
                   [m.bb for m in msg.repeated_nested_message])
  self.assertEqual([4, 3, 2, 1],
                   [m.bb for m in reversed(msg.repeated_nested_message)])
  self.assertEqual([4, 3, 2, 1],
                   [m.bb for m in msg.repeated_nested_message[::-1]])
def testSortingRepeatedScalarFieldsDefaultComparator(self, message_module):
  """Check some different types with the default comparator."""
  message = message_module.TestAllTypes()

  # TODO(mattp): would testing more scalar types strengthen test?
  message.repeated_int32.append(1)
  message.repeated_int32.append(3)
  message.repeated_int32.append(2)
  message.repeated_int32.sort()
  self.assertEqual(message.repeated_int32[0], 1)
  self.assertEqual(message.repeated_int32[1], 2)
  self.assertEqual(message.repeated_int32[2], 3)

  message.repeated_float.append(1.1)
  message.repeated_float.append(1.3)
  message.repeated_float.append(1.2)
  message.repeated_float.sort()
  self.assertAlmostEqual(message.repeated_float[0], 1.1)
  self.assertAlmostEqual(message.repeated_float[1], 1.2)
  self.assertAlmostEqual(message.repeated_float[2], 1.3)

  message.repeated_string.append('a')
  message.repeated_string.append('c')
  message.repeated_string.append('b')
  message.repeated_string.sort()
  self.assertEqual(message.repeated_string[0], 'a')
  self.assertEqual(message.repeated_string[1], 'b')
  self.assertEqual(message.repeated_string[2], 'c')

  message.repeated_bytes.append(b'a')
  message.repeated_bytes.append(b'c')
  message.repeated_bytes.append(b'b')
  message.repeated_bytes.sort()
  self.assertEqual(message.repeated_bytes[0], b'a')
  self.assertEqual(message.repeated_bytes[1], b'b')
  self.assertEqual(message.repeated_bytes[2], b'c')

def testSortingRepeatedScalarFieldsCustomComparator(self, message_module):
  """Check some different types with custom comparator."""
  message = message_module.TestAllTypes()

  message.repeated_int32.append(-3)
  message.repeated_int32.append(-2)
  message.repeated_int32.append(-1)
  # Sorting by absolute value reverses the natural order of these values.
  message.repeated_int32.sort(key=abs)
  self.assertEqual(message.repeated_int32[0], -1)
  self.assertEqual(message.repeated_int32[1], -2)
  self.assertEqual(message.repeated_int32[2], -3)

  message.repeated_string.append('aaa')
  message.repeated_string.append('bb')
  message.repeated_string.append('c')
  message.repeated_string.sort(key=len)
  self.assertEqual(message.repeated_string[0], 'c')
  self.assertEqual(message.repeated_string[1], 'bb')
  self.assertEqual(message.repeated_string[2], 'aaa')

def testSortingRepeatedCompositeFieldsCustomComparator(self, message_module):
  """Check passing a custom comparator to sort a repeated composite field."""
  message = message_module.TestAllTypes()

  message.repeated_nested_message.add().bb = 1
  message.repeated_nested_message.add().bb = 3
  message.repeated_nested_message.add().bb = 2
  message.repeated_nested_message.add().bb = 6
  message.repeated_nested_message.add().bb = 5
  message.repeated_nested_message.add().bb = 4
  message.repeated_nested_message.sort(key=operator.attrgetter('bb'))
  self.assertEqual(message.repeated_nested_message[0].bb, 1)
  self.assertEqual(message.repeated_nested_message[1].bb, 2)
  self.assertEqual(message.repeated_nested_message[2].bb, 3)
  self.assertEqual(message.repeated_nested_message[3].bb, 4)
  self.assertEqual(message.repeated_nested_message[4].bb, 5)
  self.assertEqual(message.repeated_nested_message[5].bb, 6)

def testSortingRepeatedCompositeFieldsStable(self, message_module):
  """Check that sorting a repeated composite field is stable."""
  message = message_module.TestAllTypes()

  message.repeated_nested_message.add().bb = 21
  message.repeated_nested_message.add().bb = 20
  message.repeated_nested_message.add().bb = 13
  message.repeated_nested_message.add().bb = 33
  message.repeated_nested_message.add().bb = 11
  message.repeated_nested_message.add().bb = 24
  message.repeated_nested_message.add().bb = 10
  # The key collapses values into decades; elements within the same decade
  # must keep their original relative order (stable sort).
  message.repeated_nested_message.sort(key=lambda z: z.bb // 10)
  self.assertEqual(
      [13, 11, 10, 21, 20, 24, 33],
      [n.bb for n in message.repeated_nested_message])

  # Make sure that for the C++ implementation, the underlying fields
  # are actually reordered.
  pb = message.SerializeToString()
  message.Clear()
  message.MergeFromString(pb)
  self.assertEqual(
      [13, 11, 10, 21, 20, 24, 33],
      [n.bb for n in message.repeated_nested_message])
def testRepeatedCompositeFieldSortArguments(self, message_module):
  """Check sorting a repeated composite field using list.sort() arguments."""
  message = message_module.TestAllTypes()

  get_bb = operator.attrgetter('bb')
  # Defined unconditionally but only invoked on Python 2 (see guard below);
  # cmp does not exist on Python 3, and NameError only occurs at call time.
  cmp_bb = lambda a, b: cmp(a.bb, b.bb)
  message.repeated_nested_message.add().bb = 1
  message.repeated_nested_message.add().bb = 3
  message.repeated_nested_message.add().bb = 2
  message.repeated_nested_message.add().bb = 6
  message.repeated_nested_message.add().bb = 5
  message.repeated_nested_message.add().bb = 4
  message.repeated_nested_message.sort(key=get_bb)
  self.assertEqual([k.bb for k in message.repeated_nested_message],
                   [1, 2, 3, 4, 5, 6])
  message.repeated_nested_message.sort(key=get_bb, reverse=True)
  self.assertEqual([k.bb for k in message.repeated_nested_message],
                   [6, 5, 4, 3, 2, 1])
  if sys.version_info >= (3,): return  # No cmp sorting in PY3.
  message.repeated_nested_message.sort(sort_function=cmp_bb)
  self.assertEqual([k.bb for k in message.repeated_nested_message],
                   [1, 2, 3, 4, 5, 6])
  message.repeated_nested_message.sort(cmp=cmp_bb, reverse=True)
  self.assertEqual([k.bb for k in message.repeated_nested_message],
                   [6, 5, 4, 3, 2, 1])

def testRepeatedScalarFieldSortArguments(self, message_module):
  """Check sorting a scalar field using list.sort() arguments."""
  message = message_module.TestAllTypes()

  message.repeated_int32.append(-3)
  message.repeated_int32.append(-2)
  message.repeated_int32.append(-1)
  message.repeated_int32.sort(key=abs)
  self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
  message.repeated_int32.sort(key=abs, reverse=True)
  self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
  if sys.version_info < (3,):  # No cmp sorting in PY3.
    abs_cmp = lambda a, b: cmp(abs(a), abs(b))
    message.repeated_int32.sort(sort_function=abs_cmp)
    self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
    message.repeated_int32.sort(cmp=abs_cmp, reverse=True)
    self.assertEqual(list(message.repeated_int32), [-3, -2, -1])

  message.repeated_string.append('aaa')
  message.repeated_string.append('bb')
  message.repeated_string.append('c')
  message.repeated_string.sort(key=len)
  self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
  message.repeated_string.sort(key=len, reverse=True)
  self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
  if sys.version_info < (3,):  # No cmp sorting in PY3.
    len_cmp = lambda a, b: cmp(len(a), len(b))
    message.repeated_string.sort(sort_function=len_cmp)
    self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
    message.repeated_string.sort(cmp=len_cmp, reverse=True)
    self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
def testRepeatedFieldsComparable(self, message_module):
  """Repeated fields compare by content via cmp() (Python 2 only)."""
  m1 = message_module.TestAllTypes()
  m2 = message_module.TestAllTypes()
  m1.repeated_int32.append(0)
  m1.repeated_int32.append(1)
  m1.repeated_int32.append(2)
  m2.repeated_int32.append(0)
  m2.repeated_int32.append(1)
  m2.repeated_int32.append(2)
  m1.repeated_nested_message.add().bb = 1
  m1.repeated_nested_message.add().bb = 2
  m1.repeated_nested_message.add().bb = 3
  m2.repeated_nested_message.add().bb = 1
  m2.repeated_nested_message.add().bb = 2
  m2.repeated_nested_message.add().bb = 3

  if sys.version_info >= (3,): return  # No cmp() in PY3.

  # These comparisons should not raise errors.
  _ = m1 < m2
  _ = m1.repeated_nested_message < m2.repeated_nested_message

  # Make sure cmp always works. If it wasn't defined, these would be
  # id() comparisons and would all fail.
  self.assertEqual(cmp(m1, m2), 0)
  self.assertEqual(cmp(m1.repeated_int32, m2.repeated_int32), 0)
  self.assertEqual(cmp(m1.repeated_int32, [0, 1, 2]), 0)
  self.assertEqual(cmp(m1.repeated_nested_message,
                       m2.repeated_nested_message), 0)
  with self.assertRaises(TypeError):
    # Can't compare repeated composite containers to lists.
    cmp(m1.repeated_nested_message, m2.repeated_nested_message[:])

  # TODO(anuraag): Implement extensiondict comparison in C++ and then add test

def testRepeatedFieldsAreSequences(self, message_module):
  """Repeated fields implement the MutableSequence ABC."""
  m = message_module.TestAllTypes()
  self.assertIsInstance(m.repeated_int32, collections.MutableSequence)
  self.assertIsInstance(m.repeated_nested_message,
                        collections.MutableSequence)
def ensureNestedMessageExists(self, msg, attribute):
  """Make sure that a nested message object exists.

  As soon as a nested message attribute is accessed, it will be present in the
  _fields dict, without being marked as actually being set.  Asserts that
  HasField still reports the attribute as unset afterwards.
  """
  getattr(msg, attribute)
  self.assertFalse(msg.HasField(attribute))

def testOneofGetCaseNonexistingField(self, message_module):
  """WhichOneof raises ValueError for an unknown oneof name."""
  m = message_module.TestAllTypes()
  self.assertRaises(ValueError, m.WhichOneof, 'no_such_oneof_field')

def testOneofDefaultValues(self, message_module):
  """Assigning a oneof member its default value still marks it as set."""
  m = message_module.TestAllTypes()
  self.assertIs(None, m.WhichOneof('oneof_field'))
  self.assertFalse(m.HasField('oneof_uint32'))

  # Oneof is set even when setting it to a default value.
  m.oneof_uint32 = 0
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_uint32'))
  self.assertFalse(m.HasField('oneof_string'))

  m.oneof_string = ""
  self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_string'))
  self.assertFalse(m.HasField('oneof_uint32'))

def testOneofSemantics(self, message_module):
  """Setting one oneof member clears any previously set member."""
  m = message_module.TestAllTypes()
  self.assertIs(None, m.WhichOneof('oneof_field'))

  m.oneof_uint32 = 11
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_uint32'))

  m.oneof_string = u'foo'
  self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
  self.assertFalse(m.HasField('oneof_uint32'))
  self.assertTrue(m.HasField('oneof_string'))

  # Read nested message accessor without accessing submessage.
  m.oneof_nested_message
  self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_string'))
  self.assertFalse(m.HasField('oneof_nested_message'))

  # Read accessor of nested message without accessing submessage.
  m.oneof_nested_message.bb
  self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
  self.assertTrue(m.HasField('oneof_string'))
  self.assertFalse(m.HasField('oneof_nested_message'))

  m.oneof_nested_message.bb = 11
  self.assertEqual('oneof_nested_message', m.WhichOneof('oneof_field'))
  self.assertFalse(m.HasField('oneof_string'))
  self.assertTrue(m.HasField('oneof_nested_message'))

  m.oneof_bytes = b'bb'
  self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
  self.assertFalse(m.HasField('oneof_nested_message'))
  self.assertTrue(m.HasField('oneof_bytes'))
def testOneofCompositeFieldReadAccess(self, message_module):
  """Reading an unset composite oneof member must not switch the oneof."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11

  self.ensureNestedMessageExists(m, 'oneof_nested_message')
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
  self.assertEqual(11, m.oneof_uint32)

def testOneofWhichOneof(self, message_module):
  """WhichOneof tracks the currently set member, or None when unset."""
  m = message_module.TestAllTypes()
  self.assertIs(None, m.WhichOneof('oneof_field'))
  # HasField on the oneof name itself is proto2-only.
  if message_module is unittest_pb2:
    self.assertFalse(m.HasField('oneof_field'))

  m.oneof_uint32 = 11
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
  if message_module is unittest_pb2:
    self.assertTrue(m.HasField('oneof_field'))

  m.oneof_bytes = b'bb'
  self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))

  m.ClearField('oneof_bytes')
  self.assertIs(None, m.WhichOneof('oneof_field'))
  if message_module is unittest_pb2:
    self.assertFalse(m.HasField('oneof_field'))

def testOneofClearField(self, message_module):
  """ClearField on the oneof name clears whichever member was set."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  m.ClearField('oneof_field')
  if message_module is unittest_pb2:
    self.assertFalse(m.HasField('oneof_field'))
  self.assertFalse(m.HasField('oneof_uint32'))
  self.assertIs(None, m.WhichOneof('oneof_field'))

def testOneofClearSetField(self, message_module):
  """ClearField on the currently set member resets the whole oneof."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  m.ClearField('oneof_uint32')
  if message_module is unittest_pb2:
    self.assertFalse(m.HasField('oneof_field'))
  self.assertFalse(m.HasField('oneof_uint32'))
  self.assertIs(None, m.WhichOneof('oneof_field'))

def testOneofClearUnsetField(self, message_module):
  """ClearField on an unset member leaves the set member intact."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  self.ensureNestedMessageExists(m, 'oneof_nested_message')
  m.ClearField('oneof_nested_message')
  self.assertEqual(11, m.oneof_uint32)
  if message_module is unittest_pb2:
    self.assertTrue(m.HasField('oneof_field'))
  self.assertTrue(m.HasField('oneof_uint32'))
  self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
def testOneofDeserialize(self, message_module):
  """Oneof state survives a serialize/parse round trip."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  m2 = message_module.TestAllTypes()
  m2.ParseFromString(m.SerializeToString())
  self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))

def testOneofCopyFrom(self, message_module):
  """Oneof state survives CopyFrom."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  m2 = message_module.TestAllTypes()
  m2.CopyFrom(m)
  self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))

def testOneofNestedMergeFrom(self, message_module):
  """MergeFrom replaces the target's oneof only where the source sets one."""
  m = message_module.NestedTestAllTypes()
  m.payload.oneof_uint32 = 11
  m2 = message_module.NestedTestAllTypes()
  m2.payload.oneof_bytes = b'bb'
  m2.child.payload.oneof_bytes = b'bb'
  m2.MergeFrom(m)
  # payload was overwritten by the source; child was untouched.
  self.assertEqual('oneof_uint32', m2.payload.WhichOneof('oneof_field'))
  self.assertEqual('oneof_bytes', m2.child.payload.WhichOneof('oneof_field'))

def testOneofMessageMergeFrom(self, message_module):
  """Merging a message-typed oneof member replaces a scalar member."""
  m = message_module.NestedTestAllTypes()
  m.payload.oneof_nested_message.bb = 11
  m.child.payload.oneof_nested_message.bb = 12
  m2 = message_module.NestedTestAllTypes()
  m2.payload.oneof_uint32 = 13
  m2.MergeFrom(m)
  self.assertEqual('oneof_nested_message',
                   m2.payload.WhichOneof('oneof_field'))
  self.assertEqual('oneof_nested_message',
                   m2.child.payload.WhichOneof('oneof_field'))

def testOneofNestedMessageInit(self, message_module):
  """A constructor keyword for a message member selects the oneof."""
  m = message_module.TestAllTypes(
      oneof_nested_message=message_module.TestAllTypes.NestedMessage())
  self.assertEqual('oneof_nested_message', m.WhichOneof('oneof_field'))

def testOneofClear(self, message_module):
  """Clear() resets the oneof, which can then be set again."""
  m = message_module.TestAllTypes()
  m.oneof_uint32 = 11
  m.Clear()
  self.assertIsNone(m.WhichOneof('oneof_field'))
  m.oneof_bytes = b'bb'
  self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
def testAssignByteStringToUnicodeField(self, message_module):
  """Assigning a byte string to a string field should result
  in the value being converted to a Unicode string."""
  m = message_module.TestAllTypes()
  # On Python 2, str('') is a byte string; on Python 3 it is already text.
  m.optional_string = str('')
  self.assertIsInstance(m.optional_string, six.text_type)

def testLongValuedSlice(self, message_module):
  """It should be possible to use long-valued indices in slices.

  This didn't used to work in the v2 C++ implementation.
  """
  m = message_module.TestAllTypes()

  # Repeated scalar
  m.repeated_int32.append(1)
  sl = m.repeated_int32[long(0):long(len(m.repeated_int32))]
  self.assertEqual(len(m.repeated_int32), len(sl))

  # Repeated composite
  m.repeated_nested_message.add().bb = 3
  sl = m.repeated_nested_message[long(0):long(len(m.repeated_nested_message))]
  self.assertEqual(len(m.repeated_nested_message), len(sl))

def testExtendShouldNotSwallowExceptions(self, message_module):
  """Exceptions raised inside a generator passed to extend() propagate.

  This didn't use to work in the v2 C++ implementation.
  """
  m = message_module.TestAllTypes()
  with self.assertRaises(NameError) as _:
    m.repeated_int32.extend(a for i in range(10))  # pylint: disable=undefined-variable
  with self.assertRaises(NameError) as _:
    m.repeated_nested_enum.extend(
        a for i in range(10))  # pylint: disable=undefined-variable
FALSY_VALUES = [None, False, 0, 0.0, b'', u'', bytearray(), [], {}, set()]
def testExtendInt32WithNothing(self, message_module):
  """Test no-ops extending repeated int32 fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_int32)

  # TODO(ptucker): Deprecate this behavior. b/18413862
  # Extending with any falsy value currently leaves the field unchanged.
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_int32.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_int32)

  m.repeated_int32.extend([])
  self.assertSequenceEqual([], m.repeated_int32)
def testExtendFloatWithNothing(self, message_module):
  """Test no-ops extending repeated float fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_float)

  # TODO(ptucker): Deprecate this behavior. b/18413862
  # Extending with any falsy value currently leaves the field unchanged.
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_float.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_float)

  m.repeated_float.extend([])
  self.assertSequenceEqual([], m.repeated_float)
def testExtendStringWithNothing(self, message_module):
  """Test no-ops extending repeated string fields."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)

  # TODO(ptucker): Deprecate this behavior. b/18413862
  # Extending with any falsy value currently leaves the field unchanged.
  for falsy_value in MessageTest.FALSY_VALUES:
    m.repeated_string.extend(falsy_value)
    self.assertSequenceEqual([], m.repeated_string)

  m.repeated_string.extend([])
  self.assertSequenceEqual([], m.repeated_string)
def testExtendInt32WithPythonList(self, message_module):
  """Test extending repeated int32 fields with python lists."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_int32)
  # Successive extends must accumulate, preserving order.
  m.repeated_int32.extend([0])
  self.assertSequenceEqual([0], m.repeated_int32)
  m.repeated_int32.extend([1, 2])
  self.assertSequenceEqual([0, 1, 2], m.repeated_int32)
  m.repeated_int32.extend([3, 4])
  self.assertSequenceEqual([0, 1, 2, 3, 4], m.repeated_int32)
def testExtendFloatWithPythonList(self, message_module):
  """Test extending repeated float fields with python lists."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_float)
  # Successive extends must accumulate, preserving order.
  m.repeated_float.extend([0.0])
  self.assertSequenceEqual([0.0], m.repeated_float)
  m.repeated_float.extend([1.0, 2.0])
  self.assertSequenceEqual([0.0, 1.0, 2.0], m.repeated_float)
  m.repeated_float.extend([3.0, 4.0])
  self.assertSequenceEqual([0.0, 1.0, 2.0, 3.0, 4.0], m.repeated_float)
def testExtendStringWithPythonList(self, message_module):
  """Test extending repeated string fields with python lists."""
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)
  # Successive extends must accumulate, preserving order.
  m.repeated_string.extend([''])
  self.assertSequenceEqual([''], m.repeated_string)
  m.repeated_string.extend(['11', '22'])
  self.assertSequenceEqual(['', '11', '22'], m.repeated_string)
  m.repeated_string.extend(['33', '44'])
  self.assertSequenceEqual(['', '11', '22', '33', '44'], m.repeated_string)
def testExtendStringWithString(self, message_module):
  """Extending a repeated string field with a str appends its characters."""
  msg = message_module.TestAllTypes()
  self.assertSequenceEqual([], msg.repeated_string)
  # A plain string is iterable, so each character becomes one element.
  msg.repeated_string.extend('abc')
  self.assertSequenceEqual(['a', 'b', 'c'], msg.repeated_string)
class TestIterable(object):
  """Iterable that mimics the truthiness behavior of numpy.array.

  Truth testing via __nonzero__ raises ValueError when there is more
  than one element, and returns bool(item[0]) for exactly one element.
  """

  def __init__(self, values=None):
    # Falsy 'values' (None, empty list, ...) yields an empty backing list.
    self._list = values if values else []

  def __nonzero__(self):
    count = len(self._list)
    if count == 0:
      return False
    if count == 1:
      return bool(self._list[0])
    raise ValueError('Truth value is ambiguous.')

  def __len__(self):
    return len(self._list)

  def __iter__(self):
    return iter(self._list)
def testExtendInt32WithIterable(self, message_module):
  """Test extending repeated int32 fields with iterable."""
  # Uses TestIterable (above), whose truth value raises for len > 1,
  # to ensure extend() iterates rather than truth-testing its argument.
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([0]))
  self.assertSequenceEqual([0], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([1, 2]))
  self.assertSequenceEqual([0, 1, 2], m.repeated_int32)
  m.repeated_int32.extend(MessageTest.TestIterable([3, 4]))
  self.assertSequenceEqual([0, 1, 2, 3, 4], m.repeated_int32)
def testExtendFloatWithIterable(self, message_module):
  """Test extending repeated float fields with iterable."""
  # Uses TestIterable (above) so extend() must iterate, not truth-test.
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([0.0]))
  self.assertSequenceEqual([0.0], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([1.0, 2.0]))
  self.assertSequenceEqual([0.0, 1.0, 2.0], m.repeated_float)
  m.repeated_float.extend(MessageTest.TestIterable([3.0, 4.0]))
  self.assertSequenceEqual([0.0, 1.0, 2.0, 3.0, 4.0], m.repeated_float)
def testExtendStringWithIterable(self, message_module):
  """Test extending repeated string fields with iterable."""
  # Uses TestIterable (above) so extend() must iterate, not truth-test.
  m = message_module.TestAllTypes()
  self.assertSequenceEqual([], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable([]))
  self.assertSequenceEqual([], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['']))
  self.assertSequenceEqual([''], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['1', '2']))
  self.assertSequenceEqual(['', '1', '2'], m.repeated_string)
  m.repeated_string.extend(MessageTest.TestIterable(['3', '4']))
  self.assertSequenceEqual(['', '1', '2', '3', '4'], m.repeated_string)
def testPickleRepeatedScalarContainer(self, message_module):
  """Pickling a repeated scalar container raises under the cpp (v1) impl."""
  # TODO(tibell): The pure-Python implementation support pickling of
  #   scalar containers in *some* cases. For now the cpp2 version
  #   throws an exception to avoid a segfault. Investigate if we
  #   want to support pickling of these fields.
  #
  # For more information see: https://b2.corp.google.com/u/0/issues/18677897
  # Only exercised on the C++ v1 implementation; other configs skip.
  if (api_implementation.Type() != 'cpp' or
      api_implementation.Version() == 2):
    return
  m = message_module.TestAllTypes()
  with self.assertRaises(pickle.PickleError) as _:
    pickle.dumps(m.repeated_int32, pickle.HIGHEST_PROTOCOL)
def testSortEmptyRepeatedCompositeContainer(self, message_module):
  """Exercise a scenario that has led to segfaults in the past."""
  # Sorting an empty composite container must simply be a no-op.
  msg = message_module.TestAllTypes()
  msg.repeated_nested_message.sort()
def testHasFieldOnRepeatedField(self, message_module):
  """Using HasField on a repeated field should raise an exception."""
  msg = message_module.TestAllTypes()
  # Presence is undefined for repeated fields, hence ValueError.
  with self.assertRaises(ValueError) as _:
    msg.HasField('repeated_int32')
def testRepeatedScalarFieldPop(self, message_module):
  """pop() on an empty repeated scalar raises IndexError; otherwise it
  removes and returns the element at the given index (default: last)."""
  m = message_module.TestAllTypes()
  with self.assertRaises(IndexError) as _:
    m.repeated_int32.pop()
  m.repeated_int32.extend(range(5))       # field is now [0, 1, 2, 3, 4]
  self.assertEqual(4, m.repeated_int32.pop())    # pops the last element
  self.assertEqual(0, m.repeated_int32.pop(0))   # pops by explicit index
  self.assertEqual(2, m.repeated_int32.pop(1))
  self.assertEqual([1, 3], m.repeated_int32)
def testRepeatedCompositeFieldPop(self, message_module):
  """Same pop() contract as for scalars, but for message-valued fields."""
  m = message_module.TestAllTypes()
  with self.assertRaises(IndexError) as _:
    m.repeated_nested_message.pop()
  for i in range(5):
    n = m.repeated_nested_message.add()
    n.bb = i
  self.assertEqual(4, m.repeated_nested_message.pop().bb)    # last
  self.assertEqual(0, m.repeated_nested_message.pop(0).bb)   # by index
  self.assertEqual(2, m.repeated_nested_message.pop(1).bb)
  self.assertEqual([1, 3], [n.bb for n in m.repeated_nested_message])
# Class to test proto2-only features (required, extensions, etc.)
class Proto2Test(unittest.TestCase):
  """Tests proto2-only semantics: explicit field presence, extensions,
  required fields, closed enums, and the keyword-argument constructor."""

  def testFieldPresence(self):
    """HasField/ClearField track presence for singular fields only."""
    message = unittest_pb2.TestAllTypes()

    self.assertFalse(message.HasField("optional_int32"))
    self.assertFalse(message.HasField("optional_bool"))
    self.assertFalse(message.HasField("optional_nested_message"))

    # HasField() is invalid for unknown and for repeated fields.
    with self.assertRaises(ValueError):
      message.HasField("field_doesnt_exist")

    with self.assertRaises(ValueError):
      message.HasField("repeated_int32")
    with self.assertRaises(ValueError):
      message.HasField("repeated_nested_message")

    self.assertEqual(0, message.optional_int32)
    self.assertEqual(False, message.optional_bool)
    self.assertEqual(0, message.optional_nested_message.bb)

    # Fields are set even when setting the values to default values.
    message.optional_int32 = 0
    message.optional_bool = False
    message.optional_nested_message.bb = 0
    self.assertTrue(message.HasField("optional_int32"))
    self.assertTrue(message.HasField("optional_bool"))
    self.assertTrue(message.HasField("optional_nested_message"))

    # Set the fields to non-default values.
    message.optional_int32 = 5
    message.optional_bool = True
    message.optional_nested_message.bb = 15

    self.assertTrue(message.HasField("optional_int32"))
    self.assertTrue(message.HasField("optional_bool"))
    self.assertTrue(message.HasField("optional_nested_message"))

    # Clearing the fields unsets them and resets their value to default.
    message.ClearField("optional_int32")
    message.ClearField("optional_bool")
    message.ClearField("optional_nested_message")
    self.assertFalse(message.HasField("optional_int32"))
    self.assertFalse(message.HasField("optional_bool"))
    self.assertFalse(message.HasField("optional_nested_message"))
    self.assertEqual(0, message.optional_int32)
    self.assertEqual(False, message.optional_bool)
    self.assertEqual(0, message.optional_nested_message.bb)

  # TODO(tibell): The C++ implementations actually allows assignment
  # of unknown enum values to *scalar* fields (but not repeated
  # fields). Once checked enum fields becomes the default in the
  # Python implementation, the C++ implementation should follow suit.
  def testAssignInvalidEnum(self):
    """It should not be possible to assign an invalid enum number to an
    enum field."""
    m = unittest_pb2.TestAllTypes()
    with self.assertRaises(ValueError) as _:
      m.optional_nested_enum = 1234567
    self.assertRaises(ValueError, m.repeated_nested_enum.append, 1234567)

  def testGoldenExtensions(self):
    """Golden wire data parses equal to an all-extensions-set message and
    reserializes byte-for-byte (also after deepcopy)."""
    golden_data = test_util.GoldenFileData('golden_message')
    golden_message = unittest_pb2.TestAllExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestAllExtensions()
    test_util.SetAllExtensions(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, golden_message.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testGoldenPackedExtensions(self):
    """Same round-trip check as above, for packed extensions."""
    golden_data = test_util.GoldenFileData('golden_packed_fields_message')
    golden_message = unittest_pb2.TestPackedExtensions()
    golden_message.ParseFromString(golden_data)
    all_set = unittest_pb2.TestPackedExtensions()
    test_util.SetAllPackedExtensions(all_set)
    self.assertEqual(all_set, golden_message)
    self.assertEqual(golden_data, all_set.SerializeToString())
    golden_copy = copy.deepcopy(golden_message)
    self.assertEqual(golden_data, golden_copy.SerializeToString())

  def testPickleIncompleteProto(self):
    """A message missing required fields pickles/unpickles fine, but still
    cannot be serialized."""
    golden_message = unittest_pb2.TestRequired(a=1)
    pickled_message = pickle.dumps(golden_message)

    unpickled_message = pickle.loads(pickled_message)
    self.assertEqual(unpickled_message, golden_message)
    self.assertEqual(unpickled_message.a, 1)
    # This is still an incomplete proto - so serializing should fail
    self.assertRaises(message.EncodeError, unpickled_message.SerializeToString)

  # TODO(haberman): this isn't really a proto2-specific test except that this
  # message has a required field in it.  Should probably be factored out so
  # that we can test the other parts with proto3.
  def testParsingMerge(self):
    """Check the merge behavior when a required or optional field appears
    multiple times in the input."""
    messages = [
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes(),
        unittest_pb2.TestAllTypes()]
    messages[0].optional_int32 = 1
    messages[1].optional_int64 = 2
    messages[2].optional_int32 = 3
    messages[2].optional_string = 'hello'

    # Expected result of merging the three messages above: later values of
    # a singular field win, earlier-set distinct fields are preserved.
    merged_message = unittest_pb2.TestAllTypes()
    merged_message.optional_int32 = 3
    merged_message.optional_int64 = 2
    merged_message.optional_string = 'hello'

    generator = unittest_pb2.TestParsingMerge.RepeatedFieldsGenerator()
    generator.field1.extend(messages)
    generator.field2.extend(messages)
    generator.field3.extend(messages)
    generator.ext1.extend(messages)
    generator.ext2.extend(messages)
    generator.group1.add().field1.MergeFrom(messages[0])
    generator.group1.add().field1.MergeFrom(messages[1])
    generator.group1.add().field1.MergeFrom(messages[2])
    generator.group2.add().field1.MergeFrom(messages[0])
    generator.group2.add().field1.MergeFrom(messages[1])
    generator.group2.add().field1.MergeFrom(messages[2])

    data = generator.SerializeToString()
    parsing_merge = unittest_pb2.TestParsingMerge()
    parsing_merge.ParseFromString(data)

    # Required and optional fields should be merged.
    self.assertEqual(parsing_merge.required_all_types, merged_message)
    self.assertEqual(parsing_merge.optional_all_types, merged_message)
    self.assertEqual(parsing_merge.optionalgroup.optional_group_all_types,
                     merged_message)
    self.assertEqual(parsing_merge.Extensions[
                     unittest_pb2.TestParsingMerge.optional_ext],
                     merged_message)

    # Repeated fields should not be merged.
    self.assertEqual(len(parsing_merge.repeated_all_types), 3)
    self.assertEqual(len(parsing_merge.repeatedgroup), 3)
    self.assertEqual(len(parsing_merge.Extensions[
        unittest_pb2.TestParsingMerge.repeated_ext]), 3)

  def testPythonicInit(self):
    """The keyword-argument constructor accepts scalars, dicts for
    submessages/groups, enum names as strings, and lists for repeated
    fields; invalid field names/values/labels raise."""
    message = unittest_pb2.TestAllTypes(
        optional_int32=100,
        optional_fixed32=200,
        optional_float=300.5,
        optional_bytes=b'x',
        optionalgroup={'a': 400},
        optional_nested_message={'bb': 500},
        optional_nested_enum='BAZ',
        repeatedgroup=[{'a': 600},
                       {'a': 700}],
        repeated_nested_enum=['FOO', unittest_pb2.TestAllTypes.BAR],
        default_int32=800,
        oneof_string='y')
    self.assertIsInstance(message, unittest_pb2.TestAllTypes)
    self.assertEqual(100, message.optional_int32)
    self.assertEqual(200, message.optional_fixed32)
    self.assertEqual(300.5, message.optional_float)
    self.assertEqual(b'x', message.optional_bytes)
    self.assertEqual(400, message.optionalgroup.a)
    self.assertIsInstance(message.optional_nested_message,
                          unittest_pb2.TestAllTypes.NestedMessage)
    self.assertEqual(500, message.optional_nested_message.bb)
    self.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                     message.optional_nested_enum)
    self.assertEqual(2, len(message.repeatedgroup))
    self.assertEqual(600, message.repeatedgroup[0].a)
    self.assertEqual(700, message.repeatedgroup[1].a)
    self.assertEqual(2, len(message.repeated_nested_enum))
    self.assertEqual(unittest_pb2.TestAllTypes.FOO,
                     message.repeated_nested_enum[0])
    self.assertEqual(unittest_pb2.TestAllTypes.BAR,
                     message.repeated_nested_enum[1])
    self.assertEqual(800, message.default_int32)
    self.assertEqual('y', message.oneof_string)
    # Unmentioned fields keep default presence/values.
    self.assertFalse(message.HasField('optional_int64'))
    self.assertEqual(0, len(message.repeated_float))
    self.assertEqual(42, message.default_int64)

    message = unittest_pb2.TestAllTypes(optional_nested_enum=u'BAZ')
    self.assertEqual(unittest_pb2.TestAllTypes.BAZ,
                     message.optional_nested_enum)

    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(
          optional_nested_message={'INVALID_NESTED_FIELD': 17})

    with self.assertRaises(TypeError):
      unittest_pb2.TestAllTypes(
          optional_nested_message={'bb': 'INVALID_VALUE_TYPE'})

    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(optional_nested_enum='INVALID_LABEL')

    with self.assertRaises(ValueError):
      unittest_pb2.TestAllTypes(repeated_nested_enum='FOO')
# Class to test proto3-only features/behavior (updated field presence & enums)
class Proto3Test(unittest.TestCase):
  """Tests proto3-only features/behavior: updated field presence, open
  enums, map fields, and Any packing/unpacking."""

  # Utility method for comparing equality with a map.
  def assertMapIterEquals(self, map_iter, dict_value):
    """Assert that iterating map_iter yields exactly dict_value's items."""
    # Avoid mutating caller's copy.
    dict_value = dict(dict_value)

    for k, v in map_iter:
      self.assertEqual(v, dict_value[k])
      del dict_value[k]

    self.assertEqual({}, dict_value)

  def testFieldPresence(self):
    """In proto3, HasField only applies to submessage fields."""
    message = unittest_proto3_arena_pb2.TestAllTypes()

    # We can't test presence of non-repeated, non-submessage fields.
    with self.assertRaises(ValueError):
      message.HasField('optional_int32')
    with self.assertRaises(ValueError):
      message.HasField('optional_float')
    with self.assertRaises(ValueError):
      message.HasField('optional_string')
    with self.assertRaises(ValueError):
      message.HasField('optional_bool')

    # But we can still test presence of submessage fields.
    self.assertFalse(message.HasField('optional_nested_message'))

    # As with proto2, we can't test presence of fields that don't exist, or
    # repeated fields.
    with self.assertRaises(ValueError):
      message.HasField('field_doesnt_exist')

    with self.assertRaises(ValueError):
      message.HasField('repeated_int32')
    with self.assertRaises(ValueError):
      message.HasField('repeated_nested_message')

    # Fields should default to their type-specific default.
    self.assertEqual(0, message.optional_int32)
    self.assertEqual(0, message.optional_float)
    self.assertEqual('', message.optional_string)
    self.assertEqual(False, message.optional_bool)
    self.assertEqual(0, message.optional_nested_message.bb)

    # Setting a submessage should still return proper presence information.
    message.optional_nested_message.bb = 0
    self.assertTrue(message.HasField('optional_nested_message'))

    # Set the fields to non-default values.
    message.optional_int32 = 5
    message.optional_float = 1.1
    message.optional_string = 'abc'
    message.optional_bool = True
    message.optional_nested_message.bb = 15

    # Clearing the fields unsets them and resets their value to default.
    message.ClearField('optional_int32')
    message.ClearField('optional_float')
    message.ClearField('optional_string')
    message.ClearField('optional_bool')
    message.ClearField('optional_nested_message')
    self.assertEqual(0, message.optional_int32)
    self.assertEqual(0, message.optional_float)
    self.assertEqual('', message.optional_string)
    self.assertEqual(False, message.optional_bool)
    self.assertEqual(0, message.optional_nested_message.bb)

  def testAssignUnknownEnum(self):
    """Assigning an unknown enum value is allowed and preserves the value."""
    m = unittest_proto3_arena_pb2.TestAllTypes()

    m.optional_nested_enum = 1234567
    self.assertEqual(1234567, m.optional_nested_enum)
    m.repeated_nested_enum.append(22334455)
    self.assertEqual(22334455, m.repeated_nested_enum[0])
    # Assignment is a different code path than append for the C++ impl.
    m.repeated_nested_enum[0] = 7654321
    self.assertEqual(7654321, m.repeated_nested_enum[0])
    serialized = m.SerializeToString()

    # Unknown enum values must survive a serialize/parse round trip.
    m2 = unittest_proto3_arena_pb2.TestAllTypes()
    m2.ParseFromString(serialized)
    self.assertEqual(1234567, m2.optional_nested_enum)
    self.assertEqual(7654321, m2.repeated_nested_enum[0])

  # Map isn't really a proto3-only feature. But there is no proto2 equivalent
  # of google/protobuf/map_unittest.proto right now, so it's not easy to
  # test both with the same test like we do for the other proto2/proto3 tests.
  # (google/protobuf/map_protobuf_unittest.proto is very different in the set
  # of messages and fields it contains).
  def testScalarMapDefaults(self):
    """Reading an unset map key returns the default AND inserts the key
    (defaultdict-like semantics); wrong key types raise TypeError."""
    msg = map_unittest_pb2.TestMap()

    # Scalars start out unset.
    self.assertFalse(-123 in msg.map_int32_int32)
    self.assertFalse(-2**33 in msg.map_int64_int64)
    self.assertFalse(123 in msg.map_uint32_uint32)
    self.assertFalse(2**33 in msg.map_uint64_uint64)
    self.assertFalse('abc' in msg.map_string_string)
    self.assertFalse(888 in msg.map_int32_enum)

    # Accessing an unset key returns the default.
    self.assertEqual(0, msg.map_int32_int32[-123])
    self.assertEqual(0, msg.map_int64_int64[-2**33])
    self.assertEqual(0, msg.map_uint32_uint32[123])
    self.assertEqual(0, msg.map_uint64_uint64[2**33])
    self.assertEqual('', msg.map_string_string['abc'])
    self.assertEqual(0, msg.map_int32_enum[888])

    # It also sets the value in the map
    self.assertTrue(-123 in msg.map_int32_int32)
    self.assertTrue(-2**33 in msg.map_int64_int64)
    self.assertTrue(123 in msg.map_uint32_uint32)
    self.assertTrue(2**33 in msg.map_uint64_uint64)
    self.assertTrue('abc' in msg.map_string_string)
    self.assertTrue(888 in msg.map_int32_enum)

    self.assertIsInstance(msg.map_string_string['abc'], six.text_type)

    # Accessing an unset key still throws TypeError if the type of the key
    # is incorrect.
    with self.assertRaises(TypeError):
      msg.map_string_string[123]

    with self.assertRaises(TypeError):
      123 in msg.map_string_string

  def testMapGet(self):
    # Need to test that get() properly returns the default, even though the dict
    # has defaultdict-like semantics.
    msg = map_unittest_pb2.TestMap()

    self.assertIsNone(msg.map_int32_int32.get(5))
    self.assertEqual(10, msg.map_int32_int32.get(5, 10))
    # Unlike [], get() must not insert the key.
    self.assertIsNone(msg.map_int32_int32.get(5))

    msg.map_int32_int32[5] = 15
    self.assertEqual(15, msg.map_int32_int32.get(5))

    self.assertIsNone(msg.map_int32_foreign_message.get(5))
    self.assertEqual(10, msg.map_int32_foreign_message.get(5, 10))

    submsg = msg.map_int32_foreign_message[5]
    self.assertIs(submsg, msg.map_int32_foreign_message.get(5))

  def testScalarMap(self):
    """Set/serialize/parse round trip for all scalar map key/value types;
    bad key or value types raise TypeError without mutating the map."""
    msg = map_unittest_pb2.TestMap()

    self.assertEqual(0, len(msg.map_int32_int32))
    self.assertFalse(5 in msg.map_int32_int32)

    msg.map_int32_int32[-123] = -456
    msg.map_int64_int64[-2**33] = -2**34
    msg.map_uint32_uint32[123] = 456
    msg.map_uint64_uint64[2**33] = 2**34
    msg.map_string_string['abc'] = '123'
    msg.map_int32_enum[888] = 2
    self.assertEqual([], msg.FindInitializationErrors())

    self.assertEqual(1, len(msg.map_string_string))

    # Bad key.
    with self.assertRaises(TypeError):
      msg.map_string_string[123] = '123'

    # Verify that trying to assign a bad key doesn't actually add a member to
    # the map.
    self.assertEqual(1, len(msg.map_string_string))

    # Bad value.
    with self.assertRaises(TypeError):
      msg.map_string_string['123'] = 123

    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMap()
    msg2.ParseFromString(serialized)

    # Bad key.
    with self.assertRaises(TypeError):
      msg2.map_string_string[123] = '123'

    # Bad value.
    with self.assertRaises(TypeError):
      msg2.map_string_string['123'] = 123

    self.assertEqual(-456, msg2.map_int32_int32[-123])
    self.assertEqual(-2**34, msg2.map_int64_int64[-2**33])
    self.assertEqual(456, msg2.map_uint32_uint32[123])
    self.assertEqual(2**34, msg2.map_uint64_uint64[2**33])
    self.assertEqual('123', msg2.map_string_string['abc'])
    self.assertEqual(2, msg2.map_int32_enum[888])

  def testStringUnicodeConversionInMap(self):
    """Byte-string keys/values of a string map come back as Unicode text."""
    msg = map_unittest_pb2.TestMap()

    unicode_obj = u'\u1234'
    bytes_obj = unicode_obj.encode('utf8')

    msg.map_string_string[bytes_obj] = bytes_obj

    (key, value) = list(msg.map_string_string.items())[0]

    self.assertEqual(key, unicode_obj)
    self.assertEqual(value, unicode_obj)

    self.assertIsInstance(key, six.text_type)
    self.assertIsInstance(value, six.text_type)

  def testMessageMap(self):
    """Message-valued maps auto-create entries on access; direct submessage
    assignment and bad key types are rejected."""
    msg = map_unittest_pb2.TestMap()

    self.assertEqual(0, len(msg.map_int32_foreign_message))
    self.assertFalse(5 in msg.map_int32_foreign_message)

    # Plain [] access creates the entry.
    msg.map_int32_foreign_message[123]
    # get_or_create() is an alias for getitem.
    msg.map_int32_foreign_message.get_or_create(-456)
    self.assertEqual(2, len(msg.map_int32_foreign_message))
    self.assertIn(123, msg.map_int32_foreign_message)
    self.assertIn(-456, msg.map_int32_foreign_message)
    self.assertEqual(2, len(msg.map_int32_foreign_message))

    # Bad key.
    with self.assertRaises(TypeError):
      msg.map_int32_foreign_message['123']

    # Can't assign directly to submessage.
    with self.assertRaises(ValueError):
      msg.map_int32_foreign_message[999] = msg.map_int32_foreign_message[123]

    # Verify that trying to assign a bad key doesn't actually add a member to
    # the map.
    self.assertEqual(2, len(msg.map_int32_foreign_message))

    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMap()
    msg2.ParseFromString(serialized)

    self.assertEqual(2, len(msg2.map_int32_foreign_message))
    self.assertIn(123, msg2.map_int32_foreign_message)
    self.assertIn(-456, msg2.map_int32_foreign_message)
    self.assertEqual(2, len(msg2.map_int32_foreign_message))

  def testMergeFrom(self):
    """MergeFrom overwrites colliding map keys and keeps distinct ones,
    ending with exactly one entry per key."""
    msg = map_unittest_pb2.TestMap()
    msg.map_int32_int32[12] = 34
    msg.map_int32_int32[56] = 78
    msg.map_int64_int64[22] = 33
    msg.map_int32_foreign_message[111].c = 5
    msg.map_int32_foreign_message[222].c = 10

    msg2 = map_unittest_pb2.TestMap()
    msg2.map_int32_int32[12] = 55
    msg2.map_int64_int64[88] = 99
    msg2.map_int32_foreign_message[222].c = 15

    msg2.MergeFrom(msg)

    self.assertEqual(34, msg2.map_int32_int32[12])
    self.assertEqual(78, msg2.map_int32_int32[56])
    self.assertEqual(33, msg2.map_int64_int64[22])
    self.assertEqual(99, msg2.map_int64_int64[88])
    self.assertEqual(5, msg2.map_int32_foreign_message[111].c)
    self.assertEqual(10, msg2.map_int32_foreign_message[222].c)

    # Verify that there is only one entry per key, even though the MergeFrom
    # may have internally created multiple entries for a single key in the
    # list representation.
    as_dict = {}
    for key in msg2.map_int32_foreign_message:
      self.assertFalse(key in as_dict)
      as_dict[key] = msg2.map_int32_foreign_message[key].c

    self.assertEqual({111: 5, 222: 10}, as_dict)

    # Special case: test that delete of item really removes the item, even if
    # there might have physically been duplicate keys due to the previous merge.
    # This is only a special case for the C++ implementation which stores the
    # map as an array.
    del msg2.map_int32_int32[12]
    self.assertFalse(12 in msg2.map_int32_int32)

    del msg2.map_int32_foreign_message[222]
    self.assertFalse(222 in msg2.map_int32_foreign_message)

  def testIntegerMapWithLongs(self):
    """long-typed keys/values (Python 2) round-trip like ints."""
    msg = map_unittest_pb2.TestMap()
    msg.map_int32_int32[long(-123)] = long(-456)
    msg.map_int64_int64[long(-2**33)] = long(-2**34)
    msg.map_uint32_uint32[long(123)] = long(456)
    msg.map_uint64_uint64[long(2**33)] = long(2**34)

    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMap()
    msg2.ParseFromString(serialized)

    self.assertEqual(-456, msg2.map_int32_int32[-123])
    self.assertEqual(-2**34, msg2.map_int64_int64[-2**33])
    self.assertEqual(456, msg2.map_uint32_uint32[123])
    self.assertEqual(2**34, msg2.map_uint64_uint64[2**33])

  def testMapAssignmentCausesPresence(self):
    """Mutating a scalar map marks the containing submessage present and
    invalidates any cached serialized size."""
    msg = map_unittest_pb2.TestMapSubmessage()
    msg.test_map.map_int32_int32[123] = 456

    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMapSubmessage()
    msg2.ParseFromString(serialized)

    self.assertEqual(msg, msg2)

    # Now test that various mutations of the map properly invalidate the
    # cached size of the submessage.
    msg.test_map.map_int32_int32[888] = 999
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)

    msg.test_map.map_int32_int32.clear()
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)

  def testMapAssignmentCausesPresenceForSubmessages(self):
    """Same presence/cached-size checks for message-valued maps."""
    msg = map_unittest_pb2.TestMapSubmessage()
    msg.test_map.map_int32_foreign_message[123].c = 5

    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMapSubmessage()
    msg2.ParseFromString(serialized)

    self.assertEqual(msg, msg2)

    # Now test that various mutations of the map properly invalidate the
    # cached size of the submessage.
    msg.test_map.map_int32_foreign_message[888].c = 7
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)

    msg.test_map.map_int32_foreign_message[888].MergeFrom(
        msg.test_map.map_int32_foreign_message[123])
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)

    msg.test_map.map_int32_foreign_message.clear()
    serialized = msg.SerializeToString()
    msg2.ParseFromString(serialized)
    self.assertEqual(msg, msg2)

  def testModifyMapWhileIterating(self):
    """Mutating a map invalidates live iterators (RuntimeError)."""
    msg = map_unittest_pb2.TestMap()

    string_string_iter = iter(msg.map_string_string)
    int32_foreign_iter = iter(msg.map_int32_foreign_message)

    msg.map_string_string['abc'] = '123'
    msg.map_int32_foreign_message[5].c = 5

    with self.assertRaises(RuntimeError):
      for key in string_string_iter:
        pass

    with self.assertRaises(RuntimeError):
      for key in int32_foreign_iter:
        pass

  def testSubmessageMap(self):
    """[] on a message map returns a stable, live submessage reference."""
    msg = map_unittest_pb2.TestMap()

    submsg = msg.map_int32_foreign_message[111]
    self.assertIs(submsg, msg.map_int32_foreign_message[111])
    self.assertIsInstance(submsg, unittest_pb2.ForeignMessage)

    submsg.c = 5

    serialized = msg.SerializeToString()
    msg2 = map_unittest_pb2.TestMap()
    msg2.ParseFromString(serialized)

    self.assertEqual(5, msg2.map_int32_foreign_message[111].c)

    # Doesn't allow direct submessage assignment.
    with self.assertRaises(ValueError):
      msg.map_int32_foreign_message[88] = unittest_pb2.ForeignMessage()

  def testMapIteration(self):
    """items() yields exactly the inserted key/value pairs."""
    msg = map_unittest_pb2.TestMap()

    for k, v in msg.map_int32_int32.items():
      # Should not be reached.
      self.assertTrue(False)

    msg.map_int32_int32[2] = 4
    msg.map_int32_int32[3] = 6
    msg.map_int32_int32[4] = 8
    self.assertEqual(3, len(msg.map_int32_int32))

    matching_dict = {2: 4, 3: 6, 4: 8}
    self.assertMapIterEquals(msg.map_int32_int32.items(), matching_dict)

  def testMapIterationClearMessage(self):
    # Iterator needs to work even if message and map are deleted.
    msg = map_unittest_pb2.TestMap()

    msg.map_int32_int32[2] = 4
    msg.map_int32_int32[3] = 6
    msg.map_int32_int32[4] = 8

    it = msg.map_int32_int32.items()
    del msg

    matching_dict = {2: 4, 3: 6, 4: 8}
    self.assertMapIterEquals(it, matching_dict)

  def testMapConstruction(self):
    """Map fields can be initialized from dicts in the constructor."""
    msg = map_unittest_pb2.TestMap(map_int32_int32={1: 2, 3: 4})
    self.assertEqual(2, msg.map_int32_int32[1])
    self.assertEqual(4, msg.map_int32_int32[3])

    msg = map_unittest_pb2.TestMap(
        map_int32_foreign_message={3: unittest_pb2.ForeignMessage(c=5)})
    self.assertEqual(5, msg.map_int32_foreign_message[3].c)

  def testMapValidAfterFieldCleared(self):
    # Map needs to work even if field is cleared.
    # For the C++ implementation this tests the correctness of
    # ScalarMapContainer::Release()
    msg = map_unittest_pb2.TestMap()
    int32_map = msg.map_int32_int32

    int32_map[2] = 4
    int32_map[3] = 6
    int32_map[4] = 8

    msg.ClearField('map_int32_int32')
    self.assertEqual(b'', msg.SerializeToString())
    matching_dict = {2: 4, 3: 6, 4: 8}
    # The detached container must still hold the old entries.
    self.assertMapIterEquals(int32_map.items(), matching_dict)

  def testMessageMapValidAfterFieldCleared(self):
    # Map needs to work even if field is cleared.
    # For the C++ implementation this tests the correctness of
    # ScalarMapContainer::Release()
    msg = map_unittest_pb2.TestMap()
    int32_foreign_message = msg.map_int32_foreign_message

    int32_foreign_message[2].c = 5

    msg.ClearField('map_int32_foreign_message')
    self.assertEqual(b'', msg.SerializeToString())
    self.assertTrue(2 in int32_foreign_message.keys())

  def testMapIterInvalidatedByClearField(self):
    # Map iterator is invalidated when field is cleared.
    # But this case does need to not crash the interpreter.
    # For the C++ implementation this tests the correctness of
    # ScalarMapContainer::Release()
    msg = map_unittest_pb2.TestMap()

    it = iter(msg.map_int32_int32)

    msg.ClearField('map_int32_int32')
    with self.assertRaises(RuntimeError):
      for _ in it:
        pass

    it = iter(msg.map_int32_foreign_message)
    msg.ClearField('map_int32_foreign_message')
    with self.assertRaises(RuntimeError):
      for _ in it:
        pass

  def testMapDelete(self):
    """del removes an existing key; deleting a missing key raises KeyError."""
    msg = map_unittest_pb2.TestMap()

    self.assertEqual(0, len(msg.map_int32_int32))

    msg.map_int32_int32[4] = 6
    self.assertEqual(1, len(msg.map_int32_int32))

    with self.assertRaises(KeyError):
      del msg.map_int32_int32[88]

    del msg.map_int32_int32[4]
    self.assertEqual(0, len(msg.map_int32_int32))

  def testMapsAreMapping(self):
    """Map containers implement the Mapping/MutableMapping ABCs."""
    # NOTE(review): collections.Mapping was removed from the top-level
    # collections namespace in Python 3.10 (it lives in collections.abc).
    # This file appears to target Python 2/3 via six, so it is left as-is;
    # confirm the supported Python versions before modernizing.
    msg = map_unittest_pb2.TestMap()
    self.assertIsInstance(msg.map_int32_int32, collections.Mapping)
    self.assertIsInstance(msg.map_int32_int32, collections.MutableMapping)
    self.assertIsInstance(msg.map_int32_foreign_message, collections.Mapping)
    self.assertIsInstance(msg.map_int32_foreign_message,
                          collections.MutableMapping)

  def testMapFindInitializationErrorsSmokeTest(self):
    """Populated maps must not produce initialization errors."""
    msg = map_unittest_pb2.TestMap()
    msg.map_string_string['abc'] = '123'
    msg.map_int32_int32[35] = 64
    msg.map_string_foreign_message['foo'].c = 5
    self.assertEqual(0, len(msg.FindInitializationErrors()))

  def testAnyMessage(self):
    """Any: Pack/Is/Unpack round trip, plus type-mismatch Unpack failure."""
    # Creates and sets message.
    msg = any_test_pb2.TestAny()
    msg_descriptor = msg.DESCRIPTOR
    all_types = unittest_pb2.TestAllTypes()
    all_descriptor = all_types.DESCRIPTOR
    all_types.repeated_string.append(u'\u00fc\ua71f')
    # Packs to Any.
    msg.value.Pack(all_types)
    self.assertEqual(msg.value.type_url,
                     'type.googleapis.com/%s' % all_descriptor.full_name)
    self.assertEqual(msg.value.value,
                     all_types.SerializeToString())
    # Tests Is() method.
    self.assertTrue(msg.value.Is(all_descriptor))
    self.assertFalse(msg.value.Is(msg_descriptor))
    # Unpacks Any.
    unpacked_message = unittest_pb2.TestAllTypes()
    self.assertTrue(msg.value.Unpack(unpacked_message))
    self.assertEqual(all_types, unpacked_message)
    # Unpacks to different type.
    self.assertFalse(msg.value.Unpack(msg))
    # Only Any messages have Pack method.
    try:
      msg.Pack(all_types)
    except AttributeError:
      pass
    else:
      raise AttributeError('%s should not have Pack method.' %
                           msg_descriptor.full_name)
class ValidTypeNamesTest(unittest.TestCase):
def assertImportFromName(self, msg, base_name):
# Parse <type 'module.class_name'> to extra 'some.name' as a string.
tp_name = str(type(msg)).split("'")[1]
valid_names = ('Repeated%sContainer' % base_name,
'Repeated%sFieldContainer' % base_name)
self.assertTrue(any(tp_name.endswith(v) for v in valid_names),
'%r does end with any of %r' % (tp_name, valid_names))
parts = tp_name.split('.')
class_name = parts[-1]
module_name = '.'.join(parts[:-1])
__import__(module_name, fromlist=[class_name])
def testTypeNamesCanBeImported(self):
# If import doesn't work, pickling won't work either.
pb = unittest_pb2.TestAllTypes()
self.assertImportFromName(pb.repeated_int32, 'Scalar')
self.assertImportFromName(pb.repeated_nested_message, 'Composite')
class PackedFieldTest(unittest.TestCase):
  """Verifies the wire encoding of [packed=true] vs. unpacked repeated fields.

  Each golden byte string below was derived by hand from the protobuf wire
  format: packed fields serialize as one length-delimited record per field,
  while unpacked fields emit one tag per element.
  """
  def setMessage(self, message):
    # Append a single element (1 / 1.0 / True) to every repeated field so
    # both test methods serialize the same logical content.
    message.repeated_int32.append(1)
    message.repeated_int64.append(1)
    message.repeated_uint32.append(1)
    message.repeated_uint64.append(1)
    message.repeated_sint32.append(1)
    message.repeated_sint64.append(1)
    message.repeated_fixed32.append(1)
    message.repeated_fixed64.append(1)
    message.repeated_sfixed32.append(1)
    message.repeated_sfixed64.append(1)
    message.repeated_float.append(1.0)
    message.repeated_double.append(1.0)
    message.repeated_bool.append(True)
    message.repeated_nested_enum.append(1)
  def testPackedFields(self):
    message = packed_field_test_pb2.TestPackedTypes()
    self.setMessage(message)
    # Each line is tag byte, payload length, payload.  Note the sint32 and
    # sint64 entries carry \x02: zigzag encoding maps 1 -> 2.  The float and
    # double payloads are little-endian IEEE-754 encodings of 1.0.
    golden_data = (b'\x0A\x01\x01'
                   b'\x12\x01\x01'
                   b'\x1A\x01\x01'
                   b'\x22\x01\x01'
                   b'\x2A\x01\x02'
                   b'\x32\x01\x02'
                   b'\x3A\x04\x01\x00\x00\x00'
                   b'\x42\x08\x01\x00\x00\x00\x00\x00\x00\x00'
                   b'\x4A\x04\x01\x00\x00\x00'
                   b'\x52\x08\x01\x00\x00\x00\x00\x00\x00\x00'
                   b'\x5A\x04\x00\x00\x80\x3f'
                   b'\x62\x08\x00\x00\x00\x00\x00\x00\xf0\x3f'
                   b'\x6A\x01\x01'
                   b'\x72\x01\x01')
    self.assertEqual(golden_data, message.SerializeToString())
  def testUnpackedFields(self):
    message = packed_field_test_pb2.TestUnpackedTypes()
    self.setMessage(message)
    # Unpacked encoding: each element is preceded by its own field tag and,
    # for varint fields, no length byte.  Same zigzag/IEEE-754 notes apply.
    golden_data = (b'\x08\x01'
                   b'\x10\x01'
                   b'\x18\x01'
                   b'\x20\x01'
                   b'\x28\x02'
                   b'\x30\x02'
                   b'\x3D\x01\x00\x00\x00'
                   b'\x41\x01\x00\x00\x00\x00\x00\x00\x00'
                   b'\x4D\x01\x00\x00\x00'
                   b'\x51\x01\x00\x00\x00\x00\x00\x00\x00'
                   b'\x5D\x00\x00\x80\x3f'
                   b'\x61\x00\x00\x00\x00\x00\x00\xf0\x3f'
                   b'\x68\x01'
                   b'\x70\x01')
    self.assertEqual(golden_data, message.SerializeToString())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
rezasafi/spark | examples/src/main/python/mllib/fpgrowth_example.py | 158 | 1280 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.mllib.fpm import FPGrowth
# $example off$
from pyspark import SparkContext
if __name__ == "__main__":
    sc = SparkContext(appName="FPGrowth")
    # $example on$
    # Each input line is one transaction: item names separated by spaces.
    data = sc.textFile("data/mllib/sample_fpgrowth.txt")
    transactions = data.map(lambda line: line.strip().split(' '))
    # Mine itemsets appearing in at least 20% of transactions, spreading the
    # FP-growth work across 10 partitions.
    model = FPGrowth.train(transactions, minSupport=0.2, numPartitions=10)
    result = model.freqItemsets().collect()
    for fi in result:
        print(fi)
    # $example off$
| apache-2.0 |
psibi/Neuron | doc/conf.py | 1 | 6985 | # -*- coding: utf-8 -*-
#
# Neuron documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 13 10:45:35 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Neuron'
copyright = u'2012, Sibi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Neurondoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Neuron.tex', u'Neuron Documentation',
u'Sibi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'neuron', u'Neuron Documentation',
[u'Sibi'], 1)
]
| gpl-3.0 |
globau/servo | components/script/dom/bindings/codegen/parser/tests/test_constructor_no_interface_object.py | 52 | 1525 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
[Constructor, NoInterfaceObject]
interface TestConstructorNoInterfaceObject {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
[NoInterfaceObject, Constructor]
interface TestConstructorNoInterfaceObject {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
parser.parse("""
[NoInterfaceObject, NamedConstructor=FooBar]
interface TestNamedConstructorNoInterfaceObject {
};
""")
# Test HTMLConstructor and NoInterfaceObject
parser = parser.reset()
threw = False
try:
parser.parse("""
[NoInterfaceObject, HTMLConstructor]
interface TestHTMLConstructorNoInterfaceObject {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
parser = parser.reset()
threw = False
try:
parser.parse("""
[HTMLConstructor, NoInterfaceObject]
interface TestHTMLConstructorNoInterfaceObject {
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
| mpl-2.0 |
novafloss/jenkins-epo | tests/extensions/test_poll.py | 2 | 5094 | import asyncio
from unittest.mock import Mock
from asynctest import CoroutineMock
import pytest
@pytest.mark.asyncio
@asyncio.coroutine
def test_skip_outdated():
    """A build flagged outdated is ignored: never inspected, never cancelled."""
    from jenkins_epo.extensions.jenkins import PollExtension
    ext = PollExtension('test', Mock())
    ext.current = ext.bot.current
    ext.current.head.sha = 'cafed0d0'
    ext.current.cancel_queue = []
    ext.current.job_specs = {'job': Mock()}
    ext.current.job_specs['job'].name = 'job'
    ext.current.jobs = {}
    ext.current.jobs['job'] = job = Mock()
    job.fetch_builds = CoroutineMock()
    job.process_builds.return_value = builds = [Mock()]
    build = builds[0]
    build.is_outdated = True
    yield from ext.run()
    # is_running must not even be consulted for an outdated build.
    assert not builds[0].is_running.mock_calls
    assert 0 == len(ext.current.cancel_queue)
@pytest.mark.asyncio
@asyncio.coroutine
def test_skip_build_not_running():
    """A finished (not running) build is never queued for cancellation."""
    from jenkins_epo.extensions.jenkins import PollExtension
    ext = PollExtension('test', Mock())
    ext.current = ext.bot.current
    ext.current.head.sha = 'cafed0d0'
    ext.current.cancel_queue = []
    ext.current.job_specs = {'job': Mock()}
    ext.current.job_specs['job'].name = 'job'
    ext.current.jobs = {}
    ext.current.jobs['job'] = job = Mock()
    job.fetch_builds = CoroutineMock()
    job.process_builds.return_value = builds = [Mock()]
    build = builds[0]
    build.is_outdated = False
    build.is_running = False
    yield from ext.run()
    assert 0 == len(ext.current.cancel_queue)
@pytest.mark.asyncio
@asyncio.coroutine
def test_skip_other_branch():
    """A running build on a different branch is left alone."""
    from jenkins_epo.extensions.jenkins import PollExtension
    ext = PollExtension('test', Mock())
    ext.current = ext.bot.current
    ext.current.cancel_queue = []
    ext.current.head.sha = 'cafed0d0'
    ext.current.head.ref = 'branch'
    ext.current.job_specs = {'job': Mock()}
    ext.current.job_specs['job'].name = 'job'
    ext.current.jobs = {}
    ext.current.jobs['job'] = job = Mock()
    job.fetch_builds = CoroutineMock()
    job.process_builds.return_value = builds = [Mock()]
    build = builds[0]
    build.is_outdated = False
    build.is_running = True
    # Build belongs to another branch than the one being polled.
    build.ref = 'otherbranch'
    yield from ext.run()
    assert 0 == len(ext.current.cancel_queue)
@pytest.mark.asyncio
@asyncio.coroutine
def test_skip_current_sha(mocker):
    """A running build already on the head SHA must not be cancelled."""
    from jenkins_epo.extensions.jenkins import PollExtension
    ext = PollExtension('test', Mock())
    ext.current = ext.bot.current
    ext.current.cancel_queue = []
    ext.current.head.ref = 'branch'
    ext.current.head.sha = 'bab1'
    ext.current.job_specs = {'job': Mock()}
    ext.current.job_specs['job'].name = 'job'
    ext.current.jobs = {}
    ext.current.jobs['job'] = job = Mock()
    job.list_contexts.return_value = []
    job.fetch_builds = CoroutineMock()
    job.process_builds.return_value = builds = [Mock()]
    build = builds[0]
    build.is_outdated = False
    build.is_running = True
    build.ref = 'branch'
    # Build SHA matches the current head: it is the build we want running.
    build.sha = ext.current.head.sha
    yield from ext.run()
    assert 0 == len(ext.current.cancel_queue)
@pytest.mark.asyncio
@asyncio.coroutine
def test_preset_status_cloning(mocker):
    """Preset commit status for builds still cloning.

    When Jenkins is cloning, the build is real but no status is reported, so
    the extension presets a status on the latest SHA instead of cancelling.
    """
    from jenkins_epo.extensions.jenkins import PollExtension
    ext = PollExtension('test', Mock(name='bot'))
    ext.current = ext.bot.current
    ext.current.cancel_queue = []
    ext.current.head.ref = 'branch'
    ext.current.head.sha = 'bab1'
    ext.current.last_commit.maybe_update_status = CoroutineMock()
    ext.current.statuses = {}
    ext.current.job_specs = {'job': Mock()}
    ext.current.job_specs['job'].name = 'job'
    ext.current.jobs = {}
    ext.current.jobs['job'] = job = Mock()
    job.list_contexts.return_value = ['job']
    job.fetch_builds = CoroutineMock()
    # spec=[...] deliberately omits 'sha': a cloning build exposes no SHA yet.
    job.process_builds.return_value = builds = [Mock(spec=[
        'is_outdated', 'is_running', 'ref', 'url'
    ])]
    build = builds[0]
    build.is_outdated = False
    build.is_running = True
    build.ref = 'branch'
    build.commit_status = dict()
    build.url = 'url://'
    yield from ext.run()
    assert 0 == len(ext.current.cancel_queue)
    # The extension must have pushed a status onto the last commit.
    assert ext.current.last_commit.maybe_update_status.mock_calls
@pytest.mark.asyncio
@asyncio.coroutine
def test_cancel(mocker):
    """A running build on the right branch but an old SHA gets queued for cancel."""
    from jenkins_epo.extensions.jenkins import PollExtension
    ext = PollExtension('test', Mock())
    ext.current = ext.bot.current
    ext.current.cancel_queue = []
    ext.current.head.ref = 'branch'
    ext.current.head.sha = 'bab1'
    ext.current.job_specs = {'job': Mock()}
    ext.current.job_specs['job'].name = 'job'
    ext.current.jobs = {}
    ext.current.jobs['job'] = job = Mock()
    job.fetch_builds = CoroutineMock()
    job.process_builds.return_value = builds = [Mock()]
    build = builds[0]
    build.is_outdated = False
    build.is_running = True
    build.ref = 'branch'
    # SHA differs from the current head ('bab1'): the build is stale.
    build.sha = '01d'
    build.url = 'url://'
    build.commit_status = dict()
    yield from ext.run()
    assert 1 == len(ext.current.cancel_queue)
| gpl-3.0 |
PourroyJean/performance_modelisation | script/data visualisation/venv/lib/python3.6/encodings/iso8859_7.py | 272 | 12844 | """ Python Character Mapping Codec iso8859_7 generated from 'MAPPINGS/ISO8859/8859-7.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO-8859-7 codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode *input* to bytes via the ISO-8859-7 encoding table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode *input* to text via the ISO-8859-7 decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input*; charmap codecs keep no state between calls."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors,
                                                   encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input*; charmap codecs keep no state between calls."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors,
                                                   decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer combining the ISO-8859-7 Codec with codecs.StreamWriter."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader combining the ISO-8859-7 Codec with codecs.StreamReader."""
### encodings module API
def getregentry():
    """Return the CodecInfo entry used to register this codec."""
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-7',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u2018' # 0xA1 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xA2 -> RIGHT SINGLE QUOTATION MARK
'\xa3' # 0xA3 -> POUND SIGN
'\u20ac' # 0xA4 -> EURO SIGN
'\u20af' # 0xA5 -> DRACHMA SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u037a' # 0xAA -> GREEK YPOGEGRAMMENI
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\ufffe'
'\u2015' # 0xAF -> HORIZONTAL BAR
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\u0384' # 0xB4 -> GREEK TONOS
'\u0385' # 0xB5 -> GREEK DIALYTIKA TONOS
'\u0386' # 0xB6 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\xb7' # 0xB7 -> MIDDLE DOT
'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
'\ufffe'
'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
'\u03bd' # 0xED -> GREEK SMALL LETTER NU
'\u03be' # 0xEE -> GREEK SMALL LETTER XI
'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
betoesquivel/fil2014 | build/django/tests/syncdb_signals/tests.py | 51 | 2598 | from django.db.models import signals
from django.test import TestCase
from django.core import management
from django.utils import six
from . import models
PRE_SYNCDB_ARGS = ['app', 'create_models', 'verbosity', 'interactive', 'db']
SYNCDB_DATABASE = 'default'
SYNCDB_VERBOSITY = 1
SYNCDB_INTERACTIVE = False
class PreSyncdbReceiver(object):
    """Signal receiver recording how often it fired and with what kwargs."""

    def __init__(self):
        self.call_counter = 0
        self.call_args = None

    def __call__(self, signal, sender, **kwargs):
        # Keep the keyword arguments of the most recent dispatch so the
        # test can inspect them afterwards.
        self.call_counter += 1
        self.call_args = kwargs
class OneTimeReceiver(object):
    """
    Special receiver to handle the fact that the test runner calls syncdb for
    several databases and several times for some of them.
    """
    def __init__(self):
        # How many times the signal fired for SYNCDB_DATABASE, and the
        # kwargs of that single recorded dispatch.
        self.call_counter = 0
        self.call_args = None
    def __call__(self, signal, sender, **kwargs):
        # Although test runner calls syncdb for several databases,
        # testing for only one of them is quite sufficient.
        if kwargs['db'] == SYNCDB_DATABASE:
            self.call_counter = self.call_counter + 1
            self.call_args = kwargs
            # we need to test only one call of syncdb
            signals.pre_syncdb.disconnect(pre_syncdb_receiver, sender=models)
# We connect receiver here and not in unit test code because we need to
# connect receiver before test runner creates database. That is, sequence of
# actions would be:
#
# 1. Test runner imports this module.
# 2. We connect receiver.
# 3. Test runner calls syncdb for create default database.
# 4. Test runner execute our unit test code.
pre_syncdb_receiver = OneTimeReceiver()
signals.pre_syncdb.connect(pre_syncdb_receiver, sender=models)
class SyncdbSignalTests(TestCase):
    """Checks the pre_syncdb signal fired by the syncdb management command."""
    available_apps = [
        'syncdb_signals',
    ]
    def test_pre_syncdb_call_time(self):
        # The module-level OneTimeReceiver must have seen exactly one
        # dispatch for the default database during test-db creation.
        self.assertEqual(pre_syncdb_receiver.call_counter, 1)
    def test_pre_syncdb_args(self):
        """Running syncdb dispatches pre_syncdb once with the expected kwargs."""
        r = PreSyncdbReceiver()
        signals.pre_syncdb.connect(r, sender=models)
        management.call_command('syncdb', database=SYNCDB_DATABASE,
            verbosity=SYNCDB_VERBOSITY, interactive=SYNCDB_INTERACTIVE,
            load_initial_data=False, stdout=six.StringIO())
        args = r.call_args
        self.assertEqual(r.call_counter, 1)
        self.assertEqual(set(args), set(PRE_SYNCDB_ARGS))
        self.assertEqual(args['app'], models)
        self.assertEqual(args['verbosity'], SYNCDB_VERBOSITY)
        self.assertEqual(args['interactive'], SYNCDB_INTERACTIVE)
        self.assertEqual(args['db'], 'default')
| mit |
jmcarp/django | django/db/backends/mysql/base.py | 323 | 15548 | """
MySQL database backend for Django.
Requires mysqlclient: https://pypi.python.org/pypi/mysqlclient/
MySQLdb is supported for Python 2 only: http://sourceforge.net/projects/mysql-python
"""
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import MySQLdb as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
from MySQLdb.constants import CLIENT, FIELD_TYPE # isort:skip
from MySQLdb.converters import Thing2Literal, conversions # isort:skip
# Some of these import MySQLdb, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .validation import DatabaseValidation # isort:skip
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value, conv):
    """
    Adapt a datetime for the MySQL driver, warning when it is aware.

    MySQL has no timezone-aware datetime column type, so aware values are
    converted to naive UTC before serialization.  Accepting aware values
    here is deprecated; callers should pass naive datetimes themselves.

    :param value: the datetime to serialize
    :param conv: the driver conversions dictionary (passed to Thing2Literal)
    """
    # Remove this function and rely on the default adapter in Django 2.0.
    if settings.USE_TZ and timezone.is_aware(value):
        # Bug fix: the original code passed the format string straight to
        # warnings.warn() without interpolating ``value``, so the emitted
        # message contained a literal "%s" -- warnings.warn() performs no
        # printf-style formatting of its own.
        warnings.warn(
            "The MySQL database adapter received an aware datetime (%s), "
            "probably from cursor.execute(). Update your code to pass a "
            "naive datetime in the database connection's time zone (UTC by "
            "default)." % value,
            RemovedInDjango20Warning)
        # This doesn't account for the database connection's timezone,
        # which isn't known. (That's why this adapter is deprecated.)
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S.%f"), conv)
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeText and SafeBytes as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
# This dictionary is handed to the driver as the ``conv`` connection
# parameter in DatabaseWrapper.get_connection_params().
django_conversions = conversions.copy()
django_conversions.update({
    FIELD_TYPE.TIME: backend_utils.typecast_time,
    FIELD_TYPE.DECIMAL: backend_utils.typecast_decimal,
    FIELD_TYPE.NEWDECIMAL: backend_utils.typecast_decimal,
    datetime.datetime: adapt_datetime_warn_on_aware_datetime,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard backend_utils.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    Thin wrapper around MySQLdb's normal cursor class.

    It catches particular exception instances raised by the driver and
    reraises them with the exception types Django expects.  Implemented
    as a wrapper, rather than a subclass, so that we aren't stuck to the
    particular underlying representation returned by Connection.cursor().
    """
    # MySQL error codes that are really integrity violations but are
    # reported by the driver as OperationalError.
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        self.cursor = cursor

    def _promote_operational_error(self, exc):
        # Reraise the misclassified driver error as Django's IntegrityError,
        # keeping the original traceback; any other error code propagates
        # unchanged via the bare ``raise``.
        if exc.args[0] in self.codes_for_integrityerror:
            six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(exc.args)), sys.exc_info()[2])
        raise

    def execute(self, query, args=None):
        try:
            # args is None means no string interpolation
            return self.cursor.execute(query, args)
        except Database.OperationalError as e:
            self._promote_operational_error(e)

    def executemany(self, query, args):
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError as e:
            self._promote_operational_error(e)

    def __getattr__(self, attr):
        # Everything not defined here is delegated to the real cursor.
        # (__getattr__ is consulted only after normal lookup fails, so
        # instance attributes such as ``cursor`` are served directly.)
        if attr in self.__dict__:
            return self.__dict__[attr]
        return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Ticket #17671 - Close instead of passing thru to avoid backend
        # specific behavior.
        self.close()
class DatabaseWrapper(BaseDatabaseWrapper):
    # Django database backend built on the MySQLdb/mysqlclient driver.
    vendor = 'mysql'
    # This dictionary maps Field objects to their associated MySQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    _data_types = {
        'AutoField': 'integer AUTO_INCREMENT',
        'BinaryField': 'longblob',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'double precision',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer UNSIGNED',
        'PositiveSmallIntegerField': 'smallint UNSIGNED',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'longtext',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    @cached_property
    def data_types(self):
        # When the server supports fractional seconds, widen the temporal
        # column types so microsecond precision survives a round-trip.
        if self.features.supports_microsecond_precision:
            return dict(self._data_types, DateTimeField='datetime(6)', TimeField='time(6)')
        else:
            return self._data_types
    # SQL fragments implementing Django's field lookups.  The BINARY
    # keyword forces case-sensitive matching for the non-"i" variants.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\\', '\\\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': "LIKE BINARY CONCAT('%%', {}, '%%')",
        'icontains': "LIKE CONCAT('%%', {}, '%%')",
        'startswith': "LIKE BINARY CONCAT({}, '%%')",
        'istartswith': "LIKE CONCAT({}, '%%')",
        'endswith': "LIKE BINARY CONCAT('%%', {})",
        'iendswith': "LIKE CONCAT('%%', {})",
    }
    # The underlying DB-API driver module, exposed as part of the backend API.
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Backend subcomponents consulted throughout Django's ORM layers.
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation(self)
    def get_connection_params(self):
        # Translate Django's DATABASES settings into MySQLdb.connect()
        # keyword arguments; empty settings are simply omitted.
        kwargs = {
            'conv': django_conversions,
            'charset': 'utf8',
        }
        if six.PY2:
            kwargs['use_unicode'] = True
        settings_dict = self.settings_dict
        if settings_dict['USER']:
            kwargs['user'] = settings_dict['USER']
        if settings_dict['NAME']:
            kwargs['db'] = settings_dict['NAME']
        if settings_dict['PASSWORD']:
            # Coerced with force_str() so the driver receives a native string.
            kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
        if settings_dict['HOST'].startswith('/'):
            # A leading slash marks a local UNIX domain socket path.
            kwargs['unix_socket'] = settings_dict['HOST']
        elif settings_dict['HOST']:
            kwargs['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            kwargs['port'] = int(settings_dict['PORT'])
        # We need the number of potentially affected rows after an
        # "UPDATE", not the number of changed rows.
        kwargs['client_flag'] = CLIENT.FOUND_ROWS
        # OPTIONS may override any of the defaults computed above.
        kwargs.update(settings_dict['OPTIONS'])
        return kwargs
    def get_new_connection(self, conn_params):
        conn = Database.connect(**conn_params)
        # Teach the driver to encode Django's safe-string types exactly
        # like their plain str/bytes counterparts.
        conn.encoders[SafeText] = conn.encoders[six.text_type]
        conn.encoders[SafeBytes] = conn.encoders[bytes]
        return conn
    def init_connection_state(self):
        with self.cursor() as cursor:
            # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
            # on a recently-inserted row will return when the field is tested for
            # NULL. Disabling this value brings this aspect of MySQL in line with
            # SQL standards.
            cursor.execute('SET SQL_AUTO_IS_NULL = 0')
    def create_cursor(self):
        # Wrap the raw driver cursor so its exceptions are normalized.
        cursor = self.connection.cursor()
        return CursorWrapper(cursor)
    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # The driver may report that rollback is unsupported (e.g. for
            # non-transactional setups -- presumably); treat it as a no-op.
            pass
    def _set_autocommit(self, autocommit):
        with self.wrap_database_errors:
            self.connection.autocommit(autocommit)
    def disable_constraint_checking(self):
        """
        Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
        to indicate constraint checks need to be re-enabled.
        """
        self.cursor().execute('SET foreign_key_checks=0')
        return True
    def enable_constraint_checking(self):
        """
        Re-enable foreign key checks after they have been disabled.
        """
        # Override needs_rollback in case constraint_checks_disabled is
        # nested inside transaction.atomic.
        self.needs_rollback, needs_rollback = False, self.needs_rollback
        try:
            self.cursor().execute('SET foreign_key_checks=1')
        finally:
            self.needs_rollback = needs_rollback
    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.

        Raises an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provides detailed information about the
        invalid reference in the error message.

        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            # No explicit list given: check every table in the database.
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # A LEFT JOIN row with a NULL referred key is a dangling reference.
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                    column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0],
                        table_name, column_name, bad_row[1],
                        referenced_table_name, referenced_column_name))
    def is_usable(self):
        # ping() raises when the server connection has been lost.
        try:
            self.connection.ping()
        except Database.Error:
            return False
        else:
            return True
    @cached_property
    def mysql_version(self):
        # Parse and cache the server version as a tuple of ints, e.g. (5, 6, 21).
        with self.temporary_connection():
            server_info = self.connection.get_server_info()
        match = server_version_re.match(server_info)
        if not match:
            raise Exception('Unable to determine MySQL version from version string %r' % server_info)
        return tuple(int(x) for x in match.groups())
| bsd-3-clause |
nkuhlen/replication-study-financial-macro | .mywaflib/waflib/Runner.py | 6 | 8687 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Runner.py: Task scheduling and execution
"""
import random, atexit
try:
from queue import Queue
except ImportError:
from Queue import Queue
from waflib import Utils, Task, Errors, Logs
# Refill threshold multiplier used by Parallel.refill_task_list(): finished
# tasks are collected while more than ``GAP * njobs`` are in flight.
GAP = 10
"""
Wait for free tasks if there are at least ``GAP * njobs`` in queue
"""
class TaskConsumer(Utils.threading.Thread):
	"""
	Task consumers belong to a pool of workers
	They wait for tasks in the queue and then use ``task.process(...)``
	"""
	def __init__(self):
		Utils.threading.Thread.__init__(self)
		# Per-consumer input queue; Parallel.init_task_pool() later rebinds
		# it to a queue shared by all consumers (via a setq callable).
		self.ready = Queue()
		"""
		Obtain :py:class:`waflib.Task.TaskBase` instances from this queue.
		"""
		# Daemon thread so a lingering consumer cannot keep the process
		# alive; the thread starts consuming as soon as it is constructed,
		# which is why ``ready`` must be created first.
		self.setDaemon(1)
		self.start()
	def run(self):
		"""
		Loop over the tasks to execute
		"""
		try:
			self.loop()
		except Exception:
			# Any exception (including the TypeError triggered by the None
			# sentinel put by _free_resources) silently ends the thread.
			pass
	def loop(self):
		"""
		Obtain tasks from :py:attr:`waflib.Runner.TaskConsumer.ready` and call
		:py:meth:`waflib.Task.TaskBase.process`. If the object is a function, execute it.
		"""
		while 1:
			tsk = self.ready.get()
			if not isinstance(tsk, Task.TaskBase):
				# Non-task objects are treated as callables (e.g. the setq
				# functions used to rebind the ready queue).
				tsk(self)
			else:
				tsk.process()
# Global queue of idle TaskConsumer threads, reused across builds/groups.
pool = Queue()
"""
Pool of task consumer objects
"""
def get_pool():
	"""
	Obtain a task consumer from :py:attr:`waflib.Runner.pool`.
	Do not forget to put it back by using :py:func:`waflib.Runner.put_pool`
	and reset properly (original waiting queue).

	:rtype: :py:class:`waflib.Runner.TaskConsumer`
	"""
	try:
		consumer = pool.get(False)
	except Exception:
		# Pool is empty (or unusable): spin up a fresh consumer thread.
		consumer = TaskConsumer()
	return consumer
def put_pool(x):
	"""
	Return a task consumer to the thread pool :py:attr:`waflib.Runner.pool`
	so it can be reused by a later build instead of spawning a new thread.

	:param x: task consumer object
	:type x: :py:class:`waflib.Runner.TaskConsumer`
	"""
	pool.put(x)
def _free_resources():
	# Interpreter-exit cleanup: drain the consumer pool, wake each worker
	# with a ``None`` sentinel (calling None fails inside loop(), which
	# run() swallows, ending the thread), then join them all.
	global pool
	consumers = []
	while pool.qsize():
		consumers.append(pool.get())
	for consumer in consumers:
		consumer.ready.put(None)
	for consumer in consumers:
		consumer.join()
	pool = None
# Ensure all pooled worker threads are stopped and joined at interpreter exit.
atexit.register(_free_resources)
class Parallel(object):
	"""
	Schedule the tasks obtained from the build context for execution.
	"""
	def __init__(self, bld, j=2):
		"""
		The initialization requires a build context reference
		for computing the total number of jobs.
		"""
		self.numjobs = j
		"""
		Number of consumers in the pool
		"""
		self.bld = bld
		"""
		Instance of :py:class:`waflib.Build.BuildContext`
		"""
		self.outstanding = []
		"""List of :py:class:`waflib.Task.TaskBase` that may be ready to be executed"""
		self.frozen = []
		"""List of :py:class:`waflib.Task.TaskBase` that cannot be executed immediately"""
		self.out = Queue(0)
		"""List of :py:class:`waflib.Task.TaskBase` returned by the task consumers"""
		self.count = 0
		"""Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`"""
		self.processed = 1
		"""Amount of tasks processed"""
		self.stop = False
		"""Error flag to stop the build"""
		self.error = []
		"""Tasks that could not be executed"""
		self.biter = None
		"""Task iterator which must give groups of parallelizable tasks when calling ``next()``"""
		self.dirty = False
		"""Flag to indicate that tasks have been executed, and that the build cache must be saved (call :py:meth:`waflib.Build.BuildContext.store`)"""
	def get_next_task(self):
		"""
		Obtain the next task to execute.

		:rtype: :py:class:`waflib.Task.TaskBase`
		"""
		if not self.outstanding:
			return None
		return self.outstanding.pop(0)
	def postpone(self, tsk):
		"""
		A task cannot be executed at this point, put it in the list :py:attr:`waflib.Runner.Parallel.frozen`.

		:param tsk: task
		:type tsk: :py:class:`waflib.Task.TaskBase`
		"""
		# Insert at a random end of the list -- presumably to avoid
		# retrying postponed tasks in the exact same order each pass.
		if random.randint(0, 1):
			self.frozen.insert(0, tsk)
		else:
			self.frozen.append(tsk)
	def refill_task_list(self):
		"""
		Put the next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`.
		"""
		# Throttle: collect finished tasks while too many are in flight.
		while self.count > self.numjobs * GAP:
			self.get_out()
		while not self.outstanding:
			if self.count:
				# Tasks are still running; wait for one to come back, it
				# may unfreeze others via add_more_tasks().
				self.get_out()
			elif self.frozen:
				# Only frozen tasks remain.  If self.processed has not
				# advanced since the last time we reached this point
				# (tracked in self.deadlock), no progress is possible.
				try:
					cond = self.deadlock == self.processed
				except AttributeError:
					pass
				else:
					if cond:
						msg = 'check the build order for the tasks'
						for tsk in self.frozen:
							if not tsk.run_after:
								msg = 'check the methods runnable_status'
								break
						lst = []
						for tsk in self.frozen:
							lst.append('%s\t-> %r' % (repr(tsk), [id(x) for x in tsk.run_after]))
						raise Errors.WafError('Deadlock detected: %s%s' % (msg, ''.join(lst)))
				self.deadlock = self.processed
			if self.frozen:
				# Resubmit everything that was postponed.
				self.outstanding += self.frozen
				self.frozen = []
			elif not self.count:
				# Nothing running and nothing frozen: advance to the next
				# group of parallelizable tasks from the build context.
				self.outstanding.extend(next(self.biter))
				self.total = self.bld.total()
				break
	def add_more_tasks(self, tsk):
		"""
		Tasks may be added dynamically during the build by binding them to the task :py:attr:`waflib.Task.TaskBase.more_tasks`

		:param tsk: task
		:type tsk: :py:attr:`waflib.Task.TaskBase`
		"""
		if getattr(tsk, 'more_tasks', None):
			self.outstanding += tsk.more_tasks
			self.total += len(tsk.more_tasks)
	def get_out(self):
		"""
		Obtain one task returned from the task consumers, and update the task count. Add more tasks if necessary through
		:py:attr:`waflib.Runner.Parallel.add_more_tasks`.

		:rtype: :py:attr:`waflib.Task.TaskBase`
		"""
		tsk = self.out.get()
		if not self.stop:
			self.add_more_tasks(tsk)
		self.count -= 1
		self.dirty = True
		return tsk
	def add_task(self, tsk):
		"""
		Pass a task to a consumer.

		:param tsk: task
		:type tsk: :py:attr:`waflib.Task.TaskBase`
		"""
		try:
			self.pool
		except AttributeError:
			# First task dispatched: create the consumer pool lazily.
			self.init_task_pool()
		self.ready.put(tsk)
	def init_task_pool(self):
		# lazy creation, and set a common pool for all task consumers
		pool = self.pool = [get_pool() for i in range(self.numjobs)]
		self.ready = Queue(0)
		def setq(consumer):
			# Executed on each consumer thread: rebind it to the shared queue.
			consumer.ready = self.ready
		for x in pool:
			x.ready.put(setq)
		return pool
	def free_task_pool(self):
		# return the consumers, setting a different queue for each of them
		def setq(consumer):
			consumer.ready = Queue(0)
			# Signal completion back through self.out so get_out() below
			# can account for each consumer exactly once.
			self.out.put(self)
		try:
			pool = self.pool
		except AttributeError:
			pass
		else:
			for x in pool:
				self.ready.put(setq)
			for x in pool:
				self.get_out()
			for x in pool:
				put_pool(x)
			self.pool = []
	def skip(self, tsk):
		# Mark the task as up-to-date without executing it.
		tsk.hasrun = Task.SKIPPED
	def error_handler(self, tsk):
		"""
		Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set, unless
		the build is executed with::

			$ waf build -k

		:param tsk: task
		:type tsk: :py:attr:`waflib.Task.TaskBase`
		"""
		if not self.bld.keep:
			self.stop = True
		self.error.append(tsk)
	def task_status(self, tsk):
		# Ask the task whether it must run; any exception raised by
		# runnable_status() is converted into an EXCEPTION status here.
		try:
			return tsk.runnable_status()
		except Exception:
			self.processed += 1
			tsk.err_msg = Utils.ex_stack()
			if not self.stop and self.bld.keep:
				self.skip(tsk)
				if self.bld.keep == 1:
					# if -k stop at the first exception, if -kk try to go as far as possible
					if Logs.verbose > 1 or not self.error:
						self.error.append(tsk)
					self.stop = True
				else:
					if Logs.verbose > 1:
						self.error.append(tsk)
				return Task.EXCEPTION
			tsk.hasrun = Task.EXCEPTION
			self.error_handler(tsk)
			return Task.EXCEPTION
	def start(self):
		"""
		Give tasks to :py:class:`waflib.Runner.TaskConsumer` instances until the build finishes or the ``stop`` flag is set.
		If only one job is used, then execute the tasks one by one, without consumers.
		"""
		self.total = self.bld.total()
		while not self.stop:
			self.refill_task_list()
			# consider the next task
			tsk = self.get_next_task()
			if not tsk:
				if self.count:
					# tasks may add new ones after they are run
					continue
				else:
					# no tasks to run, no tasks running, time to exit
					break
			if tsk.hasrun:
				# if the task is marked as "run", just skip it
				self.processed += 1
				continue
			if self.stop: # stop immediately after a failure was detected
				break
			st = self.task_status(tsk)
			if st == Task.RUN_ME:
				tsk.position = (self.processed, self.total)
				self.count += 1
				tsk.master = self
				self.processed += 1
				if self.numjobs == 1:
					# Single-job mode executes inline, without consumers.
					tsk.process()
				else:
					self.add_task(tsk)
			if st == Task.ASK_LATER:
				self.postpone(tsk)
			elif st == Task.SKIP_ME:
				self.processed += 1
				self.skip(tsk)
				self.add_more_tasks(tsk)
		# self.count represents the tasks that have been made available to the consumer threads
		# collect all the tasks after an error else the message may be incomplete
		while self.error and self.count:
			self.get_out()
		#print loop
		assert (self.count == 0 or self.stop)
		# free the task pool, if any
		self.free_task_pool()
| gpl-3.0 |
Danielhiversen/home-assistant | tests/components/image_processing/test_microsoft_face_detect.py | 2 | 5467 | """The tests for the microsoft face detect platform."""
from unittest.mock import patch, PropertyMock
from homeassistant.core import callback
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.setup import setup_component
import homeassistant.components.image_processing as ip
import homeassistant.components.microsoft_face as mf
from tests.common import (
get_test_home_assistant, assert_setup_component, load_fixture, mock_coro)
from tests.components.image_processing import common
class TestMicrosoftFaceDetectSetup:
    """Test class for image processing."""
    def setup_method(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()
    # MicrosoftFace.update_store is patched out so component setup does not
    # try to reach the Microsoft Face cloud API.
    @patch('homeassistant.components.microsoft_face.'
           'MicrosoftFace.update_store', return_value=mock_coro())
    def test_setup_platform(self, store_mock):
        """Set up platform with one entity."""
        config = {
            ip.DOMAIN: {
                'platform': 'microsoft_face_detect',
                'source': {
                    'entity_id': 'camera.demo_camera'
                },
                'attributes': ['age', 'gender'],
            },
            'camera': {
                'platform': 'demo'
            },
            mf.DOMAIN: {
                'api_key': '12345678abcdef6',
            }
        }
        with assert_setup_component(1, ip.DOMAIN):
            setup_component(self.hass, ip.DOMAIN, config)
        # The default entity id is derived from the source camera name.
        assert self.hass.states.get(
            'image_processing.microsoftface_demo_camera')
    @patch('homeassistant.components.microsoft_face.'
           'MicrosoftFace.update_store', return_value=mock_coro())
    def test_setup_platform_name(self, store_mock):
        """Set up platform with one entity and set name."""
        config = {
            ip.DOMAIN: {
                'platform': 'microsoft_face_detect',
                'source': {
                    'entity_id': 'camera.demo_camera',
                    'name': 'test local'
                },
            },
            'camera': {
                'platform': 'demo'
            },
            mf.DOMAIN: {
                'api_key': '12345678abcdef6',
            }
        }
        with assert_setup_component(1, ip.DOMAIN):
            setup_component(self.hass, ip.DOMAIN, config)
        # An explicit source 'name' overrides the derived entity id.
        assert self.hass.states.get('image_processing.test_local')
class TestMicrosoftFaceDetect:
    """Test class for image processing."""
    def setup_method(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.config = {
            ip.DOMAIN: {
                'platform': 'microsoft_face_detect',
                'source': {
                    'entity_id': 'camera.demo_camera',
                    'name': 'test local'
                },
                'attributes': ['age', 'gender'],
            },
            'camera': {
                'platform': 'demo'
            },
            mf.DOMAIN: {
                'api_key': '12345678abcdef6',
            }
        }
        # Template with one {0} slot for the API path, e.g. "detect".
        self.endpoint_url = "https://westus.{0}".format(mf.FACE_API_URL)
    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()
    # should_poll is forced to False so the scan below is triggered manually
    # rather than by the polling machinery.
    @patch('homeassistant.components.image_processing.microsoft_face_detect.'
           'MicrosoftFaceDetectEntity.should_poll',
           new_callable=PropertyMock(return_value=False))
    def test_ms_detect_process_image(self, poll_mock, aioclient_mock):
        """Set up and scan a picture and test plates from event."""
        # Mock the Microsoft Face REST endpoints touched during setup.
        aioclient_mock.get(
            self.endpoint_url.format("persongroups"),
            text=load_fixture('microsoft_face_persongroups.json')
        )
        aioclient_mock.get(
            self.endpoint_url.format("persongroups/test_group1/persons"),
            text=load_fixture('microsoft_face_persons.json')
        )
        aioclient_mock.get(
            self.endpoint_url.format("persongroups/test_group2/persons"),
            text=load_fixture('microsoft_face_persons.json')
        )
        setup_component(self.hass, ip.DOMAIN, self.config)
        state = self.hass.states.get('camera.demo_camera')
        url = "{0}{1}".format(
            self.hass.config.api.base_url,
            state.attributes.get(ATTR_ENTITY_PICTURE))
        face_events = []
        @callback
        def mock_face_event(event):
            """Mock event."""
            face_events.append(event)
        self.hass.bus.listen('image_processing.detect_face', mock_face_event)
        # Mock the camera snapshot and the detect call itself.
        aioclient_mock.get(url, content=b'image')
        aioclient_mock.post(
            self.endpoint_url.format("detect"),
            text=load_fixture('microsoft_face_detect.json'),
            params={'returnFaceAttributes': "age,gender"}
        )
        common.scan(self.hass, entity_id='image_processing.test_local')
        self.hass.block_till_done()
        state = self.hass.states.get('image_processing.test_local')
        # One face in the fixture -> one event; the state is the face count.
        assert len(face_events) == 1
        assert state.attributes.get('total_faces') == 1
        assert state.state == '1'
        assert face_events[0].data['age'] == 71.0
        assert face_events[0].data['gender'] == 'male'
        assert face_events[0].data['entity_id'] == \
            'image_processing.test_local'
| mit |
fluxer/spm | nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/compat/_scons_subprocess.py | 183 | 44500 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines two shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() will raise CalledProcessError, if the called process
returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional stdin argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen(cmd, mode='r', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen(cmd, mode='w', bufsize)
==>
pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
==>
p = Popen(cmd, shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
Replacing popen2.*
------------------
Note: If the cmd argument to popen2 functions is a string, the command
is executed through /bin/sh. If it is a list, the command is directly
executed.
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen3.Popen4 basically works as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
# Exception classes used by this module.
class CalledProcessError(Exception):
    """Raised by check_call() when the child process exits with a
    non-zero status.

    The failing command is kept in the ``cmd`` attribute and its exit
    status in the ``returncode`` attribute.
    """
    def __init__(self, returncode, cmd):
        self.returncode = returncode
        self.cmd = cmd
    def __str__(self):
        message = "Command '%s' returned non-zero exit status %d"
        return message % (self.cmd, self.returncode)
if mswindows:
try:
import threading
except ImportError:
# SCons: the threading module is only used by the communicate()
# method, which we don't actually use, so don't worry if we
# can't import it.
pass
import msvcrt
try:
# Try to get _subprocess
from _subprocess import *
class STARTUPINFO(object):
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes(object):
error = IOError
except ImportError:
# If not there, then drop back to requiring pywin32
# TODO: Should this be wrapped in try as well? To notify user to install
# pywin32 ? With URL to it?
import pywintypes
from win32api import GetStdHandle, STD_INPUT_HANDLE, \
STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
from win32api import GetCurrentProcess, DuplicateHandle, \
GetModuleFileName, GetVersion
from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
from win32pipe import CreatePipe
from win32process import CreateProcess, STARTUPINFO, \
GetExitCodeProcess, STARTF_USESTDHANDLES, \
STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
else:
import select
import errno
import fcntl
import pickle
try:
fcntl.F_GETFD
except AttributeError:
fcntl.F_GETFD = 1
try:
fcntl.F_SETFD
except AttributeError:
fcntl.F_SETFD = 2
# Public names exported by ``from subprocess import *``.
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
# Highest file descriptor number considered when closing fds in the child.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except KeyboardInterrupt:
    raise # SCons: don't swallow keyboard interrupts
except:
    # os.sysconf() may be missing or fail (e.g. on Windows); fall back
    # to a conservative historical default.
    MAXFD = 256
# Portability shims: very old interpreters spelled the int check
# differently, and ``long``/``unicode`` only exist on Python 2.
try:
    isinstance(1, int)
except TypeError:
    def is_int(obj):
        # True if obj is a plain integer.
        return isinstance(obj, type(1))
    def is_int_or_long(obj):
        # True if obj is an int or a long integer (Python 2 only).
        return type(obj) in (type(1), type(1L))
else:
    def is_int(obj):
        # True if obj is a plain integer.
        return isinstance(obj, int)
    def is_int_or_long(obj):
        # True if obj is an int or a long integer.
        return isinstance(obj, (int, long))
try:
    types.StringTypes
except AttributeError:
    # Provide types.StringTypes on interpreters that lack it.
    try:
        types.StringTypes = (str, unicode)
    except NameError:
        types.StringTypes = (str,)
def is_string(obj):
    # True if obj is a (byte or unicode) string.
    return isinstance(obj, types.StringTypes)
# Popen instances that were garbage collected while their child was
# still running; _cleanup() reaps them on the next Popen creation.
_active = []
def _cleanup():
    # Poll each leftover instance and drop those whose child has exited.
    for inst in _active[:]:
        if inst.poll(_deadstate=sys.maxsize) >= 0:
            try:
                _active.remove(inst)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass
# Sentinel values for the stdin/stdout/stderr arguments of Popen.
PIPE = -1
STDOUT = -2
def call(*popenargs, **kwargs):
    """Run command with arguments.  Wait for command to complete, then
    return the returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    # BUGFIX: apply() is deprecated since Python 2.3 and removed in
    # Python 3; argument unpacking is the exact equivalent.
    return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
    """Run a command and wait for it to finish, raising on failure.

    Arguments are the same as for the Popen constructor.  Returns the
    exit status when it is zero; otherwise raises CalledProcessError,
    whose ``returncode`` attribute carries the non-zero status.

    Example:

    check_call(["ls", "-l"])
    """
    status = call(*popenargs, **kwargs)
    if status == 0:
        return status
    # Prefer the explicit "args" keyword for the error report; fall
    # back to the first positional argument otherwise.
    reported_cmd = kwargs.get("args")
    if reported_cmd is None:
        reported_cmd = popenargs[0]
    raise CalledProcessError(status, reported_cmd)
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a single command-line string
    using the quoting rules of the Microsoft C runtime:

    1) Arguments are delimited by white space (a space or a tab).

    2) A string surrounded by double quotation marks is interpreted as
       a single argument, regardless of white space contained within.

    3) A double quotation mark preceded by a backslash is interpreted
       as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they immediately
       precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal backslash;
       an odd trailing backslash escapes the quotation mark as in 3.
    """
    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    pieces = []
    for argument in seq:
        pending_backslashes = []
        if pieces:
            # Separate this argument from the previous one.
            pieces.append(' ')
        must_quote = (" " in argument) or ("\t" in argument)
        if must_quote:
            pieces.append('"')
        for ch in argument:
            if ch == '\\':
                # Defer emission: doubling depends on what follows.
                pending_backslashes.append(ch)
            elif ch == '"':
                # Backslashes before a quote are doubled, and the quote
                # itself is escaped.
                pieces.append('\\' * len(pending_backslashes) * 2)
                pending_backslashes = []
                pieces.append('\\"')
            else:
                # Ordinary character: flush literal backslashes first.
                if pending_backslashes:
                    pieces.extend(pending_backslashes)
                    pending_backslashes = []
                pieces.append(ch)
        # Emit any backslashes left at the end of the argument.
        if pending_backslashes:
            pieces.extend(pending_backslashes)
        if must_quote:
            # Trailing backslashes before the closing quote are doubled
            # (emitted a second time), matching the MS CRT rules.
            pieces.extend(pending_backslashes)
            pieces.append('"')
    return ''.join(pieces)
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if not is_int_or_long(bufsize):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds:
raise ValueError("close_fds is not supported on Windows "
"platforms")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if p2cwrite:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
    def _translate_newlines(self, data):
        # Normalize Windows ("\r\n") and old-Mac ("\r") line endings to
        # "\n"; used when universal_newlines is enabled.
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        return data
    def __del__(self):
        # Finalizer: avoid losing track of a still-running child.
        if not self._child_created:
            # We didn't get to successfully create a child process.
            return
        # In case the child hasn't been waited on, check if it's done.
        self.poll(_deadstate=sys.maxsize)
        if self.returncode is None and _active is not None:
            # Child is still running, keep us alive until we can wait on it.
            _active.append(self)
    def communicate(self, input=None):
        """Interact with process: Send data to stdin.  Read data from
        stdout and stderr, until end-of-file is reached.  Wait for
        process to terminate.  The optional input argument should be a
        string to be sent to the child process, or None, if no data
        should be sent to the child.

        communicate() returns a tuple (stdout, stderr)."""
        # Optimization: If we are only using one pipe, or no pipe at
        # all, using select() or threads is unnecessary.
        if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
            # At most one of the three streams is a pipe, so plain
            # blocking reads/writes cannot deadlock.
            stdout = None
            stderr = None
            if self.stdin:
                if input:
                    self.stdin.write(input)
                self.stdin.close()
            elif self.stdout:
                stdout = self.stdout.read()
            elif self.stderr:
                stderr = self.stderr.read()
            self.wait()
            return (stdout, stderr)
        # Two or more pipes are open: delegate to the platform-specific
        # implementation, which multiplexes via select() or threads.
        return self._communicate(input)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = GetStdHandle(STD_INPUT_HANDLE)
elif stdin == PIPE:
p2cread, p2cwrite = CreatePipe(None, 0)
# Detach and turn into fd
p2cwrite = p2cwrite.Detach()
p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
elif is_int(stdin):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
elif stdout == PIPE:
c2pread, c2pwrite = CreatePipe(None, 0)
# Detach and turn into fd
c2pread = c2pread.Detach()
c2pread = msvcrt.open_osfhandle(c2pread, 0)
elif is_int(stdout):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = GetStdHandle(STD_ERROR_HANDLE)
elif stderr == PIPE:
errread, errwrite = CreatePipe(None, 0)
# Detach and turn into fd
errread = errread.Detach()
errread = msvcrt.open_osfhandle(errread, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif is_int(stderr):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return DuplicateHandle(GetCurrentProcess(), handle,
GetCurrentProcess(), 0, 1,
DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (GetVersion() >= 0x80000000L or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C wont
# kill children.
creationflags = creationflags | CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = CreateProcess(executable, args,
# no special security
None, None,
# must inherit handles to pass std
# handles
1,
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or simliar), but
# how can this be done from Python?
raise WindowsError(*e.args)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def poll(self, _deadstate=None):
"""Check if child process has terminated. Returns returncode
attribute."""
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
obj = WaitForSingleObject(self._handle, INFINITE)
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
self.stdin.write(input)
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tupel with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif is_int(stdin):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif is_int(stdout):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif is_int(stderr):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
        def _set_cloexec_flag(self, fd):
            # Mark fd close-on-exec so it is not inherited by the child
            # after a successful exec.
            try:
                cloexec_flag = fcntl.FD_CLOEXEC
            except AttributeError:
                # Some platforms lack the symbolic constant; POSIX
                # defines FD_CLOEXEC as 1.
                cloexec_flag = 1
            old = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
        def _close_fds(self, but):
            # Close every descriptor above stderr except `but` (the
            # error-reporting pipe); implements close_fds=True in the
            # forked child.
            for i in range(3, MAXFD):
                if i == but:
                    continue
                try:
                    os.close(i)
                except KeyboardInterrupt:
                    raise # SCons: don't swallow keyboard interrupts
                except:
                    # fd was not open; nothing to do.
                    pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if is_string(args):
args = [args]
if shell:
args = ["/bin/sh", "-c"] + args
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = os.pipe()
self._set_cloexec_flag(errpipe_write)
self.pid = os.fork()
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite:
os.close(p2cwrite)
if c2pread:
os.close(c2pread)
if errread:
os.close(errread)
os.close(errpipe_read)
# Dup fds for child
if p2cread:
os.dup2(p2cread, 0)
if c2pwrite:
os.dup2(c2pwrite, 1)
if errwrite:
os.dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the same
# fd more than once, or standard fds.
try:
set
except NameError:
# Fall-back for earlier Python versions, so epydoc
# can use this module directly to execute things.
if p2cread:
os.close(p2cread)
if c2pwrite and c2pwrite not in (p2cread,):
os.close(c2pwrite)
if errwrite and errwrite not in (p2cread, c2pwrite):
os.close(errwrite)
else:
for fd in set((p2cread, c2pwrite, errwrite))-set((0,1,2)):
if fd: os.close(fd)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
apply(preexec_fn)
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except KeyboardInterrupt:
raise # SCons: don't swallow keyboard interrupts
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
os.close(errpipe_write)
if p2cread and p2cwrite:
os.close(p2cread)
if c2pwrite and c2pread:
os.close(c2pwrite)
if errwrite and errread:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising exception
data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
os.close(errpipe_read)
if data != "":
os.waitpid(self.pid, 0)
child_exception = pickle.loads(data)
raise child_exception
        def _handle_exitstatus(self, sts):
            # Convert a raw os.waitpid() status into self.returncode:
            # negative signal number if the child was killed by a
            # signal, otherwise its exit status.
            if os.WIFSIGNALED(sts):
                self.returncode = -os.WTERMSIG(sts)
            elif os.WIFEXITED(sts):
                self.returncode = os.WEXITSTATUS(sts)
            else:
                # Should never happen
                raise RuntimeError("Unknown child exit status!")
        def poll(self, _deadstate=None):
            """Check if child process has terminated.  Returns returncode
            attribute."""
            if self.returncode is None:
                try:
                    # WNOHANG: return immediately when the child is
                    # still running (waitpid then reports pid 0).
                    pid, sts = os.waitpid(self.pid, os.WNOHANG)
                    if pid == self.pid:
                        self._handle_exitstatus(sts)
                except os.error:
                    # The child may already have been reaped elsewhere;
                    # during interpreter teardown, report _deadstate.
                    if _deadstate is not None:
                        self.returncode = _deadstate
            return self.returncode
        def wait(self):
            """Wait for child process to terminate.  Returns returncode
            attribute."""
            if self.returncode is None:
                # Block until the child exits, then record its status.
                pid, sts = os.waitpid(self.pid, 0)
                self._handle_exitstatus(sts)
            return self.returncode
def _communicate(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if input:
write_set.append(self.stdin)
else:
self.stdin.close()
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
rlist, wlist, xlist = select.select(read_set, write_set, [])
if self.stdin in wlist:
# When select has indicated that the file is writable,
# we can write up to PIPE_BUF bytes without risk
# blocking. POSIX defines PIPE_BUF >= 512
m = memoryview(input)[input_offset:input_offset+512]
bytes_written = os.write(self.stdin.fileno(), m)
input_offset = input_offset + bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
except OSError, e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
sys.stderr.write( "Gosh. No error.\n" )
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
boghison/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_stream_hixie75.py | 496 | 2285 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for stream module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.stream import StreamHixie75
from test.test_msgutil import _create_request_hixie75
class StreamHixie75Test(unittest.TestCase):
    """A unittest for StreamHixie75 class."""
    def test_payload_length(self):
        # Each pair maps an expected decoded length to its Hixie-75 wire
        # encoding: 7 payload bits per byte, high bit set on every byte
        # except the last (e.g. 0x1234 -> 0x80, 0xa4, 0x34).
        for length, bytes in ((0, '\x00'), (0x7f, '\x7f'), (0x80, '\x81\x00'),
                              (0x1234, '\x80\xa4\x34')):
            test_stream = StreamHixie75(_create_request_hixie75(bytes))
            self.assertEqual(
                length, test_stream._read_payload_length_hixie75())
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
klahnakoski/TestFailures | pyLibrary/debugs/startup.py | 1 | 5798 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import argparse as _argparse
import os
import tempfile
import sys
from pyLibrary.jsons import ref
from pyLibrary.dot import listwrap, wrap, unwrap
from pyLibrary.debugs.logs import Log
from pyLibrary.env.files import File
# PARAMETERS MATCH argparse.ArgumentParser.add_argument()
# https://docs.python.org/dev/library/argparse.html#the-add-argument-method
#
# name or flags - Either a name or a list of option strings, e.g. foo or -f, --foo.
# action - The basic type of action to be taken when this argument is encountered at the command line.
# nargs - The number of command-line arguments that should be consumed.
# const - A constant value required by some action and nargs selections.
# default - The value produced if the argument is absent from the command line.
# type - The type to which the command-line argument should be converted.
# choices - A container of the allowable values for the argument.
# required - Whether or not the command-line option may be omitted (optionals only).
# help - A brief description of what the argument does.
# metavar - A name for the argument in usage messages.
# dest - The name of the attribute to be added to the object returned by parse_args().
def argparse(defs):
    """Parse sys.argv according to `defs` — a list of argument
    definitions mirroring ArgumentParser.add_argument() — and return
    the parsed values as a wrapped dict."""
    parser = _argparse.ArgumentParser()
    for definition in listwrap(defs):
        params = definition.copy()
        # `name` holds the flag(s); everything else is passed through
        # to add_argument() as keyword options.
        flags = params.name
        params.name = None
        parser.add_argument(*unwrap(listwrap(flags)), **params)
    parsed = parser.parse_args()
    return wrap({name: getattr(parsed, name) for name in vars(parsed)})
def read_settings(filename=None, defs=None):
    """Read program settings from a JSON file.

    filename - path to the settings file; when None, a --settings
               command-line argument (default "./settings.json") is used
    defs     - extra argparse argument definitions; the parsed values
               are attached to the returned settings as `.args`
    """
    # READ SETTINGS
    if filename:
        settings_file = File(filename)
        if not settings_file.exists:
            # BUGFIX: the message said "Can not file" and carried a
            # corrupted template placeholder; use the {{name}} template
            # convention so the path is actually interpolated.
            Log.error("Can not find settings file {{filename}}", {
                "filename": settings_file.abspath
            })
        settings = ref.get("file:///" + settings_file.abspath)
        if defs:
            settings.args = argparse(defs)
        return settings
    else:
        defs = listwrap(defs)
        defs.append({
            "name": ["--settings", "--settings-file", "--settings_file"],
            "help": "path to JSON file with settings",
            "type": str,
            "dest": "filename",
            "default": "./settings.json",
            "required": False
        })
        args = argparse(defs)
        settings = ref.get("file://" + args.filename.replace(os.sep, "/"))
        settings.args = args
        return settings
# snagged from https://github.com/pycontribs/tendo/blob/master/tendo/singleton.py (under licence PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2)
class SingleInstance:
    """
    ALLOW ONLY ONE RUNNING INSTANCE OF A PROGRAM

    To prevent your script from running in parallel, instantiate the
    SingleInstance class (or use it as a context manager).  If another
    instance is already running, the process exits with the message
    "Another instance is already running, quitting." and return code -1.

        me = SingleInstance()

    This is useful for scripts executed frequently by crontab.  It works
    by creating (and locking) a lock file whose name is derived from the
    full path of the script file.
    """
    def __init__(self, flavor_id=""):
        self.initialized = False
        # Derive a filesystem-safe lock file name from the script path
        # plus the optional flavor_id, placed in the temp directory.
        appname = os.path.splitext(os.path.abspath(sys.argv[0]))[0]
        basename = ((appname + '-%s') % flavor_id).replace("/", "-").replace(":", "").replace("\\", "-").replace("-.-", "-") + '.lock'
        self.lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)
    def __enter__(self):
        Log.note("SingleInstance.lockfile = " + self.lockfile)
        if sys.platform == 'win32':
            try:
                # file already exists, we try to remove (in case previous execution was interrupted)
                if os.path.exists(self.lockfile):
                    os.unlink(self.lockfile)
                # O_EXCL makes the open fail when another live instance
                # still holds the file.
                self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except Exception, e:
                Log.alarm("Another instance is already running, quitting.")
                sys.exit(-1)
        else: # non Windows
            import fcntl
            self.fp = open(self.lockfile, 'w')
            try:
                # Advisory lock; LOCK_NB makes this fail immediately
                # when another process already holds the lock.
                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                Log.note("\n"+
                    "**********************************************************************\n"+
                    "** Another instance is already running, quitting.\n"+
                    "**********************************************************************\n"
                )
                sys.exit(-1)
        self.initialized = True
    def __exit__(self, type, value, traceback):
        self.__del__()
    def __del__(self):
        # Release the lock and remove the lock file; idempotent via the
        # `initialized` flag so double invocation is harmless.
        temp, self.initialized = self.initialized, False
        if not temp:
            return
        try:
            if sys.platform == 'win32':
                if hasattr(self, 'fd'):
                    os.close(self.fd)
                    os.unlink(self.lockfile)
            else:
                import fcntl
                fcntl.lockf(self.fp, fcntl.LOCK_UN)
                if os.path.isfile(self.lockfile):
                    os.unlink(self.lockfile)
        except Exception as e:
            Log.warning("Problem with SingleInstance __del__()", e)
            sys.exit(-1)
| mpl-2.0 |
ChrisAntaki/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/queuestatus.py | 121 | 2109 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from config import messages
from google.appengine.ext import db
from model.queuepropertymixin import QueuePropertyMixin
class QueueStatus(db.Model, QueuePropertyMixin):
    """One status message posted to the queue status server by a bot."""
    # User who posted the status update.
    author = db.UserProperty()
    # Name of the queue this status belongs to.
    queue_name = db.StringProperty()
    # Identifier of the bot that posted the status.
    bot_id = db.StringProperty()
    # Bug and patch being processed when the status was posted, if any.
    active_bug_id = db.IntegerProperty()
    active_patch_id = db.IntegerProperty()
    # Free-form status text; may span multiple lines.
    message = db.StringProperty(multiline=True)
    # Creation time, filled in automatically on first save.
    date = db.DateTimeProperty(auto_now_add=True)
    # Optional attached results file as a binary blob.
    results_file = db.BlobProperty()
    def is_retry_request(self):
        # True when the message equals the canonical retry marker
        # defined in config.messages.
        return self.message == messages.retry_status
| bsd-3-clause |
SergeyPirogov/selene | tests/integration/helpers/givenpage.py | 1 | 1831 | import os
EMPTY_PAGE_URL = 'file://' + os.path.abspath(os.path.dirname(__file__)) + '/../../resources/empty.html'
class LoadingHtmlPage(object):
    """A page description (body + render delay) that can be loaded into a
    driver later via :meth:`load_in`."""

    def __init__(self, timeout=0, body=""):
        self._body = body        # HTML to inject into <body>
        self._timeout = timeout  # delay in ms before the injection fires

    def load_in(self, driver):
        # Open the empty fixture page, then schedule the body injection.
        driver.get(EMPTY_PAGE_URL)
        return LoadedHtmlPage(driver).render_body(self._body, self._timeout)
class LoadedHtmlPage(object):
    """Wrapper around a driver whose page is already open; injects markup
    and scripts into it.  All mutators return ``self`` for chaining."""

    def __init__(self, driver):
        self._driver = driver

    def render_body(self, body, timeout=0):
        # Replace the page <body> with `body` after `timeout` milliseconds.
        # Newlines and double quotes are escaped so the markup survives
        # being embedded inside a JS string literal.
        markup = body.replace("\n", " ").replace('"', '\\"')
        self._driver.execute_script(
            'setTimeout(function() { document.getElementsByTagName("body")[0].innerHTML = "%s";}, %s);'
            % (markup, timeout))
        return self

    def execute_script(self, script):
        # Run `script` immediately in the page.
        self._driver.execute_script(script)
        return self

    def execute_script_with_timeout(self, script, timeout):
        # Defer `script` by `timeout` ms; newlines are collapsed so the
        # setTimeout wrapper stays a single-line JS statement.
        self._driver.execute_script(
            "setTimeout(function() { %s }, %s);"
            % (script.replace("\n", " "), timeout))
        return self

    def render_body_with_timeout(self, body, timeout):
        return self.render_body(body, timeout)
class GivenPage(object):
    """Facade used by tests to put the browser into a known HTML state."""

    def __init__(self, driver):
        self._driver = driver

    def load_body_with_timeout(self, body, timeout):
        # Render into whatever page is currently open, after `timeout` ms.
        return LoadedHtmlPage(self._driver).render_body_with_timeout(body, timeout)

    def opened_with_body_with_timeout(self, body, timeout):
        # Open the empty fixture page first, then render `body` after `timeout` ms.
        return LoadingHtmlPage(timeout, body).load_in(self._driver)

    def opened_with_body(self, body):
        return self.opened_with_body_with_timeout(body, 0)

    def opened_empty(self):
        return LoadingHtmlPage().load_in(self._driver)

    def load_body(self, body):
        # Immediate render into the current page.
        return LoadedHtmlPage(self._driver).render_body(body)
| mit |
socialplanning/opencore | opencore/scripts/fixlists.py | 1 | 1962 | """Fixes listen mailing lists that have got purged from the IListLookup
utility. If an optional argument is provided, it's assumed to be
a log file (from a previous run) from which we read FAILED fixes
to try again.
"""
from zope.event import notify
from zope.app.event.objectevent import ObjectModifiedEvent
from zope.component import getUtility
from Products.listen.interfaces import IListLookup
from Testing.makerequest import makerequest
import transaction
from zope.app.component.hooks import setSite
def fixlist(thelist):
    # Fire ObjectModifiedEvent so listen's subscribers re-register the
    # mailing list with the IListLookup utility.
    print "fixing %r" % '/'.join(thelist.getPhysicalPath())
    notify(ObjectModifiedEvent(thelist))
def _get_failures_to_retry(logfile):
lines = logfile.readlines()
failures = []
for line in lines:
if line.startswith('FAILED'):
path = line.split()[-1]
path = path.strip("!'")
projname = path.split('/')[3]
failures.append(projname)
return failures
def fixall(project_container, retrylog=None):
    """Re-register every mailing list under ``project_container``.

    If ``retrylog`` (an open file from a previous run) is given, only the
    projects whose lists FAILED in that run are processed.
    """
    if retrylog is not None:
        names = _get_failures_to_retry(retrylog)
    else:
        names = None
    ll = getUtility(IListLookup, context=app.openplans)
    for i, proj in enumerate(project_container.objectValues('OpenProject')):
        if names and proj.getId() not in names:
            continue
        # Re-sync the connection so a long run sees fresh data.
        project_container._p_jar.sync()
        try:
            listcontainer = proj['lists']
        except:
            # NOTE(review): bare except — presumably "project has no 'lists'
            # folder"; it also hides any other error.
            continue
        for ml in listcontainer.objectValues('OpenMailingList'):
            fixlist(ml)
            if ll._mapping.get(ml.mailto.lower()) is None:
                # Still missing from the lookup utility after the event fired.
                print "FAILED %r!" % '/'.join(ml.getPhysicalPath())
        transaction.commit()
# `app` is injected into the namespace by `zopectl run`.  Wrap it so Plone
# code that expects a REQUEST keeps working, and set the site so local
# utility lookups resolve against the openplans site.
app = makerequest(app) # plone stuff barfs without a request.
setSite(app.openplans) # need this or utility lookups mysteriously fail.

import sys

# Optional argv[1]: log file from a previous run whose FAILED entries
# should be retried; otherwise process every project.
if len(sys.argv) > 1:
    retrylog = open(sys.argv[1], 'r')
else:
    retrylog = None

fixall(app.openplans.projects, retrylog)
| gpl-3.0 |
thisisshi/cloud-custodian | tests/test_cwe.py | 2 | 10371 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import jmespath
from pytest_terraform import terraform
from unittest import TestCase
from .common import event_data, BaseTest
from c7n.cwe import CloudWatchEvents
@terraform('event_bridge_bus')
def test_event_bus_describe(test, event_bridge_bus):
    """The cross-account filter on a tagged custom event bus matches the
    bus and annotates the policy violations found in its resource policy."""
    factory = test.replay_flight_data('test_cwe_bus_xaccount')
    p = test.load_policy({
        'name': 'bus-xaccount',
        'resource': 'aws.event-bus',
        'filters': [
            {'tag:Env': 'Sandbox'},
            'cross-account'
        ],
    }, session_factory=factory)
    resources = p.run()
    assert len(resources) == 1
    # Bug fix: this comparison was previously a bare expression statement
    # (missing `assert`), so it silently checked nothing.
    assert resources[0]['Name'] == event_bridge_bus[
        'aws_cloudwatch_event_bus.messenger.name']
    assert 'CrossAccountViolations' in resources[0]
class CloudWatchEventTest(BaseTest):
    """Functional tests for the event-rule / event-rule-target resources,
    replayed against recorded AWS API flight data."""

    def test_event_rule_tags(self):
        # Tag an untagged rule, then verify via the API that the tag landed.
        factory = self.replay_flight_data('test_cwe_rule_tags')
        client = factory().client('events')
        policy = self.load_policy(
            {
                'name': 'cwe-rule',
                'resource': 'aws.event-rule',
                'filters': [
                    {'tag:App': 'absent'},
                    {'Name': 'cloud-custodian-mailer'}],
                'actions': [
                    {'type': 'tag', 'tags': {'App': 'Custodian'}}]
            }, session_factory=factory, config={'region': 'us-west-2'})
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        tags = {t['Key']: t['Value'] for t in
                client.list_tags_for_resource(
                    ResourceARN=policy.resource_manager.get_arns(resources)[0]).get(
                        'Tags')}
        self.assertEqual(tags, {'App': 'Custodian'})

    def test_target_cross_account_remove(self):
        # The cross-account filter flags the offending target, and the
        # delete action removes it from the rule.
        session_factory = self.replay_flight_data("test_cwe_rule_target_cross")
        client = session_factory().client("events")
        policy = self.load_policy(
            {
                "name": "cwe-cross-account",
                "resource": "event-rule-target",
                "filters": [{"type": "cross-account"}],
                "actions": ["delete"],
            },
            session_factory=session_factory,
        )
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        targets = client.list_targets_by_rule(Rule=resources[0]["c7n:parent-id"]).get(
            "Targets"
        )
        self.assertEqual(targets, [])

    def test_event_rule_force_delete(self):
        # force=True lets the delete action remove a rule that still has
        # targets attached.
        session_factory = self.replay_flight_data("test_cwe_rule_force_delete")
        client = session_factory().client('events')
        policy = self.load_policy({
            "name": "cwe-filter-on-target",
            "resource": "aws.event-rule",
            "filters": [
                {
                    "type": "event-rule-target",
                    "key": "[].Arn",
                    "value": "arn:aws:lambda:us-east-1:644160558196:function:test",
                    "op": "in",
                    "value_type": "swap"
                }
            ],
            "actions": [
                {
                    "type": "delete",
                    "force": True
                }
            ]
        }, session_factory=session_factory)
        resources = policy.run()
        # Rule is gone after the run.
        with self.assertRaises(client.exceptions.ResourceNotFoundException):
            client.describe_rule(Name=resources[0]["Name"])
        self.assertEqual(len(resources), 1)

    def test_event_rule_invalid_targets_any(self):
        # Default (any) mode: a rule matches when at least one target's
        # backing resource no longer exists.
        session_factory = self.replay_flight_data("test_cwe_rule_invalid_targets")
        lambda_client = session_factory().client('lambda')
        sns_client = session_factory().client('sns')
        policy = self.load_policy({
            "name": "cwe-filter-on-invalid-target",
            "resource": "aws.event-rule",
            "filters": [
                {
                    "type": "invalid-targets"
                }
            ],
        }, session_factory=session_factory)
        resources = policy.run()
        invalid_targets = set([
            "arn:aws:lambda:us-east-1:644160558196:function:test",
            "arn:aws:sns:us-east-1:644160558196:foo"])
        self.assertEqual(set(resources[0]["c7n:InvalidTargets"]), invalid_targets)
        # Cross-check: the flagged targets really don't resolve...
        with self.assertRaises(lambda_client.exceptions.ClientError):
            lambda_client.get_function(FunctionName="test")
        with self.assertRaises(sns_client.exceptions.NotFoundException):
            sns_client.get_topic_attributes(TopicArn="arn:aws:sns:us-east-1:644160558196:foo")
        # ...while an unflagged topic does.
        res = sns_client.get_topic_attributes(TopicArn="arn:aws:sns:us-east-1:644160558196:test2")
        self.assertTrue(res)

    def test_event_rule_invalid_targets_all(self):
        # all=True: only matches when every target is invalid; here at least
        # one target still resolves, so no resources match.
        session_factory = self.replay_flight_data("test_cwe_rule_invalid_targets")
        policy = self.load_policy({
            "name": "cwe-filter-on-invalid-target",
            "resource": "aws.event-rule",
            "filters": [
                {
                    "type": "invalid-targets",
                    "all": True
                }
            ],
        }, session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 0)
class CloudWatchEventsFacadeTest(TestCase):
    """Unit tests for the CloudWatchEvents helper: extracting resource ids
    from CloudWatch / EventBridge event payloads."""

    def test_get_ids(self):
        # cloudtrail mode with a bare event name resolves instance ids.
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {"detail": event_data("event-cloud-trail-run-instances.json")},
                {"type": "cloudtrail", "events": ["RunInstances"]},
            ),
            ["i-784cdacd", "i-7b4cdace"],
        )

    def test_get_ids_sans_with_details_expr(self):
        # Custom event spec whose ids expression already starts at 'detail.'.
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {'detail': event_data('event-cloud-trail-run-instances.json')},
                {'type': 'cloudtrail', 'events': [
                    {'ids': 'detail.responseElements.instancesSet.items[].instanceId',
                     'source': 'ec2.amazonaws.com',
                     'event': 'RunInstances'}]}),
            ["i-784cdacd", "i-7b4cdace"],
        )

    def test_get_ids_sans_without_details_expr(self):
        # Same extraction, but the ids expression is relative to 'detail'.
        self.assertEqual(
            sorted(CloudWatchEvents.get_ids(
                {'detail': event_data('event-cloud-trail-run-instances.json')},
                {'type': 'cloudtrail', 'events': [
                    {'ids': 'responseElements.instancesSet.items[].instanceId',
                     'source': 'ec2.amazonaws.com',
                     'event': 'RunInstances'}
                ]})),
            ["i-784cdacd", "i-7b4cdace"],
        )

    def test_get_ids_multiple_events(self):
        # Only the spec matching both source and event name (and actually
        # yielding ids) contributes.
        d = event_data("event-cloud-trail-run-instances.json")
        d["eventName"] = "StartInstances"
        self.assertEqual(
            CloudWatchEvents.get_ids(
                {"detail": d},
                {
                    "type": "cloudtrail",
                    "events": [
                        # wrong event name
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "CreateTags",
                            "ids": "requestParameters.resourcesSet.items[].resourceId",
                        },
                        # wrong event source
                        {
                            "source": "ecs.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items",
                        },
                        # matches no resource ids
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet2.items[].instanceId",
                        },
                        # correct
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[].instanceId",
                        },
                        # we don't fall off the end
                        {
                            "source": "ec2.amazonaws.com",
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[]",
                        },
                    ],
                },
            ),
            ["i-784cdacd", u"i-7b4cdace"],
        )

    def test_ec2_state(self):
        # Built-in ec2-instance-state mode.
        self.assertEqual(
            CloudWatchEvents.get_ids(
                event_data("event-instance-state.json"), {"type": "ec2-instance-state"}
            ),
            ["i-a2d74f12"],
        )

    def test_asg_state(self):
        # Built-in asg-instance-state mode resolves the ASG name.
        self.assertEqual(
            CloudWatchEvents.get_ids(
                event_data("event-asg-instance-failed.json"),
                {
                    "type": "asg-instance-state",
                    "events": ["EC2 Instance Launch Unsuccessful"],
                },
            ),
            ["CustodianTest"],
        )

    def test_custom_event(self):
        # Fully custom event spec (event/ids/source keys in any order).
        d = {"detail": event_data("event-cloud-trail-run-instances.json")}
        d["detail"]["eventName"] = "StartInstances"
        self.assertEqual(
            CloudWatchEvents.get_ids(
                d,
                {
                    "type": "cloudtrail",
                    "events": [
                        {
                            "event": "StartInstances",
                            "ids": "responseElements.instancesSet.items[].instanceId",
                            "source": "ec2.amazonaws.com",
                        }
                    ],
                },
            ),
            ["i-784cdacd", u"i-7b4cdace"],
        )

    def test_non_cloud_trail_event(self):
        # match() only recognizes cloudtrail-shaped payloads.
        for event in ["event-instance-state.json", "event-scheduled.json"]:
            self.assertFalse(CloudWatchEvents.match(event_data(event)))

    def test_cloud_trail_resource(self):
        # match() returns the source plus a compiled jmespath ids expression.
        self.assertEqual(
            CloudWatchEvents.match(event_data("event-cloud-trail-s3.json")),
            {
                "source": "s3.amazonaws.com",
                "ids": jmespath.compile("detail.requestParameters.bucketName"),
            },
        )
| apache-2.0 |
Pajinek/spacewalk | java/scripts/findmissingstrings.py | 20 | 1148 | #!/usr/bin/python
try:
import elementtree.ElementTree as et
except:
import xml.etree.ElementTree as et
import os
# Keys that are intentionally left untranslated and must not be reported.
skip = ["emptyspace.jsp"]


def parsefile(file):
    """Return the attribute values of every trans-unit element in *file*.

    In the StringResource XLIFF-style catalogues each trans-unit carries its
    key as an attribute, so the returned list is the list of translation keys.
    ``file`` may be a filename or an open file object (anything
    ``ElementTree.parse`` accepts).
    """
    items = []
    tree = et.parse(file)
    root = tree.getroot()
    # Bug fix: Element.getiterator() was deprecated and removed in
    # Python 3.9; iter() has been available since 2.7 and is equivalent here.
    for node in root.iter():
        if node.tag.endswith('trans-unit'):
            for item in node.items():
                items.append(item[1])
    return items


def diff(en, other):
    """Return the keys present in *en* but missing from *other*,
    excluding the deliberately-skipped keys."""
    notfound = []
    for e in en:
        if e not in skip and e not in other:
            notfound.append(e)
    return notfound
# Keys present in the English master catalogue.
items = parsefile('StringResource_en_US.xml')
#print items

# Every translated catalogue sits next to the master in the CWD.
files = os.listdir('.')
#print files

for file in files:
    if file.startswith('StringResource_') and file.endswith('.xml') and file != 'StringResource_en_US.xml':
        #print 'processing ' + str(file)
        otherkeys = parsefile(file)
        notfound = diff(items, otherkeys)
        if notfound:
            # Build a tab-indented, one-key-per-line report block.
            k = ''
            for nf in notfound:
                k = k + '\t' + nf + '\n'
            print "%s is missing the following keys:\n%s\n---" % (str(file), k)
| gpl-2.0 |
nhomar/odoo-mirror | addons/membership/membership.py | 128 | 27626 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
# Membership workflow states shared by the line-level and partner-level
# computed "state" fields below (selection value, user-facing label).
STATE = [
    ('none', 'Non Member'),
    ('canceled', 'Cancelled Member'),
    ('old', 'Old Member'),
    ('waiting', 'Waiting Member'),
    ('invoiced', 'Invoiced Member'),
    ('free', 'Free Member'),
    ('paid', 'Paid Member'),
]

# Numeric rank per state (note the gap at 5).  NOTE(review): not referenced
# anywhere in this module — presumably consumed by views/reports elsewhere;
# confirm before removing.
STATE_PRIOR = {
    'none': 0,
    'canceled': 1,
    'old': 2,
    'waiting': 3,
    'invoiced': 4,
    'free': 6,
    'paid': 7
}
class membership_line(osv.osv):
    '''Member line'''

    def _get_partners(self, cr, uid, ids, context=None):
        # store= trigger: map changed partner ids to the membership-line ids
        # whose computed 'state' must be recomputed.
        list_membership_line = []
        member_line_obj = self.pool.get('membership.membership_line')
        for partner in self.pool.get('res.partner').browse(cr, uid, ids, context=context):
            if partner.member_lines:
                list_membership_line += member_line_obj.search(cr, uid, [('id', 'in', [ l.id for l in partner.member_lines])], context=context)
        return list_membership_line

    def _get_membership_lines(self, cr, uid, ids, context=None):
        # store= trigger: map changed invoice ids to the membership-line ids
        # linked to those invoices' lines.
        list_membership_line = []
        member_line_obj = self.pool.get('membership.membership_line')
        for invoice in self.pool.get('account.invoice').browse(cr, uid, ids, context=context):
            if invoice.invoice_line:
                list_membership_line += member_line_obj.search(cr, uid, [('account_invoice_line', 'in', [ l.id for l in invoice.invoice_line])], context=context)
        return list_membership_line

    def _check_membership_date(self, cr, uid, ids, context=None):
        """Check if membership product is not in the past
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: List of Membership Line IDs
        @param context: A standard dictionary for contextual values
        """
        # A negative (date_to - date_invoice) means the membership was sold
        # after it had already ended -> constraint violation.
        cr.execute('''
            SELECT MIN(ml.date_to - ai.date_invoice)
            FROM membership_membership_line ml
            JOIN account_invoice_line ail ON (
                ml.account_invoice_line = ail.id
                )
            JOIN account_invoice ai ON (
                ai.id = ail.invoice_id)
            WHERE ml.id IN %s''', (tuple(ids),))
        res = cr.fetchall()
        for r in res:
            if r[0] and r[0] < 0:
                return False
        return True

    def _state(self, cr, uid, ids, name, args, context=None):
        """Compute the state lines
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: List of Membership Line IDs
        @param name: Field Name
        @param context: A standard dictionary for contextual values
        @param return: Dictionary of state Value
        """
        res = {}
        inv_obj = self.pool.get('account.invoice')
        for line in self.browse(cr, uid, ids, context=context):
            # Fetch the state of the invoice backing this membership line.
            cr.execute('''
                SELECT i.state, i.id FROM
                account_invoice i
                WHERE
                i.id = (
                    SELECT l.invoice_id FROM
                    account_invoice_line l WHERE
                    l.id = (
                        SELECT ml.account_invoice_line FROM
                        membership_membership_line ml WHERE
                        ml.id = %s
                        )
                    )
                ''', (line.id,))
            fetched = cr.fetchone()
            if not fetched:
                # No backing invoice at all -> the line is cancelled.
                res[line.id] = 'canceled'
                continue
            istate = fetched[0]
            state = 'none'
            # NOTE(review): bitwise '|' on boolean comparisons — works, but
            # 'or' is what is meant.
            if (istate == 'draft') | (istate == 'proforma'):
                state = 'waiting'
            elif istate == 'open':
                state = 'invoiced'
            elif istate == 'paid':
                state = 'paid'
                # A refund linked through the payments downgrades to canceled.
                inv = inv_obj.browse(cr, uid, fetched[1], context=context)
                for payment in inv.payment_ids:
                    if payment.invoice and payment.invoice.type == 'out_refund':
                        state = 'canceled'
            elif istate == 'cancel':
                state = 'canceled'
            res[line.id] = state
        return res

    _description = __doc__
    _name = 'membership.membership_line'
    _columns = {
        'partner': fields.many2one('res.partner', 'Partner', ondelete='cascade', select=1),
        'membership_id': fields.many2one('product.product', string="Membership", required=True),
        'date_from': fields.date('From', readonly=True),
        'date_to': fields.date('To', readonly=True),
        'date_cancel': fields.date('Cancel date'),
        'date': fields.date('Join Date', help="Date on which member has joined the membership"),
        'member_price': fields.float('Membership Fee', digits_compute= dp.get_precision('Product Price'), required=True, help='Amount for the membership'),
        'account_invoice_line': fields.many2one('account.invoice.line', 'Account Invoice line', readonly=True),
        'account_invoice_id': fields.related('account_invoice_line', 'invoice_id', type='many2one', relation='account.invoice', string='Invoice', readonly=True),
        'state': fields.function(_state,
            string='Membership Status', type='selection',
            selection=STATE, store = {
                'account.invoice': (_get_membership_lines, ['state'], 10),
                'res.partner': (_get_partners, ['membership_state'], 12),
            }, help="""It indicates the membership status.
            -Non Member: A member who has not applied for any membership.
            -Cancelled Member: A member who has cancelled his membership.
            -Old Member: A member whose membership date has expired.
            -Waiting Member: A member who has applied for the membership and whose invoice is going to be created.
            -Invoiced Member: A member whose invoice has been created.
            -Paid Member: A member who has paid the membership amount."""),
        'company_id': fields.related('account_invoice_line', 'invoice_id', 'company_id', type="many2one", relation="res.company", string="Company", readonly=True, store=True)
    }
    _rec_name = 'partner'
    _order = 'id desc'
    _constraints = [
        (_check_membership_date, 'Error, this membership product is out of date', [])
    ]
class Partner(osv.osv):
    '''Partner'''
    _inherit = 'res.partner'

    def _get_partner_id(self, cr, uid, ids, context=None):
        # store= trigger for membership-line changes: the affected partners
        # plus, transitively, everyone associated to them.
        member_line_obj = self.pool.get('membership.membership_line')
        res_obj = self.pool.get('res.partner')
        data_inv = member_line_obj.browse(cr, uid, ids, context=context)
        list_partner = []
        for data in data_inv:
            list_partner.append(data.partner.id)
        ids2 = list_partner
        # Walk the associate_member chain until no new partners are found.
        while ids2:
            ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
            list_partner += ids2
        return list_partner

    def _get_invoice_partner(self, cr, uid, ids, context=None):
        # store= trigger for invoice changes: same transitive expansion as
        # _get_partner_id, starting from the invoiced partners.
        inv_obj = self.pool.get('account.invoice')
        res_obj = self.pool.get('res.partner')
        data_inv = inv_obj.browse(cr, uid, ids, context=context)
        list_partner = []
        for data in data_inv:
            list_partner.append(data.partner_id.id)
        ids2 = list_partner
        while ids2:
            ids2 = res_obj.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
            list_partner += ids2
        return list_partner

    def _membership_state(self, cr, uid, ids, name, args, context=None):
        """This Function return Membership State For Given Partner.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: List of Partner IDs
        @param name: Field Name
        @param context: A standard dictionary for contextual values
        @param return: Dictionary of Membership state Value
        """
        res = {}
        for id in ids:
            res[id] = 'none'
        today = time.strftime('%Y-%m-%d')
        for id in ids:
            partner_data = self.browse(cr, uid, id, context=context)
            # Explicit cancel/stop dates in the past short-circuit everything.
            if partner_data.membership_cancel and today > partner_data.membership_cancel:
                res[id] = 'canceled'
                continue
            if partner_data.membership_stop and today > partner_data.membership_stop:
                res[id] = 'old'
                continue
            # s encodes the best state found so far:
            # 0=paid, 1=invoiced, 2=canceled, 3=waiting, 4=nothing current,
            # 5=old, 6=none (lower wins among 0-3).
            s = 4
            if partner_data.member_lines:
                for mline in partner_data.member_lines:
                    if mline.date_to >= today:
                        if mline.account_invoice_line and mline.account_invoice_line.invoice_id:
                            mstate = mline.account_invoice_line.invoice_id.state
                            if mstate == 'paid':
                                s = 0
                                # A linked refund downgrades paid to canceled.
                                inv = mline.account_invoice_line.invoice_id
                                for payment in inv.payment_ids:
                                    if payment.invoice.type == 'out_refund':
                                        s = 2
                                        break
                            elif mstate == 'open' and s!=0:
                                s = 1
                            elif mstate == 'cancel' and s!=0 and s!=1:
                                s = 2
                            elif (mstate == 'draft' or mstate == 'proforma') and s!=0 and s!=1:
                                s = 3
                if s==4:
                    # No line is current: distinguish expired-paid (old)
                    # from never-member (none) per line.
                    for mline in partner_data.member_lines:
                        if mline.date_from < today and mline.date_to < today and mline.date_from <= mline.date_to and (mline.account_invoice_line and mline.account_invoice_line.invoice_id.state) == 'paid':
                            s = 5
                        else:
                            s = 6
            if s==0:
                res[id] = 'paid'
            elif s==1:
                res[id] = 'invoiced'
            elif s==2:
                res[id] = 'canceled'
            elif s==3:
                res[id] = 'waiting'
            elif s==5:
                res[id] = 'old'
            elif s==6:
                res[id] = 'none'
            # Free membership overrides everything except paid.
            if partner_data.free_member and s!=0:
                res[id] = 'free'
            # Associated members inherit the state of their associate.
            if partner_data.associate_member:
                res_state = self._membership_state(cr, uid, [partner_data.associate_member.id], name, args, context=context)
                res[id] = res_state[partner_data.associate_member.id]
        return res

    def _membership_date(self, cr, uid, ids, name, args, context=None):
        """Return date of membership"""
        # Multi-field function; only the first requested field name is used
        # per call (start/stop/cancel are computed independently).
        name = name[0]
        res = {}
        member_line_obj = self.pool.get('membership.membership_line')
        for partner in self.browse(cr, uid, ids, context=context):
            # Dates come from the associate's lines when one is set.
            if partner.associate_member:
                partner_id = partner.associate_member.id
            else:
                partner_id = partner.id
            res[partner.id] = {
                'membership_start': False,
                'membership_stop': False,
                'membership_cancel': False
            }
            if name == 'membership_start':
                # Earliest non-cancelled line.
                line_id = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
                    limit=1, order='date_from', context=context)
                if line_id:
                    res[partner.id]['membership_start'] = member_line_obj.read(cr, uid, [line_id[0]],
                        ['date_from'], context=context)[0]['date_from']
            if name == 'membership_stop':
                # Latest non-cancelled line.
                line_id1 = member_line_obj.search(cr, uid, [('partner', '=', partner_id),('date_cancel','=',False)],
                    limit=1, order='date_to desc', context=context)
                if line_id1:
                    res[partner.id]['membership_stop'] = member_line_obj.read(cr, uid, [line_id1[0]],
                        ['date_to'], context=context)[0]['date_to']
            if name == 'membership_cancel':
                if partner.membership_state == 'canceled':
                    line_id2 = member_line_obj.search(cr, uid, [('partner', '=', partner.id)], limit=1, order='date_cancel', context=context)
                    if line_id2:
                        res[partner.id]['membership_cancel'] = member_line_obj.read(cr, uid, [line_id2[0]], ['date_cancel'], context=context)[0]['date_cancel']
        return res

    def _get_partners(self, cr, uid, ids, context=None):
        # store= trigger on res.partner itself: a change propagates down the
        # associate_member chain.
        ids2 = ids
        while ids2:
            ids2 = self.search(cr, uid, [('associate_member', 'in', ids2)], context=context)
            ids += ids2
        return ids

    def __get_membership_state(self, *args, **kwargs):
        # Name-mangling-safe indirection so _columns can reference the
        # compute method.
        return self._membership_state(*args, **kwargs)

    _columns = {
        'associate_member': fields.many2one('res.partner', 'Associate Member',help="A member with whom you want to associate your membership.It will consider the membership state of the associated member."),
        'member_lines': fields.one2many('membership.membership_line', 'partner', 'Membership'),
        'free_member': fields.boolean('Free Member', help = "Select if you want to give free membership."),
        'membership_amount': fields.float(
            'Membership Amount', digits=(16, 2),
            help = 'The price negotiated by the partner'),
        'membership_state': fields.function(
            __get_membership_state,
            string = 'Current Membership Status', type = 'selection',
            selection = STATE,
            store = {
                'account.invoice': (_get_invoice_partner, ['state'], 10),
                'membership.membership_line': (_get_partner_id, ['state'], 10),
                'res.partner': (_get_partners, ['free_member', 'membership_state', 'associate_member'], 10)
            }, help='It indicates the membership state.\n'
                    '-Non Member: A partner who has not applied for any membership.\n'
                    '-Cancelled Member: A member who has cancelled his membership.\n'
                    '-Old Member: A member whose membership date has expired.\n'
                    '-Waiting Member: A member who has applied for the membership and whose invoice is going to be created.\n'
                    '-Invoiced Member: A member whose invoice has been created.\n'
                    '-Paying member: A member who has paid the membership fee.'),
        # NOTE(review): multi key below is spelled 'membeship_start' (typo);
        # it only groups compute calls, but confirm before "fixing" it.
        'membership_start': fields.function(
            _membership_date, multi = 'membeship_start',
            string = 'Membership Start Date', type = 'date',
            store = {
                'account.invoice': (_get_invoice_partner, ['state'], 10),
                'membership.membership_line': (_get_partner_id, ['state'], 10, ),
                'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['free_member'], 10)
            }, help="Date from which membership becomes active."),
        'membership_stop': fields.function(
            _membership_date,
            string = 'Membership End Date', type='date', multi='membership_stop',
            store = {
                'account.invoice': (_get_invoice_partner, ['state'], 10),
                'membership.membership_line': (_get_partner_id, ['state'], 10),
                'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['free_member'], 10)
            }, help="Date until which membership remains active."),
        'membership_cancel': fields.function(
            _membership_date,
            string = 'Cancel Membership Date', type='date', multi='membership_cancel',
            store = {
                'account.invoice': (_get_invoice_partner, ['state'], 11),
                'membership.membership_line': (_get_partner_id, ['state'], 10),
                'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['free_member'], 10)
            }, help="Date on which membership has been cancelled"),
    }
    _defaults = {
        'free_member': False,
        'membership_cancel': False,
    }

    def _check_recursion(self, cr, uid, ids, context=None):
        """Check Recursive for Associated Members.
        """
        # Follow associate_member links; more than 100 hops is treated as a
        # cycle and rejected.
        level = 100
        while len(ids):
            cr.execute('SELECT DISTINCT associate_member FROM res_partner WHERE id IN %s', (tuple(ids),))
            ids = filter(None, map(lambda x:x[0], cr.fetchall()))
            if not level:
                return False
            level -= 1
        return True

    _constraints = [
        (_check_recursion, 'Error ! You cannot create recursive associated members.', ['associate_member'])
    ]

    def create_membership_invoice(self, cr, uid, ids, product_id=None, datas=None, context=None):
        """ Create Customer Invoice of Membership for partners.
        @param datas: datas has dictionary value which consist Id of Membership product and Cost Amount of Membership.
                      datas = {'membership_product_id': None, 'amount': None}
        """
        invoice_obj = self.pool.get('account.invoice')
        invoice_line_obj = self.pool.get('account.invoice.line')
        invoice_tax_obj = self.pool.get('account.invoice.tax')
        product_id = product_id or datas.get('membership_product_id', False)
        amount = datas.get('amount', 0.0)
        invoice_list = []
        if type(ids) in (int, long,):
            ids = [ids]
        for partner in self.browse(cr, uid, ids, context=context):
            account_id = partner.property_account_receivable and partner.property_account_receivable.id or False
            fpos_id = partner.property_account_position and partner.property_account_position.id or False
            addr = self.address_get(cr, uid, [partner.id], ['invoice'])
            if partner.free_member:
                raise osv.except_osv(_('Error!'),
                    _("Partner is a free Member."))
            if not addr.get('invoice', False):
                raise osv.except_osv(_('Error!'),
                    _("Partner doesn't have an address to make the invoice."))
            quantity = 1
            line_value = {
                'product_id': product_id,
            }
            # Let the standard onchange fill taxes/account/uom for the line.
            line_dict = invoice_line_obj.product_id_change(cr, uid, {},
                product_id, False, quantity, '', 'out_invoice', partner.id, fpos_id, price_unit=amount, context=context)
            line_value.update(line_dict['value'])
            line_value['price_unit'] = amount
            if line_value.get('invoice_line_tax_id', False):
                # Convert tax ids to the (6, 0, ids) write format.
                tax_tab = [(6, 0, line_value['invoice_line_tax_id'])]
                line_value['invoice_line_tax_id'] = tax_tab
            invoice_id = invoice_obj.create(cr, uid, {
                'partner_id': partner.id,
                'account_id': account_id,
                'fiscal_position': fpos_id or False
                }, context=context)
            line_value['invoice_id'] = invoice_id
            invoice_line_id = invoice_line_obj.create(cr, uid, line_value, context=context)
            invoice_obj.write(cr, uid, invoice_id, {'invoice_line': [(6, 0, [invoice_line_id])]}, context=context)
            invoice_list.append(invoice_id)
            if line_value['invoice_line_tax_id']:
                tax_value = invoice_tax_obj.compute(cr, uid, invoice_id).values()
                for tax in tax_value:
                    invoice_tax_obj.create(cr, uid, tax, context=context)
        #recompute the membership_state of those partners
        self.pool.get('res.partner').write(cr, uid, ids, {})
        return invoice_list
class Product(osv.osv):

    def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        # When opened from the membership menu (context['product'] ==
        # 'membership_product'), swap in the membership-specific views.
        model_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        if ('product' in context) and (context['product']=='membership_product'):
            model_data_ids_form = model_obj.search(cr, user, [('model','=','ir.ui.view'), ('name', 'in', ['membership_products_form', 'membership_products_tree'])], context=context)
            resource_id_form = model_obj.read(cr, user, model_data_ids_form, fields=['res_id', 'name'], context=context)
            dict_model = {}
            for i in resource_id_form:
                dict_model[i['name']] = i['res_id']
            if view_type == 'form':
                view_id = dict_model['membership_products_form']
            else:
                view_id = dict_model['membership_products_tree']
        return super(Product,self).fields_view_get(cr, user, view_id, view_type, context, toolbar, submenu)

    # NOTE(review): the string below is mid-class, so it is NOT the class
    # docstring — it is a no-op statement.  Kept byte-identical here.
    '''Product'''
    _inherit = 'product.template'
    _columns = {
        'membership': fields.boolean('Membership', help='Check if the product is eligible for membership.'),
        'membership_date_from': fields.date('Membership Start Date', help='Date from which membership becomes active.'),
        'membership_date_to': fields.date('Membership End Date', help='Date until which membership remains active.'),
    }
    _sql_constraints = [('membership_date_greater','check(membership_date_to >= membership_date_from)','Error ! Ending Date cannot be set before Beginning Date.')]
    _defaults = {
        'membership': False,
    }
class Invoice(osv.osv):
    '''Invoice'''
    _inherit = 'account.invoice'

    def action_cancel(self, cr, uid, ids, context=None):
        '''Create a 'date_cancel' on the membership_line object'''
        # Stamp today's date on every membership line backed by a line of the
        # cancelled invoices, then run the standard cancel workflow.
        member_line_obj = self.pool.get('membership.membership_line')
        today = time.strftime('%Y-%m-%d')
        for invoice in self.browse(cr, uid, ids, context=context):
            mlines = member_line_obj.search(cr, uid,
                [('account_invoice_line', 'in',
                    [l.id for l in invoice.invoice_line])])
            member_line_obj.write(cr, uid, mlines, {'date_cancel': today})
        return super(Invoice, self).action_cancel(cr, uid, ids, context=context)
class account_invoice_line(osv.osv):
    _inherit='account.invoice.line'

    def write(self, cr, uid, ids, vals, context=None):
        """Overrides orm write method
        """
        # Keep membership lines in sync when an invoice line's product
        # switches to/from a membership product.
        member_line_obj = self.pool.get('membership.membership_line')
        res = super(account_invoice_line, self).write(cr, uid, ids, vals, context=context)
        for line in self.browse(cr, uid, ids, context=context):
            if line.invoice_id.type == 'out_invoice':
                ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
                if line.product_id and line.product_id.membership and not ml_ids:
                    # Product line has changed to a membership product
                    date_from = line.product_id.membership_date_from
                    date_to = line.product_id.membership_date_to
                    # Memberships invoiced mid-period start at the invoice date.
                    if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to:
                        date_from = line.invoice_id.date_invoice
                    member_line_obj.create(cr, uid, {
                        'partner': line.invoice_id.partner_id.id,
                        'membership_id': line.product_id.id,
                        'member_price': line.price_unit,
                        'date': time.strftime('%Y-%m-%d'),
                        'date_from': date_from,
                        'date_to': date_to,
                        'account_invoice_line': line.id,
                        }, context=context)
                if line.product_id and not line.product_id.membership and ml_ids:
                    # Product line has changed to a non membership product
                    member_line_obj.unlink(cr, uid, ml_ids, context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        """Remove Membership Line Record for Account Invoice Line
        """
        member_line_obj = self.pool.get('membership.membership_line')
        for id in ids:
            ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', id)], context=context)
            member_line_obj.unlink(cr, uid, ml_ids, context=context)
        return super(account_invoice_line, self).unlink(cr, uid, ids, context=context)

    def create(self, cr, uid, vals, context=None):
        """Overrides orm create method
        """
        # Mirror of write(): creating a customer-invoice line for a
        # membership product also creates the membership line.
        member_line_obj = self.pool.get('membership.membership_line')
        result = super(account_invoice_line, self).create(cr, uid, vals, context=context)
        line = self.browse(cr, uid, result, context=context)
        if line.invoice_id.type == 'out_invoice':
            ml_ids = member_line_obj.search(cr, uid, [('account_invoice_line', '=', line.id)], context=context)
            if line.product_id and line.product_id.membership and not ml_ids:
                # Product line is a membership product
                date_from = line.product_id.membership_date_from
                date_to = line.product_id.membership_date_to
                if line.invoice_id.date_invoice > date_from and line.invoice_id.date_invoice < date_to:
                    date_from = line.invoice_id.date_invoice
                member_line_obj.create(cr, uid, {
                    'partner': line.invoice_id.partner_id and line.invoice_id.partner_id.id or False,
                    'membership_id': line.product_id.id,
                    'member_price': line.price_unit,
                    'date': time.strftime('%Y-%m-%d'),
                    'date_from': date_from,
                    'date_to': date_to,
                    'account_invoice_line': line.id,
                    }, context=context)
        return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dimdung/boto | boto/cloudsearch2/optionstatus.py | 153 | 8121 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
class OptionStatus(dict):
    """Option values merged with their service-side status metadata.

    The option values themselves live in the underlying dict; the status
    fields are kept as plain attributes, merged from the JSON object
    stored in the ``Status`` part of the service response.

    :ivar domain: The domain object this option is associated with.
    :ivar creation_date: A timestamp for when this option was created.
    :ivar status: The state of processing a change to an option.
        Possible values:

        * RequiresIndexDocuments: the option's latest value will not
          be visible in searches until IndexDocuments has been called
          and indexing is complete.
        * Processing: the option's latest value is not yet visible in
          all searches but is in the process of being activated.
        * Active: the option's latest value is completely visible.
    :ivar update_date: A timestamp for when this option was updated.
    :ivar update_version: A unique integer that indicates when this
        option was last updated.
    """

    def __init__(self, domain, data=None, refresh_fn=None, refresh_key=None,
                 save_fn=None):
        self.domain = domain
        self.refresh_fn = refresh_fn
        self.refresh_key = refresh_key
        self.save_fn = save_fn
        self.refresh(data)

    def _update_status(self, status):
        # Status metadata lives on attributes, not in the dict payload.
        for attr_name, json_key in (('creation_date', 'CreationDate'),
                                    ('status', 'State'),
                                    ('update_date', 'UpdateDate')):
            setattr(self, attr_name, status[json_key])
        self.update_version = int(status['UpdateVersion'])

    def _update_options(self, options):
        # Merge the option values into the underlying dict, if any.
        if options:
            self.update(options)

    def refresh(self, data=None):
        """Refresh the local state of the object.

        You can either pass new state data in as the parameter ``data``
        or, if that parameter is omitted, the state data will be
        retrieved via ``refresh_fn``.
        """
        if not data and self.refresh_fn:
            data = self.refresh_fn(self.domain.name)
            if data and self.refresh_key:
                # Drill down into the nested response bag.
                for part in self.refresh_key:
                    data = data[part]
        if data:
            self._update_status(data['Status'])
            self._update_options(data['Options'])

    def to_json(self):
        """Return the JSON representation of the options as a string."""
        return json.dumps(self)

    def save(self):
        """Write the current local state back through ``save_fn``."""
        if not self.save_fn:
            return
        self.refresh(self.save_fn(self.domain.name, self.to_json()))
class IndexFieldStatus(OptionStatus):
    """Index-field status whose ``save`` is deliberately a no-op."""

    def save(self):
        pass


class AvailabilityOptionsStatus(OptionStatus):
    """Availability-options status whose ``save`` is deliberately a no-op."""

    def save(self):
        pass


class ScalingParametersStatus(IndexFieldStatus):
    """Scaling-parameters status; inherits the no-op ``save``."""


class ExpressionStatus(IndexFieldStatus):
    """Expression status; inherits the no-op ``save``."""
class ServicePoliciesStatus(OptionStatus):
    """Access-policy document for a domain's service endpoints.

    The underlying dict holds an IAM-style policy whose ``Statement``
    list is mutated in place by the helpers below to grant or revoke
    per-IP access, then persisted via ``save()``.
    """

    def new_statement(self, arn, ip):
        """
        Returns a new policy statement that will allow
        access to the service described by ``arn`` by the
        ip specified in ``ip``.

        :type arn: string
        :param arn: The Amazon Resource Notation identifier for the
            service you wish to provide access to.  This would be
            either the search service or the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        return {
            "Effect": "Allow",
            "Action": "*",  # Docs say use GET, but denies unless *
            "Resource": arn,
            "Condition": {
                "IpAddress": {
                    "aws:SourceIp": [ip]
                }
            }
        }

    def _allow_ip(self, arn, ip):
        # First grant ever: start the Statement list and persist.
        if 'Statement' not in self:
            s = self.new_statement(arn, ip)
            self['Statement'] = [s]
            self.save()
        else:
            # Look for an existing statement for this arn with an
            # IpAddress condition and extend its source-IP list.
            add_statement = True
            for statement in self['Statement']:
                if statement['Resource'] == arn:
                    for condition_name in statement['Condition']:
                        if condition_name == 'IpAddress':
                            add_statement = False
                            condition = statement['Condition'][condition_name]
                            if ip not in condition['aws:SourceIp']:
                                condition['aws:SourceIp'].append(ip)
            if add_statement:
                # No matching IpAddress condition found: append a fresh
                # statement for this arn/ip pair.
                s = self.new_statement(arn, ip)
                self['Statement'].append(s)
            self.save()

    def allow_search_ip(self, ip):
        """
        Add the provided ip address or CIDR block to the list of
        allowable address for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._allow_ip(arn, ip)

    def allow_doc_ip(self, ip):
        """
        Add the provided ip address or CIDR block to the list of
        allowable address for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        # NOTE(review): uses the same ``service_arn`` as allow_search_ip —
        # confirm the domain object exposes a single arn for both services.
        arn = self.domain.service_arn
        self._allow_ip(arn, ip)

    def _disallow_ip(self, arn, ip):
        # Nothing to revoke if no policy statements exist yet.
        if 'Statement' not in self:
            return
        need_update = False
        for statement in self['Statement']:
            if statement['Resource'] == arn:
                for condition_name in statement['Condition']:
                    if condition_name == 'IpAddress':
                        condition = statement['Condition'][condition_name]
                        if ip in condition['aws:SourceIp']:
                            condition['aws:SourceIp'].remove(ip)
                            need_update = True
        # Only round-trip to the service when something actually changed.
        if need_update:
            self.save()

    def disallow_search_ip(self, ip):
        """
        Remove the provided ip address or CIDR block from the list of
        allowable address for the search service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._disallow_ip(arn, ip)

    def disallow_doc_ip(self, ip):
        """
        Remove the provided ip address or CIDR block from the list of
        allowable address for the document service.

        :type ip: string
        :param ip: An IP address or CIDR block you wish to grant access
            to.
        """
        arn = self.domain.service_arn
        self._disallow_ip(arn, ip)
| mit |
cloudera/hue | desktop/core/ext-py/pycryptodomex-3.9.7/lib/Cryptodome/SelfTest/Hash/test_SHA384.py | 5 | 2722 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/test_SHA.py: Self-test for the SHA-384 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Cryptodome.Hash.SHA384"""
# Test vectors from various sources
# This is a list of (expected_result, input[, description]) tuples.
test_data = [
# RFC 4634: Section Page 8.4, "Test 1"
('cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7', 'abc'),
# RFC 4634: Section Page 8.4, "Test 2.2"
('09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712fcc7c71a557e2db966c3e9fa91746039', 'abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu'),
# RFC 4634: Section Page 8.4, "Test 3"
('9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b07b8b3dc38ecc4ebae97ddd87f3d8985', 'a' * 10**6, "'a' * 10**6"),
# Taken from http://de.wikipedia.org/wiki/Secure_Hash_Algorithm
('38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b', ''),
# Example from http://de.wikipedia.org/wiki/Secure_Hash_Algorithm
('71e8383a4cea32d6fd6877495db2ee353542f46fa44bc23100bca48f3366b84e809f0708e81041f427c6d5219a286677',
'Franz jagt im komplett verwahrlosten Taxi quer durch Bayern'),
]
def get_tests(config={}):
from Cryptodome.Hash import SHA384
from .common import make_hash_tests
return make_hash_tests(SHA384, "SHA384", test_data,
digest_size=48,
oid='2.16.840.1.101.3.4.2.2')
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
att-comdev/deckhand | deckhand/db/sqlalchemy/models.py | 1 | 8858 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import types as oslo_types
from oslo_log import log as logging
from oslo_utils import timeutils
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext import declarative
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy.orm import relationship
from sqlalchemy import String
from sqlalchemy.types import PickleType
from sqlalchemy import UniqueConstraint
LOG = logging.getLogger(__name__)
# Declarative base class which maintains a catalog of classes and tables
# relative to that base.
BASE = None
class DeckhandBase(models.ModelBase, models.TimestampMixin):
    """Base class for Deckhand Models.

    Provides the common timestamp/soft-delete columns and dict-style
    serialization shared by every table built in ``__build_tables``.
    """

    __table_args__ = {'mysql_engine': 'Postgre', 'mysql_charset': 'utf8'}
    __table_initialized__ = False
    __protected_attributes__ = set([
        "created_at", "updated_at", "deleted_at", "deleted"])

    # Timestamps default to "now"; updated_at also refreshes on UPDATE.
    created_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=False)
    updated_at = Column(DateTime, default=lambda: timeutils.utcnow(),
                        nullable=True, onupdate=lambda: timeutils.utcnow())
    deleted_at = Column(DateTime, nullable=True)
    deleted = Column(Boolean, nullable=False, default=False)

    def save(self, session=None):
        """Persist the row, creating a session when none is supplied."""
        # Imported here to avoid a circular import with the api module.
        from deckhand.db.sqlalchemy import api as db_api
        super(DeckhandBase, self).save(session or db_api.get_session())

    def safe_delete(self, session=None):
        """Soft-delete: flag the row as deleted instead of removing it."""
        self.deleted = True
        self.deleted_at = timeutils.utcnow()
        super(DeckhandBase, self).save(session=session)

    def keys(self):
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()

    def to_dict(self):
        """Convert the object into dictionary format.

        Timestamp columns are rendered as ISO-8601 strings; absent or
        empty timestamps are normalized to ``None``.
        """
        d = self.__dict__.copy()
        # Remove private state instance, as it is not serializable and causes
        # CircularReference.
        # NOTE(review): pop() without a default raises KeyError for
        # instances never attached to the ORM — confirm callers only pass
        # mapped instances.
        d.pop("_sa_instance_state")
        for k in ["created_at", "updated_at", "deleted_at"]:
            if k in d and d[k]:
                d[k] = d[k].isoformat()
            else:
                d.setdefault(k, None)
        return d
def __build_tables(blob_type_obj, blob_type_list):
    """Declare the ORM table classes against a fresh declarative BASE.

    :param blob_type_obj: column type used for dict-like blob columns.
    :param blob_type_list: column type used for list-like blob columns.

    The classes are exported as attributes of this module at the end.
    """
    global BASE

    # Tables are only built once per process; a truthy BASE means done.
    if BASE:
        return

    BASE = declarative.declarative_base()

    class Bucket(BASE, DeckhandBase):
        __tablename__ = 'buckets'

        id = Column(Integer, primary_key=True)
        name = Column(String(36), unique=True)
        documents = relationship("Document", backref="bucket")

    class RevisionTag(BASE, DeckhandBase):
        __tablename__ = 'revision_tags'

        id = Column(Integer, primary_key=True)
        tag = Column(String(64), nullable=False)
        data = Column(blob_type_obj, nullable=True, default={})
        revision_id = Column(
            Integer, ForeignKey('revisions.id', ondelete='CASCADE'),
            nullable=False)

    class Revision(BASE, DeckhandBase):
        __tablename__ = 'revisions'

        id = Column(Integer, primary_key=True)
        # `primaryjoin` used below for sqlalchemy to distinguish between
        # `Document.revision_id` and `Document.orig_revision_id`.
        documents = relationship(
            "Document", primaryjoin="Revision.id==Document.revision_id")
        tags = relationship("RevisionTag")
        validations = relationship("Validation")

        def to_dict(self):
            # Serialize the child collections along with the row itself.
            d = super(Revision, self).to_dict()
            d['documents'] = [doc.to_dict() for doc in self.documents]
            d['tags'] = [tag.to_dict() for tag in self.tags]
            return d

    class Document(BASE, DeckhandBase):
        UNIQUE_CONSTRAINTS = ('schema', 'layer', 'name', 'revision_id')
        __tablename__ = 'documents'

        __table_args__ = (
            UniqueConstraint(*UNIQUE_CONSTRAINTS,
                             name='duplicate_document_constraint'),
        )

        id = Column(Integer, primary_key=True)
        name = Column(String(64), nullable=False)
        schema = Column(String(64), nullable=False)
        layer = Column(String(64), nullable=True)
        # NOTE(fmontei): ``metadata`` is reserved by the DB, so ``meta`` must
        # be used to store document metadata information in the DB.
        meta = Column(blob_type_obj, nullable=False)
        data = Column(blob_type_obj, nullable=True)
        data_hash = Column(String, nullable=False)
        metadata_hash = Column(String, nullable=False)
        bucket_id = Column(Integer, ForeignKey('buckets.id',
                                               ondelete='CASCADE'),
                           nullable=False)
        revision_id = Column(
            Integer, ForeignKey('revisions.id', ondelete='CASCADE'),
            nullable=False)

        # Used for documents that haven't changed across revisions but still
        # have been carried over into newer revisions. This is necessary in
        # order to roll back to previous revisions or to generate a revision
        # diff. Without recording all the documents that were PUT in a
        # revision, this is rather difficult. By using `orig_revision_id` it is
        # therefore possible to maintain the correct revision history -- that
        # is, remembering the exact revision a document was created in -- while
        # still being able to roll back to all the documents that exist in a
        # specific revision or generate an accurate revision diff report.
        orig_revision_id = Column(
            Integer, ForeignKey('revisions.id', ondelete='CASCADE'),
            nullable=True)

        @hybrid_property
        def bucket_name(self):
            # Bucket may not be loaded/set; fall back to None.
            if hasattr(self, 'bucket') and self.bucket:
                return self.bucket.name
            return None

        def to_dict(self, raw_dict=False):
            """Convert the object into dictionary format.

            :param raw_dict: Renames the key "meta" to "metadata".
            """
            d = super(Document, self).to_dict()
            d['bucket_name'] = self.bucket_name
            if not raw_dict:
                d['metadata'] = d.pop('meta')
            if 'bucket' in d:
                d.pop('bucket')
            return d

    class Validation(BASE, DeckhandBase):
        __tablename__ = 'validations'

        id = Column(Integer, primary_key=True)
        name = Column(String(64), nullable=False)
        status = Column(String(8), nullable=False)
        validator = Column(blob_type_obj, nullable=False)
        errors = Column(blob_type_list, nullable=False, default=[])
        revision_id = Column(
            Integer, ForeignKey('revisions.id', ondelete='CASCADE'),
            nullable=False)

    # Export the table classes as attributes of this module so other code
    # can refer to them as models.Bucket, models.Document, etc.
    this_module = sys.modules[__name__]
    tables = [Bucket, Document, Revision, RevisionTag, Validation]
    for table in tables:
        setattr(this_module, table.__name__, table)
def register_models(engine, connection_string):
    """Register the sqlalchemy tables into the BASE.metadata.

    Sets up the database model objects; the tables themselves are not
    created in the configured database (see create_tables).
    """
    # Postgres gets native JSONB columns; everything else falls back to
    # pickled dicts and JSON-encoded lists.
    if 'postgresql' in connection_string:
        obj_type = list_type = JSONB
    else:
        obj_type, list_type = PickleType, oslo_types.JsonEncodedList()
    LOG.debug('Initializing DB tables using %s, %s as the column type '
              'for dictionaries, lists.', obj_type, list_type)
    __build_tables(obj_type, list_type)
def create_tables(engine):
    """Create the database tables for all models with the given engine.

    Only tests whose schema is not set up by Alembic (run during the
    associated helm chart db_sync job) call this.
    """
    global BASE
    LOG.debug('Creating DB tables')
    BASE.metadata.create_all(engine)
def unregister_models(engine):
    """Drop the database tables for all models with the given engine."""
    global BASE
    BASE.metadata.drop_all(engine)
| apache-2.0 |
tokers/NOJ_JUDGE_CORE | src/sabo.py | 2 | 3162 | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# ACMICPC problem online judger Sabo
# Copyright (C) 2016 zchao1995@gmail.com(Zhang Chao)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
import time
import config
import multiprocessing
from os import getpid
from os import path
from os import makedirs
from os import environ
from os import fork
from os import dup2
from os import chdir
from os import setsid
from os import umask
from os import devnull
from os import listdir
from shutil import rmtree
from sabo_log import sabo_log_init, sabo_error_log
from sabo_conf_parse import sabo_yaml_parse
from sabo_database import sabo_traversal_database
from sabo_judge import sabo_workers_do
def sabo_daemon(work_path):
    """Detach the current process and keep running as a daemon.

    :param work_path: directory the daemonized process chdirs into.

    NOTE(review): this is a single-fork daemonization (fork, setsid,
    umask, stream redirection); the classic recipe forks a second time
    after setsid() to avoid reacquiring a controlling terminal — confirm
    the single fork is intentional.
    """
    try:
        child = fork()
        if child:
            # Parent exits; the forked child carries on detached.
            sys.exit()
        chdir(work_path)
        setsid()
        umask(0)
        sys.stdout.flush()
        sys.stderr.flush()
        # Re-point the three standard streams at /dev/null.
        si = open(devnull, "r")
        so = open(devnull, "a+")
        se = open(devnull, "a+")
        dup2(si.fileno(), sys.stdin.fileno())
        dup2(so.fileno(), sys.stdout.fileno())
        dup2(se.fileno(), sys.stderr.fileno())
    except Exception as e:
        # Any failure above (not only fork) lands here and aborts.
        sabo_error_log("error", "fork failed: {0}".format(e))
        print("fork failed: {0}".format(e))
        sys.exit()
def sabo_init(conf):
    """Initialize logging and create the judge task/result queues.

    :param conf: parsed configuration mapping.
    :return: (task_queue, result_queue) pair of JoinableQueues.
    """
    sabo_log_init(conf["base"]["log_path"])
    sabo_error_log("info", "sabo start...")
    return (multiprocessing.JoinableQueue(),
            multiprocessing.JoinableQueue())
def sabo_run(conf_path):
    """Parse the configuration, spawn judge workers and poll the database.

    :param conf_path: path to the YAML configuration file.
    """
    conf, err = sabo_yaml_parse(conf_path)
    if err:
        print(err)
        sys.exit()
    # remove dirs (last run)
    # NOTE(review): the "." in the pattern is unescaped, so any character
    # matches there (e.g. "saboX12") — confirm "sabo.<pid>" was intended.
    pattern = r"^sabo.\d+$"
    rexp = re.compile(pattern)
    for element in listdir(conf["base"]["work_path"]):
        if rexp.match(element):
            rmtree(path.join(conf["base"]["work_path"], element))
    if conf["base"]["daemon"]:
        sabo_daemon(conf["base"]["work_path"])
    task_queue, result_queue = sabo_init(conf)
    cocurrent = conf["base"]["cocurrent"]
    # One worker process per configured degree of concurrency.
    for i in range(cocurrent):
        sabo_worker = multiprocessing.Process(target=sabo_workers_do,
                                              args=(conf, task_queue, result_queue))
        sabo_worker.start()
    # sabo master for traverse the database
    sabo_traversal_database(task_queue, conf["db"])
if __name__ == '__main__':
    # Expect exactly one argument: the configuration file path.
    if len(sys.argv) != 2:
        print("Usage: python3 sabo.py <conf_path>")
        sys.exit()
    sabo_run(sys.argv[1])
| gpl-2.0 |
mahak/neutron | neutron/tests/functional/agent/linux/test_ip_lib.py | 2 | 47196 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import signal
import netaddr
from neutron_lib import constants
from neutron_lib.utils import net
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import uuidutils
from pyroute2.iproute import linux as iproute_linux
import testscenarios
import testtools
from neutron.agent.common import async_process
from neutron.agent.linux import ip_lib
from neutron.common import utils
from neutron.conf.agent import common as config
from neutron.privileged.agent.linux import ip_lib as priv_ip_lib
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux.bin import ip_monitor
from neutron.tests.functional import base as functional_base
LOG = logging.getLogger(__name__)

# Lightweight description of a test interface passed around the tests.
Device = collections.namedtuple('Device',
                                'name ip_cidrs mac_address namespace')

# Fixed addresses used across the tests; chosen from ranges unlikely to
# collide with anything configured on the test host.
WRONG_IP = '0.0.0.0'
TEST_IP = '240.0.0.1'
TEST_IP_NEIGH = '240.0.0.2'
TEST_IP_SECONDARY = '240.0.0.3'
TEST_IP6_NEIGH = 'fd00::2'
TEST_IP6_SECONDARY = 'fd00::3'
# (address, expected NUD state) pairs used by the neighbour flush tests.
TEST_IP_NUD_STATES = ((TEST_IP_NEIGH, 'permanent'),
                      (TEST_IP_SECONDARY, 'reachable'),
                      (TEST_IP6_NEIGH, 'permanent'),
                      (TEST_IP6_SECONDARY, 'reachable'))
class IpLibTestFramework(functional_base.BaseSudoTestCase):
    """Shared plumbing for the ip_lib functional tests."""

    def setUp(self):
        super(IpLibTestFramework, self).setUp()
        self._configure()

    def _configure(self):
        # Force the OVS interface driver so init_l3 can plug test devices.
        config.register_interface_driver_opts_helper(cfg.CONF)
        cfg.CONF.set_override(
            'interface_driver',
            'neutron.agent.linux.interface.OVSInterfaceDriver')
        config.register_interface_opts()
        self.driver = importutils.import_object(cfg.CONF.interface_driver,
                                                cfg.CONF)

    def generate_device_details(self, name=None, ip_cidrs=None,
                                mac_address=None, namespace=None):
        """Build a Device namedtuple, randomizing any unspecified field."""
        if ip_cidrs is None:
            ip_cidrs = ["%s/24" % TEST_IP]
        return Device(name or utils.get_rand_name(),
                      ip_cidrs,
                      mac_address or
                      net.get_random_mac('fa:16:3e:00:00:00'.split(':')),
                      namespace or utils.get_rand_name())

    def _safe_delete_device(self, device):
        # Cleanup helper: the test itself may already have deleted the
        # device, in which case deletion raises and is just logged.
        try:
            device.link.delete()
        except RuntimeError:
            LOG.debug('Could not delete %s, was it already deleted?', device)

    def manage_device(self, attr):
        """Create a tuntap with the specified attributes.

        The device is cleaned up at the end of the test.

        :param attr: A Device namedtuple
        :return: A tuntap ip_lib.IPDevice
        """
        ip = ip_lib.IPWrapper(namespace=attr.namespace)
        if attr.namespace:
            ip.netns.add(attr.namespace)
            self.addCleanup(ip.netns.delete, attr.namespace)
        tap_device = ip.add_tuntap(attr.name)
        self.addCleanup(self._safe_delete_device, tap_device)
        tap_device.link.set_address(attr.mac_address)
        self.driver.init_l3(attr.name, attr.ip_cidrs,
                            namespace=attr.namespace)
        tap_device.link.set_up()
        return tap_device
class IpLibTestCase(IpLibTestFramework):
def _check_routes(self, expected_routes, actual_routes):
actual_routes = [{key: route[key] for key in expected_routes[0].keys()}
for route in actual_routes]
self.assertEqual(expected_routes, actual_routes)
    def test_rules_lifecycle(self):
        """Add v4/v6 IP rules, verify they are listed, then delete them."""
        PRIORITY = 32768
        TABLE = 16
        attr = self.generate_device_details()
        device = self.manage_device(attr)

        # Rules to install, per IP version.
        test_cases = {
            constants.IP_VERSION_4: [
                {
                    'ip': '1.1.1.1',
                    'to': '8.8.8.0/24'
                },
                {
                    'ip': '1.1.1.1',
                    'iif': device.name,
                    'to': '7.7.7.0/24'
                }
            ],
            constants.IP_VERSION_6: [
                {
                    'ip': 'abcd::1',
                    'to': '1234::/64'
                },
                {
                    'ip': 'abcd::1',
                    'iif': device.name,
                    'to': '4567::/64'
                }
            ]
        }
        # How those rules must show up in list_ip_rules(); note that with
        # an "iif" the source collapses to the any-address prefix.
        expected_rules = {
            constants.IP_VERSION_4: [
                {
                    'from': '1.1.1.1',
                    'to': '8.8.8.0/24',
                    'priority': str(PRIORITY),
                    'table': str(TABLE),
                    'type': 'unicast'
                }, {
                    'from': '0.0.0.0/0',
                    'to': '7.7.7.0/24',
                    'iif': device.name,
                    'priority': str(PRIORITY),
                    'table': str(TABLE),
                    'type': 'unicast'
                }
            ],
            constants.IP_VERSION_6: [
                {
                    'from': 'abcd::1',
                    'to': '1234::/64',
                    'priority': str(PRIORITY),
                    'table': str(TABLE),
                    'type': 'unicast'
                },
                {
                    'from': '::/0',
                    'to': '4567::/64',
                    'iif': device.name,
                    'priority': str(PRIORITY),
                    'table': str(TABLE),
                    'type': 'unicast',
                }
            ]
        }

        for ip_version, test_case in test_cases.items():
            for rule in test_case:
                ip_lib.add_ip_rule(namespace=device.namespace, table=TABLE,
                                   priority=PRIORITY, **rule)
            rules = ip_lib.list_ip_rules(device.namespace, ip_version)
            for expected_rule in expected_rules[ip_version]:
                self.assertIn(expected_rule, rules)

            for rule in test_case:
                ip_lib.delete_ip_rule(device.namespace, table=TABLE,
                                      priority=PRIORITY, **rule)
            # Deleted rules must be gone from the privileged listing too.
            rules = priv_ip_lib.list_ip_rules(device.namespace, ip_version)
            for expected_rule in expected_rules[ip_version]:
                self.assertNotIn(expected_rule, rules)
    def test_device_exists(self):
        attr = self.generate_device_details()
        # Not created yet, so it must not exist.
        self.assertFalse(
            ip_lib.device_exists(attr.name, namespace=attr.namespace))
        device = self.manage_device(attr)
        self.assertTrue(
            ip_lib.device_exists(device.name, namespace=attr.namespace))
        # The lookup is namespace-scoped.
        self.assertFalse(
            ip_lib.device_exists(attr.name, namespace='wrong_namespace'))
        device.link.delete()
        self.assertFalse(
            ip_lib.device_exists(attr.name, namespace=attr.namespace))

    def test_ipdevice_exists(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        self.assertTrue(device.exists())
        device.link.delete()
        self.assertFalse(device.exists())

    def test_vlan_exists(self):
        attr = self.generate_device_details()
        ip = ip_lib.IPWrapper(namespace=attr.namespace)
        ip.netns.add(attr.namespace)
        self.addCleanup(ip.netns.delete, attr.namespace)
        # A dummy link serves as the parent of the vlan device.
        priv_ip_lib.create_interface(attr.name, attr.namespace, 'dummy')
        self.assertFalse(ip_lib.vlan_in_use(1999, namespace=attr.namespace))
        device = ip.add_vlan('vlan1999', attr.name, 1999)
        self.assertTrue(ip_lib.vlan_in_use(1999, namespace=attr.namespace))
        device.link.delete()
        self.assertFalse(ip_lib.vlan_in_use(1999, namespace=attr.namespace))

    def test_vxlan_exists(self):
        attr = self.generate_device_details()
        ip = ip_lib.IPWrapper(namespace=attr.namespace)
        ip.netns.add(attr.namespace)
        self.addCleanup(ip.netns.delete, attr.namespace)
        self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
        device = ip.add_vxlan(attr.name, 9999)
        self.addCleanup(self._safe_delete_device, device)
        self.assertTrue(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
        device.link.delete()
        self.assertFalse(ip_lib.vxlan_in_use(9999, namespace=attr.namespace))
    def test_ipwrapper_get_device_by_ip_None(self):
        ip_wrapper = ip_lib.IPWrapper(namespace=None)
        self.assertIsNone(ip_wrapper.get_device_by_ip(ip=None))

    def test_ipwrapper_get_device_by_ip(self):
        # We need to pass both IP and cidr values to get_device_by_ip()
        # to make sure it filters correctly.
        test_ip = "%s/24" % TEST_IP
        test_ip_secondary = "%s/24" % TEST_IP_SECONDARY
        attr = self.generate_device_details(
            ip_cidrs=[test_ip, test_ip_secondary]
        )
        self.manage_device(attr)
        ip_wrapper = ip_lib.IPWrapper(namespace=attr.namespace)
        self.assertEqual(attr.name, ip_wrapper.get_device_by_ip(TEST_IP).name)
        self.assertEqual(attr.name,
                         ip_wrapper.get_device_by_ip(TEST_IP_SECONDARY).name)
        # A bare address not configured on the device must not match.
        self.assertIsNone(ip_wrapper.get_device_by_ip(TEST_IP_NEIGH))
        # this is in the same subnet, so will match if we pass as cidr
        test_ip_neigh = "%s/24" % TEST_IP_NEIGH
        self.assertEqual(attr.name,
                         ip_wrapper.get_device_by_ip(test_ip_neigh).name)
        self.assertIsNone(ip_wrapper.get_device_by_ip(WRONG_IP))

    def test_device_exists_with_ips_and_mac(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        self.assertTrue(
            ip_lib.device_exists_with_ips_and_mac(*attr))
        wrong_ip_cidr = '10.0.0.1/8'
        wrong_mac_address = 'aa:aa:aa:aa:aa:aa'
        # Each mismatching attribute must independently fail the check.
        attr = self.generate_device_details(name='wrong_name')
        self.assertFalse(
            ip_lib.device_exists_with_ips_and_mac(*attr))
        attr = self.generate_device_details(ip_cidrs=[wrong_ip_cidr])
        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
        attr = self.generate_device_details(mac_address=wrong_mac_address)
        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
        attr = self.generate_device_details(namespace='wrong_namespace')
        self.assertFalse(ip_lib.device_exists_with_ips_and_mac(*attr))
        device.link.delete()

    def test_get_device_mac(self):
        attr = self.generate_device_details()
        device = self.manage_device(attr)
        mac_address = ip_lib.get_device_mac(attr.name,
                                            namespace=attr.namespace)
        self.assertEqual(attr.mac_address, mac_address)
        device.link.delete()

    def test_get_device_mac_too_long_name(self):
        # Same check with a name exceeding DEVICE_NAME_MAX_LEN.
        name = utils.get_rand_name(
            max_length=constants.DEVICE_NAME_MAX_LEN + 5)
        attr = self.generate_device_details(name=name)
        device = self.manage_device(attr)
        mac_address = ip_lib.get_device_mac(attr.name,
                                            namespace=attr.namespace)
        self.assertEqual(attr.mac_address, mac_address)
        device.link.delete()
    def test_gateway_lifecycle(self):
        """Add, verify and delete a gateway route for each IP family."""
        attr = self.generate_device_details(
            ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
        )
        metric = 1000
        device = self.manage_device(attr)
        gateways = {
            constants.IP_VERSION_4: attr.ip_cidrs[0].split('/')[0],
            constants.IP_VERSION_6: "fd00::ff"
        }
        expected_gateways = {
            constants.IP_VERSION_4: {
                'metric': metric,
                'via': gateways[constants.IP_VERSION_4]},
            constants.IP_VERSION_6: {
                'metric': metric,
                'via': gateways[constants.IP_VERSION_6]}}

        for ip_version, gateway_ip in gateways.items():
            device.route.add_gateway(gateway_ip, metric)
            self._check_routes(
                [expected_gateways[ip_version]],
                [device.route.get_gateway(ip_version=ip_version)])
            device.route.delete_gateway(gateway_ip)
            self.assertIsNone(
                device.route.get_gateway(ip_version=ip_version))

    def test_gateway_flush(self):
        """Flushing routes on a device removes its configured gateway."""
        attr = self.generate_device_details(
            ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
        )
        device = self.manage_device(attr)
        gateways = {
            constants.IP_VERSION_4: attr.ip_cidrs[0].split('/')[0],
            constants.IP_VERSION_6: "fd00::ff"
        }
        for ip_version, gateway_ip in gateways.items():
            # Ensure that there is no gateway configured
            self.assertIsNone(
                device.route.get_gateway(ip_version=ip_version))
            # Now lets add gateway
            device.route.add_gateway(gateway_ip, table="main")
            self.assertIsNotNone(
                device.route.get_gateway(ip_version=ip_version))
            # Flush gateway and check that there is no any gateway configured
            device.route.flush(ip_version, table="main")
            self.assertIsNone(
                device.route.get_gateway(ip_version=ip_version))
def test_get_neigh_entries(self):
attr = self.generate_device_details(
ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
)
mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
device = self.manage_device(attr)
device.neigh.add(TEST_IP_NEIGH, mac_address)
expected_neighs = [{'dst': TEST_IP_NEIGH,
'lladdr': mac_address,
'device': attr.name,
'state': 'permanent'}]
neighs = device.neigh.dump(4)
self.assertCountEqual(expected_neighs, neighs)
self.assertIsInstance(neighs, list)
device.neigh.delete(TEST_IP_NEIGH, mac_address)
neighs = device.neigh.dump(4, dst=TEST_IP_NEIGH, lladdr=mac_address)
self.assertEqual([], neighs)
def test_get_neigh_entries_no_namespace(self):
    """Dumping neighbours in a missing namespace must raise."""
    with testtools.ExpectedException(ip_lib.NetworkNamespaceNotFound):
        ip_lib.dump_neigh_entries(4, namespace="nonexistent-netns")
def test_get_neigh_entries_no_interface(self):
    """Dumping neighbours for a missing device must raise."""
    attr = self.generate_device_details(
        ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
    )
    self.manage_device(attr)
    with testtools.ExpectedException(ip_lib.NetworkInterfaceNotFound):
        ip_lib.dump_neigh_entries(4, device="nosuchdevice",
                                  namespace=attr.namespace)
def test_delete_neigh_entries(self):
    """Deleting a non-existent neighbour entry must not raise."""
    attr = self.generate_device_details(
        ip_cidrs=["%s/24" % TEST_IP, "fd00::1/64"]
    )
    mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
    device = self.manage_device(attr)
    # trying to delete a non-existent entry shouldn't raise an error
    device.neigh.delete(TEST_IP_NEIGH, mac_address)
def test_flush_neigh_ipv4(self):
    """neigh.flush() for IPv4: non-permanent entries are removed,
    "permanent" entries survive every flush variant."""
    # Entry with state "reachable" deleted.
    self._flush_neigh(constants.IP_VERSION_4, TEST_IP_SECONDARY,
                      {TEST_IP_NEIGH})
    # Entries belong to "ip_to_flush" passed CIDR, but "permanent" entry
    # is not deleted.
    self._flush_neigh(constants.IP_VERSION_4, '240.0.0.0/28',
                      {TEST_IP_NEIGH})
    # "all" passed, but "permanent" entry is not deleted.
    self._flush_neigh(constants.IP_VERSION_4, 'all', {TEST_IP_NEIGH})
def test_flush_neigh_ipv6(self):
    """neigh.flush() for IPv6: same semantics as the IPv4 variant."""
    # Entry with state "reachable" deleted.
    self._flush_neigh(constants.IP_VERSION_6, TEST_IP6_SECONDARY,
                      {TEST_IP6_NEIGH})
    # Entries belong to "ip_to_flush" passed CIDR, but "permanent" entry
    # is not deleted.
    self._flush_neigh(constants.IP_VERSION_6, 'fd00::0/64',
                      {TEST_IP6_NEIGH})
    # "all" passed, but "permanent" entry is not deleted.
    self._flush_neigh(constants.IP_VERSION_6, 'all', {TEST_IP6_NEIGH})
def _flush_neigh(self, version, ip_to_flush, ips_expected):
    """Populate neighbour entries, flush ``ip_to_flush`` and assert that
    exactly ``ips_expected`` destinations remain.

    Entries are created in the NUD states listed in TEST_IP_NUD_STATES;
    the callers rely on "permanent" entries surviving the flush.
    """
    attr = self.generate_device_details(
        ip_cidrs=['%s/24' % TEST_IP, 'fd00::1/64'],
        namespace=utils.get_rand_name(20, 'ns-'))
    device = self.manage_device(attr)
    for test_ip, nud_state in TEST_IP_NUD_STATES:
        mac_address = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        device.neigh.add(test_ip, mac_address, nud_state)
    device.neigh.flush(version, ip_to_flush)
    ips = {e['dst'] for e in device.neigh.dump(version)}
    self.assertEqual(ips_expected, ips)
def _check_for_device_name(self, ip, name, should_exist):
    """Assert whether a device called ``name`` is visible in the
    IPWrapper ``ip`` (i.e. exists in that wrapper's namespace)."""
    exist = any(d for d in ip.get_devices() if d.name == name)
    self.assertEqual(should_exist, exist)
def test_veth_exists(self):
    """Create a veth pair spanning two namespaces; each end is visible
    only in its own namespace, and deleting one end removes both."""
    namespace1 = self.useFixture(net_helpers.NamespaceFixture())
    namespace2 = self.useFixture(net_helpers.NamespaceFixture())
    dev_name1 = utils.get_rand_name()
    dev_name2 = utils.get_rand_name()
    device1, device2 = namespace1.ip_wrapper.add_veth(
        dev_name1, dev_name2, namespace2.name)
    self.addCleanup(self._safe_delete_device, device1)
    self.addCleanup(self._safe_delete_device, device2)
    self._check_for_device_name(namespace1.ip_wrapper, dev_name1, True)
    self._check_for_device_name(namespace2.ip_wrapper, dev_name2, True)
    self._check_for_device_name(namespace1.ip_wrapper, dev_name2, False)
    self._check_for_device_name(namespace2.ip_wrapper, dev_name1, False)
    # As it is veth pair, remove of device1 should be enough to remove
    # both devices
    device1.link.delete()
    self._check_for_device_name(namespace1.ip_wrapper, dev_name1, False)
    self._check_for_device_name(namespace2.ip_wrapper, dev_name2, False)
def test_macvtap_exists(self):
    """Create a macvtap on top of a dummy device, check its existence
    and that deleting it makes it disappear."""
    namespace = self.useFixture(net_helpers.NamespaceFixture())
    src_dev_name = utils.get_rand_name()
    src_dev = namespace.ip_wrapper.add_dummy(src_dev_name)
    self.addCleanup(self._safe_delete_device, src_dev)
    dev_name = utils.get_rand_name()
    device = namespace.ip_wrapper.add_macvtap(dev_name, src_dev_name)
    self.addCleanup(self._safe_delete_device, device)
    self._check_for_device_name(namespace.ip_wrapper, dev_name, True)
    device.link.delete()
    self._check_for_device_name(namespace.ip_wrapper, dev_name, False)
def test_dummy_exists(self):
    """Create a dummy device, check existence before and after delete."""
    namespace = self.useFixture(net_helpers.NamespaceFixture())
    dev_name = utils.get_rand_name()
    device = namespace.ip_wrapper.add_dummy(dev_name)
    self.addCleanup(self._safe_delete_device, device)
    self._check_for_device_name(namespace.ip_wrapper, dev_name, True)
    device.link.delete()
    self._check_for_device_name(namespace.ip_wrapper, dev_name, False)
def test_set_link_mtu(self):
    """Set a valid MTU and verify an invalid value (1) is rejected."""
    attr = self.generate_device_details()
    device = self.manage_device(attr)
    device.link.set_mtu(1450)
    self.assertEqual(1450, device.link.mtu)
    # Check if proper exception will be raised when wrong MTU value is
    # provided
    self.assertRaises(ip_lib.InvalidArgument, device.link.set_mtu, 1)
def test_set_link_allmulticast_on(self):
    """Toggle the ALLMULTI flag on and verify it is reflected."""
    attr = self.generate_device_details()
    device = self.manage_device(attr)
    self.assertFalse(device.link.allmulticast)
    device.link.set_allmulticast_on()
    self.assertTrue(device.link.allmulticast)
def test_set_link_netns(self):
    """Move a device into a new namespace; it must disappear from the
    original namespace and appear in the new one."""
    attr = self.generate_device_details()
    device = self.manage_device(attr)
    original_namespace = device.namespace
    original_ip_wrapper = ip_lib.IPWrapper(namespace=original_namespace)
    new_namespace = self.useFixture(net_helpers.NamespaceFixture())
    device.link.set_netns(new_namespace.name)
    self.assertEqual(new_namespace.name, device.namespace)
    self._check_for_device_name(
        new_namespace.ip_wrapper, device.name, True)
    self._check_for_device_name(
        original_ip_wrapper, device.name, False)
def test_set_link_name(self):
    """Rename a device and verify the old name is gone."""
    attr = self.generate_device_details()
    device = self.manage_device(attr)
    ip_wrapper = ip_lib.IPWrapper(namespace=device.namespace)
    original_name = device.name
    new_name = utils.get_rand_name()
    # device has to be DOWN to rename it
    device.link.set_down()
    device.link.set_name(new_name)
    self.assertEqual(new_name, device.name)
    self._check_for_device_name(ip_wrapper, new_name, True)
    self._check_for_device_name(ip_wrapper, original_name, False)
def test_set_link_alias(self):
    """Set an interface alias and read it back."""
    attr = self.generate_device_details()
    device = self.manage_device(attr)
    alias = utils.get_rand_name()
    device.link.set_alias(alias)
    self.assertEqual(alias, device.link.alias)
def _add_and_check_ips(self, device, ip_addresses):
    """Add every (cidr, scope, expected_broadcast) tuple to ``device``
    and assert the device then reports exactly those addresses."""
    for cidr, scope, expected_broadcast in ip_addresses:
        # For IPv4 address add_broadcast flag will be set to True only
        # if expected_broadcast is given.
        # For IPv6 add_broadcast flag can be set to True always but
        # broadcast address will not be set, so expected_broadcast for
        # IPv6 should be always given as None.
        add_broadcast = True
        if cidr.version == constants.IP_VERSION_4:
            add_broadcast = bool(expected_broadcast)
        device.addr.add(str(cidr), scope, add_broadcast)
    device_ips_info = [
        (netaddr.IPNetwork(ip_info['cidr']),
         ip_info['scope'],
         ip_info['broadcast']) for
        ip_info in device.addr.list()]
    self.assertCountEqual(ip_addresses, device_ips_info)
def _flush_ips(self, device, ip_version):
    """Flush all addresses of ``ip_version`` from ``device`` and assert
    that none of that version remain."""
    device.addr.flush(ip_version)
    for ip_address in device.addr.list():
        cidr = netaddr.IPNetwork(ip_address['cidr'])
        self.assertNotEqual(ip_version, cidr.version)
def test_add_ip_address(self):
    """Add IPv4/IPv6 addresses with various scopes; a duplicate add must
    raise RuntimeError."""
    ip_addresses = [
        (netaddr.IPNetwork("10.10.10.10/30"), "global", '10.10.10.11'),
        (netaddr.IPNetwork("11.11.11.11/28"), "link", None),
        (netaddr.IPNetwork("2801::1/120"), "global", None),
        (netaddr.IPNetwork("fe80::/64"), "link", None)]
    attr = self.generate_device_details(ip_cidrs=[])
    device = self.manage_device(attr)
    self._add_and_check_ips(device, ip_addresses)
    # Now let's check if adding already existing IP address will raise
    # RuntimeError
    ip_address = ip_addresses[0]
    self.assertRaises(RuntimeError,
                      device.addr.add, str(ip_address[0]), ip_address[1])
def test_delete_ip_address(self):
    """Delete an address; deleting it a second time must not raise."""
    attr = self.generate_device_details()
    cidr = attr.ip_cidrs[0]
    device = self.manage_device(attr)
    device_cidrs = [ip_info['cidr'] for ip_info in device.addr.list()]
    self.assertIn(cidr, device_cidrs)
    device.addr.delete(cidr)
    device_cidrs = [ip_info['cidr'] for ip_info in device.addr.list()]
    self.assertNotIn(cidr, device_cidrs)
    # Try to delete not existing IP address, it should be just fine and
    # finish without any error raised
    device.addr.delete(cidr)
def test_flush_ip_addresses(self):
    """Add a mix of IPv4/IPv6 addresses, then flush each IP version."""
    ip_addresses = [
        (netaddr.IPNetwork("10.10.10.10/30"), "global", '10.10.10.11'),
        (netaddr.IPNetwork("11.11.11.11/28"), "link", None),
        (netaddr.IPNetwork("2801::1/120"), "global", None),
        (netaddr.IPNetwork("fe80::/64"), "link", None)]
    attr = self.generate_device_details(ip_cidrs=[])
    device = self.manage_device(attr)
    self._add_and_check_ips(device, ip_addresses)
    self._flush_ips(device, constants.IP_VERSION_4)
    self._flush_ips(device, constants.IP_VERSION_6)
class TestSetIpNonlocalBind(functional_base.BaseSudoTestCase):
    """Functional test for get/set of the ip_nonlocal_bind sysctl knob."""

    def test_assigned_value(self):
        """Set ip_nonlocal_bind to 0 and 1 and read the value back.

        Skips on kernels where the knob is not exposed per network
        namespace (stat of the proc file fails inside the namespace).
        """
        namespace = self.useFixture(net_helpers.NamespaceFixture())
        for expected in (0, 1):
            failed = ip_lib.set_ip_nonlocal_bind(expected, namespace.name)
            try:
                observed = ip_lib.get_ip_nonlocal_bind(namespace.name)
            except RuntimeError as rte:
                stat_message = (
                    'cannot stat /proc/sys/net/ipv4/ip_nonlocal_bind')
                if stat_message in str(rte):
                    raise self.skipException(
                        "This kernel doesn't support %s in network "
                        "namespaces." % ip_lib.IP_NONLOCAL_BIND)
                # Unrelated failure: let it propagate.
                raise
            self.assertFalse(failed)
            self.assertEqual(expected, observed)
class NamespaceTestCase(functional_base.BaseSudoTestCase):
    """Functional tests for ip_lib.network_namespace_exists()."""

    def setUp(self):
        super(NamespaceTestCase, self).setUp()
        # Create a uniquely named namespace; removed again in cleanup.
        self.namespace = 'test_ns_' + uuidutils.generate_uuid()
        ip_lib.create_network_namespace(self.namespace)
        self.addCleanup(self._delete_namespace)

    def _delete_namespace(self):
        ip_lib.delete_network_namespace(self.namespace)

    def test_network_namespace_exists_ns_exists(self):
        """Existing namespace is reported as present."""
        self.assertTrue(ip_lib.network_namespace_exists(self.namespace))

    def test_network_namespace_exists_ns_doesnt_exists(self):
        """Missing namespace is reported as absent."""
        self.assertFalse(ip_lib.network_namespace_exists('another_ns'))

    def test_network_namespace_exists_ns_exists_try_is_ready(self):
        """Same checks, exercising the try_is_ready code path."""
        self.assertTrue(ip_lib.network_namespace_exists(self.namespace,
                                                        try_is_ready=True))

    def test_network_namespace_exists_ns_doesnt_exists_try_is_ready(self):
        self.assertFalse(ip_lib.network_namespace_exists('another_ns',
                                                         try_is_ready=True))
class IpMonitorTestCase(testscenarios.WithScenarios,
                        functional_base.BaseLoggingTestCase):
    """Functional tests for the ip_monitor helper.

    The monitor is run as a separate root process that appends one JSON
    line per observed IP address event to ``self.temp_file``; the tests
    generate events and poll that file until the expected records appear.
    Each test runs twice (scenarios): inside a private namespace and in
    the root namespace.
    """

    scenarios = [
        ('namespace', {'namespace': 'ns_' + uuidutils.generate_uuid()}),
        ('no_namespace', {'namespace': None})
    ]

    def setUp(self):
        super(IpMonitorTestCase, self).setUp()
        self.addCleanup(self._cleanup)
        if self.namespace:
            priv_ip_lib.create_netns(self.namespace)
        # Five random interface names, truncated to the kernel limit.
        self.devices = [('int_' + uuidutils.generate_uuid())[
            :constants.DEVICE_NAME_MAX_LEN] for _ in range(5)]
        self.ip_wrapper = ip_lib.IPWrapper(self.namespace)
        self.temp_file = self.get_temp_file_path('out_' + self.devices[0] +
                                                 '.tmp')
        self.proc = self._run_ip_monitor(ip_monitor)

    def _cleanup(self):
        # Stop the monitor first, then remove the namespace (which takes
        # the interfaces with it) or delete the interfaces individually.
        self.proc.stop(kill_timeout=10, kill_signal=signal.SIGTERM)
        if self.namespace:
            priv_ip_lib.remove_netns(self.namespace)
        else:
            for device in self.devices:
                try:
                    priv_ip_lib.delete_interface(device, self.namespace)
                except priv_ip_lib.NetworkInterfaceNotFound:
                    pass

    @staticmethod
    def _normalize_module_name(name):
        """Map a compiled module path (.pyc/.pyo) back to its .py source."""
        for suf in ['.pyc', '.pyo']:
            if name.endswith(suf):
                return name[:-len(suf)] + '.py'
        return name

    def _run_ip_monitor(self, module):
        """Start ``module`` as a root AsyncProcess writing to temp_file."""
        executable = self._normalize_module_name(module.__file__)
        proc = async_process.AsyncProcess(
            [executable, self.temp_file, str(self.namespace)],
            run_as_root=True)
        proc.start(block=True)
        return proc

    def _read_file(self, ip_addresses):
        """Return True when every record in ``ip_addresses`` has been
        written to the monitor output file; False on partial or
        unreadable output (the file may be mid-write)."""
        try:
            registers = []
            with open(self.temp_file, 'r') as f:
                data = f.read()
            for line in data.splitlines():
                register = jsonutils.loads(line)
                registers.append({'name': register['name'],
                                  'cidr': register['cidr'],
                                  'event': register['event']})
            for ip_address in ip_addresses:
                if ip_address not in registers:
                    return False
            return True
        except (OSError, IOError, ValueError):
            return False

    def _check_read_file(self, ip_addresses):
        """Poll until all expected records appear; fail with a diff of
        expected vs. actual records on timeout."""
        try:
            utils.wait_until_true(lambda: self._read_file(ip_addresses),
                                  timeout=30)
        except utils.WaitTimeout:
            with open(self.temp_file, 'r') as f:
                registers = f.read()
            self.fail('Defined IP addresses: %s, IP addresses registered: %s' %
                      (ip_addresses, registers))

    def _handle_ip_addresses(self, event, ip_addresses):
        """Apply the 'added'/'removed' records of ``ip_addresses`` to the
        corresponding devices so the monitor observes those events."""
        for ip_address in (_ip for _ip in ip_addresses
                           if _ip['event'] == event):
            ip_device = ip_lib.IPDevice(ip_address['name'], self.namespace)
            if event == 'removed':
                ip_device.addr.delete(ip_address['cidr'])
            if event == 'added':
                ip_device.addr.add(ip_address['cidr'])

    def test_add_remove_ip_address_and_interface(self):
        """Address add/remove events are reported; deleting an interface
        produces 'removed' events for its remaining addresses."""
        for device in self.devices:
            self.ip_wrapper.add_dummy(device)
        # Wait until the monitor output file exists and is parseable.
        utils.wait_until_true(lambda: self._read_file({}), timeout=30)
        ip_addresses = [
            {'cidr': '192.168.250.1/24', 'event': 'added',
             'name': self.devices[0]},
            {'cidr': '192.168.250.2/24', 'event': 'added',
             'name': self.devices[1]},
            {'cidr': '192.168.250.3/24', 'event': 'added',
             'name': self.devices[2]},
            {'cidr': '192.168.250.10/24', 'event': 'added',
             'name': self.devices[3]},
            {'cidr': '192.168.250.10/24', 'event': 'removed',
             'name': self.devices[3]},
            {'cidr': '2001:db8::1/64', 'event': 'added',
             'name': self.devices[4]},
            {'cidr': '2001:db8::2/64', 'event': 'added',
             'name': self.devices[4]}]
        self._handle_ip_addresses('added', ip_addresses)
        self._handle_ip_addresses('removed', ip_addresses)
        self._check_read_file(ip_addresses)
        ip_device = ip_lib.IPDevice(self.devices[4], self.namespace)
        ip_device.link.delete()
        ip_addresses = [
            {'cidr': '2001:db8::1/64', 'event': 'removed',
             'name': self.devices[4]},
            {'cidr': '2001:db8::2/64', 'event': 'removed',
             'name': self.devices[4]}]
        self._check_read_file(ip_addresses)

    def test_interface_added_after_initilization(self):
        """An interface created after the monitor started is tracked."""
        for device in self.devices[:len(self.devices) - 1]:
            self.ip_wrapper.add_dummy(device)
        utils.wait_until_true(lambda: self._read_file({}), timeout=30)
        ip_addresses = [
            {'cidr': '192.168.251.21/24', 'event': 'added',
             'name': self.devices[0]},
            {'cidr': '192.168.251.22/24', 'event': 'added',
             'name': self.devices[1]}]
        self._handle_ip_addresses('added', ip_addresses)
        self._check_read_file(ip_addresses)
        # Create the last interface only now, after monitor start-up.
        self.ip_wrapper.add_dummy(self.devices[-1])
        ip_addresses.append({'cidr': '192.168.251.23/24', 'event': 'added',
                             'name': self.devices[-1]})
        self._handle_ip_addresses('added', [ip_addresses[-1]])
        self._check_read_file(ip_addresses)

    def test_add_and_remove_multiple_ips(self):
        # NOTE(ralonsoh): testing [1], adding multiple IPs.
        # [1] https://bugs.launchpad.net/neutron/+bug/1832307
        utils.wait_until_true(lambda: self._read_file({}), timeout=30)
        self.ip_wrapper.add_dummy(self.devices[0])
        ip_addresses = []
        for i in range(100):
            _cidr = str(netaddr.IPNetwork('192.168.252.1/32').ip + i) + '/32'
            ip_addresses.append({'cidr': _cidr, 'event': 'added',
                                 'name': self.devices[0]})
        self._handle_ip_addresses('added', ip_addresses)
        self._check_read_file(ip_addresses)
        for i in range(100):
            _cidr = str(netaddr.IPNetwork('192.168.252.1/32').ip + i) + '/32'
            ip_addresses.append({'cidr': _cidr, 'event': 'removed',
                                 'name': self.devices[0]})
        self._handle_ip_addresses('removed', ip_addresses)
        self._check_read_file(ip_addresses)
class IpRouteCommandTestCase(functional_base.BaseSudoTestCase):
    """Functional tests for IPDevice.route (add/list/delete routes)."""

    def setUp(self):
        super(IpRouteCommandTestCase, self).setUp()
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        ip_lib.IPWrapper(self.namespace).add_dummy('test_device')
        self.device = ip_lib.IPDevice('test_device', namespace=self.namespace)
        self.device.link.set_up()
        self.device_cidr_ipv4 = '192.168.100.1/24'
        self.device_cidr_ipv6 = '2020::1/64'
        self.device.addr.add(self.device_cidr_ipv4)
        self.device.addr.add(self.device_cidr_ipv6)
        # CIDRs (two per IP version) used as route destinations.
        self.cidrs = ['192.168.0.0/24', '10.0.0.0/8', '2001::/64', 'faaa::/96']

    def _assert_route(self, ip_version, table=None, source_prefix=None,
                      cidr=None, scope=None, via=None, metric=None,
                      not_in=False):
        """Wait until the expected route is (or, with ``not_in``, is not)
        present in the device's route list; fail on timeout.

        ``ip_version`` is recomputed below from ``cidr``/``via``; the
        parameter is kept for signature compatibility with the callers.
        """
        # NOTE: both lambdas close over ``cmp``, which is assigned further
        # down.  That is safe because they are only invoked by
        # wait_until_true() after ``cmp`` exists (late binding).
        if not_in:
            fn = lambda: cmp not in self.device.route.list_routes(ip_version,
                                                                  table=table)
            msg = 'Route found: %s'
        else:
            fn = lambda: cmp in self.device.route.list_routes(ip_version,
                                                              table=table)
            msg = 'Route not found: %s'
        if cidr:
            ip_version = utils.get_ip_version(cidr)
        else:
            ip_version = utils.get_ip_version(via)
            cidr = constants.IP_ANY[ip_version]
        # Fill in the attribute defaults the kernel applies so the dict
        # comparison against list_routes() output matches exactly.
        if constants.IP_VERSION_6 == ip_version:
            scope = ip_lib.IP_ADDRESS_SCOPE[0]
        elif not scope:
            scope = 'global' if via else 'link'
        if not metric:
            metric = ip_lib.IP_ROUTE_METRIC_DEFAULT[ip_version]
        table = table or iproute_linux.DEFAULT_TABLE
        table = ip_lib.IP_RULE_TABLES_NAMES.get(table, table)
        cmp = {'table': table,
               'cidr': cidr,
               'source_prefix': source_prefix,
               'scope': scope,
               'device': 'test_device',
               'via': via,
               'metric': metric,
               'proto': 'static'}
        try:
            utils.wait_until_true(fn, timeout=5)
        except utils.WaitTimeout:
            # BUG FIX: TestCase.fail() raises AssertionError itself; the
            # previous "raise self.fail(...)" form had a dead outer
            # ``raise`` that could never execute meaningfully.
            self.fail(msg % cmp)

    def test_add_route_table(self):
        """Routes can be added to the default and to specific tables."""
        tables = (None, 1, 253, 254, 255)
        for cidr in self.cidrs:
            for table in tables:
                self.device.route.add_route(cidr, table=table)
                ip_version = utils.get_ip_version(cidr)
                self._assert_route(ip_version, cidr=cidr, table=table)

    def test_add_route_via(self):
        """Routes can be added with an explicit next hop per IP version."""
        gateway_ipv4 = str(netaddr.IPNetwork(self.device_cidr_ipv4).ip)
        gateway_ipv6 = str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1)
        for cidr in self.cidrs:
            ip_version = utils.get_ip_version(cidr)
            gateway = (gateway_ipv4 if ip_version == constants.IP_VERSION_4
                       else gateway_ipv6)
            self.device.route.add_route(cidr, via=gateway)
            self._assert_route(ip_version, cidr=cidr, via=gateway)

    def test_add_route_metric(self):
        """Routes can be added with an explicit metric."""
        metrics = (None, 1, 10, 255)
        for cidr in self.cidrs:
            for metric in metrics:
                self.device.route.add_route(cidr, metric=metric)
                ip_version = utils.get_ip_version(cidr)
                self._assert_route(ip_version, cidr=cidr, metric=metric)

    def test_add_route_scope(self):
        """Routes can be added with every supported address scope."""
        for cidr in self.cidrs:
            for scope in ip_lib.IP_ADDRESS_SCOPE_NAME:
                self.device.route.add_route(cidr, scope=scope)
                ip_version = utils.get_ip_version(cidr)
                self._assert_route(ip_version, cidr=cidr, scope=scope)

    def test_add_route_gateway(self):
        """add_gateway() installs a default route via the gateway."""
        gateways = (str(netaddr.IPNetwork(self.device_cidr_ipv4).ip),
                    str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1))
        for gateway in gateways:
            ip_version = utils.get_ip_version(gateway)
            self.device.route.add_gateway(gateway)
            self._assert_route(ip_version, cidr=None, via=gateway,
                               scope='global')

    def test_list_onlink_routes_ipv4(self):
        """list_onlink_routes() returns exactly the on-link IPv4 routes."""
        cidr_ipv4 = []
        for cidr in self.cidrs:
            if utils.get_ip_version(cidr) == constants.IP_VERSION_4:
                cidr_ipv4.append(cidr)
                self.device.route.add_onlink_route(cidr)
        for cidr in cidr_ipv4:
            self._assert_route(constants.IP_VERSION_4, cidr=cidr)
        routes = self.device.route.list_onlink_routes(constants.IP_VERSION_4)
        self.assertEqual(len(cidr_ipv4), len(routes))

    def test_get_and_delete_gateway(self):
        """Gateways can be added, retrieved and deleted for each
        combination of scope, metric and table."""
        gateways = (str(netaddr.IPNetwork(self.device_cidr_ipv4).ip),
                    str(netaddr.IPNetwork(self.device_cidr_ipv6).ip + 1))
        scopes = ('global', 'site', 'link')
        metrics = (None, 1, 255)
        tables = (None, 1, 254, 255)
        for gateway, scope, metric, table in itertools.product(
                gateways, scopes, metrics, tables):
            ip_version = utils.get_ip_version(gateway)
            self.device.route.add_gateway(gateway, scope=scope, metric=metric,
                                          table=table)
            self._assert_route(ip_version, cidr=None, via=gateway, scope=scope,
                               metric=metric, table=table)
            self.assertEqual(gateway, self.device.route.get_gateway(
                ip_version=ip_version, table=table)['via'])
            self.device.route.delete_gateway(gateway, table=table, scope=scope)
            self.assertIsNone(self.device.route.get_gateway(
                ip_version=ip_version, table=table))

    def test_delete_route(self):
        """Routes can be deleted per (cidr, scope, table) combination."""
        scopes = ('global', 'site', 'link')
        tables = (None, 1, 254, 255)
        for cidr, scope, table in itertools.product(
                self.cidrs, scopes, tables):
            ip_version = utils.get_ip_version(cidr)
            self.device.route.add_route(cidr, table=table, scope=scope)
            self._assert_route(ip_version, cidr=cidr, scope=scope, table=table)
            self.device.route.delete_route(cidr, table=table, scope=scope)
            self._assert_route(ip_version, cidr=cidr, scope=scope, table=table,
                               not_in=True)

    def test_flush(self):
        """route.flush() empties the route table per IP version."""
        tables = (None, 1, 200)
        ip_versions = (constants.IP_VERSION_4, constants.IP_VERSION_6)
        for cidr, table in itertools.product(self.cidrs, tables):
            self.device.route.add_route(cidr, table=table)
        for ip_version, table in itertools.product(ip_versions, tables):
            routes = self.device.route.list_routes(ip_version, table=table)
            self.assertGreater(len(routes), 0)
            self.device.route.flush(ip_version, table=table)
            routes = self.device.route.list_routes(ip_version, table=table)
            self.assertEqual([], routes)
class IpAddrCommandTestCase(functional_base.BaseSudoTestCase):
    """Functional tests for IPDevice.addr listing with scope filters."""

    def setUp(self):
        super(IpAddrCommandTestCase, self).setUp()
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        ip_lib.IPWrapper(self.namespace).add_dummy('test_device')
        self.device = ip_lib.IPDevice('test_device', namespace=self.namespace)
        self.device.link.set_up()

    def test_list_with_scope(self):
        """Addresses added with a given scope must show up both in the
        unfiltered listing and in the scope-filtered listing."""
        scope_ip = [
            ('global', '192.168.100.1/24'),
            ('global', '2001:db8::1/64'),
            ('link', '192.168.101.1/24'),
            ('link', 'fe80::1:1/64'),
            ('site', 'fec0:0:0:f101::1/64'),
            ('host', '192.168.102.1/24')]
        for scope, _ip in scope_ip:
            self.device.addr.add(_ip, scope=scope)
        devices = self.device.addr.list()
        devices_cidr = {device['cidr'] for device in devices}
        # Every added CIDR appears in the unfiltered listing.
        for scope in scope_ip:
            self.assertIn(scope[1], devices_cidr)
        # Filtering by scope returns the matching CIDR.
        for scope, _ip in scope_ip:
            devices_filtered = self.device.addr.list(scope=scope)
            devices_cidr = {device['cidr'] for device in devices_filtered}
            self.assertIn(_ip, devices_cidr)
class GetDevicesWithIpTestCase(functional_base.BaseSudoTestCase):
    """Functional tests for ip_lib.get_devices_with_ip().

    setUp creates ``num_devices`` dummy interfaces in a private namespace
    and assigns one address per CIDR in ``self.cidrs`` to the first
    ``num_devices_with_ip`` of them.
    """

    def setUp(self):
        super().setUp()
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        self.devices = []
        self.num_devices = 5
        self.num_devices_with_ip = 3
        for idx in range(self.num_devices):
            dev_name = 'test_device_%s' % idx
            ip_lib.IPWrapper(self.namespace).add_dummy(dev_name)
            device = ip_lib.IPDevice(dev_name, namespace=self.namespace)
            device.link.set_up()
            self.devices.append(device)
        self.cidrs = [netaddr.IPNetwork('10.10.0.0/24'),
                      netaddr.IPNetwork('10.20.0.0/24'),
                      netaddr.IPNetwork('2001:db8:1234:1111::/64'),
                      netaddr.IPNetwork('2001:db8:1234:2222::/64')]
        for idx in range(self.num_devices_with_ip):
            for cidr in self.cidrs:
                # Device idx gets network address + idx in each CIDR.
                self.devices[idx].addr.add(str(cidr.ip + idx) + '/' +
                                           str(cidr.netmask.netmask_bits()))

    @staticmethod
    def _remove_loopback_interface(ip_addresses):
        """Drop the loopback device's addresses from the listing."""
        return [ipa for ipa in ip_addresses if
                ipa['name'] != ip_lib.LOOPBACK_DEVNAME]

    @staticmethod
    def _remove_ipv6_scope_link(ip_addresses):
        """Remove all IPv6 addresses with scope link (fe80::...)."""
        # BUG FIX: utils.get_ip_version() returns 4 or 6 and is therefore
        # always truthy; the previous expression dropped *every*
        # link-scope address, not only the kernel-generated IPv6
        # link-local ones.  Compare explicitly against IP_VERSION_6.
        return [ipa for ipa in ip_addresses if not (
            ipa['scope'] == 'link' and
            utils.get_ip_version(ipa['cidr']) == constants.IP_VERSION_6)]

    @staticmethod
    def _pop_ip_address(ip_addresses, cidr):
        """Remove the first entry matching ``cidr`` from ``ip_addresses``
        (in place); no-op if not present."""
        # Safe to pop while looping because we return immediately after.
        for idx, ip_address in enumerate(ip_addresses):
            if cidr == ip_address['cidr']:
                ip_addresses.pop(idx)
                return

    def test_get_devices_with_ip(self):
        """Unfiltered call returns exactly the addresses added in setUp."""
        ip_addresses = ip_lib.get_devices_with_ip(self.namespace)
        ip_addresses = self._remove_loopback_interface(ip_addresses)
        ip_addresses = self._remove_ipv6_scope_link(ip_addresses)
        self.assertEqual(self.num_devices_with_ip * len(self.cidrs),
                         len(ip_addresses))
        for idx in range(self.num_devices_with_ip):
            for cidr in self.cidrs:
                cidr = (str(cidr.ip + idx) + '/' +
                        str(cidr.netmask.netmask_bits()))
                self._pop_ip_address(ip_addresses, cidr)
        self.assertEqual(0, len(ip_addresses))

    def test_get_devices_with_ip_name(self):
        """Filtering by device name returns only that device's addresses;
        devices without addresses yield an empty listing."""
        for idx in range(self.num_devices_with_ip):
            dev_name = 'test_device_%s' % idx
            ip_addresses = ip_lib.get_devices_with_ip(self.namespace,
                                                      name=dev_name)
            ip_addresses = self._remove_loopback_interface(ip_addresses)
            ip_addresses = self._remove_ipv6_scope_link(ip_addresses)
            for cidr in self.cidrs:
                cidr = (str(cidr.ip + idx) + '/' +
                        str(cidr.netmask.netmask_bits()))
                self._pop_ip_address(ip_addresses, cidr)
            self.assertEqual(0, len(ip_addresses))
        for idx in range(self.num_devices_with_ip, self.num_devices):
            dev_name = 'test_device_%s' % idx
            ip_addresses = ip_lib.get_devices_with_ip(self.namespace,
                                                      name=dev_name)
            ip_addresses = self._remove_loopback_interface(ip_addresses)
            ip_addresses = self._remove_ipv6_scope_link(ip_addresses)
            self.assertEqual(0, len(ip_addresses))
class ListIpRoutesTestCase(functional_base.BaseSudoTestCase):
    """Functional tests for multipath routes in ip_lib.list_ip_routes()."""

    def setUp(self):
        super().setUp()
        self.namespace = self.useFixture(net_helpers.NamespaceFixture()).name
        self.device_names = ['test_device1', 'test_device2']
        self.device_ips = ['10.0.0.1/24', '10.0.1.1/24']
        self.device_cidrs = [netaddr.IPNetwork(ip_address).cidr for ip_address
                             in self.device_ips]
        for idx, dev in enumerate(self.device_names):
            ip_lib.IPWrapper(self.namespace).add_dummy(dev)
            device = ip_lib.IPDevice(dev, namespace=self.namespace)
            device.link.set_up()
            device.addr.add(self.device_ips[idx])

    def test_list_ip_routes_multipath(self):
        """Add a multipath route and verify every next hop is listed,
        including the defaults the kernel fills in (device resolved from
        the next hop subnet, weight 1 when unspecified)."""
        multipath = [
            {'device': self.device_names[0],
             'via': str(self.device_cidrs[0].ip + 100), 'weight': 10},
            {'device': self.device_names[1],
             'via': str(self.device_cidrs[1].ip + 100), 'weight': 20},
            {'via': str(self.device_cidrs[1].ip + 101), 'weight': 30},
            {'via': str(self.device_cidrs[1].ip + 102)}]
        ip_lib.add_ip_route(self.namespace, '1.2.3.0/24',
                            constants.IP_VERSION_4, via=multipath)
        routes = ip_lib.list_ip_routes(self.namespace, constants.IP_VERSION_4)
        # Expected defaults applied to the hops that omitted them.
        multipath[2]['device'] = self.device_names[1]
        multipath[3]['device'] = self.device_names[1]
        multipath[3]['weight'] = 1
        for route in (route for route in routes if
                      route['cidr'] == '1.2.3.0/24'):
            if not isinstance(route['via'], list):
                # Skip a single-hop route; we need the multipath one.
                continue
            self.assertEqual(len(multipath), len(route['via']))
            # Every expected next hop must appear among the listed hops;
            # the inner for/else fails if a hop has no match.
            for nexthop in multipath:
                for mp in route['via']:
                    if nexthop != mp:
                        continue
                    break
                else:
                    self.fail('Not matching route, routes: %s' % routes)
            return
        self.fail('Not matching route, routes: %s' % routes)
| apache-2.0 |
kratorius/ads | python/interviewquestions/string_prefix.py | 1 | 1709 | """
Find the longest sequence of prefix shared by all the words in a string.
Example:
"abcdef abcdxxx abcdabcdef abcyy" => "abc"
"""
import unittest
import itertools
def longest_prefix(string):
    """Return the longest prefix shared by all whitespace-separated words.

    Returns "" for an empty or whitespace-only input (the original
    implementation raised IndexError in that case).
    """
    words = string.split()
    if not words:
        return ""
    prefix = words[0]
    for word in words[1:]:
        # Count the matching leading characters of prefix and word;
        # zip() already bounds the scan by the shorter of the two.
        length = 0
        for pr, ch in zip(prefix, word):
            if pr != ch:
                break
            length += 1
        prefix = prefix[:length]
        if not prefix:
            # No common prefix is possible any more; stop early.
            break
    return prefix
def longest_prefix_itertools(string):
    """A slower (because of the need to copy things around) but more
    elegant implementation, based on itertools.

    Returns "" for an empty or whitespace-only input (the original
    implementation raised IndexError in that case).
    """
    words = string.split()
    if not words:
        return ""

    def same(pair):
        return pair[0] == pair[1]

    prefix = words[0]
    for word in words[1:]:
        # takewhile over zip(prefix, word) yields the matching leading
        # character pairs; its length is the new common-prefix length.
        matched = list(itertools.takewhile(same, zip(prefix, word)))
        prefix = prefix[:len(matched)]
        if not prefix:
            # No common prefix is possible any more; stop early.
            break
    return prefix
class LongestPrefixTest(unittest.TestCase):
    """Tests for both longest-prefix implementations."""

    def test_longest_prefix(self):
        # NOTE: the original had the last assertion duplicated verbatim
        # (copy-paste); the duplicate has been removed.
        self.assertEqual("abc", longest_prefix("abcdef abcdxxx abcdabcdef abcyy"))
        self.assertEqual("abcdef", longest_prefix("abcdef abcdefxxx abcdefdabcdef abcdefyy"))
        self.assertEqual("", longest_prefix("abcdef defg hijkl lmnopq"))

    def test_longest_prefix_itertools(self):
        self.assertEqual("abc", longest_prefix_itertools("abcdef abcdxxx abcdabcdef abcyy"))
        self.assertEqual("abcdef", longest_prefix_itertools("abcdef abcdefxxx abcdefdabcdef abcdefyy"))
        self.assertEqual("", longest_prefix_itertools("abcdef defg hijkl lmnopq"))
egabancho/invenio-upgrader | invenio_upgrader/upgrades/submit_2015_03_03_fix_models.py | 5 | 5464 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Upgrade Submit models."""
import warnings
from invenio.ext.sqlalchemy import db
from invenio.legacy.dbquery import run_sql
from invenio_upgrader.api import op
from sqlalchemy.exc import OperationalError
depends_on = ['invenio_release_1_1_0']
def info():
    """Return the one-line description shown by the upgrade engine."""
    return ("Add a autoincrement id in sbmCOLLECTION_sbmDOCTYPE "
            "and sbmCOLLECTION_sbmCOLLECTION table")
def do_upgrade():
    """Add an auto-increment ``id`` primary key to the two submission
    link tables, replacing their composite (id_father, id_son) keys.

    For each table the steps are strictly ordered: add the column, number
    the existing rows, swap the primary key, mark the column
    auto-increment, then relax/retype the old key columns and recreate
    the ``id_father`` index.
    """
    # Table sbmCOLLECTION_sbmCOLLECTION
    # add column "id" in the table
    op.add_column('sbmCOLLECTION_sbmCOLLECTION',
                  db.Column('id', db.Integer(11), nullable=False))
    # set all new ids (sequential, ordered by the old composite key so
    # the numbering is deterministic)
    records = run_sql("""SELECT id_father, id_son FROM """
                      """sbmCOLLECTION_sbmCOLLECTION AS ssc """
                      """ORDER BY ssc.id_father, ssc.id_son""")
    for index, rec in enumerate(records):
        run_sql("""UPDATE sbmCOLLECTION_sbmCOLLECTION
                   SET id = %s WHERE id_father = %s AND id_son = %s """,
                (index + 1, rec[0], rec[1]))
    # drop primary keys
    try:
        op.drop_constraint(None, 'sbmCOLLECTION_sbmCOLLECTION',
                           type_='primary')
    except OperationalError:
        # the primary key is already dropped (e.g. re-run after a
        # partial upgrade); continue rather than abort
        warnings.warn("""Primary key of sbmCOLLECTION_sbmCOLLECTION """
                      """table has been already dropped.""")
    # create new primary key with id
    op.create_primary_key('pk_sbmCOLLECTION_sbmCOLLECTION_id',
                          'sbmCOLLECTION_sbmCOLLECTION', ['id'])
    # set id as autoincrement
    op.alter_column('sbmCOLLECTION_sbmCOLLECTION', 'id',
                    existing_type=db.Integer(11),
                    existing_nullable=False, autoincrement=True)
    # fix columns id_father and id_son
    op.alter_column('sbmCOLLECTION_sbmCOLLECTION', 'id_father',
                    existing_type=db.Integer(11),
                    nullable=True, server_default=None)
    op.alter_column('sbmCOLLECTION_sbmCOLLECTION', 'id_son',
                    existing_type=db.Integer(11),
                    nullable=False, server_default=None)
    op.create_index('id_father', 'sbmCOLLECTION_sbmCOLLECTION',
                    columns=['id_father'])
    # Table sbmCOLLECTION_sbmDOCTYPE: same sequence of steps as above.
    # add column "id" in the table
    op.add_column('sbmCOLLECTION_sbmDOCTYPE',
                  db.Column('id', db.Integer(11), nullable=False))
    # set all new ids
    records = run_sql("""SELECT id_father, id_son
                         FROM sbmCOLLECTION_sbmDOCTYPE AS ssd
                         ORDER BY ssd.id_father, ssd.id_son""")
    for index, rec in enumerate(records):
        run_sql("""UPDATE sbmCOLLECTION_sbmDOCTYPE
                   SET id = %s WHERE id_father = %s AND id_son = %s """,
                (index + 1, rec[0], rec[1]))
    # drop primary keys
    try:
        op.drop_constraint('id_father', 'sbmCOLLECTION_sbmDOCTYPE',
                           type_='primary')
    except OperationalError:
        # the primary key is already dropped
        warnings.warn("""Primary key of sbmCOLLECTION_sbmDOCTYPE """
                      """table has been already dropped.""")
    # create new primary key with id
    op.create_primary_key('pk_sbmCOLLECTION_sbmDOCTYPE_id',
                          'sbmCOLLECTION_sbmDOCTYPE', ['id'])
    # set id as autoincrement
    op.alter_column('sbmCOLLECTION_sbmDOCTYPE', 'id',
                    existing_type=db.Integer(11),
                    existing_nullable=False, autoincrement=True)
    # fix columns id_father and id_son (id_son is a CHAR(10) doctype code
    # in this table, unlike the integer id_son above)
    op.alter_column('sbmCOLLECTION_sbmDOCTYPE', 'id_father',
                    existing_type=db.Integer(11),
                    nullable=True, server_default=None)
    op.alter_column('sbmCOLLECTION_sbmDOCTYPE', 'id_son',
                    existing_type=db.Char(10),
                    nullable=False, server_default=None)
    op.create_index('id_father', 'sbmCOLLECTION_sbmDOCTYPE',
                    columns=['id_father'])
def estimate():
    """Estimate running time of upgrade in seconds (optional).

    Roughly one second per thousand rows across both link tables.
    """
    row_counts = (
        run_sql("SELECT count(*) FROM sbmCOLLECTION_sbmCOLLECTION"),
        run_sql("SELECT count(*) FROM sbmCOLLECTION_sbmDOCTYPE"),
    )
    total_rows = sum(int(result[0][0]) for result in row_counts)
    return int(total_rows / 1000.0) + 1
def pre_upgrade():
    """Run pre-upgrade checks (optional).

    No checks are required for this upgrade.
    """
    # Example of raising errors:
    # raise RuntimeError("Description of error 1", "Description of error 2")
def post_upgrade():
    """Run post-upgrade checks (optional).

    No checks are required for this upgrade.
    """
    # Example of issuing warnings:
    # warnings.warn("A continuable error occurred")
| gpl-2.0 |
frodrigo/osmose-backend | analysers/analyser_merge_milestone_FR_metropole.py | 4 | 4801 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Didier Marchand 2020 ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from .Analyser_Merge import Analyser_Merge, SourceDataGouv, CSV, Load, Conflate, Select, Mapping
class Analyser_Merge_Milestone_FR_metropole(Analyser_Merge):
    """Conflate French national-road milestones ("bornes") from the CEREMA
    RIU open-data export with highway=milestone nodes in OSM (metropolitan
    France only, as the class name indicates)."""

    def __init__(self, config, logger = None):
        Analyser_Merge.__init__(self, config, logger)
        # Shared wording reused by the three issue classes declared below.
        doc = dict(
            detail = T_(
            '''The list of milestone comes from the CEREMA's database "RIU" in France.'''),
            trap = T_(
            '''Those milestones can't be on way * _link. The position of the marker may be a little different than what is visible on the road. Sometimes, a small white line perpendicular to the road on the emergency stop strip or the left flared strip can be seen on satellite images or Mapillary's photos.'''))
        self.def_class_missing_official(item = 8430, id = 41, level = 3, tags = ['merge', 'highway', 'fix:picture', 'fix:survey'],
            title = T_('Milestone not integrated'), **doc)
        self.def_class_possible_merge(item = 8431, id = 43, level = 3, tags = ['merge', 'highway', 'fix:picture', 'fix:survey'],
            title = T_('Milestone integration suggestion'), **doc)
        self.def_class_update_official(item = 8432, id = 44, level = 3, tags = ['merge', 'highway', 'fix:picture', 'fix:survey'],
            title = T_('Milestone update'), **doc)
        # Source coordinates are Lambert-93 (EPSG:2154) with comma decimals.
        self.init(
            "https://www.data.gouv.fr/fr/datasets/bornage-du-reseau-routier-national/",
            "Bornage du réseau routier national",
            CSV(SourceDataGouv(
                attribution="data.gouv.fr:Ministère de la Transition écologique et solidaire",
                dataset="57a83c3dc751df5b90bb5dd5",
                resource="7de08adc-74ae-4e62-8967-6f559ff6cbed")),
            Load("x", "y", srid = 2154,
                xFunction = Load.float_comma,
                yFunction = Load.float_comma,
                where = lambda row: self.is_milestone(row)),
            Conflate(
                select = Select(
                    types = ["nodes"],
                    tags = [{"highway": "milestone"}]),
                osmRef = "nat_ref",
                conflationDistance = 150,
                mapping = Mapping(
                    static1 = {"highway": "milestone"},
                    static2 = {"source:nat_ref": self.source},
                    mapping1 = {
                        "distance": 'pr',
                        "nat_ref": lambda row: self.transform_to_plo(row) }
                )))

    def is_milestone(self,row):
        # Return False for source rows that must not be mapped as milestones.
        if len(row['depPr']) == 3:
            # 3-character department codes — presumably overseas (97x),
            # outside this metropole analyser's scope. TODO confirm.
            return False
        elif [ele for ele in ('P', 'N1', 'N2', 'A9', 'N9') if ele in row['route']]:
            #P for temporary ; N1 for future up_class and N2 for down_class road ; A9,N9 in metropole, is not milestone but way_link or roundabout
            return False
        else:
            return True

    def transform_to_plo(self, row):
        """Build the PLO identifier (used as nat_ref) for a source row."""
        # use plo format, description available at http://dtrf.setra.fr/pdf/pj/Dtrf/0005/Dtrf-0005792/DT5792.pdf
        # dept must be 2 characters, zero-padded
        dept = row['depPr']
        if len(dept) == 1:
            dept = '0' + dept
        # 'C' (conceded/tolled) or '', never 'N'
        concede = 'C' if row['concessionPr'] == 'C' else ''
        # 'I' means ignore; side is D, G or U for droite (increasing PR),
        # gauche (decreasing PR), unique.
        sens = row['cote']
        if sens == 'I':
            sens = 'U'
        return dept + 'PR' + row['pr'] + sens + concede
| gpl-3.0 |
kovacsbalu/ansible-modules-extras | packaging/os/apk.py | 56 | 7067 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
# Based on pacman (Afterburn <http://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
# and apt (Matthew Williams <matthew@flowroute.com>>) modules.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: apk
short_description: Manages apk packages
description:
- Manages I(apk) packages for Alpine Linux.
version_added: "2.0"
options:
name:
description:
      - A package name, like C(foo), or multiple packages, like C(foo, bar).
required: false
default: null
state:
description:
- Indicates the desired package(s) state.
- C(present) ensures the package(s) is/are present.
- C(absent) ensures the package(s) is/are absent.
- C(latest) ensures the package(s) is/are present and the latest version(s).
required: false
default: present
choices: [ "present", "absent", "latest" ]
update_cache:
description:
- Update repository indexes. Can be run with other steps or on it's own.
required: false
default: no
choices: [ "yes", "no" ]
upgrade:
description:
- Upgrade all installed packages to their latest version.
required: false
default: no
choices: [ "yes", "no" ]
'''
EXAMPLES = '''
# Update repositories and install "foo" package
- apk: name=foo update_cache=yes
# Update repositories and install "foo" and "bar" packages
- apk: name=foo,bar update_cache=yes
# Remove "foo" package
- apk: name=foo state=absent
# Remove "foo" and "bar" packages
- apk: name=foo,bar state=absent
# Install the package "foo"
- apk: name=foo state=present
# Install the packages "foo" and "bar"
- apk: name=foo,bar state=present
# Update repositories and update package "foo" to latest version
- apk: name=foo state=latest update_cache=yes
# Update repositories and update packages "foo" and "bar" to latest versions
- apk: name=foo,bar state=latest update_cache=yes
# Update all installed packages to the latest versions
- apk: upgrade=yes
# Update repositories as a separate step
- apk: update_cache=yes
'''
import os
import re
def update_package_db(module):
    """Refresh the apk repository indexes; fail the module run on error."""
    rc, _stdout, _stderr = module.run_command("%s update" % (APK_PATH),
                                              check_rc=False)
    if rc == 0:
        return True
    module.fail_json(msg="could not update package db")
def query_package(module, name):
    """Return True when package *name* is currently installed."""
    cmd = "%s -v info --installed %s" % (APK_PATH, name)
    rc, _stdout, _stderr = module.run_command(cmd, check_rc=False)
    # "apk -v info --installed" exits 0 only when the package is present.
    return rc == 0
def query_latest(module, name):
    """Return True if the installed *name* is already the latest version.

    Parses ``apk version`` output, where a ``<`` between the installed
    and the available version means an upgrade is available.
    """
    cmd = "%s version %s" % (APK_PATH, name)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    # Escape the package name before interpolating it into the pattern:
    # names such as "g++" contain regex metacharacters and would otherwise
    # corrupt the expression (or silently fail to match).
    search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name),)
    match = re.search(search_pattern, stdout)
    # "<" means the installed version sorts before the repository version.
    if match and match.group(2) == "<":
        return False
    return True
def upgrade_packages(module):
    """Upgrade every installed package, honouring Ansible check mode."""
    cmd = "%s upgrade" % (APK_PATH)
    if module.check_mode:
        cmd += " --simulate"
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    if rc != 0:
        module.fail_json(msg="failed to upgrade packages")
    # apk prints "OK" when there was nothing to upgrade.
    if re.search('^OK', stdout):
        module.exit_json(changed=False, msg="packages already upgraded")
    module.exit_json(changed=True, msg="upgraded packages")
def install_packages(module, names, state):
    """Install missing packages; with state=latest also upgrade stale ones."""
    needs_upgrade = False
    missing = []
    for name in names:
        if not query_package(module, name):
            missing.append(name)
        elif state == 'latest' and not query_latest(module, name):
            needs_upgrade = True
    if not missing and not needs_upgrade:
        # Everything is installed (and current, when state=latest).
        module.exit_json(changed=False, msg="package(s) already installed")
    names = " ".join(missing)
    # Assemble "apk add [--upgrade] [--simulate] <names>".
    opts = ["add"]
    if needs_upgrade:
        opts.append("--upgrade")
    if module.check_mode:
        opts.append("--simulate")
    cmd = "%s %s %s" % (APK_PATH, " ".join(opts), names)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    if rc != 0:
        module.fail_json(msg="failed to install %s" % (names))
    module.exit_json(changed=True, msg="installed %s package(s)" % (names))
def remove_packages(module, names):
    """Remove (with --purge) any of the given packages that are installed."""
    present = [name for name in names if query_package(module, name)]
    if not present:
        module.exit_json(changed=False, msg="package(s) already removed")
    names = " ".join(present)
    base = "%s del --purge" % (APK_PATH)
    if module.check_mode:
        base += " --simulate"
    cmd = "%s %s" % (base, names)
    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    if rc != 0:
        module.fail_json(msg="failed to remove %s package(s)" % (names))
    module.exit_json(changed=True, msg="removed %s package(s)" % (names))
# ==========================================
# Main control flow.
def main():
    """Entry point: parse module arguments and dispatch to the apk helpers."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
            name = dict(type='list'),
            update_cache = dict(default='no', choices=BOOLEANS, type='bool'),
            upgrade = dict(default='no', choices=BOOLEANS, type='bool'),
        ),
        required_one_of = [['name', 'update_cache', 'upgrade']],
        supports_check_mode = True
    )
    # The helper functions above build their command lines from this global.
    global APK_PATH
    APK_PATH = module.get_bin_path('apk', required=True)
    p = module.params
    # normalize the state parameter ('installed'/'removed' are aliases)
    if p['state'] in ['present', 'installed']:
        p['state'] = 'present'
    if p['state'] in ['absent', 'removed']:
        p['state'] = 'absent'
    if p['update_cache']:
        update_package_db(module)
        # "update_cache" alone is a valid invocation: report and stop.
        if not p['name']:
            module.exit_json(changed=True, msg='updated repository indexes')
    if p['upgrade']:
        upgrade_packages(module)
    # The helpers below call exit_json/fail_json themselves and do not return.
    if p['state'] in ['present', 'latest']:
        install_packages(module, p['name'], p['state'])
    elif p['state'] == 'absent':
        remove_packages(module, p['name'])
# Import module snippets.
from ansible.module_utils.basic import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
xxsergzzxx/python-for-android | python3-alpha/python3-src/Lib/lib2to3/tests/pytree_idempotency.py | 56 | 2405 | #!/usr/bin/env python3
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Main program for testing the infrastructure."""
__author__ = "Guido van Rossum <guido@python.org>"
# Support imports (need to be imported first)
from . import support
# Python imports
import os
import sys
import logging
# Local imports
from .. import pytree
import pgen2
from pgen2 import driver
# Default logging configuration (stderr handler) so parser/driver warnings
# emitted via the logging module are actually visible.
logging.basicConfig()
def main():
    """Parse many Python files and check pytree round-tripping.

    For each file parsed, str(tree) is diffed against the original source
    (see diff() below); files that do not round-trip are reported at the end.
    """
    gr = driver.load_grammar("Grammar.txt")
    dr = driver.Driver(gr, convert=pytree.convert)
    fn = "example.py"
    tree = dr.parse_file(fn, debug=True)
    if not diff(fn, tree):
        print("No diffs.")
    if not sys.argv[1:]:
        return # Pass a dummy argument to run the complete test suite below
    problems = []
    # Process every imported module
    for name in sys.modules:
        mod = sys.modules[name]
        if mod is None or not hasattr(mod, "__file__"):
            continue
        fn = mod.__file__
        # Map compiled .pyc paths back to their .py source.
        if fn.endswith(".pyc"):
            fn = fn[:-1]
        if not fn.endswith(".py"):
            continue
        print("Parsing", fn, file=sys.stderr)
        tree = dr.parse_file(fn, debug=True)
        if diff(fn, tree):
            problems.append(fn)
    # Process every single module on sys.path (but not in packages)
    for dir in sys.path:
        try:
            names = os.listdir(dir)
        except os.error:
            # os.error is an alias of OSError; unreadable entries are skipped.
            continue
        print("Scanning", dir, "...", file=sys.stderr)
        for name in names:
            if not name.endswith(".py"):
                continue
            print("Parsing", name, file=sys.stderr)
            fn = os.path.join(dir, name)
            try:
                tree = dr.parse_file(fn, debug=True)
            except pgen2.parse.ParseError as err:
                print("ParseError:", err)
            else:
                if diff(fn, tree):
                    problems.append(fn)
    # Show summary of problem files
    if not problems:
        print("No problems. Congratulations!")
    else:
        print("Problems in following files:")
        for fn in problems:
            print("***", fn)
def diff(fn, tree):
    """Write str(tree) to a scratch file and diff it against *fn*.

    Returns 0 (falsy) when the parse tree reproduces the file exactly,
    and a non-zero (truthy) value otherwise.
    """
    import subprocess
    # Context manager guarantees the handle is closed even if write fails.
    with open("@", "w") as f:
        f.write(str(tree))
    try:
        # Run diff without a shell: file names containing spaces or shell
        # metacharacters can no longer break (or inject into) the command.
        return subprocess.call(["diff", "-u", fn, "@"])
    finally:
        os.remove("@")
if __name__ == "__main__":
main()
| apache-2.0 |
etherkit/OpenBeacon2 | client/macos/venv/lib/python3.8/site-packages/pip/_vendor/distlib/_backport/sysconfig.py | 33 | 26854 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
# Public API of this vendored sysconfig backport (mirrors the stdlib module).
__all__ = [
    'get_config_h_filename',
    'get_config_var',
    'get_config_vars',
    'get_makefile_filename',
    'get_path',
    'get_path_names',
    'get_paths',
    'get_platform',
    'get_python_version',
    'get_scheme_names',
    'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
# Determine the project base: the directory containing the interpreter
# binary, or the current working directory when that is unavailable.
if sys.executable:
    _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
    # sys.executable can be empty if argv[0] has been changed and Python is
    # unable to retrieve the real program name
    _PROJECT_BASE = _safe_realpath(os.getcwd())
# When running from a Windows build tree, hop up to the source root.
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
    """Return True when running from a Python source checkout/build tree."""
    # A source tree is recognised by the Modules/Setup.* files it contains.
    candidates = ("Setup.dist", "Setup.local")
    return any(os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", name))
               for name in candidates)
# Computed once at import time; True when running from a source checkout.
_PYTHON_BUILD = is_python_build()
# Guard so sysconfig.cfg is parsed into _SCHEMES only once (_ensure_cfg_read).
_cfg_read = False
def _ensure_cfg_read():
    """Lazily load the bundled sysconfig.cfg into _SCHEMES (idempotent)."""
    global _cfg_read
    if not _cfg_read:
        # Locate sysconfig.cfg through distlib's resource finder so this
        # also works when the package is distributed zipped.
        from ..resources import finder
        backport_package = __name__.rsplit('.', 1)[0]
        _finder = finder(backport_package)
        _cfgfile = _finder.find('sysconfig.cfg')
        assert _cfgfile, 'sysconfig.cfg exists'
        with _cfgfile.as_stream() as s:
            _SCHEMES.readfp(s)
        if _PYTHON_BUILD:
            # In-tree builds take headers from the source tree rather than
            # from the install prefix.
            for scheme in ('posix_prefix', 'posix_home'):
                _SCHEMES.set(scheme, 'include', '{srcdir}/Include')
                _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
        _cfg_read = True
# Install-scheme definitions, populated on demand from sysconfig.cfg.
_SCHEMES = configparser.RawConfigParser()
# Matches {name} substitution tokens used inside scheme path templates.
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
    """Fold the [globals] section of *config* into every other section and
    expand {var} references between options of the same section (in place).
    """
    _ensure_cfg_read()
    if config.has_section('globals'):
        globals = config.items('globals')
    else:
        globals = tuple()
    sections = config.sections()
    for section in sections:
        if section == 'globals':
            continue
        # Copy each global option into the section unless it is overridden.
        for option, value in globals:
            if config.has_option(section, option):
                continue
            config.set(section, option, value)
    config.remove_section('globals')
    # now expanding local variables defined in the cfg file
    #
    for section in config.sections():
        variables = dict(config.items(section))

        def _replacer(matchobj):
            # Unknown tokens are left unchanged (group(0) is the raw token).
            name = matchobj.group(1)
            if name in variables:
                return variables[name]
            return matchobj.group(0)

        for option, value in config.items(section):
            config.set(section, option, _VAR_REPL.sub(_replacer, value))
# NOTE(review): this expansion call is disabled in the vendored copy;
# confirm against upstream distlib before re-enabling.
#_expand_globals(_SCHEMES)
_PY_VERSION = '%s.%s.%s' % sys.version_info[:3]
_PY_VERSION_SHORT = '%s.%s' % sys.version_info[:2]
_PY_VERSION_SHORT_NO_DOT = '%s%s' % sys.version_info[:2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# Caches filled lazily by get_config_vars() / _getuserbase().
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
    """In the string `path`, replace tokens like {some.thing} with the
    corresponding value from the map `local_vars`.

    Unknown tokens fall back to the process environment; if neither source
    has a value, the token is left unchanged.
    """
    def _lookup(matchobj):
        token = matchobj.group(1)
        if token in local_vars:
            return local_vars[token]
        if token in os.environ:
            return os.environ[token]
        return matchobj.group(0)
    return _VAR_REPL.sub(_lookup, path)
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
    """Return the paths of install *scheme* with {var} tokens expanded and
    the results normalized.

    *vars*, if given, overrides/extends the interpreter's configuration
    variables (it is mutated in place by _extend_dict).
    """
    res = {}
    if vars is None:
        vars = {}
    _extend_dict(vars, get_config_vars())
    for key, value in _SCHEMES.items(scheme):
        if os.name in ('posix', 'nt'):
            value = os.path.expanduser(value)
        res[key] = os.path.normpath(_subst_vars(value, vars))
    return res
def format_value(value, vars):
    """Expand {name} tokens in *value* using the *vars* mapping; tokens
    without a mapping entry are left untouched."""
    def _lookup(matchobj):
        token = matchobj.group(1)
        return vars[token] if token in vars else matchobj.group(0)
    return _VAR_REPL.sub(_lookup, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
    """Parse a Makefile-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    # Regexes needed for parsing Makefile (and similar syntaxes,
    # like old-style Setup files).
    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
    if vars is None:
        vars = {}
    # done: fully resolved values; notdone: values still containing $refs.
    done = {}
    notdone = {}
    # surrogateescape keeps undecodable bytes round-trippable.
    with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
        lines = f.readlines()
    for line in lines:
        if line.startswith('#') or line.strip() == '':
            continue
        m = _variable_rx.match(line)
        if m:
            n, v = m.group(1, 2)
            v = v.strip()
            # `$$' is a literal `$' in make
            tmpv = v.replace('$$', '')
            if "$" in tmpv:
                notdone[n] = v
            else:
                try:
                    v = int(v)
                except ValueError:
                    # insert literal `$'
                    done[n] = v.replace('$$', '$')
                else:
                    done[n] = v
    # do variable interpolation here
    variables = list(notdone.keys())
    # Variables with a 'PY_' prefix in the makefile. These need to
    # be made available without that prefix through sysconfig.
    # Special care is needed to ensure that variable expansion works, even
    # if the expansion uses the name without a prefix.
    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
    # Iteratively substitute $(VAR)/${VAR} references until every pending
    # variable is resolved or proven bogus.
    while len(variables) > 0:
        for name in tuple(variables):
            value = notdone[name]
            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
            if m is not None:
                n = m.group(1)
                found = True
                if n in done:
                    item = str(done[n])
                elif n in notdone:
                    # get it on a subsequent round
                    found = False
                elif n in os.environ:
                    # do it like make: fall back to environment
                    item = os.environ[n]
                elif n in renamed_variables:
                    if (name.startswith('PY_') and
                        name[3:] in renamed_variables):
                        item = ""
                    elif 'PY_' + n in notdone:
                        found = False
                    else:
                        item = str(done['PY_' + n])
                else:
                    done[n] = item = ""
                if found:
                    after = value[m.end():]
                    value = value[:m.start()] + item + after
                    if "$" in after:
                        notdone[name] = value
                    else:
                        try:
                            value = int(value)
                        except ValueError:
                            done[name] = value.strip()
                        else:
                            done[name] = value
                        variables.remove(name)
                        # Mirror a resolved PY_XXX value under plain XXX.
                        if (name.startswith('PY_') and
                            name[3:] in renamed_variables):
                            name = name[3:]
                            if name not in done:
                                done[name] = value
            else:
                # bogus variable reference (e.g. "prefix=$/opt/python");
                # just drop it since we can't deal
                done[name] = value
                variables.remove(name)
    # strip spurious spaces
    for k, v in done.items():
        if isinstance(v, str):
            done[k] = v.strip()
    # save the results in the global dictionary
    vars.update(done)
    return vars
def get_makefile_filename():
    """Return the path of the Makefile."""
    if _PYTHON_BUILD:
        # In-tree builds keep the Makefile at the project root.
        return os.path.join(_PROJECT_BASE, "Makefile")
    config_dir_name = 'config'
    if hasattr(sys, 'abiflags'):
        config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
    return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
    """Initialize the module as appropriate for POSIX systems.

    Populates *vars* in place from the installed Makefile and pyconfig.h;
    raises IOError with a descriptive message when either is unreadable.
    """
    # load the installed Makefile:
    makefile = get_makefile_filename()
    try:
        _parse_makefile(makefile, vars)
    except IOError as e:
        msg = "invalid Python installation: unable to open %s" % makefile
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # load the installed pyconfig.h:
    config_h = get_config_h_filename()
    try:
        with open(config_h) as f:
            parse_config_h(f, vars)
    except IOError as e:
        msg = "invalid Python installation: unable to open %s" % config_h
        if hasattr(e, "strerror"):
            msg = msg + " (%s)" % e.strerror
        raise IOError(msg)
    # On AIX, there are wrong paths to the linker scripts in the Makefile
    # -- these paths are relative to the Python source, but when installed
    # the scripts are in another directory.
    if _PYTHON_BUILD:
        vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
    """Initialize the module as appropriate for NT"""
    # set basic install directories
    vars['LIBDEST'] = get_path('stdlib')
    vars['BINLIBDEST'] = get_path('platstdlib')
    vars['INCLUDEPY'] = get_path('include')
    # Windows constants: extension-module suffix, executable suffix, and the
    # version string without a dot (e.g. "38").
    vars['SO'] = '.pyd'
    vars['EXE'] = '.exe'
    vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
    vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
    """Parse a config.h-style file.

    A dictionary containing name/value pairs is returned. If an
    optional dictionary is passed in as the second argument, it is
    used instead of a new dictionary.
    """
    if vars is None:
        vars = {}
    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
    # readline() returns '' at EOF, which terminates the iterator.
    for line in iter(fp.readline, ''):
        m = define_rx.match(line)
        if m:
            name, value = m.group(1, 2)
            try:
                value = int(value)
            except ValueError:
                pass  # keep non-numeric values as strings
            vars[name] = value
            continue
        m = undef_rx.match(line)
        if m:
            # A commented-out "#undef SYM" records the symbol as disabled.
            vars[m.group(1)] = 0
    return vars
def get_config_h_filename():
    """Return the path of pyconfig.h."""
    if _PYTHON_BUILD:
        # In a build tree the header lives in PC/ on Windows, else at the root.
        inc_dir = os.path.join(_PROJECT_BASE, "PC") if os.name == "nt" else _PROJECT_BASE
    else:
        inc_dir = get_path('platinclude')
    return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
    """Return a tuple containing the schemes names."""
    names = sorted(_SCHEMES.sections())
    return tuple(names)
def get_path_names():
    """Return a tuple containing the paths names."""
    # xxx see if we want a static list
    reference_scheme = 'posix_prefix'
    return _SCHEMES.options(reference_scheme)
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
    """Return a mapping containing an install scheme.

    ``scheme`` is the install scheme name. If not provided, it will
    return the default scheme for the current platform.
    """
    _ensure_cfg_read()
    if not expand:
        # Raw templates, with {var} tokens left in place.
        return dict(_SCHEMES.items(scheme))
    return _expand_vars(scheme, vars)
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
    """Return a path corresponding to the scheme.

    ``scheme`` is the install scheme name.
    """
    paths = get_paths(scheme, vars, expand)
    return paths[name]
def get_config_vars(*args):
    """With no arguments, return a dictionary of all configuration
    variables relevant for the current platform.

    On Unix, this means every variable defined in Python's installed Makefile;
    On Windows and Mac OS it's a much smaller set.

    With arguments, return a list of values that result from looking up
    each argument in the configuration variable dictionary.
    """
    global _CONFIG_VARS
    # The dictionary is built once per process and cached in _CONFIG_VARS.
    if _CONFIG_VARS is None:
        _CONFIG_VARS = {}
        # Normalized versions of prefix and exec_prefix are handy to have;
        # in fact, these are the standard versions used most places in the
        # distutils2 module.
        _CONFIG_VARS['prefix'] = _PREFIX
        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
        _CONFIG_VARS['py_version'] = _PY_VERSION
        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
        _CONFIG_VARS['base'] = _PREFIX
        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
        _CONFIG_VARS['projectbase'] = _PROJECT_BASE
        try:
            _CONFIG_VARS['abiflags'] = sys.abiflags
        except AttributeError:
            # sys.abiflags may not be defined on all platforms.
            _CONFIG_VARS['abiflags'] = ''
        if os.name in ('nt', 'os2'):
            _init_non_posix(_CONFIG_VARS)
        if os.name == 'posix':
            _init_posix(_CONFIG_VARS)
        # Setting 'userbase' is done below the call to the
        # init function to enable using 'get_config_var' in
        # the init-function.
        if sys.version >= '2.6':
            _CONFIG_VARS['userbase'] = _getuserbase()
        if 'srcdir' not in _CONFIG_VARS:
            _CONFIG_VARS['srcdir'] = _PROJECT_BASE
        else:
            _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
        # Convert srcdir into an absolute path if it appears necessary.
        # Normally it is relative to the build directory. However, during
        # testing, for example, we might be running a non-installed python
        # from a different directory.
        if _PYTHON_BUILD and os.name == "posix":
            base = _PROJECT_BASE
            try:
                cwd = os.getcwd()
            except OSError:
                cwd = None
            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
                base != cwd):
                # srcdir is relative and we are not in the same directory
                # as the executable. Assume executable is in the build
                # directory and make srcdir absolute.
                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
        if sys.platform == 'darwin':
            kernel_version = os.uname()[2] # Kernel version (8.4.3)
            major_version = int(kernel_version.split('.')[0])
            if major_version < 8:
                # On Mac OS X before 10.4, check if -arch and -isysroot
                # are in CFLAGS or LDFLAGS and remove them if they are.
                # This is needed when building extensions on a 10.3 system
                # using a universal build of python.
                for key in ('LDFLAGS', 'BASECFLAGS',
                        # a number of derived variables. These need to be
                        # patched up as well.
                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                    flags = _CONFIG_VARS[key]
                    flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                    flags = re.sub('-isysroot [^ \t]*', ' ', flags)
                    _CONFIG_VARS[key] = flags
            else:
                # Allow the user to override the architecture flags using
                # an environment variable.
                # NOTE: This name was introduced by Apple in OSX 10.5 and
                # is used by several scripting languages distributed with
                # that OS release.
                if 'ARCHFLAGS' in os.environ:
                    arch = os.environ['ARCHFLAGS']
                    for key in ('LDFLAGS', 'BASECFLAGS',
                            # a number of derived variables. These need to be
                            # patched up as well.
                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                        flags = _CONFIG_VARS[key]
                        flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                        flags = flags + ' ' + arch
                        _CONFIG_VARS[key] = flags
                # If we're on OSX 10.5 or later and the user tries to
                # compiles an extension using an SDK that is not present
                # on the current machine it is better to not use an SDK
                # than to fail.
                #
                # The major usecase for this is users using a Python.org
                # binary installer on OSX 10.6: that installer uses
                # the 10.4u SDK, but that SDK is not installed by default
                # when you install Xcode.
                #
                CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
                if m is not None:
                    sdk = m.group(1)
                    if not os.path.exists(sdk):
                        for key in ('LDFLAGS', 'BASECFLAGS',
                                # a number of derived variables. These need to be
                                # patched up as well.
                                'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
                            flags = _CONFIG_VARS[key]
                            flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
                            _CONFIG_VARS[key] = flags
    if args:
        # Look each requested name up in the cache, preserving order;
        # unknown names yield None.
        vals = []
        for name in args:
            vals.append(_CONFIG_VARS.get(name))
        return vals
    else:
        return _CONFIG_VARS
def get_config_var(name):
    """Return the value of a single variable using the dictionary returned by
    'get_config_vars()'.

    Equivalent to get_config_vars().get(name)
    """
    all_vars = get_config_vars()
    return all_vars.get(name)
def get_platform():
    """Return a string that identifies the current platform.

    This is used mainly to distinguish platform-specific build directories and
    platform-specific built distributions. Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = sys.version.find(prefix)
        if i == -1:
            return sys.platform
        j = sys.version.find(")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform
    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform
    # Try to distinguish various flavours of Unix
    osname, host, release, version, machine = os.uname()
    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = osname.lower().replace('/', '')
    machine = machine.replace(' ', '_')
    machine = machine.replace('/', '-')
    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5": # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix": # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        rel_re = re.compile(r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to. This makes the compatibility story a bit more sane because the
        # machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        cfgvars = get_config_vars()
        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
        # NOTE: 'if True:' retained from upstream; the block always runs.
        if True:
            # Always calculate the release of the running machine,
            # needed to determine if we can build fat binaries or not.
            macrelease = macver
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                try:
                    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
                                  r'<string>(.*?)</string>', f.read())
                finally:
                    f.close()
                if m is not None:
                    macrelease = '.'.join(m.group(1).split('.')[:2])
                # else: fall back to the default behaviour
        if not macver:
            macver = macrelease
        if macver:
            release = macver
            osname = "macosx"
            if ((macrelease + '.') >= '10.4.' and
                '-arch' in get_config_vars().get('CFLAGS', '').strip()):
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have machine-type
                # 'universal' instead of 'fat'.
                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')
                archs = re.findall(r'-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))
                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r" % (archs,))
            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxsize >= 2**32:
                    machine = 'x86_64'
            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                # See 'i386' case
                if sys.maxsize >= 2**32:
                    machine = 'ppc64'
                else:
                    machine = 'ppc'
    return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
    """Return the 'major.minor' version string for the running Python."""
    return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
    """Display all information sysconfig detects."""
    # Dump platform, version, scheme, the resolved install paths and every
    # configuration variable -- handy for debugging installation layouts.
    print('Platform: "%s"' % get_platform())
    print('Python version: "%s"' % get_python_version())
    print('Current installation scheme: "%s"' % _get_default_scheme())
    print()
    _print_dict('Paths', get_paths())
    print()
    _print_dict('Variables', get_config_vars())
# Allow direct execution to dump the detected configuration.
if __name__ == '__main__':
    _main()
| gpl-3.0 |
applegrew/django-select2 | example/example/settings.py | 1 | 3535 | """
Django settings for example project.
Generated by 'django-admin startproject' using Django 3.1a1.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# (This is a demo project; a hard-coded key is acceptable only here.)
SECRET_KEY = "kstexlapcf3lucx@47mmxsu9-9eixia+6n97aw)4$qo&!laxad"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django_select2",  # the widget library this demo showcases
    "example",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "example.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [BASE_DIR / "templates"],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "example.wsgi.application"
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": BASE_DIR / "db.sqlite3",
    }
}
# Password validation
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = "/static/"
# Two Redis caches: "default" for general use and a dedicated "select2"
# cache (separate Redis DB) that django-select2 uses for widget state,
# selected via SELECT2_CACHE_BACKEND below.
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://127.0.0.1:6379/1",
        "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
    },
    "select2": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://127.0.0.1:6379/2",
        "OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
    },
}
SELECT2_CACHE_BACKEND = "select2"
| mit |
mlavin/django | django/db/backends/oracle/features.py | 6 | 2050 | from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.utils import InterfaceError
class DatabaseFeatures(BaseDatabaseFeatures):
    # Capability flags describing Oracle's behaviour to the ORM and test
    # suite; each attribute overrides a default on BaseDatabaseFeatures.
    empty_fetchmany_value = ()
    # Oracle stores '' as NULL, which leaks into comparison semantics.
    interprets_empty_strings_as_nulls = True
    uses_savepoints = True
    has_select_for_update = True
    has_select_for_update_nowait = True
    has_select_for_update_skip_locked = True
    has_select_for_update_of = True
    select_for_update_of_column = True
    can_return_id_from_insert = True
    allow_sliced_subqueries = False
    can_introspect_autofield = True
    supports_subqueries_in_group_by = False
    supports_transactions = True
    supports_timezones = False
    has_native_duration_field = True
    can_defer_constraint_checks = True
    supports_partially_nullable_unique_constraints = False
    # Oracle limits identifier length, so generated names get truncated.
    truncates_names = True
    has_bulk_insert = True
    supports_tablespaces = True
    supports_sequence_reset = False
    can_introspect_time_field = False
    atomic_transactions = False
    supports_combined_alters = False
    nulls_order_largest = True
    requires_literal_defaults = True
    closed_cursor_error_class = InterfaceError
    # Oracle has no bare SELECT; expressions are selected FROM DUAL.
    bare_select_suffix = " FROM DUAL"
    uppercases_column_names = True
    # select for update with limit can be achieved on Oracle, but not with the current backend.
    supports_select_for_update_with_limit = False
    supports_temporal_subtraction = True
    # Oracle doesn't ignore quoted identifiers case but the current backend
    # does by uppercasing all identifiers.
    ignores_table_name_case = True
    supports_index_on_text_field = False
    has_case_insensitive_like = False
    # PL/SQL fixtures used by the test suite's stored-procedure tests.
    create_test_procedure_without_params_sql = """
        CREATE PROCEDURE "TEST_PROCEDURE" AS
            V_I INTEGER;
        BEGIN
            V_I := 1;
        END;
    """
    create_test_procedure_with_int_param_sql = """
        CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS
            V_I INTEGER;
        BEGIN
            V_I := P_I;
        END;
    """
    supports_callproc_kwargs = True
| bsd-3-clause |
maelnor/nova | nova/tests/objects/test_virtual_interface.py | 31 | 4618 | # Copyright (C) 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import db
from nova.objects import virtual_interface as vif_obj
from nova.tests.objects import test_objects
# Canned virtual-interface DB row used as the return value of the mocked
# db-layer calls in the tests below.
fake_vif = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'id': 1,
    'address': '00:00:00:00:00:00',
    'network_id': 123,
    'instance_uuid': 'fake-uuid',
    'uuid': 'fake-uuid-2',
}
class _TestVirtualInterface(object):
    """Shared assertions for VirtualInterface object tests; mixed into the
    local and remote test case classes below."""

    @staticmethod
    def _compare(test, expected, actual):
        # Assert that every field of the expected DB row matches the object.
        # Parameters were renamed: the original `db` shadowed the
        # module-level `db` import, and the iterated value was fetched a
        # second time via ``db[field]`` instead of being used directly.
        for field, value in expected.items():
            test.assertEqual(value, actual[field])

    def test_get_by_id(self):
        with mock.patch.object(db, 'virtual_interface_get') as get:
            get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_id(self.context, 1)
            self._compare(self, fake_vif, vif)

    def test_get_by_uuid(self):
        with mock.patch.object(db, 'virtual_interface_get_by_uuid') as get:
            get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_uuid(self.context,
                                                       'fake-uuid-2')
            self._compare(self, fake_vif, vif)

    def test_get_by_address(self):
        with mock.patch.object(db, 'virtual_interface_get_by_address') as get:
            get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_address(self.context,
                                                          '00:00:00:00:00:00')
            self._compare(self, fake_vif, vif)

    def test_get_by_instance_and_network(self):
        with mock.patch.object(db,
                'virtual_interface_get_by_instance_and_network') as get:
            get.return_value = fake_vif
            vif = vif_obj.VirtualInterface.get_by_instance_and_network(
                self.context, 'fake-uuid', 123)
            self._compare(self, fake_vif, vif)

    def test_create(self):
        vif = vif_obj.VirtualInterface()
        vif.address = '00:00:00:00:00:00'
        vif.network_id = 123
        vif.instance_uuid = 'fake-uuid'
        vif.uuid = 'fake-uuid-2'
        with mock.patch.object(db, 'virtual_interface_create') as create:
            create.return_value = fake_vif
            vif.create(self.context)
        self.assertEqual(self.context, vif._context)
        vif._context = None
        self._compare(self, fake_vif, vif)

    def test_delete_by_instance_uuid(self):
        with mock.patch.object(db,
                'virtual_interface_delete_by_instance') as delete:
            vif_obj.VirtualInterface.delete_by_instance_uuid(self.context,
                                                             'fake-uuid')
            delete.assert_called_with(self.context, 'fake-uuid')
# Runs the shared VirtualInterface tests against the local object backend.
class TestVirtualInterfaceObject(test_objects._LocalTest,
                                 _TestVirtualInterface):
    pass
# Runs the same shared tests through the remote object backend.
class TestRemoteVirtualInterfaceObject(test_objects._RemoteTest,
                                       _TestVirtualInterface):
    pass
class _TestVirtualInterfaceList(object):
    # Shared assertions for VirtualInterfaceList; mixed into the local and
    # remote test case classes below.
    def test_get_all(self):
        with mock.patch.object(db, 'virtual_interface_get_all') as get:
            get.return_value = [fake_vif]
            vifs = vif_obj.VirtualInterfaceList.get_all(self.context)
            self.assertEqual(1, len(vifs))
            _TestVirtualInterface._compare(self, fake_vif, vifs[0])
    def test_get_by_instance_uuid(self):
        with mock.patch.object(db, 'virtual_interface_get_by_instance') as get:
            get.return_value = [fake_vif]
            vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid(
                self.context, 'fake-uuid')
            self.assertEqual(1, len(vifs))
            _TestVirtualInterface._compare(self, fake_vif, vifs[0])
# Runs the shared list tests against the local object backend.
class TestVirtualInterfaceList(test_objects._LocalTest,
                               _TestVirtualInterfaceList):
    pass
# Runs the same shared list tests through the remote object backend.
class TestRemoteVirtualInterfaceList(test_objects._RemoteTest,
                                     _TestVirtualInterfaceList):
    pass
| apache-2.0 |
CredoReference/edx-platform | cms/conftest.py | 8 | 2166 | """
Studio unit test configuration and fixtures.
This module needs to exist because the pytest.ini in the cms package stops
pytest from looking for the conftest.py module in the parent directory when
only running cms tests.
"""
from __future__ import absolute_import, unicode_literals
import importlib
import os
import contracts
import pytest
# Patch the xml libs before anything else.
from safe_lxml import defuse_xml_libs
defuse_xml_libs()
def pytest_configure(config):
    """
    Perform the core setup that manage.py would normally do, before any
    tests are collected.  Does nothing when pytest is only printing its
    help text.
    """
    if config.getoption('help'):
        return
    # PyContracts checks are opt-in; disable them unless explicitly enabled.
    if not os.environ.get('ENABLE_CONTRACTS', False):
        contracts.disable_all()
    # Run the startup module matching the service whose settings are active.
    settings_module = os.environ.get('DJANGO_SETTINGS_MODULE')
    if settings_module.startswith('cms'):
        startup_name = 'cms.startup'
    else:
        startup_name = 'lms.startup'
    importlib.import_module(startup_name).run()
# Overrides the same-named autouse fixture from pytest-django with a no-op;
# see the docstring for why the default behaviour is harmful here.
@pytest.fixture(autouse=True, scope='function')
def _django_clear_site_cache():
    """
    pytest-django uses this fixture to automatically clear the Site object
    cache by replacing it with a new dictionary. edx-django-sites-extensions
    grabs the cache dictionary at startup, and uses that one for all lookups
    from then on. Our CacheIsolationMixin class tries to clear the cache by
    grabbing the current dictionary from the site models module and clearing
    it. Long story short: if you use this all together, neither cache
    clearing mechanism actually works. So override this fixture to not mess
    with what has been working for us so far.
    """
    pass
@pytest.fixture(autouse=True)
def no_webpack_loader(monkeypatch):
    """
    Monkeypatch webpack_loader to make sure that webpack assets don't need to be
    compiled before unit tests are run.
    """
    # Stub out both the template tag and the utility entry point so any
    # template or view that renders a bundle gets an empty result.
    monkeypatch.setattr(
        "webpack_loader.templatetags.webpack_loader.render_bundle",
        lambda entry, extension=None, config='DEFAULT', attrs='': ''
    )
    monkeypatch.setattr(
        "webpack_loader.utils.get_as_tags",
        lambda entry, extension=None, config='DEFAULT', attrs='': []
    )
| agpl-3.0 |
pp-mo/iris | docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py | 5 | 1493 | import iris
import iris.analysis
import iris.plot as iplt
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
global_air_temp = iris.load_cube(iris.sample_data_path("air_temp.pp"))
regional_ash = iris.load_cube(iris.sample_data_path("NAME_output.txt"))
# Collapse the vertical (flight_level) dimension into a total column sum.
regional_ash = regional_ash.collapsed("flight_level", iris.analysis.SUM)
# Mask values so low that they are anomalous.
regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)
norm = matplotlib.colors.LogNorm(5e-6, 0.0175)
# Area-weighted regridding needs cell bounds on the target grid.
global_air_temp.coord("longitude").guess_bounds()
global_air_temp.coord("latitude").guess_bounds()
fig = plt.figure(figsize=(8, 4.5))
# Top-left panel: the un-regridded regional data for reference.
plt.subplot(2, 2, 1)
iplt.pcolormesh(regional_ash, norm=norm)
plt.title("Volcanic ash total\nconcentration not regridded", size="medium")
# Remaining panels: regrid with increasing missing-data tolerance (mdtol).
for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]):
    plt.subplot(2, 2, subplot_num)
    scheme = iris.analysis.AreaWeighted(mdtol=mdtol)
    global_ash = regional_ash.regrid(global_air_temp, scheme)
    iplt.pcolormesh(global_ash, norm=norm)
    plt.title(
        "Volcanic ash total concentration\n"
        "regridded with AreaWeighted(mdtol={})".format(mdtol),
        size="medium",
    )
plt.subplots_adjust(
    hspace=0, wspace=0.05, left=0.001, right=0.999, bottom=0, top=0.955
)
# Iterate over each of the figure's axes, adding coastlines, gridlines
# and setting the extent.
for ax in fig.axes:
    ax.coastlines("50m")
    ax.gridlines()
    ax.set_extent([-80, 40, 31, 75])
plt.show()
| lgpl-3.0 |
NixIPFS/nixipfs-scripts | nixipfs/src/karkinos.py | 1 | 1559 | import urllib.request
import json
import os
from nixipfs.download_helpers import fetch_json, fetch_store_path
from nixipfs.hydra_helpers import *
from nixipfs.defaults import *
class KarkinosURLopener(urllib.request.FancyURLopener):
    # FancyURLopener sends this string as the User-Agent header.
    version = "Karkinos/11.11"
class Karkinos:
    """Small client for reading the build products of a single Hydra
    evaluation, downloading outputs through a binary cache.
    """

    def __init__(self, hydra_url, eval_id, binary_cache=DEFAULT_BINARY_CACHE_URL):
        # NOTE(review): this assignment looks like a Python 2 leftover;
        # Python 3's urllib.request does not consult ``urllib._urlopener``,
        # so the custom User-Agent may never take effect -- verify.
        urllib._urlopener = KarkinosURLopener()
        self.hydra_url = hydra_url
        self.binary_cache = binary_cache
        self.eval_id = eval_id

    @property
    def eval_url(self):
        """URL of the Hydra evaluation."""
        return "{}/eval/{}".format(self.hydra_url, self.eval_id)

    @property
    def store_path_url(self):
        """URL listing every store path produced by the evaluation."""
        return "{}/store-paths".format(self.eval_url)

    def build_info_url(self, jobname):
        """URL of the build info for job *jobname* in this evaluation."""
        return "{}/job/{}".format(self.eval_url, jobname)

    def fetch_eval_info(self):
        """Fetch the evaluation metadata as parsed JSON."""
        return fetch_json(self.eval_url)

    def fetch_store_paths(self):
        """Fetch the list of store paths as parsed JSON."""
        return fetch_json(self.store_path_url)

    def fetch_build_info(self, jobname):
        """Fetch the build info for *jobname* as parsed JSON."""
        return fetch_json(self.build_info_url(jobname))

    def download_file(self, jobname, dest_dir, dest_name='', tmp_dir=None):
        """Download the output of *jobname* into *dest_dir*.

        *dest_name* defaults to the basename of the job's store path.
        *tmp_dir* defaults to the current working directory AT CALL TIME;
        the original signature used ``tmp_dir=os.getcwd()``, which Python
        evaluates once at import time, silently freezing whatever directory
        the module happened to be imported from.  Existing destination
        files are left untouched.
        """
        if tmp_dir is None:
            tmp_dir = os.getcwd()
        build_info = BuildInfo(self.fetch_build_info(jobname))
        if len(dest_name) == 0:
            dest_name = os.path.basename(build_info.path)
        dest_file = os.path.join(dest_dir, dest_name)
        if not os.path.isfile(dest_file):
            fetch_store_path(build_info.path, dest_file, self.binary_cache, tmp_dir)
| gpl-3.0 |
showp1984/bricked-hammerhead | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0       # -q: suppress progress output
test = 0        # -t: syntax-check mode (print sysfs paths, don't write)
comments = 0    # -c: echo comment lines after the first command
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
# Maps the symbolic command names used in test files to the numeric
# opcodes understood by the kernel rttest driver.
cmd_opcodes = {
    "schedother" : "1",
    "schedfifo" : "2",
    "lock" : "3",
    "locknowait" : "4",
    "lockint" : "5",
    "lockintnowait" : "6",
    "lockcont" : "7",
    "unlock" : "8",
    "signal" : "11",
    "resetevent" : "98",
    "reset" : "99",
    }
# Each entry maps a check name to [status-field, comparison, fixed-arg].
# A None fixed-arg means the argument column of the test line supplies the
# value to compare against (see analyse()).
test_opcodes = {
    "prioeq" : ["P" , "eq" , None],
    "priolt" : ["P" , "lt" , None],
    "priogt" : ["P" , "gt" , None],
    "nprioeq" : ["N" , "eq" , None],
    "npriolt" : ["N" , "lt" , None],
    "npriogt" : ["N" , "gt" , None],
    "unlocked" : ["M" , "eq" , 0],
    "trylock" : ["M" , "eq" , 1],
    "blocked" : ["M" , "eq" , 2],
    "blockedwake" : ["M" , "eq" , 3],
    "locked" : ["M" , "eq" , 4],
    "opcodeeq" : ["O" , "eq" , None],
    "opcodelt" : ["O" , "lt" , None],
    "opcodegt" : ["O" , "gt" , None],
    "eventeq" : ["E" , "eq" , None],
    "eventlt" : ["E" , "lt" , None],
    "eventgt" : ["E" , "gt" , None],
    }
# Print usage information
# (Python 2 script: print is a statement here.)
def usage():
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    # `str` shadows the builtin, kept for byte-compatibility.
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one status value against a test opcode.

    val -- raw status value (string) read from the sysfs status file
    top -- opcode triple [status-field, comparison, fixed-arg] from
           test_opcodes
    arg -- argument column of the test line (a digit string, or a symbolic
           command name for "O" comparisons)

    Returns 1 when the comparison holds, 0 otherwise.
    """
    intval = int(val)

    if top[0] == "M":
        # Mutex state: extract digit number `arg` (counted from the right).
        # Floor division (//) keeps the Python 2 integer-division behaviour
        # and also works correctly under Python 3, where `/` would yield a
        # float and break the digit extraction.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: translate symbolic command names to their numbers.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)
# Parse commandline options
# (Each flag just toggles one of the module-level globals above.)
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)
# Select the input source
# Positional argument = test file; otherwise read the test from stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0
# Read the test patterns
# Each non-comment line has four colon-separated fields:
#   cmd : opcode : thread-id : data
# cmd "t"/"w" tests (or waits on) a status value, "c" issues a command.
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            # "w" polls until the condition holds; "t" checks exactly once.
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
okuta/chainer | chainer/backends/_chainerx.py | 2 | 6717 | import numpy
import chainer
from chainer import _backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
import chainerx
class ChainerxDevice(_backend.Device):
    """Device for ChainerX backend"""
    xp = chainerx
    supported_array_types = (chainerx.ndarray,)
    __hash__ = _backend.Device.__hash__
    def __init__(self, device):
        # type: (chainerx.Device) -> None
        assert isinstance(device, chainerx.Device)
        super(ChainerxDevice, self).__init__()
        self.device = device  # type: chainerx.Device
    @staticmethod
    def from_array(array):
        # Returns the ChainerxDevice holding `array`, or None when `array`
        # is not a ChainerX array (or carries no device).
        if isinstance(array, chainerx.ndarray) and array.device is not None:
            return ChainerxDevice(array.device)
        return None
    @staticmethod
    def from_fallback_device(device):
        """Returns a :class:`~chainer.backend.ChainerxDevice` corresponding \
        to the fallback device.
        .. seealso::
            :data:`~chainer.backend.ChainerxDevice.fallback_device`
        """
        assert isinstance(device, _backend.Device)
        if isinstance(device, _cpu.CpuDevice):
            return ChainerxDevice(chainerx.get_device('native', 0))
        if isinstance(device, cuda.GpuDevice):
            return ChainerxDevice(
                chainerx.get_device('cuda', device.device.id))
        raise RuntimeError(
            'Only CPU or GPU devices are allowed. '
            'Actual: {}'.format(device))
    @property
    def name(self):
        # Delegates to the underlying chainerx.Device name.
        return self.device.name
    @property
    def fallback_device(self):
        """Fallback device.
        A fallback device is either a :class:`~chainer.backend.CpuDevice` or
        a :class:`~chainer.backend.GpuDevice` which shares the same physical
        device with the original ChainerX device.
        For example, the fallback device of ``native:0`` ChainerX device is
        :class:`~chainer.backend.CpuDevice`. The fallback device of ``cuda:1``
        ChainerX device is :class:`~chainer.backend.GpuDevice` with device ID
        1.
        """
        backend_name = self.device.backend.name
        if backend_name == 'native':
            return _cpu.CpuDevice()
        if backend_name == 'cuda':
            return cuda.GpuDevice.from_device_id(self.device.index)
        raise RuntimeError(
            'Only \'native\' or \'cuda\' devices have corresponding fallback '
            'devices. Actual: {}'.format(backend_name))
    def __eq__(self, other):
        return (
            isinstance(other, ChainerxDevice)
            and other.device == self.device)
    def __repr__(self):
        return '<{} {}>'.format(
            self.__class__.__name__, self.device.name)
    def create_context(self):
        # Returns a context that sets the default device.
        return chainerx.using_device(self.device)
    def send_array(self, array):
        # Move/convert `array` onto this device; returns the array
        # unchanged when it is already resident here.
        device = self.device
        if isinstance(array, chainerx.ndarray):
            if array.device is device:
                return array
            return array.to_device(device)
        return _array_to_chainerx(array, device)
    def use(self):
        # Make this the default ChainerX device for subsequent operations.
        chainerx.set_default_device(self.device)
    def is_array_supported(self, array):
        # True only for ChainerX arrays already on this exact device.
        return (
            isinstance(array, chainerx.ndarray)
            and self.device == array.device)
def to_chx(array):
    """Converts an array or arrays to ChainerX.
    Destination ChainerX devices are chosen according to the types of input
    arrays.
    """
    # Per-array conversion is delegated to _array_to_chainerx.
    return _backend._convert_arrays(array, _array_to_chainerx)
def from_chx(array):
    """Converts an array or arrays from ChainerX to NumPy or CuPy ones.
    Destination array types are chosen such that no copies occur.
    """
    # Per-array conversion is delegated to _array_from_chainerx.
    return _backend._convert_arrays(array, _array_from_chainerx)
def _get_chainerx_device(device_spec):
    # Returns chainerx.Device
    # Passes an existing chainerx.Device through unchanged; otherwise
    # treats `device_spec` as a specifier accepted by chainerx.get_device.
    if isinstance(device_spec, chainerx.Device):
        return device_spec
    return chainerx.get_device(device_spec)
def _array_to_chainerx(array, device=None):
    # Convert one NumPy/CuPy/iDeep array (or scalar) to chainerx.ndarray.
    # If device is None, appropriate device is chosen according to the input
    # arrays.
    assert device is None or isinstance(device, chainerx.Device)
    if array is None:
        return None
    if array.dtype not in chainerx.all_dtypes:
        raise TypeError(
            'Dtype {} is not supported in ChainerX.'.format(array.dtype.name))
    if isinstance(array, chainerx.ndarray):
        # Already ChainerX: at most a device transfer is needed.
        if device is None:
            return array
        if device is array.device:
            return array
        return array.to_device(device)
    if isinstance(array, numpy.ndarray):
        if device is None:
            device = chainerx.get_device('native', 0)
        return chainerx.array(array, device=device, copy=False)
    if isinstance(array, cuda.ndarray):
        if device is None:
            device = chainerx.get_device('cuda', array.device.id)
        elif device.backend.name != 'cuda':
            # cupy to non-cuda backend
            # TODO(niboshi): Remove conversion to numpy when both CuPy and
            # ChainerX support the array interface.
            array = _cpu._to_cpu(array)
            return chainerx.array(array, device=device, copy=False)
        elif device.index != array.device.id:
            # cupy to cuda backend but different device
            array = cuda.to_gpu(array, device=device.index)
        # cupy to cuda backend with the same device
        # NOTE(review): wraps the CuPy buffer without copying; the trailing
        # `array` argument presumably keeps the owning memory alive -- confirm.
        return chainerx._core._fromrawpointer(
            array.data.mem.ptr,
            array.shape,
            array.dtype,
            array.strides,
            device,
            array.data.ptr - array.data.mem.ptr,
            array)
    if isinstance(array, intel64.mdarray):
        # iDeep arrays are materialized as NumPy first, then converted.
        return _array_to_chainerx(numpy.array(array), device)
    if numpy.isscalar(array):
        return chainerx.asarray(array)
    raise TypeError(
        'Array cannot be converted into chainerx.ndarray'
        '\nActual type: {0}.'.format(type(array)))
def _array_from_chainerx(array):
    # Convert one ChainerX array to NumPy (native backend) or CuPy (cuda
    # backend); known non-ChainerX array types pass through unchanged.
    if array is None:
        return None
    if not isinstance(array, chainerx.ndarray):
        if isinstance(array, chainer.get_array_types()):
            return array
        raise TypeError(
            'Tried to convert to a non-ChainerX array from an invalid type: '
            '{}'.format(type(array)))
    backend_name = array.device.backend.name
    if backend_name == 'native':
        return _cpu._to_cpu(array)
    if backend_name == 'cuda':
        return cuda.to_gpu(array, array.device.index)
    raise ValueError(
        'Only ChainerX arrays with native or cuda backends can be converted '
        'to non-ChainerX arrays.\nActual: {0}.'.format(backend_name))
| mit |
hyperized/ansible | lib/ansible/modules/cloud/openstack/os_stack.py | 17 | 9687 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
# (c) 2016, Steve Baker <sbaker@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_stack
short_description: Add/Remove Heat Stack
extends_documentation_fragment: openstack
version_added: "2.2"
author:
- "Mathieu Bultel (@matbu)"
- "Steve Baker (@steveb)"
description:
- Add or Remove a Stack to an OpenStack Heat
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name of the stack that should be created, name could be char and digit, no space
required: true
tag:
description:
- Tag for the stack that should be created, name could be char and digit, no space
version_added: "2.5"
template:
description:
- Path of the template file to use for the stack creation
environment:
description:
- List of environment files that should be used for the stack creation
parameters:
description:
- Dictionary of parameters for the stack creation
rollback:
description:
- Rollback stack creation
type: bool
default: 'yes'
timeout:
description:
- Maximum number of seconds to wait for the stack creation
default: 3600
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
---
- name: create stack
ignore_errors: True
register: stack_create
os_stack:
name: "{{ stack_name }}"
tag: "{{ tag_name }}"
state: present
template: "/path/to/my_stack.yaml"
environment:
- /path/to/resource-registry.yaml
- /path/to/environment.yaml
parameters:
bmc_flavor: m1.medium
bmc_image: CentOS
key_name: default
private_net: "{{ private_net_param }}"
node_count: 2
name: undercloud
image: CentOS
my_flavor: m1.large
external_net: "{{ external_net_param }}"
'''
RETURN = '''
id:
description: Stack ID.
type: str
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
returned: always
stack:
description: stack info
type: complex
returned: always
contains:
action:
description: Action, could be Create or Update.
type: str
sample: "CREATE"
creation_time:
description: Time when the action has been made.
type: str
sample: "2016-07-05T17:38:12Z"
description:
description: Description of the Stack provided in the heat template.
type: str
sample: "HOT template to create a new instance and networks"
id:
description: Stack ID.
type: str
sample: "97a3f543-8136-4570-920e-fd7605c989d6"
name:
description: Name of the Stack
type: str
sample: "test-stack"
identifier:
description: Identifier of the current Stack action.
type: str
sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
links:
description: Links to the current Stack.
type: list of dict
sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6']"
outputs:
description: Output returned by the Stack.
type: list of dict
sample: "{'description': 'IP address of server1 in private network',
'output_key': 'server1_private_ip',
'output_value': '10.1.10.103'}"
parameters:
description: Parameters of the current Stack
type: dict
sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
'OS::stack_name': 'test-stack',
'stack_status': 'CREATE_COMPLETE',
'stack_status_reason': 'Stack CREATE completed successfully',
'status': 'COMPLETE',
'template_description': 'HOT template to create a new instance and networks',
'timeout_mins': 60,
'updated_time': null}"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
from ansible.module_utils._text import to_native
def _create_stack(module, stack, cloud, sdk, parameters):
    # Create the Heat stack described by the module parameters and wait for
    # completion; fails the Ansible module on any error or bad status.
    # NOTE(review): creation always waits (wait=True) regardless of the
    # module's `wait` parameter, unlike _update_stack -- confirm intended.
    try:
        stack = cloud.create_stack(module.params['name'],
                                   template_file=module.params['template'],
                                   environment_files=module.params['environment'],
                                   timeout=module.params['timeout'],
                                   wait=True,
                                   rollback=module.params['rollback'],
                                   **parameters)
        # Re-fetch to get the final stack status after the wait.
        stack = cloud.get_stack(stack.id, None)
        if stack.stack_status == 'CREATE_COMPLETE':
            return stack
        else:
            module.fail_json(msg="Failure in creating stack: {0}".format(stack))
    except sdk.exceptions.OpenStackCloudException as e:
        if hasattr(e, 'response'):
            module.fail_json(msg=to_native(e), response=e.response.json())
        else:
            module.fail_json(msg=to_native(e))
def _update_stack(module, stack, cloud, sdk, parameters):
    # Update an existing Heat stack in place; fails the Ansible module on
    # any SDK error or when the stack does not reach UPDATE_COMPLETE.
    try:
        stack = cloud.update_stack(
            module.params['name'],
            template_file=module.params['template'],
            environment_files=module.params['environment'],
            timeout=module.params['timeout'],
            rollback=module.params['rollback'],
            wait=module.params['wait'],
            **parameters)
        if stack['stack_status'] == 'UPDATE_COMPLETE':
            return stack
        else:
            module.fail_json(msg="Failure in updating stack: %s" %
                             stack['stack_status_reason'])
    except sdk.exceptions.OpenStackCloudException as e:
        if hasattr(e, 'response'):
            module.fail_json(msg=to_native(e), response=e.response.json())
        else:
            module.fail_json(msg=to_native(e))
def _system_state_change(module, stack, cloud):
    """Report whether applying the requested state would change anything.

    A change is needed when the stack should exist but does not, or should
    be absent but still exists.  ``cloud`` is accepted for signature
    compatibility and is not consulted.
    """
    desired = module.params['state']
    missing = not stack
    if desired == 'present' and missing:
        return True
    return desired == 'absent' and not missing
def main():
    # Entry point of the Ansible module: builds the argument spec, then
    # creates/updates or deletes the named Heat stack.
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        tag=dict(required=False, default=None),
        template=dict(default=None),
        environment=dict(default=None, type='list'),
        parameters=dict(default={}, type='dict'),
        rollback=dict(default=False, type='bool'),
        timeout=dict(default=3600, type='int'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    state = module.params['state']
    name = module.params['name']
    # Check for required parameters when state == 'present'
    if state == 'present':
        for p in ['template']:
            if not module.params[p]:
                module.fail_json(msg='%s required with present state' % p)
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        stack = cloud.get_stack(name)
        if module.check_mode:
            # Check mode only predicts whether a change would happen.
            module.exit_json(changed=_system_state_change(module, stack, cloud))
        if state == 'present':
            parameters = module.params['parameters']
            if module.params['tag']:
                parameters['tags'] = module.params['tag']
                from distutils.version import StrictVersion
                min_version = '0.28.0'
                if StrictVersion(sdk.version.__version__) < StrictVersion(min_version) and stack:
                    # NOTE(review): these adjacent string literals
                    # concatenate WITHOUT spaces ("...the" + "installed..."),
                    # so the warning reads oddly -- candidate fix.
                    module.warn("To update tags using os_stack module, the"
                                "installed version of the openstacksdk"
                                "library MUST be >={min_version}"
                                "".format(min_version=min_version))
            if not stack:
                stack = _create_stack(module, stack, cloud, sdk, parameters)
            else:
                stack = _update_stack(module, stack, cloud, sdk, parameters)
            module.exit_json(changed=True,
                             stack=stack,
                             id=stack.id)
        elif state == 'absent':
            if not stack:
                changed = False
            else:
                changed = True
                if not cloud.delete_stack(name, wait=module.params['wait']):
                    module.fail_json(msg='delete stack failed for stack: %s' % name)
            module.exit_json(changed=changed)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=to_native(e))
main()
| gpl-3.0 |
boakley/robotframework-pageobjectlibrary | demo/resources/LoginPage.py | 2 | 1262 | from PageObjectLibrary import PageObject
from robot.libraries.BuiltIn import BuiltIn
class LoginPage(PageObject):
    # Page object for the demo login page; the methods below become Robot
    # Framework keywords via PageObjectLibrary.
    PAGE_TITLE = "Login - PageObjectLibrary Demo"
    PAGE_URL = "/login.html"
    # these are accessible via dot notation with self.locator
    # (eg: self.locator.username, etc)
    _locators = {
        "username": "id=id_username",
        "password": "id=id_password",
        "submit_button": "id=id_submit",
    }
    def login_as_a_normal_user(self):
        # Credentials come from the suite-level ${CONFIG} Robot variable.
        config = BuiltIn().get_variable_value("${CONFIG}")
        self.enter_username(config.username)
        self.enter_password(config.password)
        # NOTE(review): click_the_submit_button already wraps its click in
        # _wait_for_page_refresh, so the wait is nested twice here --
        # confirm that nesting is harmless.
        with self._wait_for_page_refresh():
            self.click_the_submit_button()
    def enter_username(self, username):
        """Enter the given string into the username field"""
        self.selib.input_text(self.locator.username, username)
    def enter_password(self, password):
        """Enter the given string into the password field"""
        self.selib.input_text(self.locator.password, password)
    def click_the_submit_button(self):
        """Click the submit button, and wait for the page to reload"""
        with self._wait_for_page_refresh():
            self.selib.click_button(self.locator.submit_button)
| apache-2.0 |
zhoulvwen/onedrive-d | onedrive_d/__init__.py | 2 | 2879 | # encoding: utf-8
"""
onedrive-d is an OneDrive client based on OneDrive API.
It aims to run on major Linux distributions that support Python 3.
"""
__all__ = ['api', 'cli', 'common', 'store', 'tests', 'ui', 'vendor']
__author__ = "Xiangyu Bu"
__copyright__ = "Copyright © 2014-present Xiangyu Bu"
__created__ = "2015-08-07"
__credits__ = []
__email__ = "xybu92@live.com"
__license__ = "GPL 3.0"
__project__ = "onedrive-d"
__status__ = "Development"
__updated__ = "2015-08-08"
__version__ = "2.0.0.dev1"
import os
import pkgutil
from calendar import timegm
from datetime import datetime
from pwd import getpwnam, getpwuid
from ciso8601 import parse_datetime
def get_current_os_user():
    """
    Resolve the real user who runs the current process.

    Prefers $SUDO_USER (the invoking user when running under sudo), then
    $USER; if neither is set, falls back to the process's real UID.

    :rtype: (int, str, str, int) -- (uid, username, home directory, gid)
    """
    name = os.getenv('SUDO_USER') or os.getenv('USER')
    if name:
        record = getpwnam(name)
        uid = record.pw_uid
    else:
        # No usable environment variable; resolve from the real UID instead.
        uid = os.getresuid()[0]
        record = getpwuid(uid)
        name = record.pw_name
    return uid, name, record.pw_dir, record.pw_gid
# Resolved once at import time; used elsewhere in the package (e.g. mkdir's
# chown call) to act on behalf of the real, possibly sudo-invoking, user.
OS_USER_ID, OS_USER_NAME, OS_USER_HOME, OS_USER_GID = get_current_os_user()
# Node name reported by the kernel (os.uname).
OS_HOSTNAME = os.uname()[1]
def datetime_to_str(d):
    """
    Serialize a datetime as an ISO-8601 string with a trailing 'Z'.

    :param datetime.datetime d: a naive datetime interpreted as UTC.
    :return str: e.g. '2015-08-08T01:02:03Z'.
    """
    return '{}Z'.format(d.isoformat())
def str_to_datetime(s):
    """
    Parse an ISO-8601 timestamp string into a datetime object.

    Delegates to the C-accelerated ciso8601 parser imported at module level.

    :param str s: an ISO-8601 formatted string (e.g. '2015-08-08T01:02:03Z').
    :return datetime.datetime: the parsed datetime.
    """
    return parse_datetime(s)
def datetime_to_timestamp(d):
    """
    Convert a datetime object to a UNIX timestamp.

    :param datetime.datetime d: A datetime object.
    :return float: The equivalent UNIX timestamp, microseconds included.
    """
    whole_seconds = timegm(d.utctimetuple())
    fractional = d.microsecond / 1e6
    return whole_seconds + fractional
def timestamp_to_datetime(t):
    """
    Convert a UNIX timestamp to a datetime object. Precision loss may occur.

    :param float t: A UNIX timestamp.
    :return datetime.datetime: An equivalent naive datetime in UTC.
    """
    return datetime.utcfromtimestamp(t)
def compare_timestamps(t1, t2):
    """
    Three-way compare of two UNIX timestamps with a 1 ms tolerance.

    :param float t1: first timestamp.
    :param float t2: second timestamp.
    :return int: 1 if t1 > t2, -1 if t1 < t2, 0 if within 1 ms of each other.
    """
    delta = t1 - t2
    if delta > 0.001:
        return 1
    if delta < -0.001:
        return -1
    return 0
def get_content(file_name, pkg_name='onedrive_d', is_text=True):
    """
    Read a resource file in data/.

    :param str file_name: name of the file under the package's data/ dir.
    :param str pkg_name: package the resource belongs to.
    :param True | False is_text: True to decode the bytes as UTF-8 text.
    :return str | bytes: Content of the file.
    """
    raw = pkgutil.get_data(pkg_name, 'data/' + file_name)
    return raw.decode('utf-8') if is_text else raw
def mkdir(path):
    """Create `path` (and parents) owner-only, owned by the real user.

    The chown hands ownership to the sudo-invoking user (OS_USER_ID /
    OS_USER_GID resolved at import time) so files stay accessible to them.

    NOTE(review): os.makedirs raises OSError if `path` already exists --
    presumably callers guard against that; confirm before reuse.
    """
    os.makedirs(path, mode=0o700)
    os.chown(path, OS_USER_ID, OS_USER_GID)
| gpl-3.0 |
Fusion-Rom/android_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py | 47 | 1897 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import builders
class BuildersTest(unittest.TestCase):
    """Unit tests for the builders module's name-to-path helper."""

    def test_path_from_name(self):
        # builder_path_from_name maps a builder display name to a
        # filesystem-safe path: every character that is not alphanumeric
        # becomes an underscore (as the expectations below demonstrate).
        tests = {
            'test': 'test',
            'Mac 10.6 (dbg)(1)': 'Mac_10_6__dbg__1_',
            '(.) ': '____',
        }
        for name, expected in tests.items():
            self.assertEqual(expected, builders.builder_path_from_name(name))
| bsd-3-clause |
numenta/htmresearch | htmresearch/algorithms/apical_dependent_temporal_memory.py | 7 | 38736 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""An implementation of ApicalDependentTemporalMemory"""
import numpy as np
from htmresearch.support import numpy_helpers as np2
from nupic.bindings.math import Random, SparseMatrixConnections
class ApicalDependentTemporalMemory(object):
"""
A generalized Temporal Memory that creates cell SDRs that are specific to both
the basal and apical input.
Prediction requires both basal and apical support. For sequence memory, the
result is that every sequence happens within a "world" which is specified by
the apical input. Sequences are not shared between worlds.
This class is generalized in two ways:
- This class does not specify when a 'timestep' begins and ends. It exposes
two main methods: 'depolarizeCells' and 'activateCells', and callers or
subclasses can introduce the notion of a timestep.
- This class is unaware of whether its 'basalInput' or 'apicalInput' are from
internal or external cells. They are just cell numbers. The caller knows
what these cell numbers mean, but the TemporalMemory doesn't.
"""
def __init__(self,
columnCount=2048,
basalInputSize=0,
apicalInputSize=0,
cellsPerColumn=32,
activationThreshold=13,
reducedBasalThreshold=10,
initialPermanence=0.21,
connectedPermanence=0.50,
minThreshold=10,
sampleSize=20,
permanenceIncrement=0.1,
permanenceDecrement=0.1,
basalPredictedSegmentDecrement=0.0,
apicalPredictedSegmentDecrement=0.0,
maxSynapsesPerSegment=-1,
seed=42):
"""
@param columnCount (int)
The number of minicolumns
@param basalInputSize (sequence)
The number of bits in the basal input
@param apicalInputSize (int)
The number of bits in the apical input
@param cellsPerColumn (int)
Number of cells per column
@param activationThreshold (int)
If the number of active connected synapses on a segment is at least this
threshold, the segment is said to be active.
@param reducedBasalThreshold (int)
The activation threshold of basal (lateral) segments for cells that have
active apical segments. If equal to activationThreshold (default),
this parameter has no effect.
@param initialPermanence (float)
Initial permanence of a new synapse
@param connectedPermanence (float)
If the permanence value for a synapse is greater than this value, it is said
to be connected.
@param minThreshold (int)
If the number of potential synapses active on a segment is at least this
threshold, it is said to be "matching" and is eligible for learning.
@param sampleSize (int)
How much of the active SDR to sample with synapses.
@param permanenceIncrement (float)
Amount by which permanences of synapses are incremented during learning.
@param permanenceDecrement (float)
Amount by which permanences of synapses are decremented during learning.
@param basalPredictedSegmentDecrement (float)
Amount by which segments are punished for incorrect predictions.
@param apicalPredictedSegmentDecrement (float)
Amount by which segments are punished for incorrect predictions.
@param maxSynapsesPerSegment
The maximum number of synapses per segment.
@param seed (int)
Seed for the random number generator.
"""
self.columnCount = columnCount
self.cellsPerColumn = cellsPerColumn
self.initialPermanence = initialPermanence
self.connectedPermanence = connectedPermanence
self.minThreshold = minThreshold
self.sampleSize = sampleSize
self.permanenceIncrement = permanenceIncrement
self.permanenceDecrement = permanenceDecrement
self.basalPredictedSegmentDecrement = basalPredictedSegmentDecrement
self.apicalPredictedSegmentDecrement = apicalPredictedSegmentDecrement
self.activationThreshold = activationThreshold
self.reducedBasalThreshold = reducedBasalThreshold
self.maxSynapsesPerSegment = maxSynapsesPerSegment
self.basalConnections = SparseMatrixConnections(columnCount*cellsPerColumn,
basalInputSize)
self.disableApicalDependence = False
self.apicalConnections = SparseMatrixConnections(columnCount*cellsPerColumn,
apicalInputSize)
self.rng = Random(seed)
self.activeCells = np.empty(0, dtype="uint32")
self.winnerCells = np.empty(0, dtype="uint32")
self.predictedCells = np.empty(0, dtype="uint32")
self.predictedActiveCells = np.empty(0, dtype="uint32")
self.activeBasalSegments = np.empty(0, dtype="uint32")
self.activeApicalSegments = np.empty(0, dtype="uint32")
self.matchingBasalSegments = np.empty(0, dtype="uint32")
self.matchingApicalSegments = np.empty(0, dtype="uint32")
self.basalPotentialOverlaps = np.empty(0, dtype="int32")
self.apicalPotentialOverlaps = np.empty(0, dtype="int32")
def reset(self):
"""
Clear all cell and segment activity.
"""
self.activeCells = np.empty(0, dtype="uint32")
self.winnerCells = np.empty(0, dtype="uint32")
self.predictedCells = np.empty(0, dtype="uint32")
self.predictedActiveCells = np.empty(0, dtype="uint32")
self.activeBasalSegments = np.empty(0, dtype="uint32")
self.activeApicalSegments = np.empty(0, dtype="uint32")
self.matchingBasalSegments = np.empty(0, dtype="uint32")
self.matchingApicalSegments = np.empty(0, dtype="uint32")
self.basalPotentialOverlaps = np.empty(0, dtype="int32")
self.apicalPotentialOverlaps = np.empty(0, dtype="int32")
def depolarizeCells(self, basalInput, apicalInput, learn):
"""
Calculate predictions.
@param basalInput (numpy array)
List of active input bits for the basal dendrite segments
@param apicalInput (numpy array)
List of active input bits for the apical dendrite segments
@param learn (bool)
Whether learning is enabled. Some TM implementations may depolarize cells
differently or do segment activity bookkeeping when learning is enabled.
"""
# Calculate predictions for this timestep
(activeApicalSegments,
matchingApicalSegments,
apicalPotentialOverlaps) = self._calculateSegmentActivity(
self.apicalConnections, apicalInput, self.connectedPermanence,
self.activationThreshold, self.minThreshold, self.reducedBasalThreshold)
apicallySupportedCells = self.apicalConnections.mapSegmentsToCells(
activeApicalSegments)
if not self.disableApicalDependence:
(activeBasalSegments,
matchingBasalSegments,
basalPotentialOverlaps) = self._calculateSegmentActivity(
self.basalConnections, basalInput,
self.connectedPermanence, self.activationThreshold,
self.minThreshold, self.reducedBasalThreshold,
reducedThresholdCells = apicallySupportedCells,)
predictedCells = np.intersect1d(
self.basalConnections.mapSegmentsToCells(activeBasalSegments),
apicallySupportedCells)
else:
(activeBasalSegments,
matchingBasalSegments,
basalPotentialOverlaps) = self._calculateSegmentActivity(
self.basalConnections, basalInput, self.connectedPermanence,
self.activationThreshold, self.minThreshold, self.reducedBasalThreshold)
predictedCells = self.basalConnections.mapSegmentsToCells(activeBasalSegments)
self.predictedCells = predictedCells
self.activeBasalSegments = activeBasalSegments
self.activeApicalSegments = activeApicalSegments
self.matchingBasalSegments = matchingBasalSegments
self.matchingApicalSegments = matchingApicalSegments
self.basalPotentialOverlaps = basalPotentialOverlaps
self.apicalPotentialOverlaps = apicalPotentialOverlaps
def activateCells(self,
activeColumns,
basalReinforceCandidates,
apicalReinforceCandidates,
basalGrowthCandidates,
apicalGrowthCandidates,
learn=True):
"""
Activate cells in the specified columns, using the result of the previous
'depolarizeCells' as predictions. Then learn.
@param activeColumns (numpy array)
List of active columns
@param basalReinforceCandidates (numpy array)
List of bits that the active cells may reinforce basal synapses to.
@param apicalReinforceCandidates (numpy array)
List of bits that the active cells may reinforce apical synapses to.
@param basalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new basal synapses to.
@param apicalGrowthCandidates (numpy array or None)
List of bits that the active cells may grow new apical synapses to
@param learn (bool)
Whether to grow / reinforce / punish synapses
"""
# Calculate active cells
(correctPredictedCells,
burstingColumns) = np2.setCompare(self.predictedCells, activeColumns,
self.predictedCells / self.cellsPerColumn,
rightMinusLeft=True)
newActiveCells = np.concatenate((correctPredictedCells,
np2.getAllCellsInColumns(
burstingColumns, self.cellsPerColumn)))
# Calculate learning
(learningActiveBasalSegments,
learningActiveApicalSegments,
learningMatchingBasalSegments,
learningMatchingApicalSegments,
basalSegmentsToPunish,
apicalSegmentsToPunish,
newSegmentCells,
learningCells) = self._calculateLearning(activeColumns,
burstingColumns,
correctPredictedCells,
self.activeBasalSegments,
self.activeApicalSegments,
self.matchingBasalSegments,
self.matchingApicalSegments,
self.basalPotentialOverlaps,
self.apicalPotentialOverlaps)
if learn:
# Learn on existing segments
for learningSegments in (learningActiveBasalSegments,
learningMatchingBasalSegments):
self._learn(self.basalConnections, self.rng, learningSegments,
basalReinforceCandidates, basalGrowthCandidates,
self.basalPotentialOverlaps,
self.initialPermanence, self.sampleSize,
self.permanenceIncrement, self.permanenceDecrement,
self.maxSynapsesPerSegment)
for learningSegments in (learningActiveApicalSegments,
learningMatchingApicalSegments):
self._learn(self.apicalConnections, self.rng, learningSegments,
apicalReinforceCandidates, apicalGrowthCandidates,
self.apicalPotentialOverlaps, self.initialPermanence,
self.sampleSize, self.permanenceIncrement,
self.permanenceDecrement, self.maxSynapsesPerSegment)
# Punish incorrect predictions
if self.basalPredictedSegmentDecrement != 0.0:
self.basalConnections.adjustActiveSynapses(
basalSegmentsToPunish, basalReinforceCandidates,
-self.basalPredictedSegmentDecrement)
if self.apicalPredictedSegmentDecrement != 0.0:
self.apicalConnections.adjustActiveSynapses(
apicalSegmentsToPunish, apicalReinforceCandidates,
-self.apicalPredictedSegmentDecrement)
# Only grow segments if there is basal *and* apical input.
if len(basalGrowthCandidates) > 0 and len(apicalGrowthCandidates) > 0:
self._learnOnNewSegments(self.basalConnections, self.rng,
newSegmentCells, basalGrowthCandidates,
self.initialPermanence, self.sampleSize,
self.maxSynapsesPerSegment)
self._learnOnNewSegments(self.apicalConnections, self.rng,
newSegmentCells, apicalGrowthCandidates,
self.initialPermanence, self.sampleSize,
self.maxSynapsesPerSegment)
# Save the results
newActiveCells.sort()
learningCells.sort()
self.activeCells = newActiveCells
self.winnerCells = learningCells
self.predictedActiveCells = correctPredictedCells
def _calculateLearning(self,
activeColumns,
burstingColumns,
correctPredictedCells,
activeBasalSegments,
activeApicalSegments,
matchingBasalSegments,
matchingApicalSegments,
basalPotentialOverlaps,
apicalPotentialOverlaps):
"""
Learning occurs on pairs of segments. Correctly predicted cells always have
active basal and apical segments, and we learn on these segments. In
bursting columns, we either learn on an existing segment pair, or we grow a
new pair of segments.
@param activeColumns (numpy array)
@param burstingColumns (numpy array)
@param correctPredictedCells (numpy array)
@param activeBasalSegments (numpy array)
@param activeApicalSegments (numpy array)
@param matchingBasalSegments (numpy array)
@param matchingApicalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@param apicalPotentialOverlaps (numpy array)
@return (tuple)
- learningActiveBasalSegments (numpy array)
Active basal segments on correct predicted cells
- learningActiveApicalSegments (numpy array)
Active apical segments on correct predicted cells
- learningMatchingBasalSegments (numpy array)
Matching basal segments selected for learning in bursting columns
- learningMatchingApicalSegments (numpy array)
Matching apical segments selected for learning in bursting columns
- basalSegmentsToPunish (numpy array)
Basal segments that should be punished for predicting an inactive column
- apicalSegmentsToPunish (numpy array)
Apical segments that should be punished for predicting an inactive column
- newSegmentCells (numpy array)
Cells in bursting columns that were selected to grow new segments
- learningCells (numpy array)
Every cell that has a learning segment or was selected to grow a segment
"""
# Correctly predicted columns
learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell(
activeBasalSegments, correctPredictedCells)
learningActiveApicalSegments = self.apicalConnections.filterSegmentsByCell(
activeApicalSegments, correctPredictedCells)
# Bursting columns
cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells(
matchingBasalSegments)
cellsForMatchingApical = self.apicalConnections.mapSegmentsToCells(
matchingApicalSegments)
matchingCells = np.intersect1d(
cellsForMatchingBasal, cellsForMatchingApical)
(matchingCellsInBurstingColumns,
burstingColumnsWithNoMatch) = np2.setCompare(
matchingCells, burstingColumns, matchingCells / self.cellsPerColumn,
rightMinusLeft=True)
(learningMatchingBasalSegments,
learningMatchingApicalSegments) = self._chooseBestSegmentPairPerColumn(
matchingCellsInBurstingColumns, matchingBasalSegments,
matchingApicalSegments, basalPotentialOverlaps, apicalPotentialOverlaps)
newSegmentCells = self._getCellsWithFewestSegments(
burstingColumnsWithNoMatch)
# Incorrectly predicted columns
if self.basalPredictedSegmentDecrement > 0.0:
correctMatchingBasalMask = np.in1d(
cellsForMatchingBasal / self.cellsPerColumn, activeColumns)
basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask]
else:
basalSegmentsToPunish = ()
if self.apicalPredictedSegmentDecrement > 0.0:
correctMatchingApicalMask = np.in1d(
cellsForMatchingApical / self.cellsPerColumn, activeColumns)
apicalSegmentsToPunish = matchingApicalSegments[~correctMatchingApicalMask]
else:
apicalSegmentsToPunish = ()
# Make a list of every cell that is learning
learningCells = np.concatenate(
(correctPredictedCells,
self.basalConnections.mapSegmentsToCells(learningMatchingBasalSegments),
newSegmentCells))
return (learningActiveBasalSegments,
learningActiveApicalSegments,
learningMatchingBasalSegments,
learningMatchingApicalSegments,
basalSegmentsToPunish,
apicalSegmentsToPunish,
newSegmentCells,
learningCells)
  @staticmethod
  def _calculateSegmentActivity(connections, activeInput, connectedPermanence,
                                activationThreshold, minThreshold,
                                reducedThreshold,
                                reducedThresholdCells = ()):
    """
    Calculate the active and matching segments for this timestep.

    Used for both basal and apical connections; the reduced-threshold path is
    only exercised when `reducedThresholdCells` is non-empty (the basal case).

    @param connections (SparseMatrixConnections)
    @param activeInput (numpy array)
    @param connectedPermanence (float) synapses at/above this count as connected
    @param activationThreshold (int) connected-synapse count for a spike
    @param minThreshold (int) potential-synapse count to be "matching"
    @param reducedThreshold (int) lowered activation threshold for segments on
           cells listed in reducedThresholdCells
    @param reducedThresholdCells (numpy array) cells (e.g. apically supported
           ones) whose segments may activate at the reduced threshold

    @return (tuple)
    - activeSegments (numpy array)
      Dendrite segments with enough active connected synapses to cause a
      dendritic spike
    - matchingSegments (numpy array)
      Dendrite segments with enough active potential synapses to be selected for
      learning in a bursting column
    - potentialOverlaps (numpy array)
      The number of active potential synapses for each segment.
      Includes counts for active, matching, and nonmatching segments.
    """
    # Active apical segments lower the activation threshold for basal segments
    overlaps = connections.computeActivity(activeInput, connectedPermanence)
    outrightActiveSegments = np.flatnonzero(overlaps >= activationThreshold)
    if (reducedThreshold != activationThreshold and
        len(reducedThresholdCells) > 0):
      # Segments in the band [reducedThreshold, activationThreshold) are only
      # conditionally active: they fire iff their cell has apical support.
      potentiallyActiveSegments = np.flatnonzero(
        (overlaps < activationThreshold) & (overlaps >= reducedThreshold))
      cellsOfCASegments = connections.mapSegmentsToCells(
        potentiallyActiveSegments)
      # apically active segments are condit. active segments from apically
      # active cells
      conditionallyActiveSegments = potentiallyActiveSegments[
        np.in1d(cellsOfCASegments, reducedThresholdCells)]
      activeSegments = np.concatenate((outrightActiveSegments,
                                       conditionallyActiveSegments))
    else:
      activeSegments = outrightActiveSegments

    # Matching: counted over all potential synapses, no permanence filter.
    potentialOverlaps = connections.computeActivity(activeInput)
    matchingSegments = np.flatnonzero(potentialOverlaps >= minThreshold)

    return (activeSegments,
            matchingSegments,
            potentialOverlaps)
@staticmethod
def _learn(connections, rng, learningSegments, activeInput, growthCandidates,
potentialOverlaps, initialPermanence, sampleSize,
permanenceIncrement, permanenceDecrement, maxSynapsesPerSegment):
"""
Adjust synapse permanences, and grow new synapses.
@param learningActiveSegments (numpy array)
@param learningMatchingSegments (numpy array)
@param activeInput (numpy array)
@param growthCandidates (numpy array)
@param potentialOverlaps (numpy array)
"""
# Learn on existing segments
connections.adjustSynapses(learningSegments, activeInput,
permanenceIncrement, -permanenceDecrement)
# Grow new synapses. Calculate "maxNew", the maximum number of synapses to
# grow per segment. "maxNew" might be a number or it might be a list of
# numbers.
if sampleSize == -1:
maxNew = len(growthCandidates)
else:
maxNew = sampleSize - potentialOverlaps[learningSegments]
if maxSynapsesPerSegment != -1:
synapseCounts = connections.mapSegmentsToSynapseCounts(
learningSegments)
numSynapsesToReachMax = maxSynapsesPerSegment - synapseCounts
maxNew = np.where(maxNew <= numSynapsesToReachMax,
maxNew, numSynapsesToReachMax)
connections.growSynapsesToSample(learningSegments, growthCandidates,
maxNew, initialPermanence, rng)
  @staticmethod
  def _learnOnNewSegments(connections, rng, newSegmentCells, growthCandidates,
                          initialPermanence, sampleSize, maxSynapsesPerSegment):
    """
    Create new segments, and grow synapses on them.

    One new segment is created per cell in newSegmentCells, each receiving the
    same number of synapses, sampled from growthCandidates.

    @param connections (SparseMatrixConnections)
    @param rng (Random)
    @param newSegmentCells (numpy array) cells to grow a new segment on
    @param growthCandidates (numpy array) input bits eligible for new synapses
    @param initialPermanence (float) permanence assigned to each new synapse
    @param sampleSize (int) max synapses to grow per segment, -1 for no limit
    @param maxSynapsesPerSegment (int) hard per-segment cap, -1 for no cap
    """
    numNewSynapses = len(growthCandidates)

    # Respect both the sampling limit and the per-segment synapse cap.
    if sampleSize != -1:
      numNewSynapses = min(numNewSynapses, sampleSize)

    if maxSynapsesPerSegment != -1:
      numNewSynapses = min(numNewSynapses, maxSynapsesPerSegment)

    newSegments = connections.createSegments(newSegmentCells)
    connections.growSynapsesToSample(newSegments, growthCandidates,
                                     numNewSynapses, initialPermanence,
                                     rng)
def _chooseBestSegmentPairPerColumn(self,
matchingCellsInBurstingColumns,
matchingBasalSegments,
matchingApicalSegments,
basalPotentialOverlaps,
apicalPotentialOverlaps):
"""
Choose the best pair of matching segments - one basal and one apical - for
each column. Pairs are ranked by the sum of their potential overlaps.
When there's a tie, the first pair wins.
@param matchingCellsInBurstingColumns (numpy array)
Cells in bursting columns that have at least one matching basal segment and
at least one matching apical segment
@param matchingBasalSegments (numpy array)
@param matchingApicalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@param apicalPotentialOverlaps (numpy array)
@return (tuple)
- learningBasalSegments (numpy array)
The selected basal segments
- learningApicalSegments (numpy array)
The selected apical segments
"""
basalCandidateSegments = self.basalConnections.filterSegmentsByCell(
matchingBasalSegments, matchingCellsInBurstingColumns)
apicalCandidateSegments = self.apicalConnections.filterSegmentsByCell(
matchingApicalSegments, matchingCellsInBurstingColumns)
# Sort everything once rather than inside of each call to argmaxMulti.
self.basalConnections.sortSegmentsByCell(basalCandidateSegments)
self.apicalConnections.sortSegmentsByCell(apicalCandidateSegments)
# Narrow it down to one pair per cell.
oneBasalPerCellFilter = np2.argmaxMulti(
basalPotentialOverlaps[basalCandidateSegments],
self.basalConnections.mapSegmentsToCells(basalCandidateSegments),
assumeSorted=True)
basalCandidateSegments = basalCandidateSegments[oneBasalPerCellFilter]
oneApicalPerCellFilter = np2.argmaxMulti(
apicalPotentialOverlaps[apicalCandidateSegments],
self.apicalConnections.mapSegmentsToCells(apicalCandidateSegments),
assumeSorted=True)
apicalCandidateSegments = apicalCandidateSegments[oneApicalPerCellFilter]
# Narrow it down to one pair per column.
cellScores = (basalPotentialOverlaps[basalCandidateSegments] +
apicalPotentialOverlaps[apicalCandidateSegments])
columnsForCandidates = (
self.basalConnections.mapSegmentsToCells(basalCandidateSegments) /
self.cellsPerColumn)
onePerColumnFilter = np2.argmaxMulti(cellScores, columnsForCandidates,
assumeSorted=True)
learningBasalSegments = basalCandidateSegments[onePerColumnFilter]
learningApicalSegments = apicalCandidateSegments[onePerColumnFilter]
return (learningBasalSegments,
learningApicalSegments)
def _getCellsWithFewestSegments(self, columns):
"""
For each column, get the cell that has the fewest total segments (basal or
apical). Break ties randomly.
@param columns (numpy array)
Columns to check
@return (numpy array)
One cell for each of the provided columns
"""
candidateCells = np2.getAllCellsInColumns(columns, self.cellsPerColumn)
# Arrange the segment counts into one row per minicolumn.
segmentCounts = np.reshape(
self.basalConnections.getSegmentCounts(candidateCells) +
self.apicalConnections.getSegmentCounts(candidateCells),
newshape=(len(columns),
self.cellsPerColumn))
# Filter to just the cells that are tied for fewest in their minicolumn.
minSegmentCounts = np.amin(segmentCounts, axis=1, keepdims=True)
candidateCells = candidateCells[np.flatnonzero(segmentCounts ==
minSegmentCounts)]
# Filter to one cell per column, choosing randomly from the minimums.
# To do the random choice, add a random offset to each index in-place, using
# casting to floor the result.
(_,
onePerColumnFilter,
numCandidatesInColumns) = np.unique(candidateCells / self.cellsPerColumn,
return_index=True, return_counts=True)
offsetPercents = np.empty(len(columns), dtype="float32")
self.rng.initializeReal32Array(offsetPercents)
np.add(onePerColumnFilter,
offsetPercents*numCandidatesInColumns,
out=onePerColumnFilter,
casting="unsafe")
return candidateCells[onePerColumnFilter]
def getActiveCells(self):
"""
@return (numpy array)
Active cells
"""
return self.activeCells
def getPredictedActiveCells(self):
"""
@return (numpy array)
Active cells that were correctly predicted
"""
return np.intersect1d(self.activeCells, self.predictedCells)
def getWinnerCells(self):
"""
@return (numpy array)
Cells that were selected for learning
"""
return self.winnerCells
def getPredictedCells(self):
"""
@return (numpy array)
Cells that were predicted for this timestep
"""
return self.predictedCells
def getActiveBasalSegments(self):
"""
@return (numpy array)
Active basal segments for this timestep
"""
return self.activeBasalSegments
  def getActiveApicalSegments(self):
    """
    @return (numpy array)
    Active apical segments for this timestep
    """
    return self.activeApicalSegments
def numberOfColumns(self):
""" Returns the number of columns in this layer.
@return (int) Number of columns
"""
return self.columnCount
def numberOfCells(self):
"""
Returns the number of cells in this layer.
@return (int) Number of cells
"""
return self.numberOfColumns() * self.cellsPerColumn
def getCellsPerColumn(self):
"""
Returns the number of cells per column.
@return (int) The number of cells per column.
"""
return self.cellsPerColumn
def getActivationThreshold(self):
"""
Returns the activation threshold.
@return (int) The activation threshold.
"""
return self.activationThreshold
def setActivationThreshold(self, activationThreshold):
"""
Sets the activation threshold.
@param activationThreshold (int) activation threshold.
"""
self.activationThreshold = activationThreshold
def getInitialPermanence(self):
"""
Get the initial permanence.
@return (float) The initial permanence.
"""
return self.initialPermanence
def setInitialPermanence(self, initialPermanence):
"""
Sets the initial permanence.
@param initialPermanence (float) The initial permanence.
"""
self.initialPermanence = initialPermanence
def getMinThreshold(self):
"""
Returns the min threshold.
@return (int) The min threshold.
"""
return self.minThreshold
def setMinThreshold(self, minThreshold):
"""
Sets the min threshold.
@param minThreshold (int) min threshold.
"""
self.minThreshold = minThreshold
def getSampleSize(self):
"""
Gets the sampleSize.
@return (int)
"""
return self.sampleSize
def setSampleSize(self, sampleSize):
"""
Sets the sampleSize.
@param sampleSize (int)
"""
self.sampleSize = sampleSize
def getPermanenceIncrement(self):
"""
Get the permanence increment.
@return (float) The permanence increment.
"""
return self.permanenceIncrement
def setPermanenceIncrement(self, permanenceIncrement):
"""
Sets the permanence increment.
@param permanenceIncrement (float) The permanence increment.
"""
self.permanenceIncrement = permanenceIncrement
def getPermanenceDecrement(self):
"""
Get the permanence decrement.
@return (float) The permanence decrement.
"""
return self.permanenceDecrement
def setPermanenceDecrement(self, permanenceDecrement):
"""
Sets the permanence decrement.
@param permanenceDecrement (float) The permanence decrement.
"""
self.permanenceDecrement = permanenceDecrement
def getBasalPredictedSegmentDecrement(self):
"""
Get the predicted segment decrement.
@return (float) The predicted segment decrement.
"""
return self.basalPredictedSegmentDecrement
def setBasalPredictedSegmentDecrement(self, predictedSegmentDecrement):
"""
Sets the predicted segment decrement.
@param predictedSegmentDecrement (float) The predicted segment decrement.
"""
self.basalPredictedSegmentDecrement = basalPredictedSegmentDecrement
def getApicalPredictedSegmentDecrement(self):
"""
Get the predicted segment decrement.
@return (float) The predicted segment decrement.
"""
return self.apicalPredictedSegmentDecrement
def setApicalPredictedSegmentDecrement(self, predictedSegmentDecrement):
"""
Sets the predicted segment decrement.
@param predictedSegmentDecrement (float) The predicted segment decrement.
"""
self.apicalPredictedSegmentDecrement = apicalPredictedSegmentDecrement
def getConnectedPermanence(self):
"""
Get the connected permanence.
@return (float) The connected permanence.
"""
return self.connectedPermanence
def setConnectedPermanence(self, connectedPermanence):
"""
Sets the connected permanence.
@param connectedPermanence (float) The connected permanence.
"""
self.connectedPermanence = connectedPermanence
class TripleMemory(ApicalDependentTemporalMemory):
  """
  Pair memory with apical dependence, i.e. "triple memory".

  Convenience subclass: a single compute() call performs one whole timestep
  (depolarize, activate, learn) on the inherited apical-dependent TM.
  """

  def compute(self,
              activeColumns,
              basalInput,
              apicalInput=(),
              basalGrowthCandidates=None,
              apicalGrowthCandidates=None,
              learn=True):
    """
    Perform one timestep. Use the basal and apical input to form a set of
    predictions, then activate the specified columns, then learn.

    @param activeColumns (numpy array)
    List of active columns

    @param basalInput (numpy array)
    List of active input bits for the basal dendrite segments

    @param apicalInput (numpy array)
    List of active input bits for the apical dendrite segments

    @param basalGrowthCandidates (numpy array or None)
    List of bits that the active cells may grow new basal synapses to.
    If None, the basalInput is assumed to be growth candidates.

    @param apicalGrowthCandidates (numpy array or None)
    List of bits that the active cells may grow new apical synapses to
    If None, the apicalInput is assumed to be growth candidates.

    @param learn (bool)
    Whether to grow / reinforce / punish synapses
    """
    activeColumns = np.asarray(activeColumns)
    basalInput = np.asarray(basalInput)
    apicalInput = np.asarray(apicalInput)

    # Growth candidates default to the corresponding input bits.
    if basalGrowthCandidates is None:
      basalGrowthCandidates = basalInput
    basalGrowthCandidates = np.asarray(basalGrowthCandidates)

    if apicalGrowthCandidates is None:
      apicalGrowthCandidates = apicalInput
    apicalGrowthCandidates = np.asarray(apicalGrowthCandidates)

    self.depolarizeCells(basalInput, apicalInput, learn)
    # The raw inputs double as the reinforcement candidates.
    self.activateCells(activeColumns, basalInput, apicalInput,
                       basalGrowthCandidates, apicalGrowthCandidates, learn)

  def getPredictedCells(self):
    """
    @return (numpy array)
    Cells that were predicted for this timestep

    NOTE(review): identical to the base-class implementation; presumably
    kept for explicitness.
    """
    return self.predictedCells

  def getBasalPredictedCells(self):
    """
    @return (numpy array)
    Cells with active basal segments
    """
    return np.unique(
      self.basalConnections.mapSegmentsToCells(
        self.activeBasalSegments))

  def getApicalPredictedCells(self):
    """
    @return (numpy array)
    Cells with active apical segments
    """
    return np.unique(
      self.apicalConnections.mapSegmentsToCells(
        self.activeApicalSegments))
class ApicalDependentSequenceMemory(ApicalDependentTemporalMemory):
  """
  Sequence memory with apical dependence.
  """

  def __init__(self,
               columnCount=2048,
               apicalInputSize=0,
               cellsPerColumn=32,
               activationThreshold=13,
               reducedBasalThreshold=13,
               initialPermanence=0.21,
               connectedPermanence=0.50,
               minThreshold=10,
               sampleSize=20,
               permanenceIncrement=0.1,
               permanenceDecrement=0.1,
               basalPredictedSegmentDecrement=0.0,
               apicalPredictedSegmentDecrement=0.0,
               maxSynapsesPerSegment=-1,
               seed=42):
    # The basal input is the layer's own cells, so its size is the total
    # cell count.
    super(ApicalDependentSequenceMemory, self).__init__(
      columnCount=columnCount,
      basalInputSize=columnCount * cellsPerColumn,
      apicalInputSize=apicalInputSize,
      cellsPerColumn=cellsPerColumn,
      activationThreshold=activationThreshold,
      reducedBasalThreshold=reducedBasalThreshold,
      initialPermanence=initialPermanence,
      connectedPermanence=connectedPermanence,
      minThreshold=minThreshold,
      sampleSize=sampleSize,
      permanenceIncrement=permanenceIncrement,
      permanenceDecrement=permanenceDecrement,
      basalPredictedSegmentDecrement=basalPredictedSegmentDecrement,
      apicalPredictedSegmentDecrement=apicalPredictedSegmentDecrement,
      maxSynapsesPerSegment=maxSynapsesPerSegment,
      seed=seed)

    # State carried over from the previous timestep.
    self.prevApicalInput = np.empty(0, dtype="uint32")
    self.prevApicalGrowthCandidates = np.empty(0, dtype="uint32")
    self.prevPredictedCells = np.empty(0, dtype="uint32")

  def reset(self):
    """
    Clear all cell and segment activity, including the apical input and
    prediction saved from the previous timestep.
    """
    super(ApicalDependentSequenceMemory, self).reset()

    self.prevApicalInput = np.empty(0, dtype="uint32")
    self.prevApicalGrowthCandidates = np.empty(0, dtype="uint32")
    self.prevPredictedCells = np.empty(0, dtype="uint32")

  def compute(self,
              activeColumns,
              apicalInput=(),
              apicalGrowthCandidates=None,
              learn=True):
    """
    Run one timestep. Activate the specified columns, using the predictions
    from the previous timestep, then learn. Then form a new set of
    predictions using the new active cells and the apicalInput.

    @param activeColumns (numpy array)
    List of active columns

    @param apicalInput (numpy array)
    List of active input bits for the apical dendrite segments

    @param apicalGrowthCandidates (numpy array or None)
    List of bits that the active cells may grow new apical synapses to.
    If None, the apicalInput is assumed to be growth candidates.

    @param learn (bool)
    Whether to grow / reinforce / punish synapses
    """
    activeColumns = np.asarray(activeColumns)
    apicalInput = np.asarray(apicalInput)
    apicalGrowthCandidates = np.asarray(
      apicalInput if apicalGrowthCandidates is None
      else apicalGrowthCandidates)

    # Save the prediction that is about to be consumed, then activate
    # cells using the *previous* timestep's apical input.
    self.prevPredictedCells = self.predictedCells
    self.activateCells(activeColumns, self.activeCells, self.prevApicalInput,
                       self.winnerCells, self.prevApicalGrowthCandidates,
                       learn)

    # Form the next timestep's predictions from the new activity.
    self.depolarizeCells(self.activeCells, apicalInput, learn)

    self.prevApicalInput = apicalInput.copy()
    self.prevApicalGrowthCandidates = apicalGrowthCandidates.copy()

  def getPredictedCells(self):
    """
    @return (numpy array)
    The prediction from the previous timestep
    """
    return self.prevPredictedCells

  def getNextPredictedCells(self):
    """
    @return (numpy array)
    The prediction for the next timestep
    """
    return self.predictedCells

  def getNextBasalPredictedCells(self):
    """
    @return (numpy array)
    Cells with at least one active basal segment
    """
    basalCells = self.basalConnections.mapSegmentsToCells(
      self.activeBasalSegments)
    return np.unique(basalCells)

  def getNextApicalPredictedCells(self):
    """
    @return (numpy array)
    Cells with at least one active apical segment
    """
    apicalCells = self.apicalConnections.mapSegmentsToCells(
      self.activeApicalSegments)
    return np.unique(apicalCells)
| agpl-3.0 |
MphasisWyde/eWamSublimeAdaptor | POC/v0_3_POC_with_project_aborted/third-party/bravado_core/validate.py | 7 | 2412 | """
Delegate as much validation as possible out to jsonschema. This module serves
as the single point of entry for validations should we need to further
customize the behavior.
"""
from bravado_core.exception import SwaggerMappingError
from bravado_core.schema import SWAGGER_PRIMITIVES
from bravado_core.swagger20_validator import get_validator_type
def validate_schema_object(swagger_spec, schema_object_spec, value):
    """Validate ``value`` against the given schema object spec, dispatching
    on the spec's swagger ``type``.

    :raises ValidationError: when jsonschema validation fails.
    :raises SwaggerMappingError: on invalid Swagger `type`.
    :raises SwaggerValidationError: when user-defined format validation fails.
    """
    deref = swagger_spec.deref
    spec = deref(schema_object_spec)
    obj_type = deref(spec.get('type'))

    if obj_type in SWAGGER_PRIMITIVES:
        validate_primitive(swagger_spec, spec, value)
        return
    if obj_type == 'array':
        validate_array(swagger_spec, spec, value)
        return
    if obj_type == 'object':
        validate_object(swagger_spec, spec, value)
        return
    if obj_type == 'file':
        # File content is not validated.
        return
    raise SwaggerMappingError('Unknown type {0} for value {1}'.format(
        obj_type, value))
def validate_primitive(swagger_spec, primitive_spec, value):
    """Validate a value against a swagger primitive spec.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :param primitive_spec: spec for a swagger primitive type in dict form
    :type value: int, string, float, long, etc
    """
    validator_class = get_validator_type(swagger_spec)
    validator = validator_class(
        primitive_spec,
        format_checker=swagger_spec.format_checker,
        resolver=swagger_spec.resolver)
    validator.validate(value)
def validate_array(swagger_spec, array_spec, value):
    """Validate a value against a swagger 'array' spec.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :param array_spec: spec for an 'array' type in dict form
    :type value: list
    """
    validator_class = get_validator_type(swagger_spec)
    validator = validator_class(
        array_spec,
        format_checker=swagger_spec.format_checker,
        resolver=swagger_spec.resolver)
    validator.validate(value)
def validate_object(swagger_spec, object_spec, value):
    """Validate a value against a swagger 'object' spec.

    :type swagger_spec: :class:`bravado_core.spec.Spec`
    :param object_spec: spec for an 'object' type in dict form
    :type value: dict
    """
    validator_class = get_validator_type(swagger_spec)
    validator = validator_class(
        object_spec,
        format_checker=swagger_spec.format_checker,
        resolver=swagger_spec.resolver)
    validator.validate(value)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.