repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
JakeLowey/HackRPI2 | django/contrib/gis/gdal/envelope.py | 94 | 7041 | """
The GDAL/OGR library uses an Envelope structure to hold the bounding
box information for a geometry. The envelope (bounding box) contains
two pairs of coordinates, one for the lower left coordinate and one
for the upper right coordinate:
+----------o Upper right; (max_x, max_y)
| |
| |
| |
Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import OGRException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
    "Represents the OGREnvelope C Structure."
    # Field order mirrors the OGREnvelope struct in GDAL's ogr_core.h and
    # must not change: ctypes maps fields to the C layout positionally.
    _fields_ = [("MinX", c_double),
                ("MaxX", c_double),
                ("MinY", c_double),
                ("MaxY", c_double),
                ]
class Envelope(object):
    """
    The Envelope object is a C structure that contains the minimum and
    maximum X, Y coordinates for a rectangle bounding box. The naming
    of the variables is compatible with the OGR Envelope structure.
    """

    def __init__(self, *args):
        """
        Initialize from an OGREnvelope structure, a 4-element tuple or list
        of (min_x, min_y, max_x, max_y), or 4 individual numeric arguments.

        Raises OGRException for a wrong argument count or an inverted
        envelope (min > max), and TypeError for an unsupported argument type.
        """
        if len(args) == 1:
            if isinstance(args[0], OGREnvelope):
                # OGREnvelope (a ctypes Structure) was passed in.
                self._envelope = args[0]
            elif isinstance(args[0], (tuple, list)):
                # A sequence was passed in; it must have exactly 4 elements.
                if len(args[0]) != 4:
                    raise OGRException('Incorrect number of tuple elements (%d).' % len(args[0]))
                else:
                    self._from_sequence(args[0])
            else:
                raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
        elif len(args) == 4:
            # Individual parameters passed in.  Build a list (not a bare
            # map()) so _from_sequence can index it on Python 3, where
            # map() returns a lazy iterator.
            self._from_sequence([float(a) for a in args])
        else:
            raise OGRException('Incorrect number (%d) of arguments.' % len(args))

        # Checking the x,y coordinates
        if self.min_x > self.max_x:
            raise OGRException('Envelope minimum X > maximum X.')
        if self.min_y > self.max_y:
            raise OGRException('Envelope minimum Y > maximum Y.')

    def __eq__(self, other):
        """
        Returns True if the envelopes are equivalent; can compare against
        other Envelopes and 4-tuples.
        """
        if isinstance(other, Envelope):
            return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
                   (self.max_x == other.max_x) and (self.max_y == other.max_y)
        elif isinstance(other, tuple) and len(other) == 4:
            return (self.min_x == other[0]) and (self.min_y == other[1]) and \
                   (self.max_x == other[2]) and (self.max_y == other[3])
        else:
            # Deliberately an error (not False) for any other operand type.
            raise OGRException('Equivalence testing only works with other Envelopes.')

    def __str__(self):
        "Returns a string representation of the tuple."
        return str(self.tuple)

    def _from_sequence(self, seq):
        "Initializes the C OGR Envelope structure from the given sequence."
        self._envelope = OGREnvelope()
        self._envelope.MinX = seq[0]
        self._envelope.MinY = seq[1]
        self._envelope.MaxX = seq[2]
        self._envelope.MaxY = seq[3]

    def expand_to_include(self, *args):
        """
        Modifies the envelope to expand to include the boundaries of
        the passed-in 2-tuple (a point), 4-tuple (an extent) or
        envelope.
        """
        # We provide a number of different signatures for this method,
        # and the logic here is all about converting them into a
        # 4-tuple single parameter which does the actual work of
        # expanding the envelope.
        if len(args) == 1:
            if isinstance(args[0], Envelope):
                return self.expand_to_include(args[0].tuple)
            elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
                # A point-like object: expand by a degenerate extent.
                return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
            elif isinstance(args[0], (tuple, list)):
                # A tuple was passed in.
                if len(args[0]) == 2:
                    return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
                elif len(args[0]) == 4:
                    (minx, miny, maxx, maxy) = args[0]
                    if minx < self._envelope.MinX:
                        self._envelope.MinX = minx
                    if miny < self._envelope.MinY:
                        self._envelope.MinY = miny
                    if maxx > self._envelope.MaxX:
                        self._envelope.MaxX = maxx
                    if maxy > self._envelope.MaxY:
                        self._envelope.MaxY = maxy
                else:
                    raise OGRException('Incorrect number of tuple elements (%d).' % len(args[0]))
            else:
                raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
        elif len(args) == 2:
            # An x and an y parameter were passed in
            return self.expand_to_include((args[0], args[1], args[0], args[1]))
        elif len(args) == 4:
            # Individual parameters passed in.
            return self.expand_to_include(args)
        else:
            # Bug fix: report len(args), not len(args[0]) -- args may be
            # empty (IndexError) or hold non-sequences (TypeError) here.
            raise OGRException('Incorrect number (%d) of arguments.' % len(args))

    @property
    def min_x(self):
        "Returns the value of the minimum X coordinate."
        return self._envelope.MinX

    @property
    def min_y(self):
        "Returns the value of the minimum Y coordinate."
        return self._envelope.MinY

    @property
    def max_x(self):
        "Returns the value of the maximum X coordinate."
        return self._envelope.MaxX

    @property
    def max_y(self):
        "Returns the value of the maximum Y coordinate."
        return self._envelope.MaxY

    @property
    def ur(self):
        "Returns the upper-right coordinate."
        return (self.max_x, self.max_y)

    @property
    def ll(self):
        "Returns the lower-left coordinate."
        return (self.min_x, self.min_y)

    @property
    def tuple(self):
        "Returns a tuple representing the envelope."
        return (self.min_x, self.min_y, self.max_x, self.max_y)

    @property
    def wkt(self):
        "Returns WKT representing a Polygon for this envelope."
        # TODO: Fix significant figures.
        return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
               (self.min_x, self.min_y, self.min_x, self.max_y,
                self.max_x, self.max_y, self.max_x, self.min_y,
                self.min_x, self.min_y)
| mit |
daineseh/kodi-plugin.video.ted-talks-chinese | youtube_dl/extractor/goldenmoustache.py | 159 | 1739 | from __future__ import unicode_literals
from .common import InfoExtractor
class GoldenMoustacheIE(InfoExtractor):
    """Extractor for videos hosted on goldenmoustache.com."""
    # URL layout: /<slug>-<numeric id>; the numeric id is the video id.
    _VALID_URL = r'https?://(?:www\.)?goldenmoustache\.com/(?P<display_id>[\w-]+)-(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.goldenmoustache.com/suricate-le-poker-3700/',
        'md5': '0f904432fa07da5054d6c8beb5efb51a',
        'info_dict': {
            'id': '3700',
            'ext': 'mp4',
            'title': 'Suricate - Le Poker',
            'description': 'md5:3d1f242f44f8c8cb0a106f1fd08e5dc9',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'http://www.goldenmoustache.com/le-lab-tout-effacer-mc-fly-et-carlito-55249/',
        'md5': '27f0c50fb4dd5f01dc9082fc67cd5700',
        'info_dict': {
            'id': '55249',
            'ext': 'mp4',
            'title': 'Le LAB - Tout Effacer (Mc Fly et Carlito)',
            'description': 'md5:9b7fbf11023fb2250bd4b185e3de3b2a',
            'thumbnail': 're:^https?://.*\.(?:png|jpg)$',
        }
    }]

    def _real_extract(self, url):
        """Download the page and extract the direct MP4 URL plus metadata."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The player tag carries the direct MP4 source in data-* attributes.
        video_url = self._html_search_regex(
            r'data-src-type="mp4" data-src="([^"]+)"', webpage, 'video URL')
        # Strip the optional " - Golden Moustache" suffix from the page title.
        title = self._html_search_regex(
            r'<title>(.*?)(?: - Golden Moustache)?</title>', webpage, 'title')
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._og_search_description(webpage)
        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
| gpl-2.0 |
saydulk/newfies-dialer | newfies/apirest/view_callrequest.py | 4 | 1568 | # -*- coding: utf-8 -*-
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <info@star2billing.com>
#
from rest_framework import viewsets
from apirest.callrequest_serializers import CallrequestSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from dialer_cdr.models import Callrequest
from apirest.permissions import CustomObjectPermissions
class CallrequestViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows callrequests to be viewed or edited.
    """
    model = Callrequest
    queryset = Callrequest.objects.all()
    serializer_class = CallrequestSerializer
    # Both HTTP Basic and session auth are accepted; object-level access is
    # enforced by CustomObjectPermissions on top of IsAuthenticated.
    authentication = (BasicAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, CustomObjectPermissions)

    def pre_save(self, obj):
        # Stamp the saved object with the requesting user as its owner.
        obj.user = self.request.user

    def get_queryset(self):
        """
        This view should return a list of all the callrequests
        for the currently authenticated user.
        """
        # Superusers see everything; everyone else only their own records.
        if self.request.user.is_superuser:
            queryset = Callrequest.objects.all()
        else:
            queryset = Callrequest.objects.filter(user=self.request.user)
        return queryset
| mpl-2.0 |
mtlynch/ndt-e2e-clientworker | client_wrapper/install_selenium_extensions.py | 1 | 3193 | import argparse
import os
import platform
import urllib
import tempfile
import names
# Download locations for each (browser, OS) Selenium driver. Each entry maps
# to the driver archive URL and the local file name to save it under.
driver_urls = {
    'chrome_os_x': {
        'url':
        'http://chromedriver.storage.googleapis.com/2.21/chromedriver_mac32.zip',
        'file_name': 'chromedriver_mac32.zip'
    },
    'chrome_ubuntu': {
        'url':
        'http://chromedriver.storage.googleapis.com/2.21/chromedriver_linux64.zip',
        'file_name': 'chromedriver_linux64.zip'
    },
    'chrome_windows_10': {
        'url':
        'http://chromedriver.storage.googleapis.com/2.21/chromedriver_win32.zip',
        'file_name': 'chromedriver_win32.zip'
    },
    'edge_windows_10': {
        'url':
        'https://download.microsoft.com/download/8/D/0/8D0D08CF-790D-4586-B726-C6469A9ED49C/MicrosoftWebDriver.msi',
        'file_name': 'MicrosoftWebDriver.msi'
    },
    'safari_os_x': {
        'url':
        'http://selenium-release.storage.googleapis.com/2.48/SafariDriver.safariextz',
        'file_name': 'SafariDriver.safariextz',
    }
}
def _download_chrome_drivers():
    """Downloads Chrome drivers for Selenium."""
    # Map the platform.system() value to the matching driver_urls key.
    key_by_system = {
        'Darwin': 'chrome_os_x',      # Mac OS X
        'Linux': 'chrome_ubuntu',
        'Windows': 'chrome_windows_10',
    }
    system = platform.system()
    if system not in key_by_system:
        raise ValueError('Unsupported OS specified: %s' % (system))
    remote_file = driver_urls[key_by_system[system]]
    _download_temp_file(remote_file['url'], remote_file['file_name'])
def _download_temp_file(url, file_name):
    """Downloads file into temp directory.

    Args:
        url: A string representing the URL the file is to be downloaded from.
        file_name: A string representing the name of the file to be downloaded.
    """
    # Fetch into a freshly created temporary directory.
    destination = os.path.join(tempfile.mkdtemp(), file_name)
    print('File downloading to %s' % destination)
    urllib.URLopener().retrieve(url, destination)
def _download_edge_drivers():
    """Downloads Edge drivers for Selenium."""
    edge_driver = driver_urls['edge_windows_10']
    _download_temp_file(edge_driver['url'], edge_driver['file_name'])
def _download_safari_drivers():
    """Downloads Safari drivers for Selenium."""
    safari_driver = driver_urls['safari_os_x']
    _download_temp_file(safari_driver['url'], safari_driver['file_name'])
def main(args):
    """Install the Selenium driver matching the browser named in args."""
    browser = args.browser
    if browser == names.CHROME:
        _download_chrome_drivers()
    elif browser == names.EDGE:
        _download_edge_drivers()
    elif browser == names.SAFARI:
        _download_safari_drivers()
    elif browser == names.FIREFOX:
        # Firefox requires no separately installed driver here.
        pass
    else:
        raise ValueError('Unsupported browser specified: %s' % (args.browser))
if __name__ == '__main__':
    # Script entry point: parse the target browser from the command line and
    # install the matching Selenium driver/extension.
    parser = argparse.ArgumentParser(
        prog='NDT E2E Testing Client Selenium Extension Installer',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--browser',
                        help='Browser to run under (for browser-based client)',
                        choices=('chrome', 'firefox', 'safari', 'edge'))
    main(parser.parse_args())
| apache-2.0 |
jmcarp/django | django/core/checks/security/csrf.py | 477 | 1796 | from django.conf import settings
from .. import Tags, Warning, register
# Deployment-check warnings raised by the CSRF security checks below.
# W003: the CSRF middleware itself is missing.
W003 = Warning(
    "You don't appear to be using Django's built-in "
    "cross-site request forgery protection via the middleware "
    "('django.middleware.csrf.CsrfViewMiddleware' is not in your "
    "MIDDLEWARE_CLASSES). Enabling the middleware is the safest approach "
    "to ensure you don't leave any holes.",
    id='security.W003',
)

# W016: middleware present but the CSRF cookie is not marked Secure.
W016 = Warning(
    "You have 'django.middleware.csrf.CsrfViewMiddleware' in your "
    "MIDDLEWARE_CLASSES, but you have not set CSRF_COOKIE_SECURE to True. "
    "Using a secure-only CSRF cookie makes it more difficult for network "
    "traffic sniffers to steal the CSRF token.",
    id='security.W016',
)

# W017: middleware present but the CSRF cookie is not marked HttpOnly.
W017 = Warning(
    "You have 'django.middleware.csrf.CsrfViewMiddleware' in your "
    "MIDDLEWARE_CLASSES, but you have not set CSRF_COOKIE_HTTPONLY to True. "
    "Using an HttpOnly CSRF cookie makes it more difficult for cross-site "
    "scripting attacks to steal the CSRF token.",
    id='security.W017',
)
def _csrf_middleware():
    "Report whether the CSRF middleware is enabled in MIDDLEWARE_CLASSES."
    middleware_classes = settings.MIDDLEWARE_CLASSES
    return "django.middleware.csrf.CsrfViewMiddleware" in middleware_classes
@register(Tags.security, deploy=True)
def check_csrf_middleware(app_configs, **kwargs):
    "Warn (W003) when the CSRF middleware is not installed."
    if _csrf_middleware():
        return []
    return [W003]
@register(Tags.security, deploy=True)
def check_csrf_cookie_secure(app_configs, **kwargs):
    "Warn (W016) when CSRF middleware is used without CSRF_COOKIE_SECURE."
    # Only relevant when the CSRF middleware is actually enabled.
    if not _csrf_middleware():
        return []
    return [] if settings.CSRF_COOKIE_SECURE else [W016]
@register(Tags.security, deploy=True)
def check_csrf_cookie_httponly(app_configs, **kwargs):
    "Warn (W017) when CSRF middleware is used without CSRF_COOKIE_HTTPONLY."
    # Only relevant when the CSRF middleware is actually enabled.
    if not _csrf_middleware():
        return []
    return [] if settings.CSRF_COOKIE_HTTPONLY else [W017]
| bsd-3-clause |
raphui/linux | Documentation/target/tcm_mod_builder.py | 337 | 24391 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Globals shared by the generator functions below.
tcm_dir = ""               # root of the kernel source tree being modified
fabric_ops = []            # function-pointer lines scraped from target_core_fabric.h
fabric_mod_dir = ""        # output directory for the generated fabric module
fabric_mod_port = ""       # target-port member name ("lport" for FC, else "tport")
fabric_mod_init_port = ""  # initiator-port member name ("nport" for FC, else "iport")
def tcm_mod_err(msg):
    """Print msg and terminate the script with exit status 1."""
    print msg
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric module output directory if it does not exist."""
    # Nothing to do when the directory is already present.
    if os.path.isdir(fabric_mod_dir_var) == True:
        return 1
    print "Creating fabric_mod_dir: " + fabric_mod_dir_var
    ret = os.mkdir(fabric_mod_dir_var)
    # NOTE(review): os.mkdir() returns None and raises OSError on failure,
    # so this error branch can never fire.
    if ret:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """
    Generate <fabric_mod_name>_base.h with the FC-specific tpg/lport C
    structures, and record "lport"/"nport" in the globals used by the
    other generator functions.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    # Emit the C header text for the Fibre Channel flavour.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    # NOTE(review): on Python 2, file.write() returns None, so this check
    # never triggers; write failures raise IOError instead.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """
    Generate <fabric_mod_name>_base.h with the SAS-specific tpg/tport C
    structures, and record "tport"/"iport" in the globals used by the
    other generator functions.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    # Emit the C header text for the SAS flavour.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """
    Generate <fabric_mod_name>_base.h with the iSCSI-specific tpg/tport C
    structures (IQN-named, no binary WWPN), and record "tport"/"iport" in
    the globals used by the other generator functions.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    # Emit the C header text for the iSCSI flavour.
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the protocol-specific _base.h generator for proto_ident."""
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """
    Generate <fabric_mod_name>_configfs.c in fabric_mod_dir_var: the
    configfs glue for the new fabric module (tpg/wwn make and drop
    callbacks, the wwn attribute table, the target_core_fabric_ops
    template and the module init/exit that registers it).
    """
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    # Emitted C source: headers first.
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi_proto.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
    # make_tpg(): parse "tpgt_<n>" and register a portal group.
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!tpg) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    # The SCSI protocol identifier passed to core_tpg_register() depends on
    # which fabric flavour is being generated.
    if proto_ident == "FC":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
    elif proto_ident == "SAS":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
    elif proto_ident == "iSCSI":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"
    # drop_tpg(): deregister and free the portal group.
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"
    # make/drop for the fabric's wwn (lport/tport) object.
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"
    buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!" + fabric_mod_port + ") {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
    buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"
    # Read-only "version" wwn attribute and attribute table.
    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " char *page)\n"
    buf += "{\n"
    buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += " NULL,\n"
    buf += "};\n\n"
    # The fabric ops template wiring all callbacks generated in _fabric.c.
    buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .module = THIS_MODULE,\n"
    buf += " .name = \"" + fabric_mod_name + "\",\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += "\n"
    buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
    buf += "};\n\n"
    # Module init/exit register and unregister the fabric template.
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """
    Scan include/target/target_core_fabric.h for the function-pointer
    members of struct target_core_fabric_ops, appending each matching
    line to the global fabric_ops list.
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;
    p = open(fabric_ops_api, 'r')
    line = p.readline()
    while line:
        # Skip ahead until the struct definition line has been seen.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue
        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
            continue
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue
        fabric_ops.append(line.rstrip())
    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """
    Generate <fabric_mod_name>_fabric.c (stub implementations for every
    fabric op found by tcm_mod_scan_fabric_ops) and the matching
    <fabric_mod_name>_fabric.h prototypes.
    """
    buf = ""    # accumulates the .c source
    bufi = ""   # accumulates the .h prototypes
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f
    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi
    pi = open(fi, 'w')
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)
    # Fixed preamble: headers plus the check_true/check_false helpers.
    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi_common.h>\n"
    buf += "#include <scsi/scsi_proto.h>\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
    # For each scanned op, emit a matching stub + prototype.
    total_fabric_ops = len(fabric_ops)
    i = 0
    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo
        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += " return \"" + fabric_mod_name + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue
        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
        if re.search('shutdown_session\)\(', fo):
            buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
        if re.search('close_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
        if re.search('queue_tm_rsp\)\(', fo):
            buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
        if re.search('aborted_task\)\(', fo):
            buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)
    pi.close()
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    """Write the kbuild Makefile for the generated fabric module.

    The Makefile lists the two generated objects (<name>_fabric.o and
    <name>_configfs.o) and hooks them under CONFIG_<NAME>.
    """
    buf = ""
    f = fabric_mod_dir_var + "/Makefile"
    print "Writing file: " + f
    p = open(f, 'w')
    # NOTE: open() raises IOError on failure, so this guard never fires.
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += " " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
    ret = p.write(buf)
    # NOTE: file.write() returns None on Python 2, so this branch is dead
    # code (on Python 3 it returns the character count and would fire on
    # every successful write instead).
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    """Write the Kconfig entry for the generated fabric module."""
    buf = ""
    f = fabric_mod_dir_var + "/Kconfig"
    print "Writing file: " + f
    p = open(f, 'w')
    # NOTE: open() raises IOError on failure, so this guard never fires.
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    # The first assignment intentionally replaces the empty buf above.
    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
    buf += " default n\n"
    buf += " ---help---\n"
    buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
    ret = p.write(buf)
    # NOTE: dead code under Python 2 -- file.write() returns None there.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new fabric module's object entry to drivers/target/Makefile."""
    makefile_path = tcm_dir + "/drivers/target/Makefile"
    entry = "obj-$(CONFIG_%s) += %s/\n" % (fabric_mod_name.upper(),
                                           fabric_mod_name.lower())
    # Append so that the existing Makefile content is preserved.
    makefile = open(makefile_path, 'a')
    makefile.write(entry)
    makefile.close()
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source line for the new fabric module to drivers/target/Kconfig."""
    kconfig_path = tcm_dir + "/drivers/target/Kconfig"
    entry = "source \"drivers/target/%s/Kconfig\"\n" % fabric_mod_name.lower()
    # Append so that the existing Kconfig content is preserved.
    kconfig = open(kconfig_path, 'a')
    kconfig.write(entry)
    kconfig.close()
    return
def main(modname, proto_ident):
    """Generate a TCM fabric module skeleton under drivers/target/<modname>.

    proto_ident must be one of "FC", "SAS" or "iSCSI"; anything else aborts.
    Interactively offers to wire the module into the kernel build files.
    """
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"
    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir
    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident
    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)
    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)
    # Emit the module sources plus its Makefile and Kconfig.
    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
    # 'input' shadows the builtin input(); harmless in this script.
    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
    return
# Command-line handling. NOTE: this runs at import time -- only the final
# main() call is guarded by __name__, so importing this module still parses
# sys.argv and may exit(-1) when the mandatory options are missing.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)
if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
dakcarto/QGIS | python/plugins/db_manager/sqledit.py | 18 | 6399 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptEdit.py
---------------------
Date : February 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'February 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import Qt, QSettings
from PyQt4.QtGui import QColor, QFont, QShortcut, QKeySequence
from PyQt4.Qsci import QsciScintilla, QsciLexerSQL
class SqlEdit(QsciScintilla):
    """QScintilla-based editor widget preconfigured for editing SQL.

    Sets up fonts (taken from the QGIS Python console settings), margins,
    folding, indentation, autocompletion and a colorized SQL lexer.
    """

    # Lexer identifiers; declared but not otherwise used in this class.
    LEXER_PYTHON = 0
    LEXER_R = 1

    def __init__(self, parent=None):
        QsciScintilla.__init__(self, parent)
        self.mylexer = None
        self.api = None
        self.setCommonOptions()
        self.initShortcuts()

    def setCommonOptions(self):
        """Apply the editor's default appearance and behaviour."""
        # Enable non-ASCII characters
        self.setUtf8(True)
        # Default font
        font = QFont()
        font.setFamily('Courier')
        font.setFixedPitch(True)
        font.setPointSize(10)
        self.setFont(font)
        self.setMarginsFont(font)
        self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
        self.setWrapMode(QsciScintilla.WrapWord)
        self.setWrapVisualFlags(QsciScintilla.WrapFlagByText,
                                QsciScintilla.WrapFlagNone, 4)
        self.setSelectionForegroundColor(QColor('#2e3436'))
        self.setSelectionBackgroundColor(QColor('#babdb6'))
        # Show line numbers
        self.setMarginWidth(1, '000')
        self.setMarginLineNumbers(1, True)
        self.setMarginsForegroundColor(QColor('#2e3436'))
        self.setMarginsBackgroundColor(QColor('#babdb6'))
        # Highlight current line
        self.setCaretLineVisible(True)
        self.setCaretLineBackgroundColor(QColor('#d3d7cf'))
        # Folding
        self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
        self.setFoldMarginColors(QColor('#d3d7cf'), QColor('#d3d7cf'))
        # Mark column 80 with vertical line
        self.setEdgeMode(QsciScintilla.EdgeLine)
        self.setEdgeColumn(80)
        self.setEdgeColor(QColor('#eeeeec'))
        # Indentation: 4 spaces, no tabs
        self.setAutoIndent(True)
        self.setIndentationsUseTabs(False)
        self.setIndentationWidth(4)
        self.setTabIndents(True)
        self.setBackspaceUnindents(True)
        self.setTabWidth(4)
        # Autocompletion: API-based, triggered after 2 characters
        self.setAutoCompletionThreshold(2)
        self.setAutoCompletionSource(QsciScintilla.AcsAPIs)
        self.setAutoCompletionCaseSensitivity(False)
        # Load font from Python console settings
        settings = QSettings()
        fontName = settings.value('pythonConsole/fontfamilytext', 'Monospace')
        fontSize = int(settings.value('pythonConsole/fontsize', 10))
        self.defaultFont = QFont(fontName)
        self.defaultFont.setFixedPitch(True)
        self.defaultFont.setPointSize(fontSize)
        self.defaultFont.setStyleHint(QFont.TypeWriter)
        self.defaultFont.setStretch(QFont.SemiCondensed)
        self.defaultFont.setLetterSpacing(QFont.PercentageSpacing, 87.0)
        self.defaultFont.setBold(False)
        self.boldFont = QFont(self.defaultFont)
        self.boldFont.setBold(True)
        self.italicFont = QFont(self.defaultFont)
        self.italicFont.setItalic(True)
        self.setFont(self.defaultFont)
        self.setMarginsFont(self.defaultFont)
        self.initLexer()

    def initShortcuts(self):
        """Disable conflicting Scintilla shortcuts; bind Ctrl+Space to autocomplete."""
        (ctrl, shift) = (self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16)
        # Disable some shortcuts
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl
                           + shift)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl)
        # self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Z") + ctrl)
        # self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Y") + ctrl)
        # Use Ctrl+Space for autocompletion
        self.shortcutAutocomplete = QShortcut(QKeySequence(Qt.CTRL
                                              + Qt.Key_Space), self)
        self.shortcutAutocomplete.setContext(Qt.WidgetShortcut)
        self.shortcutAutocomplete.activated.connect(self.autoComplete)

    def autoComplete(self):
        """Trigger autocompletion from all available sources."""
        self.autoCompleteFromAll()

    def initLexer(self):
        """Create the SQL lexer and apply the custom colour/font scheme.

        The integer style numbers passed to setColor()/setFont() are
        QsciLexerSQL style ids (comments, numbers, strings, keywords, ...).
        """
        self.mylexer = QsciLexerSQL()
        colorDefault = QColor('#2e3436')
        colorComment = QColor('#c00')
        colorCommentBlock = QColor('#3465a4')
        colorNumber = QColor('#4e9a06')
        colorType = QColor('#4e9a06')
        colorKeyword = QColor('#204a87')
        colorString = QColor('#ce5c00')
        self.mylexer.setDefaultFont(self.defaultFont)
        self.mylexer.setDefaultColor(colorDefault)
        self.mylexer.setColor(colorComment, 1)
        self.mylexer.setColor(colorNumber, 2)
        self.mylexer.setColor(colorString, 3)
        self.mylexer.setColor(colorString, 4)
        self.mylexer.setColor(colorKeyword, 5)
        self.mylexer.setColor(colorString, 6)
        self.mylexer.setColor(colorString, 7)
        self.mylexer.setColor(colorType, 8)
        self.mylexer.setColor(colorCommentBlock, 12)
        self.mylexer.setColor(colorString, 15)
        self.mylexer.setFont(self.italicFont, 1)
        self.mylexer.setFont(self.boldFont, 5)
        self.mylexer.setFont(self.boldFont, 8)
        self.mylexer.setFont(self.italicFont, 12)
        self.setLexer(self.mylexer)

    def lexer(self):
        """Return the lexer instance attached by initLexer()."""
        return self.mylexer
| gpl-2.0 |
emedinaa/contentbox | third_party/openid/test/test_urinorm.py | 77 | 1298 | import os
import unittest
import openid.urinorm
class UrinormTest(unittest.TestCase):
    """Single data-driven test case for openid.urinorm.urinorm().

    Each case carries a description, an input URI, and the expected
    normalized output -- or the literal string 'fail' when normalization
    is expected to raise ValueError.  (Python 2 only syntax below.)
    """

    def __init__(self, desc, case, expected):
        unittest.TestCase.__init__(self)
        self.desc = desc
        self.case = case
        self.expected = expected

    def shortDescription(self):
        # Shown by the unittest runner instead of the generic test name.
        return self.desc

    def runTest(self):
        try:
            actual = openid.urinorm.urinorm(self.case)
        except ValueError, why:
            # ValueError is only acceptable for cases marked 'fail'.
            self.assertEqual(self.expected, 'fail', why)
        else:
            self.assertEqual(actual, self.expected)

    def parse(cls, full_case):
        # A case is three newline-separated fields:
        # description, input URI, expected result.
        desc, case, expected = full_case.split('\n')
        case = unicode(case, 'utf-8')
        return cls(desc, case, expected)
    parse = classmethod(parse)
def parseTests(test_data):
    """Build a UrinormTest for every non-empty, blank-line-separated case."""
    chunks = (raw.strip() for raw in test_data.split('\n\n'))
    return [UrinormTest.parse(chunk) for chunk in chunks if chunk]
def pyUnitTests():
    """Load urinorm.txt (next to this module) and return a TestSuite.

    Returns:
        unittest.TestSuite containing one UrinormTest per case in the
        data file.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    test_data_file_name = os.path.join(here, 'urinorm.txt')
    # Use open() instead of the Python-2-only file() builtin (removed in
    # Python 3), and a context manager so the handle is closed even if
    # read() raises.
    with open(test_data_file_name) as test_data_file:
        test_data = test_data_file.read()
    tests = parseTests(test_data)
    return unittest.TestSuite(tests)
| apache-2.0 |
cwacek/python-jsonschema-objects | test/test_regression_126.py | 1 | 1829 | import pytest
import python_jsonschema_objects as pjs
import collections
@pytest.fixture
def schema():
    """JSON schema fixture: a oneOf of two message objects sharing enum,
    integer, object and array definitions (regression data for issue 126)."""
    return {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "title": "Test",
        "definitions": {
            "MyEnum1": {"type": "string", "enum": ["E_A", "E_B"]},
            "MyEnum2": {"type": "string", "enum": ["F_A", "F_B", "F_C", "F_D"]},
            "MyInt": {
                "default": "0",
                "type": "integer",
                "minimum": 0,
                "maximum": 4294967295,
            },
            "MyObj1": {
                "type": "object",
                "properties": {
                    "e1": {"$ref": "#/definitions/MyEnum1"},
                    "e2": {"$ref": "#/definitions/MyEnum2"},
                    "i1": {"$ref": "#/definitions/MyInt"},
                },
                "required": ["e1", "e2", "i1"],
            },
            "MyArray": {
                "type": "array",
                "items": {"$ref": "#/definitions/MyObj1"},
                "minItems": 0,
                "uniqueItems": True,
            },
            "MyMsg1": {
                "type": "object",
                "properties": {"a1": {"$ref": "#/definitions/MyArray"}},
            },
            "MyMsg2": {"type": "object", "properties": {"s1": {"type": "string"}}},
        },
        "type": "object",
        "oneOf": [{"$ref": "#/definitions/MyMsg1"}, {"$ref": "#/definitions/MyMsg2"}],
    }
def test_regression_126(schema):
    """Regression test (issue 126): with standardize_names=False the built
    namespace keeps the original definition names (MyObj1, MyArray, MyMsg1),
    and instances built from them serialize without error."""
    builder = pjs.ObjectBuilder(schema)
    ns = builder.build_classes(standardize_names=False)
    Obj1 = ns.MyObj1
    Array1 = ns.MyArray
    Msg1 = ns.MyMsg1
    o1 = Obj1(e1="E_A", e2="F_C", i1=2600)
    o2 = Obj1(e1="E_B", e2="F_D", i1=2500)
    objs = Array1([o1, o2])
    msg = Msg1(a1=objs)
    # Serialization succeeding (no exception) is the assertion here.
    print(msg.serialize())
| mit |
Rademade/taiga-back | tests/integration/test_exporter_api.py | 3 | 3389 | # Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest import mock
from django.core.urlresolvers import reverse
from .. import factories as f
from taiga.base.utils import json
pytestmark = pytest.mark.django_db
def test_invalid_project_export(client):
    # Requesting an export for a nonexistent project id must return 404.
    user = f.UserFactory.create()
    client.login(user)
    url = reverse("exporter-detail", args=[1000000])
    response = client.get(url, content_type="application/json")
    assert response.status_code == 404
def test_valid_project_export_with_celery_disabled(client, settings):
    # Without celery the export is produced synchronously: the response is
    # a 200 carrying the dump's download URL.
    settings.CELERY_ENABLED = False
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    client.login(user)
    url = reverse("exporter-detail", args=[project.pk])
    response = client.get(url, content_type="application/json")
    assert response.status_code == 200
    response_data = response.data
    assert "url" in response_data
def test_valid_project_export_with_celery_enabled(client, settings):
    # With celery the export is asynchronous: the response is a 202 with an
    # export_id, and a delayed cleanup task is scheduled.
    settings.CELERY_ENABLED = True
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    client.login(user)
    url = reverse("exporter-detail", args=[project.pk])
    # delete_project_dump task should have been launched
    with mock.patch('taiga.export_import.tasks.delete_project_dump') as delete_project_dump_mock:
        response = client.get(url, content_type="application/json")
        assert response.status_code == 202
        response_data = response.data
        assert "export_id" in response_data
        # The cleanup must be scheduled after EXPORTS_TTL seconds.
        args = (project.id, project.slug, response_data["export_id"],)
        kwargs = {"countdown": settings.EXPORTS_TTL}
        delete_project_dump_mock.apply_async.assert_called_once_with(args, **kwargs)
def test_valid_project_with_throttling(client, settings):
    # With the import-dump-mode throttle set to 1/minute, the second export
    # request in a row must be rejected with 429 Too Many Requests.
    settings.CELERY_ENABLED = False
    settings.REST_FRAMEWORK["DEFAULT_THROTTLE_RATES"]["import-dump-mode"] = "1/minute"
    user = f.UserFactory.create()
    project = f.ProjectFactory.create(owner=user)
    f.MembershipFactory(project=project, user=user, is_admin=True)
    client.login(user)
    url = reverse("exporter-detail", args=[project.pk])
    response = client.get(url, content_type="application/json")
    assert response.status_code == 200
    response = client.get(url, content_type="application/json")
    assert response.status_code == 429
| agpl-3.0 |
jazztpt/edx-platform | common/djangoapps/embargo/admin.py | 154 | 1315 | """
Django admin page for embargo models
"""
from django.contrib import admin
import textwrap
from config_models.admin import ConfigurationModelAdmin
from embargo.models import IPFilter, CountryAccessRule, RestrictedCourse
from embargo.forms import IPFilterForm, RestrictedCourseForm
class IPFilterAdmin(ConfigurationModelAdmin):
    """Admin for blacklisting/whitelisting specific IP addresses."""
    form = IPFilterForm
    fieldsets = (
        (None, {
            'fields': ('enabled', 'whitelist', 'blacklist'),
            # Help text rendered above the fields on the admin form.
            'description': textwrap.dedent("""Enter specific IP addresses to explicitly
            whitelist (not block) or blacklist (block) in the appropriate box below.
            Separate IP addresses with a comma. Do not surround with quotes.
            """)
        }),
    )
class CountryAccessRuleInline(admin.StackedInline):
    """Inline editor for country access rules."""
    model = CountryAccessRule
    extra = 1

    def has_delete_permission(self, request, obj=None):
        # Rules may always be deleted from the parent RestrictedCourse page.
        return True
class RestrictedCourseAdmin(admin.ModelAdmin):
    """Admin for configuring course restrictions.

    Country access rules are edited inline on the same page.
    """
    inlines = [CountryAccessRuleInline]
    form = RestrictedCourseForm
# Register the embargo models with their admin classes.
admin.site.register(IPFilter, IPFilterAdmin)
admin.site.register(RestrictedCourse, RestrictedCourseAdmin)
| agpl-3.0 |
tmerrick1/spack | var/spack/repos/builtin/packages/py-astroid/package.py | 5 | 2103 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyAstroid(PythonPackage):
    """Spack package for the astroid Python library (see homepage)."""

    homepage = "https://www.astroid.org/"
    url = "https://github.com/PyCQA/astroid/archive/astroid-1.4.5.tar.gz"

    # version('1.5.3', '6f65e4ea8290ec032320460905afb828') # has broken unit tests
    version('1.4.5', '7adfc55809908297ef430efe4ea20ac3')
    version('1.4.4', '8ae6f63f6a2b260bb7f647dafccbc796')
    version('1.4.3', '4647159de7d4d0c4b1de23ecbfb8e246')
    version('1.4.2', '677f7965840f375af51b0e86403bee6a')
    version('1.4.1', 'ed70bfed5e4b25be4292e7fe72da2c02')

    depends_on('py-lazy-object-proxy')
    depends_on('py-six')
    depends_on('py-wrapt')
    # Backport modules are only needed on older Python versions.
    depends_on('py-enum34@1.1.3:', when='^python@:3.3.99')
    depends_on('py-singledispatch', when='^python@:3.3.99')
    depends_on('py-backports-functools-lru-cache', when='^python@:3.2.99')
    depends_on('py-setuptools@17.1:')
| lgpl-2.1 |
arcean/telepathy-sunshine | debian/telepathy-sunshine/usr/lib/python2.6/dist-packages/sunshine/capabilities.py | 4 | 10907 | # telepathy-sunshine is the GaduGadu connection manager for Telepathy
#
# Copyright (C) 2009 Collabora Ltd.
# Copyright (C) 2010 Krzysztof Klinikowski <kkszysiu@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import logging
import dbus
import telepathy
try:
from telepathy.server import ConnectionInterfaceContactCapabilities
except:
from telepathy._generated.Connection_Interface_Contact_Capabilities \
import ConnectionInterfaceContactCapabilities
from sunshine.util.decorator import async
from sunshine.handle import SunshineHandleFactory
__all__ = ['SunshineCapabilities']
logger = logging.getLogger('Sunshine.Capabilities')
class SunshineCapabilities(telepathy.server.ConnectionInterfaceCapabilities,
        ConnectionInterfaceContactCapabilities):
    """Capabilities / ContactCapabilities D-Bus interface implementation.

    Tracks, per contact handle, the requestable channel classes (RCCs)
    the contact supports and emits the corresponding change signals.
    VoIP-related paths below are deliberately disabled/commented out.
    """

    # RCC advertised for plain 1-1 text channels.
    text_chat_class = \
        ({telepathy.CHANNEL_INTERFACE + '.ChannelType':
              telepathy.CHANNEL_TYPE_TEXT,
          telepathy.CHANNEL_INTERFACE + '.TargetHandleType':
              dbus.UInt32(telepathy.HANDLE_TYPE_CONTACT)},
         [telepathy.CHANNEL_INTERFACE + '.TargetHandle',
          telepathy.CHANNEL_INTERFACE + '.TargetID'])

    # RCC for audio-only streamed media channels.
    audio_chat_class = \
        ({telepathy.CHANNEL_INTERFACE + '.ChannelType':
              telepathy.CHANNEL_TYPE_STREAMED_MEDIA,
          telepathy.CHANNEL_INTERFACE + '.TargetHandleType':
              dbus.UInt32(telepathy.HANDLE_TYPE_CONTACT)},
         [telepathy.CHANNEL_INTERFACE + '.TargetHandle',
          telepathy.CHANNEL_INTERFACE + '.TargetID',
          telepathy.CHANNEL_TYPE_STREAMED_MEDIA + '.InitialAudio'])

    # RCC for audio+video streamed media channels.
    av_chat_class = \
        ({telepathy.CHANNEL_INTERFACE + '.ChannelType':
              telepathy.CHANNEL_TYPE_STREAMED_MEDIA,
          telepathy.CHANNEL_INTERFACE + '.TargetHandleType':
              dbus.UInt32(telepathy.HANDLE_TYPE_CONTACT)},
         [telepathy.CHANNEL_INTERFACE + '.TargetHandle',
          telepathy.CHANNEL_INTERFACE + '.TargetID',
          telepathy.CHANNEL_TYPE_STREAMED_MEDIA + '.InitialAudio',
          telepathy.CHANNEL_TYPE_STREAMED_MEDIA + '.InitialVideo'])

    def __init__(self):
        telepathy.server.ConnectionInterfaceCapabilities.__init__(self)
        ConnectionInterfaceContactCapabilities.__init__(self)
        # handle -> list(RCC)
        self._contact_caps = {}
        # Names of local clients that advertised video support.
        self._video_clients = []
        # UpdateCapabilities() calls queued while not yet connected.
        self._update_capabilities_calls = []

    def AdvertiseCapabilities(self, add, remove):
        """Delegate to the base implementation; media toggling is disabled."""
        # for caps, specs in add:
        #     if caps == telepathy.CHANNEL_TYPE_STREAMED_MEDIA:
        #         if specs & telepathy.CHANNEL_MEDIA_CAPABILITY_VIDEO:
        #             self._self_handle.profile.client_id.has_webcam = True
        #             self._self_handle.profile.client_id.supports_rtc_video = True
        # for caps in remove:
        #     if caps == telepathy.CHANNEL_TYPE_STREAMED_MEDIA:
        #         self._self_handle.profile.client_id.has_webcam = False
        return telepathy.server.ConnectionInterfaceCapabilities.\
            AdvertiseCapabilities(self, add, remove)

    def GetContactCapabilities(self, handles):
        """Return a handle -> RCC-list mapping for the requested handles."""
        if 0 in handles:
            raise telepathy.InvalidHandle('Contact handle list contains zero')
        ret = dbus.Dictionary({}, signature='ua(a{sv}as)')
        for i in handles:
            handle = self.handle(telepathy.HANDLE_TYPE_CONTACT, i)
            # If the handle has no contact capabilities yet then it
            # won't be in the dict. It's fair to return an empty list
            # here for its contact caps.
            if handle in self._contact_caps:
                ret[handle] = dbus.Array(self._contact_caps[handle], signature='(a{sv}as)')
            else:
                ret[handle] = dbus.Array([], signature='(a{sv}as)')
        return ret

    def UpdateCapabilities(self, caps):
        """Record local client capabilities (queued until connected)."""
        if self._status != telepathy.CONNECTION_STATUS_CONNECTED:
            self._update_capabilities_calls.append(caps)
            return
        # voip is disabled, so
        return
        # NOTE: everything below is unreachable while the early return
        # above is in place (VoIP support disabled).
        # We only care about voip.
        for client, classes, capabilities in caps:
            video = False
            for channel_class in classes:
                # Does this client support video?
                if channel_class[telepathy.CHANNEL_INTERFACE + '.ChannelType'] == \
                        telepathy.CHANNEL_TYPE_STREAMED_MEDIA:
                    video = True
                    self._video_clients.append(client)
                else:
                    # *Did* it used to support video?
                    if client in self._video_clients:
                        self._video_clients.remove(client)
            changed = False
            # We've got no more clients that support video; remove the cap.
            if not video and not self._video_clients:
                self._self_handle.profile.client_id.has_webcam = False
                changed = True
            # We want video.
            if video and not self._self_handle.profile.client_id.has_webcam:
                self._self_handle.profile.client_id.has_webcam = True
                self._self_handle.profile.client_id.supports_rtc_video = True
                changed = True
            # Signal.
            if changed:
                updated = dbus.Dictionary({self._self_handle: self._contact_caps[self._self_handle]},
                        signature='ua(a{sv}as)')
                self.ContactCapabilitiesChanged(updated)

    def on_contact_client_capabilities_changed(self, contact):
        """Protocol callback: re-evaluate the contact's media capabilities."""
        self._update_capabilities(contact)

    def contactAdded(self, handle):
        """When we add a contact in our contact list, add the
        capabilities to create text channel to the contact"""
        self.add_text_capabilities([handle])

    def add_text_capabilities(self, contacts_handles):
        """Add the create capability for text channel to these contacts."""
        ret = []
        cc_ret = dbus.Dictionary({}, signature='ua(a{sv}as)')
        for handle in contacts_handles:
            ctype = telepathy.CHANNEL_TYPE_TEXT
            if handle in self._caps and ctype in self._caps[handle]:
                old_gen, old_spec = self._caps[handle][ctype]
            else:
                old_gen = 0
                old_spec = 0
            new_gen = old_gen
            new_gen |= telepathy.CONNECTION_CAPABILITY_FLAG_CREATE
            diff = (int(handle), ctype, old_gen, new_gen, old_spec, old_spec)
            ret.append(diff)
            # ContactCapabilities
            self._contact_caps.setdefault(handle, []).append(self.text_chat_class)
            cc_ret[handle] = self._contact_caps[handle]
        self.CapabilitiesChanged(ret)
        self.ContactCapabilitiesChanged(cc_ret)

    # def add_create_capability(self, contact_handle):
    #     """Add the create capability for self handle."""
    #     ret = []
    #     cc_ret = dbus.Dictionary({}, signature='ua(a{sv}as)')
    #
    #     ctype = telepathy.CHANNEL_TYPE_TEXT
    #     if handle in self._caps:
    #         old_gen, old_spec = self._caps[handle][ctype]
    #     else:
    #         old_gen = 0
    #         old_spec = 0
    #     new_gen = old_gen
    #     new_gen |= telepathy.CONNECTION_CAPABILITY_FLAG_CREATE
    #
    #     diff = (int(handle), ctype, old_gen, new_gen, old_spec, old_spec)
    #     ret.append(diff)
    #
    #     # ContactCapabilities
    #     self._contact_caps.setdefault(handle, []).append(self.text_chat_class)
    #     cc_ret[handle] = self._contact_caps[handle]
    #
    #     self.CapabilitiesChanged(ret)
    #     self.ContactCapabilitiesChanged(cc_ret)

    def _update_capabilities(self, contact):
        """Recompute a contact's media RCC and signal any change."""
        handle = SunshineHandleFactory(self, 'contact',
                contact.account, contact.network_id)
        ctype = telepathy.CHANNEL_TYPE_STREAMED_MEDIA
        new_gen, new_spec, rcc = self._get_capabilities(contact)
        if handle in self._caps:
            old_gen, old_spec = self._caps[handle][ctype]
        else:
            old_gen = 0
            old_spec = 0
        if old_gen != new_gen or old_spec != new_spec:
            diff = (int(handle), ctype, old_gen, new_gen, old_spec, new_spec)
            self.CapabilitiesChanged([diff])
        if rcc is None:
            return
        self._contact_caps.setdefault(handle, [])
        if rcc in self._contact_caps[handle]:
            return
        # NOTE(review): the next two checks are identical -- presumably the
        # second one was meant to remove av_chat_class; confirm intent.
        if self.audio_chat_class in self._contact_caps[handle]:
            self._contact_caps[handle].remove(self.audio_chat_class)
        if self.audio_chat_class in self._contact_caps[handle]:
            self._contact_caps[handle].remove(self.audio_chat_class)
        self._contact_caps[handle].append(rcc)
        ret = dbus.Dictionary({handle: self._contact_caps[handle]},
                signature='ua(a{sv}as)')
        self.ContactCapabilitiesChanged(ret)

    def _get_capabilities(self, contact):
        """Return (generic_caps, specific_caps, rcc) for a contact.

        With VoIP disabled this always returns (0, 0, None).
        """
        gen_caps = 0
        spec_caps = 0
        rcc = None
        # 'caps' is kept for the disabled VoIP logic commented out below.
        caps = contact.client_capabilities
        #if caps.supports_sip_invite:
            #gen_caps |= telepathy.CONNECTION_CAPABILITY_FLAG_CREATE
            #gen_caps |= telepathy.CONNECTION_CAPABILITY_FLAG_INVITE
            #spec_caps |= telepathy.CHANNEL_MEDIA_CAPABILITY_AUDIO
            #spec_caps |= telepathy.CHANNEL_MEDIA_CAPABILITY_NAT_TRAVERSAL_STUN
            #if caps.has_webcam:
                #spec_caps |= telepathy.CHANNEL_MEDIA_CAPABILITY_VIDEO
                #rcc = self.av_chat_class
            #else:
                #rcc = self.audio_chat_class
        return gen_caps, spec_caps, rcc

    @async
    def _populate_capabilities(self):
        """ Add the capability to create text channels to all contacts in our
        contacts list."""
        handles = set([self._self_handle])
        for contact in self.profile.contacts:
            handle = SunshineHandleFactory(self, 'contact',
                    str(contact.uin), None)
            handles.add(handle)
        self.add_text_capabilities(handles)
        # These caps were updated before we were online.
        for caps in self._update_capabilities_calls:
            self.UpdateCapabilities(caps)
        self._update_capabilities_calls = []

    def updateCapabilitiesCalls(self):
        # Replay caps that were updated before we were online.
        # NOTE(review): unlike _populate_capabilities() this does not clear
        # _update_capabilities_calls afterwards -- confirm that is intended.
        for caps in self._update_capabilities_calls:
            self.UpdateCapabilities(caps)
| gpl-3.0 |
nirmeshk/oh-mainline | vendor/packages/requests/requests/packages/chardet/latin1prober.py | 1778 | 5232 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of frequency categories used by Latin1ClassModel (values 0..3).
FREQ_CAT_NUM = 4

# Character classes for the Latin-1 / windows-1252 model.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes

# Maps every byte value 0x00-0xFF to one of the classes above.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,   # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,   # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,   # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,   # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,   # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,   # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,   # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,   # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,   # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,   # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,   # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,   # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,   # F8 - FF
)

# Bigram likelihood of (previous class, current class), row-major
# (CLASS_NUM columns per row):
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)
class Latin1Prober(CharSetProber):
    """Detects windows-1252 text using character-class bigram frequencies."""

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Previous character's class and per-frequency-category counters.
        self._mLastCharClass = OTH
        self._mFreqCounter = [0] * FREQ_CAT_NUM
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        """Consume a chunk of bytes; an illegal bigram rules the charset out."""
        aBuf = self.filter_with_english_letters(aBuf)
        for c in aBuf:
            charClass = Latin1_CharToClass[wrap_ord(c)]
            freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
                                    + charClass]
            if freq == 0:
                # Impossible class transition for Latin-1 text.
                self._mState = eNotMe
                break
            self._mFreqCounter[freq] += 1
            self._mLastCharClass = charClass
        return self.get_state()

    def get_confidence(self):
        """Confidence from the ratio of very-likely to very-unlikely bigrams."""
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
                          / total)
        if confidence < 0.0:
            confidence = 0.0
        # lower the confidence of latin1 so that other more accurate
        # detector can take priority.
        confidence = confidence * 0.73
        return confidence
| agpl-3.0 |
sekikn/incubator-airflow | kubernetes_tests/test_kubernetes_pod_operator_backcompat.py | 3 | 26065 | # pylint: disable=unused-argument
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import sys
import unittest
from unittest import mock
from unittest.mock import patch
import kubernetes.client.models as k8s
import pendulum
from kubernetes.client.api_client import ApiClient
from kubernetes.client.rest import ApiException
from airflow.exceptions import AirflowException
from airflow.kubernetes import kube_client
from airflow.kubernetes.pod import Port
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.kubernetes.pod_launcher import PodLauncher
from airflow.kubernetes.pod_runtime_info_env import PodRuntimeInfoEnv
from airflow.kubernetes.secret import Secret
from airflow.kubernetes.volume import Volume
from airflow.kubernetes.volume_mount import VolumeMount
from airflow.models import DAG, TaskInstance
from airflow.providers.cncf.kubernetes.operators.kubernetes_pod import KubernetesPodOperator
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.version import version as airflow_version
# noinspection DuplicatedCode
def create_context(task):
    """Build a minimal Airflow task-execution context for *task* in tests."""
    amsterdam = pendulum.timezone("Europe/Amsterdam")
    when = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=amsterdam)
    ti = TaskInstance(task=task, execution_date=when)
    context = {
        "dag": DAG(dag_id="dag"),
        "ts": when.isoformat(),
        "task": task,
        "ti": ti,
    }
    return context
# noinspection DuplicatedCode,PyUnusedLocal
class TestKubernetesPodOperatorSystem(unittest.TestCase):
    """System tests for KubernetesPodOperator using the back-compat
    ``airflow.kubernetes`` helper classes (Port, Volume, Secret, ...).

    Each test builds an operator, executes it (against a real cluster or a
    mocked PodLauncher) and compares the serialized pod to
    ``self.expected_pod``.
    """
    def get_current_task_name(self):
        # reverse test name to make pod name unique (it has limited length)
        return "_" + unittest.TestCase.id(self).replace(".", "_")[::-1]
    def setUp(self):
        """Prepare an ApiClient and the baseline expected pod serialization."""
        self.maxDiff = None # pylint: disable=invalid-name
        self.api_client = ApiClient()
        # Baseline pod dict that individual tests tweak before asserting.
        self.expected_pod = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'namespace': 'default',
                'name': mock.ANY,
                'annotations': {},
                'labels': {
                    'foo': 'bar',
                    'kubernetes_pod_operator': 'True',
                    'airflow_version': airflow_version.replace('+', '-'),
                    'execution_date': '2016-01-01T0100000100-a2f50a31f',
                    'dag_id': 'dag',
                    'task_id': 'task',
                    'try_number': '1',
                },
            },
            'spec': {
                'affinity': {},
                'containers': [
                    {
                        'image': 'ubuntu:16.04',
                        'imagePullPolicy': 'IfNotPresent',
                        'args': ["echo 10"],
                        'command': ["bash", "-cx"],
                        'env': [],
                        'envFrom': [],
                        'resources': {},
                        'name': 'base',
                        'ports': [],
                        'volumeMounts': [],
                    }
                ],
                'hostNetwork': False,
                'imagePullSecrets': [],
                'initContainers': [],
                'restartPolicy': 'Never',
                'securityContext': {},
                'serviceAccountName': 'default',
                'tolerations': [],
                'volumes': [],
            },
        }
    def tearDown(self):
        """Remove any pods the test left behind in the default namespace."""
        client = kube_client.get_kube_client(in_cluster=False)
        client.delete_collection_namespaced_pod(namespace="default")
    def create_context(self, task):
        """Duplicate of the module-level create_context helper (same context)."""
        dag = DAG(dag_id="dag")
        tzinfo = pendulum.timezone("Europe/Amsterdam")
        execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=tzinfo)
        task_instance = TaskInstance(task=task, execution_date=execution_date)
        return {
            "dag": dag,
            "ts": execution_date.isoformat(),
            "task": task,
            "ti": task_instance,
        }
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
    @mock.patch("airflow.kubernetes.kube_client.get_kube_client")
    def test_image_pull_secrets_correctly_set(self, mock_client, monitor_mock, start_mock):
        """image_pull_secrets string becomes a V1LocalObjectReference on the pod spec."""
        fake_pull_secrets = "fakeSecret"
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            image_pull_secrets=fake_pull_secrets,
            cluster_context='default',
        )
        monitor_mock.return_value = (State.SUCCESS, None)
        context = self.create_context(k)
        k.execute(context=context)
        self.assertEqual(
            start_mock.call_args[0][0].spec.image_pull_secrets,
            [k8s.V1LocalObjectReference(name=fake_pull_secrets)],
        )
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.delete_pod")
    @mock.patch("airflow.kubernetes.kube_client.get_kube_client")
    def test_pod_delete_even_on_launcher_error(
        self, mock_client, delete_pod_mock, monitor_pod_mock, start_pod_mock
    ): # pylint: disable=unused-argument
        """With is_delete_operator_pod=True the pod is deleted even if monitoring raises."""
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            cluster_context='default',
            is_delete_operator_pod=True,
        )
        monitor_pod_mock.side_effect = AirflowException('fake failure')
        with self.assertRaises(AirflowException):
            context = self.create_context(k)
            k.execute(context=context)
        assert delete_pod_mock.called
    def test_working_pod(self):
        """A trivial pod runs and matches the expected spec and labels."""
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
        )
        context = create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.assertEqual(self.expected_pod['spec'], actual_pod['spec'])
        self.assertEqual(self.expected_pod['metadata']['labels'], actual_pod['metadata']['labels'])
    def test_pod_node_selectors(self):
        """node_selectors are passed through to the pod's nodeSelector field."""
        node_selectors = {'beta.kubernetes.io/os': 'linux'}
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            node_selectors=node_selectors,
        )
        context = create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['nodeSelector'] = node_selectors
        self.assertEqual(self.expected_pod, actual_pod)
    def test_pod_resources(self):
        """The flat resources dict maps onto container requests/limits."""
        resources = {
            'limit_cpu': 0.25,
            'limit_memory': '64Mi',
            'limit_ephemeral_storage': '2Gi',
            'request_cpu': '250m',
            'request_memory': '64Mi',
            'request_ephemeral_storage': '1Gi',
        }
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            resources=resources,
        )
        context = self.create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['containers'][0]['resources'] = {
            'requests': {'memory': '64Mi', 'cpu': '250m', 'ephemeral-storage': '1Gi'},
            'limits': {'memory': '64Mi', 'cpu': 0.25, 'ephemeral-storage': '2Gi'},
        }
        self.assertEqual(self.expected_pod, actual_pod)
    def test_pod_affinity(self):
        """An affinity dict is passed through unchanged to the pod spec."""
        affinity = {
            'nodeAffinity': {
                'requiredDuringSchedulingIgnoredDuringExecution': {
                    'nodeSelectorTerms': [
                        {
                            'matchExpressions': [
                                {'key': 'beta.kubernetes.io/os', 'operator': 'In', 'values': ['linux']}
                            ]
                        }
                    ]
                }
            }
        }
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            affinity=affinity,
        )
        context = create_context(k)
        k.execute(context=context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['affinity'] = affinity
        self.assertEqual(self.expected_pod, actual_pod)
    def test_port(self):
        """A back-compat Port object becomes a containerPort entry."""
        port = Port('http', 80)
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            ports=[port],
        )
        context = self.create_context(k)
        k.execute(context=context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['containers'][0]['ports'] = [{'name': 'http', 'containerPort': 80}]
        self.assertEqual(self.expected_pod, actual_pod)
    def test_volume_mount(self):
        """A PVC-backed volume is mounted and readable/writable from the pod."""
        with patch.object(PodLauncher, 'log') as mock_logger:
            volume_mount = VolumeMount(
                'test-volume', mount_path='/tmp/test_volume', sub_path=None, read_only=False
            )
            volume_config = {'persistentVolumeClaim': {'claimName': 'test-volume'}}
            volume = Volume(name='test-volume', configs=volume_config)
            args = [
                "echo \"retrieved from mount\" > /tmp/test_volume/test.txt "
                "&& cat /tmp/test_volume/test.txt"
            ]
            k = KubernetesPodOperator(
                namespace='default',
                image="ubuntu:16.04",
                cmds=["bash", "-cx"],
                arguments=args,
                labels={"foo": "bar"},
                volume_mounts=[volume_mount],
                volumes=[volume],
                is_delete_operator_pod=False,
                name="test",
                task_id="task",
                in_cluster=False,
                do_xcom_push=False,
            )
            context = create_context(k)
            k.execute(context=context)
            # The container's stdout is logged, proving the mount worked.
            mock_logger.info.assert_any_call('retrieved from mount')
            actual_pod = self.api_client.sanitize_for_serialization(k.pod)
            self.expected_pod['spec']['containers'][0]['args'] = args
            self.expected_pod['spec']['containers'][0]['volumeMounts'] = [
                {'name': 'test-volume', 'mountPath': '/tmp/test_volume', 'readOnly': False}
            ]
            self.expected_pod['spec']['volumes'] = [
                {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
            ]
            self.assertEqual(self.expected_pod, actual_pod)
    def test_run_as_user_root(self):
        """security_context with runAsUser=0 is passed through to the pod."""
        security_context = {
            'securityContext': {
                'runAsUser': 0,
            }
        }
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            security_context=security_context,
        )
        context = create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['securityContext'] = security_context
        self.assertEqual(self.expected_pod, actual_pod)
    def test_run_as_user_non_root(self):
        """security_context with a non-root runAsUser is passed through."""
        security_context = {
            'securityContext': {
                'runAsUser': 1000,
            }
        }
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            security_context=security_context,
        )
        context = create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['securityContext'] = security_context
        self.assertEqual(self.expected_pod, actual_pod)
    def test_fs_group(self):
        """security_context with fsGroup is passed through to the pod."""
        security_context = {
            'securityContext': {
                'fsGroup': 1000,
            }
        }
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            security_context=security_context,
        )
        context = create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['securityContext'] = security_context
        self.assertEqual(self.expected_pod, actual_pod)
    def test_faulty_service_account(self):
        """An unknown service account makes pod creation fail with ApiException."""
        bad_service_account_name = "foobar"
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            startup_timeout_seconds=5,
            service_account_name=bad_service_account_name,
        )
        with self.assertRaises(ApiException):
            context = create_context(k)
            k.execute(context)
            actual_pod = self.api_client.sanitize_for_serialization(k.pod)
            self.expected_pod['spec']['serviceAccountName'] = bad_service_account_name
            self.assertEqual(self.expected_pod, actual_pod)
    def test_pod_failure(self):
        """
        Tests that the task fails when a pod reports a failure
        """
        bad_internal_command = ["foobar 10 "]
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=bad_internal_command,
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
        )
        with self.assertRaises(AirflowException):
            context = create_context(k)
            k.execute(context)
            actual_pod = self.api_client.sanitize_for_serialization(k.pod)
            self.expected_pod['spec']['containers'][0]['args'] = bad_internal_command
            self.assertEqual(self.expected_pod, actual_pod)
    def test_xcom_push(self):
        """Writing to /airflow/xcom/return.json is returned as the XCom value."""
        return_value = '{"foo": "bar"\n, "buzz": 2}'
        args = [f'echo \'{return_value}\' > /airflow/xcom/return.json']
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=args,
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=True,
        )
        context = create_context(k)
        self.assertEqual(k.execute(context), json.loads(return_value))
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        volume = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME)
        volume_mount = self.api_client.sanitize_for_serialization(PodDefaults.VOLUME_MOUNT)
        container = self.api_client.sanitize_for_serialization(PodDefaults.SIDECAR_CONTAINER)
        self.expected_pod['spec']['containers'][0]['args'] = args
        self.expected_pod['spec']['containers'][0]['volumeMounts'].insert(0, volume_mount) # noqa
        self.expected_pod['spec']['volumes'].insert(0, volume)
        self.expected_pod['spec']['containers'].append(container)
        self.assertEqual(self.expected_pod, actual_pod)
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
    @mock.patch("airflow.kubernetes.kube_client.get_kube_client")
    def test_envs_from_configmaps(self, mock_client, mock_monitor, mock_start):
        """configmaps names become envFrom configMapRef sources."""
        # GIVEN
        configmap = 'test-configmap'
        # WHEN
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            configmaps=[configmap],
        )
        # THEN
        mock_monitor.return_value = (State.SUCCESS, None)
        context = self.create_context(k)
        k.execute(context)
        self.assertEqual(
            mock_start.call_args[0][0].spec.containers[0].env_from,
            [k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmap))],
        )
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
    @mock.patch("airflow.kubernetes.kube_client.get_kube_client")
    def test_envs_from_secrets(self, mock_client, monitor_mock, start_mock):
        """An env-type Secret becomes an envFrom secretRef source."""
        # GIVEN
        secret_ref = 'secret_name'
        secrets = [Secret('env', None, secret_ref)]
        # WHEN
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            secrets=secrets,
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
        )
        # THEN
        monitor_mock.return_value = (State.SUCCESS, None)
        context = self.create_context(k)
        k.execute(context)
        self.assertEqual(
            start_mock.call_args[0][0].spec.containers[0].env_from,
            [k8s.V1EnvFromSource(secret_ref=k8s.V1SecretEnvSource(name=secret_ref))],
        )
    def test_env_vars(self):
        """env_vars and pod_runtime_info_envs both surface as container env."""
        # WHEN
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            env_vars={
                "ENV1": "val1",
                "ENV2": "val2",
            },
            pod_runtime_info_envs=[PodRuntimeInfoEnv("ENV3", "status.podIP")],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
        )
        context = create_context(k)
        k.execute(context)
        # THEN
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['containers'][0]['env'] = [
            {'name': 'ENV1', 'value': 'val1'},
            {'name': 'ENV2', 'value': 'val2'},
            {'name': 'ENV3', 'valueFrom': {'fieldRef': {'fieldPath': 'status.podIP'}}},
        ]
        self.assertEqual(self.expected_pod, actual_pod)
    def test_pod_template_file_with_overrides_system(self):
        """Operator args (labels, env) override values from a pod template file."""
        fixture = sys.path[0] + '/tests/kubernetes/basic_pod.yaml'
        k = KubernetesPodOperator(
            task_id="task" + self.get_current_task_name(),
            labels={"foo": "bar", "fizz": "buzz"},
            env_vars={"env_name": "value"},
            in_cluster=False,
            pod_template_file=fixture,
            do_xcom_push=True,
        )
        context = create_context(k)
        result = k.execute(context)
        self.assertIsNotNone(result)
        self.assertEqual(k.pod.metadata.labels, {'fizz': 'buzz', 'foo': 'bar'})
        self.assertEqual(k.pod.spec.containers[0].env, [k8s.V1EnvVar(name="env_name", value="value")])
        self.assertDictEqual(result, {"hello": "world"})
    def test_init_container(self):
        """init_containers are serialized into the pod's initContainers list."""
        # GIVEN
        volume_mounts = [
            k8s.V1VolumeMount(mount_path='/etc/foo', name='test-volume', sub_path=None, read_only=True)
        ]
        init_environments = [
            k8s.V1EnvVar(name='key1', value='value1'),
            k8s.V1EnvVar(name='key2', value='value2'),
        ]
        init_container = k8s.V1Container(
            name="init-container",
            image="ubuntu:16.04",
            env=init_environments,
            volume_mounts=volume_mounts,
            command=["bash", "-cx"],
            args=["echo 10"],
        )
        volume_config = {'persistentVolumeClaim': {'claimName': 'test-volume'}}
        volume = Volume(name='test-volume', configs=volume_config)
        expected_init_container = {
            'name': 'init-container',
            'image': 'ubuntu:16.04',
            'command': ['bash', '-cx'],
            'args': ['echo 10'],
            'env': [{'name': 'key1', 'value': 'value1'}, {'name': 'key2', 'value': 'value2'}],
            'volumeMounts': [{'mountPath': '/etc/foo', 'name': 'test-volume', 'readOnly': True}],
        }
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            volumes=[volume],
            init_containers=[init_container],
            in_cluster=False,
            do_xcom_push=False,
        )
        context = create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['initContainers'] = [expected_init_container]
        self.expected_pod['spec']['volumes'] = [
            {'name': 'test-volume', 'persistentVolumeClaim': {'claimName': 'test-volume'}}
        ]
        self.assertEqual(self.expected_pod, actual_pod)
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.start_pod")
    @mock.patch("airflow.kubernetes.pod_launcher.PodLauncher.monitor_pod")
    @mock.patch("airflow.kubernetes.kube_client.get_kube_client")
    def test_pod_priority_class_name(
        self, mock_client, monitor_mock, start_mock
    ): # pylint: disable=unused-argument
        """Test ability to assign priorityClassName to pod"""
        priority_class_name = "medium-test"
        k = KubernetesPodOperator(
            namespace='default',
            image="ubuntu:16.04",
            cmds=["bash", "-cx"],
            arguments=["echo 10"],
            labels={"foo": "bar"},
            name="test",
            task_id="task",
            in_cluster=False,
            do_xcom_push=False,
            priority_class_name=priority_class_name,
        )
        monitor_mock.return_value = (State.SUCCESS, None)
        context = self.create_context(k)
        k.execute(context)
        actual_pod = self.api_client.sanitize_for_serialization(k.pod)
        self.expected_pod['spec']['priorityClassName'] = priority_class_name
        self.assertEqual(self.expected_pod, actual_pod)
    def test_pod_name(self):
        """Pod names longer than the Kubernetes limit are rejected at construction."""
        pod_name_too_long = "a" * 221
        with self.assertRaises(AirflowException):
            KubernetesPodOperator(
                namespace='default',
                image="ubuntu:16.04",
                cmds=["bash", "-cx"],
                arguments=["echo 10"],
                labels={"foo": "bar"},
                name=pod_name_too_long,
                task_id="task",
                in_cluster=False,
                do_xcom_push=False,
            )
# pylint: enable=unused-argument
| apache-2.0 |
Krakn/learning | src/python/advent_of_code/2017/05/a_maze_of_twisty_trampolines_all_alike.py | 1 | 3322 | #!/usr/bin/env python3
'''
--- Day 5: A Maze of Twisty Trampolines, All Alike ---
'''
def load_input(filename):
    """Read one integer jump offset per line from *filename* into a list."""
    with open(filename, 'r') as handle:
        return [int(row.strip()) for row in handle.readlines()]
def part1(maze):
    """Count jumps until *maze* is escaped (Part 1 rules).

    Each visited offset is incremented by one after being used; the jump
    taken is the offset's value *before* the increment. The maze list is
    mutated in place. For ``[0, 3, 0, 1, -3]`` the exit is reached in 5
    steps, leaving ``[2, 5, 0, 1, -2]`` behind.
    """
    position = 0
    jumps = 0
    while 0 <= position < len(maze):
        offset = maze[position]
        maze[position] = offset + 1
        position += offset
        jumps += 1
    return jumps
def part2(maze):
    """Count jumps until *maze* is escaped (Part 2 rules).

    Like part 1, but after a jump an offset of three or more is decreased
    by one instead of increased. The maze list is mutated in place. For
    ``[0, 3, 0, 1, -3]`` the exit is reached in 10 steps, leaving
    ``[2, 3, 2, 3, -1]`` behind.
    """
    position = 0
    jumps = 0
    while 0 <= position < len(maze):
        offset = maze[position]
        maze[position] = offset - 1 if offset >= 3 else offset + 1
        position += offset
        jumps += 1
    return jumps
if __name__ == "__main__":
MAZE1 = load_input('input.txt')
MAZE2 = load_input('input.txt')
print("Part 1:", part1(MAZE1))
print("Part 2:", part2(MAZE2))
| isc |
winterbird-code/adbb | adbb/__init__.py | 1 | 2124 | #!/usr/bin/env python
#
# This file is part of adbb.
#
# adbb is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# adbb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with adbb. If not, see <http://www.gnu.org/licenses/>.
import multiprocessing
import logging
import logging.handlers
import sys
import adbb.db
from adbb.link import AniDBLink
from adbb.animeobjs import Anime, AnimeTitle, Episode, File
from adbb.anames import get_titles
anidb_client_name = "adbb"
anidb_client_version = 2
anidb_api_version = 3
log = None
_anidb = None
_sessionmaker = None
def _build_default_logger(loglevel, debug):
    """Build the package's default logger: syslog always, stderr when debugging."""
    logger = logging.getLogger(__name__)
    logger.setLevel(loglevel.upper())
    if debug:
        logger.setLevel(logging.DEBUG)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s'))
        logger.addHandler(stream_handler)
    syslog_handler = logging.handlers.SysLogHandler(address='/dev/log')
    syslog_handler.setFormatter(logging.Formatter(
        'adbb %(filename)s/%(funcName)s:%(lineno)d - %(message)s'))
    logger.addHandler(syslog_handler)
    return logger
def init(
        anidb_user,
        anidb_pwd,
        sql_db_url,
        debug=False,
        loglevel='info',
        logger=None,
        outgoing_udp_port=9876):
    """Initialise the adbb package: logging, SQL database and AniDB UDP link.

    Must be called before get_session()/close(); it populates the module
    globals ``log``, ``_sessionmaker`` and ``_anidb``.
    """
    if logger is None:
        logger = _build_default_logger(loglevel, debug)
    global log, _anidb, _sessionmaker
    log = logger
    _sessionmaker = adbb.db.init_db(sql_db_url)
    _anidb = adbb.link.AniDBLink(
        anidb_user,
        anidb_pwd,
        myport=outgoing_udp_port)
def get_session():
    # Return a fresh SQLAlchemy session from the factory created by init().
    # init() must have been called first, otherwise _sessionmaker is None.
    return _sessionmaker()
def close_session(session):
    # Close a session previously obtained from get_session().
    session.close()
def close():
    # Shut down the AniDB UDP link created by init().
    global _anidb
    _anidb.stop()
| gpl-3.0 |
ifxit/nidhogg | tests/test_get_best_volume.py | 1 | 9629 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from nidhogg import get_best_volume_by_quota, get_best_volume_by_size
from nidhogg.core import NidhoggException
from nidhogg.compatible import Volume, VolumeWithQuotaRatio
def check_volume(volume, size):
    """Helper function that is applied to check if the volume is suitable.

    *size* is given in MiB; a 20% headroom is added before comparing it
    against the volume's available bytes. All three criteria are always
    evaluated (no short-circuiting), mirroring the explicit bool checks.
    """
    required_bytes = size * 1048576 * 1.2  # MiB -> bytes, plus 20% buffer
    max_file_count = 32000000
    quota_ratio_threshold = 1.2
    has_space = volume["size_available"] >= required_bytes
    few_files = volume["files_used"] < max_file_count
    quota_ok = volume["quota_ratio"] < quota_ratio_threshold
    return has_space and few_files and quota_ok
def test_best_project_home_1():
    """Smallest quota ratio wins; proj090 is excluded for exceeding the file limit."""
    volumes = [
        VolumeWithQuotaRatio(**{'size_used': 116086018048.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 193273528320.0, 'size_available': 77187489792.0, 'quota_size': 216895848448.0, 'state': u'online', 'quota_ratio': 1.1222222222222222, 'snapable': True, 'files_used': 1049599.0, 'name': u'proj000'}),
        VolumeWithQuotaRatio(**{'size_used': 768038326272.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 1428076625920.0, 'size_available': 660038287360.0, 'quota_size': 1526860873728.0, 'state': u'online', 'quota_ratio': 1.069172932330827, 'snapable': True, 'files_used': 6377127.0, 'name': u'proj109'}),
        VolumeWithQuotaRatio(**{'size_used': 168616095744.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 483183820800.0, 'size_available': 314567712768.0, 'quota_size': 558345748480.0, 'state': u'online', 'quota_ratio': 1.1555555555555554, 'snapable': True, 'files_used': 882234.0, 'name': u'proj013'}),
        VolumeWithQuotaRatio(**{'size_used': 755761999872.0, 'filer': u'filer07.example.com', 'files_total': 44876648.0, 'size_total': 1122060206080.0, 'size_available': 366298185728.0, 'quota_size': 918049259518.0, 'state': u'online', 'quota_ratio': 0.8181818181800358, 'snapable': True, 'files_used': 35818461.0, 'name': u'proj090'}),
        VolumeWithQuotaRatio(**{'size_used': 1775658102784.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 2415919104000.0, 'size_available': 640259833856.0, 'quota_size': 2655363530744.0, 'state': u'online', 'quota_ratio': 1.0991111111077998, 'snapable': True, 'files_used': 19140696.0, 'name': u'proj320'}),
        VolumeWithQuotaRatio(**{'size_used': 1592106135552.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 2126008811520.0, 'size_available': 533902389248.0, 'quota_size': 2759516487680.0, 'state': u'online', 'quota_ratio': 1.297979797979798, 'snapable': True, 'files_used': 11719412.0, 'name': u'proj108'}), # quota over 1.2
    ]
    # 50 GB, smallest quota ratio, because proj090 has too much files > 32 mio
    assert 'proj109' == get_best_volume_by_quota(volumes, check_volume, size=50 * 1024)['name']
def test_best_project_home_2():
    """proj090 is eligible again with files_used just under the 32M limit."""
    volumes = [
        VolumeWithQuotaRatio(**{'size_used': 755761999872.0, 'filer': u'filer07.example.com', 'files_total': 44876648.0, 'size_total': 1122060206080.0, 'size_available': 366298185728.0, 'quota_size': 918049259518.0, 'state': u'online', 'quota_ratio': 0.8181818181800358, 'snapable': True, 'files_used': 31999999.0, 'name': u'proj090'}),
        VolumeWithQuotaRatio(**{'size_used': 1775658102784.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 2415919104000.0, 'size_available': 640259833856.0, 'quota_size': 2655363530744.0, 'state': u'online', 'quota_ratio': 1.0991111111077998, 'snapable': True, 'files_used': 19140696.0, 'name': u'proj320'}),
        VolumeWithQuotaRatio(**{'size_used': 1592106135552.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 2126008811520.0, 'size_available': 533902389248.0, 'quota_size': 2759516487680.0, 'state': u'online', 'quota_ratio': 1.297979797979798, 'snapable': True, 'files_used': 11719412.0, 'name': u'proj108'}), # quota over 1.2
    ]
    # 100 GB, netapp with sufficient space
    assert 'proj090' == get_best_volume_by_quota(volumes, check_volume, size=100 * 1024)['name']
def test_best_project_home_big():
    """A 350 GB request skips proj090 (insufficient space) and picks proj320."""
    volumes = [
        VolumeWithQuotaRatio(**{'size_used': 755761999872.0, 'filer': u'filer07.example.com', 'files_total': 44876648.0, 'size_total': 1122060206080.0, 'size_available': 366298185728.0, 'quota_size': 918049259518.0, 'state': u'online', 'quota_ratio': 0.8181818181800358, 'snapable': True, 'files_used': 31999999.0, 'name': u'proj090'}),
        VolumeWithQuotaRatio(**{'size_used': 1775658102784.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 2415919104000.0, 'size_available': 640259833856.0, 'quota_size': 2655363530744.0, 'state': u'online', 'quota_ratio': 1.0991111111077998, 'snapable': True, 'files_used': 19140696.0, 'name': u'proj320'}),
        VolumeWithQuotaRatio(**{'size_used': 1592106135552.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 2126008811520.0, 'size_available': 533902389248.0, 'quota_size': 2759516487680.0, 'state': u'online', 'quota_ratio': 1.297979797979798, 'snapable': True, 'files_used': 11719412.0, 'name': u'proj108'}), # quota over 1.2
    ]
    # 350 GB, netapp with sufficient space
    assert 'proj320' == get_best_volume_by_quota(volumes, check_volume, size=350 * 1024)['name']
def test_best_project_home_too_big():
    """No volume can satisfy a 350 GB request -> NidhoggException."""
    volumes = [
        VolumeWithQuotaRatio(**{'size_used': 755761999872.0, 'filer': u'filer07.example.com', 'files_total': 44876648.0, 'size_total': 1122060206080.0, 'size_available': 366298185728.0, 'quota_size': 918049259518.0, 'state': u'online', 'quota_ratio': 0.8181818181800358, 'snapable': True, 'files_used': 31999999.0, 'name': u'proj090'}),
    ]
    with pytest.raises(NidhoggException):
        # 350 GB, netapp with sufficient space
        get_best_volume_by_quota(volumes, check_volume, size=350 * 1024)
def test_best_project_home_too_much_files():
    """The only candidate exceeds the file-count limit -> NidhoggException."""
    volumes = [
        VolumeWithQuotaRatio(**{'size_used': 755761999872.0, 'filer': u'filer07.example.com', 'files_total': 44876648.0, 'size_total': 1122060206080.0, 'size_available': 366298185728.0, 'quota_size': 918049259518.0, 'state': u'online', 'quota_ratio': 0.8181818181800358, 'snapable': True, 'files_used': 35818461.0, 'name': u'proj090'}),
    ]
    with pytest.raises(NidhoggException):
        get_best_volume_by_quota(volumes, check_volume, size=1234)
def test_best_project_home_too_big_ratio_quota():
    """The only candidate exceeds the quota-ratio threshold -> NidhoggException."""
    volumes = [
        VolumeWithQuotaRatio(**{'size_used': 1592106135552.0, 'filer': u'filer07.example.com', 'files_total': 31876689.0, 'size_total': 2126008811520.0, 'size_available': 533902389248.0, 'quota_size': 2759516487680.0, 'state': u'online', 'quota_ratio': 1.297979797979798, 'snapable': True, 'files_used': 11719412.0, 'name': u'proj108'}), # quota over 1.2
    ]
    with pytest.raises(NidhoggException):
        get_best_volume_by_quota(volumes, check_volume, size=1234)
def test_best_user_home_1():
    """With no filter, the volume with the most available space wins."""
    volumes = [
        Volume(**{'size_used': 432169402368.0, 'filer': u'filer21.example.com', 'files_total': 21790707.0, 'size_total': 676457349120.0, 'size_available': 244287254528.0, 'state': u'online', 'snapable': True, 'files_used': 8648992.0, 'name': u'home000'}),
        Volume(**{'size_used': 81415127040.0, 'filer': u'filer21.example.com', 'files_total': 3112959.0, 'size_total': 96636764160.0, 'size_available': 15221399552.0, 'state': u'online', 'snapable': True, 'files_used': 1413035.0, 'name': u'home002'}),
        Volume(**{'size_used': 349094301696.0, 'filer': u'filer21.example.com', 'files_total': 15564791.0, 'size_total': 429496729600.0, 'size_available': 80396869632.0, 'state': u'online', 'snapable': True, 'files_used': 7136798.0, 'name': u'home050'}),
        Volume(**{'size_used': 133556998144.0, 'filer': u'filer21.example.com', 'files_total': 26460144.0, 'size_total': 429496729600.0, 'size_available': 295939719168.0, 'state': u'online', 'snapable': True, 'files_used': 862642.0, 'name': u'home110'}),
    ]
    assert 'home110' == get_best_volume_by_size(volumes)['name']
def test_best_user_home_2():
    """A filter function excludes the largest volume, so the runner-up wins."""
    def check(volume):
        # Reject home110 so the second-largest volume must be chosen.
        if volume['name'] == 'home110':
            return False
        return True
    volumes = [
        Volume(**{'size_used': 432169402368.0, 'filer': u'filer21.example.com', 'files_total': 21790707.0, 'size_total': 676457349120.0, 'size_available': 244287254528.0, 'state': u'online', 'snapable': True, 'files_used': 8648992.0, 'name': u'home000'}),
        Volume(**{'size_used': 81415127040.0, 'filer': u'filer21.example.com', 'files_total': 3112959.0, 'size_total': 96636764160.0, 'size_available': 15221399552.0, 'state': u'online', 'snapable': True, 'files_used': 1413035.0, 'name': u'home002'}),
        Volume(**{'size_used': 349094301696.0, 'filer': u'filer21.example.com', 'files_total': 15564791.0, 'size_total': 429496729600.0, 'size_available': 80396869632.0, 'state': u'online', 'snapable': True, 'files_used': 7136798.0, 'name': u'home050'}),
        Volume(**{'size_used': 133556998144.0, 'filer': u'filer21.example.com', 'files_total': 26460144.0, 'size_total': 429496729600.0, 'size_available': 295939719168.0, 'state': u'online', 'snapable': True, 'files_used': 862642.0, 'name': u'home110'}),
    ]
    assert 'home000' == get_best_volume_by_size(volumes, check)['name']
def test_best_user_home_no_volumes():
    """Selecting from an empty volume list raises NidhoggException."""
    with pytest.raises(NidhoggException):
        get_best_volume_by_size([])['name']
| mit |
andyzsf/edx | common/lib/xmodule/xmodule/capa_module.py | 3 | 7405 | """Implements basics of Capa, including class CapaModule."""
import json
import logging
import sys
from pkg_resources import resource_string
from .capa_base import CapaMixin, CapaFields, ComplexEncoder
from .progress import Progress
from xmodule.x_module import XModule, module_attr
from xmodule.raw_module import RawDescriptor
from xmodule.exceptions import NotFoundError, ProcessingError
log = logging.getLogger("edx.courseware")
class CapaModule(CapaMixin, XModule):
    """
    An XModule implementing LonCapa format problems, implemented by way of
    capa.capa_problem.LoncapaProblem
    CapaModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
    """
    icon_class = 'problem'
    # Front-end assets bundled with this module (the asset pipeline compiles
    # the coffee sources).
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/capa/display.coffee'),
            resource_string(__name__, 'js/src/javascript_loader.coffee'),
        ],
        'js': [
            resource_string(__name__, 'js/src/collapsible.js'),
            resource_string(__name__, 'js/src/capa/imageinput.js'),
            resource_string(__name__, 'js/src/capa/schematic.js'),
        ]
    }
    js_module_name = "Problem"
    css = {'scss': [resource_string(__name__, 'css/capa/display.scss')]}

    def __init__(self, *args, **kwargs):
        """
        Accepts the same arguments as xmodule.x_module:XModule.__init__
        """
        super(CapaModule, self).__init__(*args, **kwargs)

    def handle_ajax(self, dispatch, data):
        """
        This is called by courseware.module_render, to handle an AJAX call.
        `data` is request.POST.

        Returns a json dictionary:
        { 'progress_changed' : True/False,
        'progress' : 'none'/'in_progress'/'done',
        <other request-specific values here > }

        Raises:
            ProcessingError: when the dispatched handler raises; the original
                traceback is preserved via the Python 2 three-argument raise.
        """
        # Dispatch table: AJAX action name -> bound handler method.
        handlers = {
            'problem_get': self.get_problem,
            'problem_check': self.check_problem,
            'problem_reset': self.reset_problem,
            'problem_save': self.save_problem,
            'problem_show': self.get_answer,
            'score_update': self.update_score,
            'input_ajax': self.handle_input_ajax,
            'ungraded_response': self.handle_ungraded_response
        }
        _ = self.runtime.service(self, "i18n").ugettext
        generic_error_message = _(
            "We're sorry, there was an error with processing your request. "
            "Please try reloading your page and trying again."
        )
        not_found_error_message = _(
            "The state of this problem has changed since you loaded this page. "
            "Please refresh your page."
        )
        if dispatch not in handlers:
            return 'Error: {} is not a known capa action'.format(dispatch)
        # Snapshot progress before running the handler so the response can
        # tell the client whether its progress display needs updating.
        before = self.get_progress()
        try:
            result = handlers[dispatch](data)
        except NotFoundError as err:
            _, _, traceback_obj = sys.exc_info()  # pylint: disable=redefined-outer-name
            # Python 2 three-argument raise: re-raise as ProcessingError while
            # keeping the handler's original traceback.
            raise ProcessingError(not_found_error_message), None, traceback_obj
        except Exception as err:
            _, _, traceback_obj = sys.exc_info()  # pylint: disable=redefined-outer-name
            raise ProcessingError(generic_error_message), None, traceback_obj
        after = self.get_progress()
        result.update({
            'progress_changed': after != before,
            'progress_status': Progress.to_js_status_str(after),
            'progress_detail': Progress.to_js_detail_str(after),
        })
        return json.dumps(result, cls=ComplexEncoder)
class CapaDescriptor(CapaFields, RawDescriptor):
    """
    Module implementing problems in the LON-CAPA format,
    as implemented by capa.capa_problem
    """
    module_class = CapaModule
    has_score = True
    template_dir_name = 'problem'
    mako_template = "widgets/problem-edit.html"
    # Studio editing assets.
    js = {'coffee': [resource_string(__name__, 'js/src/problem/edit.coffee')]}
    js_module_name = "MarkdownEditingDescriptor"
    css = {
        'scss': [
            resource_string(__name__, 'css/editor/edit.scss'),
            resource_string(__name__, 'css/problem/edit.scss')
        ]
    }
    # The capa format specifies that what we call max_attempts in the code
    # is the attribute `attempts`. This will do that conversion
    metadata_translations = dict(RawDescriptor.metadata_translations)
    metadata_translations['attempts'] = 'max_attempts'

    @classmethod
    def filter_templates(cls, template, course):
        """
        Filter template that contains 'latex' from templates.
        Show them only if use_latex_compiler is set to True in
        course settings.
        """
        return (not 'latex' in template['template_id'] or course.use_latex_compiler)

    def get_context(self):
        """Extend the base editing context with markdown/LaTeX editor flags."""
        _context = RawDescriptor.get_context(self)
        _context.update({
            'markdown': self.markdown,
            # Markdown editing is only offered when markdown source exists.
            'enable_markdown': self.markdown is not None,
            'enable_latex_compiler': self.use_latex_compiler,
        })
        return _context

    # VS[compat]
    # TODO (cpennington): Delete this method once all fall 2012 course are being
    # edited in the cms
    @classmethod
    def backcompat_paths(cls, path):
        """Return candidate legacy file paths for `path`.

        NOTE(review): path[8:] appears to strip a fixed 8-character prefix
        (presumably 'problem/') — confirm against callers.
        """
        return [
            'problems/' + path[8:],
            path[8:],
        ]

    @property
    def non_editable_metadata_fields(self):
        """Fields hidden from the Studio metadata editor for this descriptor."""
        non_editable_fields = super(CapaDescriptor, self).non_editable_metadata_fields
        non_editable_fields.extend([
            CapaDescriptor.due,
            CapaDescriptor.graceperiod,
            CapaDescriptor.force_save_button,
            CapaDescriptor.markdown,
            CapaDescriptor.text_customization,
            CapaDescriptor.use_latex_compiler,
        ])
        return non_editable_fields

    # Proxy to CapaModule for access to any of its attributes
    answer_available = module_attr('answer_available')
    check_button_name = module_attr('check_button_name')
    check_button_checking_name = module_attr('check_button_checking_name')
    check_problem = module_attr('check_problem')
    choose_new_seed = module_attr('choose_new_seed')
    closed = module_attr('closed')
    get_answer = module_attr('get_answer')
    get_problem = module_attr('get_problem')
    get_problem_html = module_attr('get_problem_html')
    get_state_for_lcp = module_attr('get_state_for_lcp')
    handle_input_ajax = module_attr('handle_input_ajax')
    handle_problem_html_error = module_attr('handle_problem_html_error')
    handle_ungraded_response = module_attr('handle_ungraded_response')
    is_attempted = module_attr('is_attempted')
    is_correct = module_attr('is_correct')
    is_past_due = module_attr('is_past_due')
    is_submitted = module_attr('is_submitted')
    lcp = module_attr('lcp')
    make_dict_of_responses = module_attr('make_dict_of_responses')
    new_lcp = module_attr('new_lcp')
    publish_grade = module_attr('publish_grade')
    rescore_problem = module_attr('rescore_problem')
    reset_problem = module_attr('reset_problem')
    save_problem = module_attr('save_problem')
    set_state_from_lcp = module_attr('set_state_from_lcp')
    should_show_check_button = module_attr('should_show_check_button')
    should_show_reset_button = module_attr('should_show_reset_button')
    should_show_save_button = module_attr('should_show_save_button')
    update_score = module_attr('update_score')
| agpl-3.0 |
vvv1559/intellij-community | python/lib/Lib/site-packages/django/contrib/webdesign/tests.py | 379 | 1054 | # -*- coding: utf-8 -*-
import unittest
from django.contrib.webdesign.lorem_ipsum import *
from django.template import loader, Context
class WebdesignTest(unittest.TestCase):
    """Exercises the lorem ipsum helpers and the {% lorem %} template tag."""

    def test_words(self):
        expected = u'lorem ipsum dolor sit amet consectetur adipisicing'
        self.assertEqual(words(7), expected)

    def test_paragraphs(self):
        expected = ['Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.']
        self.assertEqual(paragraphs(1), expected)

    def test_lorem_tag(self):
        template = loader.get_template_from_string(
            "{% load webdesign %}{% lorem 3 w %}")
        rendered = template.render(Context({}))
        self.assertEqual(rendered, u'lorem ipsum dolor')
| apache-2.0 |
jcfr/mystic | examples/TEST_ffitPP2_b.py | 1 | 1429 | #!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Testing the polynomial fitting problem of [1] using scipy's Nelder-Mead algorithm.
Reference:
[1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
Heuristic for Global Optimization over Continuous Spaces. Journal of Global
Optimization 11: 341-359, 1997.
"""
from test_ffit import Chebyshev8, plot_solution, print_solution
from TEST_ffitPP_b import ChebyshevCost
if __name__ == '__main__':
    import random
    from mystic.solvers import fmin
    #from mystic._scipyoptimize import fmin
    from mystic.tools import random_seed
    # Fixed seed so the randomly perturbed starting points are reproducible.
    random_seed(123)
    import pp
    import sys
    # Optional argv[1] is a local port tunneled to a remote pp server;
    # with no argument, only the local pp workers are used.
    if len(sys.argv) > 1:
        tunnelport = sys.argv[1]
        ppservers = ("localhost:%s" % tunnelport,)
    else:
        ppservers = ()
    myserver = pp.Server(ppservers=ppservers)
    # Eight random starting points perturbed around the Chebyshev8 coefficients.
    trials = []
    for trial in range(8):
        x = tuple([random.uniform(-100,100) + Chebyshev8[i] for i in range(9)])
        trials.append(x)
    # Submit one fmin (Nelder-Mead) job per starting point to the pp server;
    # each submit returns a callable that blocks until its result is ready.
    results = [myserver.submit(fmin,(ChebyshevCost,x),(),()) for x in trials]
    for solution in results:
        print_solution(solution())
        #plot_solution(solution)
# end of file
| bsd-3-clause |
Aloomaio/googleads-python-lib | examples/ad_manager/v201805/creative_service/create_creative_from_template.py | 1 | 3666 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates a new template creative for a given advertiser.
To determine which companies are advertisers, run get_advertisers.py.
To determine which creative templates exist, run
get_all_creative_templates.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import os
import uuid
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set id of the advertiser (company) that the creative will be assigned to.
ADVERTISER_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, advertiser_id):
  """Creates a template-based image creative for the given advertiser.

  Args:
    client: an initialized googleads.ad_manager.AdManagerClient.
    advertiser_id: id of the advertiser (company) the creative is assigned to.
  """
  # Initialize appropriate service.
  creative_service = client.GetService('CreativeService', version='v201805')
  # Use the image banner with optional third party tracking template.
  creative_template_id = '10000680'
  # Create image asset.
  file_name = 'image%s.jpg' % uuid.uuid4()
  image_path = os.path.join(os.path.split(__file__)[0], '..', '..', 'data',
                            'medium_rectangle.jpg')
  # Read the JPEG in binary mode: 'rb' prevents newline translation from
  # corrupting the image on platforms that perform it, and the with-block
  # closes the handle (the original used text mode and leaked the handle).
  with open(image_path, 'rb') as image_file:
    image_data = image_file.read()
  size = {
      'width': '300',
      'height': '250'
  }
  asset = {
      'xsi_type': 'CreativeAsset',
      'fileName': file_name,
      'assetByteArray': image_data,
      'size': size
  }
  # Create creative from templates.
  creative = {
      'xsi_type': 'TemplateCreative',
      'name': 'Template Creative #%s' % uuid.uuid4(),
      'advertiserId': advertiser_id,
      'size': size,
      'creativeTemplateId': creative_template_id,
      'creativeTemplateVariableValues': [
          {
              'xsi_type': 'AssetCreativeTemplateVariableValue',
              'uniqueName': 'Imagefile',
              'asset': asset
          },
          {
              'xsi_type': 'LongCreativeTemplateVariableValue',
              'uniqueName': 'Imagewidth',
              'value': '300'
          },
          {
              'xsi_type': 'LongCreativeTemplateVariableValue',
              'uniqueName': 'Imageheight',
              'value': '250'
          },
          {
              'xsi_type': 'UrlCreativeTemplateVariableValue',
              'uniqueName': 'ClickthroughURL',
              'value': 'www.google.com'
          },
          {
              'xsi_type': 'StringCreativeTemplateVariableValue',
              'uniqueName': 'Targetwindow',
              'value': '_blank'
          }
      ]
  }
  # Call service to create the creative.
  creative = creative_service.createCreatives([creative])[0]
  # Display results.
  print ('Template creative with id "%s", name "%s", and type "%s" was '
         'created and can be previewed at %s.'
         % (creative['id'], creative['name'],
            ad_manager.AdManagerClassType(creative), creative['previewUrl']))
if __name__ == '__main__':
  # Initialize client object.
  # Credentials and properties are read from "googleads.yaml" in the home
  # directory (see the module docstring).
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, ADVERTISER_ID)
| apache-2.0 |
hyller/CodeLibrary | python-cookbook-master/src/12/storing_thread_specific_state/example2.py | 5 | 1786 | from socket import socket, AF_INET, SOCK_STREAM
import threading
class LazyConnection:
    """Lazily-connecting socket factory usable as a context manager.

    Each ``with conn as s:`` block opens a fresh socket connected to
    ``address`` and closes it on exit.  Open sockets are kept on a
    thread-local stack, so nested with-blocks and concurrent threads each
    get independent connections.
    """

    def __init__(self, address, family=AF_INET, type=SOCK_STREAM):
        self.address = address
        # Bug fix: honor the caller-supplied family/type.  The original
        # re-assigned the module constants AF_INET/SOCK_STREAM here,
        # silently ignoring both keyword arguments.
        self.family = family
        self.type = type
        self.local = threading.local()

    def __enter__(self):
        sock = socket(self.family, self.type)
        sock.connect(self.address)
        if not hasattr(self.local, 'connections'):
            self.local.connections = []
        # Stack of sockets opened by this thread; supports nested with-blocks.
        self.local.connections.append(sock)
        return sock

    def __exit__(self, exc_ty, exc_val, tb):
        # Close the most recently opened socket belonging to this thread.
        self.local.connections.pop().close()
def test(conn):
    """Drive `conn` from one thread: one single-socket request, then two
    interleaved requests over nested with-blocks (two independent sockets).

    NOTE(review): performs live HTTP requests against www.python.org.
    """
    # Example use
    from functools import partial
    with conn as s:
        s.send(b'GET /index.html HTTP/1.0\r\n')
        s.send(b'Host: www.python.org\r\n')
        s.send(b'\r\n')
        # Read until EOF in 8 KiB chunks.
        resp = b''.join(iter(partial(s.recv, 8192), b''))
        print('Got {} bytes'.format(len(resp)))
    with conn as s1, conn as s2:
        # Deliberately interleaved sends: each with-target is its own socket,
        # so the two in-flight requests do not interfere.
        s1.send(b'GET /downloads HTTP/1.0\r\n')
        s2.send(b'GET /index.html HTTP/1.0\r\n')
        s1.send(b'Host: www.python.org\r\n')
        s2.send(b'Host: www.python.org\r\n')
        s1.send(b'\r\n')
        s2.send(b'\r\n')
        resp1 = b''.join(iter(partial(s1.recv, 8192), b''))
        resp2 = b''.join(iter(partial(s2.recv, 8192), b''))
        print('resp1 got {} bytes'.format(len(resp1)))
        print('resp2 got {} bytes'.format(len(resp2)))
if __name__ == '__main__':
    conn = LazyConnection(('www.python.org', 80))
    # Three threads share one LazyConnection; its thread-local storage keeps
    # each thread's sockets separate.
    t1 = threading.Thread(target=test, args=(conn,))
    t2 = threading.Thread(target=test, args=(conn,))
    t3 = threading.Thread(target=test, args=(conn,))
    t1.start()
    t2.start()
    t3.start()
    t1.join()
    t2.join()
    t3.join()
| unlicense |
open-mmlab/mmdetection | mmdet/models/detectors/base.py | 1 | 14139 | from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule, auto_fp16
from mmdet.core.visualization import imshow_det_bboxes
class BaseDetector(BaseModule, metaclass=ABCMeta):
    """Base class for detectors."""

    def __init__(self, init_cfg=None):
        super(BaseDetector, self).__init__(init_cfg)
        # Toggled by mmcv's fp16 machinery when mixed precision is enabled.
        self.fp16_enabled = False

    @property
    def with_neck(self):
        """bool: whether the detector has a neck"""
        return hasattr(self, 'neck') and self.neck is not None

    # TODO: these properties need to be carefully handled
    # for both single stage & two stage detectors
    @property
    def with_shared_head(self):
        """bool: whether the detector has a shared head in the RoI Head"""
        return hasattr(self, 'roi_head') and self.roi_head.with_shared_head

    @property
    def with_bbox(self):
        """bool: whether the detector has a bbox head"""
        return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
                or (hasattr(self, 'bbox_head') and self.bbox_head is not None))

    @property
    def with_mask(self):
        """bool: whether the detector has a mask head"""
        return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
                or (hasattr(self, 'mask_head') and self.mask_head is not None))

    @abstractmethod
    def extract_feat(self, imgs):
        """Extract features from images."""
        pass

    def extract_feats(self, imgs):
        """Extract features from multiple images.
        Args:
            imgs (list[torch.Tensor]): A list of images. The images are
                augmented from the same image but in different ways.
        Returns:
            list[torch.Tensor]: Features of different images
        """
        assert isinstance(imgs, list)
        return [self.extract_feat(img) for img in imgs]

    def forward_train(self, imgs, img_metas, **kwargs):
        """
        Args:
            img (list[Tensor]): List of tensors of shape (1, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys, see
                :class:`mmdet.datasets.pipelines.Collect`.
            kwargs (keyword arguments): Specific to concrete implementation.
        """
        # NOTE the batched image size information may be useful, e.g.
        # in DETR, this is needed for the construction of masks, which is
        # then used for the transformer_head.
        batch_input_shape = tuple(imgs[0].size()[-2:])
        for img_meta in img_metas:
            img_meta['batch_input_shape'] = batch_input_shape

    async def async_simple_test(self, img, img_metas, **kwargs):
        """Async single-image test; subclasses may override."""
        raise NotImplementedError

    @abstractmethod
    def simple_test(self, img, img_metas, **kwargs):
        pass

    @abstractmethod
    def aug_test(self, imgs, img_metas, **kwargs):
        """Test function with test time augmentation."""
        pass

    async def aforward_test(self, *, img, img_metas, **kwargs):
        """Async counterpart of :meth:`forward_test` (single aug only)."""
        for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(img)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(img)}) '
                             f'!= num of image metas ({len(img_metas)})')
        # TODO: remove the restriction of samples_per_gpu == 1 when prepared
        samples_per_gpu = img[0].size(0)
        assert samples_per_gpu == 1
        if num_augs == 1:
            return await self.async_simple_test(img[0], img_metas[0], **kwargs)
        else:
            raise NotImplementedError

    def forward_test(self, imgs, img_metas, **kwargs):
        """
        Args:
            imgs (List[Tensor]): the outer list indicates test-time
                augmentations and inner Tensor should have a shape NxCxHxW,
                which contains all images in the batch.
            img_metas (List[List[dict]]): the outer list indicates test-time
                augs (multiscale, flip, etc.) and the inner list indicates
                images in a batch.
        """
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) '
                             f'!= num of image meta ({len(img_metas)})')
        # NOTE the batched image size information may be useful, e.g.
        # in DETR, this is needed for the construction of masks, which is
        # then used for the transformer_head.
        for img, img_meta in zip(imgs, img_metas):
            batch_size = len(img_meta)
            for img_id in range(batch_size):
                img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
        if num_augs == 1:
            # proposals (List[List[Tensor]]): the outer list indicates
            # test-time augs (multiscale, flip, etc.) and the inner list
            # indicates images in a batch.
            # The Tensor should have a shape Px4, where P is the number of
            # proposals.
            if 'proposals' in kwargs:
                kwargs['proposals'] = kwargs['proposals'][0]
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
            assert imgs[0].size(0) == 1, 'aug test does not support ' \
                                         'inference with batch size ' \
                                         f'{imgs[0].size(0)}'
            # TODO: support test augmentation for predefined proposals
            assert 'proposals' not in kwargs
            return self.aug_test(imgs, img_metas, **kwargs)

    @auto_fp16(apply_to=('img', ))
    def forward(self, img, img_metas, return_loss=True, **kwargs):
        """Calls either :func:`forward_train` or :func:`forward_test` depending
        on whether ``return_loss`` is ``True``.
        Note this setting will change the expected inputs. When
        ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
        and List[dict]), and when ``return_loss=False``, img and img_meta
        should be double nested (i.e. List[Tensor], List[List[dict]]), with
        the outer list indicating test time augmentations.
        """
        if torch.onnx.is_in_onnx_export():
            assert len(img_metas) == 1
            return self.onnx_export(img[0], img_metas[0])
        if return_loss:
            return self.forward_train(img, img_metas, **kwargs)
        else:
            return self.forward_test(img, img_metas, **kwargs)

    def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.
        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary information.
        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
                which may be a weighted sum of all losses, log_vars contains \
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')
        # Only entries whose key contains 'loss' are summed into the total.
        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)
        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()
        return loss, log_vars

    def train_step(self, data, optimizer):
        """The iteration step during training.
        This method defines an iteration step during training, except for the
        back propagation and optimizer updating, which are done in an optimizer
        hook. Note that in some complicated cases or models, the whole process
        including back propagation and optimizer updating is also defined in
        this method, such as GAN.
        Args:
            data (dict): The output of dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
                runner is passed to ``train_step()``. This argument is unused
                and reserved.
        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \
                ``num_samples``.
                - ``loss`` is a tensor for back propagation, which can be a \
                weighted sum of multiple losses.
                - ``log_vars`` contains all the variables to be sent to the
                logger.
                - ``num_samples`` indicates the batch size (when the model is \
                DDP, it means the batch size on each GPU), which is used for \
                averaging the logs.
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
        return outputs

    def val_step(self, data, optimizer=None):
        """The iteration step during validation.
        This method shares the same signature as :func:`train_step`, but used
        during val epochs. Note that the evaluation after training epochs is
        not implemented with this method, but an evaluation hook.
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)
        outputs = dict(
            loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
        return outputs

    def show_result(self,
                    img,
                    result,
                    score_thr=0.3,
                    bbox_color=(72, 101, 241),
                    text_color=(72, 101, 241),
                    mask_color=None,
                    thickness=2,
                    font_size=13,
                    win_name='',
                    show=False,
                    wait_time=0,
                    out_file=None):
        """Draw `result` over `img`.
        Args:
            img (str or Tensor): The image to be displayed.
            result (Tensor or tuple): The results to draw over `img`
                bbox_result or (bbox_result, segm_result).
            score_thr (float, optional): Minimum score of bboxes to be shown.
                Default: 0.3.
            bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
               The tuple of color should be in BGR order.
               Default: (72, 101, 241)
            text_color (str or tuple(int) or :obj:`Color`):Color of texts.
               The tuple of color should be in BGR order.
               Default: (72, 101, 241)
            mask_color (None or str or tuple(int) or :obj:`Color`):
               Color of masks. The tuple of color should be in BGR order.
               Default: None
            thickness (int): Thickness of lines. Default: 2
            font_size (int): Font size of texts. Default: 13
            win_name (str): The window name. Default: ''
            wait_time (float): Value of waitKey param.
                Default: 0.
            show (bool): Whether to show the image.
                Default: False.
            out_file (str or None): The filename to write the image.
                Default: None.
        Returns:
            img (Tensor): Only if not `show` or `out_file`
        """
        img = mmcv.imread(img)
        img = img.copy()
        if isinstance(result, tuple):
            bbox_result, segm_result = result
            if isinstance(segm_result, tuple):
                segm_result = segm_result[0]  # ms rcnn
        else:
            bbox_result, segm_result = result, None
        bboxes = np.vstack(bbox_result)
        # One class label per row of each per-class bbox array.
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)
        # draw segmentation masks
        segms = None
        if segm_result is not None and len(labels) > 0:  # non empty
            segms = mmcv.concat_list(segm_result)
            if isinstance(segms[0], torch.Tensor):
                segms = torch.stack(segms, dim=0).detach().cpu().numpy()
            else:
                segms = np.stack(segms, axis=0)
        # if out_file specified, do not show image in window
        if out_file is not None:
            show = False
        # draw bounding boxes
        img = imshow_det_bboxes(
            img,
            bboxes,
            labels,
            segms,
            class_names=self.CLASSES,
            score_thr=score_thr,
            bbox_color=bbox_color,
            text_color=text_color,
            mask_color=mask_color,
            thickness=thickness,
            font_size=font_size,
            win_name=win_name,
            show=show,
            wait_time=wait_time,
            out_file=out_file)
        if not (show or out_file):
            return img

    def onnx_export(self, img, img_metas):
        """ONNX export hook; subclasses that support export override this."""
        raise NotImplementedError(f'{self.__class__.__name__} does '
                                  f'not support ONNX EXPORT')
| apache-2.0 |
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/nltk/tokenize/mwe.py | 7 | 3829 | # Multi-Word Expression tokenizer
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Rob Malouf <rmalouf@mail.sdsu.edu>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Multi-Word Expression Tokenizer
A ``MWETokenizer`` takes a string which has already been divided into tokens and
retokenizes it, merging multi-word expressions into single tokens, using a lexicon
of MWEs:
>>> from nltk.tokenize import MWETokenizer
>>> tokenizer = MWETokenizer([('a', 'little'), ('a', 'little', 'bit'), ('a', 'lot')])
>>> tokenizer.add_mwe(('in', 'spite', 'of'))
>>> tokenizer.tokenize('Testing testing testing one two three'.split())
['Testing', 'testing', 'testing', 'one', 'two', 'three']
>>> tokenizer.tokenize('This is a test in spite'.split())
['This', 'is', 'a', 'test', 'in', 'spite']
>>> tokenizer.tokenize('In a little or a little bit or a lot in spite of'.split())
['In', 'a_little', 'or', 'a_little_bit', 'or', 'a_lot', 'in_spite_of']
"""
from nltk.util import Trie
from nltk.tokenize.api import TokenizerI
class MWETokenizer(TokenizerI):
    """A tokenizer that processes tokenized text and merges multi-word expressions
    into single tokens.
    """

    def __init__(self, mwes=None, separator='_'):
        """Initialize the multi-word tokenizer with a list of expressions and a
        separator
        :type mwes: list(list(str))
        :param mwes: A sequence of multi-word expressions to be merged, where
            each MWE is a sequence of strings.
        :type separator: str
        :param separator: String that should be inserted between words in a multi-word
            expression token. (Default is '_')
        """
        if not mwes:
            mwes = []
        # MWEs are stored as a word trie; see add_mwe().
        self._mwes = Trie(mwes)
        self._separator = separator

    def add_mwe(self, mwe):
        """Add a multi-word expression to the lexicon (stored as a word trie)
        We use ``util.Trie`` to represent the trie. Its form is a dict of dicts.
        The key True marks the end of a valid MWE.
        :param mwe: The multi-word expression we're adding into the word trie
        :type mwe: tuple(str) or list(str)
        :Example:
        >>> tokenizer = MWETokenizer()
        >>> tokenizer.add_mwe(('a', 'b'))
        >>> tokenizer.add_mwe(('a', 'b', 'c'))
        >>> tokenizer.add_mwe(('a', 'x'))
        >>> expected = {'a': {'x': {True: None}, 'b': {True: None, 'c': {True: None}}}}
        >>> tokenizer._mwes.as_dict() == expected
        True
        """
        self._mwes.insert(mwe)

    def tokenize(self, text):
        """
        :param text: A list containing tokenized text
        :type text: list(str)
        :return: A list of the tokenized text with multi-words merged together
        :rtype: list(str)
        :Example:
        >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+')
        >>> tokenizer.tokenize("An hors d'oeuvre tonight, sir?".split())
        ['An', "hors+d'oeuvre", 'tonight,', 'sir?']
        """
        i = 0
        n = len(text)
        result = []
        while i < n:
            if text[i] in self._mwes:
                # possible MWE match
                j = i
                trie = self._mwes
                # Greedily walk the trie as far as the input tokens allow.
                while j < n and text[j] in trie:
                    trie = trie[text[j]]
                    j = j + 1
                # The inner loop has no break, so this else-clause always
                # runs once the walk stops: accept or backtrack.
                else:
                    if Trie.LEAF in trie:
                        # success!
                        result.append(self._separator.join(text[i:j]))
                        i = j
                    else:
                        # no match, so backtrack
                        # Only text[i] is emitted and scanning resumes at
                        # i + 1; shorter MWE prefixes starting at i are not
                        # re-tried.
                        result.append(text[i])
                        i += 1
            else:
                result.append(text[i])
                i += 1
        return result
| mit |
mikewiebe-ansible/ansible | lib/ansible/modules/cloud/amazon/iam_cert.py | 10 | 12204 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates.
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
type: str
new_name:
description:
- When state is present, this will update the name of the cert.
- The cert, key and cert_chain parameters will be ignored if this is defined.
type: str
new_path:
description:
- When state is present, this will update the path of the cert.
- The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
type: str
state:
description:
- Whether to create(or update) or delete the certificate.
- If I(new_path) or I(new_name) is defined, specifying present will attempt to make an update these.
required: true
choices: [ "present", "absent" ]
type: str
path:
description:
- When creating or updating, specify the desired path of the certificate.
default: "/"
type: str
cert_chain:
description:
- The path to, or content of, the CA certificate chain in PEM encoded format.
As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
type: str
cert:
description:
- The path to, or content of the certificate body in PEM encoded format.
As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
type: str
key:
description:
- The path to, or content of the private key in PEM encoded format.
As of 2.4 content is accepted. If the parameter is not a file, it is assumed to be content.
type: str
dup_ok:
description:
- By default the module will not upload a certificate that is already uploaded into AWS.
- If I(dup_ok=True), it will upload the certificate as long as the name is unique.
default: False
type: bool
requirements: [ "boto" ]
author: Jonathan I. Davila (@defionscode)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic server certificate upload from local file
- iam_cert:
name: very_ssl
state: present
cert: "{{ lookup('file', 'path/to/cert') }}"
key: "{{ lookup('file', 'path/to/key') }}"
cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
# Basic server certificate upload
- iam_cert:
name: very_ssl
state: present
cert: path/to/cert
key: path/to/key
cert_chain: path/to/certchain
# Server certificate upload using key string
- iam_cert:
name: very_ssl
state: present
path: "/a/cert/path/"
cert: body_of_somecert
key: vault_body_of_privcertkey
cert_chain: body_of_myverytrustedchain
# Basic rename of existing certificate
- iam_cert:
name: very_ssl
new_name: new_very_ssl
state: present
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, connect_to_aws
import os
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def cert_meta(iam, name):
    """Fetch a server certificate from IAM and return its metadata.

    Returns a tuple of (path, certificate body, certificate id,
    upload date, expiration date, ARN).
    """
    response = iam.get_server_certificate(name)
    cert = response.get_server_certificate_result.server_certificate
    meta = cert.server_certificate_metadata
    return (meta.path,
            cert.certificate_body,
            meta.server_certificate_id,
            meta.upload_date,
            meta.expiration,
            meta.arn)
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
    """Check whether the requested certificate duplicates an existing one.

    Returns True when the cert already exists under one of the requested
    names with the same body (i.e. this is an update / rename), False when
    it is genuinely new.  Calls ``module.fail_json`` (which does not return)
    when a name collision with a *different* body is found, or when the same
    body already exists under another name and ``dup_ok`` is not set.
    """
    update = False

    # IAM cert names are case insensitive
    names_lower = [n.lower() for n in [name, new_name] if n is not None]
    orig_cert_names_lower = [ocn.lower() for ocn in orig_cert_names]

    if any(ct in orig_cert_names_lower for ct in names_lower):
        for i_name in names_lower:
            if cert is not None:
                try:
                    c_index = orig_cert_names_lower.index(i_name)
                except ValueError:
                    # BUG FIX: list.index raises ValueError when the value is
                    # absent; the original caught NameError, so a requested
                    # name not present in IAM crashed the module here.
                    continue
                else:
                    # NOTE: remove the carriage return to strictly compare the cert bodies.
                    slug_cert = cert.replace('\r', '')
                    slug_orig_cert_bodies = orig_cert_bodies[c_index].replace('\r', '')
                    if slug_orig_cert_bodies == slug_cert:
                        # Same name, identical body -> harmless update.
                        update = True
                        break
                    elif slug_cert.startswith(slug_orig_cert_bodies):
                        # Uploaded body extends the stored one (e.g. chain
                        # appended) -> still treated as an update.
                        update = True
                        break
                    else:
                        module.fail_json(changed=False, msg='A cert with the name %s already exists and'
                                         ' has a different certificate body associated'
                                         ' with it. Certificates cannot have the same name' % orig_cert_names[c_index])
            else:
                # No body supplied (rename / move only) -> update by name.
                update = True
                break
    elif cert in orig_cert_bodies and not dup_ok:
        # Same body already uploaded under a different name.
        for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
            if crt_body == cert:
                module.fail_json(changed=False, msg='This certificate already'
                                 ' exists under the name %s' % crt_name)

    return update
def cert_action(module, iam, name, cpath, new_name, new_path, state,
                cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok):
    """Create, rename, move or delete an IAM server certificate.

    Always terminates the module via ``module.exit_json`` /
    ``module.fail_json`` (both raise SystemExit); it never returns.
    ``cpath`` is the IAM path to use when uploading a new certificate.
    """
    if state == 'present':
        # True when the cert already exists under this name with the same
        # body (rename/move), False when it must be uploaded fresh.
        update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
                           orig_cert_bodies, dup_ok)
        if update:
            opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
            changed = True
            if new_name and new_path:
                # Rename and move in one call.
                iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
                module.exit_json(changed=changed, original_name=name, new_name=new_name,
                                 original_path=opath, new_path=new_path, cert_body=ocert,
                                 upload_date=upload_date, expiration_date=exp, arn=arn)
            elif new_name and not new_path:
                # Rename only.
                iam.update_server_cert(name, new_cert_name=new_name)
                module.exit_json(changed=changed, original_name=name, new_name=new_name,
                                 cert_path=opath, cert_body=ocert,
                                 upload_date=upload_date, expiration_date=exp, arn=arn)
            elif not new_name and new_path:
                # Move only.
                iam.update_server_cert(name, new_path=new_path)
                # NOTE(review): new_name is None in this branch, so the
                # reported "name" is None -- looks like it should be name=name;
                # left unchanged pending confirmation of the intended output.
                module.exit_json(changed=changed, name=new_name,
                                 original_path=opath, new_path=new_path, cert_body=ocert,
                                 upload_date=upload_date, expiration_date=exp, arn=arn)
            else:
                # Cert exists and nothing to rename/move -> no-op.
                changed = False
                module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
                                 upload_date=upload_date, expiration_date=exp, arn=arn,
                                 msg='No new path or name specified. No changes made')
        else:
            # Fresh upload, then re-read metadata to report it back.
            changed = True
            iam.upload_server_cert(name, cert, key, cert_chain=cert_chain, path=cpath)
            opath, ocert, ocert_id, upload_date, exp, arn = cert_meta(iam, name)
            module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
                             upload_date=upload_date, expiration_date=exp, arn=arn)
    elif state == 'absent':
        if name in orig_cert_names:
            changed = True
            iam.delete_server_cert(name)
            module.exit_json(changed=changed, deleted_cert=name)
        else:
            # Already gone -> idempotent success.
            changed = False
            module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
def load_data(cert, key, cert_chain):
    """Resolve certificate arguments to their contents.

    Each argument may be either literal PEM content or a path to a file on
    disk; values that name an existing file are replaced by that file's
    contents.  ``cert`` and ``key`` have trailing whitespace stripped while
    ``cert_chain`` is returned verbatim (matching the original behaviour).
    """
    def _resolve(value, strip):
        # Only treat the value as a path when it names an existing file;
        # otherwise assume it already is the PEM content (may be None).
        if value and os.path.isfile(value):
            with open(value, 'r') as fh:
                contents = fh.read()
            return contents.rstrip() if strip else contents
        return value

    return _resolve(cert, True), _resolve(key, True), _resolve(cert_chain, False)
def main():
    """Entry point: parse module arguments, connect to IAM and dispatch."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        name=dict(required=True),
        cert=dict(),
        key=dict(no_log=True),
        cert_chain=dict(),
        new_name=dict(),
        path=dict(default='/'),
        new_path=dict(),
        dup_ok=dict(type='bool')
    )
    )
    # Rename/move operations are mutually exclusive with uploading new
    # certificate material.
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['new_path', 'key'],
            ['new_path', 'cert'],
            ['new_path', 'cert_chain'],
            ['new_name', 'key'],
            ['new_name', 'cert'],
            ['new_name', 'cert_chain'],
        ],
    )
    if not HAS_BOTO:
        module.fail_json(msg="Boto is required for this module")
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            # IAM is a global service; fall back to the default endpoint.
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))
    state = module.params.get('state')
    name = module.params.get('name')
    path = module.params.get('path')
    new_name = module.params.get('new_name')
    new_path = module.params.get('new_path')
    dup_ok = module.params.get('dup_ok')
    # Only read cert material when actually uploading (not for rename/move
    # or deletion).
    if state == 'present' and not new_name and not new_path:
        cert, key, cert_chain = load_data(cert=module.params.get('cert'),
                                          key=module.params.get('key'),
                                          cert_chain=module.params.get('cert_chain'))
    else:
        cert = key = cert_chain = None
    # Snapshot the names and bodies of all existing server certs for
    # duplicate detection (one extra API call per existing cert).
    orig_cert_names = [ctb['server_certificate_name'] for ctb in
                       iam.get_all_server_certs().list_server_certificates_result.server_certificate_metadata_list]
    orig_cert_bodies = [iam.get_server_certificate(thing).get_server_certificate_result.certificate_body
                        for thing in orig_cert_names]
    # Renaming/moving to the current name/path is a no-op.
    if new_name == name:
        new_name = None
    if new_path == path:
        new_path = None
    changed = False
    try:
        # cert_action always exits the module (exit_json/fail_json).
        cert_action(module, iam, name, path, new_name, new_path, state,
                    cert, key, cert_chain, orig_cert_names, orig_cert_bodies, dup_ok)
    except boto.exception.BotoServerError as err:
        module.fail_json(changed=changed, msg=str(err), debug=[cert, key])


if __name__ == '__main__':
    main()
| gpl-3.0 |
eduble/panteda | operators/map/heatmap.py | 1 | 4193 | #!/usr/bin/env python3
import numpy as np
import numpy.random
from time import time
# web mercator projection functions
# ---------------------------------
def linear_lat(lat, atanh = np.arctanh, sin = np.sin, radians = np.radians):
    # Web-mercator: map a latitude in degrees to its linearised ordinate
    # (inverse Gudermannian).  The numpy functions are bound as default
    # arguments to make the lookups local (micro-optimisation).
    return atanh(sin(radians(lat)))
def inv_linear_lat(ll, asin = np.arcsin, tanh = np.tanh, degrees = np.degrees):
    # Inverse of linear_lat: linearised ordinate back to latitude in degrees.
    return degrees(asin(tanh(ll)))
def lng_to_x(w, lng_min, lng_max, lng):
    """Map a longitude to a pixel x coordinate on a map of width w.

    Longitude is linear in web-mercator, so this is a plain affine mapping
    of [lng_min, lng_max] onto [0, w].
    """
    pixels_per_degree = w / (lng_max - lng_min)
    return (lng - lng_min) * pixels_per_degree
def lat_to_y(h, lat_min, lat_max, lat):
    # Map a latitude to a pixel y coordinate on a map of height h by
    # projecting through linear_lat (web-mercator is linear in that space).
    return (linear_lat(lat) - linear_lat(lat_min)) * (h / (linear_lat(lat_max) - linear_lat(lat_min)))
def x_to_lng(w, lng_min, lng_max, x):
    """Inverse of lng_to_x: map a pixel x coordinate back to a longitude."""
    degrees_per_pixel = (lng_max - lng_min) / w
    return x * degrees_per_pixel + lng_min
def y_to_lat(h, lat_min, lat_max, y):
    # Inverse of lat_to_y: pixel y coordinate back to latitude in degrees.
    return inv_linear_lat(y * ((linear_lat(lat_max) - linear_lat(lat_min))/h) + linear_lat(lat_min))
# heatmap data generation
# -----------------------
class HeatMap:
    """Incrementally build a 2D histogram ("heatmap") of (lng, lat) points
    over a web-mercator map viewport, then export it in a compact,
    network-friendly sparse form.

    ``lnglat`` is expected to expose a ``chunks()`` iterator whose items
    have a ``columns`` attribute yielding (lng, lat) arrays -- TODO confirm
    against the producing operator (only its usage in compute() is visible
    here).
    """

    def __init__(self, lnglat, width, height, westlng, eastlng, southlat, northlat):
        # compute pixel bounds of the map (5px cells, last edge clamped to
        # the exact width/height)
        x = np.append(np.arange(0, width, 5), width)
        y = np.append(np.arange(0, height, 5), height)
        # project pixel bounds coordinates (x, y -> lng, lat)
        edgelng = x_to_lng(width, westlng, eastlng, x)
        centerlng = x_to_lng(width, westlng, eastlng, (x[1:] + x[:-1])/2)
        edgelat = y_to_lat(height, southlat, northlat, y)
        centerlat = y_to_lat(height, southlat, northlat, (y[1:] + y[:-1])/2)
        # prepare computation parameters for np.histogram2d
        self.bins = edgelng, edgelat
        self.range = (westlng, eastlng), (southlat, northlat)
        self.iterator = lnglat.chunks()
        self.heatmap = None     # accumulated counts; filled lazily by compute()
        # prepare compression parameters: cell centers are transmitted as
        # small integers (value * scale + offset reconstructs the float)
        scalelat = (edgelat[1:] - edgelat[:-1]).min() / 2
        self.approx_centerlat = numpy.rint((centerlat - centerlat[0]) / scalelat)
        scalelng = edgelng[1] - edgelng[0] # longitude is linear
        self.approx_centerlng = numpy.rint((centerlng - centerlng[0]) / scalelng)
        self.scales = dict(lat=scalelat, lng=scalelng)
        self.offsets = dict(lat=centerlat[0], lng=centerlng[0])
        # stream status parameters
        self.done = False

    def compute(self, time_credit):
        """Consume input chunks for at most ``time_credit`` seconds,
        accumulating the histogram; resumable across calls (sets self.done
        when the input iterator is exhausted)."""
        # make histogram:
        # - create a pixel grid
        # - given a tuple (lng, lat) increment the corresponding pixel
        deadline = time() + time_credit
        deadline_reached = False
        for chunk in self.iterator:
            lng, lat = chunk.columns
            chunk_heatmap = np.histogram2d(lng, lat, bins=self.bins, range=self.range)[0]
            # histogram2d is (x, y); transpose to row-major (y, x) pixels
            if self.heatmap is None:
                self.heatmap = chunk_heatmap.T
            else:
                self.heatmap += chunk_heatmap.T
            if time() > deadline:
                deadline_reached = True
                break
        if not deadline_reached:
            # we left the loop because of the end of iteration
            self.done = True

    # get sparse matrix representation: (lat, lng, intensity) tuples.
    # in order to lower network usage, we will transfer this data in a
    # compressed form: lng & lat values will be transfered as integers
    # together with a scaling factor and an offset to be applied.
    def compressed_form(self):
        """Return the current heatmap as a JSON-serialisable dict.

        NOTE(review): assumes compute() ran at least once on a non-empty
        input; self.heatmap is None before that and .sum() would fail.
        """
        # count number of points
        count = int(self.heatmap.sum())
        if count == 0:
            # if no points, return empty data
            data = dict(lat = [], lng = [], val = [])
        else:
            # apply threshold (drop cells below 5% of the max) and
            # compute approximated sparse matrix data
            nonzero_xy = ((self.heatmap / self.heatmap.max()) > 0.05).nonzero()
            nonzero_x = nonzero_xy[1]
            nonzero_y = nonzero_xy[0]
            data = dict(
                lat = self.approx_centerlat[nonzero_y].astype(int).tolist(),
                lng = self.approx_centerlng[nonzero_x].astype(int).tolist(),
                val = self.heatmap[nonzero_xy].astype(int).tolist()
            )
        return dict(
            data = data,
            scales = self.scales,
            offsets = self.offsets,
            count = count,
            done = self.done
        )
| gpl-3.0 |
gangadharkadam/saloon_frappe_install | frappe/tests/test_fmt_money.py | 54 | 4016 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, cstr
def fmt_money(amount, precision=None):
    """
    Convert a number to a string with group separators (thousands,
    millions, ...) according to the site's configured number format.

    Note: the ``precision`` argument is overwritten by the precision of the
    configured number format; it is kept only for interface compatibility.
    """
    fmt = frappe.db.get_default("number_format") or "#,###.##"
    decimal_str, comma_str, precision = get_number_format_info(fmt)

    # Round to the configured precision first.
    rounded = '%.*f' % (precision, flt(amount))
    decimals = rounded.split('.')[1] if '.' in rounded else ''

    # Sign is decided after rounding (so e.g. -0.3 at precision 0 is "0").
    sign = '-' if flt(rounded) < 0 else ''
    integer_part = cstr(abs(flt(rounded))).split('.')[0]

    # Split the integer part into groups, right to left.  The Indian
    # lakh/crore format groups the last three digits, then pairs.
    groups = []
    if len(integer_part) > 3:
        groups.append(integer_part[-3:])
        integer_part = integer_part[:-3]
        group_size = 2 if fmt == "#,##,###.##" else 3
        while len(integer_part) > group_size:
            groups.append(integer_part[-group_size:])
            integer_part = integer_part[:-group_size]
    groups.append(integer_part)
    groups.reverse()

    formatted = comma_str.join(groups)
    if precision:
        formatted += decimal_str + decimals
    return sign + formatted
def get_number_format_info(format):
    """Return (decimal separator, group separator, precision) for a number
    format string; unknown formats fall back to ".", ",", 2."""
    known_formats = {
        "#.###": ("", ".", 0),
        "#,###": ("", ",", 0),
        "#,###.##": (".", ",", 2),
        "#,##,###.##": (".", ",", 2),
        "#.###,##": (",", ".", 2),
        "# ###.##": (".", " ", 2),
    }
    return known_formats.get(format, (".", ",", 2))
import unittest
class TestFmtMoney(unittest.TestCase):
    """Exercise fmt_money against each supported number format.

    Each test sets the site-wide "number_format" default (read by
    fmt_money via frappe.db) before asserting.
    NOTE(review): assertEquals is a deprecated alias of assertEqual.
    """

    def test_standard(self):
        # "#,###.##": 3-digit groups, comma separator, 2 decimals.
        frappe.db.set_default("number_format", "#,###.##")
        self.assertEquals(fmt_money(100), "100.00")
        self.assertEquals(fmt_money(1000), "1,000.00")
        self.assertEquals(fmt_money(10000), "10,000.00")
        self.assertEquals(fmt_money(100000), "100,000.00")
        self.assertEquals(fmt_money(1000000), "1,000,000.00")
        self.assertEquals(fmt_money(10000000), "10,000,000.00")
        self.assertEquals(fmt_money(100000000), "100,000,000.00")
        self.assertEquals(fmt_money(1000000000), "1,000,000,000.00")

    def test_negative(self):
        # Negative values keep the grouping and gain a leading minus.
        frappe.db.set_default("number_format", "#,###.##")
        self.assertEquals(fmt_money(-100), "-100.00")
        self.assertEquals(fmt_money(-1000), "-1,000.00")
        self.assertEquals(fmt_money(-10000), "-10,000.00")
        self.assertEquals(fmt_money(-100000), "-100,000.00")
        self.assertEquals(fmt_money(-1000000), "-1,000,000.00")
        self.assertEquals(fmt_money(-10000000), "-10,000,000.00")
        self.assertEquals(fmt_money(-100000000), "-100,000,000.00")
        self.assertEquals(fmt_money(-1000000000), "-1,000,000,000.00")

    def test_decimal(self):
        # European style: "." groups, "," decimal separator.
        frappe.db.set_default("number_format", "#.###,##")
        self.assertEquals(fmt_money(-100), "-100,00")
        self.assertEquals(fmt_money(-1000), "-1.000,00")
        self.assertEquals(fmt_money(-10000), "-10.000,00")
        self.assertEquals(fmt_money(-100000), "-100.000,00")
        self.assertEquals(fmt_money(-1000000), "-1.000.000,00")
        self.assertEquals(fmt_money(-10000000), "-10.000.000,00")
        self.assertEquals(fmt_money(-100000000), "-100.000.000,00")
        self.assertEquals(fmt_money(-1000000000), "-1.000.000.000,00")

    def test_lacs(self):
        # Indian lakh/crore grouping: last 3 digits, then pairs.
        frappe.db.set_default("number_format", "#,##,###.##")
        self.assertEquals(fmt_money(100), "100.00")
        self.assertEquals(fmt_money(1000), "1,000.00")
        self.assertEquals(fmt_money(10000), "10,000.00")
        self.assertEquals(fmt_money(100000), "1,00,000.00")
        self.assertEquals(fmt_money(1000000), "10,00,000.00")
        self.assertEquals(fmt_money(10000000), "1,00,00,000.00")
        self.assertEquals(fmt_money(100000000), "10,00,00,000.00")
        self.assertEquals(fmt_money(1000000000), "1,00,00,00,000.00")

    def test_no_precision(self):
        # Precision 0: values are rounded to integers, no decimal part.
        frappe.db.set_default("number_format", "#,###")
        self.assertEquals(fmt_money(0.3), "0")
        self.assertEquals(fmt_money(100.3), "100")
        self.assertEquals(fmt_money(1000.3), "1,000")
        self.assertEquals(fmt_money(10000.3), "10,000")
        self.assertEquals(fmt_money(-0.3), "0")
        self.assertEquals(fmt_money(-100.3), "-100")
        self.assertEquals(fmt_money(-1000.3), "-1,000")
# Allow running this test module directly (requires a configured frappe site).
if __name__=="__main__":
    frappe.connect()
    unittest.main()
felliott/osf.io | api_tests/base/test_parsers.py | 10 | 2696 | import pytest
from api.base.parsers import JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON
# Fixture payloads mimicking the "relationships" member of a JSON-API
# request body, fed to the flatten_relationships tests below.

# One to-one relationship ("data" is a single resource identifier object).
SINGLE_RELATIONSHIP = {
    'region': {
        'data': {
            'type': 'regions',
            'id': 'us-1'
        }
    }
}

# One to-many relationship ("data" is a list of resource identifier objects).
MULTIPLE_RELATIONSHIP = {
    'affiliated_institutions': {
        'data': [
            {
                'type': 'institutions',
                'id': 'cos'
            }, {
                'type': 'institutions',
                'id': 'ljaf'
            }
        ]
    }
}

# A to-one and a to-many relationship in the same payload.
MIXED_RELATIONSHIP = {
    'region': {
        'data': {
            'type': 'regions',
            'id': 'us-1'
        }
    },
    'affiliated_institutions': {
        'data': [
            {
                'type': 'institutions',
                'id': 'cos'
            }, {
                'type': 'institutions',
                'id': 'ljaf'
            }
        ]
    }
}

# Two distinct to-one relationships.
MULTIPLE_SINGLE_RELATIONSHIPS = {
    'node': {
        'data': {
            'type': 'nodes',
            'id': 'abcde'
        }
    },
    'provider': {
        'data': {
            'type': 'preprint_providers',
            'id': 'agrixiv'
        }
    }
}

# Two distinct to-many relationships.
MULTIPLE_MULTIPLE_RELATIONSHIPS = {
    'affiliated_institutions': {
        'data': [
            {
                'type': 'institutions',
                'id': 'cos'
            }, {
                'type': 'institutions',
                'id': 'ljaf'
            }
        ]
    },
    'providers': {
        'data': [
            {
                'type': 'preprint_providers',
                'id': 'agrixiv'
            }, {
                'type': 'preprint_providers',
                'id': 'osfpreprints'
            }
        ]
    }
}
class TestMultipleRelationshipsParser:
    """flatten_relationships should reduce each relationship to its id
    (to-one) or list of ids (to-many), for both parser variants."""

    @pytest.mark.parametrize('relationship,expected',
                             [
                                 (SINGLE_RELATIONSHIP, {'region': 'us-1'}),
                                 (MULTIPLE_RELATIONSHIP, {'affiliated_institutions': ['cos', 'ljaf']}),
                                 (MIXED_RELATIONSHIP, {'region': 'us-1', 'affiliated_institutions': ['cos', 'ljaf']}),
                                 (MULTIPLE_SINGLE_RELATIONSHIPS, {'node': 'abcde', 'provider': 'agrixiv'}),
                                 (MULTIPLE_MULTIPLE_RELATIONSHIPS, {'affiliated_institutions': ['cos', 'ljaf'], 'providers': ['agrixiv', 'osfpreprints']}),
                             ])
    def test_flatten_relationships(self, relationship, expected):
        # The method is invoked unbound with an explicit parser instance so
        # both classes are exercised with identical inputs.
        parser = JSONAPIMultipleRelationshipsParser()
        assert JSONAPIMultipleRelationshipsParser.flatten_relationships(parser, relationship) == expected
        parser = JSONAPIMultipleRelationshipsParserForRegularJSON()
        assert JSONAPIMultipleRelationshipsParserForRegularJSON.flatten_relationships(parser, relationship) == expected
| apache-2.0 |
TeslaProject/external_chromium_org | tools/telemetry/telemetry/core/forwarders/do_nothing_forwarder_unittest.py | 37 | 2557 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import forwarders
from telemetry.core import util
from telemetry.core.forwarders import do_nothing_forwarder
class TestDoNothingForwarder(do_nothing_forwarder.DoNothingForwarder):
    """Override _WaitForConnect to avoid actual socket connection."""

    def __init__(self, port_pairs):
        # Records every address the base class would have connected to,
        # so tests can assert on the connection attempts.
        self.connected_addresses = []
        super(TestDoNothingForwarder, self).__init__(port_pairs)

    def _WaitForConnectionEstablished(self, address, timeout):
        # No real socket: just log the (host, port) tuple.
        self.connected_addresses.append(address)
class TestErrorDoNothingForwarder(do_nothing_forwarder.DoNothingForwarder):
    """Simulate a connection error."""

    def _WaitForConnectionEstablished(self, address, timeout):
        # Pretend the connection never came up.
        raise util.TimeoutException
class CheckPortPairsTest(unittest.TestCase):
    """Behavioural tests for DoNothingForwarder's port-pair validation."""

    def testChecksOnlyHttpHttps(self):
        # TCP ports (http/https) are connectivity-checked; DNS is not.
        port_pairs = forwarders.PortPairs(
            http=forwarders.PortPair(80, 80),
            https=forwarders.PortPair(443, 443),
            dns=forwarders.PortPair(53, 53))
        f = TestDoNothingForwarder(port_pairs)
        expected_connected_addresses = [
            ('127.0.0.1', 80),
            ('127.0.0.1', 443),
            # Port 53 is skipped because it is UDP and does not support connections.
        ]
        self.assertEqual(expected_connected_addresses, f.connected_addresses)

    def testNoDnsStillChecksHttpHttps(self):
        # A missing DNS pair must not prevent the http/https checks.
        port_pairs = forwarders.PortPairs(
            http=forwarders.PortPair(5566, 5566),
            https=forwarders.PortPair(7788, 7788),
            dns=None)
        f = TestDoNothingForwarder(port_pairs)
        expected_connected_addresses = [
            ('127.0.0.1', 5566),
            ('127.0.0.1', 7788),
        ]
        self.assertEqual(expected_connected_addresses, f.connected_addresses)

    def testPortMismatchRaisesPortsMismatchError(self):
        # The do_nothing_forward cannot forward from one port to another.
        port_pairs = forwarders.PortPairs(
            http=forwarders.PortPair(80, 80),
            https=forwarders.PortPair(8443, 443),
            dns=None)
        with self.assertRaises(do_nothing_forwarder.PortsMismatchError):
            TestDoNothingForwarder(port_pairs)

    def testConnectionTimeoutRaisesConnectionError(self):
        # A timeout while waiting for the port must surface as ConnectionError.
        port_pairs = forwarders.PortPairs(
            http=forwarders.PortPair(80, 80),
            https=forwarders.PortPair(8443, 443),
            dns=None)
        with self.assertRaises(do_nothing_forwarder.ConnectionError):
            TestErrorDoNothingForwarder(port_pairs)
| bsd-3-clause |
mohitreddy1996/Gender-Detection-from-Signature | src/train_test/random_forests.py | 1 | 1140 | from sklearn.metrics import precision_recall_fscore_support
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MinMaxScaler, normalize
# Train a random forest gender classifier on signature features and report
# micro-averaged precision/recall/F1 on a random 80/20 train/test split.
# (Python 2 script: print statements, DataFrame.ix indexing.)
df = pd.read_csv('../../Dataset/dataset.csv', delimiter='\t')
dataset = df.values
# Random 80/20 split (not seeded, so results vary run to run).
mask = np.random.rand(len(df)) < .80
train = df[mask]
test = df[~mask]
X = pd.DataFrame()
Y = pd.DataFrame()
# Columns 0-1 are identifiers; last column is the label.
# NOTE(review): .ix is deprecated in modern pandas (use .iloc).
X = train.ix[:, 2:len(train.columns) - 1]
Y = train.ix[:, len(train.columns) - 1: len(train.columns)]
X_Test = pd.DataFrame()
Y_Test = pd.DataFrame()
# After Normalising
# NOTE(review): sklearn normalize() scales each ROW to unit norm by default;
# confirm per-sample (not per-feature) scaling is what was intended.
X_standard = normalize(X)
print X_standard.shape
X_Test = test.ix[:, 2:len(test.columns) - 1]
Y_Test = test.ix[:, len(test.columns) - 1: len(test.columns)]
X_Test_standard = normalize(X_Test)
print X_Test_standard.shape
print "Training Data Set Size : ", str(len(X))
print "Testing Data Set Size : ", str(len(X_Test))
# tune parameters here.
rf = RandomForestClassifier(n_estimators=150, max_features=20)
# NOTE(review): Y is a single-column DataFrame; sklearn expects a 1-D array
# (Y.values.ravel()) and will warn here.
rf.fit(X_standard, Y)
# predict
Y_Result = rf.predict(X_Test_standard)
print precision_recall_fscore_support(Y_Test, Y_Result, average='micro')
| mit |
aquaya/ivrhub | ivrhub/models.py | 1 | 3129 | ''' mongoengine models
'''
from mongoengine import *
class User(Document):
    ''' account holder; some are admins, some are not
    '''
    admin_rights = BooleanField(required=True)
    # API credentials (issued on demand)
    api_id = StringField()
    api_key = StringField()
    # RFC 5321 caps addresses at 254 characters
    email = EmailField(required=True, unique=True, max_length=254)
    email_confirmation_code = StringField(required=True)
    email_confirmed = BooleanField(required=True)
    # one-shot code for the password-reset flow
    forgot_password_code = StringField()
    last_login_time = DateTimeField(required=True)
    name = StringField()
    # memberships; a user may belong to several organizations
    organizations = ListField(ReferenceField('Organization'))
    password_hash = StringField(required=True)
    registration_time = DateTimeField(required=True)
    verified = BooleanField(required=True)
class Organization(Document):
    ''' groups that users join
    '''
    description = StringField(default='')
    # url-safe version of the name
    label = StringField(unique=True, required=True)
    location = StringField(default='')
    name = StringField(unique=True, required=True)
class Form(Document):
    ''' the heart of the system: an ordered questionnaire owned by an org
    '''
    # unique code for requesting this form via sms or a call
    calling_code = StringField()
    creation_time = DateTimeField()
    creator = ReferenceField(User)
    description = StringField(default = '')
    # url-safe version of the name; unique within the organization
    label = StringField(unique_with='organization')
    language = StringField(default = '')
    name = StringField(unique_with='organization')
    organization = ReferenceField(Organization)
    # have to store questions here as well so we know the order
    questions = ListField(ReferenceField('Question'))
class Question(Document):
    ''' a single prompt within a form
    '''
    # local and remote locations of a recorded audio prompt
    audio_filename = StringField()
    audio_url = StringField()
    creation_time = DateTimeField()
    description = StringField()
    form = ReferenceField(Form)
    # url-safe version of the name; unique within the form
    label = StringField(unique_with='form')
    name = StringField(unique_with='form')
    # 'text_prompt', 'audio_file' or 'audio_url'
    prompt_type = StringField(default='text_prompt')
    # 'keypad' or 'voice' or 'no response'
    response_type = StringField(default='keypad')
    # S3 storage coordinates for uploaded prompt audio
    s3_key = StringField()
    s3_url = StringField()
    # text-to-speech prompt and its language code
    text_prompt = StringField()
    text_prompt_language = StringField(default='en')
class Response(Document):
    ''' one respondent's pass through a form
    '''
    # telephony provider call identifier
    call_sid = StringField()
    completion_time = DateTimeField()
    form = ReferenceField(Form)
    # whether this was a 'call' or 'ringback' or 'scheduled call'
    initiated_using = StringField()
    initiation_time = DateTimeField()
    # track the progress of the response
    last_question_asked = ReferenceField(Question)
    # any notes about the response as a whole
    notes = StringField()
    respondent_phone_number = StringField()
class Answer(Document):
    ''' a single answer, linking a question to a response
    '''
    # recorded voice answer, if any
    audio_url = StringField()
    # DTMF digits entered, if any
    keypad_input = StringField()
    # any notes on this answer (like a transcription)
    notes = StringField()
    question = ReferenceField(Question)
    response = ReferenceField(Response)
| mit |
insertnamehere1/maraschino | lib/rtorrent/lib/torrentparser.py | 91 | 5653 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.compat import is_py3
import os.path
import re
import rtorrent.lib.bencode as bencode
import hashlib
if is_py3():
from urllib.request import urlopen # @UnresolvedImport @UnusedImport
else:
from urllib2 import urlopen # @UnresolvedImport @Reimport
class TorrentParser():
    def __init__(self, torrent):
        """Decode and parse given torrent

        @param torrent: handles: urls, file paths, string of torrent data
        @type torrent: str

        @raise AssertionError: Can be raised for a couple reasons:
                                - If _get_raw_torrent() couldn't figure out
                                what X{torrent} is
                                - if X{torrent} isn't a valid bencoded torrent file
        """
        self.torrent = torrent
        self._raw_torrent = None  # raw bencoded torrent data
        self._torrent_decoded = None  # decoded torrent dict
        self.file_type = None  # one of "raw", "file", "url"

        self._get_raw_torrent()
        assert self._raw_torrent is not None, "Couldn't get raw_torrent."
        if self._torrent_decoded is None:
            self._decode_torrent()
        assert isinstance(self._torrent_decoded, dict), "Invalid torrent file."
        self._parse_torrent()

    def _is_raw(self):
        # Probe whether self.torrent is already bencoded data by attempting
        # to decode it (a successful decode yields a dict).
        raw = False
        if isinstance(self.torrent, (str, bytes)):
            if isinstance(self._decode_torrent(self.torrent), dict):
                raw = True
            else:
                # reset self._torrent_decoded (currently equals False)
                self._torrent_decoded = None

        return(raw)

    def _get_raw_torrent(self):
        """Get raw torrent data by determining what self.torrent is"""
        # already raw?
        if self._is_raw():
            self.file_type = "raw"
            self._raw_torrent = self.torrent
            return
        # local file?
        if os.path.isfile(self.torrent):
            self.file_type = "file"
            self._raw_torrent = open(self.torrent, "rb").read()
        # url?
        elif re.search("^(http|ftp):\/\/", self.torrent, re.I):
            self.file_type = "url"
            self._raw_torrent = urlopen(self.torrent).read()

    def _decode_torrent(self, raw_torrent=None):
        # Bencode-decode; defaults to the instance's raw data.  Also caches
        # the result on self._torrent_decoded as a side effect.
        if raw_torrent is None:
            raw_torrent = self._raw_torrent
        self._torrent_decoded = bencode.decode(raw_torrent)
        return(self._torrent_decoded)

    def _calc_info_hash(self):
        # SHA-1 of the bencoded "info" dict, upper-cased hex; None when the
        # torrent has no "info" key.
        self.info_hash = None
        if "info" in self._torrent_decoded.keys():
            info_encoded = bencode.encode(self._torrent_decoded["info"])

            if info_encoded:
                self.info_hash = hashlib.sha1(info_encoded).hexdigest().upper()

        return(self.info_hash)

    def _parse_torrent(self):
        # Expose each top-level torrent key as an attribute
        # (e.g. "creation date" -> self.creation_date), then compute the hash.
        for k in self._torrent_decoded:
            key = k.replace(" ", "_").lower()
            setattr(self, key, self._torrent_decoded[k])

        self._calc_info_hash()
class NewTorrentParser(object):
    """Simplified torrent parser accepting a file path, raw bencoded data,
    or a file-like object, decoded via bencode.

    :raises AssertionError: if the input cannot be resolved to raw data or
        does not decode to a dict.
    """

    @staticmethod
    def _read_file(fp):
        # Read the whole file-like object.
        return fp.read()

    @staticmethod
    def _write_file(fp):
        # NOTE(review): fp.write() is called with no data and would raise
        # TypeError if ever used; kept unchanged to preserve the interface.
        fp.write()
        return fp

    @staticmethod
    def _decode_torrent(data):
        return bencode.decode(data)

    def __init__(self, input):
        self.input = input
        self._raw_torrent = None        # raw bencoded data
        self._decoded_torrent = None    # decoded torrent dict
        self._hash_outdated = False     # reserved for tracker edits

    if False: pass  # (placeholder removed)
| mit |
tzangms/PyConTW | pycon_project/biblion/views.py | 1 | 3501 | from datetime import datetime
from django.core.urlresolvers import reverse
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils import simplejson as json
from django.contrib.sites.models import Site
from biblion.exceptions import InvalidSection
from biblion.models import Post, FeedHit
from biblion.settings import ALL_SECTION_NAME
def blog_index(request):
    """List current (published) posts in the request's language."""
    posts = Post.objects.current()
    # Only posts written in the visitor's active language.
    posts = posts.filter(language=request.LANGUAGE_CODE)

    return render_to_response("biblion/blog_list.html", {
        "posts": posts,
    }, context_instance=RequestContext(request))
def blog_section_list(request, section):
    """List posts belonging to one section; 404 on an unknown section slug."""
    try:
        posts = Post.objects.section(section)
    except InvalidSection:
        raise Http404()

    return render_to_response("biblion/blog_section_list.html", {
        "section_slug": section,
        # Resolve the slug to its human-readable section name.
        "section_name": dict(Post.SECTION_CHOICES)[Post.section_idx(section)],
        "posts": posts,
    }, context_instance=RequestContext(request))
def blog_post_detail(request, **kwargs):
    """Show a single post.

    Two URL forms are supported: by primary key (staff preview of any
    post, including unpublished) or by date + slug (published posts only).
    Increments the post's view counter as a side effect.
    """
    if "post_pk" in kwargs:
        # pk-based access is a staff-only preview channel.
        if request.user.is_authenticated() and request.user.is_staff:
            queryset = Post.objects.all()
            post = get_object_or_404(queryset, pk=kwargs["post_pk"])
        else:
            raise Http404()
    else:
        queryset = Post.objects.current()
        queryset = queryset.filter(
            published__year = int(kwargs["year"]),
            published__month = int(kwargs["month"]),
            published__day = int(kwargs["day"]),
        )
        post = get_object_or_404(queryset, slug=kwargs["slug"])
    post.inc_views()

    return render_to_response("biblion/blog_post.html", {
        "post": post,
    }, context_instance=RequestContext(request))
def serialize_request(request):
    """Serialize the interesting parts of an HTTP request to a JSON string.

    Captures the path, query string, remote address and every HTTP_* header
    from request.META (used to record feed hits for later analysis).
    """
    meta = {
        "QUERY_STRING": request.META.get("QUERY_STRING"),
        "REMOTE_ADDR": request.META.get("REMOTE_ADDR"),
    }
    # Pull in all forwarded HTTP headers as well.
    for key, value in request.META.items():
        if key.startswith("HTTP"):
            meta[key] = value
    return json.dumps({"path": request.path, "META": meta})
def blog_feed(request, section=None):
    """Render the Atom feed for one section (or all posts when section is
    None); 404 on an unknown section slug.  Each request is recorded as a
    FeedHit for analytics."""
    try:
        posts = Post.objects.section(section)
    except InvalidSection:
        raise Http404()

    if section is None:
        section = ALL_SECTION_NAME

    current_site = Site.objects.get_current()

    feed_title = "%s Blog: %s" % (current_site.name, section[0].upper() + section[1:])

    blog_url = "http://%s%s" % (current_site.domain, reverse("blog"))

    url_name, kwargs = "blog_feed", {"section": section}
    feed_url = "http://%s%s" % (current_site.domain, reverse(url_name, kwargs=kwargs))

    if posts:
        feed_updated = posts[0].published
    else:
        # Arbitrary fixed fallback date for an empty feed.
        feed_updated = datetime(2009, 8, 1, 0, 0, 0)

    # create a feed hit (recorded before rendering, so even failed renders
    # are counted)
    hit = FeedHit()
    hit.request_data = serialize_request(request)
    hit.save()

    atom = render_to_string("biblion/atom_feed.xml", {
        "feed_id": feed_url,
        "feed_title": feed_title,
        "blog_url": blog_url,
        "feed_url": feed_url,
        "feed_updated": feed_updated,
        "entries": posts,
        "current_site": current_site,
    })
    return HttpResponse(atom, mimetype="application/atom+xml")
| bsd-3-clause |
devinbalkind/eden | modules/unit_tests/s3/s3aaa.py | 3 | 164800 | # -*- coding: utf-8 -*-
#
# S3AAA Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3aaa.py
#
import unittest
from gluon import *
from gluon.storage import Storage
from s3.s3aaa import S3EntityRoleManager, S3Permission
from s3.s3fields import s3_meta_fields
# =============================================================================
class AuthUtilsTests(unittest.TestCase):
    """ S3Auth Utility Methods Tests """

    # -------------------------------------------------------------------------
    def testSystemRoles(self):
        """ Test if system roles are present """

        # The ADMIN/AUTHENTICATED/ANONYMOUS keys must exist with non-None ids.
        sr = current.auth.get_system_roles()
        self.assertTrue("ADMIN" in sr)
        self.assertTrue(sr.ADMIN is not None)
        self.assertTrue("AUTHENTICATED" in sr)
        self.assertTrue(sr.AUTHENTICATED is not None)
        self.assertTrue("ANONYMOUS" in sr)
        self.assertTrue(sr.ANONYMOUS is not None)

    # -------------------------------------------------------------------------
    def testGetUserIDByEmail(self):
        """ Test user account identification by email """

        user_id = current.auth.s3_get_user_id("normaluser@example.com")
        self.assertTrue(user_id is not None)

    # -------------------------------------------------------------------------
    def testImpersonate(self):
        """ Test s3_impersonate """

        auth = current.auth
        session = current.session
        sr = auth.get_system_roles()
        ADMIN = sr.ADMIN
        ANONYMOUS = sr.ANONYMOUS

        # Test-login as system administrator
        auth.s3_impersonate("admin@example.com")
        self.assertTrue(auth.s3_logged_in())
        self.assertTrue(auth.user is not None)
        self.assertTrue(ADMIN in session.s3.roles)
        self.assertTrue(ANONYMOUS in session.s3.roles)
        self.assertTrue(ADMIN in auth.user.realms)

        # Test with nonexistent user
        self.assertRaises(ValueError, auth.s3_impersonate, "NonExistentUser")
        # => should still be logged in as ADMIN
        self.assertTrue(auth.s3_logged_in())
        self.assertTrue(ADMIN in session.s3.roles)

        # Test with None => should logout and reset the roles
        auth.s3_impersonate(None)
        self.assertFalse(auth.s3_logged_in())
        self.assertTrue(session.s3.roles == [] or
                        ANONYMOUS in session.s3.roles)

        # Logout
        auth.s3_impersonate(None)
# =============================================================================
class SetRolesTests(unittest.TestCase):
    """
    Test AuthS3.set_roles: verifies that auth.user.realms (and, for
    policy 8, auth.user.delegations) are constructed correctly for the
    different security policies.
    """

    def setUp(self):
        """ Create three test organisations and store their pe_ids """

        xmlstr = """
<s3xml>
    <resource name="org_organisation" uuid="SRTO1">
        <data field="name">SetRoleTestsOrg1</data>
    </resource>
    <resource name="org_organisation" uuid="SRTO2">
        <data field="name">SetRoleTestsOrg2</data>
    </resource>
    <resource name="org_organisation" uuid="SRTO3">
        <data field="name">SetRoleTestsOrg3</data>
    </resource>
</s3xml>"""

        try:
            auth = current.auth
            # Bypass permission checks during fixture creation
            auth.override = True
            from lxml import etree
            xmltree = etree.ElementTree(etree.fromstring(xmlstr))
            s3db = current.s3db
            resource = s3db.resource("org_organisation")
            resource.import_xml(xmltree)
            # Look up the pe_ids of the imported organisations
            resource = s3db.resource("org_organisation",
                                     uid=["SRTO1", "SRTO2", "SRTO3"])
            rows = resource.select(["pe_id", "uuid"], as_rows=True)
            orgs = dict((row.uuid, row.pe_id) for row in rows)
            self.org1 = orgs["SRTO1"]
            self.org2 = orgs["SRTO2"]
            self.org3 = orgs["SRTO3"]
            auth.override = False
        except:
            current.db.rollback()
            auth.override = False
            raise

    # -------------------------------------------------------------------------
    def testSetRolesPolicy3(self):
        """ Test set_roles with policy 3 (controller ACLs, no realms) """

        auth = current.auth

        settings = current.deployment_settings
        settings.security.policy = 3
        auth.permission = S3Permission(auth)

        auth.s3_impersonate("normaluser@example.com")
        realms = auth.user.realms.keys()
        # Only AUTHENTICATED (2) and ANONYMOUS (3), both unrestricted (None)
        self.assertEqual(len(realms), 2)
        self.assertTrue(2 in realms)
        self.assertTrue(3 in realms)
        for r in auth.user.realms:
            self.assertEqual(auth.user.realms[r], None)

        auth.s3_impersonate(None)

    # -------------------------------------------------------------------------
    def testSetRolesPolicy4(self):
        """ Test set_roles with policy 4 (function ACLs) """

        auth = current.auth

        settings = current.deployment_settings
        settings.security.policy = 4
        auth.permission = S3Permission(auth)

        auth.s3_impersonate("normaluser@example.com")
        realms = auth.user.realms.keys()
        self.assertTrue(2 in realms)
        self.assertTrue(3 in realms)
        self.assertEqual(len(realms), 2)
        for r in auth.user.realms:
            self.assertEqual(auth.user.realms[r], None)

        auth.s3_impersonate(None)

    # -------------------------------------------------------------------------
    def testSetRolesPolicy5(self):
        """ Test set_roles with policy 5 (table ACLs) """

        auth = current.auth

        settings = current.deployment_settings
        settings.security.policy = 5
        auth.permission = S3Permission(auth)

        auth.s3_impersonate("normaluser@example.com")
        realms = auth.user.realms.keys()
        self.assertTrue(2 in realms)
        self.assertTrue(3 in realms)
        self.assertEqual(len(realms), 2)
        for r in auth.user.realms:
            self.assertEqual(auth.user.realms[r], None)

        auth.s3_impersonate(None)

    # -------------------------------------------------------------------------
    def testSetRolesPolicy6(self):
        """ Test set_roles with policy 6 (entity realms) """

        s3db = current.s3db
        auth = current.auth

        settings = current.deployment_settings
        settings.security.policy = 6
        auth.permission = S3Permission(auth)

        try:
            # Create a test role
            role = auth.s3_create_role("Example Role", uid="TESTROLE")

            # Assign normaluser this role for a realm
            user_id = auth.s3_get_user_id("normaluser@example.com")
            auth.s3_assign_role(user_id, role, for_pe=self.org1)

            auth.s3_impersonate("normaluser@example.com")
            realms = auth.user.realms.keys()
            self.assertEqual(len(realms), 3)
            self.assertTrue(2 in realms)
            self.assertTrue(3 in realms)
            self.assertTrue(role in realms)
            for r in auth.user.realms:
                if r == role:
                    # Realm-restricted role: realm is just the assigned entity
                    self.assertEqual(auth.user.realms[r], [self.org1])
                else:
                    self.assertEqual(auth.user.realms[r], None)
        finally:
            auth.s3_impersonate(None)
            auth.s3_delete_role("TESTROLE")
            current.db.rollback()

    # -------------------------------------------------------------------------
    def testSetRolesPolicy7(self):
        """ Test set_roles with policy 7 (hierarchical realms) """

        s3db = current.s3db
        auth = current.auth

        settings = current.deployment_settings
        settings.security.policy = 7
        auth.permission = S3Permission(auth)

        try:
            # Create a test role
            role = auth.s3_create_role("Example Role", uid="TESTROLE")

            # Create an OU-affiliation for two organisations
            org1 = self.org1
            org2 = self.org2
            s3db.pr_add_affiliation(org1, org2, role="TestRole")

            # Assign normaluser this role for the realm of the parent org
            user_id = auth.s3_get_user_id("normaluser@example.com")
            auth.s3_assign_role(user_id, role, for_pe=org1)

            auth.s3_impersonate("normaluser@example.com")
            realms = auth.user.realms.keys()
            self.assertTrue(2 in realms)
            self.assertTrue(3 in realms)
            self.assertTrue(role in realms)
            self.assertEqual(len(realms), 3)
            for r in auth.user.realms:
                if r == role:
                    # With hierarchical realms, the role extends to OU
                    # descendants of the assigned entity (org2 under org1)
                    self.assertTrue(org1 in auth.user.realms[r])
                    self.assertTrue(org2 in auth.user.realms[r])
                else:
                    self.assertEqual(auth.user.realms[r], None)
        finally:
            auth.s3_impersonate(None)
            auth.s3_delete_role("TESTROLE")
            current.db.rollback()

    # -------------------------------------------------------------------------
    def testSetRolesPolicy8(self):
        """ Test set_roles with policy 8 (delegations) """

        s3db = current.s3db
        auth = current.auth

        settings = current.deployment_settings
        settings.security.policy = 8
        auth.permission = S3Permission(auth)

        try:
            # Create a test role
            role = auth.s3_create_role("Test Group", uid="TESTGROUP")

            # Have two orgs, set org2 as OU descendant of org1
            org1 = self.org1
            org2 = self.org2
            s3db.pr_add_affiliation(org1, org2, role="TestOrgUnit")

            # Have a third org
            org3 = self.org3

            # Add the user as OU descendant of org3
            user_id = auth.s3_get_user_id("normaluser@example.com")
            user_pe = auth.s3_user_pe_id(user_id)
            s3db.pr_add_affiliation(org3, user_pe, role="TestStaff")

            # Assign normaluser the test role for org3
            auth.s3_assign_role(user_id, role, for_pe=org3)

            # Delegate the test role for org1 to org3
            auth.s3_delegate_role("TESTGROUP", org1, receiver=org3)

            # Impersonate as normal user
            auth.s3_impersonate("normaluser@example.com")

            # Check the realms
            realms = auth.user.realms.keys()
            self.assertTrue(2 in realms)
            self.assertTrue(3 in realms)
            self.assertTrue(role in realms)
            self.assertEqual(len(realms), 3)
            for r in auth.user.realms:
                if r == role:
                    self.assertTrue(org3 in auth.user.realms[r])
                else:
                    self.assertEqual(auth.user.realms[r], None)

            # Check the delegations: the delegated role extends from the
            # receiver (org3) to the delegating entity and its OU
            # descendants (org1 and org2)
            delegations = auth.user.delegations.keys()
            self.assertTrue(role in delegations)
            self.assertEqual(len(delegations), 1)
            realms = auth.user.delegations[role]
            self.assertTrue(org3 in realms)
            self.assertEqual(len(realms), 1)
            self.assertTrue(org1 in realms[org3])
            self.assertTrue(org2 in realms[org3])

            # Remove the delegations
            auth.s3_remove_delegation("TESTGROUP", org1, receiver=org3)

            # Check the delegations again
            delegations = auth.user.delegations.keys()
            self.assertFalse(role in delegations)
            self.assertEqual(len(delegations), 0)
        finally:
            s3db.pr_remove_affiliation(org1, org2, role="TestOrgUnit")
            # Fix: remove the staff affiliation that was actually created
            # above (org3 <= user_pe); the previous cleanup removed a
            # nonexistent (org1, org2, "TestStaff") affiliation instead
            s3db.pr_remove_affiliation(org3, user_pe, role="TestStaff")
            auth.s3_delete_role("TESTGROUP")
            current.db.rollback()
            auth.s3_impersonate(None)

    # -------------------------------------------------------------------------
    # Benchmark for set_roles, disabled by default (runtime-dependent)
    #def testPerformance(self):

        #MAX_RUNTIME = 18 # Maximum acceptable runtime per request in milliseconds
        ##MAX_RUNTIME = 3 # Maximum acceptable runtime per request in milliseconds (up to policy 7)

        #deployment_settings.security.policy = 8
        #from s3.s3aaa import S3Permission
        #auth.permission = S3Permission(auth)

        #try:
            #org1 = s3db.pr_get_pe_id("org_organisation", 1)
            #org2 = s3db.pr_get_pe_id("org_organisation", 2)
            #s3db.pr_add_affiliation(org1, org2, role="TestOrgUnit")
            #org3 = s3db.pr_get_pe_id("org_organisation", 3)
            #partners = s3db.pr_add_affiliation(org1, org3, role="TestPartners", role_type=9)
            #user = auth.s3_user_pe_id(auth.s3_get_user_id("normaluser@example.com"))
            #s3db.pr_add_affiliation(org3, user, role="TestStaff")
            #role = auth.s3_create_role("Test Group", uid="TESTGROUP")
            #dtable = s3db.pr_delegation
            #record = dtable.insert(role_id=partners, group_id=role)

            #def setRoles():
                #auth.s3_impersonate("normaluser@example.com")

            #import timeit
            #runtime = timeit.Timer(setRoles).timeit(number=100)
            #if runtime > (MAX_RUNTIME / 10.0):
                #raise AssertionError("s3_impersonate: maximum acceptable run time exceeded (%sms > %sms)" % (int(runtime * 10), MAX_RUNTIME))
            ## Logout
            #auth.s3_impersonate(None)
        #finally:
            #s3db.pr_remove_affiliation(org1, org2, role="TestOrgUnit")
            #s3db.pr_remove_affiliation(org1, org2, role="TestStaff")
            #s3db.pr_remove_affiliation(org1, org3, role="TestPartners")
            #auth.s3_delete_role("TESTGROUP")
            #current.db.rollback()

    # -------------------------------------------------------------------------
    def tearDown(self):
        """ Undo all test data changes and reset the override flag """

        current.db.rollback()
        current.auth.override = False
# =============================================================================
class RoleAssignmentTests(unittest.TestCase):
    """
    Test role assignments: assignment/withdrawal of roles for users,
    including auto-creation of roles that are referenced by UUID, and
    realm-restricted (for_pe) role lookups.
    """

    # -------------------------------------------------------------------------
    def testAssignRole(self):
        """ Test role assignment to a user """

        db = current.db
        auth = current.auth

        # Two role UUIDs that do not exist yet
        UUID1 = "TESTAUTOCREATEDROLE1"
        UUID2 = "TESTAUTOCREATEDROLE2"
        uuids = [UUID1, UUID2]

        table = auth.settings.table_group
        query1 = (table.deleted != True) & (table.uuid == UUID1)
        query2 = (table.deleted != True) & (table.uuid == UUID2)

        auth.s3_impersonate("admin@example.com")
        user_id = auth.user.id

        # Verify that neither role exists before the assignment
        row = db(query1).select(limitby=(0, 1)).first()
        self.assertEqual(row, None)
        row = db(query2).select(limitby=(0, 1)).first()
        self.assertEqual(row, None)

        # Assigning by UUID auto-creates both roles
        # (role name defaults to the UUID)
        auth.s3_assign_role(user_id, uuids, for_pe=0)
        row = db(query1).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertTrue(row.id > 0)
        self.assertTrue(row.role == UUID1)
        self.assertTrue(row.uuid == UUID1)
        row = db(query2).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertTrue(row.id > 0)
        self.assertTrue(row.role == UUID2)
        self.assertTrue(row.uuid == UUID2)

        # Deleting one role must not affect the other
        auth.s3_delete_role(UUID1)
        row = db(query1).select(limitby=(0, 1)).first()
        self.assertEqual(row, None)
        row = db(query2).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertTrue(row.id > 0)
        self.assertTrue(row.role == UUID2)
        self.assertTrue(row.uuid == UUID2)

        auth.s3_delete_role(UUID2)
        row = db(query1).select(limitby=(0, 1)).first()
        self.assertEqual(row, None)
        row = db(query2).select(limitby=(0, 1)).first()
        self.assertEqual(row, None)

    # -------------------------------------------------------------------------
    def testGetRoles(self):
        """ Test role lookup for a user """

        auth = current.auth

        UUID = "TESTAUTOCREATEDROLE"
        role_id = auth.s3_create_role(UUID, uid=UUID)

        try:
            auth.s3_impersonate("normaluser@example.com")
            user_id = auth.user.id

            # A role assigned with a particular for_pe value is only
            # returned by s3_get_roles for that same for_pe value
            # (or when no for_pe filter is given at all)
            auth.s3_assign_role(user_id, role_id, for_pe=None)
            roles = auth.s3_get_roles(user_id)
            self.assertTrue(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=None)
            self.assertTrue(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=0)
            self.assertFalse(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=1)
            self.assertFalse(role_id in roles)
            auth.s3_withdraw_role(user_id, role_id, for_pe=None)

            auth.s3_assign_role(user_id, role_id, for_pe=0)
            roles = auth.s3_get_roles(user_id)
            self.assertTrue(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=None)
            self.assertFalse(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=0)
            self.assertTrue(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=1)
            self.assertFalse(role_id in roles)
            auth.s3_withdraw_role(user_id, role_id, for_pe=0)

            auth.s3_assign_role(user_id, role_id, for_pe=1)
            roles = auth.s3_get_roles(user_id)
            self.assertTrue(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=None)
            self.assertFalse(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=0)
            self.assertFalse(role_id in roles)
            roles = auth.s3_get_roles(user_id, for_pe=1)
            self.assertTrue(role_id in roles)
            auth.s3_withdraw_role(user_id, role_id, for_pe=1)

        finally:
            auth.s3_delete_role(UUID)
            auth.s3_impersonate(None)

    # -------------------------------------------------------------------------
    def tearDown(self):
        # Undo all test data changes
        current.db.rollback()
# =============================================================================
class RecordOwnershipTests(unittest.TestCase):
    """
    Test record ownership: is_owner/get_owners/session ownership and
    the ownership_required decision for the different security policies.
    """

    # -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        # Define a throwaway test table with the standard meta fields
        # (incl. owned_by_user, owned_by_group, realm_entity)
        tablename = "ownership_test_table"
        current.db.define_table(tablename,
                                Field("name"),
                                *s3_meta_fields())

    @classmethod
    def tearDownClass(cls):
        # Drop the test table again
        table = current.db.ownership_test_table
        table.drop()

    # -------------------------------------------------------------------------
    def setUp(self):
        """ Create a test role and an ownerless test record """

        auth = current.auth

        # Create Test Role
        ROLE = "OWNERSHIPTESTROLE"
        self.role_id = auth.s3_create_role(ROLE, uid=ROLE)

        # Create a record which is not owned by any user, role or entity
        auth.s3_impersonate(None)
        self.table = current.db.ownership_test_table
        self.table.owned_by_user.default = None
        self.record_id = self.table.insert(name="Test")

    def tearDown(self):
        """ Remove the test record and role, log out """

        auth = current.auth

        # Delete test record
        current.db(self.table.id == self.record_id).delete()

        # Remove Test Role
        auth.s3_delete_role(self.role_id)

        # Logout
        auth.s3_impersonate(None)

    # -------------------------------------------------------------------------
    def testOwnershipRequiredController(self):
        """ Test ownership required for controller """

        auth = current.auth
        permission = auth.permission
        deployment_settings = current.deployment_settings

        # Expected result of ownership_required per security policy
        # (policies 1-2 have no per-record permissions)
        policies = {
            1: False,
            2: False,
            3: True,
            4: True,
            5: True,
            6: True,
            7: True,
            8: True,
            0: True,
        }

        # Remember the current policy so it can be restored afterwards
        current_policy = deployment_settings.get_security_policy()

        # Controller ACL: no unowned access, full access to owned records,
        # so ownership IS required wherever the policy evaluates ACLs
        auth.permission.update_acl(self.role_id,
                                   c="pr", f="person",
                                   uacl=auth.permission.NONE,
                                   oacl=auth.permission.ALL)

        # Assign Test Role to normaluser@example.com
        auth.s3_impersonate("normaluser@example.com")
        auth.s3_assign_role(auth.user.id, self.role_id)

        try:
            for policy in policies:
                deployment_settings.security.policy = policy
                # Re-instantiate S3Permission for the new policy
                permission = S3Permission(auth)

                ownership_required = permission.ownership_required
                o = ownership_required("update",
                                       "ownership_test_table",
                                       c="pr",
                                       f="person")

                required = policies[policy]
                msg = "ownership_required failed " \
                      "in policy %s (%s instead of %s)" % \
                      (policy, not required, required)
                if policies[policy]:
                    self.assertTrue(o, msg=msg)
                else:
                    self.assertFalse(o, msg=msg)
        finally:
            deployment_settings.security.policy = current_policy
            auth.permission.delete_acl(self.role_id, c="pr", f="person")

    # -------------------------------------------------------------------------
    def testOwnershipRequiredTable(self):
        """ Test ownership required for table """

        auth = current.auth
        permission = auth.permission
        deployment_settings = current.deployment_settings

        # Expected result of ownership_required per security policy
        policies = {
            1: False,
            2: False,
            3: False, # doesn't use table ACLs
            4: False, # doesn't use table ACLs
            5: True,
            6: True,
            7: True,
            8: True,
            0: True,
        }

        current_policy = deployment_settings.get_security_policy()

        # Table ACL: no unowned access, full access to owned records
        auth.permission.update_acl(self.role_id,
                                   t="ownership_test_table",
                                   uacl=auth.permission.NONE,
                                   oacl=auth.permission.ALL)

        # Assign Test Role to normaluser@example.com
        auth.s3_impersonate("normaluser@example.com")
        auth.s3_assign_role(auth.user.id, self.role_id)

        try:
            for policy in policies:
                deployment_settings.security.policy = policy
                permission = S3Permission(auth)

                ownership_required = permission.ownership_required
                o = ownership_required("update", "ownership_test_table")

                required = policies[policy]
                msg = "ownership_required failed " \
                      "in policy %s (%s instead of %s)" % \
                      (policy, not required, required)
                if policies[policy]:
                    self.assertTrue(o, msg=msg)
                else:
                    self.assertFalse(o, msg=msg)
        finally:
            deployment_settings.security.policy = current_policy
            auth.permission.delete_acl(self.role_id, t="ownership_test_table")

    # -------------------------------------------------------------------------
    def testSessionOwnership(self):
        """ Test session ownership methods """

        db = current.db
        auth = current.auth

        # Pick two tables
        # (no real DB access here, so records don't need to exist)
        s3db = current.s3db
        ptable = s3db.pr_person
        otable = s3db.org_organisation

        # Logout + clear_session_ownership before testing
        auth.s3_impersonate(None)
        auth.s3_clear_session_ownership()

        # Check general session ownership rules
        auth.s3_make_session_owner(ptable, 1)

        # No record ID should always return False
        self.assertFalse(auth.s3_session_owns(ptable, None))

        # Check for non-owned record
        self.assertFalse(auth.s3_session_owns(ptable, 2))

        # Check for owned record
        self.assertTrue(auth.s3_session_owns(ptable, 1))

        # If user is logged-in, session ownership is always False
        auth.s3_impersonate("normaluser@example.com")
        self.assertFalse(auth.s3_session_owns(ptable, 1))

        # Check record-wise clear_session_ownership
        auth.s3_impersonate(None)
        auth.s3_make_session_owner(ptable, 1)
        auth.s3_make_session_owner(ptable, 2)
        self.assertTrue(auth.s3_session_owns(ptable, 1))
        self.assertTrue(auth.s3_session_owns(ptable, 2))
        auth.s3_clear_session_ownership(ptable, 1)
        self.assertFalse(auth.s3_session_owns(ptable, 1))
        self.assertTrue(auth.s3_session_owns(ptable, 2))

        # Check table-wise clear_session_ownership
        auth.s3_make_session_owner(ptable, 1)
        auth.s3_make_session_owner(ptable, 2)
        auth.s3_make_session_owner(otable, 1)
        auth.s3_make_session_owner(otable, 2)
        self.assertTrue(auth.s3_session_owns(ptable, 1))
        self.assertTrue(auth.s3_session_owns(ptable, 2))
        self.assertTrue(auth.s3_session_owns(otable, 1))
        self.assertTrue(auth.s3_session_owns(otable, 2))
        auth.s3_clear_session_ownership(ptable)
        self.assertFalse(auth.s3_session_owns(ptable, 1))
        self.assertFalse(auth.s3_session_owns(ptable, 2))
        self.assertTrue(auth.s3_session_owns(otable, 1))
        self.assertTrue(auth.s3_session_owns(otable, 2))

        # Check global clear_session_ownership
        auth.s3_make_session_owner(ptable, 1)
        auth.s3_make_session_owner(ptable, 2)
        auth.s3_make_session_owner(otable, 1)
        auth.s3_make_session_owner(otable, 2)
        self.assertTrue(auth.s3_session_owns(ptable, 1))
        self.assertTrue(auth.s3_session_owns(ptable, 2))
        self.assertTrue(auth.s3_session_owns(otable, 1))
        self.assertTrue(auth.s3_session_owns(otable, 2))
        auth.s3_clear_session_ownership()
        self.assertFalse(auth.s3_session_owns(ptable, 1))
        self.assertFalse(auth.s3_session_owns(ptable, 2))
        self.assertFalse(auth.s3_session_owns(otable, 1))
        self.assertFalse(auth.s3_session_owns(otable, 2))

    # -------------------------------------------------------------------------
    def testOwnershipPublicRecord(self):
        """ Test ownership for a public record """

        auth = current.auth

        s3_impersonate = auth.s3_impersonate
        is_owner = auth.permission.is_owner

        assertTrue = self.assertTrue
        assertFalse = self.assertFalse

        auth.s3_clear_session_ownership()

        table = self.table
        record_id = self.record_id

        # Admin owns all records
        s3_impersonate("admin@example.com")
        assertTrue(is_owner(table, record_id))

        # Normal owns all public records
        s3_impersonate("normaluser@example.com")
        assertTrue(is_owner(table, record_id))

        # Unauthenticated users never own a record
        s3_impersonate(None)
        assertFalse(is_owner(table, record_id))

        # ...unless the session owns the record
        auth.s3_make_session_owner(table, record_id)
        assertTrue(is_owner(table, record_id))

    # -------------------------------------------------------------------------
    def testOwnershipAdminOwnedRecord(self):
        """ Test ownership for an Admin-owned record """

        auth = current.auth

        s3_impersonate = auth.s3_impersonate
        is_owner = auth.permission.is_owner

        assertTrue = self.assertTrue
        assertFalse = self.assertFalse

        auth.s3_clear_session_ownership()

        table = self.table
        record_id = self.record_id

        # Make Admin owner of the record
        user_id = auth.s3_get_user_id("admin@example.com")
        current.db(table.id == record_id).update(owned_by_user=user_id)

        # Admin owns all records
        s3_impersonate("admin@example.com")
        assertTrue(is_owner(table, record_id))

        # Normal does not own this record
        s3_impersonate("normaluser@example.com")
        assertFalse(is_owner(table, record_id))

        # Unauthenticated does not own this record
        s3_impersonate(None)
        assertFalse(is_owner(table, record_id))

        # ...unless the session owns the record
        auth.s3_make_session_owner(table, record_id)
        assertTrue(is_owner(table, record_id))

    # -------------------------------------------------------------------------
    def testOwnershipUserOwnedRecord(self):
        """ Test ownership for a user-owned record """

        auth = current.auth

        s3_impersonate = auth.s3_impersonate
        is_owner = auth.permission.is_owner

        assertTrue = self.assertTrue
        assertFalse = self.assertFalse

        auth.s3_clear_session_ownership()

        table = self.table
        record_id = self.record_id

        # Make normaluser the owner of the record
        user_id = auth.s3_get_user_id("normaluser@example.com")
        current.db(table.id == record_id).update(owned_by_user=user_id)

        # Admin owns all records
        s3_impersonate("admin@example.com")
        assertTrue(is_owner(table, record_id))

        # Normal owns this record
        s3_impersonate("normaluser@example.com")
        assertTrue(is_owner(table, record_id))

        # Unauthenticated does not own a record
        s3_impersonate(None)
        assertFalse(is_owner(table, record_id))

        # ...unless the session owns the record
        auth.s3_make_session_owner(table, record_id)
        assertTrue(is_owner(table, record_id))

    # -------------------------------------------------------------------------
    def testOwnershipGroupOwnedRecord(self):
        """ Test ownership for a collectively owned record """

        auth = current.auth

        s3_impersonate = auth.s3_impersonate
        is_owner = auth.permission.is_owner

        assertTrue = self.assertTrue
        assertFalse = self.assertFalse

        auth.s3_clear_session_ownership()

        table = self.table
        record_id = self.record_id

        # Owned by Admin (user) and by the AUTHENTICATED role (group)
        sr = auth.get_system_roles()
        user_id = auth.s3_get_user_id("admin@example.com")
        current.db(table.id == record_id).update(owned_by_user=user_id,
                                                 owned_by_group=sr.AUTHENTICATED)

        # Admin owns all records
        s3_impersonate("admin@example.com")
        assertTrue(is_owner(table, record_id))

        # Normal owns this record as member of AUTHENTICATED
        s3_impersonate("normaluser@example.com")
        assertTrue(is_owner(table, record_id))

        # Unauthenticated does not own this record
        s3_impersonate(None)
        assertFalse(is_owner(table, record_id))

        # ...unless the session owns the record
        auth.s3_make_session_owner(table, record_id)
        assertTrue(is_owner(table, record_id))

    # -------------------------------------------------------------------------
    def testOwnershipOrganisationOwnedRecord(self):
        """ Test group-ownership for an entity-owned record """

        auth = current.auth

        s3_impersonate = auth.s3_impersonate
        is_owner = auth.permission.is_owner

        assertTrue = self.assertTrue
        assertFalse = self.assertFalse

        auth.s3_clear_session_ownership()

        table = self.table
        record_id = self.record_id

        # Assume we have at least one org
        org = current.s3db.pr_get_pe_id("org_organisation", 1)
        role = self.role_id

        # Make test role owner of the record and add to org's realm
        user_id = auth.s3_get_user_id("admin@example.com")
        current.db(table.id == record_id).update(owned_by_user=user_id,
                                                 owned_by_group=role,
                                                 realm_entity=org)

        # Admin owns all records
        s3_impersonate("admin@example.com")
        assertTrue(is_owner(table, record_id))

        # Normal user does not own the record
        s3_impersonate("normaluser@example.com")
        user_id = auth.user.id
        assertFalse(is_owner(table, record_id))

        # ...unless they have the role for this org
        auth.s3_assign_role(user_id, role, for_pe=org)
        assertTrue(is_owner(table, record_id))
        auth.s3_withdraw_role(user_id, role, for_pe=[])
        assertFalse(is_owner(table, record_id))

        # ....or have the role without limitation (any org)
        auth.s3_assign_role(user_id, role, for_pe=0)
        assertTrue(is_owner(table, record_id))
        auth.s3_withdraw_role(user_id, role, for_pe=[])
        assertFalse(is_owner(table, record_id))

        # Unauthenticated does not own this record
        s3_impersonate(None)
        assertFalse(is_owner(table, record_id))

        # ...unless the session owns the record
        auth.s3_make_session_owner(table, record_id)
        assertTrue(is_owner(table, record_id))

    # -------------------------------------------------------------------------
    def testOwnershipOverride(self):
        """ Test override of owners in is_owner """

        auth = current.auth

        s3_impersonate = auth.s3_impersonate
        is_owner = auth.permission.is_owner

        assertTrue = self.assertTrue
        assertFalse = self.assertFalse

        auth.s3_clear_session_ownership()

        table = self.table
        record_id = self.record_id

        org = current.s3db.pr_get_pe_id("org_organisation", 1)
        role = self.role_id

        # Make Admin/test role/org the owners of the record
        user_id = auth.s3_get_user_id("admin@example.com")
        current.db(table.id == record_id).update(realm_entity=org,
                                                 owned_by_group=role,
                                                 owned_by_user=user_id)

        # Normal user does not own the record
        auth.s3_impersonate("normaluser@example.com")
        assertFalse(auth.permission.is_owner(table, record_id))

        # ...unless we override the record's owner stamp:
        # (entity, group, user) = (None, None, None) makes it "public"
        owners_override = (None, None, None)
        assertTrue(is_owner(table, record_id, owners=owners_override))

    # -------------------------------------------------------------------------
    def testGetOwners(self):
        """ Test lookup of record owners """

        auth = current.auth

        s3_impersonate = auth.s3_impersonate
        is_owner = auth.permission.is_owner

        assertEqual = self.assertEqual

        auth.s3_clear_session_ownership()

        table = self.table
        record_id = self.record_id

        user = auth.s3_get_user_id("admin@example.com")
        role = self.role_id
        org = current.s3db.pr_get_pe_id("org_organisation", 1)

        # get_owners returns a (realm_entity, owner_group, owner_user)
        # tuple, all None when table or record are missing/unknown
        e, r, u = auth.permission.get_owners(table, None)
        assertEqual(e, None)
        assertEqual(r, None)
        assertEqual(u, None)

        e, r, u = auth.permission.get_owners(None, record_id)
        assertEqual(e, None)
        assertEqual(r, None)
        assertEqual(u, None)

        e, r, u = auth.permission.get_owners(None, None)
        assertEqual(e, None)
        assertEqual(r, None)
        assertEqual(u, None)

        # The test record starts without any owners
        e, r, u = auth.permission.get_owners(table, record_id)
        assertEqual(e, None)
        assertEqual(r, None)
        assertEqual(u, None)

        # After stamping the record, get_owners returns the owners;
        # a tablename works the same as a table object
        current.db(table.id == record_id).update(owned_by_user=user,
                                                 owned_by_group=role,
                                                 realm_entity=org)
        e, r, u = auth.permission.get_owners(table, record_id)
        assertEqual(e, org)
        assertEqual(r, role)
        assertEqual(u, user)

        e, r, u = auth.permission.get_owners(table._tablename, record_id)
        assertEqual(e, org)
        assertEqual(r, role)
        assertEqual(u, user)
# =============================================================================
class ACLManagementTests(unittest.TestCase):
    """ Test ACL management/lookup functions """

    # -------------------------------------------------------------------------
    def testRequiredACL(self):
        """ Test lambda to compute the required ACL """

        p = current.auth.permission
        assertEqual = self.assertEqual

        assertEqual(p.required_acl(["read"]), p.READ)
        assertEqual(p.required_acl(["create"]), p.CREATE)
        assertEqual(p.required_acl(["update"]), p.UPDATE)
        assertEqual(p.required_acl(["delete"]), p.DELETE)
        # Multiple methods combine bitwise
        assertEqual(p.required_acl(["create", "update"]), p.CREATE | p.UPDATE)
        # Empty or unknown method lists require nothing
        assertEqual(p.required_acl([]), p.NONE)
        assertEqual(p.required_acl(["invalid"]), p.NONE)

    # -------------------------------------------------------------------------
    def testMostPermissive(self):
        """ Test lambda to compute the most permissive ACL """

        p = current.auth.permission
        # (uacl, oacl) pairs are OR'ed element-wise
        self.assertEqual(p.most_permissive([(p.NONE, p.READ),
                                            (p.READ, p.READ)]),
                         (p.READ, p.READ))
        self.assertEqual(p.most_permissive([(p.NONE, p.ALL),
                                            (p.CREATE, p.ALL),
                                            (p.READ, p.ALL)]),
                         (p.READ | p.CREATE, p.ALL))

    # -------------------------------------------------------------------------
    def testMostRestrictive(self):
        """ Test lambda to compute the most restrictive ACL """

        p = current.auth.permission
        # (uacl, oacl) pairs are AND'ed element-wise
        self.assertEqual(p.most_restrictive([(p.NONE, p.READ),
                                             (p.READ, p.READ)]),
                         (p.NONE, p.READ))
        self.assertEqual(p.most_restrictive([(p.CREATE, p.ALL),
                                             (p.READ, p.READ)]),
                         (p.NONE, p.READ))

    # -------------------------------------------------------------------------
    def testUpdateControllerACL(self):
        """ Test update/delete of a controller ACL """

        auth = current.auth

        table = auth.permission.table
        self.assertNotEqual(table, None)

        group_id = auth.s3_create_role("Test Role", uid="TEST")
        acl_id = None
        try:
            self.assertTrue(group_id is not None and group_id != 0)

            c = "pr"
            f = "person"
            uacl = auth.permission.NONE
            oacl = auth.permission.ALL

            acl_id = auth.permission.update_acl(group_id,
                                                c=c, f=f,
                                                uacl=uacl, oacl=oacl)
            self.assertNotEqual(acl_id, None)
            self.assertNotEqual(acl_id, 0)

            acl = table[acl_id]
            self.assertNotEqual(acl, None)
            self.assertEqual(acl.controller, c)
            self.assertEqual(acl.function, f)
            self.assertEqual(acl.tablename, None)
            self.assertEqual(acl.unrestricted, False)
            self.assertEqual(acl.entity, None)
            self.assertEqual(acl.uacl, uacl)
            self.assertEqual(acl.oacl, oacl)
            self.assertFalse(acl.deleted)

            success = auth.permission.delete_acl(group_id,
                                                 c=c, f=f)
            self.assertTrue(success is not None and success > 0)

            # ACL must be soft-deleted with the group key archived
            acl = table[acl_id]
            self.assertNotEqual(acl, None)
            self.assertTrue(acl.deleted)
            # Fix: the original assertTrue(x, expected) passed the expected
            # value as the assertion *message*, never comparing the two
            self.assertEqual(acl.deleted_fk, '{"group_id": %d}' % group_id)
        finally:
            if acl_id:
                del table[acl_id]
            auth.s3_delete_role(group_id)

    # -------------------------------------------------------------------------
    def testUpdateTableACL(self):
        """ Test update/delete of a table-ACL """

        auth = current.auth

        table = auth.permission.table
        self.assertNotEqual(table, None)

        group_id = auth.s3_create_role("Test Role", uid="TEST")
        acl_id = None
        try:
            self.assertTrue(group_id is not None and group_id != 0)

            c = "pr"
            f = "person"
            t = "pr_person"
            uacl = auth.permission.NONE
            oacl = auth.permission.ALL

            acl_id = auth.permission.update_acl(group_id,
                                                c=c, f=f, t=t,
                                                uacl=uacl, oacl=oacl)
            self.assertNotEqual(acl_id, None)
            self.assertNotEqual(acl_id, 0)

            # With t given, controller/function must NOT be stored
            acl = table[acl_id]
            self.assertNotEqual(acl, None)
            self.assertEqual(acl.controller, None)
            self.assertEqual(acl.function, None)
            self.assertEqual(acl.tablename, t)
            self.assertEqual(acl.unrestricted, False)
            self.assertEqual(acl.entity, None)
            self.assertEqual(acl.uacl, uacl)
            self.assertEqual(acl.oacl, oacl)
            self.assertFalse(acl.deleted)

            success = auth.permission.delete_acl(group_id,
                                                 c=c, f=f, t=t)
            self.assertTrue(success is not None and success > 0)

            acl = table[acl_id]
            self.assertNotEqual(acl, None)
            self.assertTrue(acl.deleted)
            # Fix: was assertTrue(x, expected) - expected value was used
            # as the assertion message, so nothing was actually compared
            self.assertEqual(acl.deleted_fk, '{"group_id": %d}' % group_id)
        finally:
            if acl_id:
                del table[acl_id]
            auth.s3_delete_role(group_id)

    # -------------------------------------------------------------------------
    def testApplicableACLsPolicy8(self):
        """ Test applicable_acls with delegations (policy 8) """

        db = current.db
        auth = current.auth
        s3db = current.s3db

        # Create 3 test organisations
        xmlstr = """
<s3xml>
    <resource name="org_organisation" uuid="TAAO1">
        <data field="name">TestApplicableACLsOrg1</data>
    </resource>
    <resource name="org_organisation" uuid="TAAO2">
        <data field="name">TestApplicableACLsOrg2</data>
    </resource>
    <resource name="org_organisation" uuid="TAAO3">
        <data field="name">TestApplicableACLsOrg3</data>
    </resource>
</s3xml>"""

        try:
            auth.override = True
            from lxml import etree
            xmltree = etree.ElementTree(etree.fromstring(xmlstr))
            resource = s3db.resource("org_organisation")
            resource.import_xml(xmltree)
            resource = s3db.resource("org_organisation",
                                     uid=["TAAO1", "TAAO2", "TAAO3"])
            rows = resource.select(["pe_id", "uuid"], as_rows=True)
            orgs = dict((row.uuid, row.pe_id) for row in rows)
            org1 = orgs["TAAO1"]
            org2 = orgs["TAAO2"]
            org3 = orgs["TAAO3"]
            auth.override = False
        except:
            db.rollback()
            auth.override = False
            raise

        try:
            # Have two orgs, set org2 as OU descendant of org1
            s3db.pr_add_affiliation(org1, org2, role="TestOrgUnit")

            # Set org3 as non-OU (role_type=9) partner of org1
            partners = s3db.pr_add_affiliation(org1, org3, role="TestPartners", role_type=9)
            self.assertNotEqual(partners, None)

            # Add the user as OU descendant of org3
            user_id = auth.s3_get_user_id("normaluser@example.com")
            user_pe = auth.s3_user_pe_id(user_id)
            self.assertNotEqual(user_pe, None)
            s3db.pr_add_affiliation(org3, user_pe, role="TestStaff")

            # Create a TESTGROUP and assign a table ACL
            acl = auth.permission
            role = auth.s3_create_role("Test Group", None,
                                       dict(c="org", f="office", uacl=acl.ALL, oacl=acl.ALL),
                                       dict(t="org_office", uacl=acl.READ, oacl=acl.ALL),
                                       uid="TESTGROUP")
            auth.s3_assign_role(user_id, role)

            # We use delegations (policy 8)
            current.deployment_settings.security.policy = 8
            from s3.s3aaa import S3Permission
            auth.permission = S3Permission(auth)

            # Impersonate as normal user
            auth.s3_impersonate("normaluser@example.com")
            realms = auth.user.realms
            delegations = auth.user.delegations

            # Without a delegation, no ACLs apply for org2
            acls = auth.permission.applicable_acls(acl.DELETE,
                                                   realms,
                                                   delegations,
                                                   c="org",
                                                   f="office",
                                                   t="org_office",
                                                   entity=org2)
            self.assertTrue(isinstance(acls, Storage))
            self.assertEqual(acls, Storage())

            # Delegate TESTGROUP to the TestPartners
            auth.s3_delegate_role(role, org1, role="TestPartners")

            # Update realms and delegations
            auth.s3_impersonate("normaluser@example.com")
            realms = auth.user.realms
            delegations = auth.user.delegations

            # With the delegation, the TESTGROUP table ACL applies for org2
            acls = auth.permission.applicable_acls(acl.DELETE,
                                                   realms,
                                                   delegations,
                                                   c="org",
                                                   f="office",
                                                   t="org_office",
                                                   entity=org2)
            self.assertTrue(isinstance(acls, Storage))
            self.assertTrue(org2 in acls)
            self.assertEqual(acls[org2], (acl.READ, acl.ALL))
        finally:
            s3db.pr_remove_affiliation(org1, org2, role="TestOrgUnit")
            # Fix: remove the staff affiliation that was actually created
            # above (org3 <= user_pe); the previous cleanup removed a
            # nonexistent (org1, org2, "TestStaff") affiliation instead
            s3db.pr_remove_affiliation(org3, user_pe, role="TestStaff")
            s3db.pr_remove_affiliation(org1, org3, role="TestPartners")
            auth.s3_delete_role("TESTGROUP")
            db.rollback()
# =============================================================================
class HasPermissionTests(unittest.TestCase):
    """ Test permission check method """
    # -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        """
            Create a throwaway test table and three test roles
            (TESTREADER / TESTEDITOR / TESTADMIN) with controller-,
            function- and table-level ACLs; committed so that they are
            visible to every test in this class.
        """
        # Create test table
        db = current.db
        tablename = "org_permission_test"
        db.define_table(tablename,
                        Field("name"),
                        *s3_meta_fields())
        # Create test roles and ACLs
        auth = current.auth
        acl = auth.permission
        READ = acl.READ
        CREATE = acl.READ|acl.CREATE
        UPDATE = acl.READ|acl.UPDATE
        WRITE = acl.READ|acl.CREATE|acl.UPDATE
        ALL = acl.ALL
        # TESTREADER: read at controller level, create at function level,
        # write at table level
        TESTREADER = "TESTREADER"
        auth.s3_create_role(TESTREADER, None,
                            dict(c="org",
                                 uacl=READ, oacl=UPDATE),
                            dict(c="org", f="permission_test",
                                 uacl=CREATE, oacl=UPDATE),
                            dict(t="org_permission_test",
                                 uacl=WRITE, oacl=UPDATE),
                            uid=TESTREADER)
        # TESTEDITOR: read+create+update at every level
        TESTEDITOR = "TESTEDITOR"
        auth.s3_create_role(TESTEDITOR, None,
                            dict(c="org",
                                 uacl=WRITE, oacl=UPDATE),
                            dict(c="org", f="permission_test",
                                 uacl=WRITE, oacl=UPDATE),
                            dict(t="org_permission_test",
                                 uacl=WRITE, oacl=UPDATE),
                            uid=TESTEDITOR)
        # TESTADMIN: all permissions at every level
        TESTADMIN = "TESTADMIN"
        auth.s3_create_role(TESTADMIN, None,
                            dict(c="org",
                                 uacl=ALL, oacl=ALL),
                            dict(c="org", f="permission_test",
                                 uacl=ALL, oacl=ALL),
                            dict(t="org_permission_test",
                                 uacl=ALL, oacl=ALL),
                            uid=TESTADMIN)
        db.commit()
    @classmethod
    def tearDownClass(cls):
        """ Remove the test roles and drop the test table """
        # Remove test roles
        s3_delete_role = current.auth.s3_delete_role
        s3_delete_role("TESTREADER")
        s3_delete_role("TESTEDITOR")
        s3_delete_role("TESTADMIN")
        # Remove test table
        table = current.db.org_permission_test
        table.drop()
        current.db.commit()
    # -------------------------------------------------------------------------
    def setUp(self):
        """
            Per-test fixture: remember the current security policy, look
            up the test role IDs, create three test organisations (one
            realm entity each) and one test record per realm, then log
            out and disable auth override.
        """
        db = current.db
        auth = current.auth
        s3db = current.s3db
        # Store current security policy
        settings = current.deployment_settings
        self.policy = settings.get_security_policy()
        # Get the role IDs
        gtable = auth.settings.table_group
        row = db(gtable.uuid=="TESTREADER").select(limitby=(0, 1)).first()
        self.reader = row.id
        row = db(gtable.uuid=="TESTEDITOR").select(limitby=(0, 1)).first()
        self.editor = row.id
        row = db(gtable.uuid=="TESTADMIN").select(limitby=(0, 1)).first()
        self.admin = row.id
        # Impersonate Admin
        auth.s3_impersonate("admin@example.com")
        # Create test entities
        table = s3db.org_organisation
        self.org = []
        for i in xrange(3):
            record_id = table.insert(name="PermissionTestOrganisation%s" % i)
            record = Storage(id=record_id)
            # update_super gives the organisation its pe_id (realm entity key)
            s3db.update_super(table, record)
            self.org.append(record.pe_id)
        # Create test records, one per organisation realm
        table = current.db.org_permission_test
        self.record1 = table.insert(name="TestRecord1",
                                    owned_by_user=auth.user.id,
                                    realm_entity=self.org[0])
        self.record2 = table.insert(name="TestRecord2",
                                    owned_by_user=auth.user.id,
                                    realm_entity=self.org[1])
        self.record3 = table.insert(name="TestRecord3",
                                    owned_by_user=auth.user.id,
                                    realm_entity=self.org[2])
        # Remove session ownership
        auth.s3_clear_session_ownership()
        # Logout + turn override off
        auth.s3_impersonate(None)
        auth.override = False
    def tearDown(self):
        """ Roll back the test transaction and restore global settings """
        # NOTE(review): this assignment is unused (rebound below)
        table = current.s3db.org_organisation
        # Rollback
        current.db.rollback()
        # Remove test records
        table = current.s3db.org_permission_test
        table.truncate()
        # Restore security policy
        current.deployment_settings.security.policy = self.policy
        # Logout + turn override off
        auth = current.auth
        auth.s3_impersonate(None)
        auth.override = False
    # -------------------------------------------------------------------------
    def testPolicy1(self):
        """ Test permission check with policy 1 """
        auth = current.auth
        current.deployment_settings.security.policy = 1
        auth.permission = S3Permission(auth)
        has_permission = auth.s3_has_permission
        tablename = "org_permission_test"
        # Check anonymous: read-only
        auth.s3_impersonate(None)
        permitted = has_permission("read", table=tablename)
        self.assertTrue(permitted)
        permitted = has_permission("update", table=tablename)
        self.assertFalse(permitted)
        # Check authenticated: read and write
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("read", table=tablename)
        self.assertTrue(permitted)
        permitted = has_permission("update", table=tablename)
        self.assertTrue(permitted)
    # -------------------------------------------------------------------------
    def testPolicy3(self):
        """ Test permission check with policy 3 """
        auth = current.auth
        current.deployment_settings.security.policy = 3
        auth.permission = S3Permission(auth)
        has_permission = auth.s3_has_permission
        c = "org"
        f = "permission_test"
        tablename = "org_permission_test"
        assertTrue = self.assertTrue
        assertFalse = self.assertFalse
        # Check anonymous
        auth.s3_impersonate(None)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Test with TESTREADER
        auth.s3_assign_role(auth.user.id, self.reader)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertTrue(permitted)
        permitted = has_permission("create", c=c, f=f, table=tablename)
        assertFalse(permitted) # Function ACL not applicable in policy 3
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        auth.s3_withdraw_role(auth.user.id, self.reader)
        # Test with TESTEDITOR
        auth.s3_assign_role(auth.user.id, self.editor)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertTrue(permitted)
        permitted = has_permission("create", c=c, f=f, table=tablename)
        assertTrue(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        auth.s3_withdraw_role(auth.user.id, self.editor)
    # -------------------------------------------------------------------------
    def testPolicy4(self):
        """ Test permission check with policy 4 """
        auth = current.auth
        current.deployment_settings.security.policy = 4
        auth.permission = S3Permission(auth)
        has_permission = auth.s3_has_permission
        c = "org"
        f = "permission_test"
        tablename = "org_permission_test"
        assertTrue = self.assertTrue
        assertFalse = self.assertFalse
        # Check anonymous
        auth.s3_impersonate(None)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Test with TESTREADER
        auth.s3_assign_role(auth.user.id, self.reader)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertTrue(permitted)
        permitted = has_permission("create", c=c, f=f, table=tablename)
        assertTrue(permitted) # Function ACL overrides controller ACL
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        auth.s3_withdraw_role(auth.user.id, self.reader)
        # Test with TESTEDITOR
        auth.s3_assign_role(auth.user.id, self.editor)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertTrue(permitted)
        permitted = has_permission("create", c=c, f=f, table=tablename)
        assertTrue(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        auth.s3_withdraw_role(auth.user.id, self.editor)
    # -------------------------------------------------------------------------
    def testPolicy5(self):
        """ Test permission check with policy 5 """
        auth = current.auth
        current.deployment_settings.security.policy = 5
        auth.permission = S3Permission(auth)
        has_permission = auth.s3_has_permission
        accessible_url = auth.permission.accessible_url
        c = "org"
        f = "permission_test"
        tablename = "org_permission_test"
        assertTrue = self.assertTrue
        assertFalse = self.assertFalse
        # Check anonymous
        auth.s3_impersonate(None)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        url = accessible_url(c=c, f=f)
        self.assertEqual(url, False)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        url = accessible_url(c=c, f=f)
        self.assertEqual(url, False)
        # Test with TESTREADER
        auth.s3_assign_role(auth.user.id, self.reader)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertTrue(permitted)
        permitted = has_permission("create", c=c, f=f, table=tablename)
        assertTrue(permitted) # Function ACL overrides controller ACL
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted) # Page ACL blocks Table ACL
        # Toggle page ACL: grant UPDATE, check, then revoke it again
        acl = auth.permission
        auth.permission.update_acl("TESTREADER", c=c, f=f,
                                   uacl=acl.READ|acl.CREATE|acl.UPDATE,
                                   oacl=acl.READ|acl.CREATE|acl.UPDATE)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        auth.permission.update_acl("TESTREADER", c=c, f=f,
                                   uacl=acl.READ|acl.CREATE,
                                   oacl=acl.READ|acl.CREATE|acl.UPDATE)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        url = accessible_url(c=c, f=f)
        self.assertNotEqual(url, False)
        auth.s3_withdraw_role(auth.user.id, self.reader)
        # Test with TESTEDITOR
        auth.s3_assign_role(auth.user.id, self.editor)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        auth.s3_withdraw_role(auth.user.id, self.editor)
    # -------------------------------------------------------------------------
    def testPolicy6(self):
        """ Test permission check with policy 6 (role realms) """
        auth = current.auth
        current.deployment_settings.security.policy = 6
        auth.permission = S3Permission(auth)
        has_permission = auth.s3_has_permission
        c = "org"
        f = "permission_test"
        tablename = "org_permission_test"
        assertTrue = self.assertTrue
        assertFalse = self.assertFalse
        # Check anonymous
        auth.s3_impersonate(None)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Test with TESTREADER (for_pe=0 means unrestricted realm)
        auth.s3_assign_role(auth.user.id, self.reader, for_pe=0)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertTrue(permitted)
        permitted = has_permission("create", c=c, f=f, table=tablename)
        assertTrue(permitted) # Function ACL overrides controller ACL
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted) # Page ACL blocks Table ACL
        auth.s3_withdraw_role(auth.user.id, self.reader, for_pe=[])
        # Test with TESTEDITOR with universal realm
        auth.s3_assign_role(auth.user.id, self.editor, for_pe=0)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertTrue(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
        auth.s3_withdraw_role(auth.user.id, self.editor, for_pe=[])
        # Test with TESTEDITOR with limited realm (org[0] only)
        auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[0])
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
        permitted = has_permission("delete", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
        # Extend realm to org[1]
        auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[1])
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertTrue(permitted)
        # Withdraw role for one realm
        auth.s3_withdraw_role(auth.user.id, self.editor, for_pe=self.org[0])
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertTrue(permitted)
        # Withdraw role for all realms
        auth.s3_withdraw_role(auth.user.id, self.editor, for_pe=[])
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
    # -------------------------------------------------------------------------
    def testPolicy7(self):
        """ Test permission check with policy 7 (hierarchical realms) """
        auth = current.auth
        s3db = current.s3db
        current.deployment_settings.security.policy = 7
        auth.permission = S3Permission(auth)
        has_permission = auth.s3_has_permission
        c = "org"
        f = "permission_test"
        tablename = "org_permission_test"
        assertTrue = self.assertTrue
        assertFalse = self.assertFalse
        # Check anonymous
        auth.s3_impersonate(None)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Test with TESTEDITOR with limited realm
        auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[0])
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
        # Make org[1] a sub-entity of org[0]
        s3db.pr_add_affiliation(self.org[0], self.org[1], role="TestOrgUnit")
        # Reload realms and test again (re-impersonate rebuilds the realms)
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertTrue(permitted) # Should now have access
        s3db.pr_remove_affiliation(self.org[0], self.org[1], role="TestOrgUnit")
        # Make org[0] a sub-entity of org[1]
        s3db.pr_add_affiliation(self.org[1], self.org[0], role="TestOrgUnit")
        # Reload realms
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted) # Should no longer have access
        # Switch realm
        auth.s3_withdraw_role(auth.user.id, self.editor, for_pe=self.org[0])
        auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[1])
        # Reload realms
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertTrue(permitted) # Should have access again
        # Remove org[0] from realm
        s3db.pr_remove_affiliation(self.org[1], self.org[0], role="TestOrgUnit")
        # Reload realms
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted) # Should no longer have access
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertTrue(permitted)
        # Withdraw TESTEDITOR role
        auth.s3_withdraw_role(auth.user.id, self.editor, for_pe=[])
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
    # -------------------------------------------------------------------------
    def testPolicy8(self):
        """ Test permission check with policy 8 (delegations) """
        auth = current.auth
        s3db = current.s3db
        current.deployment_settings.security.policy = 8
        auth.permission = S3Permission(auth)
        user = auth.s3_user_pe_id(auth.s3_get_user_id("normaluser@example.com"))
        has_permission = auth.s3_has_permission
        c = "org"
        f = "permission_test"
        tablename = "org_permission_test"
        assertTrue = self.assertTrue
        assertFalse = self.assertFalse
        # Check anonymous
        auth.s3_impersonate(None)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        permitted = has_permission("read", c=c, f=f, table=tablename)
        assertFalse(permitted)
        # Add the user as staff member (=OU) of org[2]
        s3db.pr_add_affiliation(self.org[2], user, role="TestStaff")
        auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[2])
        # User should not be able to read record1 or record2 (no access),
        # but record3 (as editor for org[2])
        permitted = has_permission("read", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("read", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
        permitted = has_permission("read", c=c, f=f, table=tablename,
                                   record_id=self.record3)
        assertTrue(permitted)
        # User should not be able to update record1 or record2 (no access),
        # but record3 (as editor for org[2])
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record3)
        assertTrue(permitted)
        # Make org[2] an OU of org[1]
        s3db.pr_add_affiliation(self.org[1], self.org[2], role="TestOrgUnit")
        # Delegate TESTREADER from org[0] to org[1]
        auth.s3_delegate_role(self.reader, self.org[0], receiver=self.org[1])
        # Update realms
        auth.s3_impersonate("normaluser@example.com")
        # User should be able to read record1 (reader delegated)
        # and record3 (as editor for org[2]), but not record2 (no access)
        permitted = has_permission("read", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertTrue(permitted)
        permitted = has_permission("read", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
        permitted = has_permission("read", c=c, f=f, table=tablename,
                                   record_id=self.record3)
        assertTrue(permitted)
        # User should be able to update record3 (as editor for org[2]),
        # but not record1 (only reader delegated) or record2 (no access)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record1)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record2)
        assertFalse(permitted)
        permitted = has_permission("update", c=c, f=f, table=tablename,
                                   record_id=self.record3)
        assertTrue(permitted)
        auth.s3_remove_delegation(self.reader, self.org[0], receiver=self.org[1])
        s3db.pr_remove_affiliation(self.org[1], self.org[2], role="TestOrgUnit")
        s3db.pr_remove_affiliation(self.org[2], user, role="TestStaff")
        auth.s3_withdraw_role(auth.user.id, self.editor, for_pe=[])
    # -------------------------------------------------------------------------
    def testWithUnavailableTable(self):
        """ has_permission returns None for a nonexistent table """
        auth = current.auth
        s3db = current.s3db
        has_permission = auth.s3_has_permission
        c = "org"
        f = "permission_test"
        tablename = "org_permission_unavailable"
        auth.s3_impersonate(None)
        permitted = has_permission("read", c=c, f=f, table=tablename)
        # Should return None if the table doesn't exist
        self.assertEqual(permitted, None)
    ## -------------------------------------------------------------------------
    #def testPerformance(self):
        #""" Test has_permission performance """
        #MAX_RUNTIME = 1 # Maximum acceptable runtime per request in milliseconds
        #auth = current.auth
        #current.deployment_settings.security.policy = 8
        #from s3.s3aaa import S3Permission
        #auth.permission = S3Permission(auth)
        #has_permission = auth.s3_has_permission
        #c = "org"
        #f = "permission_test"
        #tablename = "org_permission_test"
        #assertTrue = self.assertTrue
        #assertFalse = self.assertFalse
        #auth.s3_impersonate("normaluser@example.com")
        #auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[0])
        #def hasPermission():
            #permitted = has_permission("update", c=c, f=f, table=tablename,
                                       #record_id=self.record1)
        #import timeit
        #runtime = timeit.Timer(hasPermission).timeit(number=1000)
        #if runtime > MAX_RUNTIME:
            #raise AssertionError("has_permission: maximum acceptable run time "
                                 #"exceeded (%.2fms > %.2fms)" %
                                 #(runtime, MAX_RUNTIME))
        #auth.s3_withdraw_role(auth.user.id, self.editor, for_pe=[])
# =============================================================================
class AccessibleQueryTests(unittest.TestCase):
""" Test accessible query for all policies """
# -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        """
            Create a throwaway test table and three test roles with
            controller-, function- and table-level ACLs; committed so
            that they are visible to every test in this class.
        """
        # Create test table
        db = current.db
        tablename = "org_permission_test"
        db.define_table(tablename,
                        Field("name"),
                        *s3_meta_fields())
        # Create test roles and ACLs
        auth = current.auth
        acl = auth.permission
        READ = acl.READ
        CREATE = acl.READ|acl.CREATE
        UPDATE = acl.READ|acl.UPDATE
        WRITE = acl.READ|acl.CREATE|acl.UPDATE
        ALL = acl.ALL
        # NOTE: TESTREADER here deliberately differs from the role of the
        # same uid in HasPermissionTests (oacl=READ at controller level,
        # oacl=ALL at function level)
        TESTREADER = "TESTREADER"
        auth.s3_create_role(TESTREADER, None,
                            dict(c="org",
                                 uacl=READ, oacl=READ),
                            dict(c="org", f="permission_test",
                                 uacl=CREATE, oacl=ALL),
                            dict(t="org_permission_test",
                                 uacl=WRITE, oacl=UPDATE),
                            uid=TESTREADER)
        TESTEDITOR = "TESTEDITOR"
        auth.s3_create_role(TESTEDITOR, None,
                            dict(c="org",
                                 uacl=WRITE, oacl=UPDATE),
                            dict(c="org", f="permission_test",
                                 uacl=WRITE, oacl=UPDATE),
                            dict(t="org_permission_test",
                                 uacl=WRITE, oacl=UPDATE),
                            uid=TESTEDITOR)
        TESTADMIN = "TESTADMIN"
        auth.s3_create_role(TESTADMIN, None,
                            dict(c="org",
                                 uacl=ALL, oacl=ALL),
                            dict(c="org", f="permission_test",
                                 uacl=ALL, oacl=ALL),
                            dict(t="org_permission_test",
                                 uacl=ALL, oacl=ALL),
                            uid=TESTADMIN)
        db.commit()
@classmethod
def tearDownClass(cls):
# Remove test roles
s3_delete_role = current.auth.s3_delete_role
s3_delete_role("TESTREADER")
s3_delete_role("TESTEDITOR")
s3_delete_role("TESTADMIN")
# Remove test table
table = current.db.org_permission_test
table.drop()
current.db.commit()
# -------------------------------------------------------------------------
    def setUp(self):
        """
            Per-test fixture: remember the current security policy and
            ownership rule, look up the test role IDs, create three test
            organisations (one realm entity each) and one test record
            per realm, then log out and disable auth override.
        """
        db = current.db
        auth = current.auth
        s3db = current.s3db
        # Store current security policy
        settings = current.deployment_settings
        self.policy = settings.get_security_policy()
        # Store current ownership rule
        self.strict = settings.get_security_strict_ownership()
        settings.security.strict_ownership = False
        # Get the role IDs
        gtable = auth.settings.table_group
        row = db(gtable.uuid=="TESTREADER").select(limitby=(0, 1)).first()
        self.reader = row.id
        row = db(gtable.uuid=="TESTEDITOR").select(limitby=(0, 1)).first()
        self.editor = row.id
        row = db(gtable.uuid=="TESTADMIN").select(limitby=(0, 1)).first()
        self.admin = row.id
        # Impersonate Admin
        auth.s3_impersonate("admin@example.com")
        # Create test entities
        table = s3db.org_organisation
        self.org = []
        for i in xrange(3):
            record_id = table.insert(name="PermissionTestOrganisation%s" % i)
            record = Storage(id=record_id)
            # update_super gives the organisation its pe_id (realm entity key)
            s3db.update_super(table, record)
            self.org.append(record.pe_id)
        # Create test records, one per organisation realm
        table = current.db.org_permission_test
        self.record1 = table.insert(name="TestRecord1",
                                    owned_by_user=auth.user.id,
                                    realm_entity=self.org[0])
        self.record2 = table.insert(name="TestRecord2",
                                    owned_by_user=auth.user.id,
                                    realm_entity=self.org[1])
        self.record3 = table.insert(name="TestRecord3",
                                    owned_by_user=auth.user.id,
                                    realm_entity=self.org[2])
        # Remove session ownership
        auth.s3_clear_session_ownership()
        # Logout + turn override off
        auth.s3_impersonate(None)
        auth.override = False
def tearDown(self):
# Rollback
current.db.rollback()
# Remove test records
table = current.s3db.org_permission_test
table.truncate()
# Restore security policy
current.deployment_settings.security.policy = self.policy
# Restore current ownership rule
current.deployment_settings.security.strict_ownership = self.strict
# Logout + turn override off
auth = current.auth
auth.s3_impersonate(None)
auth.override = False
# -------------------------------------------------------------------------
    def testPolicy3(self):
        """ Test accessible query with policy 3 """
        auth = current.auth
        current.deployment_settings.security.policy = 3
        auth.permission = S3Permission(auth)
        accessible_query = auth.s3_accessible_query
        c = "org"
        f = "permission_test"
        table = current.s3db.org_permission_test
        assertEqual = self.assertEqual
        # Queries matching all records / no records, respectively
        ALL = (table.id > 0)
        NONE = (table.id == 0)
        # Check anonymous
        auth.s3_impersonate(None)
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, NONE)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, NONE)
        # Test with TESTREADER
        auth.s3_assign_role(auth.user.id, self.reader)
        # Passing the tablename (str) instead of the table must work too
        query = accessible_query("read", "org_permission_test", c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("update", table, c=c, f=f)
        assertEqual(query, NONE)
        query = accessible_query("delete", table, c=c, f=f)
        assertEqual(query, NONE)
        auth.s3_withdraw_role(auth.user.id, self.reader)
        # Test with TESTEDITOR
        auth.s3_assign_role(auth.user.id, self.editor)
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("update", table, c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("delete", table, c=c, f=f)
        assertEqual(query, NONE)
        auth.s3_withdraw_role(auth.user.id, self.editor)
# -------------------------------------------------------------------------
    def testPolicy4(self):
        """ Test accessible query with policy 4 """
        auth = current.auth
        current.deployment_settings.security.policy = 4
        auth.permission = S3Permission(auth)
        accessible_query = auth.s3_accessible_query
        c = "org"
        f = "permission_test"
        table = current.s3db.org_permission_test
        assertEqual = self.assertEqual
        # Queries matching all records / no records, respectively
        ALL = (table.id > 0)
        NONE = (table.id == 0)
        # Check anonymous
        auth.s3_impersonate(None)
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, NONE)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, NONE)
        # Test with TESTREADER
        auth.s3_assign_role(auth.user.id, self.reader)
        query = accessible_query("read", "org_permission_test", c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("update", table, c=c, f=f)
        # Expected ownership query: owned by the user, unowned, or owned
        # by one of the user's groups
        roles = [r for r in auth.user.realms if r is not None]
        OWNED = (((table.owned_by_user == auth.user.id) | \
                ((table.owned_by_user == None) & \
                (table.owned_by_group == None))) | \
                (table.owned_by_group.belongs(roles)))
        assertEqual(query, OWNED)
        query = accessible_query("delete", table, c=c, f=f)
        assertEqual(query, OWNED)
        auth.s3_withdraw_role(auth.user.id, self.reader)
        # Test with TESTEDITOR
        auth.s3_assign_role(auth.user.id, self.editor)
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("update", table, c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("delete", table, c=c, f=f)
        assertEqual(query, NONE)
        auth.s3_withdraw_role(auth.user.id, self.editor)
# -------------------------------------------------------------------------
    def testPolicy5(self):
        """ Test accessible query with policy 5 """
        auth = current.auth
        current.deployment_settings.security.policy = 5
        auth.permission = S3Permission(auth)
        accessible_query = auth.s3_accessible_query
        c = "org"
        f = "permission_test"
        table = current.s3db.org_permission_test
        assertEqual = self.assertEqual
        # Queries matching all records / no records, respectively
        ALL = (table.id > 0)
        NONE = (table.id == 0)
        # Check anonymous
        auth.s3_impersonate(None)
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, NONE)
        # Check authenticated
        auth.s3_impersonate("normaluser@example.com")
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, NONE)
        # Test with TESTREADER
        auth.s3_assign_role(auth.user.id, self.reader)
        query = accessible_query("read", "org_permission_test", c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("update", table, c=c, f=f)
        # Expected ownership query: owned by the user, unowned, or owned
        # by one of the user's groups
        roles = [r for r in auth.user.realms if r is not None]
        OWNED = (((table.owned_by_user == auth.user.id) | \
                ((table.owned_by_user == None) & \
                (table.owned_by_group == None))) | \
                (table.owned_by_group.belongs(roles)))
        assertEqual(query, OWNED)
        query = accessible_query("delete", table, c=c, f=f)
        assertEqual(query, NONE)
        auth.s3_withdraw_role(auth.user.id, self.reader)
        # Test with TESTEDITOR
        auth.s3_assign_role(auth.user.id, self.editor)
        query = accessible_query("read", table, c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("update", table, c=c, f=f)
        assertEqual(query, ALL)
        query = accessible_query("delete", table, c=c, f=f)
        assertEqual(query, NONE)
        auth.s3_withdraw_role(auth.user.id, self.editor)
# -------------------------------------------------------------------------
def testPolicy6(self):
""" Test accessible query with policy 6 """
auth = current.auth
current.deployment_settings.security.policy = 6
auth.permission = S3Permission(auth)
accessible_query = auth.s3_accessible_query
c = "org"
f = "permission_test"
table = current.s3db.org_permission_test
assertEqual = self.assertEqual
ALL = (table.id > 0)
NONE = (table.id == 0)
# Check anonymous
auth.s3_impersonate(None)
query = accessible_query("read", table, c=c, f=f)
assertEqual(query, NONE)
# Check authenticated
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c=c, f=f)
assertEqual(query, NONE)
# Test with TESTREADER
auth.s3_assign_role(auth.user.id, self.reader, for_pe=self.org[0])
expected = (((table.realm_entity == self.org[0]) | \
(table.realm_entity == None)) | \
(((table.owned_by_user == auth.user.id) | \
(((table.owned_by_user == None) & \
(table.owned_by_group == None)) & \
(table.realm_entity == None))) | \
(table.owned_by_group.belongs([2,3]))))
query = accessible_query("read", "org_permission_test", c=c, f=f)
assertEqual(query, expected)
query = accessible_query("update",table, c=c, f=f)
expected = (((table.owned_by_user == auth.user.id) | \
(((table.owned_by_user == None) & \
(table.owned_by_group == None)) & \
(table.realm_entity == None))) | \
(((table.owned_by_group == self.reader) & \
(table.realm_entity.belongs([self.org[0]]))) | \
(table.owned_by_group.belongs([2,3]))))
assertEqual(query, expected)
query = accessible_query("delete", table, c=c, f=f)
assertEqual(query, NONE)
auth.s3_withdraw_role(auth.user.id, self.reader)
# Test with TESTEDITOR
auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[0])
query = accessible_query("read", table, c=c, f=f)
expected = (((table.realm_entity == self.org[0]) | \
(table.realm_entity == None)) | \
(((table.owned_by_user == auth.user.id) | \
(((table.owned_by_user == None) & \
(table.owned_by_group == None)) & \
(table.realm_entity == None))) | \
(table.owned_by_group.belongs([2,3]))))
assertEqual(query, expected)
query = accessible_query("update", table, c=c, f=f)
assertEqual(query, expected)
query = accessible_query("delete", table, c=c, f=f)
assertEqual(query, NONE)
auth.s3_withdraw_role(auth.user.id, self.editor)
# Logout
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testPolicy7(self):
    """
    Test accessible query with policy 7 (hierarchical realms):
    checks the accessible-query for anonymous, authenticated,
    TESTREADER and TESTEDITOR users, with and without an OU
    hierarchy between the test organisations, and with strict
    ownership both on and off.
    """
    auth = current.auth
    s3db = current.s3db

    # Switch to policy 7 and re-initialize the permission handler
    current.deployment_settings.security.policy = 7
    auth.permission = S3Permission(auth)

    accessible_query = auth.s3_accessible_query
    c = "org"
    f = "permission_test"
    table = current.s3db.org_permission_test

    assertEqual = self.assertEqual

    # Shorthand queries for "all records" / "no records"
    ALL = (table.id > 0)
    NONE = (table.id == 0)

    # Check anonymous
    auth.s3_impersonate(None)
    query = accessible_query("read", table, c=c, f=f)
    assertEqual(query, NONE)

    # Check authenticated
    auth.s3_impersonate("normaluser@example.com")
    query = accessible_query("read", table, c=c, f=f)
    assertEqual(query, NONE)

    # Test with TESTREADER
    auth.s3_assign_role(auth.user.id, self.reader, for_pe=self.org[0])

    # With strict ownership: the "unowned record" clause is omitted
    current.deployment_settings.security.strict_ownership = True
    query = accessible_query("read", table, c=c, f=f)
    expected = (((table.realm_entity == self.org[0]) | \
                 (table.realm_entity == None)) | \
                ((table.owned_by_user == auth.user.id) | \
                 (table.owned_by_group.belongs([2,3]))))
    assertEqual(query, expected)

    # Without strict ownership: unowned records are accessible too
    current.deployment_settings.security.strict_ownership = False
    query = accessible_query("read", table, c=c, f=f)
    expected = (((table.realm_entity == self.org[0]) | \
                 (table.realm_entity == None)) | \
                (((table.owned_by_user == auth.user.id) | \
                  (((table.owned_by_user == None) & \
                    (table.owned_by_group == None)) & \
                   (table.realm_entity == None))) | \
                 (table.owned_by_group.belongs([2,3]))))
    assertEqual(query, expected)

    # READER may update only owned records within their realm
    query = accessible_query("update", table, c=c, f=f)
    expected = (((table.owned_by_user == auth.user.id) | \
                 (((table.owned_by_user == None) & \
                   (table.owned_by_group == None)) & \
                  (table.realm_entity == None))) | \
                (((table.owned_by_group == self.reader) & \
                  (table.realm_entity.belongs([self.org[0]]))) | \
                 (table.owned_by_group.belongs([2,3]))))
    assertEqual(query, expected)

    # READER may not delete anything
    query = accessible_query("delete", table, c=c, f=f)
    assertEqual(query, NONE)

    # Make org[1] a sub-entity of org[0]
    s3db.pr_add_affiliation(self.org[0], self.org[1], role="TestOrgUnit")
    # Reload realms and delegations
    auth.s3_impersonate("normaluser@example.com")

    # Re-check queries: the realm now includes the sub-entity org[1]
    query = accessible_query("read", table, c=c, f=f)
    expected = (((table.realm_entity.belongs([self.org[0], self.org[1]])) | \
                 (table.realm_entity == None)) | \
                (((table.owned_by_user == auth.user.id) | \
                  (((table.owned_by_user == None) & \
                    (table.owned_by_group == None)) & \
                   (table.realm_entity == None))) | \
                 (table.owned_by_group.belongs([2,3]))))
    assertEqual(query, expected)

    query = accessible_query("update", table, c=c, f=f)
    expected = (((table.owned_by_user == auth.user.id) | \
                 (((table.owned_by_user == None) & \
                   (table.owned_by_group == None)) & \
                  (table.realm_entity == None))) | \
                (((table.owned_by_group == self.reader) & \
                  (table.realm_entity.belongs([self.org[0], self.org[1]]))) | \
                 (table.owned_by_group.belongs([2,3]))))
    assertEqual(query, expected)

    query = accessible_query("delete", table, c=c, f=f)
    assertEqual(query, NONE)

    # Remove affiliation and role
    s3db.pr_remove_affiliation(self.org[0], self.org[1], role="TestOrgUnit")
    auth.s3_withdraw_role(auth.user.id, self.reader)

    # Test with TESTEDITOR
    auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[0])

    query = accessible_query("read", table, c=c, f=f)
    expected = (((table.realm_entity == self.org[0]) | \
                 (table.realm_entity == None)) | \
                (((table.owned_by_user == auth.user.id) | \
                  (((table.owned_by_user == None) & \
                    (table.owned_by_group == None)) & \
                   (table.realm_entity == None))) | \
                 (table.owned_by_group.belongs([2,3]))))
    assertEqual(query, expected)

    # EDITOR may update everything they can read
    query = accessible_query("update", table, c=c, f=f)
    assertEqual(query, expected)

    query = accessible_query("delete", table, c=c, f=f)
    assertEqual(query, NONE)

    # Make org[1] a sub-entity of org[0]
    s3db.pr_add_affiliation(self.org[0], self.org[1], role="TestOrgUnit")
    # Reload realms and delegations
    auth.s3_impersonate("normaluser@example.com")

    # Re-check queries with the extended realm
    expected = (((table.realm_entity.belongs([self.org[0], self.org[1]])) | \
                 (table.realm_entity == None)) | \
                (((table.owned_by_user == auth.user.id) | \
                  (((table.owned_by_user == None) & \
                    (table.owned_by_group == None)) & \
                   (table.realm_entity == None))) | \
                 (table.owned_by_group.belongs([2,3]))))
    query = accessible_query("read", table, c=c, f=f)
    assertEqual(query, expected)
    query = accessible_query("update", table, c=c, f=f)
    # Fix: this was assertTrue(query, expected), where "expected" was
    # silently treated as the assertion *message*, so the check always
    # passed for any truthy query - use assertEqual like all others
    assertEqual(query, expected)
    query = accessible_query("delete", table, c=c, f=f)
    assertEqual(query, NONE)

    # Remove affiliation and role
    s3db.pr_remove_affiliation(self.org[0], self.org[1], role="TestOrgUnit")
    auth.s3_withdraw_role(auth.user.id, self.editor)
# -------------------------------------------------------------------------
def testPolicy8(self):
    """ Test accessible query with policy 8 """
    s3db = current.s3db
    auth = current.auth

    # Switch to policy 8 (hierarchical realms + delegations) and
    # re-initialize the permission handler
    current.deployment_settings.security.policy = 8
    auth.permission = S3Permission(auth)

    accessible_query = auth.s3_accessible_query
    c = "org"
    f = "permission_test"
    table = current.s3db.org_permission_test

    # NOTE(review): this local binding is unused in this method (the
    # checks below call self.assertEqual directly)
    assertEqual = self.assertEqual

    # Shorthand queries for "all records" / "no records"
    ALL = (table.id > 0)
    NONE = (table.id == 0)

    # Check anonymous
    auth.s3_impersonate(None)
    query = accessible_query("read", table, c=c, f=f)
    self.assertEqual(query, NONE)

    # Check authenticated
    auth.s3_impersonate("normaluser@example.com")
    query = accessible_query("read", table, c=c, f=f)
    self.assertEqual(query, NONE)

    # NOTE(review): unused local
    record = None

    # Add the user as staff member (=OU) of org[2] and assign TESTEDITOR
    user = auth.s3_user_pe_id(auth.s3_get_user_id("normaluser@example.com"))
    s3db.pr_add_affiliation(self.org[2], user, role="TestStaff")
    auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[2])

    # User should only be able to access records of org[2]
    expected = (((table.realm_entity == self.org[2]) | \
                 (table.realm_entity == None)) | \
                (((table.owned_by_user == auth.user.id) | \
                  (((table.owned_by_user == None) & \
                    (table.owned_by_group == None)) & \
                   (table.realm_entity == None))) | \
                 (table.owned_by_group.belongs([2,3]))))
    query = accessible_query("read", table, c=c, f=f)
    self.assertEqual(query, expected)
    query = accessible_query("update", table, c=c, f=f)
    self.assertEqual(query, expected)

    # Make org[2] an OU of org[1]
    s3db.pr_add_affiliation(self.org[1], self.org[2], role="TestOrgUnit")
    # Delegate TESTREADER from org[0] to org[1]
    auth.s3_delegate_role(self.reader, self.org[0], receiver=self.org[1])
    # Update realms (re-login reloads realms and delegations)
    auth.s3_impersonate("normaluser@example.com")

    # User should now be able to read records of org[0] (delegated
    # reader role) and org[2] (editor role), but update only org[2]
    query = accessible_query("read", table, c=c, f=f)
    expected = (((table.realm_entity.belongs([self.org[0], \
                                              self.org[2]])) | \
                 (table.realm_entity == None)) | \
                (((table.owned_by_user == auth.user.id) | \
                  (((table.owned_by_user == None) & \
                    (table.owned_by_group == None)) & \
                   (table.realm_entity == None))) | \
                 (table.owned_by_group.belongs([2,3]))))
    self.assertEqual(query, expected)
    query = accessible_query("update", table, c=c, f=f)
    expected = (((table.realm_entity == self.org[2]) | \
                 (table.realm_entity == None)) | \
                (((table.owned_by_user == auth.user.id) | \
                  (((table.owned_by_user == None) & \
                    (table.owned_by_group == None)) & \
                   (table.realm_entity == None))) | \
                 (table.owned_by_group.belongs([2,3]))))
    self.assertEqual(query, expected)

    # Remove the affiliation of org[2] with org[1]
    s3db.pr_remove_affiliation(self.org[1],
                               self.org[2],
                               role="TestOrgUnit")
    # Update realms
    auth.s3_impersonate("normaluser@example.com")

    # Check queries again, user should now only have access to
    # records of org[2] (editor role)
    query = accessible_query("read", table, c=c, f=f)
    expected = (((table.realm_entity == self.org[2]) | \
                 (table.realm_entity == None)) | \
                (((table.owned_by_user == auth.user.id) | \
                  (((table.owned_by_user == None) & \
                    (table.owned_by_group == None)) & \
                   (table.realm_entity == None))) | \
                 (table.owned_by_group.belongs([2,3]))))
    self.assertEqual(query, expected)
    query = accessible_query("update", table, c=c, f=f)
    self.assertEqual(query, expected)

    # Remove delegation, affiliation and role
    # NOTE(review): the role assigned above was EDITOR (to auth.user.id),
    # but READER is withdrawn here from the user's *pe_id*, and the
    # TESTREADER delegation from org[0] to org[1] is never explicitly
    # removed - presumably tearDown's rollback handles the actual
    # cleanup; confirm against the s3_withdraw_role() API.
    s3db.pr_remove_affiliation(self.org[2], user, role="TestStaff")
    s3db.pr_remove_affiliation(self.org[1], self.org[2],
                               role="TestOrgUnit")
    auth.s3_withdraw_role(user, self.reader, for_pe=self.org[2])
## -------------------------------------------------------------------------
#def testPerformance(self):
#""" Test accessible query performance """
#auth = current.auth
## Maximum acceptable runtime per request in milliseconds
#MAX_RUNTIME = 1.5
#current.deployment_settings.security.policy = 8
#from s3.s3aaa import S3Permission
#auth.permission = S3Permission(auth)
#accessible_query = auth.s3_accessible_query
#c = "org"
#f = "permission_test"
#table = current.s3db.org_permission_test
#assertEqual = self.assertEqual
#auth.s3_impersonate("normaluser@example.com")
#auth.s3_assign_role(auth.user.id, self.editor, for_pe=self.org[0])
#def accessibleQuery():
#query = accessible_query("update", table, c=c, f=f)
#import timeit
#runtime = timeit.Timer(accessibleQuery).timeit(number=1000)
#if runtime > MAX_RUNTIME:
#raise AssertionError("accessible_query: maximum acceptable "
#"run time exceeded (%.2fms > %.2fms)" %
#(runtime, MAX_RUNTIME))
#auth.s3_withdraw_role(auth.user.id, self.editor, for_pe=[])
# =============================================================================
class DelegationTests(unittest.TestCase):
    """ Test delegation of roles """

    # -------------------------------------------------------------------------
    @classmethod
    def setUpClass(cls):
        """ Create the shared test roles for this test class """
        # Create test roles
        s3_create_role = current.auth.s3_create_role
        TESTREADER = "TESTREADER"
        s3_create_role(TESTREADER, None, uid=TESTREADER)
        TESTEDITOR = "TESTEDITOR"
        s3_create_role(TESTEDITOR, None, uid=TESTEDITOR)
        TESTADMIN = "TESTADMIN"
        s3_create_role(TESTADMIN, None, uid=TESTADMIN)
        current.db.commit()

    @classmethod
    def tearDownClass(cls):
        """ Remove the shared test roles again """
        # Remove test roles
        s3_delete_role = current.auth.s3_delete_role
        s3_delete_role("TESTREADER")
        s3_delete_role("TESTEDITOR")
        s3_delete_role("TESTADMIN")
        current.db.commit()

    # -------------------------------------------------------------------------
    def setUp(self):
        """
        Look up the test role IDs and create three test
        organisations (their pe_ids are kept in self.org)
        """
        db = current.db
        auth = current.auth
        s3db = current.s3db

        # Store current security policy (restored in tearDown)
        settings = current.deployment_settings
        self.policy = settings.get_security_policy()

        # Get the role IDs
        gtable = auth.settings.table_group
        row = db(gtable.uuid=="TESTREADER").select(limitby=(0, 1)).first()
        self.reader = row.id
        row = db(gtable.uuid=="TESTEDITOR").select(limitby=(0, 1)).first()
        self.editor = row.id
        row = db(gtable.uuid=="TESTADMIN").select(limitby=(0, 1)).first()
        self.admin = row.id

        # Impersonate Admin
        auth.s3_impersonate("admin@example.com")

        # Create test entities
        table = s3db.org_organisation
        self.org = []
        for i in xrange(3):
            record_id = table.insert(name="PermissionTestOrganisation%s" % i)
            record = Storage(id=record_id)
            # update_super creates the super-entity and sets record.pe_id
            s3db.update_super(table, record)
            self.org.append(record.pe_id)

        # Remove session ownership
        auth.s3_clear_session_ownership()

        # Logout + turn override off
        auth.s3_impersonate(None)
        auth.override = False

    def tearDown(self):
        """ Roll back test data and restore the security policy """
        # Rollback
        current.db.rollback()

        # Restore security policy
        current.deployment_settings.security.policy = self.policy

        # Logout + turn override off
        auth = current.auth
        auth.s3_impersonate(None)
        auth.override = False

    # -------------------------------------------------------------------------
    def testRoleDelegation(self):
        """ Test delegation of a role """
        s3db = current.s3db
        auth = current.auth

        # Policy 8 enables hierarchical realms + delegations
        current.deployment_settings.security.policy = 8
        auth.permission = S3Permission(auth)

        auth.s3_impersonate("normaluser@example.com")
        user = auth.user.pe_id

        org1 = self.org[0]
        org2 = self.org[1]
        org3 = self.org[2]

        # Local shortcuts
        pr_add_affiliation = s3db.pr_add_affiliation
        pr_remove_affiliation = s3db.pr_remove_affiliation
        s3_delegate_role = auth.s3_delegate_role
        s3_remove_delegation = auth.s3_remove_delegation
        assertTrue = self.assertTrue
        assertFalse = self.assertFalse
        assertEqual = self.assertEqual
        assertNotEqual = self.assertNotEqual
        READER = self.reader
        EDITOR = self.editor

        # Add the user as staff member (=OU) of org3 and assign TESTEDITOR
        pr_add_affiliation(org3, user, role="TestStaff")
        auth.s3_assign_role(auth.user.id, EDITOR, for_pe=org3)

        # Make org3 an OU descendant of org2
        pr_add_affiliation(org2, org3, role="TestOrgUnit")

        # Delegate the TESTREADER role for org1 to org2
        s3_delegate_role(READER, org1, receiver=org2)

        # Check the delegations: the user (staff of org3, an OU of the
        # receiver org2) now holds READER for org1 via org3
        delegations = auth.user.delegations
        assertTrue(READER in delegations)
        assertTrue(org3 in delegations[READER])
        assertTrue(org1 in delegations[READER][org3])

        s3_remove_delegation(READER, org1, receiver=org2)

        # Check the delegations: empty after removal
        delegations = auth.user.delegations
        assertEqual(delegations.keys(), [])

        # Delegate the TESTREADER and TESTEDITOR roles for org1 to org2
        s3_delegate_role([READER, EDITOR], org1, receiver=org2)

        # s3_get_delegations must report both roles for receiver org2
        delegations = auth.s3_get_delegations(org1)
        assertNotEqual(delegations, None)
        assertTrue(isinstance(delegations, Storage))
        assertTrue(org2 in delegations)
        assertTrue(isinstance(delegations[org2], list))
        assertEqual(len(delegations[org2]), 2)
        assertTrue(READER in delegations[org2])
        assertTrue(EDITOR in delegations[org2])

        # Check the delegations
        delegations = auth.user.delegations
        assertTrue(READER in delegations)
        assertTrue(EDITOR in delegations)
        assertTrue(org3 in delegations[READER])
        assertTrue(org1 in delegations[READER][org3])
        assertTrue(org3 in delegations[EDITOR])
        assertTrue(org1 in delegations[EDITOR][org3])

        s3_remove_delegation(EDITOR, org1, receiver=org2)

        # Only READER remains after removing the EDITOR delegation
        delegations = auth.s3_get_delegations(org1)
        assertNotEqual(delegations, None)
        assertTrue(isinstance(delegations, Storage))
        assertTrue(org2 in delegations)
        assertTrue(isinstance(delegations[org2], list))
        assertEqual(len(delegations[org2]), 1)
        assertTrue(READER in delegations[org2])

        # Check the delegations
        delegations = auth.user.delegations
        assertTrue(READER in delegations)
        assertFalse(EDITOR in delegations)
        assertTrue(org3 in delegations[READER])
        assertTrue(org1 in delegations[READER][org3])

        s3_remove_delegation(READER, org1, receiver=org2)

        # No delegations left for org1
        delegations = auth.s3_get_delegations(org1)
        assertNotEqual(delegations, None)
        assertTrue(isinstance(delegations, Storage))
        assertEqual(delegations.keys(), [])

        # Check the delegations
        delegations = auth.user.delegations
        assertEqual(delegations.keys(), [])

        # Remove delegation, affiliation and role
        # NOTE(review): EDITOR was assigned above (to auth.user.id), but
        # READER is withdrawn here from the user's pe_id - presumably
        # tearDown's rollback performs the actual cleanup; verify against
        # the s3_withdraw_role() API.
        pr_remove_affiliation(org3, user, role="TestStaff")
        pr_remove_affiliation(org2, org3, role="TestOrgUnit")
        auth.s3_withdraw_role(user, READER, for_pe=org3)
# =============================================================================
class RecordApprovalTests(unittest.TestCase):
""" Tests for the record approval framework """
# -------------------------------------------------------------------------
def setUp(self):
    """
    Grant baseline org ACLs for authenticated users, switch to
    security policy 5, and turn record approval off (previous
    settings are remembered for restoration)
    """
    auth = current.auth
    settings = current.deployment_settings
    acl = auth.permission
    authenticated = auth.get_system_roles().AUTHENTICATED

    # Baseline permissions for authenticated users on the org controller...
    acl.update_acl(authenticated,
                   c="org",
                   uacl=acl.READ,
                   oacl=acl.READ|acl.UPDATE)
    # ...and on the org_organisation table
    acl.update_acl(authenticated,
                   t="org_organisation",
                   uacl=acl.READ|acl.CREATE,
                   oacl=acl.READ|acl.UPDATE)

    # Remember the current settings so tests can restore them
    self.policy = settings.get_security_policy()
    self.approval = settings.get_auth_record_approval()
    self.approval_for = settings.get_auth_record_approval_required_for()

    # Fixture state: policy 5, record approval globally off
    settings.security.policy = 5
    settings.auth.record_approval = False
    settings.auth.record_approval_required_for = None

    auth.override = False
    auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testRecordApprovedBy(self):
    """ Test whether a new record is unapproved by default """
    db = current.db
    auth = current.auth
    s3db = current.s3db
    settings = current.deployment_settings
    try:
        # Turn record approval on
        settings.auth.record_approval = True

        # Act as admin
        auth.s3_impersonate("admin@example.com")

        # Insert a test organisation
        otable = s3db.org_organisation
        otable.approved_by.default = None
        data = Storage(name="Test Approval Organisation")
        record_id = otable.insert(**data)
        self.assertTrue(record_id > 0)
        data.update(id=record_id)
        s3db.update_super(otable, data)

        # The freshly created record must have no approver set
        record = db(otable.id == record_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(record, None)
        self.assertEqual(record.approved_by, None)
    finally:
        # Undo all changes and restore the approval setting
        db.rollback()
        settings.auth.record_approval = False
        auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testRequiresApproval(self):
    """
    Test requires_approval settings.

    Exercises the interaction between the global settings
    auth.record_approval (master switch) and
    auth.record_approval_required_for (list of tables, [] = none,
    None = any) with the per-table requires_approval configuration:

        - approval globally off => never required
        - required_for == [] => never required, even if the table
          itself is configured with requires_approval=True
        - required_for == None => required iff the table is
          configured with requires_approval=True
        - required_for == [tables...] => required exactly for the
          listed tables, regardless of the table configuration
    """
    s3db = current.s3db
    settings = current.deployment_settings

    # Remember current settings for restoration
    approval = settings.get_auth_record_approval()
    tables = settings.get_auth_record_approval_required_for()
    org_approval = s3db.get_config("org_organisation", "requires_approval")

    approval_required = current.auth.permission.requires_approval

    # Sentinel meaning "leave the table unconfigured"
    UNSET = object()

    # (record_approval, required_for, table setting, expected result)
    cases = (
        # Approval globally turned off
        (False, [], True, False),
        # Globally on, but set to no tables and table=off
        (True, [], False, False),
        # Globally on, but set to no tables yet table=on
        (True, [], True, False),
        # Globally on, set to any tables and table=on
        (True, None, True, True),
        # Globally on, but set to different tables and table=on
        (True, ["project_project"], True, False),
        # Globally on, set to this table and table=off
        (True, ["org_organisation"], False, True),
        # Globally on, set to any table and table=off
        (True, None, False, False),
        # Globally on, set to any table and no table config
        (True, None, UNSET, False),
        )

    try:
        for global_approval, required_for, table_setting, expected in cases:
            settings.auth.record_approval = global_approval
            settings.auth.record_approval_required_for = required_for
            if table_setting is not UNSET:
                s3db.configure("org_organisation",
                               requires_approval=table_setting)
            else:
                s3db.clear_config("org_organisation", "requires_approval")
            result = approval_required("org_organisation")
            if expected:
                self.assertTrue(result)
            else:
                self.assertFalse(result)
            # Reset the table configuration for the next case
            s3db.clear_config("org_organisation", "requires_approval")
    finally:
        # Restore the original settings
        settings.auth.record_approval = approval
        settings.auth.record_approval_required_for = tables
        if org_approval is not None:
            s3db.configure("org_organisation",
                           requires_approval = org_approval)
        current.auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testSetDefaultApprover(self):
    """
    Test whether default approver is set if current user has
    permission to approve records in a table
    """
    auth = current.auth
    permission = auth.permission
    AUTHENTICATED = auth.get_system_roles().AUTHENTICATED

    otable = current.s3db.org_organisation
    otable.approved_by.default = None

    # Without REVIEW/APPROVE permission: no default approver
    auth.s3_impersonate("normaluser@example.com")
    permission.set_default_approver(otable)
    self.assertEqual(otable.approved_by.default, None)

    # Give user review and approve permissions on this table
    permission.update_acl(AUTHENTICATED,
                          c="org",
                          uacl=permission.READ|permission.REVIEW|permission.APPROVE,
                          oacl=permission.READ|permission.UPDATE|permission.REVIEW|permission.APPROVE)
    permission.update_acl(AUTHENTICATED,
                          t="org_organisation",
                          uacl=permission.READ|permission.CREATE|permission.REVIEW|permission.APPROVE,
                          oacl=permission.READ|permission.UPDATE|permission.REVIEW|permission.APPROVE)

    # Now the current user becomes the default approver
    auth.s3_impersonate("normaluser@example.com")
    permission.set_default_approver(otable)
    self.assertEqual(otable.approved_by.default, auth.user.id)

    # Same for the admin
    auth.s3_impersonate("admin@example.com")
    permission.set_default_approver(otable)
    self.assertEqual(otable.approved_by.default, auth.user.id)

    # Anonymous gets no default approver
    auth.s3_impersonate(None)
    permission.set_default_approver(otable)
    self.assertEqual(otable.approved_by.default, None)
# -------------------------------------------------------------------------
def testRecordApprovalWithComponents(self):
    """ Test record approval including components """
    db = current.db
    auth = current.auth
    s3db = current.s3db
    settings = current.deployment_settings

    # Set record approval on
    settings.auth.record_approval = True

    # Install an onapprove hook for organisations which records
    # the id of the approved record
    self.approved_org = None
    def org_onapprove_test(record):
        self.approved_org = record.id
    org_onapprove = s3db.get_config("org_organisation", "onapprove")
    otable_requires_approval = s3db.get_config("org_organisation", "requires_approval", False)
    s3db.configure("org_organisation",
                   onapprove=org_onapprove_test,
                   requires_approval=True)

    # Same for offices
    self.approved_office = None
    def office_onapprove_test(record):
        self.approved_office = record.id
    office_onapprove = s3db.get_config("org_office", "onapprove")
    ftable_requires_approval = s3db.get_config("org_office", "requires_approval", False)
    s3db.configure("org_office",
                   onapprove=office_onapprove_test,
                   requires_approval=True)
    try:
        # Impersonate as admin
        auth.s3_impersonate("admin@example.com")

        # Create test record
        otable = s3db.org_organisation
        otable.approved_by.default = None
        org = Storage(name="Test Approval Organisation")
        org_id = otable.insert(**org)
        self.assertTrue(org_id > 0)
        org.update(id=org_id)
        s3db.update_super(otable, org)

        # Create test component (an office of that organisation)
        ftable = s3db.org_office
        ftable.approved_by.default = None
        office = Storage(name="Test Approval Office",
                         organisation_id=org_id)
        office_id = ftable.insert(**office)
        self.assertTrue(office_id > 0)
        office.update(id=office_id)
        s3db.update_super(ftable, office)

        # Check records: both start out without an approver
        row = db(otable.id==org_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)
        row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)

        approved = auth.permission.approved
        unapproved = auth.permission.unapproved

        # Check approved/unapproved
        self.assertFalse(approved(otable, org_id))
        self.assertTrue(unapproved(otable, org_id))
        self.assertFalse(approved(ftable, office_id))
        self.assertTrue(unapproved(ftable, office_id))

        # Approve the organisation including its office component
        resource = s3db.resource("org_organisation", id=org_id, unapproved=True)
        self.assertTrue(resource.approve(components=["office"]))

        # Check record: both now approved by the current user
        row = db(otable.id==org_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, auth.user.id)
        row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, auth.user.id)

        # Check approved/unapproved
        self.assertTrue(approved(otable, org_id))
        self.assertFalse(unapproved(otable, org_id))
        self.assertTrue(approved(ftable, office_id))
        self.assertFalse(unapproved(ftable, office_id))

        # Check hooks: both onapprove callbacks must have fired
        self.assertEqual(self.approved_org, org_id)
        self.assertEqual(self.approved_office, office_id)
    finally:
        # Undo all changes and restore the previous configuration
        current.db.rollback()
        settings.auth.record_approval = False
        auth.s3_impersonate(None)
        s3db.configure("org_organisation",
                       onapprove=org_onapprove,
                       requires_approval=otable_requires_approval)
        s3db.configure("org_office",
                       onapprove=office_onapprove,
                       requires_approval=ftable_requires_approval)
# -------------------------------------------------------------------------
def testRecordApprovalWithoutComponents(self):
    """ Test record approval without components"""
    db = current.db
    auth = current.auth
    s3db = current.s3db
    settings = current.deployment_settings

    # Set record approval on
    settings.auth.record_approval = True

    # Require approval for both tables (previous settings are
    # restored in the finally-branch)
    otable = s3db.org_organisation
    otable_requires_approval = s3db.get_config(otable, "requires_approval", None)
    s3db.configure(otable, requires_approval=True)
    ftable = s3db.org_office
    ftable_requires_approval = s3db.get_config(ftable, "requires_approval", None)
    s3db.configure(ftable, requires_approval=True)
    try:
        # Impersonate as admin
        auth.s3_impersonate("admin@example.com")

        # Create test record
        otable = s3db.org_organisation
        otable.approved_by.default = None
        org = Storage(name="Test Approval Organisation")
        org_id = otable.insert(**org)
        self.assertTrue(org_id > 0)
        org.update(id=org_id)
        s3db.update_super(otable, org)

        # Create test component (an office of that organisation)
        ftable = s3db.org_office
        ftable.approved_by.default = None
        office = Storage(name="Test Approval Office",
                         organisation_id=org_id)
        office_id = ftable.insert(**office)
        self.assertTrue(office_id > 0)
        office.update(id=office_id)
        s3db.update_super(ftable, office)

        # Check records: both start out without an approver
        row = db(otable.id==org_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)
        row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)

        approved = auth.permission.approved
        unapproved = auth.permission.unapproved

        # Check approved/unapproved
        self.assertFalse(approved(otable, org_id))
        self.assertTrue(unapproved(otable, org_id))
        self.assertFalse(approved(ftable, office_id))
        self.assertTrue(unapproved(ftable, office_id))

        # Approve the organisation only (components=None)
        resource = s3db.resource("org_organisation", id=org_id, unapproved=True)
        self.assertTrue(resource.approve(components=None))

        # Check record: organisation approved, office still unapproved
        row = db(otable.id==org_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, auth.user.id)
        row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)

        # Check approved/unapproved
        self.assertTrue(approved(otable, org_id))
        self.assertFalse(unapproved(otable, org_id))
        self.assertFalse(approved(ftable, office_id))
        self.assertTrue(unapproved(ftable, office_id))
    finally:
        # Undo all changes and restore the previous configuration
        current.db.rollback()
        settings.auth.record_approval = False
        if otable_requires_approval is not None:
            s3db.configure("org_organisation",
                           requires_approval=otable_requires_approval)
        if ftable_requires_approval is not None:
            s3db.configure("org_office",
                           requires_approval=ftable_requires_approval)
        auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testRecordReject(self):
    """
    Test record rejection: rejecting an unapproved record marks it
    and its components as deleted and triggers the onreject hooks
    """
    db = current.db
    auth = current.auth
    s3db = current.s3db
    settings = current.deployment_settings

    # Install an onreject hook for organisations which records
    # the id of the rejected record
    self.rejected_org = None
    def org_onreject_test(record):
        self.rejected_org = record.id
    org_onreject = s3db.get_config("org_organisation", "onreject")
    s3db.configure("org_organisation", onreject=org_onreject_test)

    # Same for offices
    self.rejected_office = None
    def office_onreject_test(record):
        self.rejected_office = record.id
    office_onreject = s3db.get_config("org_office", "onreject")
    s3db.configure("org_office", onreject=office_onreject_test)

    # Set record approval on
    settings.auth.record_approval = True

    # Remember the previous requires_approval settings for restoration
    otable = s3db.org_organisation
    otable_requires_approval = s3db.get_config(otable, "requires_approval", None)
    otable.approved_by.default = None
    ftable = s3db.org_office
    ftable_requires_approval = s3db.get_config(ftable, "requires_approval", None)
    ftable.approved_by.default = None
    try:
        # Impersonate as admin
        auth.s3_impersonate("admin@example.com")

        # Create test record
        org = Storage(name="Test Reject Organisation")
        org_id = otable.insert(**org)
        self.assertTrue(org_id > 0)
        org.update(id=org_id)
        s3db.update_super(otable, org)

        # Create test component (an office of that organisation)
        office = Storage(name="Test Reject Office",
                         organisation_id=org_id)
        office_id = ftable.insert(**office)
        self.assertTrue(office_id > 0)
        office.update(id=office_id)
        s3db.update_super(ftable, office)

        # Check records: both start out without an approver
        row = db(otable.id==org_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)
        row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)

        # Activate approval for these tables
        s3db.configure(otable, requires_approval=True)
        s3db.configure(ftable, requires_approval=True)

        approved = auth.permission.approved
        unapproved = auth.permission.unapproved

        # Check approved/unapproved
        self.assertFalse(approved(otable, org_id))
        self.assertTrue(unapproved(otable, org_id))
        self.assertFalse(approved(ftable, office_id))
        self.assertTrue(unapproved(ftable, office_id))

        # Reject the organisation
        resource = s3db.resource("org_organisation", id=org_id, unapproved=True)
        self.assertTrue(resource.reject())

        # Check records: both still unapproved, and now marked deleted
        row = db(otable.id==org_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)
        self.assertTrue(row.deleted)
        row = db(ftable.id==office_id).select(limitby=(0, 1)).first()
        self.assertNotEqual(row, None)
        self.assertEqual(row.approved_by, None)
        self.assertTrue(row.deleted)

        # Check hooks: both onreject callbacks must have fired
        self.assertEqual(self.rejected_org, org_id)
        self.assertEqual(self.rejected_office, office_id)
    finally:
        # Undo all changes and restore the previous configuration
        current.db.rollback()
        settings.auth.record_approval = False
        auth.s3_impersonate(None)
        s3db.configure("org_organisation", onreject=org_onreject)
        if otable_requires_approval is not None:
            s3db.configure("org_organisation",
                           requires_approval=otable_requires_approval)
        s3db.configure("org_office", onreject=office_onreject)
        if ftable_requires_approval is not None:
            # NOTE(review): onreject is restored a second time here
            # (already restored two lines above) - redundant but harmless
            s3db.configure("org_office",
                           onreject=office_onreject,
                           requires_approval=ftable_requires_approval)
# -------------------------------------------------------------------------
    def testHasPermissionWithRecordApproval(self):
        """
            Test has_permission with record approval

            Walks an unapproved record through its permission life-cycle:
            visible while approval is not required for the table, hidden
            from normal users once it is, review/approve/reject permitted
            only with the approver ACL, and normal access (but not the
            approval methods) restored once the record is approved.
        """
        db = current.db
        auth = current.auth
        acl = auth.permission
        s3db = current.s3db
        settings = current.deployment_settings
        has_permission = auth.s3_has_permission
        AUTHENTICATED = auth.get_system_roles().AUTHENTICATED
        # Store global settings (restored in the finally-clause)
        approval = settings.get_auth_record_approval()
        approval_required = settings.get_auth_record_approval_required_for()
        # Record approval on, but for no tables
        settings.auth.record_approval = True
        settings.auth.record_approval_required_for = []
        try:
            # Impersonate as admin
            auth.s3_impersonate("admin@example.com")
            # Create test record (approved_by=None => unapproved)
            otable = s3db.org_organisation
            otable.approved_by.default = None
            org = Storage(name="Test Approval Organisation")
            org_id = otable.insert(**org)
            self.assertTrue(org_id > 0)
            org.update(id=org_id)
            s3db.update_super(otable, org)
            # Normal can see unapproved record if approval is not on for this table
            auth.s3_impersonate("normaluser@example.com")
            permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted) # not allowed as per ACL!
            # They can not run any of the approval methods without permission, though
            permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            # Turn on approval for this table
            settings.auth.record_approval_required_for = ["org_organisation"]
            # Normal user must not see unapproved record
            auth.s3_impersonate("normaluser@example.com")
            permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            # Normal user can not review/approve/reject the record
            permitted = has_permission(["read", "review"], otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            # Normal user can see the unapproved record if he owns it
            db(otable.id==org_id).update(owned_by_user=auth.user.id)
            # Re-impersonate to refresh the cached permissions
            auth.s3_impersonate("normaluser@example.com")
            permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted) # not permitted per ACL
            # Normal user can not review/approve/reject the record even if he owns it
            permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            # Remove the ownership again
            db(otable.id==org_id).update(owned_by_user=None)
            # Give user review and approve permissions on this table
            acl.update_acl(AUTHENTICATED,
                           c="org",
                           uacl=acl.READ|acl.REVIEW|acl.APPROVE,
                           oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
            acl.update_acl(AUTHENTICATED,
                           t="org_organisation",
                           uacl=acl.READ|acl.CREATE|acl.REVIEW|acl.APPROVE,
                           oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
            # Normal user can still not see unapproved record even if they have approve-permissions
            auth.s3_impersonate("normaluser@example.com")
            permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            # Normal user can review/approve/reject if they have the approver role
            permitted = has_permission(["read", "review"], otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            # Admin can always see the record
            auth.s3_impersonate("admin@example.com")
            permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            # Approve the record
            resource = s3db.resource(otable, id=org_id, unapproved=True)
            resource.approve()
            # Normal user can not review/approve/reject once the record is approved
            auth.s3_impersonate("normaluser@example.com")
            permitted = has_permission("review", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("approve", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            permitted = has_permission("reject", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted)
            # Normal user can now see the record without having the approver role
            auth.s3_impersonate("normaluser@example.com")
            permitted = has_permission("read", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("update", otable, record_id=org_id, c="org", f="organisation")
            self.assertTrue(permitted)
            permitted = has_permission("delete", otable, record_id=org_id, c="org", f="organisation")
            self.assertFalse(permitted) # not allowed as per ACL!
        finally:
            # Restore global settings
            settings.auth.record_approval = approval
            settings.auth.record_approval_required_for = approval_required
            auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def testAccessibleQueryWithRecordApproval(self):
""" Test accessible_query with record approval """
db = current.db
auth = current.auth
acl = auth.permission
s3db = current.s3db
settings = current.deployment_settings
accessible_query = auth.s3_accessible_query
session = current.session
table = s3db.pr_person
otable = s3db.org_organisation
approval = settings.get_auth_record_approval()
approval_required = settings.get_auth_record_approval_required_for()
# Record approval on, but for no tables
settings.auth.record_approval = True
settings.auth.record_approval_required_for = []
try:
AUTHENTICATED = auth.get_system_roles().AUTHENTICATED
# Admin can always see all records
auth.s3_impersonate("admin@example.com")
query = accessible_query("read", table, c="pr", f="person")
expected = (table.id > 0)
self.assertEqual(str(query), str(expected))
# User can only see their own records - approved_by not relevant
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="pr", f="person")
self.assertFalse("approved_by" in str(query))
table = s3db.org_organisation
# Approval not required by default
auth.s3_impersonate("normaluser@example.com")
query = accessible_query("read", table, c="org", f="organisation")
expected = (table.id > 0)
self.assertEqual(str(query), str(expected))
settings.auth.record_approval_required_for = ["org_organisation"]
# Admin can see all records
auth.s3_impersonate("admin@example.com")
# See only approved records in read
query = accessible_query("read", table, c="org", f="organisation")
expected = (table.approved_by != None) | \
(table.owned_by_user == auth.user.id)
self.assertEqual(str(query), str(expected))
# See only unapproved records in review
query = accessible_query("review", table, c="org", f="organisation")
expected = (table.approved_by == None)
self.assertEqual(str(query), str(expected))
# See all records with both
query = accessible_query(["read", "review"], table, c="org", f="organisation")
expected = (table.id > 0)
self.assertEqual(str(query), str(expected))
# User can only see approved records
auth.s3_impersonate("normaluser@example.com")
# See only approved and personally owned records in read
query = accessible_query("read", table, c="org", f="organisation")
expected = (table.approved_by != None) | \
(table.owned_by_user == auth.user.id)
self.assertEqual(str(query), str(expected))
# See no records in approve
query = accessible_query("review", table, c="org", f="organisation")
expected = (table.id == 0)
self.assertEqual(str(query), str(expected))
# See only approved and personally owned records with both
query = accessible_query(["read", "review"], table, c="org", f="organisation")
expected = (table.approved_by != None) | \
(table.owned_by_user == auth.user.id)
self.assertEqual(str(query), str(expected))
# Give user review and approve permissions on this table
acl.update_acl(AUTHENTICATED,
c="org",
uacl=acl.READ|acl.REVIEW|acl.APPROVE,
oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
acl.update_acl(AUTHENTICATED,
t="org_organisation",
uacl=acl.READ|acl.CREATE|acl.REVIEW|acl.APPROVE,
oacl=acl.READ|acl.UPDATE|acl.REVIEW|acl.APPROVE)
# User can now access unapproved records
auth.s3_impersonate("normaluser@example.com")
# See only approved records in read
query = accessible_query("read", table, c="org", f="organisation")
expected = (table.approved_by != None) | \
(table.owned_by_user == auth.user.id)
self.assertTrue(str(expected) in str(query))
# See only unapproved records in review
query = accessible_query("review", table, c="org", f="organisation")
expected = (table.approved_by != None)
self.assertFalse(str(expected) in str(query))
expected = (table.approved_by == None)
self.assertTrue(str(expected) in str(query))
# See all records with both
query = accessible_query(["read", "approve"], table, c="org", f="organisation")
expected = (table.approved_by != None) | \
(table.owned_by_user == auth.user.id)
self.assertTrue(str(expected) in str(query))
expected = (table.approved_by == None)
self.assertTrue(str(expected) in str(query))
# Turn off record approval and check the default query
settings.auth.record_approval = False
query = accessible_query("read", table, c="org", f="organisation")
expected = (table.id > 0)
self.assertEqual(str(query), str(expected))
finally:
settings.auth.record_approval = approval
settings.auth.record_approval_required_for = approval_required
auth.s3_impersonate(None)
# -------------------------------------------------------------------------
def tearDown(self):
settings = current.deployment_settings
settings.security.policy = self.policy
settings.auth.record_approval = self.approval
settings.auth.record_approval_required_for = self.approval_for
current.auth.s3_impersonate(None)
current.db.rollback()
# =============================================================================
class RealmEntityTests(unittest.TestCase):
    """
        Test customization hooks for realm entity

        The dummy hooks below record which (tablename, record_id) they
        were invoked for in self.owned_record, so each test can verify
        whether - and for which record - the hook was actually called.
    """
    # -------------------------------------------------------------------------
    def setUp(self):
        """ Create test org+office records, clear all realm_entity hooks """
        s3db = current.s3db
        # Create a dummy organisation record
        otable = s3db.org_organisation
        org = Storage(name="Ownership Test Organisation")
        org_id = otable.insert(**org)
        org.update(id=org_id)
        s3db.update_super(otable, org)
        self.org_id = org_id
        # Create a dummy office record linked to the organisation
        ftable = s3db.org_office
        office = Storage(organisation_id=self.org_id,
                         name="Ownership Test Office")
        office_id = ftable.insert(**office)
        office.update(id=office_id)
        s3db.update_super(ftable, office)
        self.office_id = office_id
        # Store and clear the hooks (restored in tearDown)
        tname = "org_organisation"
        settings = current.deployment_settings
        self.ghook = settings.get_auth_realm_entity()
        self.shook = s3db.get_config(tname, "realm_entity")
        settings.auth.realm_entity = None
        s3db.clear_config(tname, "realm_entity")
        # Call-tracking slot for the dummy hooks
        self.owned_record = None
    # -------------------------------------------------------------------------
    def testTableSpecificRealmEntity(self):
        """ Test table-specific realm_entity hook """
        s3db = current.s3db
        auth = current.auth
        otable = s3db.org_organisation
        record = otable[self.org_id]
        tname = "org_organisation"
        s3db.configure(tname, realm_entity = self.realm_entity)
        auth.s3_set_record_owner(otable, record, force_update=True)
        # Hook must have been called for this record
        self.assertEqual(self.owned_record, (tname, record.id))
    # -------------------------------------------------------------------------
    def testGlobalRealmEntity(self):
        """ Test global realm_entity hook """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        otable = s3db.org_organisation
        record = otable[self.org_id]
        tname = "org_organisation"
        settings.auth.realm_entity = self.realm_entity
        auth.s3_set_record_owner(otable, record, force_update=True)
        # Hook must have been called for this record
        self.assertEqual(self.owned_record, (tname, record.id))
    # -------------------------------------------------------------------------
    def testRealmEntityOverride(self):
        """ Check whether global realm_entity hook overrides any table-specific setting """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        otable = s3db.org_organisation
        record = otable[self.org_id]
        tname = "org_organisation"
        # Table-specific hook records (tname, id), global hook records "checked"
        s3db.configure(tname, realm_entity = self.realm_entity)
        settings.auth.realm_entity = self.realm_entity_override
        auth.s3_set_record_owner(otable, record, force_update=True)
        # The global hook must win
        self.assertEqual(self.owned_record, "checked")
    # -------------------------------------------------------------------------
    def testSetRealmEntityWithRecord(self):
        """ Test the realm entity can be set for a record """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        otable = s3db.org_organisation
        record = otable[self.org_id]
        tname = "org_organisation"
        settings.auth.realm_entity = self.realm_entity
        auth.set_realm_entity(otable, record, force_update=True)
        self.assertEqual(self.owned_record, (tname, record.id))
        # Dummy hook returns 5 as the realm entity
        record = otable[self.org_id]
        self.assertEqual(record.realm_entity, 5)
    # -------------------------------------------------------------------------
    def testSetRealmEntityWithRealmComponent(self):
        """ Test whether the realm entity of the component updates automatically """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        # Store the current realm_components setting (restored in finally)
        realm_components = s3db.get_config("org_organisation",
                                           "realm_components", "none")
        s3db.configure("org_organisation",
                       realm_components = ["office"])
        try:
            otable = s3db.org_organisation
            ftable = s3db.org_office
            settings.auth.realm_entity = self.realm_entity
            # Reset the realm entity of both master and component
            record = otable[self.org_id]
            record.update_record(realm_entity = None)
            record = ftable[self.office_id]
            record.update_record(realm_entity = None)
            record = otable[self.org_id]
            auth.set_realm_entity(otable, record, force_update=True)
            tname = "org_organisation"
            self.assertEqual(self.owned_record, (tname, record.id))
            # Both the master record and the component must get entity 5
            record = otable[self.org_id]
            self.assertEqual(record.realm_entity, 5)
            record = ftable[self.office_id]
            self.assertEqual(record.realm_entity, 5)
        finally:
            if realm_components != "none":
                s3db.configure("org_organisation",
                               realm_components=realm_components)
            else:
                s3db.clear_config("org_organisation", "realm_components")
    # -------------------------------------------------------------------------
    def testSetRealmEntityWithRecordID(self):
        """ Test the realm entity can be set for a record ID """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        otable = s3db.org_organisation
        record = otable[self.org_id]
        tname = "org_organisation"
        settings.auth.realm_entity = self.realm_entity
        auth.set_realm_entity(otable, self.org_id, force_update=True)
        self.assertEqual(self.owned_record, (tname, record.id))
        record = otable[self.org_id]
        self.assertEqual(record.realm_entity, 5)
    # -------------------------------------------------------------------------
    def testSetRealmEntityWithRecordIDList(self):
        """ Test the realm entity can be set for a list of record IDs """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        otable = s3db.org_organisation
        record = otable[self.org_id]
        tname = "org_organisation"
        settings.auth.realm_entity = self.realm_entity
        auth.set_realm_entity(otable, [self.org_id], force_update=True)
        self.assertEqual(self.owned_record, (tname, record.id))
        record = otable[self.org_id]
        self.assertEqual(record.realm_entity, 5)
    # -------------------------------------------------------------------------
    def testSetRealmEntityWithQuery(self):
        """ Test the realm entity can be set for a query """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        otable = s3db.org_organisation
        record = otable[self.org_id]
        tname = "org_organisation"
        settings.auth.realm_entity = self.realm_entity
        query = (otable.id == self.org_id)
        auth.set_realm_entity(otable, query, force_update=True)
        self.assertEqual(self.owned_record, (tname, record.id))
        record = otable[self.org_id]
        self.assertEqual(record.realm_entity, 5)
    # -------------------------------------------------------------------------
    def testSetRealmEntityWithQueryAndOverride(self):
        """ Test that realm entity can be overridden by call """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        otable = s3db.org_organisation
        settings.auth.realm_entity = self.realm_entity
        query = (otable.id == self.org_id)
        auth.set_realm_entity(otable, query, entity=4, force_update=True)
        # Explicit entity bypasses the hook, so owned_record stays None
        self.assertEqual(self.owned_record, None)
        record = otable[self.org_id]
        self.assertEqual(record.realm_entity, 4)
    # -------------------------------------------------------------------------
    def testSetRealmEntityWithQueryAndOverrideNone(self):
        """ Test that realm entity can be set to None """
        s3db = current.s3db
        auth = current.auth
        settings = current.deployment_settings
        otable = s3db.org_organisation
        settings.auth.realm_entity = self.realm_entity
        query = (otable.id == self.org_id)
        auth.set_realm_entity(otable, query, entity=None, force_update=True)
        # Explicit entity (even None) bypasses the hook
        self.assertEqual(self.owned_record, None)
        record = otable[self.org_id]
        self.assertEqual(record.realm_entity, None)
    # -------------------------------------------------------------------------
    def testUpdateSharedFields(self):
        """ Test that realm entity gets set in super-entity """
        s3db = current.s3db
        auth = current.auth
        ftable = s3db.org_office
        stable = s3db.org_site
        row = ftable[self.office_id]
        row.update_record(realm_entity=row["pe_id"])
        site_id = row["site_id"]
        # Clearing the realm entity must propagate to the site record
        auth.update_shared_fields(ftable, self.office_id, realm_entity=None)
        site = stable[site_id]
        self.assertEqual(site["realm_entity"], None)
        # ...and so must setting it
        auth.update_shared_fields(ftable, self.office_id, realm_entity=row["realm_entity"])
        site = stable[site_id]
        self.assertEqual(site["realm_entity"], row["realm_entity"])
    # -------------------------------------------------------------------------
    def realm_entity(self, table, row):
        """ Dummy hook: records the call, returns 5 as realm entity """
        self.owned_record = (table._tablename, row.id)
        return 5
    # -------------------------------------------------------------------------
    def realm_entity_override(self, table, row):
        """ Dummy hook: records the call, returns 6 as realm entity """
        self.owned_record = "checked"
        return 6
    # -------------------------------------------------------------------------
    def tearDown(self):
        """ Roll back the DB and restore the realm_entity hooks """
        s3db = current.s3db
        settings = current.deployment_settings
        # Rollback DB
        current.db.rollback()
        # Restore the hooks
        settings.auth.realm_entity = self.ghook
        if self.shook is not None:
            s3db.configure("org_organisation", realm_entity=self.shook)
# =============================================================================
class LinkToPersonTests(unittest.TestCase):
    """
        Test s3_link_to_person

        Checks that user accounts get linked to new or pre-existing
        person records, that linked person records are updated when the
        account changes, and that multiple accounts can be linked in
        one call.
    """
    # -------------------------------------------------------------------------
    def setUp(self):
        """ Create a test organisation, person, and email contact """
        s3db = current.s3db
        # Create organisation
        otable = s3db.org_organisation
        org = Storage(name="LTPRTestOrg")
        org_id = otable.insert(**org)
        self.assertTrue(org_id is not None)
        org["id"] = org_id
        s3db.update_super(otable, org)
        self.org_id = org_id
        self.org_pe_id = org.pe_id
        # Create person record
        ptable = s3db.pr_person
        person = Storage(first_name="TestLTPR",
                         last_name="User")
        person_id = ptable.insert(**person)
        self.assertTrue(person_id is not None)
        person["id"] = person_id
        s3db.update_super(ptable, person)
        self.person_id = person_id
        self.pe_id = person.pe_id
        # Add email contact (used to match accounts to this person)
        ctable = s3db.pr_contact
        contact = Storage(pe_id=self.pe_id,
                          contact_method="EMAIL",
                          value="testltpr@example.com")
        contact_id = ctable.insert(**contact)
        self.assertTrue(contact_id is not None)
    # -------------------------------------------------------------------------
    def testLinkToNewPerson(self):
        """ Test linking a user account to a new person record """
        auth = current.auth
        s3db = current.s3db
        # Create new user record (email does NOT match the setUp person)
        utable = auth.settings.table_user
        user = Storage(first_name="TestLTPR2",
                       last_name="User",
                       email="testltpr2@example.com",
                       password="XYZ")
        user_id = utable.insert(**user)
        self.assertTrue(user_id is not None)
        user["id"] = user_id
        # Link to person
        person_id = auth.s3_link_to_person(user, self.org_id)
        # Check the person_id: must be a single, newly created record
        self.assertNotEqual(person_id, None)
        self.assertFalse(isinstance(person_id, list))
        self.assertNotEqual(person_id, self.person_id)
        # Get the person record
        ptable = s3db.pr_person
        person = ptable[person_id]
        self.assertNotEqual(person, None)
        # Check the owner: realm must be the organisation's pe_id
        self.assertEqual(person.realm_entity, self.org_pe_id)
        # Check the link: exactly one user<->person link record
        ltable = s3db.pr_person_user
        query = (ltable.user_id == user_id) & \
                (ltable.pe_id == person.pe_id)
        links = current.db(query).select()
        self.assertEqual(len(links), 1)
    # -------------------------------------------------------------------------
    def testLinkToExistingPerson(self):
        """ Test linking a user account to a pre-existing person record """
        auth = current.auth
        s3db = current.s3db
        # Create new user record (email matches the setUp person's contact)
        utable = auth.settings.table_user
        user = Storage(first_name="TestLTPR",
                       last_name="User",
                       email="testltpr@example.com",
                       password="XYZ")
        user_id = utable.insert(**user)
        self.assertTrue(user_id is not None)
        user["id"] = user_id
        # Link to person record
        person_id = auth.s3_link_to_person(user, self.org_id)
        # Check the person_id: must be the pre-existing person record
        self.assertNotEqual(person_id, None)
        self.assertFalse(isinstance(person_id, list))
        self.assertEqual(person_id, self.person_id)
        # Get the person record
        ptable = s3db.pr_person
        person = ptable[person_id]
        self.assertNotEqual(person, None)
        # Check the link
        ltable = s3db.pr_person_user
        query = (ltable.user_id == user_id) & \
                (ltable.pe_id == person.pe_id)
        links = current.db(query).select()
        self.assertEqual(len(links), 1)
    # -------------------------------------------------------------------------
    def testUpdateLinkedPerson(self):
        """ Test update of a pre-linked person record upon user account update """
        auth = current.auth
        s3db = current.s3db
        # Create new user record
        utable = auth.settings.table_user
        user = Storage(first_name="TestLTPR",
                       last_name="User",
                       email="testltpr@example.com",
                       password="XYZ")
        user_id = utable.insert(**user)
        self.assertTrue(user_id is not None)
        user["id"] = user_id
        # Link to person
        person_id = auth.s3_link_to_person(user, self.org_id)
        # Check the person_id
        self.assertNotEqual(person_id, None)
        self.assertFalse(isinstance(person_id, list))
        self.assertEqual(person_id, self.person_id)
        # Update the user record
        update = Storage(first_name="TestLTPR2",
                         last_name="User",
                         email="testltpr2@example.com")
        current.db(utable.id == user_id).update(**update)
        update["id"] = user_id
        # Link to person record again
        # NOTE(review): passes the original "user" Storage, not "update" -
        # presumably s3_link_to_person re-reads the account from the DB by
        # its id; verify, otherwise the assertions below would be checking
        # stale name/email values
        update_id = auth.s3_link_to_person(user, self.org_id)
        # Check unchanged person_id
        self.assertEqual(update_id, person_id)
        # Check updated person record: names must reflect the account update
        ptable = s3db.pr_person
        person = ptable[update_id]
        self.assertEqual(person.first_name, update["first_name"])
        self.assertEqual(person.last_name, update["last_name"])
        # Check updated contact record: old and new email both present
        ctable = s3db.pr_contact
        query = (ctable.pe_id == self.pe_id) & \
                (ctable.contact_method == "EMAIL")
        contacts = current.db(query).select()
        self.assertEqual(len(contacts), 2)
        emails = [contact.value for contact in contacts]
        self.assertTrue(user.email in emails)
        self.assertTrue(update.email in emails)
    # -------------------------------------------------------------------------
    def testMultipleUserRecords(self):
        """ Test s3_link_to_person with multiple user accounts """
        auth = current.auth
        s3db = current.s3db
        # Create new user records
        utable = auth.settings.table_user
        users = []
        user1 = Storage(first_name="TestLTPR1",
                        last_name="User",
                        email="testltpr1@example.com",
                        password="XYZ")
        user_id = utable.insert(**user1)
        self.assertTrue(user_id is not None)
        user1["id"] = user_id
        users.append(user1)
        user2 = Storage(first_name="TestLTPR2",
                        last_name="User",
                        email="testltpr2@example.com",
                        password="XYZ")
        user_id = utable.insert(**user2)
        self.assertTrue(user_id is not None)
        user2["id"] = user_id
        users.append(user2)
        user3 = Storage(first_name="TestLTPR3",
                        last_name="User",
                        email="testltpr3@example.com",
                        password="XYZ")
        user_id = utable.insert(**user3)
        self.assertTrue(user_id is not None)
        user3["id"] = user_id
        users.append(user3)
        # Linking a list of accounts must return a list of person_ids
        person_ids = auth.s3_link_to_person(users, self.org_id)
        self.assertTrue(isinstance(person_ids, list))
        self.assertEqual(len(person_ids), 3)
        # Spot-check one account: its pe_id must resolve to a linked person
        auth.s3_impersonate("testltpr2@example.com")
        pe_id = auth.user.pe_id
        ptable = s3db.pr_person
        query = (ptable.pe_id == pe_id)
        person2 = current.db(query).select().first()
        self.assertNotEqual(person2, None)
        self.assertTrue(person2.id in person_ids)
    # -------------------------------------------------------------------------
    def tearDown(self):
        """ Log out and roll back all test data """
        current.auth.s3_impersonate(None)
        current.db.rollback()
# =============================================================================
class EntityRoleManagerTests(unittest.TestCase):
    """ Test the entity role manager """
    # -------------------------------------------------------------------------
    def setUp(self):
        """ Assign two entity-scoped roles to the normal user """
        auth = current.auth
        # Test-login as system administrator
        auth.s3_impersonate("admin@example.com")
        self.rm = S3EntityRoleManager()
        self.user_id = auth.s3_get_user_id("normaluser@example.com")
        # NOTE(review): assumes pe_id 1 exists in the test database as a
        # valid realm entity - verify against the test fixtures
        self.org_id = 1
        auth.s3_assign_role(self.user_id, "staff_reader", for_pe=self.org_id)
        auth.s3_assign_role(self.user_id, "project_editor", for_pe=self.org_id)
    # -------------------------------------------------------------------------
    def testGetAssignedRoles(self):
        """ Test get_assigned_roles """
        # Lookup by entity: maps user_id => assigned role names
        roles = self.rm.get_assigned_roles(entity_id=self.org_id)
        self.assertTrue(self.user_id in roles)
        assigned_roles = roles[self.user_id]
        self.assertEqual(len(assigned_roles), 2)
        self.assertTrue("staff_reader" in assigned_roles)
        self.assertTrue("project_editor" in assigned_roles)
        # Lookup by entity and user: same result, filtered to that user
        roles = self.rm.get_assigned_roles(entity_id=self.org_id,
                                           user_id=self.user_id)
        self.assertTrue(self.user_id in roles)
        assigned_roles = roles[self.user_id]
        self.assertEqual(len(assigned_roles), 2)
        self.assertTrue("staff_reader" in assigned_roles)
        self.assertTrue("project_editor" in assigned_roles)
        # Lookup by user: maps entity_id => assigned role names
        assigned_roles = self.rm.get_assigned_roles(user_id=self.user_id)
        self.assertTrue(all([r in assigned_roles[self.org_id]
                             for r in ("staff_reader", "project_editor")]))
        self.assertEqual(len(assigned_roles[self.org_id]), 2)
        roles = self.rm.get_assigned_roles(user_id=self.user_id)
        self.assertTrue(self.org_id in roles)
        assigned_roles = roles[self.org_id]
        self.assertEqual(len(assigned_roles), 2)
        self.assertTrue("staff_reader" in assigned_roles)
        self.assertTrue("project_editor" in assigned_roles)
        # Calling without entity_id or user_id is an error
        self.assertRaises(RuntimeError, self.rm.get_assigned_roles)
    # -------------------------------------------------------------------------
    def testUpdateRoles(self):
        """ Test that before/after works """
        before = ("staff_reader", "project_editor")
        after = ("survey_reader",)
        # Give the user a new set of roles
        self.rm.update_roles(self.user_id,
                             self.org_id,
                             before,
                             after)
        assigned_roles = self.rm.get_assigned_roles(user_id=self.user_id)
        self.assertTrue(self.org_id in assigned_roles)
        self.assertTrue(all([r in assigned_roles[self.org_id]
                             for r in after]))
        self.assertEqual(len(assigned_roles[self.org_id]), len(after))
        # Reverse the changes
        self.rm.update_roles(self.user_id,
                             self.org_id,
                             after,
                             before)
        assigned_roles = self.rm.get_assigned_roles(user_id=self.user_id)
        self.assertTrue(self.org_id in assigned_roles)
        self.assertTrue(all([r in assigned_roles[self.org_id]
                             for r in before]))
        self.assertEqual(len(assigned_roles[self.org_id]), len(before))
    # -------------------------------------------------------------------------
    def tearDown(self):
        """ Withdraw the test roles, log out, and roll back """
        auth = current.auth
        auth.s3_impersonate(None)
        auth.s3_withdraw_role(self.user_id, "staff_reader", for_pe=self.org_id)
        auth.s3_withdraw_role(self.user_id, "project_editor", for_pe=self.org_id)
        current.db.rollback()
    # -------------------------------------------------------------------------
    @classmethod
    def tearDownClass(cls):
        # No class-level fixtures to clean up
        pass
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
    # Run the complete auth/permission test suite when this module is
    # executed standalone
    run_suite(
        AuthUtilsTests,
        SetRolesTests,
        RoleAssignmentTests,
        RecordOwnershipTests,
        ACLManagementTests,
        HasPermissionTests,
        AccessibleQueryTests,
        DelegationTests,
        RecordApprovalTests,
        RealmEntityTests,
        LinkToPersonTests,
        EntityRoleManagerTests,
    )
# END ========================================================================
| mit |
Kali-/android_kernel_sony_msm8660 | scripts/gcc-wrapper.py | 501 | 3410 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
# Whitelist of warnings, keyed as "file.ext:line", that are known and
# accepted: they are printed but do not fail the build.
allowed_warnings = set([
    "alignment.c:327",
    "mmu.c:602",
    "return_address.c:62",
])
# Path of the output object file (from the -o argument); set by run_gcc()
# so interpret_warning() can delete it when a forbidden warning is seen.
ofile = None
# Matches gcc diagnostics like "path/file.c:123: warning:" or
# "path/file.c:123:45: warning:"; group(2) is "file.c:123", the key
# compared against allowed_warnings above.
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
    """Check one line of gcc stderr; abort the build on a forbidden warning.

    The messages we care about look like "file.c:123: warning: ...".
    If the "file.c:123" key is not in allowed_warnings, the partially
    built object file (if any) is removed and the process exits non-zero
    so make treats the warning as an error.
    """
    line = line.rstrip('\n')
    m = warning_re.match(line)
    if m and m.group(2) not in allowed_warnings:
        print "error, forbidden warning:", m.group(2)
        # If there is a warning, remove any object if it exists.
        if ofile:
            try:
                os.remove(ofile)
            except OSError:
                pass
        sys.exit(1)
def run_gcc():
    """Exec the real compiler, scanning its stderr for forbidden warnings.

    sys.argv[1:] is the full compiler command line (argv[1] is the compiler
    itself).  Returns the compiler's exit status, or an errno value if the
    compiler could not be executed at all.
    """
    args = sys.argv[1:]
    # Look for -o so interpret_warning() knows which object file to delete
    # if the build must be failed.
    try:
        i = args.index('-o')
        global ofile
        ofile = args[i+1]
    except (ValueError, IndexError):
        pass
    # NOTE(review): `compiler` is assigned but never used below.
    compiler = sys.argv[0]
    try:
        # Only stderr is captured; stdout passes through untouched.
        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
        for line in proc.stderr:
            # Echo the diagnostic, then check it against the whitelist
            # (interpret_warning() exits the process on a violation).
            print line,
            interpret_warning(line)
        result = proc.wait()
    except OSError as e:
        # Popen itself failed (e.g. compiler binary not found).
        result = e.errno
        if result == errno.ENOENT:
            print args[0] + ':',e.strerror
            print 'Is your PATH set correctly?'
        else:
            print ' '.join(args), str(e)
    return result
# Script entry point: forward the compiler's exit status (or the errno
# from a failed exec) so make sees success/failure correctly.
if __name__ == '__main__':
    status = run_gcc()
    sys.exit(status)
| gpl-2.0 |
OMNIL-Infinity/volatility | volatility/plugins/linux/pkt_queues.py | 44 | 3171 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import os
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.netstat as linux_netstat
import volatility.plugins.linux.common as linux_common
class linux_pkt_queues(linux_netstat.linux_netstat):
    """Writes per-process packet queues out to disk.

    For every socket found by linux_netstat, the receive and write sk_buff
    queues are walked and each packet payload is appended to a file named
    ``<queue>.<pid>.<fd>`` inside the directory given with -D/--dump-dir.
    """

    def __init__(self, config, *args, **kwargs):
        linux_netstat.linux_netstat.__init__(self, config, *args, **kwargs)
        # -D/--dump-dir: destination directory for the recovered packets.
        self._config.add_option('DUMP-DIR', short_option = 'D', default = None, help = 'output directory for recovered packets', action = 'store', type = 'str')

    def process_queue(self, name, pid, fd_num, queue):
        """Walk one sk_buff queue and dump every packet payload to a file.

        Parameters
        ----------
        name : str
            queue label ("receive" or "write"), used in the output filename
        pid : int
            owning process id
        fd_num : int
            file descriptor number of the socket
        queue : sk_buff_head object
            kernel queue whose buffers are read

        Yields a single status message if any bytes were written.
        """
        if queue.qlen == 0:
            return
        wrote = 0
        fname = "{0:s}.{1:d}.{2:d}".format(name, pid, fd_num)
        fd = None
        sk_buff = queue.m("next")
        # The kernel list is circular: stop once we are back at the head.
        while sk_buff and sk_buff != queue.v():
            pkt_len = sk_buff.len
            # Skip empty buffers and the 0xffffffff "invalid length" marker.
            if pkt_len > 0 and pkt_len != 0xffffffff:
                # only open once we have a packet with data
                # otherwise we get 0 sized files
                # (was `fd == None`; identity comparison is the correct,
                # PEP 8 way to test for None)
                if fd is None:
                    fd = open(os.path.join(self.edir, fname), "wb")
                start = sk_buff.data
                data = self.addr_space.zread(start, pkt_len)
                fd.write(data)
                wrote = wrote + pkt_len
            sk_buff = sk_buff.next
        if wrote:
            yield "Wrote {0:d} bytes to {1:s}".format(wrote, fname)
        if fd:
            fd.close()

    def calculate(self):
        """Validate the dump directory, then dump both queues per socket."""
        linux_common.set_plugin_members(self)
        self.edir = self._config.DUMP_DIR
        if not self.edir:
            debug.error("No output directory given.")
        if not os.path.isdir(self.edir):
            debug.error(self.edir + " is not a directory")
        for (task, fd_num, inet_sock) in linux_netstat.linux_netstat(self._config).calculate():
            sk = inet_sock.sk
            for msg in self.process_queue("receive", task.pid, fd_num, sk.sk_receive_queue):
                yield msg
            for msg in self.process_queue("write", task.pid, fd_num, sk.sk_write_queue):
                yield msg

    def render_text(self, outfd, data):
        """Print one status line per dumped queue."""
        for msg in data:
            outfd.write(msg + "\n")
| gpl-2.0 |
AlexanderFabisch/scikit-learn | sklearn/decomposition/tests/test_pca.py | 21 | 11810 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
# Shared fixture: the iris dataset (150 samples x 4 features), used by
# several deterministic tests below.
iris = datasets.load_iris()
def test_pca():
    """Smoke-test PCA on a dense array: fit/transform consistency,
    explained-variance ratios summing to 1, and get_covariance /
    get_precision being matrix inverses of each other."""
    # PCA on dense arrays
    pca = PCA(n_components=2)
    X = iris.data
    X_r = pca.fit(X).transform(X)
    np.testing.assert_equal(X_r.shape[1], 2)
    # fit(X).transform(X) must agree with fit_transform(X)
    X_r2 = pca.fit_transform(X)
    assert_array_almost_equal(X_r, X_r2)
    pca = PCA()
    pca.fit(X)
    assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
    X_r = pca.transform(X)
    X_r2 = pca.fit_transform(X)
    assert_array_almost_equal(X_r, X_r2)
    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        pca.n_components = n_components
        pca.fit(X)
        cov = pca.get_covariance()
        precision = pca.get_precision()
        # covariance @ precision should be the identity matrix
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
def test_no_empty_slice_warning():
    """Fitting with more features than components must not trigger numpy
    empty-slice warnings (regression guard for 0.16 behaviour)."""
    # test if we avoid numpy warnings for computing over empty arrays
    n_components = 10
    n_features = n_components + 2 # anything > n_comps triggered it in 0.16
    X = np.random.uniform(-1, 1, size=(n_components, n_features))
    pca = PCA(n_components=n_components)
    assert_no_warnings(pca.fit, X)
def test_whitening():
    """Whitened output of PCA/RandomizedPCA has unit variance and zero
    mean; unwhitened output keeps the original varying variances.

    NOTE: the hard-coded reference values (43.9, 74.1) depend on the exact
    order in which the seeded RNG is consumed below — do not reorder.
    """
    # Check that PCA output has unit-variance
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    n_components = 30
    rank = 50
    # some low rank data with correlated features
    X = np.dot(rng.randn(n_samples, rank),
               np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
                      rng.randn(rank, n_features)))
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
    X[:, :50] *= 3
    assert_equal(X.shape, (n_samples, n_features))
    # the component-wise variance is thus highly varying:
    assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
    # exercise all four (estimator, copy-flag) combinations
    for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
                           for y in (True, False)]:
        # whiten the data while projecting to the lower dim subspace
        X_ = X.copy()  # make sure we keep an original across iterations.
        pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
        if hasattr(pca, 'random_state'):
            pca.random_state = rng
        # test fit_transform
        X_whitened = pca.fit_transform(X_.copy())
        assert_equal(X_whitened.shape, (n_samples, n_components))
        X_whitened2 = pca.transform(X_)
        assert_array_almost_equal(X_whitened, X_whitened2)
        # whitened components: unit variance, zero mean
        assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
                            decimal=4)
        assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
        X_ = X.copy()
        pca = this_PCA(n_components=n_components, whiten=False,
                       copy=copy).fit(X_)
        X_unwhitened = pca.transform(X_)
        assert_equal(X_unwhitened.shape, (n_samples, n_components))
        # in that case the output components still have varying variances
        assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
        # we always center, so no test for non-centering.
def test_explained_variance():
    """explained_variance_ of PCA and RandomizedPCA agree with each other
    (to the stated decimals) and with the empirical variance of the
    transformed data, on both isotropic and correlated inputs."""
    # Check that PCA output has unit-variance
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    X = rng.randn(n_samples, n_features)
    pca = PCA(n_components=2).fit(X)
    rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 1)
    # compare to empirical variances
    X_pca = pca.transform(X)
    assert_array_almost_equal(pca.explained_variance_,
                              np.var(X_pca, axis=0))
    X_rpca = rpca.transform(X)
    assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
                              decimal=1)
    # Same with correlated data
    X = datasets.make_classification(n_samples, n_features,
                                     n_informative=n_features-2,
                                     random_state=rng)[0]
    pca = PCA(n_components=2).fit(X)
    rpca = RandomizedPCA(n_components=2, random_state=rng).fit(X)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
    """A held-out point lying on the dominant direction projects (after
    normalization) onto the first principal axis."""
    # Test that the projection of data is correct
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    Yt = PCA(n_components=2).fit(X).transform(Xt)
    Yt /= np.sqrt((Yt ** 2).sum())
    # first coordinate dominates after normalization
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
    """inverse_transform approximately reconstructs near-rank-2 data,
    with and without whitening."""
    # Test that the projection of data can be inverted
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    pca = PCA(n_components=2).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
    # same as above with whitening (approximate reconstruction)
    pca = PCA(n_components=2, whiten=True)
    pca.fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
    """PCA.fit must raise ValueError for out-of-range n_components."""
    data = [[0, 1], [1, 0]]
    # -1 is below the valid range, 3 exceeds the number of features (2)
    for bad_n_components in (-1, 3):
        estimator = PCA(bad_n_components)
        assert_raises(ValueError, estimator.fit, data)
def test_randomized_pca_check_projection():
    """RandomizedPCA projects a held-out point on the dominant direction
    onto the first component (dense input)."""
    # Test that the projection by RandomizedPCA on dense data is correct
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
    Yt /= np.sqrt((Yt ** 2).sum())
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
    """RandomizedPCA accepts plain Python lists and centers the output."""
    # Test that the projection by RandomizedPCA on list data is correct
    X = [[1.0, 0.0], [0.0, 1.0]]
    X_transformed = RandomizedPCA(n_components=1,
                                  random_state=0).fit(X).transform(X)
    assert_equal(X_transformed.shape, (2, 1))
    # centered output with std ~ 1/sqrt(2)
    assert_almost_equal(X_transformed.mean(), 0.00, 2)
    assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    """inverse_transform of RandomizedPCA approximately reconstructs
    near-rank-2 data, with and without whitening."""
    # Test that RandomizedPCA is inversible on dense data
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed signal
    # (since the data is almost of rank n_components)
    pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=2)
    # same as above with whitening (approximate reconstruction)
    pca = RandomizedPCA(n_components=2, whiten=True,
                        random_state=0).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    # bound the worst-case relative reconstruction error instead
    relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
    assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
    """n_components='mle' infers an effective dimensionality of 1 for
    rank-1-plus-noise data."""
    # Check automated dimensionality setting
    rng = np.random.RandomState(0)
    n, p = 100, 5
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    pca = PCA(n_components='mle').fit(X)
    # the requested setting is preserved; the inferred value is separate
    assert_equal(pca.n_components, 'mle')
    assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
    """_assess_dimension_ gives (near-)maximal log-likelihood at the true
    latent dimension (1) for rank-1 structured data."""
    # TODO: explain what this is testing
    # Or at least use explicit variable names...
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
         + np.array([1, 0, 7, 4, 6]))
    pca = PCA(n_components=p)
    pca.fit(X)
    spect = pca.explained_variance_
    ll = []
    for k in range(p):
        ll.append(_assess_dimension_(spect, k, n, p))
    ll = np.array(ll)
    # the k=1 likelihood must be within .01*n of the maximum
    assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
    """_infer_dimension_ detects more than one latent direction when two
    distinct mean-shifts are present."""
    # TODO: explain what this is testing
    # Or at least use explicit variable names...
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    pca = PCA(n_components=p)
    pca.fit(X)
    spect = pca.explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
    """_infer_dimension_ detects more than two latent directions when a
    third structured shift is added."""
    n, p = 100, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
    pca = PCA(n_components=p)
    pca.fit(X)
    spect = pca.explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
    """A float n_components in (0, 1) is interpreted as the fraction of
    variance to keep, and n_components_ is inferred accordingly."""
    X = iris.data
    pca = PCA(n_components=0.95)
    pca.fit(X)
    assert_equal(pca.n_components, 0.95)
    # 2 components explain >= 95% of the iris variance
    assert_equal(pca.n_components_, 2)
    pca = PCA(n_components=0.01)
    pca.fit(X)
    assert_equal(pca.n_components, 0.01)
    # a tiny threshold keeps only the first component
    assert_equal(pca.n_components_, 1)
    rng = np.random.RandomState(0)
    # more features than samples
    X = rng.rand(5, 20)
    pca = PCA(n_components=.5).fit(X)
    assert_equal(pca.n_components, 0.5)
    assert_equal(pca.n_components_, 2)
def test_pca_score():
    """Probabilistic PCA log-likelihood matches the analytic entropy of
    an isotropic Gaussian with sigma = 0.1 (order of magnitude)."""
    # Test that probabilistic PCA scoring yields a reasonable score
    n, p = 1000, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    pca = PCA(n_components=2)
    pca.fit(X)
    ll1 = pca.score(X)
    # differential entropy of N(mu, 0.1^2 I_p)
    h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
    np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    """score() is higher on data drawn from the fitted model than on data
    with twice the noise; whitening does not change the score."""
    # Test that probabilistic PCA correctly separated different datasets
    n, p = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    pca = PCA(n_components=2)
    pca.fit(X)
    ll1 = pca.score(X)
    ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
    assert_greater(ll1, ll2)
    # Test that it gives the same scores if whiten=True
    pca = PCA(n_components=2, whiten=True)
    pca.fit(X)
    ll2 = pca.score(X)
    assert_almost_equal(ll1, ll2)
def test_pca_score3():
    """Model selection via held-out score() picks the true latent
    dimensionality (1) among k = 0, 1, 2."""
    # Check that probabilistic PCA selects the right model
    n, p = 200, 3
    rng = np.random.RandomState(0)
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k)
        pca.fit(Xl)
        ll[k] = pca.score(Xt)
    assert_true(ll.argmax() == 1)
| bsd-3-clause |
servo/servo | tests/wpt/web-platform-tests/tools/third_party/py/py/_io/saferepr.py | 273 | 2483 | import py
import sys
builtin_repr = repr
reprlib = py.builtin._tryimport('repr', 'reprlib')
class SafeRepr(reprlib.Repr):
    """ subclass of repr.Repr that limits the resulting size of repr()
    and includes information on exceptions raised during the call.
    """
    def repr(self, x):
        # Route through _callhelper so exceptions raised by x.__repr__
        # are caught and rendered as a placeholder instead of propagating.
        return self._callhelper(reprlib.Repr.repr, self, x)
    def repr_unicode(self, x, level):
        # Strictly speaking wrong on narrow builds
        def repr(u):
            # Pick a quote style that avoids escaping where possible,
            # mirroring how Python chooses quotes for string literals.
            if "'" not in u:
                return py.builtin._totext("'%s'") % u
            elif '"' not in u:
                return py.builtin._totext('"%s"') % u
            else:
                return py.builtin._totext("'%s'") % u.replace("'", r"\'")
        s = repr(x[:self.maxstring])
        if len(s) > self.maxstring:
            # Too long: keep the head and tail, elide the middle with '...'.
            i = max(0, (self.maxstring-3)//2)
            j = max(0, self.maxstring-3-i)
            s = repr(x[:i] + x[len(x)-j:])
            s = s[:i] + '...' + s[len(s)-j:]
        return s
    def repr_instance(self, x, level):
        # Arbitrary objects use the builtin repr, again guarded.
        return self._callhelper(builtin_repr, x)
    def _callhelper(self, call, x, *args):
        try:
            # Try the vanilla repr and make sure that the result is a string
            s = call(x, *args)
        except py.builtin._sysex:
            # never swallow SystemExit/KeyboardInterrupt and friends
            raise
        except:
            cls, e, tb = sys.exc_info()
            exc_name = getattr(cls, '__name__', 'unknown')
            try:
                # str(e) itself may raise; guard it the same way
                exc_info = str(e)
            except py.builtin._sysex:
                raise
            except:
                exc_info = 'unknown'
            # Fall back to a placeholder describing the failed repr() call.
            return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
                exc_name, exc_info, x.__class__.__name__, id(x))
        else:
            if len(s) > self.maxsize:
                # Successful repr but over budget: elide the middle.
                i = max(0, (self.maxsize-3)//2)
                j = max(0, self.maxsize-3-i)
                s = s[:i] + '...' + s[len(s)-j:]
            return s
def saferepr(obj, maxsize=240):
    """ return a size-limited safe repr-string for the given object.

    A failing __repr__ of a user instance is rendered as a short
    exception placeholder instead of propagating; 'saferepr' itself
    never raises. Thin convenience wrapper around the Repr/reprlib
    machinery of the standard 2.6 lib (see SafeRepr above).
    """
    # review exception handling
    limited = SafeRepr()
    # strings and the overall result share the same budget; other
    # object kinds get a smaller fixed budget of 160 characters
    limited.maxsize = maxsize
    limited.maxstring = maxsize
    limited.maxother = 160
    return limited.repr(obj)
| mpl-2.0 |
timothyclemansinsea/smc | src/smc_pyutil/smc_pyutil/new_file.py | 2 | 1270 | #!/usr/bin/python
import os, platform, shutil, sys
# Lowercased OS name ("linux", "darwin", ...); selects the per-platform
# template subdirectory used by new_file().
PLATFORM = platform.system().lower()
def new_file(path):
    """Create `path` if it does not exist, seeding it from a template.

    If the file already exists nothing happens.  Otherwise any missing
    parent directories are created, then the first matching template
    `<root>/templates/<platform>/default<ext>` — searched first in the
    user's home directory, then next to this script — is copied in.
    With no template, an empty file is created.
    """
    if os.path.exists(path):
        # nothing to do.
        return
    base, filename = os.path.split(path)
    if base and not os.path.exists(base):
        os.makedirs(base)
    ext = os.path.splitext(path)[1]
    plat = platform.system().lower()
    # expanduser('~') instead of os.environ['HOME']: the latter raises
    # KeyError when HOME is unset (e.g. some daemon/cron environments).
    home = os.path.expanduser('~')
    for root in [home, os.path.dirname(os.path.realpath(__file__))]:
        template = os.path.join(root, 'templates', plat, 'default' + ext)
        if os.path.exists(template):
            shutil.copyfile(template, path)
            return
    # No template found
    open(path, 'w').close()
def main():
    """CLI entry: create each path given on the command line (py2 script).

    With no arguments, print usage and exit with status 1.
    """
    if len(sys.argv) == 1:
        print """
This script is called like so:
    %s path/to/file.tex another/path/to/a/file.tex ....
    If path/to/file.tex already exists, nothing happens.
    If path/to/file.tex does not exist, it is created (including the directory that contains it),
    and if there is a file $HOME/templates/default.tex or /projects/templates/[platform]/default.tex (for tex extension),
    then that template file is set to the initial contents. """%(sys.argv[0])
        sys.exit(1)
    # each remaining argument is an independent target path
    for x in sys.argv[1:]:
        new_file(x)
if __name__ == "__main__":
    main()
| gpl-3.0 |
jseabold/statsmodels | statsmodels/sandbox/distributions/sppatch.py | 5 | 24020 | '''patching scipy to fit distributions and expect method
This adds new methods to estimate continuous distribution parameters with some
fixed/frozen parameters. It also contains functions that calculate the expected
value of a function for any continuous or discrete distribution
It temporarily also contains Bootstrap and Monte Carlo function for testing the
distribution fit, but these are neither general nor verified.
Author: josef-pktd
License: Simplified BSD
'''
from statsmodels.compat.python import lmap
import numpy as np
from scipy import stats, optimize, integrate
########## patching scipy
# vonmises does not define finite bounds, because it is intended for
# circular support, which does not define a proper pdf on the real line;
# clamp its support to one period so the generic machinery can integrate.
stats.distributions.vonmises.a = -np.pi
stats.distributions.vonmises.b = np.pi
# the next 3 functions are for fit with some fixed parameters
def _fitstart(self, x):
'''example method, method of moment estimator as starting values
Parameters
----------
x : ndarray
data for which the parameters are estimated
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
This example was written for the gamma distribution, but not verified
with literature
'''
loc = np.min([x.min(),0])
a = 4/stats.skew(x)**2
scale = np.std(x) / np.sqrt(a)
return (a, loc, scale)
def _fitstart_beta(self, x, fixed=None):
'''method of moment estimator as starting values for beta distribution
Parameters
----------
x : ndarray
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
for method of moment estimator for known loc and scale
https://en.wikipedia.org/wiki/Beta_distribution#Parameter_estimation
http://www.itl.nist.gov/div898/handbook/eda/section3/eda366h.htm
NIST reference also includes reference to MLE in
Johnson, Kotz, and Balakrishan, Volume II, pages 221-235
'''
#todo: separate out this part to be used for other compact support distributions
# e.g. rdist, vonmises, and truncnorm
# but this might not work because it might still be distribution specific
a, b = x.min(), x.max()
eps = (a-b)*0.01
if fixed is None:
#this part not checked with books
loc = a - eps
scale = (a - b) * (1 + 2*eps)
else:
if np.isnan(fixed[-2]):
#estimate loc
loc = a - eps
else:
loc = fixed[-2]
if np.isnan(fixed[-1]):
#estimate scale
scale = (b + eps) - loc
else:
scale = fixed[-1]
#method of moment for known loc scale:
scale = float(scale)
xtrans = (x - loc)/scale
xm = xtrans.mean()
xv = xtrans.var()
tmp = (xm*(1-xm)/xv - 1)
p = xm * tmp
q = (1 - xm) * tmp
return (p, q, loc, scale) #check return type and should fixed be returned ?
def _fitstart_poisson(self, x, fixed=None):
'''maximum likelihood estimator as starting values for Poisson distribution
Parameters
----------
x : ndarray
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
MLE :
https://en.wikipedia.org/wiki/Poisson_distribution#Maximum_likelihood
'''
#todo: separate out this part to be used for other compact support distributions
# e.g. rdist, vonmises, and truncnorm
# but this might not work because it might still be distribution specific
a = x.min()
eps = 0 # is this robust ?
if fixed is None:
#this part not checked with books
loc = a - eps
else:
if np.isnan(fixed[-1]):
#estimate loc
loc = a - eps
else:
loc = fixed[-1]
#MLE for standard (unshifted, if loc=0) Poisson distribution
xtrans = (x - loc)
lambd = xtrans.mean()
#second derivative d loglike/ dlambd Not used
#dlldlambd = 1/lambd # check
return (lambd, loc) #check return type and should fixed be returned ?
def nnlf_fr(self, thetash, x, frmask):
    # Negative log-likelihood with frozen parameters:
    #   - sum(log pdf(x, theta), axis=0)
    # `theta` is the full parameter vector (shape params + loc + scale);
    # `frmask` holds fixed values with np.nan marking free slots, which are
    # filled from `thetash` (the optimizer's current free parameters).
    try:
        if frmask is not None:
            theta = frmask.copy()
            # free parameters fill the nan slots of the frozen mask
            theta[np.isnan(frmask)] = thetash
        else:
            theta = thetash
        loc = theta[-2]
        scale = theta[-1]
        args = tuple(theta[:-2])
    except IndexError:
        raise ValueError("Not enough input arguments.")
    if not self._argcheck(*args) or scale <= 0:
        # invalid parameter region -> infinite objective for the optimizer
        return np.inf
    x = np.array((x-loc) / scale)
    # any standardized observation outside the support makes the
    # likelihood zero, i.e. the negative log-likelihood infinite
    cond0 = (x <= self.a) | (x >= self.b)
    if (np.any(cond0)):
        return np.inf
    else:
        N = len(x)
        # N*log(scale) is the Jacobian term of the standardization
        return self._nnlf(x, *args) + N*np.log(scale)
def fit_fr(self, data, *args, **kwds):
    '''estimate distribution parameters by MLE taking some parameters as fixed

    Parameters
    ----------
    data : ndarray, 1d
        data for which the distribution parameters are estimated,
    args : list ? check
        starting values for optimization
    kwds :
      - 'frozen' : array_like
           values for frozen distribution parameters and, for elements with
           np.nan, the corresponding parameter will be estimated
      - 'loc', 'scale' : float
           starting values for loc and scale (default 0.0 and 1.0)

    Returns
    -------
    argest : ndarray
        estimated parameters (only the non-frozen ones when 'frozen' is given)

    Examples
    --------
    generate random sample
    >>> np.random.seed(12345)
    >>> x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=200)

    estimate all parameters
    >>> stats.gamma.fit(x)
    array([ 2.0243194 ,  0.20395655,  1.44411371])
    >>> stats.gamma.fit_fr(x, frozen=[np.nan, np.nan, np.nan])
    array([ 2.0243194 ,  0.20395655,  1.44411371])

    keep loc fixed, estimate shape and scale parameters
    >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, np.nan])
    array([ 2.45603985,  1.27333105])

    keep loc and scale fixed, estimate shape parameter
    >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
    array([ 3.00048828])
    >>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.2])
    array([ 2.57792969])

    estimate only scale parameter for fixed shape and loc
    >>> stats.gamma.fit_fr(x, frozen=[2.5, 0.0, np.nan])
    array([ 1.25087891])

    Notes
    -----
    self is an instance of a distribution class. This can be attached to
    scipy.stats.distributions.rv_continuous

    *Todo*

    * check if docstring is correct
    * more input checking, args is list ? might also apply to current fit method
    '''
    loc0, scale0 = lmap(kwds.get, ['loc', 'scale'],[0.0, 1.0])
    Narg = len(args)
    # starting values: distribution-specific _fitstart if available,
    # otherwise the user-supplied args padded with 1.0 for each missing
    # shape parameter, plus (loc0, scale0) at the end
    if Narg == 0 and hasattr(self, '_fitstart'):
        x0 = self._fitstart(data)
    elif Narg > self.numargs:
        raise ValueError("Too many input arguments.")
    else:
        args += (1.0,)*(self.numargs-Narg)
        # location and scale are at the end
        x0 = args + (loc0, scale0)
    if 'frozen' in kwds:
        frmask = np.array(kwds['frozen'])
        if len(frmask) != self.numargs+2:
            raise ValueError("Incorrect number of frozen arguments.")
        else:
            # keep starting values for not frozen parameters
            for n in range(len(frmask)):
                # Troubleshooting ex_generic_mle_tdist: unwrap scalar
                # ndarray entries so the mask can become a float array
                if isinstance(frmask[n], np.ndarray) and frmask[n].size == 1:
                    frmask[n] = frmask[n].item()
            # If there were array elements, then frmask will be object-dtype,
            # in which case np.isnan will raise TypeError
            frmask = frmask.astype(np.float64)
            # optimize only over the free (nan-marked) parameters
            x0 = np.array(x0)[np.isnan(frmask)]
    else:
        frmask = None
    # Nelder-Mead over the free parameters; nnlf_fr merges them with the
    # frozen mask to evaluate the full-parameter likelihood
    return optimize.fmin(self.nnlf_fr, x0,
                args=(np.ravel(data), frmask), disp=0)
#The next two functions/methods calculate expected value of an arbitrary
#function, however for the continuous functions intquad is use, which might
#require continuouity or smoothness in the function.
#TODO: add option for Monte Carlo integration
def expect(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
    '''calculate expected value of a function with respect to the distribution

    E[fn(X)] is computed by numerical integration of fn(x)*pdf(x) over
    [lb, ub].  Location and scale only tested on a few examples.

    Parameters
    ----------
    all parameters are keyword parameters
    fn : function (default: identity mapping)
        Function for which the integral is calculated. Takes only one
        argument.
    args : tuple
        argument (parameters) of the distribution
    lb, ub : numbers
        lower and upper bound for integration, default is set to the
        support of the distribution
    conditional : bool (False)
        If true then the integral is divided by the probability of the
        integration interval; the return value is then the expectation of
        the function conditional on being in the given interval.

    Returns
    -------
    expected value : float

    Notes
    -----
    This function has not been checked for its behavior when the integral
    is not finite. The integration behavior is inherited from
    scipy.integrate.quad.
    '''
    if fn is None:
        fn = lambda x: x   # identity: plain expected value of X
    def integrand(x, *shape_args):
        return fn(x) * self.pdf(x, *shape_args, loc=loc, scale=scale)
    # default bounds: the shifted and scaled support of the distribution
    lower = loc + self.a * scale if lb is None else lb
    upper = loc + self.b * scale if ub is None else ub
    if conditional:
        # probability mass of [lower, upper] via the survival function
        denom = (self.sf(lower, *args, loc=loc, scale=scale)
                 - self.sf(upper, *args, loc=loc, scale=scale))
    else:
        denom = 1.0
    value, _abserr = integrate.quad(integrand, lower, upper, args=args)
    return value / denom
def expect_v2(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
    '''calculate expected value of a function with respect to the distribution

    location and scale only tested on a few examples

    Parameters
    ----------
    all parameters are keyword parameters
    fn : function (default: identity mapping)
        Function for which integral is calculated. Takes only one argument.
    args : tuple
        argument (parameters) of the distribution
    lb, ub : numbers
        lower and upper bound for integration, default is set using
        quantiles of the distribution, see Notes
    conditional : bool (False)
        If true then the integral is corrected by the conditional probability
        of the integration interval. The return value is the expectation
        of the function, conditional on being in the given interval.

    Returns
    -------
    expected value : float

    Notes
    -----
    This function has not been checked for it's behavior when the integral is
    not finite. The integration behavior is inherited from scipy.integrate.quad.

    The default limits are lb = self.ppf(1e-9, *args), ub = self.ppf(1-1e-9, *args)

    For some heavy tailed distributions, 'alpha', 'cauchy', 'halfcauchy',
    'levy', 'levy_l', and for 'ncf', the default limits are not set correctly
    even when the expectation of the function is finite. In this case, the
    integration limits, lb and ub, should be chosen by the user. For example,
    for the ncf distribution, ub=1000 works in the examples.

    There are also problems with numerical integration in some other cases,
    for example if the distribution is very concentrated and the default limits
    are too large.
    '''
    #changes: 20100809
    #correction and refactoring how loc and scale are handled
    #uses now _pdf
    #needs more testing for distribution with bound support, e.g. genpareto
    # Integration is carried out on the standardized scale; the integrand
    # maps the standardized variable back via loc + x*scale.
    if fn is None:
        def fun(x, *args):
            return (loc + x*scale)*self._pdf(x, *args)
    else:
        def fun(x, *args):
            return fn(loc + x*scale)*self._pdf(x, *args)
    # Default bounds come from extreme quantiles (already standardized);
    # user-supplied bounds are standardized and clipped to the support.
    if lb is None:
        #lb = self.a
        try:
            lb = self.ppf(1e-9, *args) #1e-14 quad fails for pareto
        except ValueError:
            lb = self.a
    else:
        lb = max(self.a, (lb - loc)/(1.0*scale)) #transform to standardized
    if ub is None:
        #ub = self.b
        try:
            ub = self.ppf(1-1e-9, *args)
        except ValueError:
            ub = self.b
    else:
        ub = min(self.b, (ub - loc)/(1.0*scale))
    if conditional:
        # probability mass of the (standardized) integration interval
        invfac = self._sf(lb,*args) - self._sf(ub,*args)
    else:
        invfac = 1.0
    return integrate.quad(fun, lb, ub,
                                args=args, limit=500)[0]/invfac
### for discrete distributions
#TODO: check that for a distribution with finite support the calculations are
# done with one array summation (np.dot)
#based on _drv2_moment(self, n, *args), but streamlined
def expect_discrete(self, fn=None, args=(), loc=0, lb=None, ub=None,
                    conditional=False):
    '''calculate expected value of a function with respect to the distribution
    for discrete distribution

    Parameters
    ----------
    (self : distribution instance as defined in scipy stats)
    fn : function (default: identity mapping)
        Function for which integral is calculated. Takes only one argument.
    args : tuple
        argument (parameters) of the distribution

    optional keyword parameters
    lb, ub : numbers
        lower and upper bound for integration, default is set to the support
        of the distribution, lb and ub are inclusive (ul<=k<=ub)
    conditional : bool (False)
        If true then the expectation is corrected by the conditional
        probability of the integration interval. The return value is the
        expectation of the function, conditional on being in the given
        interval (k such that ul<=k<=ub).

    Returns
    -------
    expected value : float

    Notes
    -----
    * function is not vectorized
    * accuracy: uses self.moment_tol as stopping criterium
      for heavy tailed distribution e.g. zipf(4), accuracy for
      mean, variance in example is only 1e-5,
      increasing precision (moment_tol) makes zipf very slow
    * suppnmin=100 internal parameter for minimum number of points to evaluate
      could be added as keyword parameter, to evaluate functions with
      non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
    * uses maxcount=1000 limits the number of points that are evaluated
      to break loop for infinite sums
      (a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
      integers are evaluated)
    '''
    #moment_tol = 1e-12 # increase compared to self.moment_tol,
    # too slow for only small gain in precision for zipf

    # avoid endless loop with unbound integral, e.g. var of zipf(2)
    maxcount = 1000
    suppnmin = 100  #minimum number of points to evaluate (+ and -)
    # The summand is fn(k+loc)*pmf(k) on the unshifted support; loc and
    # args are taken from the enclosing scope.
    if fn is None:
        def fun(x):
            #loc and args from outer scope
            return (x+loc)*self._pmf(x, *args)
    else:
        def fun(x):
            #loc and args from outer scope
            return fn(x+loc)*self._pmf(x, *args)
    # used pmf because _pmf does not check support in randint
    # and there might be problems(?) with correct self.a, self.b at this stage
    # maybe not anymore, seems to work now with _pmf
    self._argcheck(*args) # (re)generate scalar self.a and self.b
    # bounds are expressed on the unshifted support (subtract loc)
    if lb is None:
        lb = (self.a)
    else:
        lb = lb - loc
    if ub is None:
        ub = (self.b)
    else:
        ub = ub - loc
    if conditional:
        # probability of lb <= k <= ub (sf is exclusive, hence ub+1)
        invfac = self.sf(lb,*args) - self.sf(ub+1,*args)
    else:
        invfac = 1.0
    tot = 0.0
    # bulk of the mass: evaluate at least [low, upp] in one vectorized sum
    low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
    low = max(min(-suppnmin, low), lb)
    upp = min(max(suppnmin, upp), ub)
    supp = np.arange(low, upp+1, self.inc) #check limits
    tot = np.sum(fun(supp))
    # upper tail: add terms one by one until the contribution falls below
    # moment_tol or maxcount terms have been added
    diff = 1e100
    pos = upp + self.inc
    count = 0
    #handle cases with infinite support
    while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
        diff = fun(pos)
        tot += diff
        pos += self.inc
        count += 1
    if self.a < 0: #handle case when self.a = -inf
        # lower tail, mirror of the loop above
        diff = 1e100
        pos = low - self.inc
        while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
            diff = fun(pos)
            tot += diff
            pos -= self.inc
            count += 1
    if count > maxcount:
        # replace with proper warning
        print('sum did not converge')
    return tot/invfac
# Monkey-patch the helper functions defined above onto scipy.stats'
# distribution base classes so that every distribution instance gains
# fit_fr / nnlf_fr / expect methods.
stats.distributions.rv_continuous.fit_fr = fit_fr
stats.distributions.rv_continuous.nnlf_fr = nnlf_fr
stats.distributions.rv_continuous.expect = expect
stats.distributions.rv_discrete.expect = expect_discrete
# Better starting values for the fit of these two distributions.
stats.distributions.beta_gen._fitstart = _fitstart_beta #not tried out yet
stats.distributions.poisson_gen._fitstart = _fitstart_poisson #not tried out yet

########## end patching scipy
def distfitbootstrap(sample, distr, nrepl=100):
    '''run bootstrap for estimation of distribution parameters

    hard coded: only one shape parameter is allowed and estimated,
    loc=0 and scale=1 are fixed in the estimation

    Parameters
    ----------
    sample : ndarray
        original sample data for bootstrap
    distr : distribution instance with fit_fr method
    nrepl : int
        number of bootstrap replications

    Returns
    -------
    res : array (nrepl,)
        parameter estimates for all bootstrap replications
    '''
    n_obs = len(sample)
    estimates = np.zeros(nrepl)
    for rep in range(nrepl):
        # draw an index resample with replacement, then refit the
        # single free shape parameter (loc=0, scale=1 held fixed)
        resample_idx = np.random.randint(n_obs, size=n_obs)
        boot_sample = sample[resample_idx]
        estimates[rep] = distr.fit_fr(boot_sample, frozen=[np.nan, 0.0, 1.0])
    return estimates
def distfitmc(sample, distr, nrepl=100, distkwds=None):
    '''run Monte Carlo for estimation of distribution parameters

    hard coded: only one shape parameter is allowed and estimated,
    loc=0 and scale=1 are fixed in the estimation

    Parameters
    ----------
    sample : ndarray
        original sample data, in Monte Carlo only used to get nobs,
    distr : distribution instance with fit_fr method
    nrepl : int
        number of Monte Carlo replications
    distkwds : dict, optional
        keyword arguments for ``distr.rvs``; must contain the key 'arg'
        (shape parameter used to generate the samples).  The dict is
        copied and never mutated.

    Returns
    -------
    res : array (nrepl,)
        parameter estimates for all Monte Carlo replications
    '''
    if distkwds is None:
        distkwds = {}
    # Bug fix: the original signature used a mutable default ``distkwds={}``
    # and popped 'arg' straight out of it, mutating both the shared default
    # and any caller-supplied dict (so a second call with the same kwargs
    # raised KeyError).  Work on a copy instead.
    kwds = dict(distkwds)
    arg = kwds.pop('arg')
    nobs = len(sample)
    res = np.zeros(nrepl)
    for ii in range(nrepl):
        # generate a fresh sample and refit the single free shape parameter
        x = distr.rvs(arg, size=nobs, **kwds)
        res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
    return res
def printresults(sample, arg, bres, kind='bootstrap'):
    '''calculate and print Bootstrap or Monte Carlo results

    Parameters
    ----------
    sample : ndarray
        original sample data
    arg : float (for general case will be array)
        true parameter value
    bres : ndarray
        parameter estimates from Bootstrap or Monte Carlo run
    kind : {'bootstrap', 'montecarlo'}
        output is printed for Bootstrap (default) or Monte Carlo

    Returns
    -------
    None, currently only printing

    Notes
    -----
    still a bit a mess because it is used for both Bootstrap and Monte Carlo

    made correction:
        reference point for bootstrap is estimated parameter

    not clear:
        I'm not doing any ddof adjustment in estimation of variance, do we
        need ddof>0 ?

    todo: return results and string instead of printing

    NOTE(review): reads the module-level globals ``nobs``, ``distr`` and
    ``nrepl`` set by the calling script -- confirm before reusing elsewhere.
    '''
    print('true parameter value')
    print(arg)
    print('MLE estimate of parameters using sample (nobs=%d)' % (nobs))
    argest = distr.fit_fr(sample, frozen=[np.nan, 0.0, 1.0])
    print(argest)
    if kind == 'bootstrap':
        # bootstrap compares to the estimate from the original sample,
        # not to the true parameter
        argorig = arg
        arg = argest
    print('%s distribution of parameter estimate (nrepl=%d)' % (kind, nrepl))
    print('mean = %f, bias=%f' % (bres.mean(0), bres.mean(0) - arg))
    print('median', np.median(bres, axis=0))
    print('var and std', bres.var(0), np.sqrt(bres.var(0)))
    bmse = ((bres - arg)**2).mean(0)
    print('mse, rmse', bmse, np.sqrt(bmse))
    bressorted = np.sort(bres)
    # Bug fix: np.floor returns a float, which is not a valid ndarray index
    # on Python 3 / modern NumPy -- cast the percentile positions to int.
    print('%s confidence interval (90%% coverage)' % kind)
    print(bressorted[int(nrepl * 0.05)], bressorted[int(nrepl * 0.95)])
    print('%s confidence interval (90%% coverage) normal approximation' % kind)
    print(stats.norm.ppf(0.05, loc=bres.mean(), scale=bres.std()),)
    print(stats.norm.isf(0.05, loc=bres.mean(), scale=bres.std()))
    print('Kolmogorov-Smirnov test for normality of %s distribution' % kind)
    print(' - estimated parameters, p-values not really correct')
    print(stats.kstest(bres, 'norm', (bres.mean(), bres.std())))
if __name__ == '__main__':

    # Select which example sections run below.
    examplecases = ['largenumber', 'bootstrap', 'montecarlo'][:]

    if 'largenumber' in examplecases:

        # Compare the patched fit_fr against the regular scipy fit.
        print('\nDistribution: vonmises')
        for nobs in [200]:#[20000, 1000, 100]:
            x = stats.vonmises.rvs(1.23, loc=0, scale=1, size=nobs)
            print('\nnobs:', nobs)
            print('true parameter')
            print('1.23, loc=0, scale=1')
            print('unconstrained')
            print(stats.vonmises.fit(x))
            print(stats.vonmises.fit_fr(x, frozen=[np.nan, np.nan, np.nan]))
            print('with fixed loc and scale')
            print(stats.vonmises.fit_fr(x, frozen=[np.nan, 0.0, 1.0]))

        print('\nDistribution: gamma')
        distr = stats.gamma
        arg, loc, scale = 2.5, 0., 20.
        for nobs in [200]:#[20000, 1000, 100]:
            x = distr.rvs(arg, loc=loc, scale=scale, size=nobs)
            print('\nnobs:', nobs)
            print('true parameter')
            print('%f, loc=%f, scale=%f' % (arg, loc, scale))
            print('unconstrained')
            print(distr.fit(x))
            print(distr.fit_fr(x, frozen=[np.nan, np.nan, np.nan]))
            print('with fixed loc and scale')
            print(distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0]))
            print('with fixed loc')
            print(distr.fit_fr(x, frozen=[np.nan, 0.0, np.nan]))

    # Shared setup for the bootstrap / Monte Carlo examples.  These module
    # level names (distr, nobs, nrepl) are also read by printresults().
    ex = ['gamma', 'vonmises'][0]

    if ex == 'gamma':
        distr = stats.gamma
        arg, loc, scale = 2.5, 0., 1
    elif ex == 'vonmises':
        distr = stats.vonmises
        arg, loc, scale = 1.5, 0., 1
    else:
        raise ValueError('wrong example')

    nobs = 100
    nrepl = 1000

    sample = distr.rvs(arg, loc=loc, scale=scale, size=nobs)

    print('\nDistribution:', distr)
    if 'bootstrap' in examplecases:
        print('\nBootstrap')
        bres = distfitbootstrap(sample, distr, nrepl=nrepl )
        printresults(sample, arg, bres)

    if 'montecarlo' in examplecases:
        print('\nMonteCarlo')
        mcres = distfitmc(sample, distr, nrepl=nrepl,
                          distkwds=dict(arg=arg, loc=loc, scale=scale))
        printresults(sample, arg, mcres, kind='montecarlo')
| bsd-3-clause |
varunarya10/contrail-neutron-plugin | neutron_plugin_contrail/plugins/opencontrail/vnc_client/route_table_res_handler.py | 7 | 7374 | # Copyright 2015. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cfgm_common import exceptions as vnc_exc
from vnc_api import vnc_api
import contrail_res_handler as res_handler
class RouteTableMixin(object):
    """Shared helper for translating VNC route-table objects to Neutron dicts."""

    def _route_table_vnc_to_neutron(self, rt_obj):
        """Convert a VNC RouteTable object into a Neutron-style dict."""
        neutron_dict = self._vnc_lib.obj_to_dict(rt_obj)

        # Overlay the Neutron field names on top of the raw VNC dump.
        neutron_dict.update({
            'id': rt_obj.uuid,
            'tenant_id': self._project_id_vnc_to_neutron(rt_obj.parent_uuid),
            'name': rt_obj.name,
            'fq_name': rt_obj.fq_name,
        })

        # Ensure a 'routes' key is always present (None when absent) and
        # mirror each route's next_hop_type into next_hop.
        routes = neutron_dict.pop('routes', None)
        neutron_dict['routes'] = routes
        if routes:
            for route in routes['route']:
                if route['next_hop_type']:
                    route['next_hop'] = route['next_hop_type']
        return neutron_dict
class RouteTableBaseGet(res_handler.ResourceGetHandler):
    """Base read handler: binds the VNC read call used for route tables."""
    resource_get_method = "route_table_read"
class RouteTableGetHandler(RouteTableBaseGet,
                           RouteTableMixin):
    """Read/list handler for Neutron route tables backed by VNC."""
    resource_list_method = "route_tables_list"
    detail = False

    def resource_get(self, context, rt_id, fields=None):
        """Return a single route table as a Neutron dict.

        Raises a contrail 'NetworkNotFound' exception when ``rt_id`` is
        unknown to the VNC API.
        """
        try:
            rt_obj = self._resource_get(id=rt_id)
        except vnc_exc.NoIdError:
            # TODO() add route table specific exception
            self._raise_contrail_exception(
                'NetworkNotFound', net_id=rt_id, resource='route_table')
        return self._route_table_vnc_to_neutron(rt_obj)

    def resource_list_by_project(self, project_id):
        """List the raw route-table entries belonging to one project."""
        try:
            project_uuid = self._project_id_neutron_to_vnc(project_id)
        except Exception:
            # Bug fix: the original printed the error and then fell through
            # to use the unbound ``project_uuid`` (NameError).  Treat an
            # unconvertible project id as "no route tables".
            print("Error in converting uuid %s" % (project_id))
            return []
        resp_dict = self._resource_list(parent_id=project_uuid)
        return resp_dict['route-tables']

    def resource_list(self, context, filters=None, fields=None):
        """List route tables, honouring tenant_id / name / id filters."""
        ret_list = []

        # collect phase: gather route tables per project
        all_rts = []  # all rts in all projects
        if filters and 'tenant_id' in filters:
            project_ids = self._validate_project_ids(
                context,
                filters['tenant_id'])
            for p_id in project_ids:
                project_rts = self.resource_list_by_project(p_id)
                all_rts.append(project_rts)
        elif filters and 'name' in filters:
            p_id = self._project_id_neutron_to_vnc(context['tenant'])
            project_rts = self.resource_list_by_project(p_id)
            all_rts.append(project_rts)
        else:  # no filters
            dom_projects = self._project_list_domain(None)
            for project in dom_projects:
                proj_id = project['uuid']
                project_rts = self.resource_list_by_project(proj_id)
                all_rts.append(project_rts)

        # prune phase: apply id/name filters to the collected entries
        for project_rts in all_rts:
            for proj_rt in project_rts:
                # TODO() implement same for name specified in filter
                proj_rt_id = proj_rt['uuid']
                if not self._filters_is_present(filters, 'id', proj_rt_id):
                    continue
                # Bug fix: resource_get() takes (context, rt_id); the
                # original passed only the uuid, which bound it to
                # ``context`` and raised TypeError for the missing rt_id.
                rt_info = self.resource_get(context, proj_rt_id)
                if not self._filters_is_present(filters, 'name',
                                                rt_info['name']):
                    continue
                ret_list.append(rt_info)

        return ret_list
class RouteTableCreateHandler(res_handler.ResourceCreateHandler):
    """Create handler for Neutron route tables backed by VNC."""
    resource_create_method = "route_table_create"

    def resource_create(self, context, rt_q):
        """Create a VNC RouteTable from the Neutron request dict ``rt_q``.

        Each route's ``next_hop`` (a virtual-machine uuid) is resolved to
        the fq-name string of the VM's first service instance, best effort.
        Returns the created table translated back to a Neutron dict.
        """
        project_id = self._project_id_neutron_to_vnc(rt_q['tenant_id'])
        project_obj = self._project_read(proj_id=project_id)
        rt_obj = vnc_api.RouteTable(name=rt_q['name'],
                                    parent_obj=project_obj)
        if rt_q['routes']:
            for route in rt_q['routes']['route']:
                try:
                    vm_obj = self._vnc_lib.virtual_machine_read(
                        id=route['next_hop'])
                    si_list = vm_obj.get_service_instance_refs()
                    if si_list:
                        fq_name = si_list[0]['to']
                        si_obj = self._vnc_lib.service_instance_read(
                            fq_name=fq_name)
                        route['next_hop'] = si_obj.get_fq_name_str()
                    rt_obj.set_routes(
                        vnc_api.RouteTableType.factory(**rt_q['routes']))
                # NOTE(review): deliberate best-effort -- any failure while
                # resolving a next hop leaves the route untranslated and is
                # silently ignored.  Confirm this is the intended behaviour.
                except Exception as e:
                    pass
        try:
            self._resource_create(rt_obj)
        except vnc_exc.RefsExistError as e:
            self._raise_contrail_exception(
                'BadRequest',
                resource='route_table', msg=str(e))
        # _route_table_vnc_to_neutron is provided by RouteTableMixin via the
        # combined RouteTableHandler subclass, not by this base class.
        ret_rt_q = self._route_table_vnc_to_neutron(rt_obj)
        return ret_rt_q
class RouteTableUpdateHandler(res_handler.ResourceUpdateHandler,
                              RouteTableBaseGet,
                              RouteTableMixin):
    """Update handler for Neutron route tables backed by VNC."""
    resource_update_method = "route_table_update"

    def resource_update(self, context, rt_id, rt_q):
        """Update route table ``rt_id`` from the Neutron dict ``rt_q``.

        Next hops are resolved to service-instance fq-names, best effort,
        mirroring RouteTableCreateHandler.resource_create.
        """
        rt_q['id'] = rt_id
        try:
            rt_obj = self._resource_get(id=rt_q['id'])
        except vnc_exc.NoIdError:
            # Bug fix / consistency: _raise_contrail_exception raises
            # internally (see RouteTableGetHandler); the original
            # ``raise self._raise_contrail_exception(...)`` would re-raise
            # its None return value (TypeError) if the helper ever returned.
            self._raise_contrail_exception(
                'ResourceNotFound', id=rt_id, resource='route_table')
        if rt_q['routes']:
            for route in rt_q['routes']['route']:
                try:
                    vm_obj = self._vnc_lib.virtual_machine_read(
                        id=route['next_hop'])
                    si_list = vm_obj.get_service_instance_refs()
                    if si_list:
                        fq_name = si_list[0]['to']
                        si_obj = self._vnc_lib.service_instance_read(
                            fq_name=fq_name)
                        route['next_hop'] = si_obj.get_fq_name_str()
                    rt_obj.set_routes(
                        vnc_api.RouteTableType.factory(**rt_q['routes']))
                # deliberate best-effort: failures leave the route untouched
                except Exception:
                    pass
        self._resource_update(rt_obj)
        return self._route_table_vnc_to_neutron(rt_obj)
class RouteTableDeleteHandler(res_handler.ResourceDeleteHandler):
    """Delete handler for Neutron route tables backed by VNC."""
    resource_delete_method = "route_table_delete"

    def resource_delete(self, context, rt_id):
        """Delete route table ``rt_id``; raise ResourceNotFound if absent."""
        try:
            self._resource_delete(rt_id)
        except vnc_exc.NoIdError:
            # Bug fix / consistency: drop the spurious ``raise`` before the
            # helper -- _raise_contrail_exception raises internally, and
            # re-raising its None return value would be a TypeError.
            self._raise_contrail_exception(
                "ResourceNotFound", id=rt_id, resource='route_table')
class RouteTableHandler(RouteTableGetHandler,
                        RouteTableCreateHandler,
                        RouteTableUpdateHandler,
                        RouteTableDeleteHandler):
    """Facade combining the CRUD handlers for Neutron route tables."""
    pass
| apache-2.0 |
credativUK/purchase-workflow | framework_agreement/tests/test_price_list.py | 20 | 4797 | # -*- coding: utf-8 -*-
# Author: Nicolas Bessi, Leonardo Pistone
# Copyright 2013-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import timedelta, date
from openerp import exceptions, fields
import openerp.tests.common as test_common
from .common import BaseAgreementTestMixin
class TestAgreementPriceList(test_common.TransactionCase,
                             BaseAgreementTestMixin):
    """Check that agreement price retrieval honours the quantity breaks."""

    def setUp(self):
        """Create a default agreement with 4 price lines:

        qty 0    price 70
        qty 200  price 60
        qty 500  price 50
        qty 1000 price 45
        """
        super(TestAgreementPriceList, self).setUp()
        self.commonsetUp()
        start_date = date.today() + timedelta(days=10)
        end_date = date.today() + timedelta(days=20)
        self.agreement = self.agreement_model.create({
            'portfolio_id': self.portfolio.id,
            'product_id': self.product.id,
            'start_date': fields.Date.to_string(start_date),
            'end_date': fields.Date.to_string(end_date),
            'delay': 5,
            'draft': False,
            'quantity': 1500,
        })
        pl = self.agreement_pl_model.create(
            {'framework_agreement_id': self.agreement.id,
             'currency_id': self.ref('base.EUR')}
        )
        # Create the four quantity-break lines from a table instead of four
        # near-identical create() calls.
        for quantity, price in [(0, 70.0), (200, 60.0),
                                (500, 50.0), (1000, 45.0)]:
            self.agreement_line_model.create(
                {'framework_agreement_pricelist_id': pl.id,
                 'quantity': quantity,
                 'price': price}
            )
        self.agreement.refresh()

    def _price(self, qty):
        """Return the agreement price for ``qty``, expressed in EUR."""
        return self.agreement.get_price(
            qty, currency=self.browse_ref('base.EUR'))

    def test_00_test_qty(self):
        """Test if barem retrieval is correct.

        Each (quantity, expected price) pair exercises a boundary or an
        interior point of the break table; negative quantities fall back
        to the first bracket.
        """
        cases = [
            (0, 70.0), (100, 70.0),
            (200, 60.0), (210, 60.0),
            (500, 50.0), (800, 50.0), (999, 50.0),
            (1000, 45.0), (10000, 45.0),
            (-10, 70.0),
        ]
        for qty, expected_price in cases:
            self.assertEqual(self._price(qty), expected_price)

    def test_01_failed_wrong_currency(self):
        """Tests that a wrong currency raises an exception"""
        with self.assertRaises(exceptions.Warning):
            self.agreement.get_price(0, currency=self.browse_ref('base.USD'))
| agpl-3.0 |
hyperized/ansible | test/units/modules/network/netscaler/test_netscaler_nitro_request.py | 10 | 12922 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from units.compat.mock import patch, Mock, call
from .netscaler_module import TestModule
import copy
import tempfile
import json
import codecs
from ansible.modules.network.netscaler import netscaler_nitro_request
# Template of every parameter accepted by netscaler_nitro_request, all
# defaulted to None; the tests deep-copy this and override what they need.
module_arguments = dict(
    nsip=None,
    nitro_user=None,
    nitro_pass=None,
    nitro_protocol=None,
    validate_certs=None,
    nitro_auth_token=None,
    resource=None,
    name=None,
    attributes=None,
    args=None,
    filter=None,
    operation=None,
    expected_nitro_errorcode=None,
    action=None,
    instance_ip=None,
    instance_name=None,
    instance_id=None,
)
class TestNetscalerNitroRequestModule(TestModule):
    """Unit tests for NitroAPICaller of netscaler_nitro_request.

    AnsibleModule is always patched out, so no real NITRO HTTP calls are
    made; header construction and response parsing are tested in isolation.
    """

    @classmethod
    def setUpClass(cls):
        # Placeholder exception type available to tests needing one.
        class MockException(Exception):
            pass
        cls.MockException = MockException

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_fail_on_conflicting_authentication_methods(self):
        # Supplying both user/pass and an auth token must call fail_json.
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_user='nsroot',
            nitro_pass='nsroot',
            nitro_auth_token='##DDASKLFDJ',
        ))
        mock_module_instance = Mock(params=args)
        expected_calls = [
            call.fail_json(
                changed=False,
                failed=True,
                msg='Cannot define both authentication token and username/password'
            )
        ]
        module_mock = Mock(return_value=mock_module_instance)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
            netscaler_nitro_request.NitroAPICaller()
        mock_module_instance.assert_has_calls(expected_calls)

    def test_nitro_user_pass_credentials(self):
        # user/pass auth is carried in the X-NITRO-USER/PASS headers
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_user='nsroot',
            nitro_pass='nsroot',
        ))
        mock_module_instance = Mock(params=args)
        expected_headers = {
            'Content-Type': 'application/json',
            'X-NITRO-USER': 'nsroot',
            'X-NITRO-PASS': 'nsroot',
        }
        module_mock = Mock(return_value=mock_module_instance)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
            instance = netscaler_nitro_request.NitroAPICaller()
            self.assertDictEqual(instance._headers, expected_headers)

    def test_mas_login_headers(self):
        # mas_login must NOT send credential headers (they go in the body)
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_user='nsroot',
            nitro_pass='nsroot',
            operation='mas_login',
        ))
        mock_module_instance = Mock(params=args)
        expected_headers = {
            'Content-Type': 'application/json',
        }
        module_mock = Mock(return_value=mock_module_instance)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
            instance = netscaler_nitro_request.NitroAPICaller()
            self.assertDictEqual(instance._headers, expected_headers)

    def test_mas_proxy_call_headers_instance_ip(self):
        # proxied calls through MAS address the managed instance by IP
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_auth_token='##ABDB',
            operation='add',
            instance_ip='192.168.1.1',
        ))
        mock_module_instance = Mock(params=args)
        expected_headers = {
            'Content-Type': 'application/json',
            '_MPS_API_PROXY_MANAGED_INSTANCE_IP': args['instance_ip'],
            'Cookie': 'NITRO_AUTH_TOKEN=%s' % args['nitro_auth_token'],
        }
        module_mock = Mock(return_value=mock_module_instance)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
            instance = netscaler_nitro_request.NitroAPICaller()
            self.assertDictEqual(instance._headers, expected_headers)

    def test_mas_proxy_call_headers_instance_id(self):
        # proxied calls may also address the managed instance by id
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_auth_token='##ABDB',
            operation='add',
            instance_id='myid',
        ))
        mock_module_instance = Mock(params=args)
        expected_headers = {
            'Content-Type': 'application/json',
            '_MPS_API_PROXY_MANAGED_INSTANCE_ID': args['instance_id'],
            'Cookie': 'NITRO_AUTH_TOKEN=%s' % args['nitro_auth_token'],
        }
        module_mock = Mock(return_value=mock_module_instance)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
            instance = netscaler_nitro_request.NitroAPICaller()
            self.assertDictEqual(instance._headers, expected_headers)

    def test_mas_proxy_call_headers_instance_name(self):
        # proxied calls may also address the managed instance by name
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_auth_token='##ABDB',
            operation='add',
            instance_name='myname',
        ))
        mock_module_instance = Mock(params=args)
        expected_headers = {
            'Content-Type': 'application/json',
            '_MPS_API_PROXY_MANAGED_INSTANCE_NAME': args['instance_name'],
            'Cookie': 'NITRO_AUTH_TOKEN=%s' % args['nitro_auth_token'],
        }
        module_mock = Mock(return_value=mock_module_instance)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', module_mock):
            instance = netscaler_nitro_request.NitroAPICaller()
            self.assertDictEqual(instance._headers, expected_headers)

    def test_edit_response_data_no_body_success_status(self):
        # an empty body with the expected HTTP status maps to nitro success
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule'):
            instance = netscaler_nitro_request.NitroAPICaller()
        r = None
        info = {
            'status': 200,
        }
        result = {}
        success_status = 200
        expected_result = {
            'nitro_errorcode': 0,
            'nitro_message': 'Success',
            'nitro_severity': 'NONE',
            'http_response_body': '',
            'http_response_data': info,
        }
        instance.edit_response_data(r, info, result, success_status)
        self.assertDictEqual(result, expected_result)

    def test_edit_response_data_no_body_fail_status(self):
        # an empty body with an unexpected HTTP status maps to a nitro error
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule'):
            instance = netscaler_nitro_request.NitroAPICaller()
        r = None
        info = {
            'status': 201,
        }
        result = {}
        success_status = 200
        expected_result = {
            'nitro_errorcode': -1,
            'nitro_message': 'HTTP status %s' % info['status'],
            'nitro_severity': 'ERROR',
            'http_response_body': '',
            'http_response_data': info,
        }
        instance.edit_response_data(r, info, result, success_status)
        self.assertDictEqual(result, expected_result)

    def test_edit_response_data_actual_body_data(self):
        # a JSON response body is parsed and each key surfaced as nitro_<key>
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_user='nsroot',
            nitro_pass='nsroot',
            nitro_auth_token='##DDASKLFDJ',
        ))
        module_mock = Mock(params=args, from_json=json.loads)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', Mock(return_value=module_mock)):
            # real file object so edit_response_data can .read() the body
            with tempfile.TemporaryFile() as r:
                actual_body = {
                    'errorcode': 258,
                    'message': 'Some error',
                    'severity': 'ERROR',
                }
                r.write(codecs.encode(json.dumps(actual_body), 'utf-8'))
                r.seek(0)

                instance = netscaler_nitro_request.NitroAPICaller()
                info = {
                    'status': 200,
                }
                result = {}
                success_status = 200
                expected_result = {
                    'http_response_body': json.dumps(actual_body),
                    'http_response_data': info,
                }
                nitro_data = {}
                for key, value in actual_body.items():
                    nitro_data['nitro_%s' % key] = value
                expected_result.update(nitro_data)

                instance.edit_response_data(r, info, result, success_status)
                self.assertDictEqual(result, expected_result)

    def test_edit_response_data_actual_body_data_irrelevant(self):
        # an empty JSON object body falls back to the status-derived result
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_user='nsroot',
            nitro_pass='nsroot',
            nitro_auth_token='##DDASKLFDJ',
        ))
        module_mock = Mock(params=args, from_json=json.loads)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', Mock(return_value=module_mock)):
            with tempfile.TemporaryFile() as r:
                actual_body = {}
                r.write(codecs.encode(json.dumps(actual_body), 'utf-8'))
                r.seek(0)

                instance = netscaler_nitro_request.NitroAPICaller()
                info = {
                    'status': 200,
                }
                result = {}
                success_status = 200
                expected_result = {
                    'http_response_body': json.dumps(actual_body),
                    'http_response_data': info,
                    'nitro_errorcode': 0,
                    'nitro_message': 'Success',
                    'nitro_severity': 'NONE',
                }
                instance.edit_response_data(r, info, result, success_status)
                self.assertDictEqual(result, expected_result)

    def test_edit_response_data_body_in_info(self):
        # the body may also arrive pre-read inside info['body']
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_user='nsroot',
            nitro_pass='nsroot',
        ))
        module_mock = Mock(params=args, from_json=json.loads)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', Mock(return_value=module_mock)):
            body = {
                'errorcode': 258,
                'message': 'Numerical error 258',
                'severity': 'ERROR'
            }
            instance = netscaler_nitro_request.NitroAPICaller()
            r = None
            info = {
                'status': 200,
                'body': codecs.encode(json.dumps(body), 'utf-8'),
            }
            result = {}
            success_status = 200
            expected_result = {
                'http_response_body': json.dumps(body),
                'http_response_data': info,
            }
            nitro_data = {}
            for key, value in body.items():
                nitro_data['nitro_%s' % key] = value
            expected_result.update(nitro_data)

            instance.edit_response_data(r, info, result, success_status)
            self.assertDictEqual(result, expected_result)

    def test_handle_get_return_object(self):
        # a GET result exposes the requested resource under 'nitro_object'
        resource = 'lbvserver'
        args = copy.deepcopy(module_arguments)
        args.update(dict(
            nitro_user='nsroot',
            nitro_pass='nsroot',
            resource=resource,
        ))
        resource_data = {
            'property1': 'value1',
            'property2': 'value2',
        }
        module_mock = Mock(params=args, from_json=json.loads)
        with patch('ansible.modules.network.netscaler.netscaler_nitro_request.AnsibleModule', Mock(return_value=module_mock)):
            instance = netscaler_nitro_request.NitroAPICaller()
            data = {resource: resource_data}
            result = {
                'nitro_errorcode': 0,
                'http_response_body': json.dumps(data),
            }
            expected_result = {
                'nitro_object': resource_data
            }
            expected_result.update(result)
            instance.handle_get_return_object(result)
            self.assertDictEqual(result, expected_result)
| gpl-3.0 |
usc-isi-i2/WEDC | spark_dependencies/python_lib/nose2/plugins/prof.py | 16 | 2786 | """
Profile test execution using hotshot.
This plugin implements :func:`startTestRun` and replaces
``event.executeTests`` with :meth:`hotshot.Profile.runcall`. It
implements :func:`beforeSummaryReport` to output profiling information
before the final test summary time. Config file options ``filename``,
``sort`` and ``restrict`` can be used to change where profiling
information is saved and how it is presented.
"""
try:
import hotshot
from hotshot import stats
except ImportError:
hotshot, stats = None, None
import logging
import os
import tempfile
from nose2 import events, util
log = logging.getLogger(__name__)
__unittest = True
class Profiler(events.Plugin):
    """Profile the test run"""
    configSection = 'profiler'
    commandLineSwitch = ('P', 'profile', 'Run tests under profiler')

    def __init__(self):
        # filename: where profile data is written ('' -> temp file);
        # sort/restrict: how pstats output is ordered and filtered.
        self.pfile = self.config.as_str('filename', '')
        self.sort = self.config.as_str('sort', 'cumulative')
        self.restrict = self.config.as_list('restrict', [])
        # clean/fileno track a temp file we created ourselves so it can be
        # removed after reporting.
        self.clean = False
        self.fileno = None

    def register(self):
        """Don't register if hotshot is not found"""
        if hotshot is None:
            log.error("Unable to profile: hotshot module not available")
            return
        super(Profiler, self).register()

    def startTestRun(self, event):
        """Set up the profiler"""
        self.createPfile()
        self.prof = hotshot.Profile(self.pfile)
        # run the whole test suite under the profiler
        event.executeTests = self.prof.runcall

    def beforeSummaryReport(self, event):
        """Output profiling results"""
        # write prof output to stream: adapter that forwards pstats' writes
        # to the report stream, space-separated.
        class Stream:
            def write(self, *msg):
                for m in msg:
                    event.stream.write(m)
                    event.stream.write(' ')
                event.stream.flush()
        stream = Stream()
        self.prof.close()
        prof_stats = stats.load(self.pfile)
        prof_stats.sort_stats(self.sort)
        event.stream.writeln(util.ln("Profiling results"))
        # temporarily redirect pstats output to our adapter
        tmp = prof_stats.stream
        prof_stats.stream = stream
        try:
            if self.restrict:
                prof_stats.print_stats(*self.restrict)
            else:
                prof_stats.print_stats()
        finally:
            prof_stats.stream = tmp
        self.prof.close()
        event.stream.writeln('')

        # remove the temp profile file if we created it
        if self.clean:
            if self.fileno:
                try:
                    os.close(self.fileno)
                except OSError:
                    pass
            try:
                os.unlink(self.pfile)
            except OSError:
                pass

    def createPfile(self):
        # fall back to a temp file when no filename was configured; mark it
        # for cleanup after the report.
        if not self.pfile:
            self.fileno, self.pfile = tempfile.mkstemp()
            self.clean = True
| apache-2.0 |
codefisher/mozbutton_sdk | builder/restartless_button.py | 1 | 28578 | import os
import re
import json
import codecs
import lxml.etree as ET
from copy import deepcopy
from collections import namedtuple, defaultdict
try:
from PIL import Image
except ImportError:
pass
from builder.ext_button import Button, Option, ChromeString, ChromeFile
# Python 2/3 compatibility: on py3 there is no basestring, so alias it to str.
try:
    basestring
except NameError:
    basestring = str # py3

# Record pairing a keyboard-shortcut command string with its button id.
Keys = namedtuple("Keys", ['command', 'button'])
# Record describing an extra UI element to insert: target parent node and id,
# insertion index, the XUL/JS code to insert, and what it goes after.
ExtraUI = namedtuple("ExtraUI", ["parent", "parent_id", "index", "code", "after"])
class RestartlessButton(Button):
def __init__(self, *args, **kwargs):
super(RestartlessButton, self).__init__(*args, **kwargs)
self._ui_ids = set()
self._included_js_files = []
self._bootstrap_globals = []
self._bootstrap_startup = []
self._bootstrap_shutdown = []
for folder, button, files in self._info:
if "bootstrap" in files:
for file_name in os.listdir(os.path.join(folder, "bootstrap")):
if file_name[0] != ".":
with open(os.path.join(folder, "bootstrap", file_name), "r") as js_fp:
data = js_fp.read()
if file_name == "global.js":
self._bootstrap_globals.append(data)
elif file_name == "startup.js":
self._bootstrap_startup.append(data)
elif file_name == "shutdown.js":
self._bootstrap_shutdown.append(data)
def get_files(self):
for file_name, data in self.get_jsm_files().items():
yield (file_name + ".jsm", data)
def locale_files(self, button_locales, *args, **kwargs):
dtd_data = button_locales.get_dtd_data(self.get_locale_strings(),
self, untranslated=False, format_type="properties")
for locale, data in dtd_data.items():
yield locale, "button_labels.properties", data
locales_inuse = dtd_data.keys()
key_strings = button_locales.get_string_data(self.get_key_strings(),
self, format_type="properties")
for locale, data in self.locale_file_filter(key_strings, locales_inuse):
yield locale, "keys.properties", data
for locale, file_name, data in super(RestartlessButton, self).locale_files(button_locales, locales_inuse):
yield locale, file_name, data
def jsm_keyboard_shortcuts(self, file_name):
if not self._settings.get("use_keyboard_shortcuts"):
return
for button in self._button_keys.keys():
func = self._button_commands.get(file_name, {}).get(button)
if func is not None:
yield Keys(self._patch_call(func), button)
def option_data(self):
scripts = []
if self._settings.get("use_keyboard_shortcuts"):
scripts.append("key-option.js")
with open(self.find_file("key-option.xul"), "r") as key_option_file:
key_option_template = key_option_file.read()
for button in self._button_keys.keys():
xul = self.format_string(key_option_template,
button=button,
menu_label=button + ".label")
applications = " ".join(self._button_applications[button])
self._button_options[button + "-key-item"].append(
Option("tb-key-shortcut.option.title:lightning.png:" + applications, xul))
self._button_applications[
button + "-key-item"] = self._button_applications[button]
files, javascript = super(RestartlessButton, self).option_data()
return files, javascript + scripts
def get_pref_list(self):
settings = super(RestartlessButton, self).get_pref_list()
pref_root = self._settings.get("pref_root")
if self._settings.get('use_keyboard_shortcuts'):
for button in self._button_keys.keys():
settings.append(("{}key-disabled.{}".format(pref_root, button), 'false'))
properties = self.pref_locale_file("'chrome://{chrome_name}/locale/{prefix}keys.properties'")
settings.append(("{}key.{}".format(pref_root, button), properties))
settings.append(("{}modifier.{}".format(pref_root, button), properties))
return settings
def get_js_files(self):
js_files = super(RestartlessButton, self).get_js_files()
if self._settings.get("use_keyboard_shortcuts"):
with open(self.find_file("key-option.js")) as key_option_fp:
js_files["key-option"] = self.string_subs(key_option_fp.read())
self._included_js_files = js_files.keys()
return js_files
def get_chrome_strings(self):
for chrome_string in super(RestartlessButton, self).get_chrome_strings():
yield chrome_string
yield ChromeString(file_name='bootstrap.js', data=self.create_bootstrap())
defaults = self.get_defaults()
if defaults:
yield ChromeString(file_name=os.path.join("chrome", "content", "defaultprefs.js"), data=defaults)
def get_chrome_files(self):
for chrome_file in super(RestartlessButton, self).get_chrome_files():
yield chrome_file
yield ChromeFile(file_name=os.path.join("chrome", "content", "customizable.jsm"), path=self.find_file('customizable.jsm'))
    def create_bootstrap(self):
        """Render the extension's bootstrap.js from its Jinja template.

        Assembles the resource-registration snippet, the per-overlay
        module loader chain, and the accumulated global/startup/shutdown
        script fragments, then renders them with the build settings.
        """
        chrome_name = self._settings.get("chrome_name")
        loaders = []
        resource = ""
        if self.resource_files:
            # Register a chrome:// alias for the bundled resource files.
            resource = "createResource('{0}', 'chrome://{0}/content/resources/');".format(chrome_name)
        # Map each overlay window URI to the button modules to load there.
        window_modules = defaultdict(list)
        for file_name in self._button_files:
            for overlay in self._settings.get("files_to_window").get(file_name, ()):
                window_modules[overlay].append(file_name)
        for overlay, modules in window_modules.items():
            mods = "\n\t\t".join(["modules.push('chrome://{0}/content/{1}.jsm');".format(chrome_name, file_name) for file_name in modules])
            loaders.append("(uri == '{0}') {{\n\t\t{1}\n\t}}".format(overlay, mods))
        if self._settings.get("show_updated_prompt"):
            # NOTE(review): this renders the same 'bootstrap.js' template
            # that is used for the main output below — it looks like it
            # should be a dedicated updated-prompt template; confirm the
            # template name.
            install_template = self.env.get_template('bootstrap.js')
            install = install_template.render(**self._settings)
        else:
            install = ""
        template = self.env.get_template('bootstrap.js')
        return template.render(
            resource=resource, install=install,
            globals=self.string_subs("\n".join(self._bootstrap_globals)),
            startup=self.string_subs("\n".join(self._bootstrap_startup)),
            shutdown=self.string_subs("\n".join(self._bootstrap_shutdown)),
            loaders = "if" + " else if".join(loaders),
            **self._settings)
    def _jsm_create_menu(self, file_name, buttons):
        """Build the JS statements that create this file's menu items.

        Items whose parent_id is None go under the extension's own
        generated submenu (one per configured location); the remainder are
        inserted into existing application menus after a configured
        sibling.  Returns the statements joined with newline+tab, or ''
        when menu items are disabled in the settings.
        """
        if not self._settings.get('menuitems'):
            return ''
        statements = []
        data = self.create_menu_dom(file_name, buttons)
        # Split items between our generated submenu and existing menus.
        in_submenu = [menuitem for menuitem in data if menuitem.parent_id is None]
        in_menu = [menuitem for menuitem in data if menuitem.parent_id is not None]
        num = 0
        template = self.env.get_template('menu.js')
        if in_submenu:
            menu_id, menu_label, locations = self._settings.get("menu_meta")
            # basestring: this module targets Python 2.
            if isinstance(locations, basestring):
                locations = [locations]
            for i, location in enumerate(locations):
                # First location keeps the bare id; later ones get an
                # index suffix so DOM ids stay unique.
                menu_id_num = "{0}_{1}".format(menu_id, i) if i else menu_id
                meta = self._settings.get("file_to_menu").get(location, {}).get(file_name)
                if meta:
                    menu_name, insert_after = meta
                    statements.append(template.render(**{
                        "menu_name": menu_name,
                        "menu_id": menu_id_num,
                        "label": menu_label,
                        "class": "menu-iconic",
                        "menu_label": menu_label,
                        "insert_after": insert_after,
                        "menuitems_sorted": self._settings.get("menuitems_sorted")
                    }))
                    # NOTE(review): assumes the menu.js template consumes
                    # three DOM variable slots (hence the generated items
                    # below attach to 'menupopup_2') — confirm against the
                    # menu.js template.
                    num += 3
            for item, _, _ in in_submenu:
                item_statements, count, _ = self._create_dom(
                    item, top="menupopup_2", count=num, doc="document")
                num = count + 1
                statements.extend(item_statements)
        for item, menu_name, insert_after in in_menu:
            # Look up the host menu's popup once, then build the item
            # into it with a fresh variable-number range.
            statements.append("var menupopup_{0} = document.getElementById('{1}');".format(num, menu_name))
            var_name = "menupopup_%s" % num
            num += 1
            item.attrib["insertafter"] = insert_after
            item_statements, count, _ = self._create_dom(item, top=var_name, count=num)
            num = count + 1
            statements.extend(item_statements)
        return "\n\t".join(statements)
def _dom_string_lookup(self, value):
result = []
items = re.findall(r'&.+?;|[^&;]+', value)
for item in items:
if item == "&brandShortName;":
result.append("Cc['@mozilla.org/xre/app-info;1'].createInstance(Ci.nsIXULAppInfo).name")
elif item[0] == '&' and item[-1] == ';':
result.append("buttonStrings.get('%s')" % item[1:-1])
else:
result.append("'%s'" % item)
return ' + '.join(result)
    def _create_dom(self, root, top=None, count=0, doc='document', child_parent=None, rename=None, append_children=True):
        """Translate an XML element (and its subtree) into DOM-building JS.

        Each element becomes ``var <tag>_<n> = doc.createElement(...)``
        followed by attribute-specific statements; children are recursed
        with an incremented counter so variable names stay unique.

        Returns (statements, last_count, children): ``children`` only
        carries the direct children's statements when append_children is
        False (used by _create_dom_button to splice them elsewhere).
        """
        num = count
        if rename == None:
            rename = {}
        children = []
        statements = [
            "var %s_%s = %s.createElement('%s');" % (root.tag, num, doc, rename.get(root.tag, root.tag)),
        ]
        javascript_object = self._settings.get("javascript_object")
        # Attributes are emitted in a fixed priority order (see _attr_key)
        # so id/type are set before handlers that may depend on them.
        for key, value in sorted(root.attrib.items(), key=self._attr_key):
            if key == 'id':
                statements.append("%s_%s.id = '%s';" % (root.tag, num, value))
            elif key in ('label', 'tooltiptext') or (root.tag == 'key' and key in ('key', 'keycode', 'modifiers')):
                # Localizable attributes go through the string-bundle lookup.
                statements.append("%s_%s.setAttribute('%s', %s);" % ((root.tag, num, key, self._dom_string_lookup(value))))
            elif key == "class":
                for val in value.split():
                    statements.append('%s_%s.classList.add("%s");' % (root.tag, num, val))
            elif key[0:2] == 'on':
                if key == 'oncommand' and root.tag == 'key':
                    # we do this because key elements without a oncommand are optimized away
                    # but we can't call our function, because that might not exist
                    # in the window scope, so the event listener has to be used
                    statements.append("%s_%s.setAttribute('oncommand', 'void(0);');" % (root.tag, num))
                statements.append("%s_%s.addEventListener('%s', function(event) {\n\t\t\t\t%s\n\t\t\t}, false);" % (root.tag, num, key[2:], self._patch_call(value)))
            elif key == "insertafter":
                # Handled at append time below, not as a DOM attribute.
                pass
            elif key == "showamenu":
                statements.append("{}_{}.addEventListener('DOMMenuItemActive', {}.menuLoaderEvent, false);".format(root.tag, num, javascript_object))
                statements.append("%s_%s._handelMenuLoaders = true;" % (root.tag, num))
                statements.append("%s_%s.setAttribute('%s', '%s');" % ((root.tag, num, key, value)))
            elif key == "toolbarname":
                # this is just for our custom toolbars which are named "Toolbar Buttons 1" and the like
                name, sep, other = value.partition(' ')
                other = " + '%s%s'" % (sep, other) if sep else ""
                value = "buttonStrings.get('%s')%s" % (name, other)
                statements.append("%s_%s.setAttribute('%s', %s);" % ((root.tag, num, key, value)))
            elif key == "type" and value == "menu-button" and 'id' in root.attrib:
                # The menu-button type is only applied when the user has not
                # hidden this button's menupopup via its pref.
                statements.append('''if(extensionPrefs.getPrefType('menupopup.hide.{0}') == extensionPrefs.PREF_INVALID || !extensionPrefs.getBoolPref('menupopup.hide.{0}')) {{\n\t\t\t\t{1}_{2}.setAttribute("{3}", "{4}");\n\t\t\t}}'''.format(root.attrib['id'], root.tag, num, key, value))
            else:
                statements.append('%s_%s.setAttribute("%s", "%s");' % ((root.tag, num, key, value)))
        for node in root:
            # child_parent only applies to the outermost call's children.
            sub_nodes, count, _ = self._create_dom(node, '%s_%s' % (root.tag, num), count+1, doc=doc, rename=rename, child_parent=(child_parent if top == None else None))
            if append_children:
                statements.extend(sub_nodes)
            else:
                children = sub_nodes
        if not top:
            statements.append('return %s_%s;' % (root.tag, num))
        else:
            if "insertafter" in root.attrib:
                statements.append("%s.insertBefore(%s_%s, %s.getElementById('%s').nextSibling);" % (top, root.tag, num, doc, root.attrib.get("insertafter")))
            else:
                statements.append('%s.appendChild(%s_%s);' % (top if not child_parent else child_parent, root.tag, num))
        return statements, count, children
def _attr_key(self, attr):
order = ('id', 'defaultarea', 'type', 'label', 'tooltiptext', 'command', 'onclick', 'oncommand')
if attr[0].lower() in order:
return order.index(attr[0].lower())
return 100
    def _create_dom_button(self, button_id, root, file_name, count, toolbar_ids):
        """Render a complex button as a CustomizableUI widget definition.

        Three shapes are handled:
          * 'viewid' buttons become type ``'view'`` widgets whose panel
            DOM is built in onBeforeCreated;
          * 'usepanelview' buttons get an onclick shim so that, when the
            button sits in the menu panel, a cloned panelview opens
            instead of the normal popup;
          * anything else becomes a plain type ``'custom'`` widget whose
            DOM is built in onBuild.

        Returns the JS source of the ``CustomizableUI.createWidget()``
        call for this button.
        """
        add_to_main_toolbar = self._settings.get("add_to_main_toolbar")
        if 'viewid' in root.attrib:
            self._ui_ids.add(root.attrib["viewid"])
            statements, _, children = self._create_dom(root, child_parent="popupset", append_children=False)
            # Replace the generated popup container: prefer a panelview
            # under PanelUI-multiView, falling back to a menupopup.
            children[0] = """var popupset = document.getElementById('PanelUI-multiView');
            if(popupset) {
                var menupopup_1 = document.createElement('panelview');
            } else {
                var menupopup_1 = document.createElement('menupopup');
                popupset = document.documentElement;
            }"""
            data = {
                "type": "'view'",
                "onBeforeCreated": 'function (document) {\n\t\t\t\tvar window = document.defaultView;\n\t\t\t\t%s\n\t\t\t}' % "\n\t\t\t\t".join(children),
            }
        elif 'usepanelview' in root.attrib:
            self._ui_ids.add("{0}-panel-view".format(root.attrib["id"]))
            # Click shim: in the menu panel, open the cloned panel view
            # instead of the button's normal popup.
            root.attrib["onclick"] = """if(event.target != event.currentTarget || ('button' in event && event.button != 0)) {{
                return;
            }}
            var item = event.target;
            if(item.nodeName == 'key') {{
                item = document.getElementById('{0}');
            }}
            if(item.getAttribute('cui-areatype') == 'menu-panel') {{
                var win = item.ownerDocument.defaultView;
                event.preventDefault();
                event.stopPropagation();
                item.ownerDocument.getElementById('{0}-panel-view').ownerButton = item;
                win.PanelUI.showSubView('{0}-panel-view', item, CustomizableUI.AREA_PANEL);
            }}""".format(root.attrib["id"])
            if 'type' not in root.attrib:
                # Outside the menu panel, fall through to opening the popup.
                popup_opener = """ else {
                item.firstChild.openPopup(item, "after_start");
            }"""
                if 'oncommand' not in root.attrib:
                    root.attrib["oncommand"] = root.attrib["onclick"] + popup_opener
                else:
                    root.attrib["onclick"] += popup_opener
            statements, _, _ = self._create_dom(root)
            root_clone = deepcopy(root)
            popup = root_clone[0]
            if root.attrib['usepanelview'] == 'button-menu':
                # Mirror the button itself as the first item of its menu.
                del root_clone.attrib["type"]
                popup.insert(0, ET.Element("menuseparator"))
                popup.insert(0, ET.Element("menuitem", root_clone.attrib))
            for node in popup:
                node.attrib['class'] = 'subviewbutton'
            # panelview uses onViewShowing/onViewHiding instead of the
            # popup events.
            if 'onpopupshowing' in popup.attrib:
                popup.attrib['onViewShowing'] = popup.attrib['onpopupshowing']
                del popup.attrib['onpopupshowing']
            if 'onpopuphiding' in popup.attrib:
                popup.attrib['onViewHiding'] = popup.attrib['onpopuphiding']
                del popup.attrib['onpopuphiding']
            _, _, children = self._create_dom(root_clone, child_parent="popupset", rename={'menuitem': 'toolbarbutton'}, append_children=False)
            children.pop(0)
            data = {
                "type": "'custom'",
                "onBuild": '''function (document) {
                var window = document.defaultView;
                var popupset = document.getElementById('PanelUI-multiView');
                if(popupset) {
                    var menupopup_1 = document.createElement('panelview');
                    %s
                    menupopup_1.id = "%s-panel-view";
                }
                %s
            }''' % ("\n\t\t\t\t\t".join(children), root.attrib['id'], "\n\t\t\t\t".join(statements))
            }
        else:
            statements, _, _ = self._create_dom(root)
            data = {
                "type": "'custom'",
                "onBuild": 'function (document) {\n\t\t\t\tvar window = document.defaultView;\n\t\t\t\t%s\n\t\t\t}' % "\n\t\t\t\t".join(statements)
            }
        self._apply_toolbox(file_name, data)
        toolbar_max_count = self._settings.get("buttons_per_toolbar")
        if add_to_main_toolbar and button_id in add_to_main_toolbar:
            data['defaultArea'] = "'%s'" % self._settings.get('file_to_main_toolbar').get(file_name)
        elif self._settings.get("put_button_on_toolbar"):
            # Place the button on whichever generated toolbar it falls into.
            toolbar_index = count // toolbar_max_count
            if len(toolbar_ids) > toolbar_index:
                data['defaultArea'] = "'%s'" % toolbar_ids[toolbar_index]
        for key, value in root.attrib.items():
            if key in ('label', 'tooltiptext'):
                data[key] = self._dom_string_lookup(value)
            elif key == "id":
                data[key] = "'%s'" % value
            elif key == 'oncommand':
                self._button_commands[file_name][button_id] = value
            elif key == 'viewid':
                data["viewId"] = "'%s'" % value
            elif key == 'onviewshowing':
                data["onViewShowing"] = "function(event){\n\t\t\t\t%s\n\t\t\t}" % self._patch_call(value)
            elif key == 'onviewhideing':
                # NOTE(review): 'onviewhideing' (sic) — presumably matches
                # the attribute spelling used in the button XUL sources;
                # confirm before "fixing".
                data["onViewHiding"] = "function(event){\n\t\t\t\t%s\n\t\t\t}" % self._patch_call(value)
        for js_file in self._get_js_file_list(file_name):
            if self._button_js_setup.get(js_file, {}).get(button_id):
                data["onCreated"] = "function(aNode){\n\t\t\tvar document = aNode.ownerDocument;\n\t\t\t%s\n\t\t}" % self._button_js_setup[js_file][button_id]
        items = sorted(data.items(), key=self._attr_key)
        return "CustomizableUI.createWidget({\n\t\t\t%s\n\t\t});" % ",\n\t\t\t".join("%s: %s" % (key, value) for key, value in items)
def _apply_toolbox(self, file_name, data):
toolbox_info = self._settings.get("file_to_toolbar_box2").get(file_name)
if toolbox_info:
window_file, toolbox_id = toolbox_info
data["toolbox"] = "'%s'" % toolbox_id
if window_file:
data["window"] = "'%s'" % window_file
def _patch_call(self, value):
data = []
if re.search(r'\bthis\b', value):
value = re.sub(r'\bthis\b', 'aThis', value)
data.append("var aThis = event.currentTarget;")
if re.search(r'\bdocument\b', value):
data.append("var document = event.target.ownerDocument;")
if re.search(r'\bwindow\b', value):
data.append("var window = event.target.ownerDocument.defaultView;")
data.append(value)
return "\n\t\t\t\t".join(data)
    def _create_jsm_button(self, button_id, root, file_name, count, toolbar_ids):
        """Render a simple button as a ``CustomizableUI.createWidget()`` call.

        Used for buttons whose XUL is a single element with only simple
        attributes (see get_jsm_files); richer buttons go through
        _create_dom_button instead.  Side effect: oncommand handlers are
        recorded in self._button_commands.
        """
        toolbar_max_count = self._settings.get("buttons_per_toolbar")
        add_to_main_toolbar = self._settings.get("add_to_main_toolbar")
        data = {}
        attr = root.attrib
        self._apply_toolbox(file_name, data)
        if add_to_main_toolbar and button_id in add_to_main_toolbar:
            data['defaultArea'] = "'%s'" % self._settings.get('file_to_main_toolbar').get(file_name)
        elif self._settings.get("put_button_on_toolbar"):
            # Place the button on whichever generated toolbar it falls into.
            toolbar_index = count // toolbar_max_count
            if len(toolbar_ids) > toolbar_index:
                data['defaultArea'] = "'%s'" % toolbar_ids[toolbar_index]
        for key, value in attr.items():
            if key in ('label', 'tooltiptext'):
                data[key] = self._dom_string_lookup(value)
            elif key == "id":
                data[key] = "'%s'" % value
            elif key in ('onclick', 'oncommand'):
                if key == 'oncommand':
                    self._button_commands[file_name][button_id] = value
                key = 'onCommand' if key == 'oncommand' else 'onClick'
                data[key] = "function(event) {\n\t\t\t\t%s\n\t\t\t}" % self._patch_call(value)
        for js_file in self._get_js_file_list(file_name):
            if self._button_js_setup.get(js_file, {}).get(button_id):
                data["onCreated"] = "function(aNode) {\n\t\t\t\tvar document = aNode.ownerDocument;\n\t\t\t\t%s\n\t\t\t}" % self._button_js_setup[js_file][button_id]
        items = sorted(data.items(), key=self._attr_key)
        result = "CustomizableUI.createWidget({\n\t\t\t%s\n\t\t});" % ",\n\t\t\t".join("%s: %s" % (key, value) for (key, value) in items)
        return result
def get_jsm_files(self):
result = {}
simple_attrs = {'label', 'tooltiptext', 'id', 'oncommand', 'onclick', 'key', 'class'}
button_hash, toolbar_template = self._get_toolbar_info()
template = self.env.get_template('button.jsm')
javascript_object = self._settings.get("javascript_object")
for file_name, values in self._button_xul.items():
jsm_buttons = []
js_includes = [js_file for js_file in self._get_js_file_list(file_name)
if js_file != "loader" and js_file in self._included_js_files]
toolbars, toolbar_ids = self._create_jsm_toolbar(button_hash, toolbar_template, file_name, values)
count = 0
modules = set()
for button_id, xul in values.items():
root = ET.fromstring(xul.replace('&', '&'))
modules.update(self._modules[button_id])
attr = root.attrib
if not len(root) and not set(attr.keys()).difference(simple_attrs) and (not "class" in attr or attr["class"] == "toolbarbutton-1 chromeclass-toolbar-additional"):
jsm_buttons.append(self._create_jsm_button(button_id, root, file_name, count, toolbar_ids))
else:
jsm_buttons.append(self._create_dom_button(button_id, root, file_name, count, toolbar_ids))
count += 1
default_mods = {
"resource://gre/modules/Services.jsm",
"resource:///modules/CustomizableUI.jsm",
"resource://services-common/stringbundle.js"
}
modules_import = "\n".join("try { Cu.import('%s'); } catch(e) {}" % mod for mod in modules if mod and mod not in default_mods)
if self._settings.get("menu_meta"):
menu_id, menu_label, _ = self._settings.get("menu_meta")
else:
menu_id, menu_label = "", ""
end = set()
menu = self._jsm_create_menu(file_name, values)
for js_file in set(self._get_js_file_list(file_name) + [file_name]):
if self._button_js_setup.get(js_file, {}):
end.update(self._button_js_setup[js_file].values())
if (self._settings.get("menuitems") and menu) or self._settings.get('location_placement'):
end.add(javascript_object + ".setUpMenuShower(document);")
extra_ui = self.create_extra_ui(file_name, values)
result[file_name] = template.render(
modules=modules_import,
locale_file_prefix=self._settings.get("locale_file_prefix"),
scripts=js_includes,
button_ids=json.dumps(list(values.keys())),
toolbar_ids=json.dumps(toolbar_ids),
toolbars=toolbars,
menu_id=menu_id,
ui_ids=json.dumps(list(self._ui_ids)),
toolbox=self._settings.get("file_to_toolbar_box").get(file_name, ('', ''))[1],
menu=menu,
keys=list(self.jsm_keyboard_shortcuts(file_name)),
end="\n\t".join(end),
buttons=jsm_buttons,
extra_ui=extra_ui,
javascript_object=self._settings.get("javascript_object"),
pref_root=self._settings.get("pref_root"),
chrome_name=self._settings.get("chrome_name")
)
return result
def create_extra_ui(self, file_name, values):
location = self._settings.get("location_placement")
result = []
if location and file_name in self._settings.get("file_to_location", {}).get(location):
for index, (button_id, xul) in enumerate(values.items()):
parent, parent_id, after, attrib = self._settings.get("file_to_location").get(location).get(file_name)
root = ET.fromstring(xul.replace('&', '&'))
root.attrib["insertafter"] = after
root.attrib["id"] += "-extra-ui"
self._ui_ids.add(root.attrib["id"])
if attrib:
for name, value in attrib.items():
if value is None:
del root.attrib[name]
else:
root.attrib[name] = value
parent_var = "{}_{}".format(parent, index)
statements, _, _ = self._create_dom(root, top=parent_var)
result.append(ExtraUI(parent, parent_id, index, "\n\t\t".join(statements), after))
return result
    def _create_jsm_toolbar(self, button_hash, toolbar_template, file_name, values):
        """Generate createToolbar() JS calls and ids for this file.

        Walks the top toolbar box and the bottom (statusbar) box; for each
        enabled box it emits as many toolbars as needed to hold the
        buttons (buttons_per_toolbar each).  Side effect: a toggle button
        per toolbar is added into ``values``.  Returns (joined JS, list of
        toolbar ids); disabled files yield ('', []).
        """
        toolbar_ids = []
        toolbars = []
        if file_name in self._settings.get("extra_toolbars_disabled"):
            return '', []
        count = 0
        max_count = self._settings.get("buttons_per_toolbar")
        buttons = list(values.keys())
        # NOTE(review): 'include_satusbars' (sic) — presumably this matches
        # the misspelled settings key used elsewhere; confirm before fixing.
        for box_setting, include_setting in [("file_to_toolbar_box", "include_toolbars"),
                                             ("file_to_bottom_box", "include_satusbars")]:
            toolbar_node, toolbar_box = self._settings.get(box_setting).get(file_name, ('', ''))
            data = {
                "defaultset": "",
                "persist": "collapsed,hidden",
                "context": "toolbar-context-menu",
                "class": "toolbar-buttons-toolbar chromeclass-toolbar",
                "mode": "icons",
                "iconsize": "small",
                "customizable": "true",
            }
            if self._settings.get(include_setting) and toolbar_box:
                number = self.toolbar_count(include_setting, values, max_count)
                for i in range(number):
                    if self._settings.get("put_button_on_toolbar"):
                        # Pre-populate this toolbar with its slice of buttons.
                        data["defaultset"] = ",".join(buttons[i * max_count:(i + 1) * max_count])
                    # Fold the toolbar index into the running hash to get a
                    # short, stable, unique toolbar id suffix.
                    # NOTE(review): bytes(i) assumes Python 2 semantics
                    # (str(i)); under Python 3 it would produce i zero bytes.
                    button_hash.update(bytes(i))
                    hash = button_hash.hexdigest()[:6]
                    label_number = "" if (number + count) == 1 else " %s" % (i + count + 1)
                    toolbar_ids.append("tb-toolbar-%s" % hash)
                    if include_setting != "include_toolbars":
                        data["toolboxid"] = toolbar_box
                    data["id"] = "tb-toolbar-%s" % hash
                    toolbarname = self._dom_string_lookup("&tb-toolbar-buttons-toggle-toolbar.name;%s" % label_number)
                    # Register the matching show/hide toggle button.
                    values["tb-toolbar-buttons-toggle-toolbar-%s" % hash] = toolbar_template.replace("{{hash}}", hash).replace("{{ number }}", label_number)
                    toolbars.append("""createToolbar(document, '%s', %s, %s)""" % (toolbar_box, json.dumps(data), toolbarname))
                count += number
        return "\n\t\t".join(toolbars), toolbar_ids
| mit |
soybean217/lora-python | UServer/admin_server/admin_http_api/api/api_group.py | 1 | 3730 | import json
from wtforms import ValidationError
from userver.object.application import Application
from . import api, root
from flask import request, Response
from userver.object.group import Group
from binascii import hexlify
from utils.errors import KeyDuplicateError, PatchError
from .decorators import group_filter_valid, group_exists
from .forms import get_formdata_from_json_or_form
from .forms.form_group import AddGroupForm, PatchGroup, device_operate
from ..http_auth import auth
@api.route(root + 'groups', methods=['GET'])
@auth.auth_required
@group_filter_valid
def group_list(user=None, app=None):
    """Return the groups visible to the caller as a JSON array.

    Scope: a single application's groups when *app* is supplied by the
    filter decorator, all groups across the user's applications when only
    *user* is supplied, otherwise every group in the system.
    """
    if request.method == 'GET':
        if app is not None:
            groups = Group.objects.all(app_eui=app.app_eui)
        elif user is not None:
            groups = [group
                      for application in Application.query.filter_by(user_id=user.id)
                      for group in Group.objects.all(application.app_eui)]
        else:
            groups = Group.objects.all()
        payload = json.dumps([group.obj_to_dict() for group in groups])
        return Response(status=200, response=payload)
# elif request.method == 'POST':
# formdata = get_formdata_from_json_or_form(request)
# add_group = AddGroupForm(formdata)
# try:
# if add_group.validate():
# if len(add_group['appskey'].data) != 0:
# group = Group(add_group['app_eui'].data, add_group['name'].data, add_group['addr'].data, add_group['nwkskey'].data, appskey=add_group['appskey'].data)
# else:
# group = Group(add_group['app_eui'].data, add_group['name'].data, add_group['addr'].data, add_group['nwkskey'].data)
# group.save()
# return Response(status=201, response=json.dumps(group.obj_to_dict()))
# else:
# return Response(status=406, response=json.dumps({'errors': add_group.errors,
# 'succeed': False}))
# except KeyDuplicateError as error:
# return Response(status=403, response=json.dumps({"error": str(error),
# "succeed": False}))
@api.route(root + 'groups/<group_id>', methods=['GET'])
@auth.auth_required
@group_exists
def group_index(group):
    """Return one group's details as JSON.

    The *group* argument is resolved from the <group_id> URL segment by
    the @group_exists decorator (which presumably also handles the
    missing-group case — confirm in decorators.py).
    """
    if request.method == 'GET':
        return json.dumps(group.obj_to_dict()), 200
# elif request.method == 'PATCH':
# try:
# formdata = get_formdata_from_json_or_form(request)
# PatchGroup.patch(group, formdata)
# return Response(status=200, response=json.dumps(group.obj_to_dict()))
# except (AssertionError, ValidationError, PatchError) as e:
# return json.dumps({"error": str(e)}), 406
# elif request.method == 'POST':
# POST Down Msg
# pass
# elif request.method == 'DELETE':
# try:
# group.delete()
# return json.dumps({'errors': "Group: %s deleted." % hexlify(group.id).decode(),
# 'succeed': False}), 200
# except Exception as e:
# return json.dumps({'errors': "Fail to delete group: %s.\n%s" % (hexlify(group.id).decode(), str(e)),
# 'succeed': False}), 400
# elif request.method == 'POST':
# formdata = get_formdata_from_json_or_form(request)
# error = device_operate(group, formdata)
# if error is None or len(error) == 0:
# return json.dumps({'success': True}), 200
# else:
# return json.dumps({'error': str(error)}), 406
#
| mit |
sigmunau/nav | python/nav/smidumps/extreme_vlan_mib.py | 4 | 74473 | # python version 1.0 DO NOT EDIT
#
# Generated by smidump version 0.4.8:
#
# smidump -f python EXTREME-VLAN-MIB
FILENAME = "/home/mvold/mibs/v2/extreme.mib"
MIB = {
"moduleName" : "EXTREME-VLAN-MIB",
"EXTREME-VLAN-MIB" : {
"nodetype" : "module",
"language" : "SMIv2",
"organization" :
"""Extreme Networks, Inc.""",
"contact" :
"""www.extremenetworks.com""",
"description" :
"""Extreme Virtual LAN objects""",
"revisions" : (
{
"date" : "1905-06-24 00:00",
"description" :
"""[Revision added by libsmi due to a LAST-UPDATED clause.]""",
},
),
"identity node" : "extremeVlan",
},
"imports" : (
{"module" : "SNMPv2-SMI", "name" : "MODULE-IDENTITY"},
{"module" : "SNMPv2-TC", "name" : "RowStatus"},
{"module" : "SNMPv2-TC", "name" : "TruthValue"},
{"module" : "RFC1213-MIB", "name" : "DisplayString"},
{"module" : "SNMPv2-SMI", "name" : "OBJECT-TYPE"},
{"module" : "SNMPv2-SMI", "name" : "Integer32"},
{"module" : "SNMPv2-SMI", "name" : "IpAddress"},
{"module" : "EXTREME-BASE-MIB", "name" : "extremeAgent"},
{"module" : "EXTREME-BASE-MIB", "name" : "PortList"},
{"module" : "EXTREME-SYSTEM-MIB", "name" : "extremeSlotNumber"},
{"module" : "SNMPv2-SMI", "name" : "Counter64"},
),
"typedefs" : {
"ExtremeVlanType" : {
"basetype" : "Enumeration",
"vlanLayer2" : {
"nodetype" : "namednumber",
"number" : "1"
},
},
"ExtremeVlanEncapsType" : {
"basetype" : "Enumeration",
"vlanEncaps8021q" : {
"nodetype" : "namednumber",
"number" : "1"
},
"vlanEncapsNone" : {
"nodetype" : "namednumber",
"number" : "2"
},
},
}, # typedefs
"nodes" : {
"extremeVlan" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2",
"status" : "current",
}, # node
"extremeVlanGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1",
}, # node
"extremeVlanGlobalMappingTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.1",
"status" : "deprecated",
"description" :
"""This table lists VLAN interfaces that are globally
identified. A single entry exists in this list for
each VLAN interface in the system that is bound to
a global identifier.""",
}, # table
"extremeVlanGlobalMappingEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.1.1",
"status" : "current",
"linkage" : [
"extremeVlanGlobalMappingIdentifier",
],
"description" :
"""An individual VLAN interface global mapping entry.
Entries in this table are created by setting the
extremeVlanIfGlobalIdentifier object in the
extremeVlanIfTable to a non-zero value.""",
}, # row
"extremeVlanGlobalMappingIdentifier" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.1.1.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "65535"
},
],
"range" : {
"min" : "0",
"max" : "65535"
},
},
},
"access" : "readonly",
"description" :
"""An index into the extremeVlanGlobalMappingTable and an
administratively assigned global VLAN identifier. The
value of this object globally identifies the VLAN interface.
For VLAN interfaces, on different network devices, which are
part of the same globally identified VLAN, the value of this
object will be the same.""",
}, # column
"extremeVlanGlobalMappingIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The value of extremeVlanIfIndex for the VLAN interface in the
extremeVlanIfTable, which is bound to the global identifier
specified by this entry.""",
}, # column
"extremeVlanIfTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2",
"status" : "current",
"description" :
"""This table lists VLAN interfaces that exist within
a device. A single entry exists in this list for each
VLAN interface in the system. A VLAN interface may
be created, destroyed and/or mapped to a globally
identified vlan.""",
}, # table
"extremeVlanIfEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanIfIndex",
],
"description" :
"""An individual VLAN interface entry. When an NMS wishes
to create a new entry in this table, it must obtain a
non-zero index from the extremeNextAvailableVirtIfIndex
object. Row creation in this table will fail if the
chosen index value does not match the current value
returned from the extremeNextAvailableVirtIfIndex object.""",
}, # row
"extremeVlanIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""The index value of this row and the vlan's ifIndex in the
ifTable. The NMS obtains the index value for this row by
reading the extremeNextAvailableVirtIfIndex object.""",
}, # column
"extremeVlanIfDescr" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "RFC1213-MIB",
"type" : "DisplayString",
},
"ranges" : [
{
"min" : "0",
"max" : "32"
},
],
"range" : {
"min" : "0",
"max" : "32"
},
},
},
"access" : "readwrite",
"description" :
"""This is a description of the VLAN interface.""",
}, # column
"extremeVlanIfType" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"EXTREME-VLAN-MIB", "name" : "ExtremeVlanType"},
},
"access" : "readwrite",
"description" :
"""The VLAN interface type.""",
}, # column
"extremeVlanIfGlobalIdentifier" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.4",
"status" : "deprecated",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "65535"
},
],
"range" : {
"min" : "0",
"max" : "65535"
},
},
},
"access" : "readwrite",
"description" :
"""An administratively assigned global VLAN identifier. For
VLAN interfaces, on different network devices, which are
part of the same globally identified VLAN, the value of this
object will be the same.
The binding between a global identifier and a VLAN
interface can be created or removed. To create a binding
an NMS must write a non-zero value to this object. To
delete a binding, the NMS must write a zero to this
object. The value 1 is reserved for the default VLAN and
this cannot be deleted or re-assigned.""",
}, # column
"extremeVlanIfStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The status column for this VLAN interface.
This OBJECT can be set to:
active(1)
createAndGo(4)
createAndWait(5)
destroy(6)
The following values may be read:
active(1)
notInService(2)
notReady(3).
Setting this object to createAndGo(4) causes the agent
to attempt to create and commit the row based on
the contents of the objects in the row. If all necessary
information is present in the row and the values are
acceptible to the agent, the agent will change the
status to active(1). If any of the necessary objects
are not available, the agent will reject the creation
request.
Setting this object to createAndWait(5) causes a row
in this table to be created. The agent sets the
status to notInService(2) if all of the information is
present in the row and the values are acceptable to the
agent; otherwise, the agent sets the status to notReady(3).
Setting this object to active(1) is only valid when
the current status is active(1) or notInService(2).
When the state of the row transitions is set to active(1),
the agent creates the corresponding row in the ifTable.
Setting this object to destroy(6) will remove the
corresponding VLAN interface, remove the entry in this
table, and the corresponding entries in the
extremeVlanGlobalMappingTable and the ifTable.
In order for a set of this object to destroy(6) to succeed,
all dependencies on this row must have been removed. These
will include any stacking dependencies in the ifStackTable
and any protocol specific tables dependencies.""",
}, # column
"extremeVlanIfIgnoreStpFlag" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.7",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TruthValue"},
},
"access" : "readwrite",
"description" :
"""Enable/disable STP for this VLAN interface. Setting this
object to true will cause the ports on this VLAN to ignore
STP BPDUs. When a vlan is first created, the Default value
is FALSE, which means that the VLAN uses STP port information""",
}, # column
"extremeVlanIfIgnoreBpduFlag" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.8",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TruthValue"},
},
"access" : "readwrite",
"description" :
"""Setting this object to true causes this VLAN's BPDU's to be ignored by
the Spanning Tree process. This can be used to keep the root bridge within
one's own network when external switches also fall within the same Spanning
Tree Domain. When a vlan is first created, the Default value is FALSE.""",
}, # column
"extremeVlanIfLoopbackModeFlag" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.9",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TruthValue"},
},
"access" : "readwrite",
"description" :
"""Setting this object to true causes loopback mode to be enabled on this VLAN.""",
}, # column
"extremeVlanIfVlanId" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.10",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "1",
"max" : "4095"
},
],
"range" : {
"min" : "1",
"max" : "4095"
},
},
},
"access" : "readwrite",
"description" :
"""The VLAN ID of this VLAN.""",
}, # column
"extremeVlanIfEncapsType" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.11",
"status" : "current",
"syntax" : {
"type" : { "module" :"EXTREME-VLAN-MIB", "name" : "ExtremeVlanEncapsType"},
},
"access" : "readwrite",
"description" :
"""The encapsulation algorithm used when encapsulating
packets transmitted, or de-encapsulating packets
received through this interface.""",
}, # column
"extremeVlanIfAdminStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.1.2.1.12",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TruthValue"},
},
"access" : "readwrite",
"description" :
"""Enable/disable this VLAN interface. Setting this
object to true will administratively enable this VLAN.""",
}, # column
"extremeVirtualGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.2",
}, # node
"extremeNextAvailableVirtIfIndex" : {
"nodetype" : "scalar",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.2.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The value of the next available virtual ifIndex. This
object is used by an NMS to select an index value
for row-creation in tables indexed by ifIndex. The
current value of this object is changed to a new
value when the current value is written to an agent's
table, that is indexed by ifIndex. Row creation
using the current value of this object, allocates
a virtual ifIndex. Note the following:
1. A newly created row does not have to be active(1)
for the agent to allocate the virtual ifIndex.
2. Race conditions between multiple NMS's end when
a row is created. Rows are deemed created when
a setRequest is successfully committed (i.e.
           the errorStatus is noError(0)).
        3. An agent that exhausts its supply of virtual
           ifIndex values returns zero as the value of this
           object. This can be used by an NMS as an indication
           to delete unused rows and reboot the device.""",
}, # scalar
"extremeEncapsulationGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.3",
}, # node
"extremeVlanEncapsIfTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.3.1",
"status" : "current",
"description" :
"""This table lists VLAN encapsulation interfaces that
exist within a device. A single entry exists in this
list for each VLAN encapsulation interface in the
system. A VLAN encapsulation interface may be created
or destroyed.""",
}, # table
"extremeVlanEncapsIfEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.3.1.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanEncapsIfIndex",
],
"description" :
"""An individual VLAN encapsulation interface entry.
When an NMS wishes to create a new entry in this table,
it must obtain a non-zero index from the
extremeNextAvailableVirtIfIndex object. Row creation
in this table will fail if the chosen index value does
not match the current value returned from the
extremeNextAvailableVirtIfIndex object.""",
}, # row
"extremeVlanEncapsIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.3.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""The index value of this row and the encapsulation
interface's ifIndex in the ifTable. The NMS obtains
the index value used for creating a row in this table
by reading the extremeNextAvailableVirtIfIndex object.""",
}, # column
"extremeVlanEncapsIfType" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.3.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"EXTREME-VLAN-MIB", "name" : "ExtremeVlanEncapsType"},
},
"access" : "readwrite",
"description" :
"""The encapsulation algorithm used when encapsulating
packets transmitted, or de-encapsulating packets
received through this interface.""",
}, # column
"extremeVlanEncapsIfTag" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.3.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""The tag used when encapsulating packets transmitted,
or de-encapsulating packets received through this
interface.""",
}, # column
"extremeVlanEncapsIfStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.3.1.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The row status for this VLAN encapsulation interface.
This OBJECT can be set to:
active(1)
createAndGo(4)
createAndWait(5)
destroy(6)
The following values may be read:
active(1)
notReady(3).
In order for a row to become active, the NMS must set
extremeVlanEncapsIfTagType and extremeVlanEncapsIfTag
to some valid and consistent values.
Setting this object to createAndGo(4) causes the agent
to attempt to create and commit the row based on the
contents of the objects in the row. If all necessary
information is present in the row, the agent will
create the row and change the status to active(1). If
any of the necessary objects are not available, or
specify an invalid configuration, the row will not be
created and the agent will return an appropriate error.
Setting this object to createAndWait(5) causes a row in
in this table to be created. If all necessary objects
in the row have been assigned values and specify a
valid configuration, the status of the row will be set
to notInService(2); otherwise, the status will be set
to notReady(3).
This object may only be set to createAndGo(4) or
createAndWait(5) if it does not exist.
Setting this object to active(1) when the status is
notInService(2) causes the agent to commit the
row. Setting this object to active(1) when its value
is already active(1) is a no-op.
Setting this object to destroy(6) will remove the
corresponding VLAN encapsulation interface, remove the
entry in this table, and remove the corresponding entry
in the ifTable.
In order for a set of this object to destroy(6) to
succeed, all dependencies on this row must have been
removed. These will include any references to this
interface in the ifStackTable.""",
}, # column
"extremeVlanIpGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.4",
}, # node
"extremeVlanIpTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.4.1",
"status" : "current",
"description" :
"""A list of IP VLAN interface information
entries. Entries in this table are related
to entries in the extremeVlanIfTable by using the
same index.""",
}, # table
"extremeVlanIpEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.4.1.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanIfIndex",
],
"description" :
"""A extremeVlanIpEntry contains layer 3
information about a particular IP VLAN
interface.""",
}, # row
"extremeVlanIpNetAddress" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.4.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""The IP network number for the IP VLAN interface defined
in the extremeVlanIfTable identified with the same index.""",
}, # column
"extremeVlanIpNetMask" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.4.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""The IP network mask corresponding to the IP Network
address defined by extremeVlanIpIpNetAddress. """,
}, # column
"extremeVlanIpStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.4.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The status column for this IP VLAN entry.
This object can be set to:
active(1)
createAndGo(4)
destroy(6)
The following values may be read:
active(1)
Setting this object to active(1) or createAndGo(4) causes
the agent to attempt to create and commit the row based on
the contents of the objects in the row. If all necessary
information is present in the row and the values are
        acceptable to the agent, the agent will change the
status to active(1). If any of the necessary objects
are not available, the agent will reject the row
creation request.
Setting this object to createAndWait(5) is not supported.
When the status changes to active(1), the agent applies the
        IP parameters to the IP VLAN interface identified by the
corresponding value of the extremeVlanIpIndex object.
Setting this object to destroy(6) will remove the IP
        parameters from the IP VLAN interface and remove the
entry from this table.""",
}, # column
"extremeVlanIpForwardingState" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.4.1.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TruthValue"},
},
"access" : "readwrite",
"description" :
"""Indicates whether the IP Forwarding on this Vlan is
Enable or not. A true value indicates that the Vlan is
Enable.""",
}, # column
"extremeProtocolGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5",
}, # node
"extremeVlanProtocolTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.1",
"status" : "current",
"description" :
"""Table of defined combinations of protocol IDs
that make up one protocol definition name.
All members of one protocol definition share
the same extremeVlanProtocolIndex. A given
protocol ID may appear in the definition of
just one protocol definition. This table will
typically contain some default entries for
popular protocols chosen by ExtremeWare.""",
}, # table
"extremeVlanProtocolEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.1.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanProtocolIndex",
"extremeVlanProtocolIdIndex",
],
"description" :
"""One member protocol ID of a protocol definition.""",
}, # row
"extremeVlanProtocolIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.1.1.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "16"
},
],
"range" : {
"min" : "0",
"max" : "16"
},
},
},
"access" : "readwrite",
"description" :
"""An index representing a protocol grouping of
protocol ID values. A limited number of
protocol groupings may be defined (up to 7
in ExtremeWare and 16 in EXOS). 0 is used
for internal purposes.""",
}, # column
"extremeVlanProtocolIdIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.1.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "6"
},
],
"range" : {
"min" : "0",
"max" : "6"
},
},
},
"access" : "readwrite",
"description" :
"""An index representing a particular protocol ID
within a protocol grouping. A limited number of
protocol IDs may be defined per
extremeVlanProtocolIndex (up to 6 in ExtremeWare).
0 is used for internal purposes.""",
}, # column
"extremeVlanProtocolName" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.1.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "RFC1213-MIB",
"type" : "DisplayString",
},
"ranges" : [
{
"min" : "0",
"max" : "31"
},
],
"range" : {
"min" : "0",
"max" : "31"
},
},
},
"access" : "readwrite",
"description" :
"""A human-readable string representing this protocol.
This string should be the same for all entries
sharing a given extremeVlanProtocolIndex.
A ExtremeVlanProtocolEntry with
extremeVlanProtocolName of ANY represents a match
on all protocols: this entry may not be modified.""",
}, # column
"extremeVlanProtocolDllEncapsType" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.1.1.4",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"any" : {
"nodetype" : "namednumber",
"number" : "1"
},
"ethertype" : {
"nodetype" : "namednumber",
"number" : "2"
},
"llc" : {
"nodetype" : "namednumber",
"number" : "3"
},
"llcSnapEthertype" : {
"nodetype" : "namednumber",
"number" : "4"
},
"none" : {
"nodetype" : "namednumber",
"number" : "5"
},
},
},
"access" : "readwrite",
"description" :
"""Represents the type of data-link encapsulation
in which the extremeVlanProtocolId protocol ID
is carried. The value any(1) is used to indicate
a wildcard and matches all encapsulations and
protocol IDs that are not explicitly mentioned
in this table. Entries of type any(1) may not
be created. The value none(5) is used to indicate
that no protocol IDs match this entry. A value
of none(5) may not be set by a manager.""",
}, # column
"extremeVlanProtocolId" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.1.1.5",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "65535"
},
],
"range" : {
"min" : "0",
"max" : "65535"
},
},
},
"access" : "readwrite",
"description" :
"""The protocol ID: for entries of type ethertype(2)
or llcSnapEthertype(4) this represents a 16-bit
protocol ID. For entries of type llc(3) it
represents a concatenation of LLC DSAP+SSAP in
network byte order. This value is not valid
for extremeVlanProtocolDllEncapsType values of
any(1) or none(5).""",
}, # column
"extremeVlanProtocolStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.1.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The row status variable, used according to
row installation and removal conventions.""",
}, # column
"extremeVlanProtocolVlanTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.2",
"status" : "current",
"description" :
"""Table to apply one of the protocol definitions
        in extremeVlanProtocolTable to a given VLAN.
        (This applies to all ports that are untagged
in this VLAN). A limited number of protocols
may be applied simultaneously in one device
(up to 8 in ExtremeWare).""",
}, # table
"extremeVlanProtocolVlanEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanProtocolVlanIfIndex",
"extremeVlanProtocolVlanProtocolIndex",
],
"description" :
"""A mapping of untagged packets of one protocol
onto a particular VLAN.""",
}, # row
"extremeVlanProtocolVlanIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""The index value of this row and the vlan's ifIndex in the
ifTable. The NMS obtains the index value for this row by
reading the extremeNextAvailableVirtIfIndex object.""",
}, # column
"extremeVlanProtocolVlanProtocolIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""An index representing a protocol grouping of
protocol ID values. A limited number of
protocol groupings may be defined (up to 7
in ExtremeWare).""",
}, # column
"extremeVlanProtocolVlanStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The row status variable, used according to
row installation and removal conventions.""",
}, # column
"extremeVlanProtocolDefTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.3",
"status" : "current",
"description" :
"""Table of defined combinations of protocols and
DLLEncaps type. This table will
typically contain some default entries for
popular protocols chosen by ExtremeWare.""",
}, # table
"extremeVlanProtocolDefEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.3.1",
"status" : "current",
"linkage" : [
"extremeVlanProtocolDefName",
"extremeVlanProtocolDefDllEncapsType",
"extremeVlanProtocolDefValue",
],
"description" :
"""One member representing combination of protocol and
DLLEncaps Type.""",
}, # row
"extremeVlanProtocolDefName" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.3.1.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "RFC1213-MIB",
"type" : "DisplayString",
},
"ranges" : [
{
"min" : "0",
"max" : "31"
},
],
"range" : {
"min" : "0",
"max" : "31"
},
},
},
"access" : "noaccess",
"description" :
"""A human-readable string representing this protocol.
A ExtremeVlanProtocolEntry with
extremeVlanProtocol2Name of ANY represents a match
on all protocols: this entry may not be modified.""",
}, # column
"extremeVlanProtocolDefDllEncapsType" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.3.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"any" : {
"nodetype" : "namednumber",
"number" : "1"
},
"ethertype" : {
"nodetype" : "namednumber",
"number" : "2"
},
"llc" : {
"nodetype" : "namednumber",
"number" : "3"
},
"llcSnapEthertype" : {
"nodetype" : "namednumber",
"number" : "4"
},
"none" : {
"nodetype" : "namednumber",
"number" : "5"
},
},
},
"access" : "noaccess",
"description" :
"""Represents the type of data-link encapsulation
in which the protocol designated by
extremeVlanProtocolDefName is carried.
The value any(1) is used to indicate
a wildcard and matches all encapsulations and
protocol IDs that are not explicitly mentioned
in this table. Entries of type any(1) may not
be created. The value none(5) is used to indicate
that no protocol IDs match this entry. A value
of none(5) may not be set by a manager.""",
}, # column
"extremeVlanProtocolDefValue" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.3.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Integer32",
"ranges" : [
{
"min" : "0",
"max" : "65535"
},
],
"range" : {
"min" : "0",
"max" : "65535"
},
},
},
"access" : "noaccess",
"description" :
"""The protocol ID: for entries of type ethertype(2)
or llcSnapEthertype(4) this represents a 16-bit
protocol ID. For entries of type llc(3) it
represents a concatenation of LLC DSAP+SSAP in
network byte order. This value is not valid
for extremeVlanProtocolDefDllEncapsType values of
any(1) or none(5).""",
}, # column
"extremeVlanProtocolDefStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.3.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readonly",
"description" :
"""The row status variable, used according to
row installation and removal conventions.""",
}, # column
"extremeVlanProtocolBindingTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.4",
"status" : "current",
"description" :
"""Table to apply one of the protocol definitions
        in extremeVlanProtocolDefTable to a given VLAN.
        (This applies to all ports that are untagged
in this VLAN). A limited number of protocols
may be applied simultaneously in one device
(up to 8 in ExtremeWare).""",
}, # table
"extremeVlanProtocolBindingEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.4.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanProtocolBindingIfIndex",
],
"description" :
"""A mapping of untagged packets of one protocol
onto a particular VLAN.""",
}, # row
"extremeVlanProtocolBindingIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.4.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "noaccess",
"description" :
"""The index value of this row and the vlan's ifIndex in the
ifTable. The NMS obtains the index value for this row by
reading the extremeNextAvailableVirtIfIndex object.""",
}, # column
"extremeVlanProtocolBindingName" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.4.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "RFC1213-MIB",
"type" : "DisplayString",
},
"ranges" : [
{
"min" : "0",
"max" : "31"
},
],
"range" : {
"min" : "0",
"max" : "31"
},
},
},
"access" : "readwrite",
"description" :
"""A human-readable string representing this protocol.
A ExtremeVlanProtocolBindingEntry with
extremeVlanProtocolBindingName of ANY represents a
match on all protocols: this entry may not be modified.""",
}, # column
"extremeVlanProtocolBindingStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.5.4.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readonly",
"description" :
"""The row status variable, used according to
row installation and removal conventions.""",
}, # column
"extremeVlanOpaqueGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6",
}, # node
"extremeVlanOpaqueTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.1",
"status" : "current",
"description" :
"""This table lists the ports associated with each VLAN interface.""",
}, # table
"extremeVlanOpaqueEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.1.1",
"status" : "current",
"linkage" : [
"extremeVlanIfIndex",
"extremeSlotNumber",
],
"description" :
"""This represents the tagged and untagged ports on each slot per vlan.""",
}, # row
"extremeVlanOpaqueTaggedPorts" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"EXTREME-BASE-MIB", "name" : "PortList"},
},
"access" : "readonly",
"description" :
"""Each bit in the octet string represents one port.
A 1 means that the port is a tagged port in that vlan.
The bit value for a port is 0 otherwise.""",
}, # column
"extremeVlanOpaqueUntaggedPorts" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"EXTREME-BASE-MIB", "name" : "PortList"},
},
"access" : "readonly",
"description" :
"""Each bit in the octet string represents one port.
A 1 means that the port is an untagged port in that vlan.
The bit value for a port is 0 otherwise.""",
}, # column
"extremeVlanOpaqueControlTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.2",
"status" : "current",
"description" :
"""This table is used to configure the ports associated with each VLAN
interface. The table is used to add/delete ports on a vlan. The table is
transitional in nature and SNMP read operations must not be performed on it.
Use extremeVlanOpaqueTable for reading the port membership association with vlans""",
}, # table
"extremeVlanOpaqueControlEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanIfIndex",
"extremeSlotNumber",
],
"description" :
"""This represents a control table entry (command) to configure the tagged
and untagged ports on each slot per vlan. The first index of the entry is
the ifIndex of the VLAN and second index is the slot number of the ports.
        When adding untagged ports to a VLAN, those ports may not be untagged ports
for another VLAN (assuming both VLANs use the ANY protocol filter). Such
ports must first be deleted from the other VLAN(s) or an error will occur.
The operation will succeed or fail in its entirety, no partial results on some of the ports.""",
}, # row
"extremeVlanOpaqueControlPorts" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"EXTREME-BASE-MIB", "name" : "PortList"},
},
"access" : "readwrite",
"description" :
"""The octet string representing a list of ports in
bitwise form.""",
}, # column
"extremeVlanOpaqueControlOperation" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.2.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"addTagged" : {
"nodetype" : "namednumber",
"number" : "1"
},
"addUntagged" : {
"nodetype" : "namednumber",
"number" : "2"
},
"delete" : {
"nodetype" : "namednumber",
"number" : "3"
},
},
},
"access" : "readwrite",
"description" :
"""The operation code for this entry.
addTagged(1) = Ports referred to in the extremeVlanOpaqueControlPorts
variable are added as tagged ports to the VLAN indicated
by the index. The ports belong to the slot number as
indicated by the second index of the variable.
addUntagged(2) = Ports referred to in the extremeVlanOpaqueControlPorts
                      variable are added as untagged ports to the VLAN indicated
by the index. The ports belong to the slot number as
indicated by the second index of the variable.
delete(3) = Ports referred to in the extremeVlanOpaqueControlPorts
variable are removed from the VLAN indicated by the index.
The ports belong to the slot number as indicated by the
second index of the variable.""",
}, # column
"extremeVlanOpaqueControlStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.6.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The status of this entry as per standard RowStatus
conventions. Note however, that only the CreateAndGo
state is supported.""",
}, # column
"extremeVlanStackGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.7",
}, # node
"extremeVlanStackTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.7.1",
"status" : "current",
"description" :
"""Represents those components of the ifStackTable that do not
contain a Physical interface.""",
}, # table
"extremeVlanStackEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.7.1.1",
"status" : "current",
"linkage" : [
"extremeVlanStackHigherLayer",
"extremeVlanStackLowerLayer",
],
"description" :
"""Each entry in this read-only table defines which interfaces are
on top of which one. All information in the table is also
contained in ifStackTable. The Physical interfaces in the
ifStackTable are not represented here.""",
}, # row
"extremeVlanStackHigherLayer" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.7.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The higher layer in the logical vlan hierarchy.""",
}, # column
"extremeVlanStackLowerLayer" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.7.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The lower layer in the logical vlan hierarchy.""",
}, # column
"extremeVlanStatsGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8",
}, # node
"extremeVlanL2StatsTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.1",
"status" : "current",
"description" :
"""This tables contains per-VLAN layer 2 statistics information.""",
}, # table
"extremeVlanL2StatsEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.1.1",
"status" : "current",
"linkage" : [
"extremeVlanIfIndex",
],
"description" :
"""""",
}, # row
"extremeVlanL2StatsIfDescr" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.1.1.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "RFC1213-MIB",
"type" : "DisplayString",
},
"ranges" : [
{
"min" : "0",
"max" : "32"
},
],
"range" : {
"min" : "0",
"max" : "32"
},
},
},
"access" : "readonly",
"description" :
"""This is a description(name) of the VLAN.""",
}, # column
"extremeVlanL2StatsPktsToCpu" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The total number of packets of this VLAN sent to the CPU.""",
}, # column
"extremeVlanL2StatsPktsLearnt" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The total number of packets learnt on this VLAN.""",
}, # column
"extremeVlanL2StatsIgmpCtrlPktsSnooped" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.1.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The total number of IGMP control packets snooped on this VLAN.""",
}, # column
"extremeVlanL2StatsIgmpDataPktsSwitched" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.1.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The total number of IGMP data packets switched on this VLAN.""",
}, # column
"extremePortVlanStatsTable" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeStatsPortIfIndex",
"extremeStatsVlanNameIndex",
],
"description" :
"""Vlan statistics per port.""",
}, # row
"extremeStatsPortIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The index of this table.""",
}, # column
"extremeStatsVlanNameIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "OctetString",
"parent module" : {
"name" : "RFC1213-MIB",
"type" : "DisplayString",
},
"ranges" : [
{
"min" : "0",
"max" : "32"
},
],
"range" : {
"min" : "0",
"max" : "32"
},
},
},
"access" : "readonly",
"description" :
"""The index of this table.""",
}, # column
"extremePortVlanStatsCntrType" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The flag to decide what fields to display, basic
or extended. Currently, it is read-only and will
reflect whatever has been set for the switch
through the cli.""",
}, # column
"extremePortVlanUnicastReceivedPacketsCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The number of Unicast packets received by a port
for a particular VLAN.""",
}, # column
"extremePortVlanMulticastReceivedPacketsCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The number of Multicast packets received by a port
for a particular VLAN.""",
}, # column
"extremePortVlanBroadcastReceivedPacketsCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The number of Broadcast packets received by a port
for a particular VLAN.""",
}, # column
"extremePortVlanTotalReceivedBytesCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.7",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The total number of bytes received by a port
for a particular VLAN.""",
}, # column
"extremePortVlanTotalReceivedFramesCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.8",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The total number of frames received by a port
for a particular VLAN.""",
}, # column
"extremePortVlanUnicastTransmittedPacketsCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.9",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The number of Unicast packets transmitted by a
port for a particular VLAN.""",
}, # column
"extremePortVlanMulticastTransmittedPacketsCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.10",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The number of Multicast packets transmitted by a port
for a particular VLAN.""",
}, # column
"extremePortVlanBroadcastTransmittedPacketsCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.11",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The number of Broadcast packets transmitted by a port
for a particular VLAN.""",
}, # column
"extremePortVlanTotalTransmittedBytesCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.12",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The total number of bytes transmitted by a port
for a particular VLAN.""",
}, # column
"extremePortVlanTotalTransmittedFramesCounter" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.13",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "Counter64"},
},
"access" : "readonly",
"description" :
"""The total number of frames transmitted by a port
for a particular VLAN.""",
}, # column
"extremePortConfigureVlanStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.8.2.14",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The row status variable, used according to
row installation and removal conventions.""",
}, # column
"extremeVlanAggregationGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9",
}, # node
"extremeVlanAggregationTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1",
"status" : "current",
"description" :
"""This table contains the VLAN aggregation information.""",
}, # table
"extremeVlanAggregationEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanAggregationSuperVlanIfIndex",
"extremeVlanAggregationSubVlanIfIndex",
],
"description" :
"""Information about the individual VLAN aggregation entry.""",
}, # row
"extremeVlanAggregationSuperVlanIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The Super Vlan index for this entry.""",
}, # column
"extremeVlanAggregationSubVlanIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The Sub Vlan index for this entry.""",
}, # column
"extremeVlanAggregationSubVlanStartIpNetAddress" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""This represents the start network address of the IP range.""",
}, # column
"extremeVlanAggregationSubVlanStartIpNetMask" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readonly",
"description" :
"""This represents the start network address mask of the IP range""",
}, # column
"extremeVlanAggregationSubVlanEndIpNetAddress" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""This represents the end network address of the IP range.""",
}, # column
"extremeVlanAggregationSubVlanEndIpNetMask" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1.1.6",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readonly",
"description" :
"""This represents the end network address mask of the IP range.""",
}, # column
"extremeVlanAggregationStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.1.1.7",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The status of this entry.""",
}, # column
"extremeVlanAggregationConfigTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.2",
"status" : "current",
"description" :
"""This table contains the sub VLAN proxy setting information.""",
}, # table
"extremeVlanAggregationConfigEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanAggregationConfigSuperVlanIfIndex",
],
"description" :
"""Information about the individual VLAN aggregation entry.""",
}, # row
"extremeVlanAggregationConfigSuperVlanIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The Super Vlan index for this entry.""",
}, # column
"extremeVlanAggregationConfigSubVlanProxyEnable" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.9.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "TruthValue"},
},
"access" : "readwrite",
"description" :
"""The boolean flag that prevents normal communication between sub vlans.""",
}, # column
"extremeVlanTranslationGroup" : {
"nodetype" : "node",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.10",
}, # node
"extremeVlanTranslationTable" : {
"nodetype" : "table",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.10.1",
"status" : "current",
"description" :
"""This table contains the VLAN translation information.""",
}, # table
"extremeVlanTranslationEntry" : {
"nodetype" : "row",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.10.1.1",
"create" : "true",
"status" : "current",
"linkage" : [
"extremeVlanTranslationSuperVlanIfIndex",
"extremeVlanTranslationMemberVlanIfIndex",
],
"description" :
"""Information about the individual VLAN translation entry.""",
}, # row
"extremeVlanTranslationSuperVlanIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.10.1.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The Super Vlan index for this entry.""",
}, # column
"extremeVlanTranslationMemberVlanIfIndex" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.10.1.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""The member Vlan index for this entry.""",
}, # column
"extremeVlanTranslationStatus" : {
"nodetype" : "column",
"moduleName" : "EXTREME-VLAN-MIB",
"oid" : "1.3.6.1.4.1.1916.1.2.10.1.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""The status of this entry.""",
}, # column
}, # nodes
}
| gpl-2.0 |
meejah/crossbarexamples | xbr/teststack1/_init/check_blockchain.py | 3 | 8477 | import os
import sys
import web3
import six
import txaio
txaio.use_twisted()
from autobahn import xbr
import argparse
from binascii import a2b_hex, b2a_hex
import txaio
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from init_data import ACCOUNTS, OWNER, MARKETS
from cfxdb import unpack_uint256
class MarketMakerClient(ApplicationSession):
    # One-shot WAMP client: on join it asks the XBR market maker for the
    # off-chain state of the channels handed over via config.extra['channels'],
    # prints a summary and leaves the realm again.
    async def onJoin(self, details):
        self.log.debug('{klass}.onJoin(details.session={session}, details.authid="{authid}")',
                       klass=self.__class__.__name__, session=details.session, authid=details.authid)
        self.log.info('-' * 120)
        self.log.info('Channels in market (off-chain information):')
        for channel_adr, channel_type, channel_state in self.config.extra.get('channels', []):
            # channel_adr arrives as a '0x'-prefixed hex string; strip the
            # prefix and convert to raw bytes for the RPC calls below.
            channel_adr = a2b_hex(channel_adr[2:])
            try:
                # get real-time off-chain channel balance (as maintained within the market maker)
                if channel_type == 1:
                    channel = await self.call('xbr.marketmaker.get_payment_channel', channel_adr)
                    balance = await self.call('xbr.marketmaker.get_payment_channel_balance', channel_adr)
                elif channel_type == 2:
                    channel = await self.call('xbr.marketmaker.get_paying_channel', channel_adr)
                    balance = await self.call('xbr.marketmaker.get_paying_channel_balance', channel_adr)
                # NOTE(review): a channel_type other than 1/2 would leave
                # `channel`/`balance` unbound and make the else-branch raise
                # NameError; main() currently only forwards types 1 and 2.
            except:
                self.log.failure()
            else:
                # initial on-chain channel amount
                # (XBR token uses 18 decimal digits, hence the 10**18 scaling)
                amount = int(unpack_uint256(channel['amount']) / 10 ** 18)
                remaining = int(unpack_uint256(balance['remaining']) / 10 ** 18)
                ctype = {0: 'No Channel', 1: 'Payment Channel', 2: 'Paying Channel'}.get(channel['type'], 'UNKNOWN')
                cstate = {0: None, 1: 'OPEN', 2: 'CLOSING', 3: 'CLOSED', 4: 'FAILED'}.get(channel['state'], 'UNKNOWN')
                print(' {} 0x{}: market {}, delegate {}, currently in {} state, initial amount {} XBR, current off-chain balance {} XBR'.format(ctype,
                      b2a_hex(channel_adr).decode(),
                      b2a_hex(channel['market']).decode(),
                      b2a_hex(channel['delegate']).decode(),
                      cstate,
                      amount,
                      remaining))
        self.log.info('-' * 120)
        self.leave()
    def onLeave(self, details):
        self.log.debug('{klass}.onLeave(details.reason="{reason}", details.message="{message}")',
                       klass=self.__class__.__name__, reason=details.reason, message=details.message if details.message else '')
        # Stop the runner (if one was passed in via extra) so the process can
        # exit instead of auto-reconnecting.
        runner = self.config.extra.get('runner', None)
        if runner:
            try:
                runner.stop()
            except:
                self.log.failure()
        self.disconnect()
    def onDisconnect(self):
        # Shut down the twisted reactor; it may already be stopping, in which
        # case reactor.stop() raises and is deliberately ignored.
        from twisted.internet import reactor
        try:
            reactor.stop()
        except:
            pass
def main(w3, accounts, owner, markets, args):
    """Print test-account balances and on-chain channel state, then run a
    WAMP client (MarketMakerClient) to also show off-chain channel balances.

    :param w3: connected web3 instance
    :param accounts: mapping of account label -> eth account object
    :param owner: unused here (kept for call-signature symmetry)
    :param markets: market description dicts (see init_data.MARKETS)
    :param args: parsed CLI arguments (url, realm, ...)
    """
    # NOTE(review): this initial value is overwritten by the loop below,
    # which uses market['id'] without a2b_hex -- looks like leftover code.
    market_id = a2b_hex(markets[0]['id'][2:])
    # 1) show test accounts
    #
    print('-' * 120)
    print('Test accounts:')
    for ak in accounts:
        acct = accounts[ak]
        balance_eth = w3.eth.getBalance(acct.address)
        balance_xbr = xbr.xbrtoken.functions.balanceOf(acct.address).call()
        # the XBR token has 18 decimal digits
        balance_xbr = int(balance_xbr / 10 ** 18)
        print(' balances of {} {:>25}: {:>30} ETH {:>30} XBR'.format(acct.address, ak, balance_eth, balance_xbr))
    print('-' * 120)
    print('Channels in market (on-chain information):')
    # Collect all payment and paying channel addresses of every actor in
    # every market.
    channels = []
    for market in markets:
        market_id = market['id']
        for actor in market['actors']:
            actor_adr = actor['addr']
            channels.extend(xbr.xbrnetwork.functions.getAllPaymentChannels(market_id, actor_adr).call())
            channels.extend(xbr.xbrnetwork.functions.getAllPayingChannels(market_id, actor_adr).call())
    channels_ = []
    for channel_adr in channels:
        # Some contract calls return tuples rather than plain addresses; skip those.
        if type(channel_adr) == tuple:
            continue
        channel = w3.eth.contract(address=channel_adr, abi=xbr.XBR_CHANNEL_ABI).functions
        if channel:
            # 10**18 scaling: XBR token has 18 decimal digits.
            amount = int(channel.amount().call() / 10**18)
            ctype = channel.ctype().call()
            cstate = channel.state().call()
            balance = int(xbr.xbrtoken.functions.balanceOf(channel_adr).call() / 10**18)
            if ctype in [1, 2]:
                # Only payment (1) and paying (2) channels are forwarded to
                # the WAMP client below.
                channels_.append((channel_adr, ctype, cstate))
            else:
                print('Skipping unknown channel type {} for address {}'.format(ctype, channel_adr))
            ctype = {0: 'No Channel', 1: 'Payment Channel', 2: 'Paying Channel'}.get(ctype, 'UNKNOWN')
            cstate = {0: None, 1: 'OPEN', 2: 'CLOSING', 3: 'CLOSED', 4: 'FAILED'}.get(cstate, 'UNKNOWN')
            print(' {:<16} {}: currently in {} state, initial amount {} XBR, current on-chain balance {} XBR'.format(ctype, channel_adr, cstate, amount, balance))
            # Dead debug block, kept disabled on purpose.
            if False:
                print('channel:',
                      channel_adr, channel.channelType().call(), channel.channelState().call(), channel.marketId().call(),
                      channel.sender().call(), channel.delegate().call(), channel.recipient().call(), channel.amount().call(),
                      channel.openedAt().call(), channel.closedAt().call(), channel.channelTimeout().call())
    # now actually run a WAMP client using our session class ClientSession
    extra = {
        'runner': None,
        'channels': channels_,
    }
    runner = ApplicationRunner(url=args.url, realm=args.realm, extra=extra)
    # Hand the runner to the session (via extra) so onLeave can stop it.
    extra['runner'] = runner
    runner.run(MarketMakerClient, auto_reconnect=True)
if __name__ == '__main__':
    # CLI entry point: parse arguments, set up logging and a web3 provider,
    # then hand control to main().
    print('Using web3.py v{}'.format(web3.__version__))
    parser = argparse.ArgumentParser()
    parser.add_argument('-d',
                        '--debug',
                        action='store_true',
                        help='Enable debug output.')
    parser.add_argument('--gateway',
                        dest='gateway',
                        type=str,
                        default=None,
                        help='Ethereum HTTP gateway URL or None for auto-select (default: -, means let web3 auto-select).')
    parser.add_argument('--url',
                        dest='url',
                        type=six.text_type,
                        default=os.environ.get('CBURL', u'ws://localhost:8080/ws'),
                        help='The router URL (default: "ws://localhost:8080/ws").')
    parser.add_argument('--realm',
                        dest='realm',
                        type=six.text_type,
                        default=os.environ.get('CBREALM', u'realm1'),
                        help='The realm to join (default: "realm1").')
    args = parser.parse_args()
    # start logging
    if args.debug:
        txaio.start_logging(level='debug')
    else:
        txaio.start_logging(level='info')
    if args.gateway:
        w3 = web3.Web3(web3.Web3.HTTPProvider(args.gateway))
    else:
        # using automatic provider detection:
        from web3.auto import w3
    # check we are connected, and check network ID
    if not w3.isConnected():
        print('Could not connect to Web3/Ethereum at: {}'.format(args.gateway or 'auto'))
        sys.exit(1)
    else:
        print('Connected to provider "{}"'.format(args.gateway or 'auto'))
    # set new provider on XBR library
    xbr.setProvider(w3)
    # now enter main ..
    # main(w3.eth.accounts)
    main(w3, ACCOUNTS, OWNER, MARKETS, args)
| apache-2.0 |
GeekTrainer/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
    """Base class for all charset probers.

    Concrete probers override feed()/get_charset_name()/get_confidence();
    this base supplies shared state handling and byte-filtering helpers.
    """

    def __init__(self):
        pass

    def reset(self):
        # Back to the initial "still detecting" state.
        self._mState = constants.eDetecting

    def get_charset_name(self):
        # The base prober has no opinion about the charset.
        return None

    def feed(self, aBuf):
        # No-op here; subclasses consume the byte buffer.
        pass

    def get_state(self):
        return self._mState

    def get_confidence(self):
        return 0.0

    def filter_high_bit_only(self, aBuf):
        # Collapse every run of pure-ASCII bytes (0x00-0x7F) into a single
        # space, keeping only the high-bit bytes relevant for detection.
        return re.sub(b'([\x00-\x7F])+', b' ', aBuf)

    def filter_without_english_letters(self, aBuf):
        # Collapse every run of Latin letters into a single space.
        return re.sub(b'([A-Za-z])+', b' ', aBuf)

    def filter_with_english_letters(self, aBuf):
        # TODO: not implemented yet -- currently returns the buffer unchanged.
        return aBuf
| apache-2.0 |
etherkit/OpenBeacon2 | client/linux-arm/venv/lib/python3.5/site-packages/setuptools/config.py | 28 | 16413 | from __future__ import absolute_import, unicode_literals
import io
import os
import sys
from collections import defaultdict
from functools import partial
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.py26compat import import_module
from setuptools.extern.six import string_types
def read_configuration(
        filepath, find_others=False, ignore_option_errors=False):
    """Read given configuration file and returns options from it as a dict.
    :param str|unicode filepath: Path to configuration file
        to get options from.
    :param bool find_others: Whether to search for other configuration files
        which could be on in various places.
    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to exceptions
        in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.
    :rtype: dict
    """
    # Imported lazily to avoid a circular import at module load time.
    from setuptools.dist import Distribution, _Distribution
    filepath = os.path.abspath(filepath)
    if not os.path.isfile(filepath):
        raise DistutilsFileError(
            'Configuration file %s does not exist.' % filepath)
    current_directory = os.getcwd()
    # chdir next to the config file so relative references inside it
    # (e.g. the file: and attr: directives) resolve against its location.
    os.chdir(os.path.dirname(filepath))
    try:
        dist = Distribution()
        filenames = dist.find_config_files() if find_others else []
        if filepath not in filenames:
            filenames.append(filepath)
        _Distribution.parse_config_files(dist, filenames=filenames)
        handlers = parse_configuration(
            dist, dist.command_options,
            ignore_option_errors=ignore_option_errors)
    finally:
        # Always restore the caller's working directory.
        os.chdir(current_directory)
    return configuration_to_dict(handlers)
def configuration_to_dict(handlers):
    """Return configuration data gathered by the given handlers as a dict.

    :param list[ConfigHandler] handlers: Handlers list,
        usually from parse_configuration()
    :rtype: dict
    """
    result = defaultdict(dict)
    for handler in handlers:
        alias = handler.section_prefix
        target = handler.target_obj
        for option in handler.set_options:
            # Prefer an explicit get_<option>() accessor; fall back to the
            # plain attribute when the target object has none.
            getter = getattr(target, 'get_%s' % option, None)
            result[alias][option] = (
                getattr(target, option) if getter is None else getter()
            )
    return result
def parse_configuration(
        distribution, command_options, ignore_option_errors=False):
    """Perform additional parsing of configuration options
    for a distribution.

    Returns a list of used option handlers.

    :param Distribution distribution:
    :param dict command_options:
    :param bool ignore_option_errors: Whether to silently ignore
        options, values of which could not be resolved (e.g. due to
        exceptions in directives such as file:, attr:, etc.).
        If False exceptions are propagated as expected.
    :rtype: list
    """
    # Metadata must be handled before the options section, in this order.
    targets = (
        (ConfigMetadataHandler, distribution.metadata),
        (ConfigOptionsHandler, distribution),
    )
    handlers = []
    for handler_class, target_obj in targets:
        handler = handler_class(target_obj, command_options,
                                ignore_option_errors)
        handler.parse()
        handlers.append(handler)
    return handlers
class ConfigHandler(object):
    """Handles metadata supplied in configuration files."""
    section_prefix = None
    """Prefix for config sections handled by this handler.
    Must be provided by class heirs.
    """
    aliases = {}
    """Options aliases.
    For compatibility with various packages. E.g.: d2to1 and pbr.
    Note: `-` in keys is replaced with `_` by config parser.
    """
    def __init__(self, target_obj, options, ignore_option_errors=False):
        # Keep only the sections addressed to this handler, with the prefix
        # stripped (e.g. 'options.extras_require' -> 'extras_require').
        sections = {}
        section_prefix = self.section_prefix
        for section_name, section_options in options.items():
            if not section_name.startswith(section_prefix):
                continue
            section_name = section_name.replace(section_prefix, '').strip('.')
            sections[section_name] = section_options
        self.ignore_option_errors = ignore_option_errors
        self.target_obj = target_obj
        self.sections = sections
        # Names of options actually written to target_obj (see __setitem__).
        self.set_options = []
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        raise NotImplementedError(
            '%s must provide .parsers property' % self.__class__.__name__)
    def __setitem__(self, option_name, value):
        # Unique sentinel: distinguishes "attribute missing" from any real
        # value (including None) returned by getattr below.
        unknown = tuple()
        target_obj = self.target_obj
        # Translate alias into real name.
        option_name = self.aliases.get(option_name, option_name)
        current_value = getattr(target_obj, option_name, unknown)
        if current_value is unknown:
            raise KeyError(option_name)
        if current_value:
            # Already inhabited. Skipping.
            return
        skip_option = False
        parser = self.parsers.get(option_name)
        if parser:
            try:
                value = parser(value)
            except Exception:
                # With ignore_option_errors the bad option is dropped
                # silently; otherwise the parser error propagates.
                skip_option = True
                if not self.ignore_option_errors:
                    raise
        if skip_option:
            return
        # Prefer an explicit set_<option>() mutator when the target has one.
        setter = getattr(target_obj, 'set_%s' % option_name, None)
        if setter is None:
            setattr(target_obj, option_name, value)
        else:
            setter(value)
        self.set_options.append(option_name)
    @classmethod
    def _parse_list(cls, value, separator=','):
        """Represents value as a list.
        Value is split either by separator (defaults to comma) or by lines.
        :param value:
        :param separator: List items separator character.
        :rtype: list
        """
        if isinstance(value, list): # _get_parser_compound case
            return value
        if '\n' in value:
            value = value.splitlines()
        else:
            value = value.split(separator)
        return [chunk.strip() for chunk in value if chunk.strip()]
    @classmethod
    def _parse_dict(cls, value):
        """Represents value as a dict.
        :param value:
        :rtype: dict
        """
        separator = '='
        result = {}
        for line in cls._parse_list(value):
            key, sep, val = line.partition(separator)
            if sep != separator:
                raise DistutilsOptionError(
                    'Unable to parse option value to dict: %s' % value)
            result[key.strip()] = val.strip()
        return result
    @classmethod
    def _parse_bool(cls, value):
        """Represents value as boolean.
        :param value:
        :rtype: bool
        """
        value = value.lower()
        return value in ('1', 'true', 'yes')
    @classmethod
    def _parse_file(cls, value):
        """Represents value as a string, allowing including text
        from nearest files using `file:` directive.
        Directive is sandboxed and won't reach anything outside
        directory with setup.py.
        Examples:
            file: LICENSE
            file: src/file.txt
        :param str value:
        :rtype: str
        """
        if not isinstance(value, string_types):
            return value
        include_directive = 'file:'
        if not value.startswith(include_directive):
            return value
        current_directory = os.getcwd()
        filepath = value.replace(include_directive, '').strip()
        filepath = os.path.abspath(filepath)
        # NOTE(review): a plain prefix check also admits sibling paths that
        # merely share the prefix (e.g. /project-extra vs /project);
        # consider comparing against current_directory + os.sep.
        if not filepath.startswith(current_directory):
            raise DistutilsOptionError(
                '`file:` directive can not access %s' % filepath)
        if os.path.isfile(filepath):
            with io.open(filepath, encoding='utf-8') as f:
                value = f.read()
        return value
    @classmethod
    def _parse_attr(cls, value):
        """Represents value as a module attribute.
        Examples:
            attr: package.attr
            attr: package.module.attr
        :param str value:
        :rtype: str
        """
        attr_directive = 'attr:'
        if not value.startswith(attr_directive):
            return value
        attrs_path = value.replace(attr_directive, '').strip().split('.')
        attr_name = attrs_path.pop()
        module_name = '.'.join(attrs_path)
        module_name = module_name or '__init__'
        # Temporarily put the current directory first on sys.path so the
        # referenced (local) module is importable; undone below.
        sys.path.insert(0, os.getcwd())
        try:
            module = import_module(module_name)
            value = getattr(module, attr_name)
        finally:
            sys.path = sys.path[1:]
        return value
    @classmethod
    def _get_parser_compound(cls, *parse_methods):
        """Returns parser function to represents value as a list.
        Parses a value applying given methods one after another.
        :param parse_methods:
        :rtype: callable
        """
        def parse(value):
            parsed = value
            for method in parse_methods:
                parsed = method(parsed)
            return parsed
        return parse
    @classmethod
    def _parse_section_to_dict(cls, section_options, values_parser=None):
        """Parses section options into a dictionary.
        Optionally applies a given parser to values.
        :param dict section_options:
        :param callable values_parser:
        :rtype: dict
        """
        value = {}
        values_parser = values_parser or (lambda val: val)
        # section_options maps option name -> (source, raw value); the source
        # part is not needed here.
        for key, (_, val) in section_options.items():
            value[key] = values_parser(val)
        return value
    def parse_section(self, section_options):
        """Parses configuration file section.
        :param dict section_options:
        """
        for (name, (_, value)) in section_options.items():
            try:
                self[name] = value
            except KeyError:
                pass # Keep silent for a new option may appear anytime.
    def parse(self):
        """Parses configuration file items from one
        or more related sections.
        """
        for section_name, section_options in self.sections.items():
            method_postfix = ''
            if section_name: # [section.option] variant
                method_postfix = '_%s' % section_name
            section_parser_method = getattr(
                self,
                # Dots in section names are translated into dunderscores.
                ('parse_section%s' % method_postfix).replace('.', '__'),
                None)
            if section_parser_method is None:
                raise DistutilsOptionError(
                    'Unsupported distribution option section: [%s.%s]' % (
                        self.section_prefix, section_name))
            section_parser_method(section_options)
class ConfigMetadataHandler(ConfigHandler):
    # Handles the [metadata] section of setup.cfg.
    section_prefix = 'metadata'
    aliases = {
        'home_page': 'url',
        'summary': 'description',
        'classifier': 'classifiers',
        'platform': 'platforms',
    }
    strict_mode = False
    """We need to keep it loose, to be partially compatible with
    `pbr` and `d2to1` packages which also uses `metadata` section.
    """
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        parse_file = self._parse_file
        return {
            'platforms': parse_list,
            'keywords': parse_list,
            'provides': parse_list,
            'requires': parse_list,
            'obsoletes': parse_list,
            'classifiers': self._get_parser_compound(parse_file, parse_list),
            'license': parse_file,
            'description': parse_file,
            'long_description': parse_file,
            'version': self._parse_version,
        }
    def parse_section_classifiers(self, section_options):
        """Parses configuration file section.
        :param dict section_options:
        """
        classifiers = []
        for begin, (_, rest) in section_options.items():
            # NOTE(review): appears to reassemble classifier strings that the
            # ini parser split at the first colon (keys are lower-cased by the
            # parser, hence .title()) -- confirm against the config reader.
            classifiers.append('%s :%s' % (begin.title(), rest))
        self['classifiers'] = classifiers
    def _parse_version(self, value):
        """Parses `version` option value.
        :param value:
        :rtype: str
        """
        # The attr: directive may resolve to a callable, an iterable of
        # version components, or a plain value; normalize all to a string.
        version = self._parse_attr(value)
        if callable(version):
            version = version()
        if not isinstance(version, string_types):
            if hasattr(version, '__iter__'):
                version = '.'.join(map(str, version))
            else:
                version = '%s' % version
        return version
class ConfigOptionsHandler(ConfigHandler):
    # Handles the [options] section (and its [options.*] subsections)
    # of setup.cfg.
    section_prefix = 'options'
    @property
    def parsers(self):
        """Metadata item name to parser function mapping."""
        parse_list = self._parse_list
        # Requirement specifiers may contain commas, hence the ';' separator.
        parse_list_semicolon = partial(self._parse_list, separator=';')
        parse_bool = self._parse_bool
        parse_dict = self._parse_dict
        return {
            'zip_safe': parse_bool,
            'use_2to3': parse_bool,
            'include_package_data': parse_bool,
            'package_dir': parse_dict,
            'use_2to3_fixers': parse_list,
            'use_2to3_exclude_fixers': parse_list,
            'convert_2to3_doctests': parse_list,
            'scripts': parse_list,
            'eager_resources': parse_list,
            'dependency_links': parse_list,
            'namespace_packages': parse_list,
            'install_requires': parse_list_semicolon,
            'setup_requires': parse_list_semicolon,
            'tests_require': parse_list_semicolon,
            'packages': self._parse_packages,
            'entry_points': self._parse_file,
        }
    def _parse_packages(self, value):
        """Parses `packages` option value.
        :param value:
        :rtype: list
        """
        find_directive = 'find:'
        if not value.startswith(find_directive):
            return self._parse_list(value)
        # Read function arguments from a dedicated section.
        find_kwargs = self.parse_section_packages__find(
            self.sections.get('packages.find', {}))
        from setuptools import find_packages
        return find_packages(**find_kwargs)
    def parse_section_packages__find(self, section_options):
        """Parses `packages.find` configuration file section.
        To be used in conjunction with _parse_packages().
        :param dict section_options:
        """
        section_data = self._parse_section_to_dict(
            section_options, self._parse_list)
        # Only forward keys find_packages() actually accepts, and only when
        # they carry a non-empty value.
        valid_keys = ['where', 'include', 'exclude']
        find_kwargs = dict(
            [(k, v) for k, v in section_data.items() if k in valid_keys and v])
        where = find_kwargs.get('where')
        if where is not None:
            find_kwargs['where'] = where[0] # cast list to single val
        return find_kwargs
    def parse_section_entry_points(self, section_options):
        """Parses `entry_points` configuration file section.
        :param dict section_options:
        """
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        self['entry_points'] = parsed
    def _parse_package_data(self, section_options):
        # Shared helper for package_data / exclude_package_data: the '*'
        # wildcard section maps to the '' key expected by setuptools.
        parsed = self._parse_section_to_dict(section_options, self._parse_list)
        root = parsed.get('*')
        if root:
            parsed[''] = root
            del parsed['*']
        return parsed
    def parse_section_package_data(self, section_options):
        """Parses `package_data` configuration file section.
        :param dict section_options:
        """
        self['package_data'] = self._parse_package_data(section_options)
    def parse_section_exclude_package_data(self, section_options):
        """Parses `exclude_package_data` configuration file section.
        :param dict section_options:
        """
        self['exclude_package_data'] = self._parse_package_data(
            section_options)
    def parse_section_extras_require(self, section_options):
        """Parses `extras_require` configuration file section.
        :param dict section_options:
        """
        # Extras are requirement lists too, so use the ';' separator.
        parse_list = partial(self._parse_list, separator=';')
        self['extras_require'] = self._parse_section_to_dict(
            section_options, parse_list)
| gpl-3.0 |
mnieber/shared-goal | django/goal/views.py | 2 | 4762 | from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.views.generic import View
from django.utils.decorators import method_decorator
from .models import Member, GlobalUser, Goal
from .forms import GoalForm
from suggestion.models import Suggestion
from notification.models import Notification
def membership_required(view):
    """Decorate *view* so only members of the current goal may access it.

    Non-members are redirected to the goal's registration page; members fall
    through to the wrapped view unchanged. Relies on middleware having set
    ``request.member`` and ``request.goal``.
    """
    # wraps() preserves the view's __name__/__doc__/__module__, which Django
    # debugging tools and URL introspection rely on.
    @wraps(view)
    def _wrapper(request, *args, **kw):
        if not request.member:
            return HttpResponseRedirect(
                reverse(
                    'register',
                    kwargs=dict(goal_slug=request.goal.slug)
                )
            )
        return view(request, *args, **kw)
    return _wrapper
class GoalView(View):
    """Landing page of a goal: its published suggestions, best-rated first."""
    def get(self, request, goal_slug):
        published = request.goal.suggestions.filter(
            is_draft=False
        ).order_by('-avg_rating')
        return render(request, 'goal/goal.html', {'suggestions': published})
class MembersView(View):
    """List every member of the current goal."""
    def get(self, request, goal_slug):
        return render(
            request,
            'goal/members.html',
            {'members': request.goal.members.all()},
        )
class GoalsView(View):
    """Overview of all published goals, newest first."""
    def get(self, request):
        published_goals = Goal.objects.filter(
            is_draft=False
        ).order_by('-pub_date')
        return render(request, 'goal/goals.html', {'goals': published_goals})
class JoinGoalView(View):
    """Make the logged-in user a member of the goal, then show its page."""
    @method_decorator(login_required)
    def get(self, request, goal_slug):
        # Idempotent: an already-existing membership is simply reused.
        Member.objects.get_or_create(
            goal=request.goal,
            global_user=request.global_user,
        )
        goal_url = reverse('goal', kwargs=dict(goal_slug=request.goal.slug))
        return HttpResponseRedirect(goal_url)
class NewGoalView(View):
    """Create-or-edit flow for a user's draft goal.

    GET shows the form for the user's current draft; POST saves, submits or
    cancels it depending on which button was pressed.
    """
    def on_cancel(self):
        # Draft abandoned: back to the home page.
        return HttpResponseRedirect(reverse('home'))
    def on_save(self, goal):
        # Goal published: go to its public page.
        return HttpResponseRedirect(
            reverse('goal', kwargs=dict(goal_slug=goal.slug))
        )
    @method_decorator(login_required)
    def get(self, request):
        return self.handle(request)
    @method_decorator(login_required)
    def post(self, request):
        return self.handle(request)
    def handle(self, request):
        # GET and POST share this handler; `submit` identifies which POST
        # button was pressed ('save', 'cancel', or another form action).
        goal = GoalForm.get_or_create_draft(request)
        is_posting = request.method == 'POST'
        submit = request.POST.get('submit', 'none')
        bound_form = None
        if is_posting:
            bound_form = GoalForm.get_posted_form(request, goal)
            should_accept_data = bound_form.update_goal_and_save(
                goal, submit)
            if should_accept_data and submit == 'save':
                return self.on_save(goal)
            elif submit == 'cancel':
                return self.on_cancel()
        # Re-display the bound form (with its errors) only after a failed
        # 'save'; otherwise render a fresh form for the current draft.
        form = (
            bound_form
            if is_posting and submit == 'save' else
            GoalForm(instance=goal)
        )
        context = {
            'form': form,
            'crop_settings': {
                'url': goal.image.url if goal.image else "",
                'klass': 'goal--image crop-image',
                'output_key': form.cropped_image_key,
                # aspectRatio matches the 360x200 rendering of goal images.
                'jcrop': dict(
                    aspectRatio=360.0 / 200.0,
                    setSelect=[0, 0, 10000, 10000],
                ),
            },
            'show_image_form': True,
            # Validation errors are only surfaced on an explicit 'save'.
            'show_errors': submit == 'save',
            'post_button_label': 'Submit',
            'submit_button_header': (
                'All done, press Submit to publish your goal'
            ),
            'show_delete_button': False
        }
        return render(request, 'goal/edit_goal.html', context)
class ProfileView(View):
    """Show a user's profile: their published suggestions and, when the
    profile belongs to the requesting user, their notifications."""

    @method_decorator(login_required)
    def get(self, request, goal_slug, username):
        global_user = get_object_or_404(
            GlobalUser,
            user__username=username
        )
        # Bug fix: list the *profile* user's suggestions. Previously this
        # filtered on request.global_user, so visiting someone else's
        # profile showed the visitor's own suggestions.
        suggestions = Suggestion.objects.filter(
            owner=global_user,
            is_draft=False
        ).order_by('-avg_rating')
        if request.goal:
            suggestions = suggestions.filter(goal=request.goal)
        # Notifications are only ever rendered for the requesting user
        # (see show_notifications below), so filtering on the requester
        # is intentional here.
        notifications = Notification.objects.filter(
            owner=request.global_user
        ).order_by('-pub_date')
        context = {
            'suggestions': suggestions,
            'notifications': notifications,
            'global_user': global_user,
            # Notifications are private: shown only on one's own profile.
            'show_notifications': request.global_user == global_user,
        }
        return render(request, 'goal/profile.html', context)
| apache-2.0 |
1upon0/rfid-auth-system | GUI/printer/Pillow-2.7.0/build/lib.linux-x86_64-2.7/PIL/WmfImagePlugin.py | 26 | 4139 | #
# The Python Imaging Library
# $Id$
#
# WMF stub codec
#
# history:
# 1996-12-14 fl Created
# 2004-02-22 fl Turned into a stub driver
# 2004-02-23 fl Added EMF support
#
# Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.2"
from PIL import Image, ImageFile, _binary
_handler = None
if str != bytes:
long = int
##
# Install application-specific WMF image handler.
#
# @param handler Handler object.
def register_handler(handler):
    """Install an application-specific WMF/EMF rendering handler.

    The handler is consulted by WmfStubImageFile._load and is expected
    to provide ``open(im)`` / ``load(im)`` (and ``save`` for writing).

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
if hasattr(Image.core, "drawwmf"):
    # install default handler (windows only)
    class WmfHandler:
        # Default renderer backed by the Image.core.drawwmf C routine.
        def open(self, im):
            im.mode = "RGB"
            self.bbox = im.info["wmf_bbox"]
        def load(self, im):
            im.fp.seek(0) # rewind
            # Render the metafile to raw BGR pixels; the stride is the
            # row length rounded up to a multiple of 4 bytes, and the
            # negative orientation flips the image vertically.
            return Image.frombytes(
                "RGB", im.size,
                Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
                "raw", "BGR", (im.size[0]*3 + 3) & -4, -1
            )
    register_handler(WmfHandler())
# --------------------------------------------------------------------
word = _binary.i16le  # unsigned 16-bit little-endian reader

def short(c, o=0):
    """Read a signed 16-bit little-endian value from *c* at offset *o*."""
    value = word(c, o)
    # Fold the unsigned reading into the signed 16-bit range.
    return value - 65536 if value >= 32768 else value

dword = _binary.i32le  # unsigned 32-bit little-endian reader
#
# --------------------------------------------------------------------
# Read WMF file
def _accept(prefix):
return (
prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or
prefix[:4] == b"\x01\x00\x00\x00"
)
##
# Image plugin for Windows metafiles.
class WmfStubImageFile(ImageFile.StubImageFile):
    """Stub image plugin for Windows (enhanced) metafiles.

    Only the header is parsed here (to determine size and dpi); actual
    rendering is delegated to the handler installed via
    register_handler().
    """

    format = "WMF"
    format_description = "Windows Metafile"

    def _open(self):
        # check placable header
        s = self.fp.read(80)
        if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00":
            # placeable windows metafile
            # get units per inch
            inch = word(s, 14)
            # get bounding box
            x0 = short(s, 6)
            y0 = short(s, 8)
            x1 = short(s, 10)
            y1 = short(s, 12)
            # normalize size to 72 dots per inch
            size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch
            self.info["wmf_bbox"] = x0, y0, x1, y1
            self.info["dpi"] = 72
            # print self.mode, self.size, self.info
            # sanity check (standard metafile header)
            if s[22:26] != b"\x01\x00\t\x00":
                raise SyntaxError("Unsupported WMF file format")
        elif dword(s) == 1 and s[40:44] == b" EMF":
            # enhanced metafile
            # get bounding box
            x0 = dword(s, 8)
            y0 = dword(s, 12)
            x1 = dword(s, 16)
            y1 = dword(s, 20)
            # get frame (in 0.01 millimeter units)
            frame = dword(s, 24), dword(s, 28), dword(s, 32), dword(s, 36)
            # normalize size to 72 dots per inch
            size = x1 - x0, y1 - y0
            # calculate dots per inch from bbox and frame
            # Bug fix: xdpi previously used (x1 - y0), mixing the x and y
            # axes; each axis now uses its own extent.
            xdpi = 2540 * (x1 - x0) // (frame[2] - frame[0])
            ydpi = 2540 * (y1 - y0) // (frame[3] - frame[1])
            self.info["wmf_bbox"] = x0, y0, x1, y1
            if xdpi == ydpi:
                self.info["dpi"] = xdpi
            else:
                self.info["dpi"] = xdpi, ydpi
        else:
            raise SyntaxError("Unsupported file format")
        self.mode = "RGB"
        self.size = size
        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        # Return the installed application handler (None if none installed).
        return _handler
def _save(im, fp, filename):
    """Save hook: delegate to the installed application handler.

    Raises IOError when no handler with a ``save`` method is installed.
    """
    # Bug fix: hasattr was previously called on the literal string
    # "_handler" instead of the handler object, so the capability check
    # never actually inspected the handler.
    if _handler is None or not hasattr(_handler, "save"):
        raise IOError("WMF save handler not installed")
    _handler.save(im, fp, filename)
#
# --------------------------------------------------------------------
# Registry stuff
# Register the stub plugin with PIL so .wmf/.emf files are recognized,
# sniffed via _accept, and saved through the installed handler.
Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
Image.register_save(WmfStubImageFile.format, _save)
Image.register_extension(WmfStubImageFile.format, ".wmf")
Image.register_extension(WmfStubImageFile.format, ".emf")
| apache-2.0 |
mitchrule/Miscellaneous | Django_Project/django/Lib/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | 1730 | 3405 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}

def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Get a TreeBuilder class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive).
               Supported values are "dom", "etree" and "lxml".

    implementation - (applies to the "etree" and "dom" tree types) a
                     module implementing the tree type, e.g.
                     xml.etree.ElementTree or xml.etree.cElementTree."""
    kind = treeType.lower()
    if kind in treeBuilderCache:
        return treeBuilderCache[kind]

    if kind == "dom":
        from . import dom
        if implementation is None:
            # Come up with a sane default (pref. from the stdlib)
            from xml.dom import minidom
            implementation = minidom
        # NEVER cache here, caching is done in the dom submodule
        return dom.getDomModule(implementation, **kwargs).TreeBuilder
    if kind == "lxml":
        from . import etree_lxml
        treeBuilderCache[kind] = etree_lxml.TreeBuilder
        return treeBuilderCache[kind]
    if kind == "etree":
        from . import etree
        if implementation is None:
            implementation = default_etree
        # NEVER cache here, caching is done in the etree submodule
        return etree.getETreeModule(implementation, **kwargs).TreeBuilder

    raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
| mit |
syjeon/new_edx | common/djangoapps/student/management/commands/cert_restriction.py | 256 | 3879 | from django.core.management.base import BaseCommand, CommandError
import os
from optparse import make_option
from student.models import UserProfile
import csv
class Command(BaseCommand):
help = """
Sets or gets certificate restrictions for users
from embargoed countries. (allow_certificate in
userprofile)
CSV should be comma delimited with double quoted entries.
$ ... cert_restriction --import path/to/userlist.csv
Export a list of students who have "allow_certificate" in
userprofile set to True
$ ... cert_restriction --output path/to/export.csv
Enable a single user so she is not on the restricted list
$ ... cert_restriction -e user
Disable a single user so she is on the restricted list
$ ... cert_restriction -d user
"""
option_list = BaseCommand.option_list + (
make_option('-i', '--import',
metavar='IMPORT_FILE',
dest='import',
default=False,
help='csv file to import, comma delimitted file with '
'double-quoted entries'),
make_option('-o', '--output',
metavar='EXPORT_FILE',
dest='output',
default=False,
help='csv file to export'),
make_option('-e', '--enable',
metavar='STUDENT',
dest='enable',
default=False,
help="enable a single student's certificate"),
make_option('-d', '--disable',
metavar='STUDENT',
dest='disable',
default=False,
help="disable a single student's certificate")
)
def handle(self, *args, **options):
if options['output']:
if os.path.exists(options['output']):
raise CommandError("File {0} already exists".format(
options['output']))
disabled_users = UserProfile.objects.filter(
allow_certificate=False)
with open(options['output'], 'w') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
for user in disabled_users:
csvwriter.writerow([user.user.username])
elif options['import']:
if not os.path.exists(options['import']):
raise CommandError("File {0} does not exist".format(
options['import']))
print "Importing students from {0}".format(options['import'])
students = None
with open(options['import']) as csvfile:
student_list = csv.reader(csvfile, delimiter=',',
quotechar='"')
students = [student[0] for student in student_list]
if not students:
raise CommandError(
"Unable to read student data from {0}".format(
options['import']))
UserProfile.objects.filter(user__username__in=students).update(
allow_certificate=False)
elif options['enable']:
print "Enabling {0} for certificate download".format(
options['enable'])
cert_allow = UserProfile.objects.get(
user__username=options['enable'])
cert_allow.allow_certificate = True
cert_allow.save()
elif options['disable']:
print "Disabling {0} for certificate download".format(
options['disable'])
cert_allow = UserProfile.objects.get(
user__username=options['disable'])
cert_allow.allow_certificate = False
cert_allow.save()
| agpl-3.0 |
40223142/cda11 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testpatch.py | 739 | 53126 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from unittest.mock import (
NonCallableMock, CallableMixin, patch, sentinel,
MagicMock, Mock, NonCallableMagicMock, patch, _patch,
DEFAULT, call, _get_target
)
builtin_string = 'builtins'  # name of the builtins module (Python 3)
PTModule = sys.modules[__name__]  # this test module itself, used as a patch target
MODNAME = '%s.PTModule' % __name__  # dotted path to the PTModule alias above
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something  # module-level attribute patched by the tests
something_else = sentinel.SomethingElse  # second module-level patch target
class Foo(object):
    # Fixture class used as an autospec/patch target by the tests below.
    def __init__(self, a):
        # one required constructor argument (checked by autospec tests)
        pass
    def f(self, a):
        # method with one required positional argument
        pass
    def g(self):
        # method taking no arguments beyond self
        pass
    foo = 'bar'  # plain string class attribute
class Bar(object):
    # Secondary fixture class with a single no-argument method.
    def a(self):
        pass
foo_name = '%s.Foo' % __name__  # dotted path to Foo, used as a patch target
def function(a, b=Foo):
    # Module-level function fixture; the default referencing Foo is
    # exercised by the autospec tests.
    pass
class Container(object):
    """Minimal mapping-like fixture backed by a plain dict (``values``)."""

    def __init__(self):
        self.values = {}

    def __getitem__(self, name):
        return self.values[name]

    def __setitem__(self, name, value):
        self.values[name] = value

    def __delitem__(self, name):
        # dict.pop raises KeyError for a missing key, same as `del`.
        self.values.pop(name)

    def __iter__(self):
        return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
self.assertNotEqual(outerMock2, mock2, "unexpected value for mock1")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'frooble', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(Something, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
foo =Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
def test_patch_propogrates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
self.assertIsNotNone(holder.exc_info[1],
'exception value not propgated')
self.assertIsNotNone(holder.exc_info[2],
'exception traceback not propgated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
self.assertRaises(AttributeError, getattr, m, 'doesnotexit')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
if __name__ == '__main__':
    # Allow running this test module directly, outside the test runner.
    unittest.main()
| gpl-3.0 |
minhtuancn/odoo | addons/hr_timesheet/wizard/__init__.py | 381 | 1079 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sign_in_out
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Lh4cKg/brython | www/src/Lib/errno.py | 624 | 4096 | """
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
"""
# Single source of truth: map numeric error codes to their symbolic
# names, e.g. errorcode[2] == 'ENOENT'.
errorcode = {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO',
6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN',
12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY',
17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR',
22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY',
27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK',
32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG',
37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG',
43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST',
48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE',
53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT',
59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR',
64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV',
69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT',
74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG',
79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC',
84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK',
89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT',
93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP',
96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE',
99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET',
103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN',
107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT',
111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY',
115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM',
119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT',
123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY',
127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED',
130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}

# Bind each symbolic name (EPERM, ENOENT, ...) as a module-level integer
# constant.  Deriving the constants from the errorcode table replaces
# ~130 hand-maintained assignments and guarantees the table and the
# constants can never drift out of sync.
for _code, _name in errorcode.items():
    globals()[_name] = _code
del _code, _name
| bsd-3-clause |
Alphadelta14/ansible | lib/ansible/playbook/become.py | 150 | 4878 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
#from ansible.utils.display import deprecated
class Become:

    """Mixin providing the privilege escalation ('become') behaviour
    shared by playbook objects (plays, blocks, tasks)."""

    # Privilege escalation attributes.
    _become = FieldAttribute(isa='bool')
    _become_method = FieldAttribute(isa='string')
    _become_user = FieldAttribute(isa='string')

    def __init__(self):
        return super(Become, self).__init__()

    def _detect_privilege_escalation_conflict(self, ds):
        """Raise AnsibleParserError if *ds* mixes parameters from more
        than one privilege escalation family (become/sudo/su)."""

        has_become = 'become' in ds or 'become_user' in ds
        has_sudo = 'sudo' in ds or 'sudo_user' in ds
        has_su = 'su' in ds or 'su_user' in ds

        if has_become:
            msg = 'The become params ("become", "become_user") and'
            if has_sudo:
                raise AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg)
            elif has_su:
                raise AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg)
        elif has_sudo and has_su:
            raise AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')

    def _preprocess_data_become(self, ds):
        """Preprocess the playbook data for become attributes

        This is called from the Base object's preprocess_data() method which
        in turn is called pretty much anytime any sort of playbook object
        (plays, tasks, blocks, etc) are created.

        Legacy sudo/su parameters are rewritten into the equivalent
        become parameters, and defaults are filled in when 'become' is
        requested without a method/user.
        """
        self._detect_privilege_escalation_conflict(ds)

        # Privilege escalation, backwards compatibility for sudo/su
        if 'sudo' in ds or 'sudo_user' in ds:
            ds['become_method'] = 'sudo'
            if 'sudo' in ds:
                ds['become'] = ds['sudo']
                del ds['sudo']
            else:
                ds['become'] = True
            if 'sudo_user' in ds:
                ds['become_user'] = ds['sudo_user']
                del ds['sudo_user']
            #deprecated("Instead of sudo/sudo_user, use become/become_user and set become_method to 'sudo' (default)")
        elif 'su' in ds or 'su_user' in ds:
            ds['become_method'] = 'su'
            if 'su' in ds:
                ds['become'] = ds['su']
                del ds['su']
            else:
                ds['become'] = True
            if 'su_user' in ds:
                ds['become_user'] = ds['su_user']
                del ds['su_user']
            #deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")

        # if we are becoming someone else, but some fields are unset,
        # make sure they're initialized to the default config values
        if ds.get('become', False):
            if ds.get('become_method', None) is None:
                ds['become_method'] = C.DEFAULT_BECOME_METHOD
            if ds.get('become_user', None) is None:
                ds['become_user'] = C.DEFAULT_BECOME_USER

        return ds

    def _get_parent_or_local(self, name):
        # Shared lookup used by the become* attribute getters below:
        # prefer the parent's value when this object participates in a
        # parent chain.  Consolidates three previously copy-pasted
        # getter bodies.
        if hasattr(self, '_get_parent_attribute'):
            return self._get_parent_attribute(name)
        return self._attributes[name]

    def _get_attr_become(self):
        '''Override for the 'become' getattr fetcher, used from Base.'''
        return self._get_parent_or_local('become')

    def _get_attr_become_method(self):
        '''Override for the 'become_method' getattr fetcher, used from Base.'''
        return self._get_parent_or_local('become_method')

    def _get_attr_become_user(self):
        '''Override for the 'become_user' getattr fetcher, used from Base.'''
        return self._get_parent_or_local('become_user')
| gpl-3.0 |
kerr-huang/SL4A | python/src/Lib/plat-os2emx/pwd.py | 67 | 6415 | # this module is an OS/2 oriented replacement for the pwd standard
# extension module.
# written by Andrew MacIntyre, April 2001.
# updated July 2003, adding field accessor support
# note that this implementation checks whether ":" or ";" as used as
# the field separator character. Path conversions are are applied when
# the database uses ":" as the field separator character.
"""Replacement for pwd standard extension module, intended for use on
OS/2 and similar systems which don't normally have an /etc/passwd file.
The standard Unix password database is an ASCII text file with 7 fields
per record (line), separated by a colon:
- user name (string)
- password (encrypted string, or "*" or "")
- user id (integer)
- group id (integer)
- description (usually user's name)
- home directory (path to user's home directory)
- shell (path to the user's login shell)
(see the section 8.1 of the Python Library Reference)
This implementation differs from the standard Unix implementation by
allowing use of the platform's native path separator character - ';' on OS/2,
DOS and MS-Windows - as the field separator in addition to the Unix
standard ":". Additionally, when ":" is the separator path conversions
are applied to deal with any munging of the drive letter reference.
The module looks for the password database at the following locations
(in order first to last):
- ${ETC_PASSWD} (or %ETC_PASSWD%)
- ${ETC}/passwd (or %ETC%/passwd)
- ${PYTHONHOME}/Etc/passwd (or %PYTHONHOME%/Etc/passwd)
Classes
-------
None
Functions
---------
getpwuid(uid) - return the record for user-id uid as a 7-tuple
getpwnam(name) - return the record for user 'name' as a 7-tuple
getpwall() - return a list of 7-tuples, each tuple being one record
(NOTE: the order is arbitrary)
Attributes
----------
passwd_file - the path of the password database file
"""
import os
# try and find the passwd file: candidate locations are checked first
# to last; the first one that can actually be opened wins.
__passwd_path = []
if os.environ.has_key('ETC_PASSWD'):
    __passwd_path.append(os.environ['ETC_PASSWD'])
if os.environ.has_key('ETC'):
    __passwd_path.append('%s/passwd' % os.environ['ETC'])
if os.environ.has_key('PYTHONHOME'):
    __passwd_path.append('%s/Etc/passwd' % os.environ['PYTHONHOME'])

passwd_file = None
for __i in __passwd_path:
    try:
        __f = open(__i, 'r')
        __f.close()
        passwd_file = __i
        break
    except (IOError, OSError):
        # Narrowed from a bare 'except:' -- we only want to skip
        # candidates that cannot be opened, not hide unrelated errors
        # (KeyboardInterrupt, MemoryError, ...).
        pass
# path conversion handlers
def __nullpathconv(path):
    # No drive-letter munging needed; just normalise the alternate path
    # separator to the primary one.
    # NOTE(review): assumes os.altsep is not None (true on OS/2/DOS/
    # Windows, where this module is meant to run) -- confirm.
    return path.replace(os.altsep, os.sep)
def __unixpathconv(path):
    # two known drive letter variations: "x;" and "$x"
    # NOTE(review): indexes path[0]/path[1] unconditionally, so paths
    # shorter than 2 characters would raise IndexError -- confirm inputs
    # are always full field values.
    if path[0] == '$':
        # "$x..." -> "x:..."
        conv = path[1] + ':' + path[2:]
    elif path[1] == ';':
        # "x;..." -> "x:..."
        conv = path[0] + ':' + path[2:]
    else:
        conv = path
    return conv.replace(os.altsep, os.sep)
# decide what field separator we can try to use - Unix standard, with
# the platform's path separator as an option. No special field conversion
# handler is required when using the platform's path separator as field
# separator, but are required for the home directory and shell fields when
# using the standard Unix (":") field separator.
__field_sep = {':': __unixpathconv}
if os.pathsep:
if os.pathsep != ':':
__field_sep[os.pathsep] = __nullpathconv
# helper routine to identify which separator character is in use
def __get_field_sep(record):
    # Identify which of the registered separator characters delimits
    # this record; a valid 7-field record contains the separator exactly
    # six times.
    fs = None
    for c in __field_sep.keys():
        # there should be 6 delimiter characters (for 7 fields)
        if record.count(c) == 6:
            fs = c
            break
    if fs:
        return fs
    else:
        # No candidate matched: the record is malformed.
        raise KeyError, '>> passwd database fields not delimited <<'
# class to match the new record field name accessors.
# the resulting object is intended to behave like a read-only tuple,
# with each member also accessible by a field name.
class Passwd:
    # Read-only record matching the field-name accessors of the standard
    # pwd module: behaves like a 7-tuple while also exposing each field
    # as a pw_* attribute.
    def __init__(self, name, passwd, uid, gid, gecos, dir, shell):
        record = (name, passwd, uid, gid, gecos, dir, shell)
        field_names = ('pw_name', 'pw_passwd', 'pw_uid', 'pw_gid',
                       'pw_gecos', 'pw_dir', 'pw_shell')
        # Assign through __dict__ because __setattr__ rejects writes.
        for field, value in zip(field_names, record):
            self.__dict__[field] = value
        self.__dict__['_record'] = record
    def __len__(self):
        return 7
    def __getitem__(self, key):
        return self._record[key]
    def __setattr__(self, name, value):
        raise AttributeError('attribute read-only: %s' % name)
    def __repr__(self):
        return str(self._record)
    def __cmp__(self, other):
        # Python 2 ordering hook: compare via the string form of the
        # record tuple (mirrors __repr__).
        this = str(self._record)
        if this == other:
            return 0
        if this < other:
            return -1
        return 1
# read the whole file, parsing each entry into tuple form
# with dictionaries to speed recall by UID or passwd name
def __read_passwd_file():
    """Parse the passwd database into two indexes.

    Returns a (by_uid, by_name) pair of dicts mapping to Passwd records.
    Raises KeyError when no database is available or no valid entry was
    found.
    """
    if passwd_file:
        passwd = open(passwd_file, 'r')
    else:
        # Python-3-compatible raise form (was `raise KeyError, '...'`).
        raise KeyError('>> no password database <<')
    uidx = {}
    namx = {}
    sep = None
    try:
        while 1:
            entry = passwd.readline().strip()
            if len(entry) > 6:
                if sep is None:
                    # Detect the field separator from the first real entry.
                    sep = __get_field_sep(entry)
                fields = entry.split(sep)
                for i in (2, 3):
                    fields[i] = int(fields[i])
                for i in (5, 6):
                    fields[i] = __field_sep[sep](fields[i])
                record = Passwd(*fields)
                # First entry wins for duplicate UIDs / names
                # ('in' test replaces Python-2-only dict.has_key).
                if fields[2] not in uidx:
                    uidx[fields[2]] = record
                if fields[0] not in namx:
                    namx[fields[0]] = record
            elif len(entry) > 0:
                pass  # skip empty or malformed records
            else:
                # NOTE: a blank line is indistinguishable from EOF here and
                # ends parsing; this matches the original behaviour.
                break
    finally:
        # Close the file even if a malformed entry raises during parsing.
        passwd.close()
    if len(uidx) == 0:
        raise KeyError
    return (uidx, namx)
# return the passwd database entry by UID
def getpwuid(uid):
    """Return the Passwd record for the numeric user id *uid*.

    Raises KeyError if the database cannot be read or the UID is unknown.
    """
    u, n = __read_passwd_file()
    return u[uid]

# return the passwd database entry by passwd name
def getpwnam(name):
    """Return the Passwd record for the login *name*.

    Raises KeyError if the database cannot be read or the name is unknown.
    """
    u, n = __read_passwd_file()
    return n[name]

# return all the passwd database entries
def getpwall():
    """Return the Passwd records for every entry in the database."""
    u, n = __read_passwd_file()
    return n.values()

# test harness
if __name__ == '__main__':
    getpwall()
| apache-2.0 |
bonitadecker77/python-for-android | python3-alpha/extra_modules/gdata/sample_util.py | 47 | 10741 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib.request, urllib.parse, urllib.error
import gdata.gauth
__author__ = 'j.s@google.com (Jeff Scudder)'
# Authorization mechanism selectors compared against auth_type in
# SettingsUtil.authorize_client.
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3

# OAuth signature methods compared against oauth_type.
HMAC = 1
RSA = 2
class SettingsUtil(object):
  """Gathers user preferences from flags or command prompts.

  An instance of this object stores the choices made by the user. At some
  point it might be useful to save the user's preferences so that they do
  not need to always set flags or answer preference prompts.
  """

  def __init__(self, prefs=None):
    # prefs maps setting names to remembered values (see reuse= below).
    self.prefs = prefs or {}

  def get_param(self, name, prompt='', secret=False, ask=True, reuse=False):
    """Look up the setting *name*, asking the user if necessary.

    Lookup order: this object's stored prefs, then --<name>/--<name>=<value>
    command line arguments, then (if ask is True) an interactive prompt.

    Args:
      name: str Name of the setting.
      prompt: str Text displayed when prompting the user.
      secret: bool If True, input is read without echoing (getpass).
      ask: bool If False, never prompt interactively.
      reuse: bool If True, remember the value in self.prefs for next time.

    Returns:
      The value of the setting, or None if unavailable and ask is False.
    """
    # First, check in this objects stored preferences.
    if name in self.prefs:
      return self.prefs[name]
    # Second, check for a command line parameter.
    value = None
    for i in range(len(sys.argv)):
      if sys.argv[i].startswith('--%s=' % name):
        value = sys.argv[i].split('=')[1]
      elif sys.argv[i] == '--%s' % name:
        value = sys.argv[i + 1]
    # Third, if it was not on the command line, ask the user to input the
    # value.
    if value is None and ask:
      prompt = '%s: ' % prompt
      if secret:
        value = getpass.getpass(prompt)
      else:
        value = input(prompt)
    # If we want to save the preference for reuse in future requests, add it
    # to this object's prefs.
    if value is not None and reuse:
      self.prefs[name] = value
    return value

  def authorize_client(self, client, auth_type=None, service=None,
                       source=None, scopes=None, oauth_type=None,
                       consumer_key=None, consumer_secret=None):
    """Uses command line arguments, or prompts user for token values.

    Configures client.auth_token via ClientLogin, AuthSub or OAuth,
    depending on auth_type (CLIENT_LOGIN/AUTHSUB/OAUTH); missing values
    are resolved through get_param.  On success the token blob is cached
    under 'client_auth_token' in self.prefs.
    """
    # Already authorized during a previous call; nothing to do.
    if 'client_auth_token' in self.prefs:
      return
    if auth_type is None:
      auth_type = int(self.get_param(
          'auth_type', 'Please choose the authorization mechanism you want'
          ' to use.\n'
          '1. to use your email address and password (ClientLogin)\n'
          '2. to use a web browser to visit an auth web page (AuthSub)\n'
          '3. if you have registed to use OAuth\n', reuse=True))
    # Get the scopes for the services we want to access.
    if auth_type == AUTHSUB or auth_type == OAUTH:
      if scopes is None:
        scopes = self.get_param(
            'scopes', 'Enter the URL prefixes (scopes) for the resources you '
            'would like to access.\nFor multiple scope URLs, place a comma '
            'between each URL.\n'
            'Example: http://www.google.com/calendar/feeds/,'
            'http://www.google.com/m8/feeds/\n', reuse=True).split(',')
      elif isinstance(scopes, str):
        scopes = scopes.split(',')

    if auth_type == CLIENT_LOGIN:
      email = self.get_param('email', 'Please enter your username',
                             reuse=False)
      password = self.get_param('password', 'Password', True, reuse=False)
      if service is None:
        service = self.get_param(
            'service', 'What is the name of the service you wish to access?'
            '\n(See list:'
            ' http://code.google.com/apis/gdata/faq.html#clientlogin)',
            reuse=True)
      if source is None:
        source = self.get_param('source', ask=False, reuse=True)
      client.client_login(email, password, source=source, service=service)
    elif auth_type == AUTHSUB:
      auth_sub_token = self.get_param('auth_sub_token', ask=False, reuse=True)
      session_token = self.get_param('session_token', ask=False, reuse=True)
      private_key = None
      auth_url = None
      single_use_token = None
      rsa_private_key = self.get_param(
          'rsa_private_key',
          'If you want to use secure mode AuthSub, please provide the\n'
          ' location of your RSA private key which corresponds to the\n'
          ' certificate you have uploaded for your domain. If you do not\n'
          ' have an RSA key, simply press enter', reuse=True)
      if rsa_private_key:
        try:
          private_key_file = open(rsa_private_key, 'rb')
          private_key = private_key_file.read()
          private_key_file.close()
        except IOError:
          print('Unable to read private key from file')

      if private_key is not None:
        # Secure-mode AuthSub: sign requests with the RSA private key.
        if client.auth_token is None:
          if session_token:
            client.auth_token = gdata.gauth.SecureAuthSubToken(
                session_token, private_key, scopes)
            self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
                client.auth_token)
            return
          elif auth_sub_token:
            client.auth_token = gdata.gauth.SecureAuthSubToken(
                auth_sub_token, private_key, scopes)
            client.upgrade_token()
            self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
                client.auth_token)
            return
        auth_url = gdata.gauth.generate_auth_sub_url(
            'http://gauthmachine.appspot.com/authsub', scopes, True)
        print('with a private key, get ready for this URL', auth_url)
      else:
        if client.auth_token is None:
          if session_token:
            client.auth_token = gdata.gauth.AuthSubToken(session_token,
                                                         scopes)
            self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
                client.auth_token)
            return
          elif auth_sub_token:
            client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token,
                                                         scopes)
            client.upgrade_token()
            self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
                client.auth_token)
            return
        auth_url = gdata.gauth.generate_auth_sub_url(
            'http://gauthmachine.appspot.com/authsub', scopes)

      # Have the user authorize in the browser, then exchange the
      # single-use token for a session token.
      print('Visit the following URL in your browser to authorize this app:')
      print(str(auth_url))
      print('After agreeing to authorize the app, copy the token value from')
      print(' the URL. Example: "www.google.com/?token=ab12" token value is')
      print(' ab12')
      token_value = input('Please enter the token value: ')
      if private_key is not None:
        single_use_token = gdata.gauth.SecureAuthSubToken(
            token_value, private_key, scopes)
      else:
        single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
      client.auth_token = single_use_token
      client.upgrade_token()
    elif auth_type == OAUTH:
      if oauth_type is None:
        oauth_type = int(self.get_param(
            'oauth_type', 'Please choose the authorization mechanism you want'
            ' to use.\n'
            '1. use an HMAC signature using your consumer key and secret\n'
            '2. use RSA with your private key to sign requests\n',
            reuse=True))
      consumer_key = self.get_param(
          'consumer_key', 'Please enter your OAuth conumer key '
          'which identifies your app', reuse=True)
      if oauth_type == HMAC:
        consumer_secret = self.get_param(
            'consumer_secret', 'Please enter your OAuth conumer secret '
            'which you share with the OAuth provider', True, reuse=False)
        # Swap out this code once the client supports requesting an oauth
        # token.
        # Get a request token.
        request_token = client.get_oauth_token(
            scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
            consumer_secret=consumer_secret)
      elif oauth_type == RSA:
        rsa_private_key = self.get_param(
            'rsa_private_key',
            'Please provide the location of your RSA private key which\n'
            ' corresponds to the certificate you have uploaded for your'
            ' domain.',
            reuse=True)
        try:
          private_key_file = open(rsa_private_key, 'rb')
          private_key = private_key_file.read()
          private_key_file.close()
        except IOError:
          print('Unable to read private key from file')
        request_token = client.get_oauth_token(
            scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
            rsa_private_key=private_key)
      else:
        print('Invalid OAuth signature type')
        return None

      # Authorize the request token in the browser.
      print('Visit the following URL in your browser to authorize this app:')
      print(str(request_token.generate_authorization_url()))
      print('After agreeing to authorize the app, copy URL from the browser\'s')
      print(' address bar.')
      url = input('Please enter the url: ')
      gdata.gauth.authorize_request_token(request_token, url)
      # Exchange for an access token.
      client.auth_token = client.get_access_token(request_token)
    else:
      print('Invalid authorization type.')
      return None
    if client.auth_token:
      self.prefs['client_auth_token'] = gdata.gauth.token_to_blob(
          client.auth_token)
def get_param(name, prompt='', secret=False, ask=True):
  """Module-level convenience wrapper around SettingsUtil.get_param.

  Uses a throwaway SettingsUtil, so nothing is remembered between calls.
  """
  return SettingsUtil().get_param(name=name, prompt=prompt, secret=secret,
                                  ask=ask)
def authorize_client(client, auth_type=None, service=None, source=None,
                     scopes=None, oauth_type=None, consumer_key=None,
                     consumer_secret=None):
  """Uses command line arguments, or prompts user for token values.

  Module-level convenience wrapper around SettingsUtil.authorize_client
  using a throwaway SettingsUtil instance.
  """
  return SettingsUtil().authorize_client(
      client=client, auth_type=auth_type, service=service, source=source,
      scopes=scopes, oauth_type=oauth_type, consumer_key=consumer_key,
      consumer_secret=consumer_secret)
def print_options():
  """Displays usage information, available command line params."""
  # TODO: fill in the usage description for authorizing the client.
  # Placeholder implementation: currently prints only a blank line.
  print('')
| apache-2.0 |
Fokko/incubator-airflow | airflow/providers/google/marketing_platform/example_dags/example_search_ads.py | 2 | 2898 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use SearchAds.
"""
import os
from airflow import models
from airflow.providers.google.marketing_platform.operators.search_ads import (
GoogleSearchAdsDownloadReportOperator, GoogleSearchAdsInsertReportOperator,
)
from airflow.providers.google.marketing_platform.sensors.search_ads import GoogleSearchAdsReportSensor
from airflow.utils import dates
# [START howto_search_ads_env_variables]
# Identifiers for the Search Ads 360 report scope, taken from the
# environment so the example can run against any account.
AGENCY_ID = os.environ.get("GMP_AGENCY_ID")
ADVERTISER_ID = os.environ.get("GMP_ADVERTISER_ID")
GCS_BUCKET = os.environ.get("GMP_GCS_BUCKET", "test-cm-bucket")

# Report request body submitted via GoogleSearchAdsInsertReportOperator.
REPORT = {
    "reportScope": {"agencyId": AGENCY_ID, "advertiserId": ADVERTISER_ID},
    "reportType": "account",
    "columns": [{"columnName": "agency"}, {"columnName": "lastModifiedTimestamp"}],
    "includeRemovedEntities": False,
    "statisticsCurrency": "usd",
    "maxRowsPerFile": 1000000,
    "downloadFormat": "csv",
}
# [END howto_search_ads_env_variables]
default_args = {"start_date": dates.days_ago(1)}

with models.DAG(
    "example_search_ads",
    default_args=default_args,
    schedule_interval=None,  # Override to match your needs
) as dag:
    # [START howto_search_ads_generate_report_operator]
    generate_report = GoogleSearchAdsInsertReportOperator(
        report=REPORT, task_id="generate_report"
    )
    # [END howto_search_ads_generate_report_operator]

    # [START howto_search_ads_get_report_id]
    # The insert operator pushes the new report's id to XCom under
    # 'report_id'; the Jinja template pulls it at runtime.
    report_id = "{{ task_instance.xcom_pull('generate_report', key='report_id') }}"
    # [END howto_search_ads_get_report_id]

    # [START howto_search_ads_get_report_operator]
    wait_for_report = GoogleSearchAdsReportSensor(
        report_id=report_id, task_id="wait_for_report"
    )
    # [END howto_search_ads_get_report_operator]

    # [START howto_search_ads_getfile_report_operator]
    download_report = GoogleSearchAdsDownloadReportOperator(
        report_id=report_id, bucket_name=GCS_BUCKET, task_id="download_report"
    )
    # [END howto_search_ads_getfile_report_operator]

    # Pipeline order: generate the report, wait until it is ready,
    # then download the file into the GCS bucket.
    generate_report >> wait_for_report >> download_report
| apache-2.0 |
LukeM12/samba | lib/dnspython/dns/rdtypes/ANY/ISDN.py | 100 | 3250 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
class ISDN(dns.rdata.Rdata):
    """ISDN record

    @ivar address: the ISDN address
    @type address: string
    @ivar subaddress: the ISDN subaddress (or '' if not present)
    @type subaddress: string
    @see: RFC 1183"""

    __slots__ = ['address', 'subaddress']

    def __init__(self, rdclass, rdtype, address, subaddress):
        super(ISDN, self).__init__(rdclass, rdtype)
        self.address = address
        self.subaddress = subaddress

    def to_text(self, origin=None, relativize=True, **kw):
        # Render as one or two quoted, escaped strings per master-file syntax.
        if self.subaddress:
            return '"%s" "%s"' % (dns.rdata._escapify(self.address),
                                  dns.rdata._escapify(self.subaddress))
        else:
            return '"%s"' % dns.rdata._escapify(self.address)

    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Parse an ISDN rdata from text: an address string optionally
        followed by a subaddress string on the same line."""
        address = tok.get_string()
        t = tok.get()
        if not t.is_eol_or_eof():
            tok.unget(t)
            subaddress = tok.get_string()
        else:
            tok.unget(t)
            subaddress = ''
        tok.get_eol()
        return cls(rdclass, rdtype, address, subaddress)
    from_text = classmethod(from_text)

    def to_wire(self, file, compress=None, origin=None):
        # Wire format: one length byte followed by the address, then
        # (optionally) one length byte followed by the subaddress.
        # NOTE(review): chr()/str handling assumes Python 2 byte strings --
        # confirm before any Python 3 port.
        l = len(self.address)
        assert l < 256
        byte = chr(l)
        file.write(byte)
        file.write(self.address)
        l = len(self.subaddress)
        if l > 0:
            assert l < 256
            byte = chr(l)
            file.write(byte)
            file.write(self.subaddress)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        """Unpack an ISDN rdata from its length-prefixed wire encoding,
        raising FormError if the lengths do not match rdlen."""
        l = ord(wire[current])
        current += 1
        rdlen -= 1
        if l > rdlen:
            raise dns.exception.FormError
        address = wire[current : current + l].unwrap()
        current += l
        rdlen -= l
        if rdlen > 0:
            # Remaining bytes must be exactly one subaddress string.
            l = ord(wire[current])
            current += 1
            rdlen -= 1
            if l != rdlen:
                raise dns.exception.FormError
            subaddress = wire[current : current + l].unwrap()
        else:
            subaddress = ''
        return cls(rdclass, rdtype, address, subaddress)
    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Order by address first, then subaddress (Python 2 cmp semantics).
        v = cmp(self.address, other.address)
        if v == 0:
            v = cmp(self.subaddress, other.subaddress)
        return v
| gpl-3.0 |
bcharlas/mytrunk | scripts/checks-and-tests/checks/checkSaveLoadClumps.py | 3 | 1963 | #!/usr/bin/env python
# encoding: utf-8
# Script generates clumps and saves them
# Then it tries to load the saved simulation
# See https://bugs.launchpad.net/bugs/1560171
# Thanks to Bettina Suhr for providing the
# minimal test script.
from yade import pack
import tempfile, shutil
import time
# Define the material shared by all bodies:
id_Mat = O.materials.append(FrictMat(young=1e6, poisson=0.3, density=1000, frictionAngle=1))
partType = 'clumps'

# Define engines:
O.engines = [
    ForceResetter(),
    InsertionSortCollider([Bo1_Sphere_Aabb(), Bo1_Facet_Aabb()]),
    InteractionLoop(
        [Ig2_Sphere_Sphere_ScGeom(), Ig2_Facet_Sphere_ScGeom()],
        [Ip2_FrictMat_FrictMat_FrictPhys()],
        [Law2_ScGeom_FrictPhys_CundallStrack()]
    ),
    NewtonIntegrator(damping=0.7, gravity=[0, 0, -9.81])
]

# Create a box:
O.bodies.append(geom.facetBox(center=(0.0, 0.0, 0.25), extents=(0.125, 0.125, 0.25), wallMask=31))

# Add particles, either spheres or clumps
if partType == 'spheres':
    sp = pack.SpherePack()
    sp.makeCloud((-0.125, -0.125, 0), (0.125, 0.125, 0.5), rMean=37.6e-3/2., rRelFuzz=0.0, num=100)
    sp.toSimulation()
    O.bodies.updateClumpProperties(discretization=10)  # correct mass, volume, inertia!!
elif partType == 'clumps':
    sp = pack.SpherePack()
    c1 = pack.SpherePack([((0., 0., 0.), 37.6e-3/2.), ((37.6e-3/2., 0., 0.), 25e-3/2.)])  # overlap between both spheres
    sp.makeClumpCloud((-0.125, -0.125, 0), (0.125, 0.125, 0.5), [c1], num=80)
    sp.toSimulation()
    O.bodies.updateClumpProperties(discretization=10)  # correct mass, volume, inertia!!
else:
    # print() call form also parses under Python 3 (the original bare
    # `print "..."` statement is Python-2-only syntax).
    print("ERROR! choose either spheres or clumps for partType!")

O.dt = 1e-6

# Write restart files, run, save, then reload and continue, to verify that
# clumps survive a save/load cycle (see launchpad bug 1560171).
tmp_dir = tempfile.mkdtemp()
O.save(tmp_dir + '/restartMinWorkEx_' + partType + '_Initial')
O.run(100000, True)
O.save(tmp_dir + '/restartMinWorkEx_' + partType + str(O.iter))
time.sleep(1)
O.reset()
time.sleep(1)
O.load(tmp_dir + '/restartMinWorkEx_' + partType + '100000')
O.run(1000, True)
# Remove the temporary restart files; `shutil` is imported above but was
# never used, leaking one temp directory per run.
shutil.rmtree(tmp_dir)
| gpl-2.0 |
zenodo/zenodo | zenodo/modules/sipstore/utils.py | 2 | 2339 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2017 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Utilities for SIPStore module."""
from __future__ import absolute_import, unicode_literals
import arrow
from invenio_sipstore.api import SIP
from invenio_sipstore.archivers.utils import chunks
build_agent_info = SIP._build_agent_info
def generate_bag_path(recid, iso_timestamp):
    """Generate the directory path components for a record's BagIt archive.

    The recid string is cut into 3-character chunks, followed by a literal
    'r' and the SIP's creation timestamp, e.g.::

        generate_bag_path('12345', '2017-09-15T11:44:24.590537+00:00') ==
        ['123', '45', 'r', '2017-09-15T11:44:24.590537+00:00']

    :param recid: recid value
    :type recid: str
    :param iso_timestamp: ISO-8601 formatted creation date (UTC) of the SIP.
    :type iso_timestamp: str
    """
    path_parts = list(chunks(recid, 3))
    path_parts.append('r')
    path_parts.append(iso_timestamp)
    return path_parts
def archive_directory_builder(sip):
    """Build the archive directory path for a SIP.

    :param sip: SIP which is to be archived
    :type sip: invenio_sipstore.models.SIP
    :return: list of str
    """
    created_iso = arrow.get(sip.model.created).isoformat()
    recid_value = sip.model.record_sips[0].pid.pid_value
    return generate_bag_path(recid_value, created_iso)
def sipmetadata_name_formatter(sipmetadata):
    """Return the archive filename for a SIPMetadata object."""
    metadata_type = sipmetadata.type
    return "record-{name}.{format}".format(
        name=metadata_type.name,
        format=metadata_type.format,
    )
| gpl-2.0 |
draugiskisprendimai/odoo | openerp/addons/base/res/res_lang.py | 49 | 12612 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
from locale import localeconv
import logging
import re
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class lang(osv.osv):
    """res.lang model: installed languages and their locale formatting
    conventions (date/time formats, number separators and digit grouping)."""
    _name = "res.lang"
    _description = "Languages"

    _disallowed_datetime_patterns = tools.DATETIME_FORMATS_MAP.keys()
    _disallowed_datetime_patterns.remove('%y') # this one is in fact allowed, just not good practice

    def install_lang(self, cr, uid, **args):
        """
        This method is called from openerp/addons/base/base_data.xml to load
        some language and set it as the default for every partners. The
        language is set via tools.config by the RPC 'create' method on the
        'db' object. This is a fragile solution and something else should be
        found.
        """
        lang = tools.config.get('lang')
        if not lang:
            return False
        lang_ids = self.search(cr, uid, [('code','=', lang)])
        if not lang_ids:
            self.load_lang(cr, uid, lang)
        ir_values_obj = self.pool.get('ir.values')
        default_value = ir_values_obj.get(cr, uid, 'default', False, ['res.partner'])
        if not default_value:
            ir_values_obj.set(cr, uid, 'default', False, 'lang', ['res.partner'], lang)
        return True

    def load_lang(self, cr, uid, lang, lang_name=None):
        """Create a res.lang record for *lang*, taking date/time formats and
        number separators from the operating-system locale when available."""
        # create the language with locale information
        fail = True
        iso_lang = tools.get_iso_codes(lang)
        for ln in tools.get_locales(lang):
            try:
                locale.setlocale(locale.LC_ALL, str(ln))
                fail = False
                break
            except locale.Error:
                continue
        if fail:
            lc = locale.getdefaultlocale()[0]
            msg = 'Unable to get information for locale %s. Information from the default locale (%s) have been used.'
            _logger.warning(msg, lang, lc)

        if not lang_name:
            lang_name = tools.ALL_LANGUAGES.get(lang, lang)

        def fix_xa0(s):
            """Fix badly-encoded non-breaking space Unicode character from locale.localeconv(),
            coercing to utf-8, as some platform seem to output localeconv() in their system
            encoding, e.g. Windows-1252"""
            if s == '\xa0':
                return '\xc2\xa0'
            return s

        def fix_datetime_format(format):
            """Python's strftime supports only the format directives
            that are available on the platform's libc, so in order to
            be 100% cross-platform we map to the directives required by
            the C standard (1989 version), always available on platforms
            with a C standard implementation."""
            # For some locales, nl_langinfo returns a D_FMT/T_FMT that contains
            # unsupported '%-' patterns, e.g. for cs_CZ
            format = format.replace('%-', '%')
            for pattern, replacement in tools.DATETIME_FORMATS_MAP.iteritems():
                format = format.replace(pattern, replacement)
            return str(format)

        lang_info = {
            'code': lang,
            'iso_code': iso_lang,
            'name': lang_name,
            'translatable': 1,
            'date_format' : fix_datetime_format(locale.nl_langinfo(locale.D_FMT)),
            'time_format' : fix_datetime_format(locale.nl_langinfo(locale.T_FMT)),
            'decimal_point' : fix_xa0(str(locale.localeconv()['decimal_point'])),
            'thousands_sep' : fix_xa0(str(locale.localeconv()['thousands_sep'])),
        }
        lang_id = False
        try:
            lang_id = self.create(cr, uid, lang_info)
        finally:
            # Always restore the process-wide locale changed above.
            tools.resetlocale()
        return lang_id

    def _check_format(self, cr, uid, ids, context=None):
        """Constraint: reject date/time formats that contain non-portable
        strftime directives (see _disallowed_datetime_patterns)."""
        for lang in self.browse(cr, uid, ids, context=context):
            for pattern in self._disallowed_datetime_patterns:
                if (lang.time_format and pattern in lang.time_format)\
                    or (lang.date_format and pattern in lang.date_format):
                    return False
        return True

    def _check_grouping(self, cr, uid, ids, context=None):
        """Constraint: 'grouping' must evaluate to a list of integers."""
        for lang in self.browse(cr, uid, ids, context=context):
            try:
                if not all(isinstance(x, int) for x in eval(lang.grouping)):
                    return False
            except Exception:
                return False
        return True

    def _get_default_date_format(self, cursor, user, context=None):
        return '%m/%d/%Y'

    def _get_default_time_format(self, cursor, user, context=None):
        return '%H:%M:%S'

    _columns = {
        'name': fields.char('Name', required=True),
        'code': fields.char('Locale Code', size=16, required=True, help='This field is used to set/get locales for user'),
        'iso_code': fields.char('ISO code', size=16, required=False, help='This ISO code is the name of po files to use for translations'),
        'translatable': fields.boolean('Translatable'),
        'active': fields.boolean('Active'),
        'direction': fields.selection([('ltr', 'Left-to-Right'), ('rtl', 'Right-to-Left')], 'Direction', required=True),
        'date_format':fields.char('Date Format', required=True),
        'time_format':fields.char('Time Format', required=True),
        'grouping':fields.char('Separator Format', required=True,help="The Separator Format should be like [,n] where 0 < n :starting from Unit digit.-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500;[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. Provided ',' as the thousand separator in each case."),
        'decimal_point':fields.char('Decimal Separator', required=True),
        'thousands_sep':fields.char('Thousands Separator'),
    }
    _defaults = {
        'active': 1,
        'translatable': 0,
        'direction': 'ltr',
        'date_format':_get_default_date_format,
        'time_format':_get_default_time_format,
        'grouping': '[]',
        'decimal_point': '.',
        'thousands_sep': ',',
    }
    _sql_constraints = [
        ('name_uniq', 'unique (name)', 'The name of the language must be unique !'),
        ('code_uniq', 'unique (code)', 'The code of the language must be unique !'),
    ]
    _constraints = [
        (_check_format, 'Invalid date/time format directive specified. Please refer to the list of allowed directives, displayed when you edit a language.', ['time_format', 'date_format']),
        (_check_grouping, "The Separator Format should be like [,n] where 0 < n :starting from Unit digit.-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500;[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. Provided ',' as the thousand separator in each case.", ['grouping'])
    ]

    # Cached per (lang, monetary); the cache is cleared in write() below.
    @tools.ormcache(skiparg=3)
    def _lang_data_get(self, cr, uid, lang, monetary=False):
        """Return (grouping, thousands_sep, decimal_point) for *lang*,
        falling back to en_US when a language code is unknown and to the
        process locale for a missing thousands separator."""
        if type(lang) in (str, unicode):
            lang = self.search(cr, uid, [('code', '=', lang)]) or \
                self.search(cr, uid, [('code', '=', 'en_US')])
            lang = lang[0]
        conv = localeconv()
        lang_obj = self.browse(cr, uid, lang)
        thousands_sep = lang_obj.thousands_sep or conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
        decimal_point = lang_obj.decimal_point
        grouping = lang_obj.grouping
        return grouping, thousands_sep, decimal_point

    def write(self, cr, uid, ids, vals, context=None):
        """Forbid changing a language's code; invalidate the formatting
        cache so format() picks up new separators/grouping."""
        if 'code' in vals:
            for rec in self.browse(cr, uid, ids, context):
                if rec.code != vals['code']:
                    raise osv.except_osv(_('User Error'), _("Language code cannot be modified."))
        for lang_id in ids:
            self._lang_data_get.clear_cache(self)
        return super(lang, self).write(cr, uid, ids, vals, context)

    def unlink(self, cr, uid, ids, context=None):
        """Forbid deleting en_US, the user's preferred language or any
        active language; drop the translations of a deleted language."""
        if context is None:
            context = {}
        languages = self.read(cr, uid, ids, ['code','active'], context=context)
        for language in languages:
            ctx_lang = context.get('lang')
            if language['code']=='en_US':
                raise osv.except_osv(_('User Error'), _("Base Language 'en_US' can not be deleted!"))
            if ctx_lang and (language['code']==ctx_lang):
                raise osv.except_osv(_('User Error'), _("You cannot delete the language which is User's Preferred Language!"))
            if language['active']:
                raise osv.except_osv(_('User Error'), _("You cannot delete the language which is Active!\nPlease de-activate the language first."))
            trans_obj = self.pool.get('ir.translation')
            trans_ids = trans_obj.search(cr, uid, [('lang','=',language['code'])], context=context)
            trans_obj.unlink(cr, uid, trans_ids, context=context)
        return super(lang, self).unlink(cr, uid, ids, context=context)

    #
    # IDS: can be a list of IDS or a list of XML_IDS
    #
    def format(self, cr, uid, ids, percent, value, grouping=False, monetary=False, context=None):
        """ Format() will return the language-specific output for float values"""
        if percent[0] != '%':
            raise ValueError("format() must be given exactly one %char format specifier")

        formatted = percent % value
        # floats and decimal ints need special action!
        if grouping:
            lang_grouping, thousands_sep, decimal_point = \
                self._lang_data_get(cr, uid, ids[0], monetary)
            eval_lang_grouping = eval(lang_grouping)

            if percent[-1] in 'eEfFgG':
                # Group only the integer part; keep the decimal part intact.
                parts = formatted.split('.')
                parts[0], _ = intersperse(parts[0], eval_lang_grouping, thousands_sep)
                formatted = decimal_point.join(parts)
            elif percent[-1] in 'diu':
                formatted = intersperse(formatted, eval_lang_grouping, thousands_sep)[0]

        return formatted

# import re, operator
# _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
#                          r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')

# Old-style OpenERP API: instantiating the class registers the model.
lang()
def split(l, counts):
    """Cut sequence *l* into consecutive pieces whose sizes come from *counts*.

    A width of -1 stops the cutting; a width of 0 repeats the previous
    width until the sequence is exhausted.  Whatever is left over is
    appended as a final piece.

    >>> split("hello world", [])
    ['hello world']
    >>> split("hello world", [1])
    ['h', 'ello world']
    >>> split("hello world", [2])
    ['he', 'llo world']
    >>> split("hello world", [2,3])
    ['he', 'llo', ' world']
    >>> split("hello world", [2,3,0])
    ['he', 'llo', ' wo', 'rld']
    >>> split("hello world", [2,-1,3])
    ['he', 'llo world']
    """
    pieces = []
    remaining = l
    repeat_width = len(l)  # width reused when a zero count is seen
    for width in counts:
        if not remaining or width == -1:
            break
        if width == 0:
            while remaining:
                pieces.append(remaining[:repeat_width])
                remaining = remaining[repeat_width:]
            break
        pieces.append(remaining[:width])
        remaining = remaining[width:]
        repeat_width = width
    if remaining:
        pieces.append(remaining)
    return pieces
# Splits a value into (non-digit prefix, groupable body, trailing rest).
intersperse_pat = re.compile('([^0-9]*)([^ ]*)(.*)')

def intersperse(string, counts, separator=''):
    """Insert *separator* into the digits of *string* according to the
    grouping widths in *counts* (applied from the rightmost digit).

    Returns a (formatted_string, number_of_separators_inserted) pair,
    e.g. intersperse('106500', [3, 2, -1], ',') == ('1,06,500', 2).
    """
    prefix, body, suffix = intersperse_pat.match(string).groups()
    # Group from the right by reversing, splitting, then un-reversing.
    reversed_chunks = split(body[::-1], counts)
    grouped = separator.join(chunk[::-1] for chunk in reversed(reversed_chunks))
    n_separators = len(reversed_chunks) - 1 if reversed_chunks else 0
    return prefix + grouped + suffix, n_separators
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MrLoick/python-for-android | python-modules/twisted/twisted/python/hook.py | 90 | 5266 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I define support for hookable instance methods.
These are methods which you can register pre-call and post-call external
functions to augment their functionality. People familiar with more esoteric
languages may think of these as \"method combinations\".
This could be used to add optional preconditions, user-extensible callbacks
(a-la emacs) or a thread-safety mechanism.
The four exported calls are:
- L{addPre}
- L{addPost}
- L{removePre}
- L{removePost}
All have the signature (class, methodName, callable), and the callable they
take must always have the signature (instance, *args, **kw) unless the
particular signature of the method they hook is known.
Hooks should typically not throw exceptions, however, no effort will be made by
this module to prevent them from doing so. Pre-hooks will always be called,
but post-hooks will only be called if the pre-hooks do not raise any exceptions
(they will still be called if the main method raises an exception). The return
values and exception status of the main method will be propogated (assuming
none of the hooks raise an exception). Hooks will be executed in the order in
which they are added.
"""
# System Imports
import string
### Public Interface
class HookError(Exception):
    """Raised when a hooking invariant is violated."""
def addPre(klass, name, func):
    """hook.addPre(klass, name, func) -> None
    Add a function to be called before the method klass.name is invoked.
    """
    # Delegate to the shared machinery with the PRE phase-name selector.
    _addHook(klass, name, PRE, func)
def addPost(klass, name, func):
    """hook.addPost(klass, name, func) -> None
    Add a function to be called after the method klass.name is invoked.
    """
    # Delegate to the shared machinery with the POST phase-name selector.
    _addHook(klass, name, POST, func)
def removePre(klass, name, func):
    """hook.removePre(klass, name, func) -> None
    Remove a function (previously registered with addPre) so that it
    is no longer executed before klass.name.
    """
    # Delegate to the shared machinery with the PRE phase-name selector.
    _removeHook(klass, name, PRE, func)
def removePost(klass, name, func):
    """hook.removePost(klass, name, func) -> None
    Remove a function (previously registered with addPost) so that it
    is no longer executed after klass.name.
    """
    # Docstring previously said "removePre" (copy-paste error) -- fixed.
    # Delegate to the shared machinery with the POST phase-name selector.
    _removeHook(klass, name, POST, func)
### "Helper" functions.
hooked_func = """
import %(module)s
def %(name)s(*args, **kw):
klazz = %(module)s.%(klass)s
for preMethod in klazz.%(preName)s:
preMethod(*args, **kw)
try:
return klazz.%(originalName)s(*args, **kw)
finally:
for postMethod in klazz.%(postName)s:
postMethod(*args, **kw)
"""
# Name templates for the per-class attributes that hold the pre-hook list,
# the post-hook list, and the original (unhooked) implementation.
_PRE = '__hook_pre_%s_%s_%s__'
_POST = '__hook_post_%s_%s_%s__'
_ORIG = '__hook_orig_%s_%s_%s__'

def _XXX(k, n, s):
    """Fill template *s* with the mangled module name, class name and method
    name of class *k* / method *n*.

    Dots in the module path become underscores so the result is a single
    valid identifier.  Uses str.replace instead of the Python-2-only
    ``string.replace`` function (behavior is identical).
    """
    return s % (k.__module__.replace('.', '_'), k.__name__, n)

def PRE(k, n):
    "(private) munging to turn a method name into a pre-hook-method-name"
    return _XXX(k, n, _PRE)

def POST(k, n):
    "(private) munging to turn a method name into a post-hook-method-name"
    return _XXX(k, n, _POST)

def ORIG(k, n):
    "(private) munging to turn a method name into an `original' identifier"
    return _XXX(k, n, _ORIG)
def _addHook(klass, name, phase, func):
    """(private) Register *func* for the given *phase* of klass.name,
    hooking the method first if necessary."""
    _enhook(klass, name)
    attr = phase(klass, name)
    if not hasattr(klass, attr):
        setattr(klass, attr, [])
    getattr(klass, attr).append(func)
def _removeHook(klass, name, phase, func):
    """(private) Unregister *func* from the given *phase* of klass.name and
    unhook the method entirely once both phase lists are empty."""
    if not hasattr(klass, ORIG(klass, name)):
        raise HookError("no hooks present!")
    hooks = getattr(klass, phase(klass, name))
    try:
        hooks.remove(func)
    except ValueError:
        raise HookError("hook %s not found in removal list for %s" %
                        (name, klass))
    if not getattr(klass, PRE(klass, name)) and not getattr(klass, POST(klass, name)):
        _dehook(klass, name)
def _enhook(klass, name):
    "(private) causes a certain method name to be hooked on a class"
    # Idempotent: if the original implementation is already stashed, the
    # method is hooked and there is nothing to do.
    if hasattr(klass, ORIG(klass, name)):
        return
    # Replacement: run every pre-hook, then the original; post-hooks run in
    # a finally block so they fire even if the original raises, while the
    # original's return value / exception still propagates to the caller.
    def newfunc(*args, **kw):
        for preMethod in getattr(klass, PRE(klass, name)):
            preMethod(*args, **kw)
        try:
            return getattr(klass, ORIG(klass, name))(*args, **kw)
        finally:
            for postMethod in getattr(klass, POST(klass, name)):
                postMethod(*args, **kw)
    try:
        newfunc.func_name = name
    except TypeError:
        # Older python's don't let you do this
        pass
    # im_func / func_name are Python-2-only attributes; this module predates
    # Python 3.
    oldfunc = getattr(klass, name).im_func
    setattr(klass, ORIG(klass, name), oldfunc)
    setattr(klass, PRE(klass, name), [])
    setattr(klass, POST(klass, name), [])
    setattr(klass, name, newfunc)
def _dehook(klass, name):
    """(private) Restore klass.name to its original, unhooked implementation
    and drop the bookkeeping attributes."""
    stash = ORIG(klass, name)
    if not hasattr(klass, stash):
        raise HookError("Cannot unhook!")
    setattr(klass, name, getattr(klass, stash))
    delattr(klass, PRE(klass, name))
    delattr(klass, POST(klass, name))
    delattr(klass, stash)
| apache-2.0 |
s0930342674/pyload | module/plugins/hooks/LinkdecrypterComHook.py | 13 | 1246 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.MultiHook import MultiHook
class LinkdecrypterComHook(MultiHook):
    __name__ = "LinkdecrypterComHook"
    __type__ = "hook"
    __version__ = "1.07"
    __status__ = "testing"

    __config__ = [("activated" , "bool" , "Activated" , True ),
                  ("pluginmode" , "all;listed;unlisted", "Use for plugins" , "all"),
                  ("pluginlist" , "str" , "Plugin list (comma separated)", "" ),
                  ("reload" , "bool" , "Reload plugin list" , True ),
                  ("reloadinterval", "int" , "Reload interval in hours" , 12 )]

    __description__ = """Linkdecrypter.com hook plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com")]

    def get_hosters(self):
        """Scrape linkdecrypter.com and return the supported hoster domains
        as a list of strings.

        The local result used to be bound to a name shadowing the builtin
        ``list``; renamed to ``hosters``.
        """
        html = self.load("http://linkdecrypter.com/").replace("(g)", "")
        hosters = re.search(r'>Supported\(\d+\)</b>: <i>(.[\w.\-, ]+)',
                            html).group(1).split(', ')
        # download.serienjunkies.org is handled by a dedicated plugin.
        try:
            hosters.remove("download.serienjunkies.org")
        except ValueError:
            pass
        return hosters
| gpl-3.0 |
spino327/sdr_testbed | DistributedTestbed/SlaveRX.py | 1 | 6293 | '''
Copyright (c) 2011, Universidad Industrial de Santander, Colombia
University of Delaware
All rights reserved.
@author: Sergio Pino
@author: Henry Arguello
Website: http://www.eecis.udel.edu/
emails : sergiop@udel.edu - henarfu@udel.edu
Date : Feb, 2011
'''
import socket
import time
import sys
from receiver.RXApp import RXApp
from util.PropertyReader import readProperties
from util import Utils
class SlaveRX(object):
    '''
    SlaveRX is responsible of control the RX USRP node.

    It listens on a TCP control socket and executes a simple text protocol:
    "start" handshake, then any number of "startRec:..."/"stopRec" commands,
    terminated by "finish".
    '''

    def __init__(self, host, port, path):
        '''
        Constructor
        @param host: refers to the local host address
        @param port: port for the server to listen
        @param path: File system path where the data will be stored
        '''
        # Control server; only one master connection is served at a time.
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind((host, port))
        self.server.listen(1)
        self.path = path
        self.app = None

    def setRXProperties(self, lo_off, fc, dec, gain, sync):
        '''
        Set the USRP RX properties
        @param lo_off: local oscillator offset (int)
        @param fc: Center frequency (float)
        @param dec: Decimation factor (int)
        @param gain: Gain of the receiver in dB (int)
        @param sync: True if the Hardware will use the GPSDO (boolean)
        '''
        self.lo_off = lo_off
        self.fc = fc
        self.dec = dec
        self.gain = gain
        self.sync = sync

    def launch(self):
        '''
        Accept control connections forever; each one is served by
        __startup__.  A failure while serving one client does not stop the
        server.
        '''
        print("i: launch SlaveRX")
        while True:
            sc, addr = self.server.accept()
            sc.settimeout(10*60)
            print("\n\ni: SlaveRX Connection from " + str(addr) + ", time " + time.strftime("%d-%m-%y/%H:%M:%S"))
            tic = time.time()
            try:
                self.__startup__(sc, addr)
            except Exception as e:
                # "except Exception, e" (py2-only syntax) replaced by the
                # py2.6+/py3-compatible "as" form.
                print("e: " + str(e))
            sc.close()
            print("i: SlaveRX Connection closed, duration: " + str(time.time() - tic) + " [seg]\n\n")
        print("i: SlaveRX end launch")

    def record(self, prefix, at, signame):
        """
        Start recording.

        @param prefix: prefix path folder where the signals are stored, e.g. /home/folder/
        @param at: attenuation factor
        @param signame: filename of the signal
        """
        # creating the folder
        folder = self.path + prefix
        folder = folder if (folder.endswith("/")) else folder + "/"
        Utils.ensure_dir(folder)
        # signal file name encodes attenuation and receiver gain
        filename = folder + signame + "_at" + str(at) + "_G" + str(self.gain) + ".dat"
        print("i: record filename = " + filename)
        self.app = RXApp(self.fc, self.dec, self.gain, "addr=192.168.10.2", self.sync, filename, self.lo_off)
        self.app.launch()

    def __startup__(self, sc, addr):
        '''
        Serve one control connection: perform the "start" handshake, then
        process commands until "finish" (or an unknown command) arrives.
        '''
        print("i: startup")
        msg = sc.recv(1024)
        if msg == "start":
            sc.send("ok")
            print("i: start ok")
            msg = sc.recv(1024)
            print("i: msg = " + msg)
            while msg != "finish":
                tic = time.time()
                if msg.find("startRec") >= 0:
                    # message "startRec:/prefix_path/:at:signame:"
                    print("i: startRec received")
                    values = msg.split(":")
                    prefix = values[1]
                    at = float(values[2])
                    signame = values[3]
                    self.record(prefix, at, signame)
                    sc.send("ok")
                elif msg.find("stopRec") >= 0:
                    print("i: stopRec received")
                    if self.app.stopApp():
                        print("i: stopRec successful")
                        sc.send("ok")
                    else:
                        print("i: stopRec failed")
                        sc.send("error")
                else:
                    # Unknown command: abandon the command loop.
                    print("i: ending")
                    break
                print("i: cmd duration: " + str(time.time() - tic) + " [seg]\n")
                msg = sc.recv(1024)
        else:
            print("e: not start")
            sc.send("error")
        if msg == "finish":
            print("i: finish cmd received")
        sc.close()
        print("i: end startup")

    def __exit__(self):
        '''
        Shut the control server down.

        The original body printed a message and then called self.__exit__()
        again, recursing until the stack overflowed; the recursive call is
        replaced by closing the listening socket.
        '''
        print("somebody call me!")
        self.server.close()
if __name__ == '__main__':
    '''
    Creates an instance of the specified {@code Application}
    subclass, sets the {@code ApplicationContext} {@code
    application} property, and then calls the new {@code
    Application's} {@code startup} method. The {@code launch} method is
    typically called from the Application's {@code main}:
    '''
    # Reading the properties (config file path may be given as argv[1])
    confFile = "confRX.txt"
    if(len(sys.argv) > 1):
        arg = sys.argv[1]
        confFile = arg if len(arg) > 0 else confFile
    else:
        print("working with default config file path")
    properties = readProperties(confFile)
    print("Properties:")
    for p in properties:
        print("\t" + p + " : " + properties[p])
    # Normalize the storage path to end with a slash.
    path = properties["rxpath"]
    path = path if (path.endswith("/")) else path+"/"
    sync = True if properties["sync"] == "True" else False
    app = SlaveRX(properties["rxip"],
                  int(properties["rxport"]),
                  path)
    app.setRXProperties(int(properties["lo_off"]),
                        float(properties["fc"]),
                        int(properties["dec"]),
                        int(properties["gain"]),
                        sync)
    # Blocks forever serving control connections.
    app.launch()
    exit()
Pablo126/SSBW | Tarea4/tarea4/lib/python3.5/site-packages/django/core/files/base.py | 487 | 5717 | from __future__ import unicode_literals
import os
from io import BytesIO, StringIO, UnsupportedOperation
from django.core.files.utils import FileProxyMixin
from django.utils import six
from django.utils.encoding import (
force_bytes, force_str, python_2_unicode_compatible, smart_text,
)
@python_2_unicode_compatible
class File(FileProxyMixin):
    # Default size (64 KiB) of the chunks yielded by chunks().
    DEFAULT_CHUNK_SIZE = 64 * 2 ** 10

    def __init__(self, file, name=None):
        """Wrap *file*; the name defaults to the underlying file's name."""
        self.file = file
        if name is None:
            name = getattr(file, 'name', None)
        self.name = name
        if hasattr(file, 'mode'):
            self.mode = file.mode

    def __str__(self):
        return smart_text(self.name or '')

    def __repr__(self):
        return force_str("<%s: %s>" % (self.__class__.__name__, self or "None"))

    def __bool__(self):
        # A File is truthy iff it has a (non-empty) name.
        return bool(self.name)

    def __nonzero__(self):      # Python 2 compatibility
        return type(self).__bool__(self)

    def __len__(self):
        return self.size

    def _get_size_from_underlying_file(self):
        # Best-effort size lookup: size attribute, then the filesystem,
        # then a seek-to-end/tell round trip.
        if hasattr(self.file, 'size'):
            return self.file.size
        if hasattr(self.file, 'name'):
            try:
                return os.path.getsize(self.file.name)
            except (OSError, TypeError):
                pass
        if hasattr(self.file, 'tell') and hasattr(self.file, 'seek'):
            # Remember the position, measure, then restore it.
            pos = self.file.tell()
            self.file.seek(0, os.SEEK_END)
            size = self.file.tell()
            self.file.seek(pos)
            return size
        raise AttributeError("Unable to determine the file's size.")

    def _get_size(self):
        # Cache the computed size on the instance.
        if hasattr(self, '_size'):
            return self._size
        self._size = self._get_size_from_underlying_file()
        return self._size

    def _set_size(self, size):
        self._size = size

    size = property(_get_size, _set_size)

    def _get_closed(self):
        return not self.file or self.file.closed
    closed = property(_get_closed)

    def chunks(self, chunk_size=None):
        """
        Read the file and yield chunks of ``chunk_size`` bytes (defaults to
        ``UploadedFile.DEFAULT_CHUNK_SIZE``).
        """
        if not chunk_size:
            chunk_size = self.DEFAULT_CHUNK_SIZE
        try:
            # Rewind if the underlying object supports it.
            self.seek(0)
        except (AttributeError, UnsupportedOperation):
            pass
        while True:
            data = self.read(chunk_size)
            if not data:
                break
            yield data

    def multiple_chunks(self, chunk_size=None):
        """
        Returns ``True`` if you can expect multiple chunks.
        NB: If a particular file representation is in memory, subclasses should
        always return ``False`` -- there's no good reason to read from memory in
        chunks.
        """
        if not chunk_size:
            chunk_size = self.DEFAULT_CHUNK_SIZE
        return self.size > chunk_size

    def __iter__(self):
        # Iterate over this file-like object by newlines
        buffer_ = None
        for chunk in self.chunks():
            for line in chunk.splitlines(True):
                if buffer_:
                    if endswith_cr(buffer_) and not equals_lf(line):
                        # Line split after a \r newline; yield buffer_.
                        yield buffer_
                        # Continue with line.
                    else:
                        # Line either split without a newline (line
                        # continues after buffer_) or with \r\n
                        # newline (line == b'\n').
                        line = buffer_ + line
                    # buffer_ handled, clear it.
                    buffer_ = None
                # If this is the end of a \n or \r\n line, yield.
                if endswith_lf(line):
                    yield line
                else:
                    buffer_ = line
        if buffer_ is not None:
            yield buffer_

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    def open(self, mode=None):
        # Reopen by rewinding if still open, otherwise from the filesystem.
        if not self.closed:
            self.seek(0)
        elif self.name and os.path.exists(self.name):
            self.file = open(self.name, mode or self.mode)
        else:
            raise ValueError("The file cannot be reopened.")

    def close(self):
        self.file.close()
@python_2_unicode_compatible
class ContentFile(File):
    """
    A File-like object that takes just raw content, rather than an actual file.
    """
    def __init__(self, content, name=None):
        # Text content is held in a StringIO on Python 3; everything else
        # is coerced to bytes and held in a BytesIO.
        if six.PY3:
            stream_class = StringIO if isinstance(content, six.text_type) else BytesIO
        else:
            stream_class = BytesIO
            content = force_bytes(content)
        super(ContentFile, self).__init__(stream_class(content), name=name)
        self.size = len(content)

    def __str__(self):
        return 'Raw content'

    def __bool__(self):
        # Always truthy, even without a name (unlike the base File).
        return True

    def __nonzero__(self):      # Python 2 compatibility
        return type(self).__bool__(self)

    def open(self, mode=None):
        # "Reopening" just rewinds the in-memory stream.
        self.seek(0)

    def close(self):
        # In-memory buffer; nothing to release.
        pass
def endswith_cr(line):
    """
    Return True if line (a text or byte string) ends with '\r'.
    """
    suffix = '\r' if isinstance(line, six.text_type) else b'\r'
    return line.endswith(suffix)
def endswith_lf(line):
    """
    Return True if line (a text or byte string) ends with '\n'.
    """
    suffix = '\n' if isinstance(line, six.text_type) else b'\n'
    return line.endswith(suffix)
def equals_lf(line):
    """
    Return True if line (a text or byte string) equals '\n'.
    """
    newline = '\n' if isinstance(line, six.text_type) else b'\n'
    return line == newline
| gpl-3.0 |
spartonia/django-oscar | src/oscar/apps/catalogue/admin.py | 14 | 3226 | from django.contrib import admin
from treebeard.admin import TreeAdmin
from treebeard.forms import movenodeform_factory
from oscar.core.loading import get_model
AttributeOption = get_model('catalogue', 'AttributeOption')
AttributeOptionGroup = get_model('catalogue', 'AttributeOptionGroup')
Category = get_model('catalogue', 'Category')
Option = get_model('catalogue', 'Option')
Product = get_model('catalogue', 'Product')
ProductAttribute = get_model('catalogue', 'ProductAttribute')
ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
ProductImage = get_model('catalogue', 'ProductImage')
ProductRecommendation = get_model('catalogue', 'ProductRecommendation')
# Inline editors embedded in the Product admin change page.
class AttributeInline(admin.TabularInline):
    model = ProductAttributeValue


class ProductRecommendationInline(admin.TabularInline):
    model = ProductRecommendation
    fk_name = 'primary'
    raw_id_fields = ['primary', 'recommendation']


class CategoryInline(admin.TabularInline):
    model = ProductCategory
    extra = 1


class ProductAttributeInline(admin.TabularInline):
    model = ProductAttribute
    extra = 2
class ProductClassAdmin(admin.ModelAdmin):
    list_display = ('name', 'requires_shipping', 'track_stock')
    inlines = [ProductAttributeInline]


class ProductAdmin(admin.ModelAdmin):
    date_hierarchy = 'date_created'
    list_display = ('get_title', 'upc', 'get_product_class', 'structure',
                    'attribute_summary', 'date_created')
    list_filter = ['structure', 'is_discountable']
    raw_id_fields = ['parent']
    inlines = [AttributeInline, CategoryInline, ProductRecommendationInline]
    prepopulated_fields = {"slug": ("title",)}
    search_fields = ['upc', 'title']

    def get_queryset(self, request):
        # Eager-load related rows to avoid N+1 queries in the change list.
        qs = super(ProductAdmin, self).get_queryset(request)
        return (
            qs
            .select_related('product_class', 'parent')
            .prefetch_related(
                'attribute_values',
                'attribute_values__attribute'))
class ProductAttributeAdmin(admin.ModelAdmin):
    list_display = ('name', 'code', 'product_class', 'type')
    prepopulated_fields = {"code": ("name", )}


class OptionAdmin(admin.ModelAdmin):
    pass


class ProductAttributeValueAdmin(admin.ModelAdmin):
    list_display = ('product', 'attribute', 'value')


class AttributeOptionInline(admin.TabularInline):
    model = AttributeOption


class AttributeOptionGroupAdmin(admin.ModelAdmin):
    list_display = ('name', 'option_summary')
    inlines = [AttributeOptionInline, ]


class CategoryAdmin(TreeAdmin):
    # Treebeard-backed tree admin; the form supports moving nodes.
    form = movenodeform_factory(Category)
    list_display = ('name', 'slug')
# Hook every catalogue model up to the default admin site.
admin.site.register(ProductClass, ProductClassAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(ProductAttribute, ProductAttributeAdmin)
admin.site.register(ProductAttributeValue, ProductAttributeValueAdmin)
admin.site.register(AttributeOptionGroup, AttributeOptionGroupAdmin)
admin.site.register(Option, OptionAdmin)
admin.site.register(ProductImage)
admin.site.register(Category, CategoryAdmin)
admin.site.register(ProductCategory)
| bsd-3-clause |
alanc10n/py-rau | pyrau/rau.py | 1 | 1747 | import argparse
from redis import StrictRedis
from pyrau.commands import Command
def delete(args, command):
    """ Execute the delete command """
    # Thin CLI adapter: forwards the parsed key pattern to Command.delete.
    command.delete(args.pattern)
def keys(args, command):
    """Execute the keys command; sorting implies detailed output."""
    show_details = args.details or args.sorted
    command.keys(args.pattern, show_details, args.sorted)
def parse_args():
    """Build the CLI parser and return the parsed arguments.

    Each subcommand binds its handler via ``set_defaults(func=...)`` so the
    caller can dispatch with ``args.func(args, command)``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-H', '--host', default='localhost', help='Host')
    parser.add_argument('-p', '--port', default=6379, type=int, help='Port')
    parser.add_argument('-b', '--batch_size', default=20,
                        type=int, help='Batch size for pipeline operations')
    subparsers = parser.add_subparsers(help='Commands')
    del_parser = subparsers.add_parser('delete', help='Delete key(s)')
    del_parser.add_argument('pattern', type=str, help='Key pattern')
    del_parser.set_defaults(func=delete)
    key_parser = subparsers.add_parser('keys', help="List keys")
    key_parser.add_argument('-p', '--pattern', help='Key pattern',
                            default=None)
    key_parser.add_argument('-d', '--details',
                            help='Include details for key(s)',
                            action='store_true')
    key_parser.add_argument('-s', '--sorted',
                            help='Sort result by size, implies --details',
                            action='store_true')
    key_parser.set_defaults(func=keys)
    args = parser.parse_args()
    return args
def main():
    """Entry point: wire CLI args to a Command bound to the target Redis."""
    args = parse_args()
    redis = StrictRedis(host=args.host, port=args.port)
    command = Command(redis)
    command.batch_size = args.batch_size
    # Dispatch to the subcommand handler chosen via set_defaults(func=...).
    args.func(args, command)


if __name__ == '__main__':
    main()
| mit |
lanselin/pysal | pysal/core/IOHandlers/tests/test_stata_txt.py | 20 | 2086 | import unittest
import pysal
from pysal.core.IOHandlers.stata_txt import StataTextIO
import tempfile
import os
class test_StataTextIO(unittest.TestCase):
    # Round-trips the sparse and full (matrix-form) Stata text weight files
    # shipped with the pysal examples through StataTextIO.

    def setUp(self):
        self.test_file_sparse = test_file_sparse = pysal.examples.get_path(
            'stata_sparse.txt')
        self.test_file_full = test_file_full = pysal.examples.get_path(
            'stata_full.txt')
        self.obj_sparse = StataTextIO(test_file_sparse, 'r')
        self.obj_full = StataTextIO(test_file_full, 'r')

    def test_close(self):
        # Reading from a closed handle must raise.
        for obj in [self.obj_sparse, self.obj_full]:
            f = obj
            f.close()
            self.failUnlessRaises(ValueError, f.read)

    def test_read(self):
        w_sparse = self.obj_sparse.read()
        self.assertEqual(56, w_sparse.n)
        self.assertEqual(4.0, w_sparse.mean_neighbors)
        self.assertEqual([1.0, 1.0, 1.0, 1.0, 1.0], w_sparse[1].values())
        w_full = self.obj_full.read()
        self.assertEqual(56, w_full.n)
        self.assertEqual(4.0, w_full.mean_neighbors)
        self.assertEqual(
            [0.125, 0.125, 0.125, 0.125, 0.125], w_full[1].values())

    def test_seek(self):
        # After exhausting the reader, seek(0) must allow re-reading.
        self.test_read()
        self.failUnlessRaises(StopIteration, self.obj_sparse.read)
        self.failUnlessRaises(StopIteration, self.obj_full.read)
        self.obj_sparse.seek(0)
        self.obj_full.seek(0)
        self.test_read()

    def test_write(self):
        # Write each W out to a temp file and verify the round trip.
        for obj in [self.obj_sparse, self.obj_full]:
            w = obj.read()
            f = tempfile.NamedTemporaryFile(
                suffix='.txt', dir=pysal.examples.get_path(''))
            fname = f.name
            f.close()
            o = pysal.open(fname, 'w', 'stata_text')
            if obj == self.obj_sparse:
                o.write(w)
            else:
                o.write(w, matrix_form=True)
            o.close()
            wnew = pysal.open(fname, 'r', 'stata_text').read()
            self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
            os.remove(fname)


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
QuLogic/python-future | src/future/moves/urllib/request.py | 70 | 3525 | from __future__ import absolute_import
from future.standard_library import suspend_hooks
from future.utils import PY3
if PY3:
from urllib.request import *
# This aren't in __all__:
from urllib.request import (getproxies,
pathname2url,
proxy_bypass,
quote,
request_host,
splitattr,
splithost,
splitpasswd,
splitport,
splitquery,
splittag,
splittype,
splituser,
splitvalue,
thishost,
to_bytes,
unquote,
unwrap,
url2pathname,
urlcleanup,
urljoin,
urlopen,
urlparse,
urlretrieve,
urlsplit,
urlunparse)
else:
__future_module__ = True
with suspend_hooks():
from urllib import *
from urllib2 import *
from urlparse import *
# Rename:
from urllib import toBytes # missing from __all__ on Py2.6
to_bytes = toBytes
# from urllib import (pathname2url,
# url2pathname,
# getproxies,
# urlretrieve,
# urlcleanup,
# URLopener,
# FancyURLopener,
# proxy_bypass)
# from urllib2 import (
# AbstractBasicAuthHandler,
# AbstractDigestAuthHandler,
# BaseHandler,
# CacheFTPHandler,
# FileHandler,
# FTPHandler,
# HTTPBasicAuthHandler,
# HTTPCookieProcessor,
# HTTPDefaultErrorHandler,
# HTTPDigestAuthHandler,
# HTTPErrorProcessor,
# HTTPHandler,
# HTTPPasswordMgr,
# HTTPPasswordMgrWithDefaultRealm,
# HTTPRedirectHandler,
# HTTPSHandler,
# URLError,
# build_opener,
# install_opener,
# OpenerDirector,
# ProxyBasicAuthHandler,
# ProxyDigestAuthHandler,
# ProxyHandler,
# Request,
# UnknownHandler,
# urlopen,
# )
# from urlparse import (
# urldefrag
# urljoin,
# urlparse,
# urlunparse,
# urlsplit,
# urlunsplit,
# parse_qs,
# parse_q"
# )
| mit |
rhurkes/chasegame | venv/lib/python2.7/site-packages/pip/commands/uninstall.py | 395 | 2203 | from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
    """
    Uninstall packages.
    pip is able to uninstall most installed packages. Known exceptions are:
    - Pure distutils packages installed with ``python setup.py install``, which
      leave behind no metadata to determine what files were installed.
    - Script wrappers installed by ``python setup.py develop``.
    """
    name = 'uninstall'
    usage = """
      %prog [options] <package> ...
      %prog [options] -r <requirements file> ..."""
    summary = 'Uninstall packages.'

    def __init__(self, *args, **kw):
        # Register the command-specific options on top of the base options.
        super(UninstallCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='file',
            help='Uninstall all the packages listed in the given requirements file. '
            'This option can be used multiple times.')
        self.cmd_opts.add_option(
            '-y', '--yes',
            dest='yes',
            action='store_true',
            help="Don't ask for confirmation of uninstall deletions.")
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # Collect requirements from positional args and -r files, then
        # uninstall them all in one pass.
        session = self._build_session(options)
        requirement_set = RequirementSet(
            build_dir=None,
            src_dir=None,
            download_dir=None,
            session=session,
        )
        for name in args:
            requirement_set.add_requirement(
                InstallRequirement.from_line(name))
        for filename in options.requirements:
            for req in parse_requirements(filename,
                    options=options, session=session):
                requirement_set.add_requirement(req)
        if not requirement_set.has_requirements:
            raise InstallationError('You must give at least one requirement '
                'to %(name)s (see "pip help %(name)s")' % dict(name=self.name))
        requirement_set.uninstall(auto_confirm=options.yes)
| mit |
akx/shoop | shoop_tests/admin/test_views.py | 1 | 2079 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import json
import pytest
from shoop.testing.factories import (
create_random_order, create_random_person, get_default_category,
get_default_product, get_default_shop
)
from shoop.testing.utils import apply_request_middleware
from shoop.utils.importing import load
@pytest.mark.parametrize("class_spec", [
    "shoop.admin.modules.categories.views.list:CategoryListView",
    "shoop.admin.modules.contacts.views:ContactListView",
    "shoop.admin.modules.orders.views:OrderListView",
    "shoop.admin.modules.products.views:ProductListView",
])
@pytest.mark.django_db
def test_list_view(rf, class_spec):
    """Smoke-test each admin list view: a paginated GET must return 2xx."""
    view = load(class_spec).as_view()
    request = rf.get("/", {
        "jq": json.dumps({"perPage": 100, "page": 1})
    })
    response = view(request)
    assert 200 <= response.status_code < 300
def random_order():
    """Create and return a random one-product order for a random person."""
    # These are prerequisites for random orders
    contact = create_random_person()
    product = get_default_product()
    return create_random_order(contact, [product])
@pytest.mark.parametrize("model_and_class", [
    (get_default_category, "shoop.admin.modules.categories.views:CategoryEditView"),
    (create_random_person, "shoop.admin.modules.contacts.views:ContactDetailView"),
    (random_order, "shoop.admin.modules.orders.views:OrderDetailView"),
    (get_default_product, "shoop.admin.modules.products.views:ProductEditView"),
])
@pytest.mark.django_db
def test_detail_view(rf, admin_user, model_and_class):
    """Smoke-test each admin detail/edit view against a fresh model instance."""
    get_default_shop()  # obvious prerequisite
    model_func, class_spec = model_and_class
    model = model_func()
    view = load(class_spec).as_view()
    request = apply_request_middleware(rf.get("/"), user=admin_user)
    response = view(request, pk=model.pk)
    # TemplateResponse instances must be rendered before inspecting status.
    if hasattr(response, "render"):
        response.render()
    assert 200 <= response.status_code < 300
TerryRen/TrPython | NetLib/SuperCaptcha.py | 1 | 9743 | #python 2.7
#coding=utf-8
__author__ = "Terry.Ren"
#try:
# import Image
#except ImportError:
# from PIL import Image
from PIL import Image
from PIL import ImageDraw
import ImageEnhance
import os
import urllib
import StringIO
import uuid
import pytesseract #open source
class Captcha(object):
    # Simple captcha solver: contrast-enhance, binarize, then run the image
    # through tesseract OCR (via pytesseract).  Python 2 code (print
    # statements, xrange).

    def __init__(self, isDebug = False):
        # isDebug: when True, intermediate images are saved for inspection.
        self.__isDebug = isDebug
        self.__currentStepId = 1
        self.__tempFileList = []

    def __BuildTempFileFullName(self, localDir, extName):
        # Unique, step-numbered temp file name; remembered for Clearup().
        fname = str(uuid.uuid1()) + "_" + str(self.__currentStepId) + "." + extName
        fname = os.path.join(localDir,fname)
        self.__currentStepId += 1
        self.__tempFileList.append(fname)
        return fname

    '''
    Store remote image to local dir
    '''
    def __StoreImage2LocalDir(self, imageUrl , localDir , extName):
        response = urllib.urlopen(imageUrl)
        tempFileFullName = self.__BuildTempFileFullName(localDir, extName)
        with open(tempFileFullName, 'wb') as f:
            f.write(response.read())
        return tempFileFullName

    def Clearup(self):
        # Delete every temp file created by this instance.
        for filename in self.__tempFileList:
            if os.path.isfile(filename):
                os.remove(filename)

    '''
    image enhance
    '''
    def __imageEnhance(self, image):
        # Boost contrast (factor 4) before binarization.
        enhancer = ImageEnhance.Contrast(image)
        image_enhancer = enhancer.enhance(4)
        return image_enhancer

    '''
    two value
    '''
    def __twoValue(self, image):
        img = image.convert('RGBA') # convert to RGBA
        pix = img.load() #read pix
        for x in range(img.size[0]): #remove [top-bottom] border
            pix[x, 0] = pix[x, img.size[1] - 1] = (255, 255, 255, 255)
        for y in range(img.size[1]): #remove [left-right] border
            pix[0, y] = pix[img.size[0] - 1, y] = (255, 255, 255, 255)
        for y in range(img.size[1]): # two value: R=95,G=95,B=95
            for x in range(img.size[0]):
                if pix[x, y][0] < 95 or pix[x, y][1] < 95 or pix[x, y][2] < 95:
                    pix[x, y] = (0, 0, 0, 255)
                else:
                    pix[x, y] = (255, 255, 255, 255)
        return img

    '''
    Get Captcha Code from on-line web site
    '''
    def GetOnlineCaptchaCode(self, imageUrl, isStoreOriginalImage = False, localDir = '', extName = 'jpg'):
        if isStoreOriginalImage == True:
            if not os.path.isdir(localDir):
                raise ValueError("please validate the argument GetOnlineCaptchaCode.localDir...")
            localFileName = self.__StoreImage2LocalDir(imageUrl , localDir , extName)
            img = Image.open(localFileName)
        else:
            # Decode straight from the in-memory HTTP response.
            imgBuf = StringIO.StringIO(urllib.urlopen(imageUrl).read())
            img = Image.open(imgBuf)
        print img.format, img.size, img.mode
        # image Enhance
        img = self.__imageEnhance(img)
        if self.__isDebug:
            img.save(self.__BuildTempFileFullName(localDir, extName))
        img = self.__twoValue(img)
        tempFileFullName = self.__BuildTempFileFullName(localDir, extName)
        img.save(tempFileFullName) # must use a local file with tesseract OCR
        text = pytesseract.image_to_string(Image.open(tempFileFullName))
        return text

    '''
    Get Captcha Code from local
    '''
    def GetLocalCaptchaCode(self, imagePath, extName = 'jpg'):
        localDir = os.path.dirname(imagePath)
        img = Image.open(imagePath)
        print img.format, img.size, img.mode
        # image Enhance
        img = self.__imageEnhance(img)
        if self.__isDebug:
            img.save(self.__BuildTempFileFullName(localDir, extName))
        # NOTE(review): near-duplicate of __twoValue but with threshold 90
        # instead of 95 -- confirm whether that difference is intentional.
        img = img.convert('RGBA') # convert to RGBA
        pix = img.load() #read pix
        for x in range(img.size[0]): #remove [top-bottom] border
            pix[x, 0] = pix[x, img.size[1] - 1] = (255, 255, 255, 255)
        for y in range(img.size[1]): #remove [left-right] border
            pix[0, y] = pix[img.size[0] - 1, y] = (255, 255, 255, 255)
        for y in range(img.size[1]): # two value: R=95,G=95,B=95
            for x in range(img.size[0]):
                if pix[x, y][0] < 90 or pix[x, y][1] < 90 or pix[x, y][2] < 90:
                    pix[x, y] = (0, 0, 0, 255)
                else:
                    pix[x, y] = (255, 255, 255, 255)
        tempFileFullName = self.__BuildTempFileFullName(localDir, extName)
        img.save(tempFileFullName) # must use a local file with tesseract OCR
        text = pytesseract.image_to_string(Image.open(tempFileFullName))
        return text

    def TestImage(self):
        # Draws a 2x2 1-bit checker image to a hard-coded path (dev helper).
        data = [(1,0),(0,1)]
        size = (2,2)
        image = Image.new("1",size)
        draw = ImageDraw.Draw(image)
        for x in xrange(0,size[0]):
            for y in xrange(0,size[1]):
                draw.point((x,y),data[x][y])
        image.save("D:\\GitHub\\TrPython\\NetLib\\Test\\1.gif")
class SmartCaptcha(object):
    # NOTE(review): near-duplicate of Captcha above, extended with a
    # neighbour-count noise filter; consider unifying the shared parts.

    def __init__(self, isDebug = False):
        # isDebug: when True, intermediate images are saved for inspection.
        self.__isDebug = isDebug
        self.__currentStepId = 1
        self.__tempFileList = []

    def __BuildTempFileFullName(self, localDir, extName):
        # Unique, step-numbered temp file name; remembered for Clearup().
        fname = str(uuid.uuid1()) + "_" + str(self.__currentStepId) + "." + extName
        fname = os.path.join(localDir,fname)
        self.__currentStepId += 1
        self.__tempFileList.append(fname)
        return fname

    '''
    Store remote image to local dir
    '''
    def __StoreImage2LocalDir(self, imageUrl , localDir , extName):
        response = urllib.urlopen(imageUrl)
        tempFileFullName = self.__BuildTempFileFullName(localDir, extName)
        with open(tempFileFullName, 'wb') as f:
            f.write(response.read())
        return tempFileFullName

    def Clearup(self):
        # Delete every temp file created by this instance.
        for filename in self.__tempFileList:
            if os.path.isfile(filename):
                os.remove(filename)

    '''
    image enhance
    '''
    def __imageEnhance(self, image):
        # Boost contrast (factor 4) before binarization.
        enhancer = ImageEnhance.Contrast(image)
        image_enhancer = enhancer.enhance(4)
        return image_enhancer

    '''
    two value
    '''
    def __twoValue(self, image):
        img = image.convert('RGBA') # convert to RGBA
        pix = img.load() #read pix
        for x in range(img.size[0]): #remove [top-bottom] border
            pix[x, 0] = pix[x, img.size[1] - 1] = (255, 255, 255, 255)
        for y in range(img.size[1]): #remove [left-right] border
            pix[0, y] = pix[img.size[0] - 1, y] = (255, 255, 255, 255)
        for y in range(img.size[1]): # two value: R=100,G=100,B=120
            for x in range(img.size[0]):
                # NOTE(review): only the R and G channels are tested despite
                # the comment mentioning B -- confirm intent.
                if pix[x, y][0] < 100 and pix[x, y][1] < 100:
                    pix[x, y] = (0, 0, 0, 255)
                else:
                    pix[x, y] = (255, 255, 255, 255)
        return img

    def __getEffectivePoint(self, pix, x , y):
        # Count how many of the 8 neighbours of (x, y) share its colour.
        point, sx , sy = 0, x-1, y-1
        #print sx+3 , sy +3 ,x , y
        for i in xrange(3):
            for j in xrange(3):
                if sx+i == x and sy+j == y:
                    continue
                if pix[sx+i,sy+j] == pix[x,y]:
                    point += 1
        return point;

    '''
    1111111
    1011101
    1011101
    1111111
    '''
    def __clearNoise(self, img, effectivePoint ,processCount):
        # Blank pixels with fewer than effectivePoint same-colour neighbours
        # (salt-and-pepper removal), repeated processCount times.
        for ct in xrange(0, processCount):
            pix = img.load() #read pix
            for x in xrange(1,img.size[0] - 1):
                for y in xrange(1, img.size[1] - 1):
                    point = self.__getEffectivePoint(pix , x , y)
                    if point < effectivePoint:
                        pix[x, y] = (255, 255, 255, 255) # set to Noise
        return img

    '''
    Get Captcha Code from local
    '''
    def GetLocalCaptchaCode(self, imagePath, extName = 'jpg'):
        localDir = os.path.dirname(imagePath)
        img = Image.open(imagePath)
        print img.format, img.size, img.mode
        # image Enhance
        img = self.__imageEnhance(img)
        if self.__isDebug:
            img.save(self.__BuildTempFileFullName(localDir, extName))
        # two value
        img = self.__twoValue(img)
        if self.__isDebug:
            img.save(self.__BuildTempFileFullName(localDir, extName))
        # clear Noise
        img = self.__clearNoise(img, 3 , 1)
        # OCR
        tempFileFullName = self.__BuildTempFileFullName(localDir, extName)
        img.save(tempFileFullName) # must use a local file with tesseract OCR
        text = pytesseract.image_to_string(Image.open(tempFileFullName))
        return text
if __name__ == "__main__":
    # Ad-hoc manual test driver (kept as-is, including disabled variants).
    print '[unit test]'
    #validate1 = Captcha()
    #print validate1.GetOnlineCaptchaCode("http://202.119.81.113:8080/verifycode.servlet")
    #validate2 = Captcha(True)
    #print validate2.GetOnlineCaptchaCode("http://202.119.81.113:8080/verifycode.servlet",True,"D:\\GitHub\\TrPython\\NetLib\\Test")
    #validate2.Clearup()
    #validate3 = Captcha(True)
    #print validate3.GetLocalCaptchaCode("D:\\GitHub\\TrPython\\NetLib\\Test\\1400.gif","gif")
    #validate3.TestImage()
    validate4 = SmartCaptcha(True)
    print validate4.GetLocalCaptchaCode("D:\\GitHub\\TrPython\\NetLib\\Test\\xxf2.jpg","jpg")
    #print validate4.GetLocalCaptchaCode("D:\\GitHub\\TrPython\\NetLib\\Test\\queaa.jpg","jpg")
    print pytesseract.image_to_string(Image.open("D:\\GitHub\\TrPython\\NetLib\\Test\\xxf2.jpg"))
| apache-2.0 |
agramian/PythonExternalProgramTestFramework | jinja2/constants.py | 1169 | 1626 | # -*- coding: utf-8 -*-
"""
jinja.constants
~~~~~~~~~~~~~~~
Various constants.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
| mit |
leohmoraes/weblate | weblate/lang/tests.py | 8 | 9069 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for language manipulations.
"""
import os.path
import gettext
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.core.management import call_command
from weblate.lang.models import Language, get_plural_type
from weblate.lang import data
from weblate.trans.tests.test_views import ViewTestCase
class LanguagesTest(TestCase):
    """Tests for automatic language creation and language attributes."""

    # Each entry: (code passed to auto_get_or_create, expected canonical
    # language code, expected text direction, expected plural equation).
    TEST_LANGUAGES = (
        ('cs_CZ', 'cs', 'ltr', '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2',),
        ('cs (2)', 'cs', 'ltr', '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2',),
        ('czech', 'cs', 'ltr', '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2',),
        ('cs_CZ@hantec', 'cs_CZ@hantec', 'ltr', '(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2',),
        ('de-DE', 'de', 'ltr', 'n != 1',),
        ('de_AT', 'de_AT', 'ltr', 'n != 1',),
        ('portuguese_portugal', 'pt_PT', 'ltr', 'n > 1',),
        ('pt-rBR', 'pt_BR', 'ltr', 'n > 1',),
        ('sr_RS@latin', 'sr_RS@latin', 'ltr', 'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && ' '(n%100<10 || n%100>=20) ? 1 : 2',),
        ('sr-RS@latin', 'sr_RS@latin', 'ltr', 'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && ' '(n%100<10 || n%100>=20) ? 1 : 2',),
        ('sr_RS_Latin', 'sr_RS@latin', 'ltr', 'n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && ' '(n%100<10 || n%100>=20) ? 1 : 2',),
        ('en_CA_MyVariant', 'en_CA@myvariant', 'ltr', 'n != 1',),
        ('en_CZ', 'en_CZ', 'ltr', 'n != 1',),
        ('zh_CN', 'zh_CN', 'ltr', '0',),
        ('zh-CN', 'zh_CN', 'ltr', '0',),
        ('zh-CN@test', 'zh_CN@test', 'ltr', '0',),
        ('zh-rCN', 'zh_CN', 'ltr', '0',),
        ('ar', 'ar', 'rtl', 'n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 ' ': n%100>=11 ? 4 : 5',),
        ('ar_AA', 'ar', 'rtl', 'n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 ' ': n%100>=11 ? 4 : 5',),
        ('ar_XX', 'ar_XX', 'rtl', 'n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 ' ': n%100>=11 ? 4 : 5',),
        ('xx', 'xx', 'ltr', 'n != 1',),
    )

    def test_auto_create(self):
        '''
        Tests that auto create correctly handles languages
        '''
        for original, expected, direction, plurals in self.TEST_LANGUAGES:
            # Create language
            lang = Language.objects.auto_get_or_create(original)
            # Check language code
            self.assertEqual(lang.code, expected, 'Invalid code for %s: %s' % (original, lang.code))
            # Check direction
            self.assertEqual(lang.direction, direction, 'Invalid direction for %s' % original)
            # Check plurals
            self.assertEqual(lang.pluralequation, plurals, 'Invalid plural for {0} (expected {1}, got {2})'.format(original, plurals, lang.pluralequation,))
            # Check whether html contains both language code and direction
            self.assertIn(direction, lang.get_html())
            self.assertIn(expected, lang.get_html())

    def test_plurals(self):
        '''
        Test whether plural form is correctly calculated.
        '''
        lang = Language.objects.get(code='cs')
        self.assertEqual(lang.get_plural_form(), 'nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;')

    def test_plural_labels(self):
        # Czech has three plural forms with well-known labels.
        lang = Language.objects.get(code='cs')
        self.assertEqual(lang.get_plural_label(0), 'One')
        self.assertEqual(lang.get_plural_label(1), 'Few')
        self.assertEqual(lang.get_plural_label(2), 'Other')

    def test_plural_labels_invalid(self):
        # Unknown plural types fall back to generic labels.
        lang = Language.objects.get(code='cs')
        lang.plural_type = -1
        self.assertEqual(lang.get_plural_label(0), 'Singular')
        self.assertEqual(lang.get_plural_label(1), 'Plural')
        self.assertEqual(lang.get_plural_label(2), 'Plural form 2')
class CommandTest(TestCase):
    '''
    Tests for management commands.
    '''
    def test_setuplang(self):
        # 'setuplang' should populate the Language table.
        call_command('setuplang')
        self.assertTrue(Language.objects.exists())
    def test_setuplang_noupdate(self):
        # Languages must exist even when existing entries are not updated.
        call_command('setuplang', update=False)
        self.assertTrue(Language.objects.exists())
    def test_checklang(self):
        # 'checklang' should accept the plural definition file bundled
        # next to this test module.
        testfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'plurals.txt')
        call_command('checklang', testfile)
class VerifyPluralsTest(TestCase):
    """
    In database plural form verification.
    """
    def test_valid(self):
        """Validates that we can name all plural equations"""
        for language in Language.objects.all():
            self.assertNotEqual(
                get_plural_type(language.code, language.pluralequation),
                data.PLURAL_UNKNOWN,
                'Can not guess plural type for {0} ({1})'.format(
                    language.code, language.pluralequation
                )
            )

    def test_equation(self):
        """Validates that all equations can be parsed by gettext"""
        # First verify gettext.c2py really rejects invalid syntax, so the
        # loop below is a meaningful check.
        self.assertRaises(SyntaxError, gettext.c2py, 'n==0 ? 1 2')
        for language in Language.objects.all():
            # Validate plurals can be parsed.
            plural = gettext.c2py(language.pluralequation)
            # Highest plural index reached over a representative range,
            # plus one, gives the plural-form count.  A generator avoids
            # materializing the intermediate list the original built.
            nplurals = max(plural(x) for x in range(200)) + 1
            # Check it matches the stored count.
            self.assertEqual(
                nplurals,
                language.nplurals,
                'Invalid nplurals for {0}: {1} ({2}, {3})'.format(
                    language.code, nplurals,
                    language.nplurals, language.pluralequation
                )
            )
class LanguagesViewTest(ViewTestCase):
    """View-level tests for the language list and detail pages."""
    def test_languages(self):
        # The language overview should list the test language.
        response = self.client.get(reverse('languages'))
        self.assertContains(response, 'Czech')
    def test_language(self):
        # The detail page shows the language name and its subprojects.
        response = self.client.get(reverse('show_language', kwargs={'lang': 'cs'}))
        self.assertContains(response, 'Czech')
        self.assertContains(response, 'Test/Test')
    def test_language_redirect(self):
        # Country-specific aliases redirect to the canonical code.
        response = self.client.get(reverse('show_language', kwargs={'lang': 'cs_CZ'}))
        self.assertRedirects(response, reverse('show_language', kwargs={'lang': 'cs'}))
    def test_language_nonexisting(self):
        # Unknown language codes yield a 404.
        response = self.client.get(reverse('show_language', kwargs={'lang': 'nonexisting'}))
        self.assertEqual(response.status_code, 404)
| gpl-3.0 |
batxes/4c2vhic | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models17200.py | 2 | 17581 | import _surface
import chimera
try:
    # Optional; only needed when driving Chimera via commands.
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    # Newer VolumePath versions expose Marker_Set directly.
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    # Older versions: create marker sets through the volume path dialog.
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}

# Marker colours used by the generated model.
_GRAY = (0.7, 0.7, 0.7)
_ORANGE = (1, 0.7, 0)

# One entry per particle: (marker centre (x, y, z), colour, radius).
# The values are identical to the original unrolled per-particle
# statements; only the mechanical repetition was replaced by a loop.
_PARTICLES = [
    ((8780.95, -327.097, 3156.59), _GRAY, 890.203),
    ((8658.36, 1454.63, 3173.45), _GRAY, 792.956),
    ((6837.04, 2042.3, 3186.94), _GRAY, 856.786),
    ((7165.15, -0.947572, 2118.38), _GRAY, 963.679),
    ((5770.23, -261.708, 2172.32), _GRAY, 761.442),
    ((4180.75, 1385.73, 3088.87), _GRAY, 961.183),
    ((3188.41, 2110.16, 4265.81), _GRAY, 753.151),
    ((3193.1, 1280.65, 3823), _ORANGE, 1098.07),
    ((3127.72, 3278.25, 5691.21), _GRAY, 1010.42),
    ((2853.07, 2757.68, 7317.83), _ORANGE, 821.043),
    ((3377.72, 4535.76, 7901.16), _GRAY, 873.876),
    ((4684.81, 4415.68, 7605.07), _GRAY, 625.532),
    ((6259.75, 4687.1, 7676.7), _GRAY, 880.474),
    ((6488.28, 3295.62, 7494.95), _GRAY, 659.161),
    ((8492.91, 2964.04, 8484.76), _GRAY, 831.745),
    ((11334.9, 3843.85, 7647.74), _GRAY, 803.065),
    ((10901.3, 4874.79, 6039.53), _GRAY, 610.262),
    ((10383, 5904.7, 6910.25), _GRAY, 741.265),
    ((8774.32, 5698.57, 7445.9), _GRAY, 748.625),
    ((8000.48, 5923.13, 8742.99), _GRAY, 677.181),
    ((6006.28, 5779.22, 7333.58), _GRAY, 616.015),
    ((7779.75, 6438.89, 8039.09), _GRAY, 653.154),
    ((7501.11, 7223.15, 7694.21), _GRAY, 595.33),
    ((8703.19, 7549.72, 7149.74), _GRAY, 627.901),
    ((9697.13, 6621.18, 6657.24), _GRAY, 663.941),
    ((11256.8, 6144.71, 6780.59), _GRAY, 663.899),
    ((9732.74, 6209.38, 7104.52), _GRAY, 644.694),
    ((7779.85, 5243.87, 6752.19), _GRAY, 896.802),
    ((7148.45, 6590.43, 7098.85), _GRAY, 576.38),
    ((5866.31, 6650.89, 7360.35), _GRAY, 635.092),
    ((5823.34, 7287.6, 6562.72), _GRAY, 651.505),
    ((5168.15, 5839.54, 7305.06), _GRAY, 718.042),
    ((5862.29, 7289.66, 7985.34), _GRAY, 726.714),
    ((7185.15, 7125.48, 7268.32), _GRAY, 673.585),
    ((8535, 6723.63, 7355.18), _GRAY, 598.418),
    ((9666.6, 6807.35, 7960.53), _GRAY, 693.382),
    ((7430.23, 6053.29, 7238.51), _GRAY, 804.038),
    ((8776.25, 7345.6, 7369.36), _GRAY, 816.178),
    ((8682.29, 6651.87, 6413.78), _GRAY, 776.628),
    ((8471.46, 7496.58, 7708.9), _GRAY, 750.656),
    ((6932.46, 7585.84, 7124.48), _GRAY, 709.625),
    ((5418.11, 7837.44, 8131.28), _GRAY, 927.681),
    ((6996.81, 9924.84, 8625.79), _GRAY, 1088.21),
    ((5767.84, 8582.67, 8525.81), _GRAY, 736.147),
    ((6986.06, 7895.12, 7640.44), _GRAY, 861.101),
    ((5397.14, 7249.94, 6523.49), _GRAY, 924.213),
    ((5243.42, 8824.91, 5262.33), _GRAY, 881.828),
    ((4939.86, 10534.2, 6416.63), _GRAY, 927.681),
    ((4160.47, 9467.68, 5065.88), _GRAY, 831.576),
    ((3673.88, 7735.15, 4519.72), _GRAY, 859.494),
    ((3230.31, 8551, 5118.81), _GRAY, 704.845),
    ((3420.03, 6903.47, 5320.74), _GRAY, 804.461),
    ((4060.98, 5251.78, 5318.28), _GRAY, 934.111),
    ((2650.55, 4601.33, 5409.22), _GRAY, 988.339),
    ((2221.03, 5176.96, 5269.69), _ORANGE, 803.7),
    ((3553.41, 6605.91, 4516.12), _GRAY, 812.118),
    ((4166.9, 5864.75, 2556.87), _GRAY, 1177.93),
    ((5374.4, 6737.05, 505.355), _GRAY, 1038.21),
    ((5706.47, 7007.99, 62.5744), _ORANGE, 758.016),
    ((5295.4, 6362.29, -306.738), _GRAY, 824.046),
    ((4992.66, 6802.46, 471.119), _GRAY, 793.379),
    ((4421.35, 7023.26, -121.38), _GRAY, 1011.56),
    ((4712.73, 6307.06, 1627.78), _GRAY, 1097.01),
    ((3974.95, 7022.32, 161.675), _GRAY, 851.626),
    ((3945.4, 8023.21, -1661.8), _GRAY, 869.434),
    ((3341.95, 8696.42, -111.528), _GRAY, 818.463),
    ((4216.41, 9253.2, -1429.51), _GRAY, 759.539),
    ((3954.66, 7556.03, 309.807), _GRAY, 1088.59),
    ((4428.56, 7284.08, -1753.88), _GRAY, 822.312),
    ((4640.69, 6751.4, -2844.42), _GRAY, 749.81),
    ((6026.85, 6972.94, -2255.29), _GRAY, 764.488),
]

# Reproduce the original unrolled behaviour: for particle i, ensure a
# marker set named "particle_<i> geometry" exists in marker_sets, then
# place one marker in it.  's' and 'mark' keep their original role as
# the most recently touched set/marker.
for _index, (_center, _color, _radius) in enumerate(_PARTICLES):
    _name = "particle_%d geometry" % _index
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_center, _color, _radius)

# Register any accumulated surfaces with the open models (none are
# created by this script, so this matches the original empty loop).
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| gpl-3.0 |
kevinthesun/mxnet | tests/python/unittest/test_metric.py | 12 | 1347 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import json
def check_metric(metric, *args, **kwargs):
    """Round-trip a metric through its JSON config and verify equality.

    ``metric`` may be a metric name or an EvalMetric instance; extra
    arguments are forwarded to ``mx.metric.create``.  The check fails
    if serializing and re-creating the metric changes its config.
    """
    # Keep the parameter and the created instance in separate names;
    # the original shadowed 'metric' with its own derived value.
    original = mx.metric.create(metric, *args, **kwargs)
    serialized = json.dumps(original.get_config())
    restored = mx.metric.create(serialized)
    assert original.get_config() == restored.get_config()
def test_metrics():
    # Round-trip a sample of built-in metrics, including one that takes
    # keyword args, one with a positional arg, and a composite metric.
    check_metric('acc', axis=0)
    check_metric('f1')
    check_metric('perplexity', -1)
    check_metric('pearsonr')
    composite = mx.metric.create(['acc', 'f1'])
    check_metric(composite)
if __name__ == '__main__':
    # Allow running this test module directly through nose.
    import nose
    nose.runmodule()
| apache-2.0 |
fmaguire/BayeHem | bayehem/rsem/detonate-1.11/ref-eval/boost/tools/build/v2/build/configure.py | 44 | 5289 | # Status: ported.
# Base revison: 64488
#
# Copyright (c) 2010 Vladimir Prus.
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
# This module defines function to help with two main tasks:
#
# - Discovering build-time configuration for the purposes of adjusting
# build process.
# - Reporting what is built, and how it is configured.
import b2.build.property as property
import b2.build.property_set as property_set
import b2.build.targets
from b2.manager import get_manager
from b2.util.sequence import unique
from b2.util import bjam_signature, value_to_jam
import bjam
import os
# Column width used to right-justify configure-check console output.
__width = 30

def set_width(width):
    """Set the column width used to right-justify check-result output.

    Bug fix: the body previously assigned the constant 30 instead of
    the ``width`` argument, making this function a silent no-op.
    """
    global __width
    __width = width
# Registered component names, in registration order.
__components = []
# Subset of __components that will actually be built.
__built_components = []
# Per-component log messages shown in the final configuration report.
__component_logs = {}
# Whether the "Performing configuration checks" banner was printed yet.
__announced_checks = False
# Open file object (kept alive to avoid GC) and its descriptor that
# receive configure-check build output; see set_log_file().
__log_file = None
__log_fd = -1
def register_components(components):
    """Record the given component names as known to the build."""
    for component in components:
        __components.append(component)
def components_building(components):
    """Mark the given components as scheduled to be built."""
    for component in components:
        __built_components.append(component)
def log_component_configuration(component, message):
    """Attach a user-visible configuration note to *component*."""
    if component not in __component_logs:
        __component_logs[component] = []
    __component_logs[component].append(message)
def log_check_result(result):
    # Print the one-time banner before the first check result, then
    # echo the result line itself.
    global __announced_checks
    if not __announced_checks:
        print "Performing configuration checks"
        __announced_checks = True
    print result
def log_library_search_result(library, result):
    """Report the outcome of a library search as a configure check.

    Bug fix: the ported code called ``.rjust(width)`` with an undefined
    name ``width`` (a leftover from the Jam original), which raised
    NameError on every call; it now uses the module-wide ``__width``
    column setting like the rest of this module.
    """
    log_check_result(("    - %(library)s : %(result)s" % locals()).rjust(__width))
def print_component_configuration():
    # Emit the final summary: for every registered component, whether it
    # will be built, followed by any per-component log messages.
    print "\nComponent configuration:"
    for c in __components:
        if c in __built_components:
            s = "building"
        else:
            s = "not building"
        # NOTE(review): the trailing ')' below looks like a typo carried
        # over from the Jam original -- confirm before changing output.
        message = "    - %s)" % c
        message = message.rjust(__width)
        message += " : " + s
        for m in __component_logs.get(c, []):
            print "        -" + m
        print ""
# Cache of build-check outcomes keyed by (description, property set).
__builds_cache = {}

def builds(metatarget_reference, project, ps, what):
    # Attempt to build a metatarget named by 'metatarget-reference'
    # in context of 'project' with properties 'ps'.
    # Returns non-empty value if build is OK.
    result = []  # NOTE(review): immediately overwritten below; appears vestigial
    existing = __builds_cache.get((what, ps), None)
    if existing is None:
        result = False
        # Pre-mark the check as failed so a re-entrant evaluation of the
        # same check does not recurse and reports failure.
        __builds_cache[(what, ps)] = False
        targets = b2.build.targets.generate_from_reference(
            metatarget_reference, project, ps).targets()
        jam_targets = []
        for t in targets:
            jam_targets.append(t.actualize())
        # Right-justified label for the console report.
        x = ("    - %s" % what).rjust(__width)
        # Build output goes to the configure log file, not the console.
        if bjam.call("UPDATE_NOW", jam_targets, str(__log_fd), "ignore-minus-n"):
            __builds_cache[(what, ps)] = True
            result = True
            log_check_result("%s: yes" % x)
        else:
            log_check_result("%s: no" % x)
        return result
    else:
        return existing
def set_log_file(log_file_name):
    """Direct configure-check build output to *log_file_name*.

    Called by Boost.Build startup code; this should never be called by
    users.  The containing directory is created when missing, and the
    file object is kept in a module global so it is not garbage
    collected (which would close the descriptor).
    """
    global __log_file, __log_fd
    directory = os.path.dirname(log_file_name)
    if not os.path.exists(directory):
        os.makedirs(directory)
    __log_file = open(log_file_name, "w")
    __log_fd = __log_file.fileno()
# Frontend rules
class CheckTargetBuildsWorker:
    """Evaluates a <conditional> property by test-building a target."""
    def __init__(self, target, true_properties, false_properties):
        # Parse both property lists once, at construction time.
        self.target = target
        self.true_properties = property.create_from_strings(true_properties, True)
        self.false_properties = property.create_from_strings(false_properties, True)
    def check(self, ps):
        # Return the true or false property list depending on whether
        # self.target builds under the build-relevant subset of 'ps'.
        # FIXME: this should not be hardcoded. Other checks might
        # want to consider different set of features as relevant.
        toolset = ps.get('toolset')[0]
        toolset_version_property = "<toolset-" + toolset + ":version>" ;
        # Only features that can change the compile outcome are kept, so
        # the builds() cache is shared across unrelated property sets.
        relevant = ps.get_properties('target-os') + \
            ps.get_properties("toolset") + \
            ps.get_properties(toolset_version_property) + \
            ps.get_properties("address-model") + \
            ps.get_properties("architecture")
        rps = property_set.create(relevant)
        t = get_manager().targets().current()
        p = t.project()
        if builds(self.target, p, rps, "%s builds" % self.target):
            choosen = self.true_properties
        else:
            choosen = self.false_properties
        # Expand any conditionals embedded in the chosen properties
        # against the full original property set.
        return property.evaluate_conditionals_in_context(choosen, ps)
@bjam_signature((["target"], ["true_properties", "*"], ["false_properties", "*"]))
def check_target_builds(target, true_properties, false_properties):
    """Frontend of the ``check-target-builds`` rule: wraps a
    CheckTargetBuildsWorker check in a <conditional> property."""
    checker = CheckTargetBuildsWorker(target, true_properties, false_properties)
    return "<conditional>" + value_to_jam(checker.check)
# Register the frontend with the project registry so Jamfiles can invoke
# it as "check-target-builds".
get_manager().projects().add_rule("check-target-builds", check_target_builds)
| apache-2.0 |
maciekcc/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/affine_impl.py | 8 | 22699 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_identity
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Affine",
]
def _as_tensor(x, name):
"""Convenience to convert to `Tensor` or leave as `None`."""
return None if x is None else ops.convert_to_tensor(x, name=name)
# TODO(srvasude): Deprecate this class with a dedicated Linear Operator
# corresponding to TriL + V D V.T.
class _TriLPlusVDVTLightweightOperatorPD(object):
  """Helper/hidden class fake an OperatorPD for TriL+VDV.T.

  Represents the matrix `M + V D V.T` where `M` is lower triangular,
  without materializing the sum, exposing just enough of the OperatorPD
  API for `Affine` (matmul, solve, log-abs-det).
  """

  def __init__(self, tril, v, diag=None, validate_args=False):
    """Creates an instance of _TriLPlusVDVTLightweightOperatorPD.
    WARNING: This object is not to be used outside of `Affine` where it is
    currently being temporarily used for refactoring purposes.
    Args:
      tril: `Tensor` of shape `[B1,..,Bb, d, d]`.
      v: `Tensor` of shape `[B1,...,Bb, d, k]`.
      diag: `Tensor` of shape `[B1,...,Bb, k, k]` or None
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
    """
    self._m = tril
    self._v = v
    self._validate_args = validate_args
    self._inputs = [tril, v]
    if diag is not None:
      self._inputs += [diag]
      self._d = operator_pd_diag.OperatorPDDiag(diag, verify_pd=validate_args)
      # D is diagonal, so its inverse is the elementwise reciprocal.
      self._d_inv = operator_pd_diag.OperatorPDDiag(1. / diag,
                                                    verify_pd=validate_args)
      return
    # No `diag` given: D defaults to the identity of shape [B1,...,Bb, k, k],
    # inferred statically from `v` when possible, dynamically otherwise.
    if v.get_shape().is_fully_defined():
      v_shape = v.get_shape().as_list()
      id_shape = v_shape[:-2] + [v_shape[-1], v_shape[-1]]
    else:
      v_shape = array_ops.shape(v)
      id_shape = array_ops.concat([v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
    self._d = operator_pd_identity.OperatorPDIdentity(
        id_shape, v.dtype, verify_pd=self.validate_args)
    # The identity is its own inverse.
    self._d_inv = self._d

  @property
  def inputs(self):
    # Tensors this operator depends on (used as graph parents by `Affine`).
    return self._inputs

  @property
  def dtype(self):
    return self._m.dtype.base_dtype

  @property
  def validate_args(self):
    return self._validate_args

  def rank(self):
    """Returns `rank(self)`."""
    return array_ops.rank(self._m)

  def sqrt_matmul(self, x):
    """Computes `matmul(self, x)`.
    Doesn't actually do the sqrt! Named as such to agree with API.
    Args:
      x: `Tensor`
    Returns:
      self_times_x: `Tensor`
    """
    # (M + V D V.T) x computed term by term, never forming the full matrix.
    m_x = math_ops.matmul(self._m, x)
    vt_x = math_ops.matmul(self._v, x, adjoint_a=True)
    d_vt_x = self._d.matmul(vt_x)
    v_d_vt_x = math_ops.matmul(self._v, d_vt_x)
    return m_x + v_d_vt_x

  def sqrt_solve(self, x):
    """Computes `solve(self, x)`.
    Doesn't actually do the sqrt! Named as such to agree with API.
    To compute (M + V D V.T), we use the Woodbury matrix identity:
      inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
    where,
      C = inv(D) + V.T inv(M) V.
    See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity
    Args:
      x: `Tensor`
    Returns:
      inv_of_self_times_x: `Tensor`
    """
    # inv(M) x is cheap because M is lower triangular.
    minv_x = linalg_ops.matrix_triangular_solve(self._m, x)
    vt_minv_x = math_ops.matmul(self._v, minv_x, transpose_a=True)
    cinv_vt_minv_x = linalg_ops.matrix_solve(
        self._woodbury_sandwiched_term(), vt_minv_x)
    v_cinv_vt_minv_x = math_ops.matmul(self._v, cinv_vt_minv_x)
    minv_v_cinv_vt_minv_x = linalg_ops.matrix_triangular_solve(
        self._m, v_cinv_vt_minv_x)
    return minv_x - minv_v_cinv_vt_minv_x

  def sqrt_log_abs_det(self):
    """Computes (log o abs o det)(X) for matrix X.
    Doesn't actually do the sqrt! Named as such to agree with API.
    To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(Tril + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `_inverse`, ie,
      C = inv(D) + V.T inv(M) V.
    See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma
    Returns:
      log_abs_det: `Tensor`.
    """
    log_det_c = math_ops.log(math_ops.abs(
        linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
    # det(M) for triangular M is the product of its diagonal entries.
    # Reduction is ok because we always prepad inputs to this class.
    log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
        array_ops.matrix_diag_part(self._m))), axis=[-1])
    # NOTE(review): the `2. *` factor presumably accounts for the "sqrt"
    # naming convention of self._d -- confirm against OperatorPD API.
    return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m

  def _woodbury_sandwiched_term(self):
    """Computes the sandwiched term in the Woodbury identity.
    Computes the "`C`" in the identity:
       inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
    where,
       C = inv(D) + V.T inv(M) V.
    See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity
    Returns:
      woodbury_sandwich_term: A `Tensor` to be used like `C`, above.
    """
    minv_v = linalg_ops.matrix_triangular_solve(self._m, self._v)
    vt_minv_v = math_ops.matmul(self._v, minv_v, adjoint_a=True)
    return self._d_inv.add_to_tensor(vt_minv_v)
class Affine(bijector.Bijector):
  """Compute `Y = g(X; shift, scale) = scale @ X + shift`.

  Here `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`.

  In TF parlance, the `scale` term is logically equivalent to:

  ```python
  scale = (
    scale_identity_multiplier * tf.diag(tf.ones(d)) +
    tf.diag(scale_diag) +
    scale_tril +
    scale_perturb_factor @ diag(scale_perturb_diag) @
      tf.transpose([scale_perturb_factor])
  )
  ```

  The `scale` term is applied without necessarily materializing constituent
  matrices, i.e., the matmul is [matrix-free](
  https://en.wikipedia.org/wiki/Matrix-free_methods) when possible.

  Examples:

  ```python
  # Y = X
  b = Affine()

  # Y = X + shift
  b = Affine(shift=[1., 2, 3])

  # Y = 2 * I @ X.T + shift
  b = Affine(shift=[1., 2, 3],
             scale_identity_multiplier=2.)

  # Y = tf.diag(d1) @ X.T + shift
  b = Affine(shift=[1., 2, 3],
             scale_diag=[-1., 2, 1])  # Implicitly 3x3.

  # Y = (I + v * v.T) @ X.T + shift
  b = Affine(shift=[1., 2, 3],
             scale_perturb_factor=[[1., 0],
                                   [0, 1],
                                   [1, 1]])

  # Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift
  b = Affine(shift=[1., 2, 3],
             scale_diag=[1., 3, 3],  # Implicitly 3x3.
             scale_perturb_diag=[2., 1],  # Implicitly 2x2.
             scale_perturb_factor=[[1., 0],
                                   [0, 1],
                                   [1, 1]])

  ```
  """

  def __init__(self,
               shift=None,
               scale_identity_multiplier=None,
               scale_diag=None,
               scale_tril=None,
               scale_perturb_factor=None,
               scale_perturb_diag=None,
               event_ndims=1,
               validate_args=False,
               name="affine"):
    """Instantiates the `Affine` bijector.

    This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
    giving the forward operation:

    ```none
    Y = g(X) = scale @ X + shift
    ```

    where the `scale` term is logically equivalent to:

    ```python
    scale = (
      scale_identity_multiplier * tf.diag(tf.ones(d)) +
      tf.diag(scale_diag) +
      scale_tril +
      scale_perturb_factor @ diag(scale_perturb_diag) @
        tf.transpose([scale_perturb_factor])
    )
    ```

    If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
    specified then `scale += IdentityMatrix`. Otherwise specifying a
    `scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
    `scale_diag != None` means `scale += tf.diag(scale_diag)`.

    Args:
      shift: Floating-point `Tensor`. If this is set to `None`, no shift is
        applied.
      scale_identity_multiplier: floating point rank 0 `Tensor` representing a
        scaling done to the identity matrix.
        When `scale_identity_multiplier = scale_diag = scale_tril = None` then
        `scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
        to `scale`.
      scale_diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ... k], which represents a k x k
        diagonal matrix.
        When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ... k, k], which represents a k x k
        lower triangular matrix.
        When `None` no `scale_tril` term is added to `scale`.
        The upper triangular elements above the diagonal are ignored.
      scale_perturb_factor: Floating-point `Tensor` representing factor matrix
        with last two dimensions of shape `(k, r)`. When `None`, no rank-r
        update is added to `scale`.
      scale_perturb_diag: Floating-point `Tensor` representing the diagonal
        matrix. `scale_perturb_diag` has shape [N1, N2, ... r], which
        represents an `r x r` diagonal matrix. When `None` low rank updates will
        take the form `scale_perturb_factor * scale_perturb_factor.T`.
      event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str` name given to ops managed by this object.

    Raises:
      ValueError: if `perturb_diag` is specified but not `perturb_factor`.
      TypeError: if `shift` has different `dtype` from `scale` arguments.
    """
    self._graph_parents = []
    self._name = name
    self._validate_args = validate_args
    # Ambiguous definition of low rank update.
    if scale_perturb_diag is not None and scale_perturb_factor is None:
      raise ValueError("When scale_perturb_diag is specified, "
                       "scale_perturb_factor must be specified.")
    # Special case, only handling a scaled identity matrix. We don't know its
    # dimensions, so this is special cased.
    # We don't check identity_multiplier, since below we set it to 1. if all
    # other scale args are None.
    self._is_only_identity_multiplier = (scale_tril is None and
                                         scale_diag is None and
                                         scale_perturb_factor is None)
    # When no args are specified, pretend the scale matrix is the identity
    # matrix.
    with self._name_scope("init", values=[
        shift, scale_identity_multiplier, scale_diag, scale_tril,
        scale_perturb_diag, scale_perturb_factor, event_ndims]):
      event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
      if validate_args:
        is_less_than_two = check_ops.assert_less(
            event_ndims, 2,
            message="event_ndims must be 0 or 1")
        event_ndims = control_flow_ops.with_dependencies(
            [is_less_than_two], event_ndims)
      self._shift = _as_tensor(shift, "shift")
      # Default the identity multiplier to 1 when it is the only scale term,
      # matching shift's dtype when available so the dtype check below holds.
      if (self._is_only_identity_multiplier
          and scale_identity_multiplier is None):
        if self._shift is not None:
          scale_identity_multiplier = ops.convert_to_tensor(
              1., dtype=self._shift.dtype)
        else:
          scale_identity_multiplier = 1.

      # self._create_scale_operator returns an OperatorPD in all cases except if
      # self._is_only_identity_multiplier; in which case it returns a scalar
      # Tensor.
      self._scale = self._create_scale_operator(
          identity_multiplier=scale_identity_multiplier,
          diag=scale_diag,
          tril=scale_tril,
          perturb_diag=scale_perturb_diag,
          perturb_factor=scale_perturb_factor,
          event_ndims=event_ndims,
          validate_args=validate_args)

      if (self._shift is not None and
          self._shift.dtype.base_dtype != self._scale.dtype.base_dtype):
        raise TypeError("shift.dtype({}) does not match scale.dtype({})".format(
            self._shift.dtype, self._scale.dtype))

      self._shaper = _DistributionShape(
          batch_ndims=self._infer_batch_ndims(),
          event_ndims=event_ndims,
          validate_args=validate_args)
      # BUG FIX: the original expression relied on Python's conditional-
      # expression precedence (`a + b if c else d + e if f else g`), which
      # silently dropped `event_ndims` and/or `self._shift` from
      # graph_parents depending on the branch taken. Parenthesize so all
      # relevant tensors are always included.
      super(Affine, self).__init__(
          event_ndims=event_ndims,
          graph_parents=(
              [event_ndims] +
              ([self._scale] if tensor_util.is_tensor(self._scale)
               else self._scale.inputs) +
              ([self._shift] if self._shift is not None else [])),
          is_constant_jacobian=True,
          dtype=self._scale.dtype,
          validate_args=validate_args,
          name=name)

  def _create_scale_operator(self, identity_multiplier, diag, tril,
                             perturb_diag, perturb_factor, event_ndims,
                             validate_args):
    """Construct `scale` from various components.

    Args:
      identity_multiplier: floating point rank 0 `Tensor` representing a scaling
        done to the identity matrix.
      diag: Floating-point `Tensor` representing the diagonal matrix.
        `scale_diag` has shape [N1, N2, ... k], which represents a k x k
        diagonal matrix.
      tril: Floating-point `Tensor` representing the diagonal matrix.
        `scale_tril` has shape [N1, N2, ... k], which represents a k x k lower
        triangular matrix.
      perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
        the low rank update.
      perturb_factor: Floating-point `Tensor` representing factor matrix.
      event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
        associated with a particular draw from the distribution. Must be 0 or 1
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.

    Returns:
      scale. In the case of scaling by a constant, scale is a
      floating point `Tensor`. Otherwise, scale is an `OperatorPD`.

    Raises:
      ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
    """
    identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
    diag = _as_tensor(diag, "diag")
    tril = _as_tensor(tril, "tril")
    perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
    perturb_factor = _as_tensor(perturb_factor, "perturb_factor")

    identity_multiplier = self._maybe_validate_identity_multiplier(
        identity_multiplier, validate_args)

    if perturb_factor is not None:
      perturb_factor = self._process_matrix(
          perturb_factor, min_rank=2, event_ndims=event_ndims)

    if perturb_diag is not None:
      perturb_diag = self._process_matrix(
          perturb_diag, min_rank=1, event_ndims=event_ndims)

    # The following if-statments are ordered by increasingly stronger
    # assumptions in the base matrix, i.e., we process in the order:
    # TriL, Diag, Identity.

    if tril is not None:
      tril = self._preprocess_tril(
          identity_multiplier, diag, tril, event_ndims)
      if perturb_factor is None:
        return operator_pd_cholesky.OperatorPDCholesky(
            tril, verify_pd=validate_args)
      return _TriLPlusVDVTLightweightOperatorPD(
          tril=tril, v=perturb_factor, diag=perturb_diag,
          validate_args=validate_args)

    if diag is not None:
      diag = self._preprocess_diag(identity_multiplier, diag, event_ndims)
      if perturb_factor is None:
        return operator_pd_diag.OperatorPDSqrtDiag(
            diag, verify_pd=validate_args)
      return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
          operator=operator_pd_diag.OperatorPDDiag(
              diag, verify_pd=validate_args),
          v=perturb_factor,
          diag=perturb_diag,
          verify_pd=validate_args)

    if identity_multiplier is not None:
      if perturb_factor is None:
        return identity_multiplier
      # Infer the shape from the V and D.
      v_shape = array_ops.shape(perturb_factor)
      identity_shape = array_ops.concat([v_shape[:-1], [v_shape[-2]]], 0)
      scaled_identity = operator_pd_identity.OperatorPDIdentity(
          identity_shape,
          perturb_factor.dtype.base_dtype,
          scale=identity_multiplier,
          verify_pd=validate_args)
      return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
          operator=scaled_identity,
          v=perturb_factor,
          diag=perturb_diag,
          verify_pd=validate_args)

    raise ValueError("One of tril, diag and/or identity_multiplier must be "
                     "specified.")

  def _maybe_validate_identity_multiplier(self, identity_multiplier,
                                          validate_args):
    """Check that the init arg `identity_multiplier` is valid."""
    if identity_multiplier is None or not validate_args:
      return identity_multiplier
    # validate_args is necessarily True here (the early return above covers
    # the other case), so attach the positivity assertion unconditionally.
    return control_flow_ops.with_dependencies(
        [check_ops.assert_positive(identity_multiplier)],
        identity_multiplier)

  def _preprocess_tril(self, identity_multiplier, diag, tril, event_ndims):
    """Helper to preprocess a lower triangular matrix."""
    tril = array_ops.matrix_band_part(tril, -1, 0)  # Zero out TriU.
    if identity_multiplier is None and diag is None:
      return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)
    # Fold the identity multiplier and/or diag terms into tril's diagonal,
    # so a single Cholesky-style operator represents the whole base matrix.
    new_diag = array_ops.matrix_diag_part(tril)
    if identity_multiplier is not None:
      new_diag += identity_multiplier
    if diag is not None:
      new_diag += diag
    tril = array_ops.matrix_set_diag(tril, new_diag)
    return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)

  def _preprocess_diag(self, identity_multiplier, diag, event_ndims):
    """Helper to preprocess a diagonal matrix."""
    if identity_multiplier is not None:
      diag += identity_multiplier
    return self._process_matrix(diag, min_rank=1, event_ndims=event_ndims)

  def _process_matrix(self, matrix, min_rank, event_ndims):
    """Helper to __init__ which gets matrix in batch-ready form."""
    # Pad the matrix so that matmul works in the case of a matrix and vector
    # input. Keep track if the matrix was padded, to distinguish between a
    # rank 3 tensor and a padded rank 2 tensor.
    # TODO(srvasude): Remove side-effects from functions. Its currently unbroken
    # but error-prone since the function call order may change in the future.
    self._rank_two_event_ndims_one = math_ops.logical_and(
        math_ops.equal(array_ops.rank(matrix), min_rank),
        math_ops.equal(event_ndims, 1))
    left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    pad = array_ops.concat(
        [array_ops.ones(
            [left], dtype=dtypes.int32), array_ops.shape(matrix)],
        0)
    return array_ops.reshape(matrix, pad)

  def _infer_batch_ndims(self):
    """Return batch_ndims."""
    if self._is_only_identity_multiplier:
      return 0
    # The real batch dims is one less when we pad in the case of event_ndims =
    # 1, and the rank of the underlying scale being 2. This allows us to have
    # non-negative sample dims.
    return (self._scale.rank() - 2 -
            array_ops.where(self._rank_two_event_ndims_one, 1, 0))

  @property
  def shift(self):
    """The `shift` `Tensor` in `Y = scale @ X + shift`."""
    return self._shift

  @property
  def scale(self):
    """The `scale` `LinearOperator` in `Y = scale @ X + shift`."""
    # TODO(srvasude): Remove this exception once TriLPlusVDVT is properly
    # implemented.
    if isinstance(self._scale, _TriLPlusVDVTLightweightOperatorPD):
      raise NotImplementedError("Cannot access scale when Tril+VDV.T.")
    return self._scale

  def _forward(self, x):
    y = x
    if self._is_only_identity_multiplier:
      # Scalar scale: plain broadcasting multiply, no shape gymnastics.
      y *= self._scale
      if self.shift is not None:
        return y + self.shift
      return y
    y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(y)
    y = self._scale.sqrt_matmul(y)
    y = self._shaper.undo_make_batch_of_event_sample_matrices(y, sample_shape)
    if self.shift is not None:
      return y + self.shift
    return y

  def _inverse(self, y):
    x = y
    if self.shift is not None:
      x -= self.shift
    if self._is_only_identity_multiplier:
      return x / self._scale
    x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(x)
    x = self._scale.sqrt_solve(x)
    x = self._shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
    return x

  def _inverse_log_det_jacobian(self, y):
    # The Jacobian is constant, so it is the negation of the forward one.
    return -self._forward_log_det_jacobian(y)

  def _forward_log_det_jacobian(self, x):
    if self._is_only_identity_multiplier:
      # TODO(jvdillon): We don't pad in this case and instead let the fldj be
      # applied via broadcast.
      d = math_ops.cast(array_ops.shape(x)[-1], dtype=self._scale.dtype)
      one = ops.convert_to_tensor(1., self._scale.dtype)
      return math_ops.log(math_ops.abs(self._scale)) * array_ops.where(
          math_ops.equal(self._shaper.event_ndims, 0), one, d)
    fldj = self._scale.sqrt_log_abs_det()
    # We need to squeeze off the padded dimension.
    start = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    return array_ops.reshape(fldj, array_ops.shape(fldj)[start:])
| apache-2.0 |
Freso/picard | picard/formats/wav.py | 2 | 1666 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2007 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import wave
from picard import log
from picard.file import File
from picard.metadata import Metadata
from picard.util import encode_filename
class WAVFile(File):
    """Read-only support for Microsoft WAVE files.

    Plain WAVE carries no standard tagging scheme, so only technical
    metadata is read from the header and nothing is written on save.
    """
    EXTENSIONS = [".wav"]
    NAME = "Microsoft WAVE"
    _File = None

    def _load(self, filename):
        """Read technical metadata (channels, bit depth, sample rate,
        length) from the WAVE header and return it as `Metadata`."""
        log.debug("Loading file %r", filename)
        f = wave.open(encode_filename(filename), "rb")
        try:
            metadata = Metadata()
            metadata['~channels'] = f.getnchannels()
            metadata['~bits_per_sample'] = f.getsampwidth() * 8
            metadata['~sample_rate'] = f.getframerate()
            # Duration in milliseconds: frames / (frames per second).
            metadata.length = 1000 * f.getnframes() / f.getframerate()
            metadata['~format'] = 'Microsoft WAVE'
        finally:
            # wave.open() does not close the handle for us; close it even
            # when header parsing raises, to avoid leaking the descriptor.
            f.close()
        self._add_path_to_metadata(metadata)
        return metadata

    def _save(self, filename, metadata):
        """Saving is a no-op: plain WAVE files carry no tags."""
        log.debug("Saving file %r", filename)
| gpl-2.0 |
UIA-CAIR/DeepRTS | coding/test.py | 1 | 3205 | import Scenarios
import Agents
import torch
import imageio
import pygame
import os
from datetime import datetime
import numpy
# Human-readable names for the DeepRTS discrete action ids: 1-2 change the
# selected unit, 3-10 move it, 11-12 interact (attack/harvest), 13-15 build
# structures, 16 is a no-op.  NOTE(review): this table appears to be unused
# below -- presumably kept for debugging/logging.
action_names = {
    1: "Previous Unit",
    2: "Next Unit",
    3: "Move Left",
    4: "Move Right",
    5: "Move Up",
    6: "Move Down",
    7: "Move Up Left",
    8: "Move Up Right",
    9: "Move Down Left",
    10: "Move Down Right",
    11: "Attack",
    12: "Harvest",
    13: "Build 0",
    14: "Build 1",
    15: "Build 2",
    16: "No Action"
}
if __name__ == "__main__":
    # Evaluate saved agents against a random opponent in DeepRTS, logging
    # win/loss outcomes and episode durations, and recording the first
    # trial of each agent as a GIF.
    now = datetime.now()
    now_string = now.strftime("%d-%m-%Y %H-%M-%S")
    directory = "Videos " + now_string
    test_directory = "Tests"
    test_path = os.path.join(os.getcwd(), test_directory)
    # Checkpoint base names (without ".pt") to evaluate.
    files = ["NN_700"]
    results_path = os.path.join(os.getcwd(), "Results")
    recording_path = os.path.join(results_path, directory)
    log_path = os.path.join(recording_path, "log.txt")
    os.mkdir(recording_path)
    # NOTE(review): `log` is opened but never written to or closed.
    log = open(log_path, "w+")
    # environment; uncapped FPS/UPS so episodes run as fast as possible.
    env = Scenarios.ImageToPyTorch(Scenarios.Scenario182({}))
    env.game.set_max_fps(99999999)
    env.game.set_max_ups(99999999)
    TRIALS = 100
    for file in files:
        file_path = os.path.join(test_path, file + ".pt")
        results_directory = file + "-Random"
        results_path = os.path.join(test_path, results_directory)
        os.mkdir(results_path)
        outcomes_path = os.path.join(results_path, "outcomes.txt")
        durations_path = os.path.join(results_path, "durations.txt")
        # NOTE(review): these files are flushed after each write but never
        # explicitly closed.
        outcomes_file = open(outcomes_path, "w+")
        durations_file = open(durations_path, "w+")
        # agents: trained network (player 1) vs random baseline (player 2)
        state_size = env.observation_space.shape
        action_size = env.action_space.n
        # 4410 is presumably the flattened observation size -- TODO confirm.
        agent_a = Agents.SmallAgent(4410, action_size)
        agent_a.load(file_path)
        agent_b = Agents.RandomAgent()
        for trial in range(TRIALS):
            state = env.reset()
            flat_state = state.flatten()
            # video stuff: frame files captured during the first trial only
            filenames = []
            terminal = False
            changed = False
            count = 0
            # play game
            while not terminal:
                if trial == 0:
                    # Only capture a frame when the observation changed,
                    # to keep the GIF short.
                    if changed:
                        # save the current window
                        window = pygame.display.get_surface()
                        image_name = "image_" + str(count) + ".jpeg"
                        image_path = os.path.join(results_path, image_name)
                        pygame.image.save(window, image_path)
                        filenames.append(image_path)
                        count += 1
                # AI for player 1 (trained agent, acts on flattened state)
                env.game.set_player(env.game.players[0])
                action = agent_a.get_action(flat_state, 0)
                next_state, _, terminal, _ = env.step(action)
                flat_next_state = next_state.flatten()
                # AI for player 2 (random agent)
                env.game.set_player(env.game.players[1])
                action = agent_b.get_action(state, 0)
                next_state, _, terminal, _ = env.step(action)
                changed = not numpy.array_equal(state, next_state)
                state = next_state
                flat_state = flat_next_state
            # Record the outcome: "1" when player 1 (the trained agent) won.
            if (env.game.players[0].is_defeated()):
                outcomes_file.write("0,")
                outcomes_file.flush()
            else:
                outcomes_file.write("1,")
                outcomes_file.flush()
            durations_file.write(str(env.game.get_episode_duration()) + ",")
            durations_file.flush()
            # Assemble the captured frames of the first trial into a GIF.
            if trial == 0:
                images = []
                for filename in filenames:
                    images.append(imageio.imread(filename))
                video_path = os.path.join(results_path, "video.gif")
imageio.mimsave(video_path, images) | mit |
tfiedor/perun | perun/fuzz/randomizer.py | 1 | 1994 | """Module that simply encapsulate all the random functions that are used in fuzzing,
with only one call of function from random package."""
__author__ = 'Matus Liscinsky'
import functools
import random
def random_repeats(repeats):
    """Decorator that calls the wrapped function a random number of times.

    The wrapped function is invoked between 1 and ``repeats`` times.
    Note that the return value of the wrapped function is NOT checked or
    passed anywhere.

    :param int repeats: the upper bound of number of repeats
    :return: decorator that takes function and repeats its call up to
        ``repeats`` times
    """
    def inner_wrapper(func):
        """Inner wrapper

        :param function func: wrapped function
        :return: innermost wrapper
        """
        # functools.wraps preserves __doc__ (as the previous manual
        # assignment did) and additionally __name__, __module__, etc.
        @functools.wraps(func)
        def innermost_wrapper(*args, **kwargs):
            """Innermost wrapper

            :param list args: list of arguments
            :param dict kwargs: list of keyword arguments
            """
            for _ in range(rand_from_range(1, repeats)):
                func(*args, **kwargs)
        return innermost_wrapper
    return inner_wrapper
def rand_from_range(start, stop):
    """Randomly pick an integer from the closed interval [start, stop].

    Mathematically expressed as `start` <= random_number <= `stop`.

    :param int start: lower bound of the interval
    :param int stop: upper limit of the interval
    :return int: random integer from given range
    """
    return random.randrange(start, stop + 1)
def rand_index(lst_len):
    """Randomly choose a valid index for a list of the given length.

    :param int lst_len: length of the list
    :return int: random integer that represents valid index of element in list
    """
    last_valid = lst_len - 1
    return rand_from_range(0, last_valid)
def rand_choice(lst):
    """Return a randomly selected element of a list.

    :param list lst: the list from which the element will be selected
    :return: element of list on random index
    """
    # rand_index(len(lst)) == rand_from_range(0, len(lst) - 1), so the
    # selection is identical to indexing with a random valid index.
    return lst[rand_index(len(lst))]
| gpl-3.0 |
damonkohler/sl4a | python-build/python-libs/gdata/src/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py | 237 | 4878 | """TLS Lite + asyncore."""
import asyncore
from gdata.tlslite.TLSConnection import TLSConnection
from AsyncStateMachine import AsyncStateMachine
class TLSAsyncDispatcherMixIn(AsyncStateMachine):
    """This class can be "mixed in" with an
    L{asyncore.dispatcher} to add TLS support.
    This class essentially sits between the dispatcher and the select
    loop, intercepting events and only calling the dispatcher when
    applicable.
    In the case of handle_read(), a read operation will be activated,
    and when it completes, the bytes will be placed in a buffer where
    the dispatcher can retrieve them by calling recv(), and the
    dispatcher's handle_read() will be called.
    In the case of handle_write(), the dispatcher's handle_write() will
    be called, and when it calls send(), a write operation will be
    activated.
    To use this class, you must combine it with an asyncore.dispatcher,
    and pass in a handshake operation with setServerHandshakeOp().
    Below is an example of using this class with medusa.  This class is
    mixed in with http_channel to create http_tls_channel.  Note:
    1. the mix-in is listed first in the inheritance list
    2. the input buffer size must be at least 16K, otherwise the
    dispatcher might not read all the bytes from the TLS layer,
    leaving some bytes in limbo.
    3. IE seems to have a problem receiving a whole HTTP response in a
    single TLS record, so HTML pages containing '\\r\\n\\r\\n' won't
    be displayed on IE.
    Add the following text into 'start_medusa.py', in the 'HTTP Server'
    section::
        from tlslite.api import *
        s = open("./serverX509Cert.pem").read()
        x509 = X509()
        x509.parse(s)
        certChain = X509CertChain([x509])
        s = open("./serverX509Key.pem").read()
        privateKey = parsePEMKey(s, private=True)
        class http_tls_channel(TLSAsyncDispatcherMixIn,
                               http_server.http_channel):
            ac_in_buffer_size = 16384
            def __init__ (self, server, conn, addr):
                http_server.http_channel.__init__(self, server, conn, addr)
                TLSAsyncDispatcherMixIn.__init__(self, conn)
                self.tlsConnection.ignoreAbruptClose = True
                self.setServerHandshakeOp(certChain=certChain,
                                          privateKey=privateKey)
        hs.channel_class = http_tls_channel
    If the TLS layer raises an exception, the exception will be caught
    in asyncore.dispatcher, which will call close() on this class.  The
    TLS layer always closes the TLS connection before raising an
    exception, so the close operation will complete right away, causing
    asyncore.dispatcher.close() to be called, which closes the socket
    and removes this instance from the asyncore loop.
    """

    def __init__(self, sock=None):
        AsyncStateMachine.__init__(self)
        if sock:
            self.tlsConnection = TLSConnection(sock)
        # Calculate the sibling I'm being mixed in with.
        # This is necessary since we override functions
        # like readable(), handle_read(), etc., but we
        # also want to call the sibling's versions.
        for cl in self.__class__.__bases__:
            if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine:
                self.siblingClass = cl
                break
        else:
            raise AssertionError()

    def readable(self):
        # The state machine takes priority; only defer to the sibling
        # dispatcher when it has no opinion (returns None).
        result = self.wantsReadEvent()
        if result is not None:
            return result
        return self.siblingClass.readable(self)

    def writable(self):
        result = self.wantsWriteEvent()
        if result is not None:
            return result
        return self.siblingClass.writable(self)

    def handle_read(self):
        # Route the select-loop event into the TLS state machine.
        self.inReadEvent()

    def handle_write(self):
        self.inWriteEvent()

    def outConnectEvent(self):
        self.siblingClass.handle_connect(self)

    def outCloseEvent(self):
        asyncore.dispatcher.close(self)

    def outReadEvent(self, readBuffer):
        # Stash the decrypted bytes so the sibling can fetch them via recv().
        self.readBuffer = readBuffer
        self.siblingClass.handle_read(self)

    def outWriteEvent(self):
        self.siblingClass.handle_write(self)

    def recv(self, bufferSize=16384):
        """Return the decrypted bytes buffered by outReadEvent().

        The sibling must request at least 16K (see class docstring) and
        may only call this once per read event.
        """
        if bufferSize < 16384 or self.readBuffer is None:
            raise AssertionError()
        returnValue = self.readBuffer
        self.readBuffer = None
        return returnValue

    def send(self, writeBuffer):
        # Activate a TLS write operation; report the whole buffer as sent.
        self.setWriteOp(writeBuffer)
        return len(writeBuffer)

    def close(self):
        if hasattr(self, "tlsConnection"):
            self.setCloseOp()
        else:
            asyncore.dispatcher.close(self)
| apache-2.0 |
dset0x/invenio-deposit | tests/test_deposit_form.py | 7 | 15078 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the WebDeposit Form """
import copy
from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
class WebDepositFormTest(InvenioTestCase):
    """Unit tests for WebDepositForm.

    Covers autocomplete routing, loading form/object/JSON data, dynamic
    field lists, post-processing, field flags/messages and nested field
    enclosures.
    """
    def setUp(self):
        """Build the TestForm fixture class and matching data fixtures."""
        from invenio_deposit.form import WebDepositForm
        from invenio_deposit import fields
        from werkzeug import MultiDict
        def reset_processor(form, field, submit=False, fields=None):
            # Post-processor fixture: overwrites field values so tests can
            # detect that processors ran and which fields were passed in.
            if field.name == 'related_identifier-0-scheme':
                if fields and 'related_identifier-0-scheme' in fields:
                    field.data = 'RESET TEST FIELDS'
                    return
            field.data = 'RESET'
        def dummy_autocomplete(form, field, term, limit=50):
            # Autocomplete fixture: only the term 'test' yields matches.
            # NOTE(review): map(...)[:limit] relies on Python 2's
            # list-returning map(); it would fail on Python 3.
            if term == 'test':
                return map(
                    lambda x: field.name + '-' + str(x),
                    range(0, 100)
                )[:limit]
            return []
        class IdentifierTestForm(WebDepositForm):
            scheme = fields.StringField(
                processors=[reset_processor],
                autocomplete_fn=dummy_autocomplete,
            )
            identifier = fields.StringField()
            def post_process_identifier(self, form, field, submit=False,
                                        fields=None):
                # Copies the identifier into the scheme during post-processing.
                form.scheme.data = field.data
        class TestForm(WebDepositForm):
            title = fields.StringField(
                processors=[reset_processor],
                autocomplete_fn=dummy_autocomplete,
            )
            subtitle = fields.StringField()
            related_identifier = fields.DynamicFieldList(
                fields.FormField(IdentifierTestForm)
            )
            keywords = fields.DynamicFieldList(fields.StringField())
        self.form_class = TestForm
        # Flat HTTP-style form data (keys use the -<index>- naming scheme).
        self._form_data = MultiDict({
            'title': 'my title',
            'subtitle': 'my subtitle',
            'related_identifier-0-scheme': 'doi',
            'related_identifier-0-identifier': '10.1234/1',
            'related_identifier-1-scheme': 'orcid',
            'related_identifier-1-identifier': '10.1234/2',
            'keywords-0': 'kw1',
            'keywords-1': 'kw2',
        })
        # The same data expressed as a nested Python structure.
        self._object_data = {
            'title': 'my title',
            'subtitle': 'my subtitle',
            'related_identifier': [
                {'scheme': 'doi', 'identifier': '10.1234/1', },
                {'scheme': 'orcid', 'identifier': '10.1234/2'},
            ],
            'keywords': ['kw1', 'kw2'],
        }
    def multidict(self, d):
        """Wrap a plain dict in a werkzeug MultiDict."""
        from werkzeug import MultiDict
        return MultiDict(d)
    @property
    def object_data(self):
        # Deep copy so tests can mutate the fixture freely.
        return copy.deepcopy(self._object_data)
    @property
    def form_data(self):
        # Deep copy so tests can mutate the fixture freely.
        return copy.deepcopy(self._form_data)
    def test_autocomplete_routing(self):
        """Autocomplete requests reach the right field and honour limits."""
        form = self.form_class()
        self.assertEqual(
            form.autocomplete('title', 'Nothing', limit=3),
            []
        )
        self.assertEqual(
            form.autocomplete('title', 'test', limit=3),
            ['title-0', 'title-1', 'title-2']
        )
        self.assertEqual(
            len(form.autocomplete('title', 'test', limit=51)),
            51
        )
        self.assertEqual(
            len(form.autocomplete('title', 'test', limit=200)),
            100
        )
        self.assertEqual(
            form.autocomplete('unexistingfield', 'test', limit=3),
            None
        )
        self.assertEqual(
            form.autocomplete('related_identifier-0-scheme', 'test', limit=2),
            ['related_identifier-0-scheme-0',
             'related_identifier-0-scheme-1', ]
        )
    def test_loading_objectdata(self):
        """Object data passed as kwargs round-trips through form.data."""
        form = self.form_class(**self.object_data)
        self.assertEqual(form.data, self.object_data)
    def test_getting_jsondata(self):
        """form.json_data serialises dates to ISO strings."""
        from invenio_deposit import fields
        from invenio_deposit.form import WebDepositForm
        from datetime import date
        class RelatedDatesForm(WebDepositForm):
            date = fields.Date()
        class TestForm(WebDepositForm):
            dates = fields.DynamicFieldList(
                fields.FormField(RelatedDatesForm)
            )
        object_data = {'dates': [
            {'date': date(2002, 1, 1)},
            {'date': date(2013, 1, 1)},
        ]}
        json_data = {'dates': [
            {'date': '2002-01-01'},
            {'date': '2013-01-01'},
        ]}
        form = TestForm(
            formdata=self.multidict({
                'dates-0-date': '2002-01-01',
                'dates-1-date': '2013-01-01',
            })
        )
        self.assertEqual(form.data, object_data)
        self.assertEqual(form.json_data, json_data)
    def test_loading_jsondata(self):
        """Field-enclosure values may also be sent as a JSON structure."""
        # For field enclosures values may also be sent as a json structure
        form = self.form_class(formdata=self.multidict(self.object_data))
        self.assertEqual(form.data, self.object_data)
        self.assertTrue(form.validate())
    # Skip test due to changed API
    # def test_loading_invalid_jsondata(self):
    #     data = self.object_data
    #     data['unknownkey'] = "Test"
    #     # For field enclosures values may also be sent as a json structure
    #     form = self.form_class(formdata=self.multidict(data))
    #     self.assertFalse(form.validate())
    def test_loading_formdata(self):
        """Form data wins over object data; missing fields become empty."""
        form = self.form_class(formdata=self.form_data)
        self.assertEqual(form.data, self.object_data)
        # Form data fields not specified is assumed by
        # WTForms to be empty, and will overwrite
        # object data.
        modified_data = self.object_data
        modified_data['title'] += "a"
        modified_data['subtitle'] += "a"
        modified_data['related_identifier'][0]['scheme'] += "a"
        modified_data['related_identifier'][1]['scheme'] += "a"
        modified_formdata = self.form_data
        del modified_formdata['subtitle']
        expected_data = self.object_data
        expected_data['subtitle'] = u''
        form = self.form_class(formdata=modified_formdata, **modified_data)
        self.assertEqual(form.data, expected_data)
    def test_update_list_element(self):
        """Partial form data updates a single list element in place."""
        new_title = 'new title'
        new_scheme = 'new scheme'
        expected_data = self.object_data
        expected_data['title'] = new_title
        expected_data['related_identifier'][1]['scheme'] = new_scheme
        formdata = self.multidict({
            'title': new_title,
            'related_identifier-1-scheme': new_scheme,
        })
        form = self.form_class(formdata=formdata, **self.object_data)
        form.reset_field_data(exclude=formdata.keys())
        self.assertEqual(form.data, expected_data)
    def test_add_list_element(self):
        """Indices past the current end grow the list, padding with Nones."""
        new_title = 'new title'
        new_scheme = 'new scheme'
        expected_data = self.object_data
        expected_data['title'] = new_title
        expected_data['related_identifier'].append({'scheme': None,
                                                    'identifier': None})
        expected_data['related_identifier'].append({'scheme': new_scheme,
                                                    'identifier': None})
        formdata = self.multidict({
            'title': new_title,
            'related_identifier-3-scheme': new_scheme,
        })
        form = self.form_class(formdata=formdata, **self.object_data)
        form.reset_field_data(exclude=formdata.keys())
        self.assertEqual(form.data, expected_data)
    def test_new_list_element(self):
        """A full list sent in form data replaces the existing list."""
        new_title = 'new title'
        new_list = [
            {'scheme': 'a', 'identifier': 'a'},
            {'scheme': 'b', 'identifier': 'b'}
        ]
        expected_data = self.object_data
        expected_data['title'] = new_title
        expected_data['related_identifier'] = new_list
        formdata = self.multidict({
            'title': new_title,
            'related_identifier': new_list,
        })
        form = self.form_class(formdata=formdata, **self.object_data)
        form.reset_field_data(exclude=formdata.keys())
        self.assertEqual(form.data, expected_data)
    def test_extract_indices(self):
        """_extract_indices finds list indices even in malformed keys."""
        formdata = self.multidict({
            'related_identifier-1-scheme': '',
            'related_identifier-1-name': '',
            'related_identifier-4-name': '',
            'related_identifier-0': '',
            'related_identifier-0-name-3;': '',
        })
        form = self.form_class()
        indices = sorted(set(form.related_identifier._extract_indices(
            form.related_identifier.name,
            formdata)
        ))
        self.assertEqual(indices, [0, 1, 4])
    def test_postprocess(self):
        """post_process() runs processors on every field."""
        form = self.form_class(formdata=self.form_data, **self.object_data)
        form.post_process()
        expected_data = self.object_data
        expected_data['title'] = "RESET"
        expected_data['related_identifier'][1]['scheme'] = \
            expected_data['related_identifier'][1]['identifier']
        expected_data['related_identifier'][0]['scheme'] = \
            expected_data['related_identifier'][0]['identifier']
        self.assertEqual(form.data, expected_data)
    def test_postprocess_exclude(self):
        """post_process(formfields=...) only touches the listed fields."""
        form_data = self.multidict({
            'related_identifier-0-scheme': 'test'
        })
        form = self.form_class(formdata=form_data, **self.object_data)
        form.reset_field_data(exclude=form_data.keys())
        form.post_process(formfields=form_data.keys())
        expected_data = self.object_data
        expected_data['related_identifier'][0]['scheme'] = "RESET TEST FIELDS"
        self.assertEqual(form.data, expected_data)
    def test_flags(self):
        """Flags can be read with get_flags() and restored with set_flags()."""
        form = self.form_class(**self.object_data)
        form.subtitle.flags.hidden = True
        form.related_identifier.flags.hidden = True
        form.related_identifier[0].flags.hidden = True
        form.related_identifier[0].scheme.flags.hidden = True
        expected_flags = {
            'title': [],
            'subtitle': ['hidden'],
            'related_identifier': ['hidden'],
            'related_identifier-0': ['hidden'],
            'related_identifier-0-scheme': ['hidden'],
            'related_identifier-0-identifier': [],
            'related_identifier-1': [],
            'related_identifier-1-identifier': [],
            'related_identifier-1-scheme': [],
            'keywords': [],
            'keywords-0': [],
            'keywords-1': [],
        }
        self.assertEqual(
            form.get_flags(),
            expected_flags
        )
        form = self.form_class(**self.object_data)
        form.set_flags(copy.deepcopy(expected_flags))
        self.assertEqual(
            form.get_flags(),
            expected_flags
        )
    def test_messages(self):
        """Messages attach per field; the worst state wins per field."""
        form = self.form_class(**self.object_data)
        form.title.add_message('t1', state='info')
        form.title.add_message('t2', state='warning')
        form.related_identifier.add_message('t3', state='warning')
        form.related_identifier[0].add_message('t4', state='warning')
        form.related_identifier[0]['scheme'].add_message('t5', state='warning')
        self.assertEqual(
            form.messages,
            {
                'title': {'state': 'warning', 'messages': ['t1', 't2']},
                'subtitle': {},
                'related_identifier': {'state': 'warning', 'messages': ['t3']},
                'related_identifier-0': {'state': 'warning',
                                         'messages': ['t4']},
                'related_identifier-0-scheme': {'state': 'warning',
                                                'messages': ['t5']},
                'related_identifier-0-identifier': {},
                'related_identifier-1': {},
                'related_identifier-1-scheme': {},
                'related_identifier-1-identifier': {},
                'keywords': {},
                'keywords-0': {},
                'keywords-1': {},
            }
        )
    def test_nested(self):
        """Nested enclosures honour custom separators (';' and ':')."""
        from invenio_deposit import fields
        from invenio_deposit.form import WebDepositForm
        class NestedNestedForm(WebDepositForm):
            id = fields.StringField()
        class NestedForm(WebDepositForm):
            id = fields.StringField()
            fieldlist = fields.DynamicFieldList(
                fields.FormField(NestedNestedForm, separator=':')
            )
        class TestForm(WebDepositForm):
            formfield = fields.FormField(NestedForm, separator=';')
            fieldlist = fields.DynamicFieldList(
                fields.DynamicFieldList(
                    fields.StringField()
                )
            )
        formdata = {
            'formfield;id': 'a',
            'formfield;fieldlist-0:id': 'b',
            'formfield;fieldlist-1:id': 'c',
            'fieldlist-0-0': 'd',
            'fieldlist-0-1': 'e',
            'fieldlist-1-0': 'f',
            'fieldlist-1-1': 'g',
        }
        object_data = {
            'formfield': {
                'id': 'a',
                'fieldlist': [
                    {'id': 'b'},
                    {'id': 'c'},
                ]
            },
            'fieldlist': [
                ['d', 'e'],
                ['f', 'g']
            ]
        }
        form = TestForm(formdata=self.multidict(object_data))
        self.assertEqual(form.data, object_data)
        self.assertTrue(form.validate())
        form = TestForm(formdata=self.multidict(formdata))
        self.assertEqual(form.data, object_data)
        self.assertTrue(form.validate())
    # Skip these tests due to changed API
    # data = object_data.copy()
    # data['fieldlist'] = {'somefield': 'should have been a list'}
    # form = TestForm(formdata=self.multidict(data))
    # self.assertFalse(form.validate())
    # data = object_data.copy()
    # data['formfield'] = "should have been a dict"
    # form = TestForm(formdata=self.multidict(data))
    # self.assertFalse(form.validate())
# Aggregate the test cases and allow running this module directly.
TEST_SUITE = make_test_suite(WebDepositFormTest)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/audiodev.py | 286 | 7597 | """Classes for manipulating audio devices (currently only for Sun and SGI)"""
from warnings import warnpy3k
warnpy3k("the audiodev module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
__all__ = ["error","AudioDev"]
class error(Exception):
    """Exception raised for audio device failures in this module."""
    pass
class Play_Audio_sgi:
    """Audio playback on SGI IRIX via the 'al'/'AL' modules (Python 2)."""
    # Private instance variables
##      if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
##              params, config, inited_outrate, inited_width, \
##              inited_nchannels, port, converter, classinited: private
    classinited = 0
    frameratelist = nchannelslist = sampwidthlist = None
    def initclass(self):
        # Build the (raw value, AL constant) lookup tables once per class.
        import AL
        self.frameratelist = [
            (48000, AL.RATE_48000),
            (44100, AL.RATE_44100),
            (32000, AL.RATE_32000),
            (22050, AL.RATE_22050),
            (16000, AL.RATE_16000),
            (11025, AL.RATE_11025),
            ( 8000,  AL.RATE_8000),
            ]
        self.nchannelslist = [
            (1, AL.MONO),
            (2, AL.STEREO),
            (4, AL.QUADRO),
            ]
        self.sampwidthlist = [
            (1, AL.SAMPLE_8),
            (2, AL.SAMPLE_16),
            (3, AL.SAMPLE_24),
            ]
        self.classinited = 1
    def __init__(self):
        import al, AL
        if not self.classinited:
            self.initclass()
        self.oldparams = []
        self.params = [AL.OUTPUT_RATE, 0]
        self.config = al.newconfig()
        self.inited_outrate = 0
        self.inited_width = 0
        self.inited_nchannels = 0
        self.converter = None
        self.port = None
        return
    def __del__(self):
        # Best-effort cleanup: close the port and restore device parameters.
        if self.port:
            self.stop()
        if self.oldparams:
            import al, AL
            al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
            self.oldparams = []
    def wait(self):
        """Block until all queued samples have been played, then stop."""
        if not self.port:
            return
        import time
        while self.port.getfilled() > 0:
            time.sleep(0.1)
        self.stop()
    def stop(self):
        """Close the audio port and restore the saved device parameters."""
        if self.port:
            self.port.closeport()
            self.port = None
        if self.oldparams:
            import al, AL
            al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
            self.oldparams = []
    def setoutrate(self, rate):
        """Select the output sample rate; raises error for unknown rates."""
        for (raw, cooked) in self.frameratelist:
            if rate == raw:
                self.params[1] = cooked
                self.inited_outrate = 1
                break
        else:
            raise error, 'bad output rate'
    def setsampwidth(self, width):
        """Select the sample width in bytes; width 0 means u-law input."""
        for (raw, cooked) in self.sampwidthlist:
            if width == raw:
                self.config.setwidth(cooked)
                self.inited_width = 1
                break
        else:
            if width == 0:
                import AL
                self.inited_width = 0
                self.config.setwidth(AL.SAMPLE_16)
                # u-law data is converted to 16-bit linear before writing.
                self.converter = self.ulaw2lin
            else:
                raise error, 'bad sample width'
    def setnchannels(self, nchannels):
        """Select the channel count (1, 2 or 4)."""
        for (raw, cooked) in self.nchannelslist:
            if nchannels == raw:
                self.config.setchannels(cooked)
                self.inited_nchannels = 1
                break
        else:
            raise error, 'bad # of channels'
    def writeframes(self, data):
        """Write raw sample data, opening the port lazily on first call."""
        if not (self.inited_outrate and self.inited_nchannels):
            raise error, 'params not specified'
        if not self.port:
            import al, AL
            self.port = al.openport('Python', 'w', self.config)
            # Save the current device params so __del__/stop can restore them.
            self.oldparams = self.params[:]
            al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
            al.setparams(AL.DEFAULT_DEVICE, self.params)
        if self.converter:
            data = self.converter(data)
        self.port.writesamps(data)
    def getfilled(self):
        """Return the number of queued (unplayed) samples."""
        if self.port:
            return self.port.getfilled()
        else:
            return 0
    def getfillable(self):
        """Return how many samples can still be queued."""
        if self.port:
            return self.port.getfillable()
        else:
            return self.config.getqueuesize()
    # private methods
##      if 0: access *: private
    def ulaw2lin(self, data):
        # Convert u-law samples to 16-bit linear for the AL port.
        import audioop
        return audioop.ulaw2lin(data, 2)
class Play_Audio_sun:
    """Audio playback on Sun systems via the 'sunaudiodev' module (Python 2)."""
##      if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
##              inited_nchannels, converter: private
    def __init__(self):
        self.outrate = 0
        self.sampwidth = 0
        self.nchannels = 0
        self.inited_outrate = 0
        self.inited_width = 0
        self.inited_nchannels = 0
        self.converter = None
        self.port = None
        return
    def __del__(self):
        self.stop()
    def setoutrate(self, rate):
        """Record the output sample rate (applied when the port opens)."""
        self.outrate = rate
        self.inited_outrate = 1
    def setsampwidth(self, width):
        """Record the sample width in bytes (0 selects u-law encoding)."""
        self.sampwidth = width
        self.inited_width = 1
    def setnchannels(self, nchannels):
        """Record the channel count (applied when the port opens)."""
        self.nchannels = nchannels
        self.inited_nchannels = 1
    def writeframes(self, data):
        """Write raw sample data, opening and configuring the port lazily."""
        if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
            raise error, 'params not specified'
        if not self.port:
            import sunaudiodev, SUNAUDIODEV
            self.port = sunaudiodev.open('w')
            info = self.port.getinfo()
            info.o_sample_rate = self.outrate
            info.o_channels = self.nchannels
            if self.sampwidth == 0:
                info.o_precision = 8
                # NOTE(review): this sets an attribute on self, not on
                # info -- likely a long-standing typo in the stdlib.
                self.o_encoding = SUNAUDIODEV.ENCODING_ULAW
                # XXX Hack, hack -- leave defaults
            else:
                info.o_precision = 8 * self.sampwidth
                info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
            self.port.setinfo(info)
        if self.converter:
            data = self.converter(data)
        self.port.write(data)
    def wait(self):
        """Block until the device has drained all queued audio, then stop."""
        if not self.port:
            return
        self.port.drain()
        self.stop()
    def stop(self):
        """Flush any pending audio and close the port."""
        if self.port:
            self.port.flush()
            self.port.close()
            self.port = None
    def getfilled(self):
        """Return the number of queued (unplayed) samples."""
        if self.port:
            return self.port.obufcount()
        else:
            return 0
##      # Nobody remembers what this method does, and it's broken. :-(
##      def getfillable(self):
##          return BUFFERSIZE - self.getfilled()
def AudioDev():
    """Return a platform-appropriate audio player instance.

    Tries SGI ('al'), then Sun ('sunaudiodev'), then Mac ('Audio_mac');
    raises error if none of the platform modules is importable.
    """
    # Dynamically try to import and use a platform specific module.
    try:
        import al
    except ImportError:
        try:
            import sunaudiodev
            return Play_Audio_sun()
        except ImportError:
            try:
                import Audio_mac
            except ImportError:
                raise error, 'no audio device'
            else:
                return Audio_mac.Play_Audio_mac()
    else:
        return Play_Audio_sgi()
def test(fn = None):
    """Play an AIFF file (from argv[1] or a hard-coded default path)."""
    import sys
    if sys.argv[1:]:
        fn = sys.argv[1]
    else:
        fn = 'f:just samples:just.aif'
    import aifc
    af = aifc.open(fn, 'r')
    print fn, af.getparams()
    p = AudioDev()
    p.setoutrate(af.getframerate())
    p.setsampwidth(af.getsampwidth())
    p.setnchannels(af.getnchannels())
    # Roughly one second of frames per chunk.
    BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
    while 1:
        data = af.readframes(BUFSIZ)
        if not data: break
        print len(data)
        p.writeframes(data)
    p.wait()
# Run the demo player when executed as a script.
if __name__ == '__main__':
    test()
| gpl-2.0 |
eramirem/astroML | book_figures/chapter9/fig_photoz_tree.py | 3 | 3637 | """
Photometric Redshifts by Decision Trees
---------------------------------------
Figure 9.14
Photometric redshift estimation using decision-tree regression. The data is
described in Section 1.5.5. The training set consists of u, g , r, i, z
magnitudes of 60,000 galaxies from the SDSS spectroscopic sample.
Cross-validation is performed on an additional 6000 galaxies. The left panel
shows training error and cross-validation error as a function of the maximum
depth of the tree. For a number of nodes N > 13, overfitting is evident.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from astroML.datasets import fetch_sdss_specgals
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch data and prepare it for the computation
data = fetch_sdss_specgals()
# put magnitudes in a matrix
mag = np.vstack([data['modelMag_%s' % f] for f in 'ugriz']).T
z = data['z']
# train on ~60,000 points
mag_train = mag[::10]
z_train = z[::10]
# test on ~6,000 separate points
mag_test = mag[1::100]
z_test = z[1::100]
#------------------------------------------------------------
# Compute the cross-validation scores for several tree depths
depth = np.arange(1, 21)
rms_test = np.zeros(len(depth))
rms_train = np.zeros(len(depth))
i_best = 0
z_fit_best = None
for i, d in enumerate(depth):
    clf = DecisionTreeRegressor(max_depth=d, random_state=0)
    clf.fit(mag_train, z_train)
    z_fit_train = clf.predict(mag_train)
    z_fit = clf.predict(mag_test)
    # NOTE(review): mean(sqrt(err**2)) is the mean absolute error, not a
    # true RMS (that would be sqrt(mean(err**2))) -- kept as-is to match
    # the published figure.
    rms_train[i] = np.mean(np.sqrt((z_fit_train - z_train) ** 2))
    rms_test[i] = np.mean(np.sqrt((z_fit - z_test) ** 2))
    # Track the depth with the lowest test error ('<=' keeps the deepest tie).
    if rms_test[i] <= rms_test[i_best]:
        i_best = i
        z_fit_best = z_fit
best_depth = depth[i_best]
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(wspace=0.25,
                    left=0.1, right=0.95,
                    bottom=0.15, top=0.9)
# first panel: cross-validation
ax = fig.add_subplot(121)
ax.plot(depth, rms_test, '-k', label='cross-validation')
ax.plot(depth, rms_train, '--k', label='training set')
ax.set_xlabel('depth of tree')
ax.set_ylabel('rms error')
ax.yaxis.set_major_locator(plt.MultipleLocator(0.01))
ax.set_xlim(0, 21)
ax.set_ylim(0.009, 0.04)
ax.legend(loc=1)
# second panel: best-fit results
ax = fig.add_subplot(122)
ax.scatter(z_test, z_fit_best, s=1, lw=0, c='k')
ax.plot([-0.1, 0.4], [-0.1, 0.4], ':k')
ax.text(0.04, 0.96, "depth = %i\nrms = %.3f" % (best_depth, rms_test[i_best]),
        ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel(r'$z_{\rm true}$')
ax.set_ylabel(r'$z_{\rm fit}$')
ax.set_xlim(-0.02, 0.4001)
ax.set_ylim(-0.02, 0.4001)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.1))
plt.show()
| bsd-2-clause |
stacywsmith/ansible | lib/ansible/plugins/callback/actionable.py | 105 | 2683 | # (c) 2015, Andrew Gaffney <andrew@agaffney.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
    """Ansible stdout callback that only shows 'actionable' results.

    Task banners are deferred until a result is changed, failed or
    unreachable; skipped and unchanged (ok) results stay silent.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'actionable'
    def __init__(self):
        self.super_ref = super(CallbackModule, self)
        self.super_ref.__init__()
        # The banner for the most recent task, printed lazily on demand.
        self.last_task = None
        self.shown_title = False
    def v2_playbook_on_handler_task_start(self, task):
        # Handler banners always print immediately.
        self.super_ref.v2_playbook_on_handler_task_start(task)
        self.shown_title = True
    def v2_playbook_on_task_start(self, task, is_conditional):
        # Defer the banner; remember the task so it can be printed later.
        self.last_task = task
        self.shown_title = False
    def display_task_banner(self):
        # Print the deferred task banner at most once per task.
        if not self.shown_title:
            self.super_ref.v2_playbook_on_task_start(self.last_task, None)
            self.shown_title = True
    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.display_task_banner()
        self.super_ref.v2_runner_on_failed(result, ignore_errors)
    def v2_runner_on_ok(self, result):
        # Only 'changed' results are worth showing.
        if result._result.get('changed', False):
            self.display_task_banner()
            self.super_ref.v2_runner_on_ok(result)
    def v2_runner_on_unreachable(self, result):
        self.display_task_banner()
        self.super_ref.v2_runner_on_unreachable(result)
    def v2_runner_on_skipped(self, result):
        # Suppress skipped results entirely.
        pass
    def v2_playbook_on_include(self, included_file):
        # Suppress include notices.
        pass
    def v2_runner_item_on_ok(self, result):
        if result._result.get('changed', False):
            self.display_task_banner()
            self.super_ref.v2_runner_item_on_ok(result)
    def v2_runner_item_on_skipped(self, result):
        pass
    def v2_runner_item_on_failed(self, result):
        self.display_task_banner()
        self.super_ref.v2_runner_item_on_failed(result)
| gpl-3.0 |
weggert/calendar_sync | calendar_sync.py | 1 | 6753 | #!/usr/bin/python
import fileinput
import os
class CalendarManager:
    """Creates and clears events in a macOS Calendar via osascript."""
    def __init__(self, calendar_name, dry_run, include_descriptions):
        # dry_run skips the actual osascript calls but still prints summaries.
        self.calendar_name = calendar_name
        self.dry_run = dry_run
        self.include_descriptions = include_descriptions
    def clear_calendar(self):
        """Delete every event from the target calendar."""
        command = """
            osascript -e 'tell application "Calendar" to tell calendar "%s"
            set eventList to every event
            repeat with e in eventList
            delete e
            end repeat
            end tell'
            """
        command = command % self.calendar_name
        if not self.dry_run:
            os.system(command)
        print 'Calendar cleared'
    def create_calendar_event(self, summary, start_date, end_date, all_day, location, description):
        """Create a single calendar event via AppleScript."""
        if not self.include_descriptions:
            description = ''
        # NOTE(review): values are interpolated unescaped into AppleScript;
        # quotes in summaries/descriptions would break the command.
        properties = 'start date:theStartDate, end date:theEndDate, summary:"%s", description:"%s", location:"%s"'\
            % (summary, description, location)
        if all_day is True:
            properties += ', allday event:true'
        command = """
            osascript -e 'set theStartDate to date "%s"
            set theEndDate to date "%s"
            tell application "Calendar" to tell calendar "%s"
            set theEvent to make new event with properties {%s}
            end tell'
            """
        command = command % (start_date, end_date, self.calendar_name, properties)
        if not self.dry_run:
            os.system(command)
        self.print_summary(summary, start_date, end_date, all_day, location, description)
    @staticmethod
    def print_summary(summary, start_date, end_date, all_day, location, description):
        # Human-readable trace of the event that was (or would be) created.
        print 'Summary: ' + summary
        print '    Start: ' + start_date
        print '    End: ' + end_date
        print '    All Day: ' + str(all_day)
        print '    Location: ' + location
        print '    Description: ' + description
        print ''
class CalendarSummaryProcessor:
    """Parses a textual calendar export from stdin/files and mirrors the
    events into a macOS calendar through CalendarManager."""
    class LineType:
        # Enumeration of the recognised input line categories.
        EventStart, Summary, Location, Date, Time, Where, Notes, Status, Other = range(9)
        def __init__(self):
            pass
    def __init__(self, calendar_name, dry_run, include_descriptions):
        self.calendar_manager = CalendarManager(
            calendar_name=calendar_name,
            dry_run=dry_run,
            include_descriptions=include_descriptions)
        self.reset()
        # NOTE(review): the assignments below duplicate what reset() just did.
        self.processing_event = False
        self.first_description_line = True
        self.last_description_line_was_blank = False
        self.summary = ''
        self.date = ''
        self.time = ''
        self.location = ''
        self.description = ''
    def reset(self):
        """Clear all per-event parsing state."""
        self.processing_event = False
        self.first_description_line = True
        self.last_description_line_was_blank = False
        self.summary = ''
        self.date = ''
        self.time = ''
        self.location = ''
        self.description = ''
    def process_summary(self):
        """Clear the calendar, then stream events from fileinput into it."""
        self.calendar_manager.clear_calendar()
        for input_line in fileinput.input():
            line_type = self.get_line_type(input_line)
            if line_type is self.LineType.EventStart:
                # A new event begins: flush the previous one (unless it is
                # one of the recurring meetings we deliberately skip).
                if self.processing_event:
                    if self.summary != 'Remote'\
                            and self.summary != 'IP Video - Daily Scrum'\
                            and self.summary != 'Cloud Team Scrum':
                        start_date, end_date, all_day = self.get_start_end_dates(self.date, self.time)
                        self.calendar_manager.create_calendar_event(
                            self.summary, start_date, end_date, all_day, self.location, self.description)
                    self.reset()
            if line_type is self.LineType.Summary:
                # Strip the 'Summary:' prefix (9 characters).
                self.summary = self.sanitize_line(input_line.strip()[9:])
                self.processing_event = True
            if line_type is self.LineType.Date:
                self.date = input_line.strip()[6:]
            if line_type is self.LineType.Time:
                self.time = input_line.strip()[6:]
            if line_type is self.LineType.Location:
                self.location = self.sanitize_line(input_line.strip()[10:])
                self.processing_event = True
            if line_type is self.LineType.Other:
                # Accumulate free-form description lines, collapsing runs
                # of blank lines into a single one.
                description_line = self.sanitize_line(input_line.strip())
                if len(description_line) > 0:
                    self.description = self.description + description_line + '\n'
                    self.last_description_line_was_blank = False
                else:
                    if not self.first_description_line and not self.last_description_line_was_blank:
                        self.description += '\n'
                        self.last_description_line_was_blank = True
                self.first_description_line = False
        # Flush the final event after the input is exhausted.
        if self.processing_event:
            start_date, end_date, all_day = self.get_start_end_dates(self.date, self.time)
            self.calendar_manager.create_calendar_event(
                self.summary, start_date, end_date, all_day, self.location, self.description)
    @staticmethod
    def get_start_end_dates(date, time):
        """Combine 'X to Y' date/time ranges; detect all-day events."""
        dates = date.split(" to ")
        times = time.split(" to ")
        start_date = dates[0] + ' ' + times[0]
        end_date = dates[1] + ' ' + times[1]
        # Midnight-to-midnight across different dates means an all-day event.
        all_day = False
        if times[0] == '12:00:00 AM' and times[1] == "12:00:00 AM" and dates[0] != dates[1]:
            all_day = True
        return start_date, end_date, all_day
    def get_line_type(self, input_line):
        """Classify an input line by its prefix."""
        if input_line.startswith('EVENT'):
            return self.LineType.EventStart
        if input_line.startswith('Summary:'):
            return self.LineType.Summary
        if input_line.startswith('Date:'):
            return self.LineType.Date
        if input_line.startswith('Time:'):
            return self.LineType.Time
        if input_line.startswith('Location:'):
            return self.LineType.Location
        if input_line.startswith('Where'):
            return self.LineType.Where
        if input_line.startswith('Notes'):
            return self.LineType.Notes
        if input_line.startswith('Status'):
            return self.LineType.Status
        return self.LineType.Other
    def process_named_line(self, input_line):
        # Return the sanitized text after the first ':' of a 'Name: value' line.
        colon_position = input_line.find(':')
        return self.sanitize_line(input_line[colon_position+1:].strip())
    @staticmethod
    def sanitize_line(input_line):
        # Drop quotes (they would break the AppleScript command) and a
        # decorative separator emitted by the exporter.
        return input_line.replace("'", "").replace('"', '').replace('*~*~*~*~*~*~*~*~*~*', '').strip()
# Entry point: mirror the calendar summary supplied on stdin (or as file
# arguments) into the 'Work Calendar' macOS calendar.
CalendarSummaryProcessor(calendar_name='Work Calendar',
                         dry_run=False,
                         include_descriptions=True).process_summary()
Goclis/pelican-blog | plugins/extract_toc/extract_toc.py | 28 | 1805 | # -*- coding: utf-8 -*-
"""
Extract Table of Content
========================
A Pelican plugin to extract table of contents (ToC) from `article.content` and
place it in its own `article.toc` variable for use in templates.
"""
from os import path
from bs4 import BeautifulSoup
from pelican import signals, readers, contents
try:
from pandoc_reader import PandocReader
except ImportError:
PandocReader = False
def extract_toc(content):
    """Pull the table of contents out of content._content into content.toc.

    Handles the ToC markup produced by the Markdown, reStructuredText and
    (optional) Pandoc readers; static content is left untouched.
    """
    if isinstance(content, contents.Static):
        return
    soup = BeautifulSoup(content._content,'html.parser')
    filename = content.source_path
    extension = path.splitext(filename)[1][1:]
    toc = None
    # default Markdown reader
    if not toc and readers.MarkdownReader.enabled and extension in readers.MarkdownReader.file_extensions:
        toc = soup.find('div', class_='toc')
        if toc: toc.extract()
    # default reStructuredText reader
    if not toc and readers.RstReader.enabled and extension in readers.RstReader.file_extensions:
        toc = soup.find('div', class_='contents topic')
        if toc: toc.extract()
        if toc:
            # Normalize the RST markup to match the Markdown ToC style.
            tag=BeautifulSoup(str(toc), 'html.parser')
            tag.div['class']='toc'
            tag.div['id']=''
            p=tag.find('p', class_='topic-title first')
            if p:p.extract()
            toc=tag
    # Pandoc reader (markdown and other formats)
    if not toc and PandocReader and PandocReader.enabled and extension in PandocReader.file_extensions:
        toc = soup.find('nav', id='TOC')
    if toc:
        toc.extract()
        content._content = soup.decode()
        content.toc = toc.decode()
        # Strip the <html>...</html> wrapper BeautifulSoup may have added.
        if content.toc.startswith('<html>'):
            content.toc = content.toc[12:-14]
def register():
    """Register the plugin with Pelican's content-init signal."""
    signals.content_object_init.connect(extract_toc)
| mit |
MattDevo/stk-code | tools/nightbuilder/utils.py | 20 | 3226 | #!/bin/python
# From Supertuxkart SVN revision $Revision$
# Copyright (C) 2012 Jean-manuel clemencon (samuncle)
################################################################################
import os
import sys
from time import gmtime, strftime
# import config
from config import *
class Cdir:
    """
    A class used to change the directory and reset it when it's destructed
    """
    #-------------------------------------------------------------------------------
    def __init__ (self, path):
        # Remember where we were, then switch to the requested directory.
        self.oriPath = os.getcwd()
        os.chdir(path)
    #-------------------------------------------------------------------------------
    def __del__ (self):
        # Restore the original working directory.
        # NOTE(review): relies on prompt CPython refcount-driven finalization;
        # __del__ timing is not guaranteed on other interpreters.
        os.chdir(self.oriPath)
class COLOR:
    """ANSI terminal escape codes for colored console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
def separator(color):
    """Return an 80-character horizontal rule wrapped in the given ANSI color."""
    rule = "-" * 80
    return "%s%s%s" % (color, rule, COLOR.ENDC)
#-------------------------------------------------------------------------------
# usage of the script. Displayed if -h is invoqued
def usage(error = ""):
if (error):
print "[error] " + error
h = [
" Options avaliables:",
" --bin # package the binary",
" --data # package the data",
" --clean # remove all packages and logs",
" --send # send the package via FTP",
" --force # force the build (even the revision hasn't changed)",
" --update # update the SVN",
" --web # html output"
" --job= # like -j for make",
" --help # display help",
]
for i in h:
print i
def getTime():
    """Return the current time formatted for log/status output.

    NOTE(review): the label hard-codes 'GMT+01' while gmtime() yields
    UTC -- confirm the intended timezone.
    """
    stamp_format = "%a, %d %b %Y %H:%M:%S GMT+01"
    return strftime(stamp_format, gmtime())
#-------------------------------------------------------------------------------
# Used to format output
def bufferedOutput(string, nb = 74):
space = (nb - len(string)) * " "
sys.stdout.write(string)
sys.stdout.flush()
return space
#-------------------------------------------------------------------------------
def parser(argv):
    """Parse the command line options into the global ARG dict.

    NOTE(review): ARG is expected to come from 'from config import *';
    only --help is currently applied here -- the remaining option
    handlers are still commented out.
    """
    # Fixed: getopt was used without ever being imported, so the first
    # call raised a NameError that the bare except silently turned into
    # an 'unrecognized option' message.  Also removed the leftover debug
    # lines (os.system("ls") and its print).
    import getopt
    try:
        opts, args = getopt.getopt(argv, "bdcsfuhj:", ["bin",
                                                       "data",
                                                       "clean",
                                                       "send",
                                                       "force",
                                                       "update",
                                                       "help",
                                                       "job="
                                                       ])
        for opt, args in opts:
            if opt in ("-h", "--help"):
                ARG["HELP"] = True
            """
            if opt in ("-b", "bin"):
                ARG["BIN"] = True
            if opt in ("-d", "data"):
                ARG["DATA"] = True
            if opt in ("-s", "send"):
                ARG["SEND"] = True
            if opt in ("-f", "force"):
                ARG["FORCE"] = True
            if opt in ("-u", "update"):
                ARG["UPDATE"] = True
            """
    # NOTE(review): kept the broad except for compatibility; it should
    # ideally be 'except getopt.GetoptError'.
    except:
        usage("unrecognized option")
| gpl-3.0 |
coastmap/geonode | geonode/people/utils.py | 33 | 3269 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.contrib.auth import get_user_model
from geonode import GeoNodeException
def get_default_user():
    """Return the first-created superuser account.

    Raises GeoNodeException when no superuser has been configured yet.
    """
    superusers = get_user_model().objects.filter(
        is_superuser=True).order_by('id')
    if not superusers.count():
        raise GeoNodeException('You must have an admin account configured '
                               'before importing data. '
                               'Try: django-admin.py createsuperuser')
    # Lowest id == first created, thanks to the order_by above.
    return superusers[0]
def get_valid_user(user=None):
    """Resolve *user* to a concrete, non-anonymous user instance.

    Accepts a user object, a username string, or None (which falls back
    to the first superuser via get_default_user()).  Raises
    GeoNodeException for anonymous users; a username that does not exist
    raises the user model's DoesNotExist.
    """
    if user is None:
        theuser = get_default_user()
    elif isinstance(user, basestring):
        # NOTE(review): 'basestring' is Python 2 only; this branch treats
        # the argument as a username and looks the account up.
        theuser = get_user_model().objects.get(username=user)
    elif user == user.get_anonymous():
        raise GeoNodeException('The user uploading files must not '
                               'be anonymous')
    else:
        theuser = user
    # FIXME: Pass a user in the unit tests that is not yet saved ;)
    # NOTE(review): assert is stripped under 'python -O'; this check is
    # advisory only.
    assert isinstance(theuser, get_user_model())
    return theuser
def format_address(street=None, zipcode=None, city=None, area=None, country=None):
    """Format a postal address as a single display string.

    US addresses (country == "USA") use the comma-separated
    "street, city, area zipcode, United States" form; any other country
    simply space-joins the truthy parts in street/zipcode/city/area/country
    order.  All parameters are optional strings.

    Fixes over the previous version:
    * a lone street (no city/area) no longer gains a spurious leading ", "
    * a lone zipcode no longer gains a leading space
    * a falsy country is skipped instead of crashing str.join with None
    """
    if country == "USA":
        # street / city / area joined by commas; zipcode follows the last
        # locality component separated by a space.
        locality = ", ".join(p for p in (street, city, area) if p)
        if zipcode:
            locality = locality + " " + zipcode if locality else zipcode
        if locality:
            return locality + ", United States"
        return "United States"
    else:
        parts = [p for p in (street, zipcode, city, area, country) if p]
        return " ".join(parts)
| gpl-3.0 |
dirkmueller/qemu | scripts/tracetool/backend/dtrace.py | 94 | 2484 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DTrace/SystemTAP backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
PUBLIC = True
PROBEPREFIX = None
def _probeprefix():
if PROBEPREFIX is None:
raise ValueError("you must set PROBEPREFIX")
return PROBEPREFIX
BINARY = None
def _binary():
if BINARY is None:
raise ValueError("you must set BINARY")
return BINARY
def c(events):
    # Nothing to generate for the C source file with this backend; the
    # tracing calls live in the header emitted by h() below.
    pass
def h(events):
    """Emit the tracing header: one static-inline wrapper per event that
    fires the corresponding generated DTrace probe."""
    out('#include "trace/generated-tracers-dtrace.h"',
        '')
    for event in events:
        out('static inline void trace_%(name)s(%(args)s) {',
            ' QEMU_%(uppername)s(%(argnames)s);',
            '}',
            name=event.name,
            args=event.args,
            uppername=event.name.upper(),
            argnames=", ".join(event.args.names()),
            )
def d(events):
    """Emit the DTrace provider definition with one probe per event."""
    out('provider qemu {')
    for event in events:
        # DTrace provider syntax wants 'foo()' rather than 'foo(void)'
        # for empty parameter lists.
        arglist = str(event.args)
        if arglist == 'void':
            arglist = ''
        # Prototype for the probe's arguments.
        out('',
            'probe %(name)s(%(args)s);',
            name=event.name,
            args=arglist,
            )
    out('',
        '};')
# Technically 'self' is not used by systemtap yet, but
# they recommended we keep it in the reserved list anyway
RESERVED_WORDS = (
    'break', 'catch', 'continue', 'delete', 'else', 'for',
    'foreach', 'function', 'global', 'if', 'in', 'limit',
    'long', 'next', 'probe', 'return', 'self', 'string',
    'try', 'while'
)

def stap(events):
    """Emit SystemTap probe definitions that alias each positional probe
    argument ($arg1, $arg2, ...) to its event argument name."""
    for event in events:
        out('probe %(probeprefix)s.%(name)s = process("%(binary)s").mark("%(name)s")',
            '{',
            probeprefix=_probeprefix(),
            name=event.name,
            binary=_binary(),
            )
        for position, arg in enumerate(event.args.names(), start=1):
            # SystemTap keywords cannot be used as script variables, so
            # reserved words get a trailing underscore appended.
            if arg in RESERVED_WORDS:
                arg += '_'
            out(' %s = $arg%d;' % (arg, position))
        out('}')
        out()
| gpl-2.0 |
allenai/allennlp | allennlp/modules/text_field_embedders/basic_text_field_embedder.py | 1 | 5232 | from typing import Dict
import inspect
import torch
from overrides import overrides
from allennlp.common.checks import ConfigurationError
from allennlp.data import TextFieldTensors
from allennlp.modules.text_field_embedders.text_field_embedder import TextFieldEmbedder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.modules.token_embedders import EmptyEmbedder
@TextFieldEmbedder.register("basic")
class BasicTextFieldEmbedder(TextFieldEmbedder):
    """
    This is a `TextFieldEmbedder` that wraps a collection of
    [`TokenEmbedder`](../token_embedders/token_embedder.md) objects.  Each
    `TokenEmbedder` embeds or encodes the representation output from one
    [`allennlp.data.TokenIndexer`](../../data/token_indexers/token_indexer.md). As the data produced by a
    [`allennlp.data.fields.TextField`](../../data/fields/text_field.md) is a dictionary mapping names to these
    representations, we take `TokenEmbedders` with corresponding names.  Each `TokenEmbedders`
    embeds its input, and the result is concatenated in an arbitrary (but consistent) order.

    Registered as a `TextFieldEmbedder` with name "basic", which is also the default.

    # Parameters

    token_embedders : `Dict[str, TokenEmbedder]`, required.
        A dictionary mapping token embedder names to implementations.
        These names should match the corresponding indexer used to generate
        the tensor passed to the TokenEmbedder.
    """

    def __init__(self, token_embedders: Dict[str, TokenEmbedder]) -> None:
        super().__init__()
        # NOTE(mattg): I'd prefer to just use ModuleDict(token_embedders) here, but that changes
        # weight locations in torch state dictionaries and invalidates all prior models, just for a
        # cosmetic change in the code.
        self._token_embedders = token_embedders
        for key, embedder in token_embedders.items():
            name = "token_embedder_%s" % key
            self.add_module(name, embedder)
        # Sorted keys give forward() a deterministic concatenation order.
        self._ordered_embedder_keys = sorted(self._token_embedders.keys())

    @overrides
    def get_output_dim(self) -> int:
        # The output is the concatenation of all wrapped embedders'
        # outputs, so its dimensionality is the sum of theirs.
        output_dim = 0
        for embedder in self._token_embedders.values():
            output_dim += embedder.get_output_dim()
        return output_dim

    def forward(
        self, text_field_input: TextFieldTensors, num_wrapping_dims: int = 0, **kwargs
    ) -> torch.Tensor:
        """
        Embed each indexer's tensors with the embedder of the same name and
        concatenate the results along the last dimension.

        `num_wrapping_dims` is the number of extra "wrapping" dimensions on
        the input (e.g. from a ListField of TextFields); each one wraps the
        embedder in `TimeDistributed`.  Extra `**kwargs` are forwarded only
        to embedders whose `forward` signature names them.
        """
        if sorted(self._token_embedders.keys()) != sorted(text_field_input.keys()):
            message = "Mismatched token keys: %s and %s" % (
                str(self._token_embedders.keys()),
                str(text_field_input.keys()),
            )
            embedder_keys = set(self._token_embedders.keys())
            input_keys = set(text_field_input.keys())
            if embedder_keys > input_keys and all(
                isinstance(embedder, EmptyEmbedder)
                for name, embedder in self._token_embedders.items()
                if name in embedder_keys - input_keys
            ):
                # Allow extra embedders that are only in the token embedders (but not input) and are empty to pass
                # config check
                pass
            else:
                raise ConfigurationError(message)

        embedded_representations = []
        for key in self._ordered_embedder_keys:
            # Note: need to use getattr here so that the pytorch voodoo
            # with submodules works with multiple GPUs.
            embedder = getattr(self, "token_embedder_{}".format(key))
            if isinstance(embedder, EmptyEmbedder):
                # Skip empty embedders
                continue
            # Split kwargs into those the embedder's forward() accepts and
            # the parameter names it expects but were not supplied.
            forward_params = inspect.signature(embedder.forward).parameters
            forward_params_values = {}
            missing_tensor_args = set()
            for param in forward_params.keys():
                if param in kwargs:
                    forward_params_values[param] = kwargs[param]
                else:
                    missing_tensor_args.add(param)

            for _ in range(num_wrapping_dims):
                embedder = TimeDistributed(embedder)

            tensors: Dict[str, torch.Tensor] = text_field_input[key]
            if len(tensors) == 1 and len(missing_tensor_args) == 1:
                # If there's only one tensor argument to the embedder, and we just have one tensor to
                # embed, we can just pass in that tensor, without requiring a name match.
                token_vectors = embedder(list(tensors.values())[0], **forward_params_values)
            else:
                # If there are multiple tensor arguments, we have to require matching names from the
                # TokenIndexer. I don't think there's an easy way around that.
                token_vectors = embedder(**tensors, **forward_params_values)
            if token_vectors is not None:
                # To handle some very rare use cases, we allow the return value of the embedder to
                # be None; we just skip it in that case.
                embedded_representations.append(token_vectors)
        return torch.cat(embedded_representations, dim=-1)
| apache-2.0 |
ptemplier/ansible | lib/ansible/modules/database/misc/riak.py | 29 | 7451 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: riak
short_description: This module handles some common Riak operations
description:
- This module can be used to join nodes to a cluster, check
the status of the cluster.
version_added: "1.2"
author:
- "James Martin (@jsmartin)"
- "Drew Kerrigan (@drewkerrigan)"
options:
command:
description:
- The command you would like to perform against the cluster.
required: false
default: null
choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
config_dir:
description:
- The path to the riak configuration directory
required: false
default: /etc/riak
http_conn:
description:
- The ip address and port that is listening for Riak HTTP queries
required: false
default: 127.0.0.1:8098
target_node:
description:
- The target node for certain operations (join, ping)
required: false
default: riak@127.0.0.1
wait_for_handoffs:
description:
- Number of seconds to wait for handoffs to complete.
required: false
default: null
wait_for_ring:
description:
- Number of seconds to wait for all nodes to agree on the ring.
required: false
default: null
wait_for_service:
description:
- Waits for a riak service to come online before continuing.
required: false
default: None
choices: ['kv']
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
'''
EXAMPLES = '''
# Join's a Riak node to another node
- riak:
command: join
target_node: riak@10.1.1.1
# Wait for handoffs to finish. Use with async and poll.
- riak:
wait_for_handoffs: yes
# Wait for riak_kv service to startup
- riak:
wait_for_service: kv
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def ring_check(module, riak_admin_bin):
    """Return True when 'riak-admin ringready' reports that every node
    agrees on the ring, False otherwise."""
    rc, out, err = module.run_command('%s ringready' % riak_admin_bin)
    return rc == 0 and 'TRUE All nodes agree on the ring' in out
def main():
    """Module entry point: fetch node stats over HTTP, run the requested
    cluster command (ping/kv_test/join/plan/commit), optionally wait for
    handoffs, a service, or ring agreement, then exit with JSON facts."""
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(required=False, default=None, choices=[
                'ping', 'kv_test', 'join', 'plan', 'commit']),
            config_dir=dict(default='/etc/riak', type='path'),
            http_conn=dict(required=False, default='127.0.0.1:8098'),
            target_node=dict(default='riak@127.0.0.1', required=False),
            wait_for_handoffs=dict(default=False, type='int'),
            wait_for_ring=dict(default=False, type='int'),
            wait_for_service=dict(
                required=False, default=None, choices=['kv']),
            validate_certs = dict(default='yes', type='bool'))
    )

    command = module.params.get('command')
    http_conn = module.params.get('http_conn')
    target_node = module.params.get('target_node')
    wait_for_handoffs = module.params.get('wait_for_handoffs')
    wait_for_ring = module.params.get('wait_for_ring')
    wait_for_service = module.params.get('wait_for_service')

    # make sure riak commands are on the path
    riak_bin = module.get_bin_path('riak')
    riak_admin_bin = module.get_bin_path('riak-admin')

    # Poll the HTTP stats endpoint for up to two minutes, retrying every
    # five seconds until it answers with a 200.
    timeout = time.time() + 120
    while True:
        if time.time() > timeout:
            module.fail_json(msg='Timeout, could not fetch Riak stats.')
        (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
        if info['status'] == 200:
            stats_raw = response.read()
            break
        time.sleep(5)

    # here we attempt to load those stats,
    # NOTE(review): the bare 'except' also hides NameError if the fetch
    # loop above somehow exits without setting stats_raw.
    try:
        stats = json.loads(stats_raw)
    except:
        module.fail_json(msg='Could not parse Riak stats.')

    node_name = stats['nodename']
    nodes = stats['ring_members']
    ring_size = stats['ring_creation_size']
    rc, out, err = module.run_command([riak_bin, 'version'] )
    version = out.strip()

    # Base facts returned for every invocation.
    result = dict(node_name=node_name,
                  nodes=nodes,
                  ring_size=ring_size,
                  version=version)

    if command == 'ping':
        cmd = '%s ping %s' % ( riak_bin, target_node )
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['ping'] = out
        else:
            module.fail_json(msg=out)

    elif command == 'kv_test':
        cmd = '%s test' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['kv_test'] = out
        else:
            module.fail_json(msg=out)

    elif command == 'join':
        # A node that already appears once in the member list alongside
        # other nodes is in (or staged for) the cluster -- joining again
        # would fail, so report no change instead.
        if nodes.count(node_name) == 1 and len(nodes) > 1:
            result['join'] = 'Node is already in cluster or staged to be in cluster.'
        else:
            cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                result['join'] = out
                result['changed'] = True
            else:
                module.fail_json(msg=out)

    elif command == 'plan':
        cmd = '%s cluster plan' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['plan'] = out
            # Pending changes mean a commit would alter the cluster.
            if 'Staged Changes' in out:
                result['changed'] = True
        else:
            module.fail_json(msg=out)

    elif command == 'commit':
        cmd = '%s cluster commit' % riak_admin_bin
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            result['commit'] = out
            result['changed'] = True
        else:
            module.fail_json(msg=out)

    # this could take a while, recommend to run in async mode
    if wait_for_handoffs:
        timeout = time.time() + wait_for_handoffs
        while True:
            cmd = '%s transfers' % riak_admin_bin
            rc, out, err = module.run_command(cmd)
            if 'No transfers active' in out:
                result['handoffs'] = 'No transfers active.'
                break
            time.sleep(10)
            if time.time() > timeout:
                module.fail_json(msg='Timeout waiting for handoffs.')

    if wait_for_service:
        # riak-admin blocks until the named service (riak_kv) is up.
        cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ]
        rc, out, err = module.run_command(cmd)
        result['service'] = out

    if wait_for_ring:
        timeout = time.time() + wait_for_ring
        while True:
            if ring_check(module, riak_admin_bin):
                break
            time.sleep(10)
            if time.time() > timeout:
                module.fail_json(msg='Timeout waiting for nodes to agree on ring.')

    result['ring_ready'] = ring_check(module, riak_admin_bin)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
sri85/selenium | py/test/selenium/webdriver/common/page_loading_tests.py | 53 | 5014 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from selenium.webdriver.common.by import By
class PageLoadingTests(unittest.TestCase):
    """WebDriver page-loading behaviour tests (navigation, history,
    refresh, unresolvable hosts).

    NOTE(review): relies on `self.driver` and `self.webserver` being
    attached to the test case by the surrounding test runner before these
    methods execute -- confirm against the project's test setup.
    """

    def testShouldWaitForDocumentToBeLoaded(self):
        self._loadSimplePage()
        self.assertEqual(self.driver.title, "Hello WebDriver")

    # Disabled till Java WebServer is used
    #def testShouldFollowRedirectsSentInTheHttpResponseHeaders(self):
    #    self.driver.get(pages.redirectPage);
    #    self.assertEqual(self.driver.title, "We Arrive Here")

    # Disabled till the Java WebServer is used
    #def testShouldFollowMetaRedirects(self):
    #    self._loadPage("metaRedirect")
    #    self.assertEqual(self.driver.title, "We Arrive Here")

    def testShouldBeAbleToGetAFragmentOnTheCurrentPage(self):
        self._loadPage("xhtmlTest")
        location = self.driver.current_url
        # Navigating to a fragment must not reload the page; the element
        # lookup below verifies the DOM is still present.
        self.driver.get(location + "#text")
        self.driver.find_element(by=By.ID, value="id1")

    @pytest.mark.ignore_safari
    def testShouldReturnWhenGettingAUrlThatDoesNotResolve(self):
        try:
            # Of course, we're up the creek if this ever does get registered
            self.driver.get("http://www.thisurldoesnotexist.comx/")
        except ValueError:
            pass

    @pytest.mark.ignore_safari
    def testShouldReturnWhenGettingAUrlThatDoesNotConnect(self):
        # Here's hoping that there's nothing here. There shouldn't be
        self.driver.get("http://localhost:3001")

    #@Ignore({IE, IPHONE, SELENESE})
    #def testShouldBeAbleToLoadAPageWithFramesetsAndWaitUntilAllFramesAreLoaded() {
    #    self.driver.get(pages.framesetPage);
    #    self.driver.switchTo().frame(0);
    #    WebElement pageNumber = self.driver.findElement(By.xpath("#span[@id='pageNumber']"));
    #    self.assertEqual((pageNumber.getText().trim(), equalTo("1"));
    #    self.driver.switchTo().defaultContent().switchTo().frame(1);
    #    pageNumber = self.driver.findElement(By.xpath("#span[@id='pageNumber']"));
    #    self.assertEqual((pageNumber.getText().trim(), equalTo("2"));

    #Need to implement this decorator
    #@NeedsFreshDriver
    #def testSouldDoNothingIfThereIsNothingToGoBackTo() {
    #    String originalTitle = self.driver.getTitle();
    #    self.driver.get(pages.formPage);
    #    self.driver.back();
    #    We may have returned to the browser's home page
    #    self.assertEqual(self.driver.title, anyOf(equalTo(originalTitle), equalTo("We Leave From Here")));

    def testShouldBeAbleToNavigateBackInTheBrowserHistory(self):
        self._loadPage("formPage")
        self.driver.find_element(by=By.ID, value="imageButton").submit()
        self.assertEqual(self.driver.title, "We Arrive Here")

        self.driver.back()
        self.assertEqual(self.driver.title, "We Leave From Here")

    def testShouldBeAbleToNavigateBackInTheBrowserHistoryInPresenceOfIframes(self):
        self._loadPage("xhtmlTest")
        self.driver.find_element(by=By.NAME,value="sameWindow").click()
        self.assertEqual(self.driver.title, "This page has iframes")

        self.driver.back()
        self.assertEqual(self.driver.title, "XHTML Test Page")

    def testShouldBeAbleToNavigateForwardsInTheBrowserHistory(self):
        self._loadPage("formPage")
        self.driver.find_element(by=By.ID, value="imageButton").submit()
        self.assertEqual(self.driver.title, "We Arrive Here")

        self.driver.back()
        self.assertEqual(self.driver.title, "We Leave From Here")

        self.driver.forward()
        self.assertEqual(self.driver.title, "We Arrive Here")

    @pytest.mark.ignore_ie
    def testShouldNotHangifDocumentOpenCallIsNeverFollowedByDocumentCloseCall(self):
        self._loadPage("document_write_in_onload")
        self.driver.find_element(By.XPATH, "//body")

    def testShouldBeAbleToRefreshAPage(self):
        self._loadPage("xhtmlTest")
        self.driver.refresh()
        self.assertEqual(self.driver.title, "XHTML Test Page")

    # --- helpers -------------------------------------------------------

    def _pageURL(self, name):
        # Resolve a fixture name to its URL on the test web server.
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| apache-2.0 |
vovojh/gem5 | ext/ply/test/yacc_uprec2.py | 174 | 1430 | # -----------------------------------------------------------------------------
# yacc_uprec2.py
#
# A grammar with a bad %prec specifier
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
# dictionary of names
# dictionary of names: maps NAME tokens to their assigned values
names = { }

# In ply, each p_* function's docstring IS the grammar production it
# implements -- the docstrings below are functional, not documentation.
def p_statement_assign(t):
    'statement : NAME EQUALS expression'
    # Record the assignment so later expressions can look the name up.
    names[t[1]] = t[3]
def p_statement_expr(t):
    'statement : expression'
    # A bare expression statement just prints its value.
    print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # t[1]/t[3] are the operand values, t[2] the operator lexeme.
    left, op, right = t[1], t[2], t[3]
    if op == '+':
        t[0] = left + right
    elif op == '-':
        t[0] = left - right
    elif op == '*':
        t[0] = left * right
    elif op == '/':
        t[0] = left / right
# Deliberately malformed rule: '%prec' lacks a following token.  This
# file is a fixture that exercises yacc's error reporting for a bad
# %prec specifier (see the module header) -- do not "fix" the docstring.
def p_expression_uminus(t):
    'expression : MINUS expression %prec'
    t[0] = -t[2]
def p_expression_group(t):
    'expression : LPAREN expression RPAREN'
    # Parentheses only group; propagate the inner expression's value.
    t[0] = t[2]
def p_expression_number(t):
    'expression : NUMBER'
    # The lexer already converted the token to a numeric value.
    t[0] = t[1]
def p_expression_name(t):
    'expression : NAME'
    # EAFP: undefined names report an error and evaluate to 0.
    try:
        t[0] = names[t[1]]
    except LookupError:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_error(t):
    # Fallback for tokens the grammar cannot place.
    print("Syntax error at '%s'" % t.value)

# Build the parser tables; given the deliberately bad %prec above, this
# is expected to produce a yacc error/warning (that is the test's point).
yacc.yacc()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.