repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
jzoldak/edx-platform | common/lib/xmodule/xmodule/error_module.py | 27 | 7612 | """
Modules that get shown to the users when an error has occurred while
loading or rendering other modules
"""
import hashlib
import logging
import json
import sys
from lxml import etree
from xmodule.x_module import XModule, XModuleDescriptor
from xmodule.errortracker import exc_info_to_str
from xblock.fields import String, Scope, ScopeIds
from xblock.field_data import DictFieldData
from xmodule.modulestore import EdxJSONEncoder
log = logging.getLogger(__name__)
# NOTE: This is not the most beautiful design in the world, but there's no good
# way to tell if the module is being used in a staff context or not. Errors that get discovered
# at course load time are turned into ErrorDescriptor objects, and automatically hidden from students.
# Unfortunately, we can also have errors when loading modules mid-request, and then we need to decide
# what to show, and the logic for that belongs in the LMS (e.g. in get_module), so the error handler
# decides whether to create a staff or not-staff module.
class ErrorFields(object):
    """
    XBlock fields used by the ErrorModules
    """
    # Raw serialized content of the block that failed to load/render.
    contents = String(scope=Scope.content)
    # Human-readable description of the failure (rendered for staff).
    error_msg = String(scope=Scope.content)
    # Display name of the block (settings scope).
    display_name = String(scope=Scope.settings)
class ErrorModule(ErrorFields, XModule):
    """
    Module rendered for staff members when another module failed to
    load or render.
    """
    def get_html(self):
        """Render the staff-facing error page.

        TODO (vshnayder): proper style, divs, etc.
        """
        # Staff get the full details of the failure: the raw contents of
        # the broken block plus the recorded error message.
        context = {
            'staff_access': True,
            'data': self.contents,
            'error': self.error_msg,
        }
        return self.system.render_template('module-error.html', context)
class NonStaffErrorModule(ErrorFields, XModule):
    """
    Module rendered for students when another module failed to load or
    render; unlike the staff view it deliberately reveals no details.
    """
    def get_html(self):
        """Render the details-free, student-facing error page.

        TODO (vshnayder): proper style, divs, etc.
        """
        # Students see only a generic error notice -- no data, no message.
        context = {
            'staff_access': False,
            'data': "",
            'error': "",
        }
        return self.system.render_template('module-error.html', context)
class ErrorDescriptor(ErrorFields, XModuleDescriptor):
    """
    Module that provides a raw editing view of broken xml.
    """
    module_class = ErrorModule
    resources_dir = None

    def get_html(self):
        """Error blocks have no editor HTML of their own."""
        return u''

    @classmethod
    def _construct(cls, system, contents, error_msg, location, for_parent=None):
        """
        Build a new ErrorDescriptor. using ``system``.
        Arguments:
            system (:class:`DescriptorSystem`): The :class:`DescriptorSystem` used
                to construct the XBlock that had an error.
            contents (unicode): An encoding of the content of the xblock that had an error.
            error_msg (unicode): A message describing the error.
            location (:class:`UsageKey`): The usage key of the XBlock that had an error.
            for_parent (:class:`XBlock`): Optional. The parent of this error block.
        """
        if error_msg is None:
            # this string is not marked for translation because we don't have
            # access to the user context, and this will only be seen by staff
            error_msg = 'Error not available'
        if location.category == 'error':
            location = location.replace(
                # Pick a unique url_name -- the sha1 hash of the contents.
                # NOTE: We could try to pull out the url_name of the errored descriptor,
                # but url_names aren't guaranteed to be unique between descriptor types,
                # and ErrorDescriptor can wrap any type.  When the wrapped module is fixed,
                # it will be written out with the original url_name.
                name=hashlib.sha1(contents.encode('utf8')).hexdigest()
            )
        # real metadata stays in the content, but add a display name
        field_data = DictFieldData({
            'error_msg': unicode(error_msg),
            'contents': contents,
            'location': location,
            'category': 'error'
        })
        return system.construct_xblock_from_class(
            cls,
            # The error module doesn't use scoped data, and thus doesn't need
            # real scope keys
            ScopeIds(None, 'error', location, location),
            field_data,
            for_parent=for_parent,
        )

    def get_context(self):
        """Template context for the raw-edit view: this block and its raw contents."""
        return {
            'module': self,
            'data': self.contents,
        }

    @classmethod
    def from_json(cls, json_data, system, location, error_msg='Error not available'):
        """Wrap ``json_data`` that failed to import in an ErrorDescriptor.

        Falls back to ``repr`` if the data cannot be JSON-serialized.
        """
        try:
            json_string = json.dumps(json_data, skipkeys=False, indent=4, cls=EdxJSONEncoder)
        except:  # pylint: disable=bare-except
            json_string = repr(json_data)
        return cls._construct(
            system,
            json_string,
            error_msg,
            location=location
        )

    @classmethod
    def from_descriptor(cls, descriptor, error_msg=None):
        """Wrap an existing ``descriptor`` that raised while loading/rendering."""
        return cls._construct(
            descriptor.runtime,
            str(descriptor),
            error_msg,
            location=descriptor.location,
            for_parent=descriptor.get_parent() if descriptor.has_cached_parent else None
        )

    @classmethod
    def from_xml(cls, xml_data, system, id_generator,  # pylint: disable=arguments-differ
                 error_msg=None):
        '''Create an instance of this descriptor from the supplied data.
        Does not require that xml_data be parseable--just stores it and exports
        as-is if not.
        Takes an extra, optional, parameter--the error that caused an
        issue.  (should be a string, or convert usefully into one).
        '''
        try:
            # If this is already an error tag, don't want to re-wrap it.
            xml_obj = etree.fromstring(xml_data)
            if xml_obj.tag == 'error':
                xml_data = xml_obj.text
                error_node = xml_obj.find('error_msg')
                if error_node is not None:
                    error_msg = error_node.text
                else:
                    error_msg = None
        except etree.XMLSyntaxError:
            # Save the error to display later--overrides other problems
            error_msg = exc_info_to_str(sys.exc_info())
        return cls._construct(system, xml_data, error_msg, location=id_generator.create_definition('error'))

    def export_to_xml(self, resource_fs):
        '''
        If the definition data is invalid xml, export it wrapped in an "error"
        tag.  If it is valid, export without the wrapper.
        NOTE: There may still be problems with the valid xml--it could be
        missing required attributes, could have the wrong tags, refer to missing
        files, etc.  That would just get re-wrapped on import.
        '''
        try:
            xml = etree.fromstring(self.contents)
            return etree.tostring(xml, encoding='unicode')
        except etree.XMLSyntaxError:
            # still not valid.
            root = etree.Element('error')
            root.text = self.contents
            err_node = etree.SubElement(root, 'error_msg')
            err_node.text = self.error_msg
            return etree.tostring(root, encoding='unicode')
class NonStaffErrorDescriptor(ErrorDescriptor):
    """
    Variant of ErrorDescriptor whose module class renders the
    details-free, student-facing error page.
    """
    module_class = NonStaffErrorModule
| agpl-3.0 |
halostatue/ansible | lib/ansible/plugins/connection/libvirt_lxc.py | 20 | 5220 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import subprocess
from ansible import errors
from ansible.callbacks import vvv
import ansible.constants as C
class Connection(object):
    ''' Local lxc based connections

    Runs commands inside a libvirt-managed LXC container on the local
    machine by shelling out to ``virsh lxc-enter-namespace``.
    '''

    def _search_executable(self, executable):
        """Return the absolute path of ``executable``, or raise AnsibleError."""
        cmd = distutils.spawn.find_executable(executable)
        if not cmd:
            # BUG FIX: the original wrote ``raise AnsibleError("...") % executable``,
            # which applies ``%`` to the already-raised expression and would blow
            # up with a TypeError instead of the intended message.
            raise errors.AnsibleError("%s command not found in PATH" % executable)
        return cmd

    def _check_domain(self, domain):
        """Raise AnsibleError unless ``domain`` is an LXC domain known to libvirt."""
        p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        if p.returncode:
            raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)

    def __init__(self, runner, host, port, *args, **kwargs):
        self.lxc = host
        self.cmd = self._search_executable('virsh')
        self._check_domain(host)
        self.runner = runner
        self.host = host
        # port is unused, since this is local
        self.port = port
        self.become_methods_supported = C.BECOME_METHODS

    def connect(self, port=None):
        ''' connect to the lxc; nothing to do here '''
        vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
        return self

    def _generate_cmd(self, executable, cmd):
        """Build the virsh invocation: an argv list when ``executable`` is
        given (run without a shell), otherwise a single shell string."""
        if executable:
            local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable, '-c', cmd]
        else:
            local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
        return local_cmd

    def exec_command(self, cmd, become_user, sudoable=False, executable='/bin/sh', in_data=None):
        ''' run a command on the chroot '''
        if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
            raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
        if in_data:
            raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
        # We ignore privilege escalation!
        local_cmd = self._generate_cmd(executable, cmd)
        vvv("EXEC %s" % (local_cmd), host=self.lxc)
        # shell=True only for the string form produced by _generate_cmd.
        p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
                             cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        return (p.returncode, stdout, stderr)

    def _normalize_path(self, path, prefix):
        """Anchor ``path`` under ``prefix``, collapsing '..' components so a
        crafted path cannot escape the prefix."""
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to lxc '''
        out_path = self._normalize_path(out_path, '/')
        vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
        # Stream the local file into /bin/tee inside the container.
        local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/tee', out_path]
        vvv("EXEC %s" % (local_cmd), host=self.lxc)
        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate(open(in_path, 'rb').read())

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from lxc to local '''
        in_path = self._normalize_path(in_path, '/')
        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
        # Read the container file via /bin/cat and capture its stdout.
        local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
        vvv("EXEC %s" % (local_cmd), host=self.lxc)
        p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        open(out_path, 'wb').write(stdout)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
| gpl-3.0 |
erdc-cm/air-water-vv | 2d/benchmarks/flatPlate_wallFunctions/moveMesh_n.py | 11 | 1890 | from proteus.default_n import *
from proteus import (FemTools,
Quadrature,
TimeIntegration,
NumericalFlux,
NonlinearSolvers,
LinearSolvers)
import moveMesh_p as physics
from proteus import Context
# Numerics (_n) options for the mesh-motion model; values come from the
# shared problem Context set up by the corresponding physics (_p) module.
ct = Context.get()
domain = ct.domain
nd = ct.domain.nd
mesh = domain.MeshOptions
# time stepping: mesh motion is quasi-static, so no time integration
runCFL = ct.runCFL
timeIntegration = TimeIntegration.NoIntegration
# mesh options
nLevels = ct.nLevels
parallelPartitioningType = mesh.parallelPartitioningType
nLayersOfOverlapForParallel = mesh.nLayersOfOverlapForParallel
restrictFineSolutionToAllMeshes = mesh.restrictFineSolutionToAllMeshes
triangleOptions = mesh.triangleOptions
elementQuadrature = ct.elementQuadrature
elementBoundaryQuadrature = ct.elementBoundaryQuadrature
# one displacement component per space dimension (third added for 3D below)
femSpaces = {0: ct.basis,
             1: ct.basis}
if nd == 3:
    femSpaces[2] = ct.basis
massLumping = False
numericalFluxType = NumericalFlux.Stress_IIPG_exterior
conservativeFlux = None
subgridError = None
shockCapturing = None
fullNewtonFlag = True
# nonlinear solver selection
multilevelNonlinearSolver = NonlinearSolvers.Newton
levelNonlinearSolver = NonlinearSolvers.Newton
nonlinearSmoother = None
linearSmoother = None
matrix = SparseMatrix
# linear solver selection: legacy PETSc, petsc4py KSP, or (if requested)
# a direct LU solve which overrides either PETSc choice
if ct.useOldPETSc:
    multilevelLinearSolver = LinearSolvers.PETSc
    levelLinearSolver = LinearSolvers.PETSc
else:
    multilevelLinearSolver = LinearSolvers.KSP_petsc4py
    levelLinearSolver = LinearSolvers.KSP_petsc4py
if ct.useSuperlu:
    multilevelLinearSolver = LinearSolvers.LU
    levelLinearSolver = LinearSolvers.LU
linear_solver_options_prefix = 'mesh_'
linearSmoother = None
linearSolverConvergenceTest = 'r-true'
# tolerances (linear tolerance tied to the nonlinear mesh tolerance)
tolFac = 0.0
linTolFac = 0.001
l_atol_res = 0.001*ct.mesh_nl_atol_res
nl_atol_res = ct.mesh_nl_atol_res
maxNonlinearIts = 4  # should be linear
maxLineSearches = 0
| mit |
huang4fstudio/django | tests/field_deconstruction/tests.py | 61 | 17839 | from __future__ import unicode_literals
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.utils import six
class FieldDeconstructionTests(SimpleTestCase):
    """
    Tests the deconstruct() method on all core fields.

    Each test checks that deconstruct() returns the canonical dotted path,
    positional args, and only the non-default keyword args for the field.
    """

    def test_name(self):
        """
        Tests the outputting of the correct name if assigned one.
        """
        # First try using a "normal" field
        field = models.CharField(max_length=65)
        name, path, args, kwargs = field.deconstruct()
        self.assertIsNone(name)
        field.set_attributes_from_name("is_awesome_test")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(name, "is_awesome_test")
        self.assertIsInstance(name, six.text_type)
        # Now try with a ForeignKey
        field = models.ForeignKey("some_fake.ModelName")
        name, path, args, kwargs = field.deconstruct()
        self.assertIsNone(name)
        field.set_attributes_from_name("author")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(name, "author")

    def test_auto_field(self):
        field = models.AutoField(primary_key=True)
        field.set_attributes_from_name("id")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.AutoField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"primary_key": True})

    def test_big_integer_field(self):
        field = models.BigIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BigIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_boolean_field(self):
        field = models.BooleanField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.BooleanField(default=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"default": True})

    def test_char_field(self):
        field = models.CharField(max_length=65)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 65})
        field = models.CharField(max_length=65, null=True, blank=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})

    def test_char_field_choices(self):
        field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two")))
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        # choices tuples are normalized to a list of tuples
        self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1})

    def test_csi_field(self):
        field = models.CommaSeparatedIntegerField(max_length=100)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 100})

    def test_date_field(self):
        field = models.DateField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.DateField(auto_now=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now": True})

    def test_datetime_field(self):
        field = models.DateTimeField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.DateTimeField(auto_now_add=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now_add": True})
        # Bug #21785
        field = models.DateTimeField(auto_now=True, auto_now_add=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})

    def test_decimal_field(self):
        field = models.DecimalField(max_digits=5, decimal_places=2)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DecimalField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})

    def test_decimal_field_0_decimal_places(self):
        """
        A DecimalField with decimal_places=0 should work (#22272).
        """
        field = models.DecimalField(max_digits=5, decimal_places=0)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DecimalField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})

    def test_email_field(self):
        field = models.EmailField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.EmailField")
        self.assertEqual(args, [])
        # 254 is the default max_length for EmailField, but it is still emitted
        self.assertEqual(kwargs, {"max_length": 254})
        field = models.EmailField(max_length=255)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.EmailField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 255})

    def test_file_field(self):
        field = models.FileField(upload_to="foo/bar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FileField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/bar"})
        # Test max_length
        field = models.FileField(upload_to="foo/bar", max_length=200)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FileField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})

    def test_file_path_field(self):
        # NOTE(review): ".*\.txt$" should be a raw string (r".*\.txt$");
        # "\." is an invalid escape sequence and an error in Python 3.12+.
        field = models.FilePathField(match=".*\.txt$")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FilePathField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"match": ".*\.txt$"})
        field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FilePathField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})

    def test_float_field(self):
        field = models.FloatField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FloatField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_foreign_key(self):
        # Test basic pointing
        from django.contrib.auth.models import Permission
        field = models.ForeignKey("auth.Permission")
        field.remote_field.model = Permission
        field.remote_field.field_name = "id"
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission"})
        self.assertFalse(hasattr(kwargs['to'], "setting_name"))
        # Test swap detection for swappable model
        field = models.ForeignKey("auth.User")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User"})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
        # Test nonexistent (for now) model
        field = models.ForeignKey("something.Else")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "something.Else"})
        # Test on_delete
        field = models.ForeignKey("auth.User", on_delete=models.SET_NULL)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
        # Test to_field preservation
        field = models.ForeignKey("auth.Permission", to_field="foobar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar"})
        # Test related_name preservation
        field = models.ForeignKey("auth.Permission", related_name="foobar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar"})

    @override_settings(AUTH_USER_MODEL="auth.Permission")
    def test_foreign_key_swapped(self):
        # It doesn't matter that we swapped out user for permission;
        # there's no validation. We just want to check the setting stuff works.
        field = models.ForeignKey("auth.Permission")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission"})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")

    def test_image_field(self):
        field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ImageField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})

    def test_integer_field(self):
        field = models.IntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.IntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_ip_address_field(self):
        field = models.IPAddressField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.IPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_generic_ip_address_field(self):
        field = models.GenericIPAddressField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.GenericIPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.GenericIPAddressField(protocol="IPv6")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.GenericIPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"protocol": "IPv6"})

    def test_many_to_many_field(self):
        # Test normal
        field = models.ManyToManyField("auth.Permission")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission"})
        self.assertFalse(hasattr(kwargs['to'], "setting_name"))
        # Test swappable
        field = models.ManyToManyField("auth.User")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User"})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
        # Test through
        field = models.ManyToManyField("auth.Permission", through="auth.Group")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
        # Test custom db_table
        field = models.ManyToManyField("auth.Permission", db_table="custom_table")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"})
        # Test related_name
        field = models.ManyToManyField("auth.Permission", related_name="custom_table")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"})

    @override_settings(AUTH_USER_MODEL="auth.Permission")
    def test_many_to_many_field_swapped(self):
        # It doesn't matter that we swapped out user for permission;
        # there's no validation. We just want to check the setting stuff works.
        field = models.ManyToManyField("auth.Permission")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission"})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")

    def test_null_boolean_field(self):
        field = models.NullBooleanField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.NullBooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_positive_integer_field(self):
        field = models.PositiveIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.PositiveIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_positive_small_integer_field(self):
        field = models.PositiveSmallIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_slug_field(self):
        field = models.SlugField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SlugField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.SlugField(db_index=False, max_length=231)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SlugField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"db_index": False, "max_length": 231})

    def test_small_integer_field(self):
        field = models.SmallIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SmallIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_text_field(self):
        field = models.TextField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.TextField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_time_field(self):
        field = models.TimeField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.TimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.TimeField(auto_now=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {'auto_now': True})
        field = models.TimeField(auto_now_add=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {'auto_now_add': True})

    def test_url_field(self):
        field = models.URLField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.URLField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.URLField(max_length=231)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.URLField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 231})

    def test_binary_field(self):
        field = models.BinaryField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BinaryField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
| bsd-3-clause |
rboyes/KerasScripts | CSVTrainer.py | 1 | 5321 | import os
import datetime
import sys
import time
import string
import random
import pandas as pd
import numpy as np
import gc
# BUG FIX: the original tested ``len(sys.argv) < 2`` but then reads
# argv[1]..argv[4]; with 2-4 arguments it crashed with IndexError instead
# of printing the usage message. All four arguments are required.
if len(sys.argv) < 5:
    print('Usage: CSVTrainer.py train.csv validation.csv model.h5 log.txt')
    sys.exit(1)
trainingName = sys.argv[1]    # CSV manifest of training images/tags
validationName = sys.argv[2]  # CSV manifest of validation images/tags
modelName = sys.argv[3]       # path where the best model is checkpointed
logName = sys.argv[4]         # CSV training log path
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import *
import keras.preprocessing.image as image
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.layers import Input, merge, Dropout, Dense, Flatten, Activation
from keras.layers.convolutional import MaxPooling2D, Convolution2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam, SGD
from keras.models import Model, load_model
from keras import regularizers
from keras import backend as K
from keras.utils.data_utils import get_file
from sklearn.metrics import accuracy_score
from keras.applications import resnet50
def readCSV(fileList):
    """Read an image/tag manifest CSV into (filenames, one-hot labels, label map).

    The CSV (path or file-like object) must have the image path in its first
    column and a space-separated tag string in its 'tags' column.

    Returns:
        tuple: (fileNames, y, labelMap) where fileNames is the list of image
        paths, y is an (nImages, nLabels) float32 one-hot matrix, and
        labelMap maps each tag to its column index in y.
    """
    namesDataFrame = pd.read_csv(fileList)
    # Collect the sorted vocabulary of all tags appearing in the manifest.
    flatten = lambda l: [item for sublist in l for item in sublist]
    labels = sorted(list(set(flatten([l.split(' ') for l in namesDataFrame['tags'].values]))))
    labelMap = {l: i for i, l in enumerate(labels)}
    numberOfLabels = len(labels)
    numberOfImages = len(namesDataFrame)
    fileNames = []
    y = np.zeros((numberOfImages, numberOfLabels), np.float32)
    for index in range(0, numberOfImages):
        # BUG FIX: the original decoded every image here into an unused
        # variable, wasting a full pass of image I/O; only the path and
        # tags are needed to build the label matrix.
        fileNames.append(namesDataFrame.iloc[index][0])
        tags = namesDataFrame.iloc[index][1]
        for t in tags.split(' '):
            y[index, labelMap[t]] = 1.0
    return (fileNames, y, labelMap)
# --- Load manifests -------------------------------------------------------
print('Loading images..........', end = '',flush = True)
(trainingFileNames, trainY, trainingLabelMap) = readCSV(trainingName)
(validationFileNames, validationY, validationLabelMap) = readCSV(validationName)
print('done.', flush = True)
# Both splits must share the same tag vocabulary size.
if len(trainingLabelMap) != len(validationLabelMap):
    print("Label maps for training and validation are not equal")
    sys.exit(1)
numberOfTrainingImages = len(trainingFileNames)
numberOfValidationImages = len(validationFileNames)
# Input geometry: 3-channel 256x256 images, channels-first (see input_shape).
numberOfChannels = 3
nx = 256
ny = 256
batchSize = 25
# Multi-label classification: sigmoid outputs with binary cross-entropy.
lossName = 'binary_crossentropy'
activationName = 'sigmoid'
resnetModel = resnet50.ResNet50(include_top=False, weights='imagenet', input_shape=(numberOfChannels, nx, ny))
print('The number of layers in the resnet model = %d' % (len(resnetModel.layers)))
# --- Stage 1: bottleneck features through the frozen ResNet ---------------
# NOTE(review): flow_from_filenames is not part of stock Keras; this assumes
# a patched ImageDataGenerator -- confirm against the local Keras fork.
bottleneckTrainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckValidationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
bottleneckTrainingGenerator = bottleneckTrainingDataGenerator.flow_from_filenames(trainingFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckValidationGenerator = bottleneckTrainingDataGenerator.flow_from_filenames(validationFileNames, target_size = (nx, ny), batch_size = batchSize, shuffle = False)
bottleneckTrainingFeatures = resnetModel.predict_generator(bottleneckTrainingGenerator, numberOfTrainingImages)
bottleneckValidationFeatures = resnetModel.predict_generator(bottleneckValidationGenerator, numberOfValidationImages)
# Train a small classifier head on the cached bottleneck features.
newTop = Sequential()
newTop.add(Flatten(input_shape = bottleneckTrainingFeatures.shape[1:]))
newTop.add(Dense(512, activation='relu'))
newTop.add(Dropout(0.5))
newTop.add(Dense(len(trainingLabelMap), activation=activationName, name='predictions'))
newTop.compile(loss=lossName, optimizer=Adam(lr=1.0E-3))
print('Fitting predicted features...', flush = True)
newTop.fit(bottleneckTrainingFeatures, trainY, validation_data = (bottleneckValidationFeatures, validationY), verbose = 1, batch_size = batchSize, nb_epoch = 25)
print('Done.', flush = True)
# --- Stage 2: fine-tune the top of the ResNet with the new head -----------
finalModel = Model(input = resnetModel.input, output = newTop(resnetModel.output))
print('The number of layers in the final model = %d' % (len(finalModel.layers)))
# Freeze all but the last 21 ResNet layers for fine-tuning.
for layer in finalModel.layers[:(len(resnetModel.layers) - 21)]:
    layer.trainable = False
finalModel.compile(loss=lossName,optimizer=SGD(lr=1e-4, momentum=0.9))
print(finalModel.summary())
# Could add vertical_flip = True
trainingDataGenerator = ImageDataGenerator(rescale = 1.0/255.0, rotation_range = 40, zoom_range = 0.15, horizontal_flip = True,
                                           width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.1)
validationDataGenerator = ImageDataGenerator(rescale = 1.0/255.0)
trainingGenerator = trainingDataGenerator.flow_from_filenames(trainingFileNames, trainY, batch_size = batchSize, target_size = (nx, ny))
validationGenerator = validationDataGenerator.flow_from_filenames(validationFileNames, validationY, batch_size = batchSize, target_size = (nx, ny))
# Log every epoch and keep only the best model (by validation loss).
csvLogger = CSVLogger(logName, append=True)
checkPointer = ModelCheckpoint(filepath=modelName, verbose = 1, save_best_only = True)
finalModel.fit_generator(trainingGenerator, numberOfTrainingImages, 50, validation_data = validationGenerator,
                         nb_val_samples = numberOfValidationImages, callbacks = [checkPointer, csvLogger])
| apache-2.0 |
djabber/Dashboard | bottle/dash/local/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py | 744 | 6831 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
# A prober must exceed this confidence for close() to report its charset.
MINIMUM_THRESHOLD = 0.20
# Input-state machine values for UniversalDetector._mInputState.
ePureAscii = 0   # only 7-bit bytes seen so far
eEscAscii = 1    # escape sequences seen (ISO-2022-style encodings)
eHighbyte = 2    # bytes >= 0x80 seen (multi/single-byte charsets)
class UniversalDetector:
    """Streaming character-set detector.

    Feed chunks of raw bytes via feed(); call close() at end of stream.
    The best guess lives in self.result as
    {'encoding': <name or None>, 'confidence': <0.0-1.0>}.
    """
    def __init__(self):
        # Any byte >= 0x80 means the stream is not pure ASCII.
        self._highBitDetector = re.compile(b'[\x80-\xFF]')
        # ESC or '~{' introduces escape-based encodings (handled by the
        # escape charset prober).
        self._escDetector = re.compile(b'(\033|~{)')
        self._mEscCharSetProber = None
        self._mCharSetProbers = []
        self.reset()

    def reset(self):
        """Reset all state so the detector can be reused on a new stream."""
        self.result = {'encoding': None, 'confidence': 0.0}
        self.done = False
        self._mStart = True  # set here but not read within this class
        self._mGotData = False
        self._mInputState = ePureAscii
        self._mLastChar = b''
        if self._mEscCharSetProber:
            self._mEscCharSetProber.reset()
        for prober in self._mCharSetProbers:
            prober.reset()

    def feed(self, aBuf):
        """Consume the next chunk of bytes from the stream.

        Sets self.done (and self.result) as soon as a definitive answer
        is known, e.g. from a BOM or a prober match.
        """
        if self.done:
            return
        aLen = len(aBuf)
        if not aLen:
            return
        if not self._mGotData:
            # If the data starts with BOM, we know it is UTF.
            # BUGFIX: the UTF-8 branch previously compared three bytes
            # against codecs.BOM (the 2-byte native-order UTF-16 BOM), so
            # a UTF-8 BOM was never recognized and a bare 2-byte UTF-16
            # BOM could be misreported; codecs.BOM_UTF8 (EF BB BF) is the
            # correct constant.
            if aBuf[:3] == codecs.BOM_UTF8:
                # EF BB BF  UTF-8 with BOM
                self.result = {'encoding': "UTF-8", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_LE:
                # FF FE 00 00  UTF-32, little-endian BOM
                self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_BE:
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
            elif aBuf[:4] == b'\xFE\xFF\x00\x00':
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-3412",
                    'confidence': 1.0
                }
            elif aBuf[:4] == b'\x00\x00\xFF\xFE':
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-2143",
                    'confidence': 1.0
                }
            elif aBuf[:2] == codecs.BOM_LE:
                # FF FE  UTF-16, little endian BOM
                self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
            elif aBuf[:2] == codecs.BOM_BE:
                # FE FF  UTF-16, big endian BOM
                self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
        self._mGotData = True
        if self.result['encoding'] and (self.result['confidence'] > 0.0):
            self.done = True
            return
        # Promote the input state when high bytes or escape sequences show up.
        if self._mInputState == ePureAscii:
            if self._highBitDetector.search(aBuf):
                self._mInputState = eHighbyte
            elif ((self._mInputState == ePureAscii) and
                    self._escDetector.search(self._mLastChar + aBuf)):
                self._mInputState = eEscAscii
        # Keep the last byte so escape sequences split across chunks are seen.
        self._mLastChar = aBuf[-1:]
        if self._mInputState == eEscAscii:
            if not self._mEscCharSetProber:
                self._mEscCharSetProber = EscCharSetProber()
            if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
                self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
                               'confidence': self._mEscCharSetProber.get_confidence()}
                self.done = True
        elif self._mInputState == eHighbyte:
            if not self._mCharSetProbers:
                self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
                                         Latin1Prober()]
            for prober in self._mCharSetProbers:
                if prober.feed(aBuf) == constants.eFoundIt:
                    self.result = {'encoding': prober.get_charset_name(),
                                   'confidence': prober.get_confidence()}
                    self.done = True
                    break

    def close(self):
        """Finalize detection and return self.result.

        Returns None when called twice, when no data was fed, or when no
        prober reached MINIMUM_THRESHOLD (matching the original behavior).
        """
        if self.done:
            return
        if not self._mGotData:
            if constants._debug:
                sys.stderr.write('no data received!\n')
            return
        self.done = True
        if self._mInputState == ePureAscii:
            # Only 7-bit bytes were ever seen: plain ASCII.
            self.result = {'encoding': 'ascii', 'confidence': 1.0}
            return self.result
        if self._mInputState == eHighbyte:
            # Pick the most confident prober, if it clears the threshold.
            proberConfidence = None
            maxProberConfidence = 0.0
            maxProber = None
            for prober in self._mCharSetProbers:
                if not prober:
                    continue
                proberConfidence = prober.get_confidence()
                if proberConfidence > maxProberConfidence:
                    maxProberConfidence = proberConfidence
                    maxProber = prober
            if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
                self.result = {'encoding': maxProber.get_charset_name(),
                               'confidence': maxProber.get_confidence()}
                return self.result
            if constants._debug:
                sys.stderr.write('no probers hit minimum threshhold\n')
                for prober in self._mCharSetProbers[0].mProbers:
                    if not prober:
                        continue
                    sys.stderr.write('%s confidence = %s\n' %
                                     (prober.get_charset_name(),
                                      prober.get_confidence()))
| mit |
joshuahoman/vivisect | vivisect/analysis/crypto/constants.py | 5 | 2752 | import envi
from vivisect.const import *
"""Locate the basic use of known crypto constants"""
# Well-known Diffie-Hellman MODP group moduli (RFC 2409 groups), stored as
# raw bytes so they can be located verbatim in memory.
# NOTE: str.decode("hex") is Python 2 only (use bytes.fromhex on Python 3).
dh_group1 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A3620FFFFFFFFFFFFFFFF".decode("hex")
dh_group2 = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF".decode("hex")
# MD5 initial state words (A, B, C, D).
md5_inits = [0x67452301,0xefcdab89,0x98badcfe,0x10325476]
# The 64 MD5 transform (sine-table) constants, as decimal immediates.
md5_xform = [
3614090360, 3905402710, 606105819, 3250441966, 4118548399, 1200080426,
2821735955, 4249261313, 1770035416, 2336552879, 4294925233, 2304563134,
1804603682, 4254626195, 2792965006, 1236535329, 4129170786, 3225465664,
643717713, 3921069994, 3593408605, 38016083, 3634488961, 3889429448,
568446438, 3275163606, 4107603335, 1163531501, 2850285829, 4243563512,
1735328473, 2368359562, 4294588738, 2272392833, 1839030562, 4259657740,
2763975236, 1272893353, 4139469664, 3200236656, 681279174, 3936430074,
3572445317, 76029189, 3654602809, 3873151461, 530742520, 3299628645,
4096336452, 1126891415, 2878612391, 4237533241, 1700485571, 2399980690,
4293915773, 2240044497, 1873313359, 4264355552, 2734768916, 1309151649,
4149444226, 3174756917, 718787259, 3951481745,
]
# Name of the VA set that analyze() populates with matches.
vlname = "Crypto Constants"
def analyze(vw):
    """Scan the workspace for known crypto constants.

    Flags functions whose immediates cover the full MD5 init/transform
    tables, and memory locations holding the well-known DH MODP group
    moduli, recording everything in the "Crypto Constants" VA set.
    """
    matches = []
    for fva in vw.getFunctions():
        init_hits = 0
        xform_hits = 0
        for block_va, block_size, funcva in vw.getFunctionBlocks(fva):
            end_va = block_va + block_size
            cur_va = block_va
            while cur_va < end_va:
                op = vw.parseOpcode(cur_va)
                for oper in op.opers:
                    if oper.isImmed():
                        imm = oper.getOperValue(op)
                        if imm in md5_inits:
                            init_hits += 1
                        if imm in md5_xform:
                            xform_hits += 1
                cur_va += len(op)
        # Only a complete table counts as a match.
        if init_hits == len(md5_inits):
            matches.append((fva, "MD5 Init"))
        if xform_hits == len(md5_xform):
            matches.append((fva, "MD5 Transform"))
    for hit_va in vw.searchMemory(dh_group1):
        matches.append((hit_va, "DH Well-Known MODP Group 1"))
    for hit_va in vw.searchMemory(dh_group2):
        matches.append((hit_va, "DH Well-Known MODP Group 2"))
    if not matches:
        vw.vprint("No known constants found.")
    else:
        vw.vprint("Adding VA Set: %s" % vlname)
        vw.addVaSet(vlname, (("va",VASET_ADDRESS),("Match Type", VASET_STRING)), matches)
| apache-2.0 |
GoogleCloudPlatform/python-docs-samples | monitoring/api/v3/api-client/custom_metric_test.py | 1 | 3859 | #!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Integration test for custom_metric.py
GOOGLE_APPLICATION_CREDENTIALS must be set to a Service Account for a project
that has enabled the Monitoring API.
Currently the TEST_PROJECT_ID is hard-coded to run using the project created
for this test, but it could be changed to a different project.
"""
import os
import random
import time
import uuid
import backoff
import googleapiclient.discovery
from googleapiclient.errors import HttpError
import pytest
from custom_metric import create_custom_metric
from custom_metric import delete_metric_descriptor
from custom_metric import get_custom_metric
from custom_metric import read_timeseries
from custom_metric import write_timeseries_value
# Project id comes from the environment; raises KeyError if unset.
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
PROJECT_RESOURCE = "projects/{}".format(PROJECT)
""" Custom metric domain for all custom metrics"""
CUSTOM_METRIC_DOMAIN = "custom.googleapis.com"
METRIC = 'compute.googleapis.com/instance/cpu/usage_time'
# Random suffix keeps concurrent test runs from colliding on metric names.
METRIC_NAME = uuid.uuid4().hex
METRIC_RESOURCE = "{}/{}".format(
    CUSTOM_METRIC_DOMAIN, METRIC_NAME)
METRIC_KIND = "GAUGE"
@pytest.fixture(scope='module')
def client():
    """Build a Monitoring v3 API client shared by every test in the module."""
    return googleapiclient.discovery.build('monitoring', 'v3')
@pytest.fixture(scope='module')
def custom_metric(client):
    """Create the custom metric descriptor, wait until it is readable,
    hand it to the tests, and delete it on teardown."""
    descriptor = create_custom_metric(
        client, PROJECT_RESOURCE, METRIC_RESOURCE, METRIC_KIND)
    # Wait up to 50 seconds (10 tries, 5 s apart) until the freshly
    # created metric can be read back via the get call.
    fetched = None
    for _ in range(10):
        time.sleep(5)
        fetched = get_custom_metric(
            client, PROJECT_RESOURCE, METRIC_RESOURCE)
        if fetched:
            break
    # Fail fast if the metric never became visible.
    assert fetched
    yield fetched
    # Teardown: remove the descriptor so runs do not accumulate metrics.
    delete_metric_descriptor(client, descriptor['name'])
def test_custom_metric(client, custom_metric):
    """Write one seeded value to the custom metric and read it back."""
    # Use a constant seed so the pseudo random number is known ahead of time
    random.seed(1)
    pseudo_random_value = random.randint(0, 10)
    INSTANCE_ID = "test_instance"
    # It's rare, but write can fail with HttpError 500, so we retry.
    @backoff.on_exception(backoff.expo, HttpError, max_time=120)
    def write_value():
        # Reseed it to make sure the sample code will pick the same
        # value.
        random.seed(1)
        write_timeseries_value(client, PROJECT_RESOURCE,
                               METRIC_RESOURCE, INSTANCE_ID,
                               METRIC_KIND)
    write_value()
    # Sometimes on new metric descriptors, writes have a delay in being
    # read back. Use backoff to account for this.
    @backoff.on_exception(
        backoff.expo, (AssertionError, HttpError), max_time=120)
    def eventually_consistent_test():
        response = read_timeseries(
            client, PROJECT_RESOURCE, METRIC_RESOURCE)
        # Make sure the value is not empty.
        assert 'timeSeries' in response
        value = int(
            response['timeSeries'][0]['points'][0]['value']['int64Value'])
        # using seed of 1 will create a value of 1
        assert pseudo_random_value == value
    eventually_consistent_test()
| apache-2.0 |
gnychis/grforwarder | grc/base/Element.py | 34 | 3040 | """
Copyright 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
class Element(object):
    """Base object for GRC's tree of platform/flow-graph/block/port/param
    elements: validation plumbing, error collection, a minimal tree API,
    and type-testing predicates that subclasses override.
    """

    def __init__(self, parent=None):
        self._parent = parent

    ##################################################
    # Element Validation API
    ##################################################
    def validate(self):
        """
        Validate this element and call validate on all children.
        Call this base method before adding error messages in the subclass.
        """
        self._error_messages = list()
        for child in self.get_children():
            child.validate()

    def is_valid(self):
        """
        Is this element valid?
        @return true when the element is enabled and has no error messages
        """
        return not self.get_error_messages() or not self.get_enabled()

    def add_error_message(self, msg):
        """
        Add an error message to the list of errors.
        @param msg the error message string
        """
        self._error_messages.append(msg)

    def get_error_messages(self):
        """
        Get the list of error messages from this element and all of its
        enabled children, indenting child messages for printing purposes.
        @return a list of error message strings
        """
        collected = list(self._error_messages)  # copy; never mutate our own list
        enabled_children = [c for c in self.get_children() if c.get_enabled()]
        for child in enabled_children:
            for msg in child.get_error_messages():
                collected.append("%s:\n\t%s"%(child, msg.replace("\n", "\n\t")))
        return collected

    def rewrite(self):
        """
        Rewrite this element and call rewrite on all children.
        Call this base method before rewriting the element.
        """
        for child in self.get_children():
            child.rewrite()

    def get_enabled(self):
        return True

    ##############################################
    ## Tree-like API
    ##############################################
    def get_parent(self):
        return self._parent

    def get_children(self):
        return list()

    ##############################################
    ## Type testing methods
    ##############################################
    def is_element(self):
        return True

    def is_platform(self):
        return False

    def is_flow_graph(self):
        return False

    def is_connection(self):
        return False

    def is_block(self):
        return False

    def is_source(self):
        return False

    def is_sink(self):
        return False

    def is_port(self):
        return False

    def is_param(self):
        return False
| gpl-3.0 |
sighingnow/Spider-Utils | get-bing-pic/get_bing_pic.py | 1 | 1193 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'He Tao'
'''
This script is used to fetch the picture of bing's mainpage.
Author: He Tao, hetao@mail.com
Date: December 13, 2014
'''
import http.client
import re
from datetime import date
def get_bing_pic():
    """Fetch today's Bing front-page picture and save it in the bing\\
    directory, prefixed with the current ISO date.

    Raises AttributeError when no picture URL is found on the page and
    the usual http.client/OS errors on network or file failures.
    """
    conn = http.client.HTTPConnection('cn.bing.com')
    conn.request(method = 'GET', url = '/')
    mainpage = str(conn.getresponse().read())
    pattern = re.compile(r's.cn.bing.net/az/hprichbg/rb/\S*.jpg')
    image_url = re.search(pattern, mainpage).group(0)
    # Split the host from the file name instead of relying on the old
    # hard-coded character offsets ([29:], [0:13]) into the matched URL.
    host, _, image_path = image_url.partition('/az/hprichbg/rb/')
    conn = http.client.HTTPConnection(host)
    print('start fetching %s ...' %(image_url))
    conn.request(method = 'GET', url = '/az/hprichbg/rb/%s' %(image_path))
    target = 'bing\\%s-%s' %(date.today().__str__(), image_path)
    # BUGFIX: the file was previously opened in text mode, closed empty,
    # and then reopened in binary mode; a single binary write suffices.
    with open(target, 'wb') as img:
        img.write(conn.getresponse().read())
    print('saving picture to %s ...' % (target,))
    print('fetch successfully !')

if __name__ == '__main__':
    get_bing_pic()
| mit |
akshatharaj/django | django/contrib/flatpages/models.py | 318 | 1556 | from __future__ import unicode_literals
from django.contrib.sites.models import Site
from django.core.urlresolvers import get_script_prefix
from django.db import models
from django.utils.encoding import iri_to_uri, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class FlatPage(models.Model):
    """A simple database-backed "flat" CMS page, keyed by its URL path."""
    # Path the page is served at; indexed because lookups are by URL.
    url = models.CharField(_('URL'), max_length=100, db_index=True)
    title = models.CharField(_('title'), max_length=200)
    content = models.TextField(_('content'), blank=True)
    enable_comments = models.BooleanField(_('enable comments'), default=False)
    # Optional template override; per the help text, rendering falls back
    # to 'flatpages/default.html' when this is blank.
    template_name = models.CharField(_('template name'), max_length=70, blank=True,
        help_text=_(
            "Example: 'flatpages/contact_page.html'. If this isn't provided, "
            "the system will use 'flatpages/default.html'."
        ),
    )
    registration_required = models.BooleanField(_('registration required'),
        help_text=_("If this is checked, only logged-in users will be able to view the page."),
        default=False)
    # A page may be published on several sites (django.contrib.sites).
    sites = models.ManyToManyField(Site, verbose_name=_('sites'))
    class Meta:
        db_table = 'django_flatpage'
        verbose_name = _('flat page')
        verbose_name_plural = _('flat pages')
        ordering = ('url',)
    def __str__(self):
        return "%s -- %s" % (self.url, self.title)
    def get_absolute_url(self):
        # Handle script prefix manually because we bypass reverse()
        return iri_to_uri(get_script_prefix().rstrip('/') + self.url)
| bsd-3-clause |
richardcs/ansible | lib/ansible/modules/storage/emc/emc_vnx_sg_member.py | 27 | 4976 | #!/usr/bin/python
#
# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: emc_vnx_sg_member
short_description: Manage storage group member on EMC VNX
version_added: "2.7"
description:
- "This module manages the members of an existing storage group."
extends_documentation_fragment:
- emc.emc_vnx
options:
name:
description:
- Name of the Storage group to manage.
required: true
lunid:
description:
- Lun id to be added.
required: true
state:
description:
- Indicates the desired lunid state.
- C(present) ensures specified lunid is present in the Storage Group.
- C(absent) ensures specified lunid is absent from Storage Group.
default: present
choices: [ "present", "absent"]
author:
- Luca 'remix_tj' Lorenzetto (@remixtj)
'''
EXAMPLES = '''
- name: Add lun to storage group
emc_vnx_sg_member:
name: sg01
sp_address: sp1a.fqdn
sp_user: sysadmin
sp_password: sysadmin
lunid: 100
state: present
- name: Remove lun from storage group
emc_vnx_sg_member:
name: sg01
sp_address: sp1a.fqdn
sp_user: sysadmin
sp_password: sysadmin
lunid: 100
state: absent
'''
RETURN = '''
hluid:
description: LUNID that hosts attached to the storage group will see.
type: int
returned: success
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec
try:
    from storops import VNXSystem
    from storops.exception import VNXCredentialError, VNXStorageGroupError, \
        VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
    HAS_LIB = True
# BUGFIX: was a bare 'except:', which also swallowed SystemExit,
# KeyboardInterrupt and unrelated errors raised while importing storops.
except ImportError:
    # storops is optional at import time; run_module() reports a helpful
    # failure message when it is missing.
    HAS_LIB = False
def run_module():
    """Ensure the given LUN id is attached to / detached from the storage
    group named by the 'name' parameter, reporting 'changed' and 'hluid'."""
    module_args = dict(
        name=dict(type='str', required=True),
        lunid=dict(type='int', required=True),
        state=dict(default='present', choices=['present', 'absent']),
    )
    # Shared VNX connection options (sp_address / sp_user / sp_password).
    module_args.update(emc_vnx_argument_spec)
    result = dict(
        changed=False,
        hluid=None
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    if not HAS_LIB:
        module.fail_json(msg='storops library (0.5.10 or greater) is missing.'
                         'Install with pip install storops'
                         )
    sp_user = module.params['sp_user']
    sp_address = module.params['sp_address']
    sp_password = module.params['sp_password']
    alu = module.params['lunid']
    # if the user is working with this module in only check mode we do not
    # want to make any changes to the environment, just return the current
    # state with no modifications
    # NOTE(review): this returns without calling module.exit_json, so in
    # check mode no JSON result is emitted -- confirm intended behavior.
    if module.check_mode:
        return result
    try:
        vnx = VNXSystem(sp_address, sp_user, sp_password)
        sg = vnx.get_sg(module.params['name'])
        if sg.existed:
            if module.params['state'] == 'present':
                if not sg.has_alu(alu):
                    try:
                        result['hluid'] = sg.attach_alu(alu)
                        result['changed'] = True
                    except VNXAluAlreadyAttachedError:
                        # Already attached: report its HLU, not a change.
                        result['hluid'] = sg.get_hlu(alu)
                    except (VNXAttachAluError, VNXStorageGroupError) as e:
                        module.fail_json(msg='Error attaching {0}: '
                                             '{1} '.format(alu, to_native(e)),
                                         **result)
                else:
                    result['hluid'] = sg.get_hlu(alu)
            if module.params['state'] == 'absent' and sg.has_alu(alu):
                try:
                    sg.detach_alu(alu)
                    result['changed'] = True
                except VNXDetachAluNotFoundError:
                    # being not attached when using absent is OK
                    pass
                except VNXStorageGroupError as e:
                    module.fail_json(msg='Error detaching alu {0}: '
                                         '{1} '.format(alu, to_native(e)),
                                     **result)
        else:
            module.fail_json(msg='No such storage group named '
                                 '{0}'.format(module.params['name']),
                             **result)
    except VNXCredentialError as e:
        module.fail_json(msg='{0}'.format(to_native(e)), **result)
    module.exit_json(**result)
def main():
    """Module entry point: delegate to run_module()."""
    run_module()
if __name__ == '__main__':
    main()
| gpl-3.0 |
cngo-github/nupic | examples/opf/experiments/multistep/hotgym_best_tp_16K/description.py | 32 | 2789 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
## This file defines parameters for a prediction experiment.
import os
from nupic.frameworks.opf.expdescriptionhelpers import importBaseDescription
# the sub-experiment configuration
# Sub-experiment overrides applied on top of the base hotgym description:
# quieter classifier/TP verbosity, a 28-bucket adaptive scalar encoder for
# 'consumption', time-of-day only (no day-of-week/weekend encoders), and
# tuned TP thresholds over 16K records.
config = \
{ 'modelParams': { 'clParams': { 'clVerbosity': 0},
    'sensorParams': { 'encoders': { 'consumption': { 'clipInput': True,
    'fieldname': u'consumption',
    'n': 28,
    'name': u'consumption',
    'type': 'AdaptiveScalarEncoder',
    'w': 21},
    'timestamp_dayOfWeek': None,
    'timestamp_timeOfDay': { 'fieldname': u'timestamp',
    'name': u'timestamp_timeOfDay',
    'timeOfDay': ( 21,
    8),
    'type': 'DateEncoder'},
    'timestamp_weekend': None},
    'verbosity': 0},
    'spParams': { },
    'tpParams': { 'activationThreshold': 14,
    'minThreshold': 12,
    'verbosity': 0}},
    'numRecords': 16000}
# Merge the base experiment with the overrides above.
mod = importBaseDescription('../hotgym/description.py', config)
# Re-export everything the base description defines from this module.
locals().update(mod.__dict__)
| agpl-3.0 |
Drooids/exercises-in-programming-style | 25-persistent-tables/tf-25.py | 17 | 2407 | #!/usr/bin/env python
import sys, re, string, sqlite3, os.path
#
# The relational database of this problem consists of 3 tables:
# documents, words, characters
#
def create_db_schema(connection):
    """Create the three tables (documents, words, characters) used by the
    term-frequency program, then commit."""
    ddl_statements = (
        '''CREATE TABLE documents (id INTEGER PRIMARY KEY AUTOINCREMENT, name)''',
        '''CREATE TABLE words (id, doc_id, value)''',
        '''CREATE TABLE characters (id, word_id, value)''',
    )
    cursor = connection.cursor()
    for statement in ddl_statements:
        cursor.execute(statement)
    connection.commit()
    cursor.close()
def load_file_into_database(path_to_file, connection):
    """ Takes the path to a file and loads the contents into the database """
    def _tokenize(path):
        # Lowercase word list with punctuation collapsed to spaces,
        # minus the stop words and single letters.
        with open(path) as source:
            text = source.read()
        words = re.compile('[\W_]+').sub(' ', text).lower().split()
        with open('../stop_words.txt') as source:
            stop_words = source.read().split(',')
        stop_words.extend(list(string.ascii_lowercase))
        return [word for word in words if word not in stop_words]

    tokens = _tokenize(path_to_file)
    cursor = connection.cursor()
    # Register the document itself and fetch its generated id.
    cursor.execute("INSERT INTO documents (name) VALUES (?)", (path_to_file,))
    cursor.execute("SELECT id from documents WHERE name=?", (path_to_file,))
    doc_id = cursor.fetchone()[0]
    # Continue word ids after the largest one already stored.
    cursor.execute("SELECT MAX(id) FROM words")
    highest = cursor.fetchone()[0]
    word_id = 0 if highest is None else highest
    for token in tokens:
        cursor.execute("INSERT INTO words VALUES (?, ?, ?)",
                       (word_id, doc_id, token))
        # One row per character, numbered from 0 within each word.
        for char_id, character in enumerate(token):
            cursor.execute("INSERT INTO characters VALUES (?, ?, ?)",
                           (char_id, word_id, character))
        word_id += 1
    connection.commit()
    cursor.close()
#
# Create if it doesn't exist
#
# NOTE: a pre-existing tf.db is reused as-is, so stale data from earlier
# runs will be included in the query below.
if not os.path.isfile('tf.db'):
    with sqlite3.connect('tf.db') as connection:
        create_db_schema(connection)
        load_file_into_database(sys.argv[1], connection)
# Now, let's query: top 25 words by frequency (Python 2 print statement).
with sqlite3.connect('tf.db') as connection:
    c = connection.cursor()
    c.execute("SELECT value, COUNT(*) as C FROM words GROUP BY value ORDER BY C DESC")
    for i in range(25):
        row = c.fetchone()
        if row != None:
            print row[0] + ' - ' + str(row[1])
| mit |
pschmitt/home-assistant | homeassistant/components/concord232/binary_sensor.py | 6 | 4407 | """Support for exposing Concord232 elements as sensors."""
import datetime
import logging
from concord232 import client as concord232_client
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import CONF_HOST, CONF_PORT
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
# Configuration keys specific to this platform.
CONF_EXCLUDE_ZONES = "exclude_zones"
CONF_ZONE_TYPES = "zone_types"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "Alarm"
DEFAULT_PORT = "5007"
DEFAULT_SSL = False
# Poll the panel every 10 seconds.
SCAN_INTERVAL = datetime.timedelta(seconds=10)
# Maps a zone number to one of the supported binary-sensor device classes.
ZONE_TYPES_SCHEMA = vol.Schema({cv.positive_int: vol.In(DEVICE_CLASSES)})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_EXCLUDE_ZONES, default=[]): vol.All(
            cv.ensure_list, [cv.positive_int]
        ),
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_ZONE_TYPES, default={}): ZONE_TYPES_SCHEMA,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Concord232 binary sensor platform.

    Returns False when the panel bridge cannot be reached.
    """
    host = config[CONF_HOST]
    port = config[CONF_PORT]
    exclude = config[CONF_EXCLUDE_ZONES]
    zone_types = config[CONF_ZONE_TYPES]
    sensors = []
    try:
        _LOGGER.debug("Initializing client")
        client = concord232_client.Client(f"http://{host}:{port}")
        # Prime the shared zone cache and its timestamp on the client so
        # the sensors' update() can rate-limit refreshes.
        client.zones = client.list_zones()
        client.last_zone_update = dt_util.utcnow()
    except requests.exceptions.ConnectionError as ex:
        _LOGGER.error("Unable to connect to Concord232: %s", str(ex))
        return False
    # The order of zones returned by client.list_zones() can vary.
    # When the zones are not named, this can result in the same entity
    # name mapping to different sensors in an unpredictable way. Sort
    # the zones by zone number to prevent this.
    client.zones.sort(key=lambda zone: zone["number"])
    for zone in client.zones:
        _LOGGER.info("Loading Zone found: %s", zone["name"])
        if zone["number"] not in exclude:
            sensors.append(
                Concord232ZoneSensor(
                    hass,
                    client,
                    zone,
                    # An explicit type from config wins over the name-based guess.
                    zone_types.get(zone["number"], get_opening_type(zone)),
                )
            )
    add_entities(sensors, True)
def get_opening_type(zone):
    """Guess the binary-sensor device class from the zone's name.

    Keywords are checked in priority order; anything unmatched falls
    back to the generic "opening" class.
    """
    keyword_to_class = (
        ("MOTION", "motion"),
        ("KEY", "safety"),
        ("SMOKE", "smoke"),
        ("WATER", "water"),
    )
    for keyword, device_class in keyword_to_class:
        if keyword in zone["name"]:
            return device_class
    return "opening"
class Concord232ZoneSensor(BinarySensorEntity):
    """Representation of a Concord232 zone as a sensor."""
    def __init__(self, hass, client, zone, zone_type):
        """Initialize the Concord232 binary sensor."""
        self._hass = hass
        self._client = client
        self._zone = zone
        # Cache the zone number so the zone dict can be re-resolved from
        # the client's refreshed list in update().
        self._number = zone["number"]
        self._zone_type = zone_type
    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        return self._zone_type
    @property
    def should_poll(self):
        """Polling is required; state comes from the panel client."""
        return True
    @property
    def name(self):
        """Return the name of the binary sensor."""
        return self._zone["name"]
    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        # True means "faulted" or "open" or "abnormal state"
        return bool(self._zone["state"] != "Normal")
    def update(self):
        """Get updated stats from API."""
        last_update = dt_util.utcnow() - self._client.last_zone_update
        _LOGGER.debug("Zone: %s ", self._zone)
        # Refresh the client's shared zone list at most once per second,
        # regardless of how many zone sensors poll.
        if last_update > datetime.timedelta(seconds=1):
            self._client.zones = self._client.list_zones()
            self._client.last_zone_update = dt_util.utcnow()
            _LOGGER.debug("Updated from zone: %s", self._zone["name"])
        if hasattr(self._client, "zones"):
            # Re-resolve our zone dict by number from the refreshed list.
            self._zone = next(
                (x for x in self._client.zones if x["number"] == self._number), None
            )
| apache-2.0 |
hzlf/openbroadcast | website/filer/management/commands/import_files.py | 23 | 5493 | #-*- coding: utf-8 -*-
from django.core.files import File as DjangoFile
from django.core.management.base import BaseCommand, NoArgsCommand
from filer.models.filemodels import File
from filer.models.foldermodels import Folder
from filer.models.imagemodels import Image
from filer.settings import FILER_IS_PUBLIC_DEFAULT
from optparse import make_option
import os
class FileImporter(object):
def __init__(self, * args, **kwargs):
self.path = kwargs.get('path')
self.base_folder = kwargs.get('base_folder')
self.verbosity = int(kwargs.get('verbosity', 1))
self.file_created = 0
self.image_created = 0
self.folder_created = 0
def import_file(self, file_obj, folder):
"""
Create a File or an Image into the given folder
"""
try:
iext = os.path.splitext(file_obj.name)[1].lower()
except:
iext = ''
if iext in ['.jpg', '.jpeg', '.png', '.gif']:
obj, created = Image.objects.get_or_create(
original_filename=file_obj.name,
file=file_obj,
folder=folder,
is_public=FILER_IS_PUBLIC_DEFAULT)
if created:
self.image_created += 1
else:
obj, created = File.objects.get_or_create(
original_filename=file_obj.name,
file=file_obj,
folder=folder,
is_public=FILER_IS_PUBLIC_DEFAULT)
if created:
self.file_created += 1
if self.verbosity >= 2:
print u"file_created #%s / image_created #%s -- file : %s -- created : %s" % (self.file_created,
self.image_created,
obj, created)
return obj
def get_or_create_folder(self, folder_names):
"""
Gets or creates a Folder based the list of folder names in hierarchical
order (like breadcrumbs).
get_or_create_folder(['root', 'subfolder', 'subsub folder'])
creates the folders with correct parent relations and returns the
'subsub folder' instance.
"""
if not len(folder_names):
return None
current_parent = None
for folder_name in folder_names:
current_parent, created = Folder.objects.get_or_create(name=folder_name, parent=current_parent)
if created:
self.folder_created += 1
if self.verbosity >= 2:
print u"folder_created #%s folder : %s -- created : %s" % (self.folder_created,
current_parent, created)
return current_parent
def walker(self, path=None, base_folder=None):
"""
This method walk a directory structure and create the
Folders and Files as they appear.
"""
path = path or self.path
base_folder = base_folder or self.base_folder
# prevent trailing slashes and other inconsistencies on path.
# cast to unicode so that os.walk returns path names in unicode
# (prevents encoding/decoding errors)
path = unicode(os.path.normpath(path))
if base_folder:
base_folder = unicode(os.path.normpath(base_folder))
print u"The directory structure will be imported in %s" % (base_folder,)
if self.verbosity >= 1:
print u"Import the folders and files in %s" % (path,)
root_folder_name = os.path.basename(path)
for root, dirs, files in os.walk(path):
rel_folders = root.partition(path)[2].strip(os.path.sep).split(os.path.sep)
while '' in rel_folders:
rel_folders.remove('')
if base_folder:
folder_names = base_folder.split('/') + [root_folder_name] + rel_folders
else:
folder_names = [root_folder_name] + rel_folders
folder = self.get_or_create_folder(folder_names)
for file_obj in files:
dj_file = DjangoFile(open(os.path.join(root, file_obj)),
name=file_obj)
self.import_file(file_obj=dj_file, folder=folder)
if self.verbosity >= 1:
print ('folder_created #%s / file_created #%s / ' + \
'image_created #%s') % (
self.folder_created, self.file_created,
self.image_created)
class Command(NoArgsCommand):
    """
    Import directory structure into the filer ::
        manage.py --path=/tmp/assets/images
        manage.py --path=/tmp/assets/news --folder=images
    """
    # Legacy optparse-style options (NoArgsCommand era); both are passed
    # through as kwargs to FileImporter.
    option_list = BaseCommand.option_list + (
        make_option('--path',
            action='store',
            dest='path',
            default=False,
            help='Import files located in the path into django-filer'),
        make_option('--folder',
            action='store',
            dest='base_folder',
            default=False,
            help='Specify the destination folder in which the directory structure should be imported'),
    )
    def handle_noargs(self, **options):
        """Run the import with the parsed command-line options."""
        file_importer = FileImporter(**options)
        file_importer.walker()
| gpl-3.0 |
clemkoa/scikit-learn | sklearn/decomposition/sparse_pca.py | 22 | 10587 | """Matrix factorization with Sparse PCA"""
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import warnings
import numpy as np
from ..utils import check_random_state, check_array
from ..utils.validation import check_is_fitted
from ..linear_model import ridge_regression
from ..base import BaseEstimator, TransformerMixin
from .dict_learning import dict_learning, dict_learning_online
class SparsePCA(BaseEstimator, TransformerMixin):
    """Sparse Principal Components Analysis (SparsePCA)
    Finds the set of sparse components that can optimally reconstruct
    the data. The amount of sparseness is controllable by the coefficient
    of the L1 penalty, given by the parameter alpha.
    Read more in the :ref:`User Guide <SparsePCA>`.
    Parameters
    ----------
    n_components : int,
        Number of sparse atoms to extract.
    alpha : float,
        Sparsity controlling parameter. Higher values lead to sparser
        components.
    ridge_alpha : float,
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.
    max_iter : int,
        Maximum number of iterations to perform.
    tol : float,
        Tolerance for the stopping condition.
    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    n_jobs : int,
        Number of parallel jobs to run.
    U_init : array of shape (n_samples, n_components),
        Initial values for the loadings for warm restart scenarios.
    V_init : array of shape (n_components, n_features),
        Initial values for the components for warm restart scenarios.
    verbose : int
        Controls the verbosity; the higher, the more messages. Defaults to 0.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Sparse components extracted from the data.
    error_ : array
        Vector of errors at each iteration.
    n_iter_ : int
        Number of iterations run.
    See also
    --------
    PCA
    MiniBatchSparsePCA
    DictionaryLearning
    """
    def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                 max_iter=1000, tol=1e-8, method='lars', n_jobs=1, U_init=None,
                 V_init=None, verbose=False, random_state=None):
        # Only store the constructor parameters here (sklearn convention);
        # all actual work happens in fit().
        self.n_components = n_components
        self.alpha = alpha
        self.ridge_alpha = ridge_alpha
        self.max_iter = max_iter
        self.tol = tol
        self.method = method
        self.n_jobs = n_jobs
        self.U_init = U_init
        self.V_init = V_init
        self.verbose = verbose
        self.random_state = random_state
    def fit(self, X, y=None):
        """Fit the model from data in X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.
        y : Ignored
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        # Default to extracting one component per input feature.
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        # The problem is solved as dictionary learning on X.T, so the
        # warm-start matrices are transposed: V_init seeds the code and
        # U_init seeds the dictionary of the transposed problem.
        code_init = self.V_init.T if self.V_init is not None else None
        dict_init = self.U_init.T if self.U_init is not None else None
        Vt, _, E, self.n_iter_ = dict_learning(X.T, n_components, self.alpha,
                                               tol=self.tol,
                                               max_iter=self.max_iter,
                                               method=self.method,
                                               n_jobs=self.n_jobs,
                                               verbose=self.verbose,
                                               random_state=random_state,
                                               code_init=code_init,
                                               dict_init=dict_init,
                                               return_n_iter=True
                                               )
        # Transpose back so components_ is (n_components, n_features).
        self.components_ = Vt.T
        self.error_ = E
        return self
    def transform(self, X, ridge_alpha='deprecated'):
        """Least Squares projection of the data onto the sparse components.
        To avoid instability issues in case the system is under-determined,
        regularization can be applied (Ridge regression) via the
        `ridge_alpha` parameter.
        Note that Sparse PCA components orthogonality is not enforced as in PCA
        hence one cannot use a simple linear projection.
        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.
        ridge_alpha : float, default: 0.01
            Amount of ridge shrinkage to apply in order to improve
            conditioning.
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.
               Specify ``ridge_alpha`` in the ``SparsePCA`` constructor.
        Returns
        -------
        X_new array, shape (n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self, 'components_')
        X = check_array(X)
        # Honor a per-call ridge_alpha override while the deprecated
        # argument is still accepted; the sentinel string 'deprecated'
        # means "not passed", so fall back to the constructor value.
        if ridge_alpha != 'deprecated':
            warnings.warn("The ridge_alpha parameter on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21. "
                          "Specify ridge_alpha in the SparsePCA constructor.",
                          DeprecationWarning)
            if ridge_alpha is None:
                ridge_alpha = self.ridge_alpha
        else:
            ridge_alpha = self.ridge_alpha
        U = ridge_regression(self.components_.T, X.T, ridge_alpha,
                             solver='cholesky')
        # Normalize each column of scores to unit Euclidean norm, guarding
        # against division by zero for all-zero columns.
        s = np.sqrt((U ** 2).sum(axis=0))
        s[s == 0] = 1
        U /= s
        return U
class MiniBatchSparsePCA(SparsePCA):
    """Mini-batch Sparse Principal Components Analysis
    Finds the set of sparse components that can optimally reconstruct
    the data. The amount of sparseness is controllable by the coefficient
    of the L1 penalty, given by the parameter alpha.
    Read more in the :ref:`User Guide <SparsePCA>`.
    Parameters
    ----------
    n_components : int,
        number of sparse atoms to extract
    alpha : int,
        Sparsity controlling parameter. Higher values lead to sparser
        components.
    ridge_alpha : float,
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.
    n_iter : int,
        number of iterations to perform for each mini batch
    callback : callable or None, optional (default: None)
        callable that gets invoked every five iterations
    batch_size : int,
        the number of features to take in each mini batch
    verbose : int
        Controls the verbosity; the higher, the more messages. Defaults to 0.
    shuffle : boolean,
        whether to shuffle the data before splitting it in batches
    n_jobs : int,
        number of parallel jobs to run, or -1 to autodetect.
    method : {'lars', 'cd'}
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    components_ : array, [n_components, n_features]
        Sparse components extracted from the data.
    error_ : array
        Vector of errors at each iteration.
    n_iter_ : int
        Number of iterations run.
    See also
    --------
    PCA
    SparsePCA
    DictionaryLearning
    """
    def __init__(self, n_components=None, alpha=1, ridge_alpha=0.01,
                 n_iter=100, callback=None, batch_size=3, verbose=False,
                 shuffle=True, n_jobs=1, method='lars', random_state=None):
        # Shared parameters are handled by the SparsePCA base class; only
        # the mini-batch specific ones are stored here.
        super(MiniBatchSparsePCA, self).__init__(
            n_components=n_components, alpha=alpha, verbose=verbose,
            ridge_alpha=ridge_alpha, n_jobs=n_jobs, method=method,
            random_state=random_state)
        self.n_iter = n_iter
        self.callback = callback
        self.batch_size = batch_size
        self.shuffle = shuffle
    def fit(self, X, y=None):
        """Fit the model from data in X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.
        y : Ignored
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X)
        # Default to extracting one component per input feature.
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        # As in SparsePCA.fit, the problem is solved as (online) dictionary
        # learning on X.T; the learned dictionary transposes back into the
        # sparse components.
        Vt, _, self.n_iter_ = dict_learning_online(
            X.T, n_components, alpha=self.alpha,
            n_iter=self.n_iter, return_code=True,
            dict_init=None, verbose=self.verbose,
            callback=self.callback,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            n_jobs=self.n_jobs, method=self.method,
            random_state=random_state,
            return_n_iter=True)
        self.components_ = Vt.T
        return self
| bsd-3-clause |
kleientertainment/ds_mod_tools | pkg/win32/Python27/Lib/email/__init__.py | 61 | 2979 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""A package for parsing, handling, and generating email messages."""
__version__ = '4.0.3'
__all__ = [
# Old names
'base64MIME',
'Charset',
'Encoders',
'Errors',
'Generator',
'Header',
'Iterators',
'Message',
'MIMEAudio',
'MIMEBase',
'MIMEImage',
'MIMEMessage',
'MIMEMultipart',
'MIMENonMultipart',
'MIMEText',
'Parser',
'quopriMIME',
'Utils',
'message_from_string',
'message_from_file',
# new names
'base64mime',
'charset',
'encoders',
'errors',
'generator',
'header',
'iterators',
'message',
'mime',
'parser',
'quoprimime',
'utils',
]
# Some convenience routines. Don't import Parser and Message as side-effects
# of importing email since those cascadingly import most of the rest of the
# email package.
def message_from_string(s, *args, **kws):
    """Build a Message object model by parsing the text in `s`.
    Any extra positional or keyword arguments (e.g. _class, strict) are
    forwarded unchanged to the Parser constructor.
    """
    # Import lazily so that merely importing the email package does not
    # cascade into loading the whole parser machinery.
    from email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parsestr(s)
def message_from_file(fp, *args, **kws):
    """Parse the contents of the open file object `fp` into a Message.
    Any extra positional or keyword arguments (e.g. _class, strict) are
    forwarded unchanged to the Parser constructor.
    """
    # Lazy import keeps a bare 'import email' lightweight.
    from email.parser import Parser
    parser = Parser(*args, **kws)
    return parser.parse(fp)
# Lazy loading to provide name mapping from new-style names (PEP 8 compatible
# email 4.0 module names), to old-style names (email 3.0 module names).
import sys
class LazyImporter(object):
    """Module proxy that defers importing its target until the first
    attribute access, then caches the real module's namespace on itself
    so subsequent lookups never reach __getattr__ again.
    """
    def __init__(self, module_name):
        # Present ourselves under the new-style dotted module name.
        self.__name__ = 'email.' + module_name
    def __getattr__(self, name):
        # Only reached for attributes not yet cached: import the real
        # module, copy its namespace into ours, and delegate the lookup.
        __import__(self.__name__)
        target = sys.modules[self.__name__]
        vars(self).update(vars(target))
        return getattr(target, name)
_LOWERNAMES = [
    # email.<old name> -> email.<new name is lowercased old name>
    'Charset',
    'Encoders',
    'Errors',
    'FeedParser',
    'Generator',
    'Header',
    'Iterators',
    'Message',
    'Parser',
    'Utils',
    'base64MIME',
    'quopriMIME',
]
_MIMENAMES = [
    # email.MIME<old name> -> email.mime.<new name is lowercased old name>
    'Audio',
    'Base',
    'Image',
    'Message',
    'Multipart',
    'NonMultipart',
    'Text',
]
# Install LazyImporter proxies under the old (email 3.0) module names so that
# e.g. 'import email.Utils' still works: the proxy resolves to email.utils on
# first attribute access.  Each proxy is registered both in sys.modules and
# as an attribute of the email package itself.
for _name in _LOWERNAMES:
    importer = LazyImporter(_name.lower())
    sys.modules['email.' + _name] = importer
    setattr(sys.modules['email'], _name, importer)
import email.mime
# The MIME classes moved into the email.mime subpackage; alias them both as
# email.MIME<Name> and as email.mime.<Name>.
for _name in _MIMENAMES:
    importer = LazyImporter('mime.' + _name.lower())
    sys.modules['email.MIME' + _name] = importer
    setattr(sys.modules['email'], 'MIME' + _name, importer)
    setattr(sys.modules['email.mime'], _name, importer)
| mit |
DonBeo/statsmodels | statsmodels/graphics/tests/test_gofplots.py | 27 | 6814 | import numpy as np
from numpy.testing import dec
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot, qqline, ProbPlot
from scipy import stats
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
class BaseProbplotMixin(object):
    # Shared test body for the ProbPlot test cases below.  Concrete
    # subclasses provide setup() that defines self.prbplt and self.line,
    # then call base_setup().  Methods named t_est_* (instead of test_*)
    # are apparently deliberately disabled so the runner does not collect
    # them -- TODO confirm before renaming.
    def base_setup(self):
        if have_matplotlib:
            self.fig, self.ax = plt.subplots()
        # A second, random data set used for the two-sample plot variants.
        self.other_array = np.random.normal(size=self.prbplt.data.shape)
        self.other_prbplot = sm.ProbPlot(self.other_array)
    def teardown(self):
        # Close every figure so tests do not leak matplotlib state.
        if have_matplotlib:
            plt.close('all')
    @dec.skipif(not have_matplotlib)
    def test_qqplot(self):
        self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line)
    @dec.skipif(not have_matplotlib)
    def test_ppplot(self):
        self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line)
    @dec.skipif(not have_matplotlib)
    def test_probplot(self):
        self.fig = self.prbplt.probplot(ax=self.ax, line=self.line)
    @dec.skipif(not have_matplotlib)
    def test_qqplot_other_array(self):
        self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
                                      other=self.other_array)
    @dec.skipif(not have_matplotlib)
    def test_ppplot_other_array(self):
        self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
                                      other=self.other_array)
    @dec.skipif(not have_matplotlib)
    def t_est_probplot_other_array(self):
        # NOTE(review): disabled test (t_est_ prefix) -- confirm intent.
        self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
                                        other=self.other_array)
    @dec.skipif(not have_matplotlib)
    def test_qqplot_other_prbplt(self):
        self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
                                      other=self.other_prbplot)
    @dec.skipif(not have_matplotlib)
    def test_ppplot_other_prbplt(self):
        self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
                                      other=self.other_prbplot)
    @dec.skipif(not have_matplotlib)
    def t_est_probplot_other_prbplt(self):
        # NOTE(review): disabled test (t_est_ prefix) -- confirm intent.
        self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
                                        other=self.other_prbplot)
    @dec.skipif(not have_matplotlib)
    def test_qqplot_custom_labels(self):
        self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
                                      xlabel='Custom X-Label',
                                      ylabel='Custom Y-Label')
    @dec.skipif(not have_matplotlib)
    def test_ppplot_custom_labels(self):
        self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
                                      xlabel='Custom X-Label',
                                      ylabel='Custom Y-Label')
    @dec.skipif(not have_matplotlib)
    def test_probplot_custom_labels(self):
        self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
                                        xlabel='Custom X-Label',
                                        ylabel='Custom Y-Label')
    @dec.skipif(not have_matplotlib)
    def test_qqplot_pltkwargs(self):
        self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
                                      marker='d',
                                      markerfacecolor='cornflowerblue',
                                      markeredgecolor='white',
                                      alpha=0.5)
    @dec.skipif(not have_matplotlib)
    def test_ppplot_pltkwargs(self):
        self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
                                      marker='d',
                                      markerfacecolor='cornflowerblue',
                                      markeredgecolor='white',
                                      alpha=0.5)
    @dec.skipif(not have_matplotlib)
    def test_probplot_pltkwargs(self):
        self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
                                        marker='d',
                                        markerfacecolor='cornflowerblue',
                                        markeredgecolor='white',
                                        alpha=0.5)
class TestProbPlotLongely(BaseProbplotMixin):
    # ProbPlot of OLS residuals on the Longley dataset against a Student's
    # t(4) reference distribution, with a regression ('r') reference line.
    def setup(self):
        np.random.seed(5)
        self.data = sm.datasets.longley.load()
        self.data.exog = sm.add_constant(self.data.exog, prepend=False)
        self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
        self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
        self.line = 'r'
        self.base_setup()
class TestProbPlotRandomNormalMinimal(BaseProbplotMixin):
    # Minimal ProbPlot construction: random normal data, default options,
    # and no reference line.
    def setup(self):
        np.random.seed(5)
        self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
        self.prbplt = sm.ProbPlot(self.data)
        self.line = None
        self.base_setup()
class TestProbPlotRandomNormalWithFit(BaseProbplotMixin):
    # ProbPlot with fit=True (distribution parameters estimated from the
    # data) and a quantile ('q') reference line.
    def setup(self):
        np.random.seed(5)
        self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
        self.prbplt = sm.ProbPlot(self.data, fit=True)
        self.line = 'q'
        self.base_setup()
class TestProbPlotRandomNormalLocScale(BaseProbplotMixin):
    # ProbPlot with explicit loc/scale matching the data-generating
    # parameters, and a 45-degree reference line.
    def setup(self):
        np.random.seed(5)
        self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
        self.prbplt = sm.ProbPlot(self.data, loc=8.25, scale=3.25)
        self.line = '45'
        self.base_setup()
class TestTopLevel(object):
    # Exercises the module-level convenience functions (sm.qqplot and
    # sm.qqplot_2samples) rather than the ProbPlot methods directly.
    def setup(self):
        self.data = sm.datasets.longley.load()
        self.data.exog = sm.add_constant(self.data.exog, prepend=False)
        self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
        self.res = self.mod_fit.resid
        self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
        self.other_array = np.random.normal(size=self.prbplt.data.shape)
        self.other_prbplot = sm.ProbPlot(self.other_array)
    def teardown(self):
        # Close every figure so tests do not leak matplotlib state.
        if have_matplotlib:
            plt.close('all')
    @dec.skipif(not have_matplotlib)
    def test_qqplot(self):
        fig = sm.qqplot(self.res, line='r')
    @dec.skipif(not have_matplotlib)
    def test_qqplot_2samples_ProbPlotObjects(self):
        # also tests all values for line
        for line in ['r', 'q', '45', 's']:
            # test with `ProbPlot` instances
            fig = sm.qqplot_2samples(self.prbplt, self.other_prbplot,
                                     line=line)
    @dec.skipif(not have_matplotlib)
    def test_qqplot_2samples_arrays(self):
        # also tests all values for line
        for line in ['r', 'q', '45', 's']:
            # test with arrays
            fig = sm.qqplot_2samples(self.res, self.other_array, line=line)
| bsd-3-clause |
dreispt/todo_app | TodoClientApp/todo_odoorpc.py | 1 | 1127 | from odoorpc import ODOO
class TodoAPI():
    """Thin wrapper around an Odoo RPC connection for the todo.task model."""
    def __init__(self, srv, port, db, user, pwd):
        # Open the RPC connection and authenticate straight away; the
        # logged-in user id is kept for record ownership on create.
        self.api = ODOO(srv, port=port)
        self.api.login(db, user, pwd)
        self.uid = self.api.env.uid
        self.model = 'todo.task'
        self.Model = self.api.env[self.model]
    def execute(self, method, arg_list, kwarg_dict=None):
        """Call an arbitrary method of the todo.task model.
        arg_list supplies positional arguments; kwarg_dict (optional)
        supplies keyword arguments.
        """
        # BUG FIX: the original unpacked '**kwarg_dict' even when it was
        # None, which raises TypeError; substitute an empty mapping.
        return self.api.execute(
            self.model,
            method, *arg_list, **(kwarg_dict or {}))
    def read(self, ids=None):
        """Return id/name/is_done dicts for the given ids (all tasks if
        ids is None or empty)."""
        # BUG FIX: the domain operator was ' in' (leading space), which is
        # not a valid Odoo domain operator; it must be 'in'.
        domain = [('id', 'in', ids)] if ids else []
        fields = ['id', 'name', 'is_done']
        return self.Model.search_read(domain, fields)
    def write(self, text, id=None):
        """Rename task `id` to `text`, or create a new task owned by the
        logged-in user when no id is given.  Returns the task id."""
        if id:
            self.Model.write(id, {'name': text})
        else:
            vals = {'name': text, 'user_id': self.uid}
            id = self.Model.create(vals)
        return id
    def unlink(self, id):
        """Delete the task with the given id."""
        return self.Model.unlink(id)
if __name__ == '__main__':
    # Manual smoke test: connect to a local Odoo instance with default
    # admin credentials and pretty-print every task.
    srv, port, db = 'localhost', 8069, 'todo'
    user, pwd = 'admin', 'admin'
    api = TodoAPI(srv, port, db, user, pwd)
    from pprint import pprint
    pprint(api.read())
| agpl-3.0 |
cy/react-native-talk | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
  """Visual Studio XML user user file writer."""
  def __init__(self, user_file_path, version, name):
    """Initializes the user file.
    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml node list for that config.
    self.configurations = {}
  def AddConfig(self, name):
    """Adds a configuration to the project.
    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]
  # NOTE(review): mutable default argument for `environment`; harmless
  # here because it is only read, never mutated in place.
  def AddDebugSettings(self, config_name, command, environment = {},
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.
    Args:
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      working_directory: other files which may trigger the rule. (optional)
    """
    # Quote for cmd shell and resolve the executable to a real file so
    # the debugger has something to launch.
    command = _QuoteWin32CommandLineArgs(command)
    abs_command = _FindCommandInPath(command[0])
    # Flatten an environment dict into the KEY="value" space-separated
    # string format MSVS expects.
    if environment and isinstance(environment, dict):
      env_list = ['%s="%s"' % (key, val)
                  for (key,val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''
    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]
    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)
    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)
  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    # Sort by configuration name so output is deterministic.
    for config, spec in sorted(self.configurations.iteritems()):
      configs.append(spec)
    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    # easy_xml only rewrites the file when the content actually changed,
    # which avoids needless rebuild triggers.
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| mit |
stianrh/askbot-nordic | askbot/migrations/0115_auto__chg_field_post_thread.py | 18 | 25846 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Post.thread'
db.alter_column('askbot_post', 'thread_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['askbot.Thread']))
    def backwards(self, orm):
        """Restore ``Post.thread`` to NOT NULL.

        NOTE(review): 'zhopa' is presumably a junk placeholder default used
        only so existing NULL rows can be backfilled while re-adding the
        NOT NULL constraint — it is not a meaningful value; confirm before
        ever running this backwards on real data.
        """
        # Changing field 'Post.thread'
        db.alter_column('askbot_post', 'thread_id', self.gf('django.db.models.fields.related.ForeignKey')(default='zhopa', to=orm['askbot.Thread']))
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.groupmembership': {
'Meta': {'object_name': 'GroupMembership'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_memberships'", 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_memberships'", 'to': "orm['auth.User']"})
},
'askbot.groupprofile': {
'Meta': {'object_name': 'GroupProfile'},
'group_tag': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'group_profile'", 'unique': 'True', 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'posts'", 'null': 'True', 'blank': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('post', 'revision'),)", 'object_name': 'PostRevision'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Post']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.replyaddress': {
'Meta': {'object_name': 'ReplyAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'allowed_from_email': ('django.db.models.fields.EmailField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reply_addresses'", 'to': "orm['askbot.Post']"}),
'response_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'edit_addresses'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'tag_wiki': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'described_tag'", 'unique': 'True', 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 |
factorlibre/OCB | addons/mrp/wizard/mrp_workcenter_load.py | 381 | 2222 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class mrp_workcenter_load(osv.osv_memory):
    """Transient wizard collecting the options for the Work Center Load
    report and launching it on the active records.
    """
    _name = 'mrp.workcenter.load'
    _description = 'Work Center Load'
    _columns = {
        'time_unit': fields.selection([('day', 'Day by day'),('week', 'Per week'),('month', 'Per month')],'Type of period', required=True),
        'measure_unit': fields.selection([('hours', 'Amount in hours'),('cycles', 'Amount in cycles')],'Amount measuring unit', required=True),
    }

    def print_report(self, cr, uid, ids, context=None):
        """Build and return the report action for the Work Center Load.

        :param cr: database cursor
        :param uid: id of the user currently logged in
        :param ids: ids of the wizard records holding the chosen options
        :param context: standard context dictionary; ``active_ids`` selects
                        the work centers to report on
        :return: an ``ir.actions.report.xml`` action dictionary
        """
        if context is None:
            context = {}
        # Read the options the user picked in the wizard form; an empty
        # read result degrades to an empty form dict.
        values = self.read(cr, uid, ids, ['time_unit','measure_unit'])
        form_data = values[0] if values else {}
        report_data = {
            'ids': context.get('active_ids',[]),
            'form': form_data,
        }
        return {
            'type' : 'ir.actions.report.xml',
            'report_name':'mrp.workcenter.load',
            'datas' : report_data,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mgax/babel | babel/support.py | 1 | 22610 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Several classes and functions that help with integrating and using Babel
in applications.
.. note: the code in this module is not used by Babel itself
"""
from datetime import date, datetime, timedelta
import gettext
import locale
from babel.core import Locale
from babel.dates import format_date, format_datetime, format_time, \
format_timedelta
from babel.numbers import format_number, format_decimal, format_currency, \
format_percent, format_scientific
from babel.util import UTC
__all__ = ['Format', 'LazyProxy', 'NullTranslations', 'Translations']
class Format(object):
    """Wrapper class providing the various date and number formatting functions
    bound to a specific locale and time-zone.
    >>> fmt = Format('en_US', UTC)
    >>> fmt.date(date(2007, 4, 1))
    u'Apr 1, 2007'
    >>> fmt.decimal(1.2345)
    u'1.234'
    """
    def __init__(self, locale, tzinfo=None):
        """Initialize the formatter.
        :param locale: the locale identifier or `Locale` instance
        :param tzinfo: the time-zone info (a `tzinfo` instance or `None`)
        """
        self.locale = Locale.parse(locale)
        self.tzinfo = tzinfo
    def date(self, date=None, format='medium'):
        """Return a date formatted according to the given pattern.
        >>> fmt = Format('en_US')
        >>> fmt.date(date(2007, 4, 1))
        u'Apr 1, 2007'
        :param date: the `date` object to format
        :param format: the format name or pattern passed through to
                       `format_date`
        :see: `babel.dates.format_date`
        """
        return format_date(date, format, locale=self.locale)
    def datetime(self, datetime=None, format='medium'):
        """Return a date and time formatted according to the given pattern.
        >>> from pytz import timezone
        >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern'))
        >>> fmt.datetime(datetime(2007, 4, 1, 15, 30))
        u'Apr 1, 2007, 11:30:00 AM'
        :param datetime: the `datetime` object to format
        :param format: the format name or pattern passed through to
                       `format_datetime`
        :see: `babel.dates.format_datetime`
        """
        return format_datetime(datetime, format, tzinfo=self.tzinfo,
                               locale=self.locale)
    def time(self, time=None, format='medium'):
        """Return a time formatted according to the given pattern.
        >>> from pytz import timezone
        >>> fmt = Format('en_US', tzinfo=timezone('US/Eastern'))
        >>> fmt.time(datetime(2007, 4, 1, 15, 30))
        u'11:30:00 AM'
        :param time: the `time` or `datetime` object to format
        :param format: the format name or pattern passed through to
                       `format_time`
        :see: `babel.dates.format_time`
        """
        return format_time(time, format, tzinfo=self.tzinfo, locale=self.locale)
    def timedelta(self, delta, granularity='second', threshold=.85,
                  format='medium', add_direction=False):
        """Return a time delta according to the rules of the given locale.
        >>> fmt = Format('en_US')
        >>> fmt.timedelta(timedelta(weeks=11))
        u'3 months'
        :param delta: the `timedelta` to format
        :param granularity: smallest unit considered (forwarded)
        :param threshold: rounding threshold (forwarded)
        :param format: format length name (forwarded)
        :param add_direction: whether to render "in ..."/"... ago"
                              (forwarded)
        :see: `babel.dates.format_timedelta`
        """
        return format_timedelta(delta, granularity=granularity,
                                threshold=threshold,
                                format=format, add_direction=add_direction,
                                locale=self.locale)
    def number(self, number):
        """Return an integer number formatted for the locale.
        >>> fmt = Format('en_US')
        >>> fmt.number(1099)
        u'1,099'
        :param number: the number to format
        :see: `babel.numbers.format_number`
        """
        return format_number(number, locale=self.locale)
    def decimal(self, number, format=None):
        """Return a decimal number formatted for the locale.
        >>> fmt = Format('en_US')
        >>> fmt.decimal(1.2345)
        u'1.234'
        :param number: the number to format
        :param format: an optional number pattern (forwarded)
        :see: `babel.numbers.format_decimal`
        """
        return format_decimal(number, format, locale=self.locale)
    def currency(self, number, currency):
        """Return a number in the given currency formatted for the locale.
        :param number: the number to format
        :param currency: the currency code (e.g. ``'USD'``)
        :see: `babel.numbers.format_currency`
        """
        return format_currency(number, currency, locale=self.locale)
    def percent(self, number, format=None):
        """Return a number formatted as percentage for the locale.
        >>> fmt = Format('en_US')
        >>> fmt.percent(0.34)
        u'34%'
        :param number: the ratio to format (``0.34`` -> ``34%``)
        :param format: an optional number pattern (forwarded)
        :see: `babel.numbers.format_percent`
        """
        return format_percent(number, format, locale=self.locale)
    def scientific(self, number):
        """Return a number formatted using scientific notation for the locale.
        :param number: the number to format
        :see: `babel.numbers.format_scientific`
        """
        return format_scientific(number, locale=self.locale)
class LazyProxy(object):
    """Class for proxy objects that delegate to a specified function to evaluate
    the actual object.

    >>> def greeting(name='world'):
    ...     return 'Hello, %s!' % name
    >>> lazy_greeting = LazyProxy(greeting, name='Joe')
    >>> print lazy_greeting
    Hello, Joe!
    >>> u' ' + lazy_greeting
    u' Hello, Joe!'
    >>> u'(%s)' % lazy_greeting
    u'(Hello, Joe!)'

    This can be used, for example, to implement lazy translation functions that
    delay the actual translation until the string is actually used. The
    rationale for such behavior is that the locale of the user may not always
    be available. In web applications, you only know the locale when processing
    a request.

    The proxy implementation attempts to be as complete as possible, so that
    the lazy objects should mostly work as expected, for example for sorting:

    >>> greetings = [
    ...     LazyProxy(greeting, 'world'),
    ...     LazyProxy(greeting, 'Joe'),
    ...     LazyProxy(greeting, 'universe'),
    ... ]
    >>> greetings.sort()
    >>> for greeting in greetings:
    ...     print greeting
    Hello, Joe!
    Hello, universe!
    Hello, world!
    """
    __slots__ = ['_func', '_args', '_kwargs', '_value', '_is_cache_enabled']

    # Sentinel stored in ``_value`` while the proxy is still unevaluated.
    # Previously ``None`` doubled as the "not yet computed" marker, which
    # meant a callable that legitimately returned None was re-invoked on
    # *every* access even with caching enabled.  A private marker object
    # fixes that while keeping the external behavior identical otherwise.
    _undefined = object()

    def __init__(self, func, *args, **kwargs):
        """Create a proxy for ``func(*args, **kwargs)``.

        :param func: the callable producing the actual value
        :param enable_cache: keyword-only option (default True); when false
                             the callable is re-evaluated on every access
                             instead of its result being cached
        """
        is_cache_enabled = kwargs.pop('enable_cache', True)
        # Avoid triggering our own __setattr__ implementation, which would
        # try to set attributes on the (not yet computed) value.
        object.__setattr__(self, '_func', func)
        object.__setattr__(self, '_args', args)
        object.__setattr__(self, '_kwargs', kwargs)
        object.__setattr__(self, '_is_cache_enabled', is_cache_enabled)
        object.__setattr__(self, '_value', LazyProxy._undefined)

    @property
    def value(self):
        """The actual object, computed by calling ``func`` on first access
        and cached afterwards (unless caching was disabled).
        """
        if self._value is LazyProxy._undefined:
            value = self._func(*self._args, **self._kwargs)
            if not self._is_cache_enabled:
                return value
            object.__setattr__(self, '_value', value)
        return self._value

    # -- container / conversion protocol, all delegated to ``self.value`` --

    def __contains__(self, key):
        return key in self.value

    def __nonzero__(self):
        return bool(self.value)

    def __dir__(self):
        return dir(self.value)

    def __iter__(self):
        return iter(self.value)

    def __len__(self):
        return len(self.value)

    def __str__(self):
        return str(self.value)

    def __unicode__(self):
        return unicode(self.value)

    # -- arithmetic / formatting operators --

    def __add__(self, other):
        return self.value + other

    def __radd__(self, other):
        return other + self.value

    def __mod__(self, other):
        return self.value % other

    def __rmod__(self, other):
        return other % self.value

    def __mul__(self, other):
        return self.value * other

    def __rmul__(self, other):
        return other * self.value

    def __call__(self, *args, **kwargs):
        return self.value(*args, **kwargs)

    # -- rich comparisons, so proxies sort and compare like their values --

    def __lt__(self, other):
        return self.value < other

    def __le__(self, other):
        return self.value <= other

    def __eq__(self, other):
        return self.value == other

    def __ne__(self, other):
        return self.value != other

    def __gt__(self, other):
        return self.value > other

    def __ge__(self, other):
        return self.value >= other

    # -- attribute and item access forwarded to the value --

    def __delattr__(self, name):
        delattr(self.value, name)

    def __getattr__(self, name):
        return getattr(self.value, name)

    def __setattr__(self, name, value):
        setattr(self.value, name, value)

    def __delitem__(self, key):
        del self.value[key]

    def __getitem__(self, key):
        return self.value[key]

    def __setitem__(self, key, value):
        self.value[key] = value
class NullTranslations(gettext.NullTranslations, object):
    """Empty translations catalog that additionally provides Babel's
    domain-aware (``d*gettext``) and context-aware (``*pgettext``)
    method variants on top of :class:`gettext.NullTranslations`.
    """

    # Subclasses override this with their default gettext domain name.
    DEFAULT_DOMAIN = None

    def __init__(self, fp=None):
        """Initialize a simple translations class which is not backed by a
        real catalog. Behaves similar to gettext.NullTranslations but also
        offers Babel's on *gettext methods (e.g. 'dgettext()').

        :param fp: a file-like object (ignored in this class)
        """
        # These attributes are set by gettext.NullTranslations when a catalog
        # is parsed (fp != None). Ensure that they are always present because
        # some *gettext methods (including '.gettext()') rely on the attributes.
        self._catalog = {}
        self.plural = lambda n: int(n != 1)
        super(NullTranslations, self).__init__(fp=fp)
        # Catalog file names this instance was built from (empty when fp is
        # None or has no 'name' attribute, e.g. a StringIO).
        self.files = filter(None, [getattr(fp, 'name', None)])
        self.domain = self.DEFAULT_DOMAIN
        # Maps additional gettext domain names to their catalog instances;
        # used by all d*gettext variants below.
        self._domains = {}

    def dgettext(self, domain, message):
        """Like ``gettext()``, but look the message up in the specified
        domain.
        """
        return self._domains.get(domain, self).gettext(message)

    def ldgettext(self, domain, message):
        """Like ``lgettext()``, but look the message up in the specified
        domain.
        """
        return self._domains.get(domain, self).lgettext(message)

    def udgettext(self, domain, message):
        """Like ``ugettext()``, but look the message up in the specified
        domain.
        """
        return self._domains.get(domain, self).ugettext(message)

    # backward compatibility with 0.9
    dugettext = udgettext

    def dngettext(self, domain, singular, plural, num):
        """Like ``ngettext()``, but look the message up in the specified
        domain.
        """
        return self._domains.get(domain, self).ngettext(singular, plural, num)

    def ldngettext(self, domain, singular, plural, num):
        """Like ``lngettext()``, but look the message up in the specified
        domain.
        """
        return self._domains.get(domain, self).lngettext(singular, plural, num)

    def udngettext(self, domain, singular, plural, num):
        """Like ``ungettext()`` but look the message up in the specified
        domain.
        """
        return self._domains.get(domain, self).ungettext(singular, plural, num)

    # backward compatibility with 0.9
    dungettext = udngettext

    # Most of the downwards code, until it get's included in stdlib, from:
    # http://bugs.python.org/file10036/gettext-pgettext.patch
    #
    # The encoding of a msgctxt and a msgid in a .mo file is
    # msgctxt + "\x04" + msgid (gettext version >= 0.15)
    CONTEXT_ENCODING = '%s\x04%s'

    def pgettext(self, context, message):
        """Look up the `context` and `message` id in the catalog and return the
        corresponding message string, as an 8-bit string encoded with the
        catalog's charset encoding, if known. If there is no entry in the
        catalog for the `message` id and `context`, and a fallback has been
        set, the look up is forwarded to the fallback's ``pgettext()``
        method. Otherwise, the `message` id is returned.
        """
        ctxt_msg_id = self.CONTEXT_ENCODING % (context, message)
        # Sentinel distinguishes "missing" from a legitimately falsy entry.
        missing = object()
        tmsg = self._catalog.get(ctxt_msg_id, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.pgettext(context, message)
            return message
        # Encode the Unicode tmsg back to an 8-bit string, if possible
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        elif self._charset:
            return tmsg.encode(self._charset)
        return tmsg

    def lpgettext(self, context, message):
        """Equivalent to ``pgettext()``, but the translation is returned in the
        preferred system encoding, if no other encoding was explicitly set with
        ``bind_textdomain_codeset()``.
        """
        ctxt_msg_id = self.CONTEXT_ENCODING % (context, message)
        missing = object()
        tmsg = self._catalog.get(ctxt_msg_id, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.lpgettext(context, message)
            return message
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        return tmsg.encode(locale.getpreferredencoding())

    def npgettext(self, context, singular, plural, num):
        """Do a plural-forms lookup of a message id. `singular` is used as the
        message id for purposes of lookup in the catalog, while `num` is used to
        determine which plural form to use. The returned message string is an
        8-bit string encoded with the catalog's charset encoding, if known.

        If the message id for `context` is not found in the catalog, and a
        fallback is specified, the request is forwarded to the fallback's
        ``npgettext()`` method. Otherwise, when ``num`` is 1 ``singular`` is
        returned, and ``plural`` is returned in all other cases.
        """
        ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular)
        try:
            # Plural entries are keyed by (msgid, plural-form-index).
            tmsg = self._catalog[(ctxt_msg_id, self.plural(num))]
            if self._output_charset:
                return tmsg.encode(self._output_charset)
            elif self._charset:
                return tmsg.encode(self._charset)
            return tmsg
        except KeyError:
            if self._fallback:
                return self._fallback.npgettext(context, singular, plural, num)
            if num == 1:
                return singular
            else:
                return plural

    def lnpgettext(self, context, singular, plural, num):
        """Equivalent to ``npgettext()``, but the translation is returned in the
        preferred system encoding, if no other encoding was explicitly set with
        ``bind_textdomain_codeset()``.
        """
        ctxt_msg_id = self.CONTEXT_ENCODING % (context, singular)
        try:
            tmsg = self._catalog[(ctxt_msg_id, self.plural(num))]
            if self._output_charset:
                return tmsg.encode(self._output_charset)
            return tmsg.encode(locale.getpreferredencoding())
        except KeyError:
            if self._fallback:
                return self._fallback.lnpgettext(context, singular, plural, num)
            if num == 1:
                return singular
            else:
                return plural

    def upgettext(self, context, message):
        """Look up the `context` and `message` id in the catalog and return the
        corresponding message string, as a Unicode string. If there is no entry
        in the catalog for the `message` id and `context`, and a fallback has
        been set, the look up is forwarded to the fallback's ``upgettext()``
        method. Otherwise, the `message` id is returned.
        """
        ctxt_message_id = self.CONTEXT_ENCODING % (context, message)
        missing = object()
        tmsg = self._catalog.get(ctxt_message_id, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.upgettext(context, message)
            return unicode(message)
        return tmsg

    def unpgettext(self, context, singular, plural, num):
        """Do a plural-forms lookup of a message id. `singular` is used as the
        message id for purposes of lookup in the catalog, while `num` is used to
        determine which plural form to use. The returned message string is a
        Unicode string.

        If the message id for `context` is not found in the catalog, and a
        fallback is specified, the request is forwarded to the fallback's
        ``unpgettext()`` method. Otherwise, when `num` is 1 `singular` is
        returned, and `plural` is returned in all other cases.
        """
        ctxt_message_id = self.CONTEXT_ENCODING % (context, singular)
        try:
            tmsg = self._catalog[(ctxt_message_id, self.plural(num))]
        except KeyError:
            if self._fallback:
                return self._fallback.unpgettext(context, singular, plural, num)
            if num == 1:
                tmsg = unicode(singular)
            else:
                tmsg = unicode(plural)
        return tmsg

    def dpgettext(self, domain, context, message):
        """Like `pgettext()`, but look the message up in the specified
        `domain`.
        """
        return self._domains.get(domain, self).pgettext(context, message)

    def udpgettext(self, domain, context, message):
        """Like `upgettext()`, but look the message up in the specified
        `domain`.
        """
        return self._domains.get(domain, self).upgettext(context, message)

    # backward compatibility with 0.9
    dupgettext = udpgettext

    def ldpgettext(self, domain, context, message):
        """Equivalent to ``dpgettext()``, but the translation is returned in the
        preferred system encoding, if no other encoding was explicitly set with
        ``bind_textdomain_codeset()``.
        """
        return self._domains.get(domain, self).lpgettext(context, message)

    def dnpgettext(self, domain, context, singular, plural, num):
        """Like ``npgettext``, but look the message up in the specified
        `domain`.
        """
        return self._domains.get(domain, self).npgettext(context, singular,
                                                         plural, num)

    def udnpgettext(self, domain, context, singular, plural, num):
        """Like ``unpgettext``, but look the message up in the specified
        `domain`.
        """
        return self._domains.get(domain, self).unpgettext(context, singular,
                                                          plural, num)

    # backward compatibility with 0.9
    dunpgettext = udnpgettext

    def ldnpgettext(self, domain, context, singular, plural, num):
        """Equivalent to ``dnpgettext()``, but the translation is returned in
        the preferred system encoding, if no other encoding was explicitly set
        with ``bind_textdomain_codeset()``.
        """
        return self._domains.get(domain, self).lnpgettext(context, singular,
                                                          plural, num)
class Translations(NullTranslations, gettext.GNUTranslations):
    """An extended translation catalog class."""

    DEFAULT_DOMAIN = 'messages'

    def __init__(self, fp=None, domain=None):
        """Initialize the translations catalog.

        :param fp: the file-like object the translation should be read from
        :param domain: the message domain (default: 'messages')
        """
        super(Translations, self).__init__(fp=fp)
        self.domain = domain or self.DEFAULT_DOMAIN

    @classmethod
    def load(cls, dirname=None, locales=None, domain=None):
        """Load translations from the given directory.

        :param dirname: the directory containing the ``MO`` files
        :param locales: the list of locales in order of preference (items in
                        this list can be either `Locale` objects or locale
                        strings)
        :param domain: the message domain (default: 'messages')
        :return: the loaded catalog, or a ``NullTranslations`` instance if no
                 matching translations were found
        :rtype: `Translations`
        """
        if locales is not None:
            if not isinstance(locales, (list, tuple)):
                locales = [locales]
            # gettext.find expects plain locale identifier strings.
            locales = [str(locale) for locale in locales]
        if not domain:
            domain = cls.DEFAULT_DOMAIN
        filename = gettext.find(domain, dirname, locales)
        if not filename:
            return NullTranslations()
        with open(filename, 'rb') as fp:
            return cls(fp=fp, domain=domain)

    def __repr__(self):
        return '<%s: "%s">' % (type(self).__name__,
                               self._info.get('project-id-version'))

    def add(self, translations, merge=True):
        """Add the given translations to the catalog.

        If the domain of the translations is different than that of the
        current catalog, they are added as a catalog that is only accessible
        by the various ``d*gettext`` functions.

        :param translations: the `Translations` instance with the messages to
                             add
        :param merge: whether translations for message domains that have
                      already been added should be merged with the existing
                      translations
        :return: the `Translations` instance (``self``) so that `merge` calls
                 can be easily chained
        :rtype: `Translations`
        """
        domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN)
        # Same domain as this catalog: merge directly into it.
        if merge and domain == self.domain:
            return self.merge(translations)
        existing = self._domains.get(domain)
        if merge and existing is not None:
            existing.merge(translations)
        else:
            # New domain catalog falls back to this one for missing messages.
            translations.add_fallback(self)
            self._domains[domain] = translations
        return self

    def merge(self, translations):
        """Merge the given translations into the catalog.

        Message translations in the specified catalog override any messages
        with the same identifier in the existing catalog.

        :param translations: the `Translations` instance with the messages to
                             merge
        :return: the `Translations` instance (``self``) so that `merge` calls
                 can be easily chained
        :rtype: `Translations`
        """
        if isinstance(translations, gettext.GNUTranslations):
            self._catalog.update(translations._catalog)
            if isinstance(translations, Translations):
                self.files.extend(translations.files)
        return self
| bsd-3-clause |
wilkeraziz/notebooks | MoL_June15/parser.py | 1 | 3853 | from cfg import read_grammar_rules, WCFG
from rule import Rule
from symbol import is_terminal, is_nonterminal, make_symbol
from collections import defaultdict
from item import Item
from agenda import Agenda
def cky_axioms(cfg, sentence):
    """
    Instantiate the CKY axioms.

    Inference rule:
        -------------------- (X -> alpha) in cfg and 0 <= i < n
        [X -> * alpha, [i]]

    :param cfg: a context-free grammar (an instance of WCFG)
    :param sentence: the input sentence (as a list or tuple)
    :returns: a list of items, one per (rule, start position) pair
    """
    n = len(sentence)
    # One fresh item with the dot at the rule start, for every rule and
    # every possible start position 0 .. n-1.
    return [Item(rule, [i]) for rule in cfg for i in range(n)]
def scan(item, sentence):
    """
    Scan a terminal (compatible with CKY and Earley).

    Inference rule:
        [X -> alpha * x beta, [i ... j]]
        ------------------------------------ sentence[j] == x
        [X -> alpha x * beta, [i ... j + 1]]

    :param item: an active Item
    :param sentence: a list/tuple of terminals
    :returns: the advanced Item, or None when the terminal does not match
    """
    assert is_terminal(item.next), 'Only terminal symbols can be scanned, got %s' % item.next
    position = item.dot
    # Guard: the dot must be within the sentence and the next input word
    # must equal the terminal after the dot.
    if position >= len(sentence) or sentence[position] != item.next:
        return None
    return item.advance(position + 1)
def complete(item, agenda):
    """
    Move the dot over nonterminals (compatible with CKY and Earley).

    Inference rule:
        [X -> alpha * Y beta, [i ... k]] [Y -> gamma *, [k ... j]]
        ----------------------------------------------------------
        [X -> alpha Y * beta, [i ... j]]

    :param item: an active Item.
        if `item` is complete, we advance the dot of incomplete passive
        items waiting for `item.lhs` at `item.start`;
        otherwise, we advance `item` over every known complete span of
        `item.next` starting at `item.dot`.
    :param agenda: an instance of Agenda
    :returns: a list of new items
    """
    if item.is_complete():
        # Passive completion: every incomplete item waiting for item.lhs
        # spanning from item.start can now move its dot to item.dot.
        return [waiting.advance(item.dot)
                for waiting in agenda.waiting(item.lhs, item.start)]
    # Active completion: collect the distinct end positions of complete
    # spans of item.next that begin at item.dot, then advance to each.
    end_positions = {passive.dot
                     for passive in agenda.complete(item.next, item.dot)}
    return [item.advance(end) for end in end_positions]
def make_forest(complete_items):
    """
    Turn complete items into a WCFG.

    Each item [X -> gamma *, [i ... j]] contributes one rule whose symbols
    are annotated with the span positions recorded in the item.

    :param complete_items: complete items (iterable)
    :returns: a WCFG
    """
    forest = WCFG()
    for item in complete_items:
        annotated_lhs = make_symbol(item.lhs, item.start, item.dot)
        annotated_rhs = [make_symbol(sym, item.state(k), item.state(k + 1))
                         for k, sym in enumerate(item.rule.rhs)]
        forest.add(Rule(annotated_lhs, annotated_rhs, item.rule.prob))
    return forest
def make_chart(complete_items, n):
    """
    Organise complete items into a CKY-style chart indexed by span.

    :param complete_items: iterable of complete items
    :param n: sentence length
    :returns: an (n + 1) x (n + 1) matrix where chart[i][j] maps a LHS
        symbol to the list of (rule, dots) pairs covering span [i, j)
    """
    # Span end positions run from 0 to n inclusive, so the chart needs
    # n + 1 rows/columns: the previous n-by-n layout raised IndexError for
    # any item ending at position n (e.g. a parse of the whole sentence).
    chart = [[defaultdict(list) for _ in range(n + 1)] for _ in range(n + 1)]
    for item in complete_items:
        chart[item.start][item.dot][item.lhs].append((item.rule, item.dots_))
    return chart
def cky(cfg, sentence):
    """Run agenda-based CKY parsing and return the parse forest as a WCFG."""
    A = Agenda()
    # Seed the agenda with one axiom per (rule, start position) pair.
    for item in cky_axioms(cfg, sentence):
        A.push(item)
    while A:
        item = A.pop()
        if item.is_complete() or is_nonterminal(item.next):
            # Passive item, or active item expecting a nonterminal:
            # combine it with previously discovered items.
            for new in complete(item, A):
                A.push(new)
        else:
            # Active item expecting a terminal: match it against the input.
            new = scan(item, sentence)
            if new is not None:
                A.push(new)
        # Processed items become passive so later items can combine with them.
        A.make_passive(item)
    return make_forest(A.itercomplete())
| apache-2.0 |
yochow/autotest | client/tests/cpu_hotplug/cpu_hotplug.py | 7 | 1474 | import time, os
from autotest_lib.client.bin import test, utils
from autotest_lib.client.common_lib import error
class cpu_hotplug(test.test):
    """Autotest wrapper around the LHCS CPU hotplug regression suite."""
    version = 2

    # http://developer.osdl.org/dev/hotplug/tests/lhcs_regression-1.6.tgz
    def setup(self, tarball = 'lhcs_regression-1.6.tgz'):
        # Fetch the upstream test suite tarball and unpack it into srcdir.
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)

    def initialize(self):
        """Verify hotplug support, then smoke-test offlining each CPU."""
        # Check if the kernel supports cpu hotplug
        if utils.running_config():
            utils.check_for_kernel_feature('HOTPLUG_CPU')
        # Check cpu nums, if equals 1, quit.
        if utils.count_cpus() == 1:
            e_msg = 'Single CPU online detected, test not supported.'
            raise error.TestNAError(e_msg)
        # Have a simple and quick check first, FIX me please.
        utils.system('dmesg -c > /dev/null')
        for cpu in utils.cpu_online_map():
            # Not every CPU exposes an 'online' control (e.g. the boot CPU).
            if os.path.isfile('/sys/devices/system/cpu/cpu%s/online' % cpu):
                # Offline then re-online the CPU, capturing kernel messages
                # after each transition; the sleeps let the kernel settle.
                utils.system('echo 0 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
                utils.system('dmesg -c')
                time.sleep(3)
                utils.system('echo 1 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
                utils.system('dmesg -c')
                time.sleep(3)

    def run_once(self):
        # Begin this cpu hotplug test big guru.
        os.chdir(self.srcdir)
        utils.system('./runtests.sh')
| gpl-2.0 |
huijunwu/heron | heron/tools/admin/src/python/main.py | 1 | 5233 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# !/usr/bin/env python2.7
''' main.py '''
import argparse
import os
import shutil
import sys
import traceback
import heron.common.src.python.utils.log as log
import heron.tools.common.src.python.utils.config as config
import heron.tools.cli.src.python.result as result
import heron.tools.admin.src.python.standalone as standalone
Log = log.Log
HELP_EPILOG = '''Getting more help:
heron help <command> Prints help and options for <command>
For detailed documentation, go to http://heronstreaming.io'''
# pylint: disable=protected-access,superfluous-parens
class _HelpAction(argparse._HelpAction):
  """Help action that prints the top-level help plus every sub-command's."""
  def __call__(self, parser, namespace, values, option_string=None):
    parser.print_help()

    # retrieve subparsers from parser
    subparsers_actions = [
        action for action in parser._actions
        if isinstance(action, argparse._SubParsersAction)
    ]

    # there will probably only be one subparser_action,
    # but better safe than sorry
    for subparsers_action in subparsers_actions:
      # get all subparsers and print help
      for choice, subparser in list(subparsers_action.choices.items()):
        print("Subparser '{}'".format(choice))
        print(subparser.format_help())
    return
################################################################################
def get_command_handlers():
  '''
  Build the mapping from admin sub-command name to its handler module.
  :return: dict of command name -> handler module
  '''
  handlers = {}
  handlers['standalone'] = standalone
  return handlers
################################################################################
def create_parser(command_handlers):
  '''
  Build the top-level argument parser and register every sub-command's
  parser with it (in alphabetical order of command name).
  :return: the configured argparse parser
  '''
  parser = argparse.ArgumentParser(
      prog='heron',
      epilog=HELP_EPILOG,
      formatter_class=config.SubcommandHelpFormatter,
      add_help=True)

  subparsers = parser.add_subparsers(
      title="Available commands",
      metavar='<command> <options>')

  for _, handler in sorted(command_handlers.items()):
    handler.create_parser(subparsers)

  return parser
################################################################################
def run(handlers, command, parser, command_args, unknown_args):
  '''
  Dispatch the parsed command to its registered handler.
  :param handlers: dict of command name -> handler module
  :param command: name of the sub-command to run
  :param parser: the top-level argument parser
  :param command_args: parsed arguments (dict)
  :param unknown_args: arguments argparse did not recognize
  :return: the handler's result, or an InvocationError result for an
           unknown command
  '''
  handler = handlers.get(command)
  if handler is None:
    err_context = 'Unknown subcommand: %s' % command
    return result.SimpleResult(result.Status.InvocationError, err_context)
  return handler.run(command, parser, command_args, unknown_args)
def cleanup(files):
  '''
  Remove the given paths from disk.
  :param files: iterable of paths (files or directories) to delete
  :return: None
  '''
  for cur_file in files:
    if os.path.isdir(cur_file):
      shutil.rmtree(cur_file)
    else:
      # NOTE(review): for a plain file this deletes the file's entire
      # parent directory, not just the file -- presumably each file is the
      # sole occupant of a scratch directory; confirm against callers.
      shutil.rmtree(os.path.dirname(cur_file))
################################################################################
def check_environment():
  '''
  Exit the process (status 1) unless JAVA_HOME is set and the Heron
  release file exists.
  :return: None
  '''
  # Each check prints its own diagnostics; stop at the first failure.
  for precondition in (config.check_java_home_set,
                       config.check_release_file_exists):
    if not precondition():
      sys.exit(1)
################################################################################
def execute(handlers):
  '''
  Parse the command line and dispatch to the selected sub-command handler.
  :param handlers: dict of command name -> handler module
  :return: 0 on success, 1 on failure
  '''
  # verify if the environment variables are correctly set
  check_environment()

  # create the argument parser
  parser = create_parser(handlers)

  # if no argument is provided, print help and exit
  if len(sys.argv[1:]) == 0:
    parser.print_help()
    return 0

  # insert the boolean values for some of the options
  # (mutates sys.argv in place before argparse sees it)
  sys.argv = config.insert_bool_values(sys.argv)

  try:
    # parse the args
    args, unknown_args = parser.parse_known_args()
  except ValueError as ex:
    Log.error("Error while parsing arguments: %s", str(ex))
    Log.debug(traceback.format_exc())
    sys.exit(1)

  command_line_args = vars(args)

  # set log level
  log.set_logging_level(command_line_args)
  Log.debug("Input Command Line Args: %s", command_line_args)

  # command to be execute
  command = command_line_args['subcommand']

  # print the input parameters, if verbose is enabled
  Log.debug("Processed Command Line Args: %s", command_line_args)

  results = run(handlers, command, parser, command_line_args, unknown_args)

  return 0 if result.is_successful(results) else 1
def main():
  '''
  Entry point: build the command handler map and execute the request.
  :return: 0 on success, 1 on failure
  '''
  return execute(get_command_handlers())
if __name__ == "__main__":
  # Exit with the status produced by main() (0 on success, 1 on failure).
  sys.exit(main())
| apache-2.0 |
HPPTECH/hpp_IOSTressTest | Refer/IOST_OLD_SRC/IOST_0.11/IOST_WMain_USB.py | 1 | 6648 | #!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST_WMain_USB.py
# Date : Oct 20, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import sys
import time
from IOST_Prepare import IOST_Prepare
from IOST_Config import *
import gtk
import gtk.glade
IOST_WMain_USB_Debug_Enable = 1
class IOST_WMain_USB():
    """
    USB section of the IOST main window: looks up the per-port enable
    checkboxes / Config buttons from the glade file and wires their
    signal handlers to the IOST_Data configuration dictionary.
    """
    def __init__(self, glade_filename, windown_name, builder=None):
        """
        :param glade_filename: path to the glade UI definition file
        :param windown_name: key of this window in IOST_Objs/IOST_Data
        :param builder: optional pre-built gtk.Builder to reuse; a new one
                        is created (and signals connected) when omitted
        """
        self.IOST_WMain_USB_window_name = windown_name
        if not builder:
            self.IOST_USB_Builder = gtk.Builder()
            self.IOST_USB_Builder.add_from_file(glade_filename)
            self.IOST_USB_Builder.connect_signals(self)
        else:
            self.IOST_USB_Builder = builder

    def GetUSB_Obj(self, window_name):
        """
        Get all USB objects on WMain window
        """
        # Replace the widget-id strings stored in IOST_Objs with the actual
        # gtk widget objects resolved through the builder.
        self.IOST_Objs[window_name][window_name+"_IP_Enable_USB_CB"] = self.IOST_USB_Builder.get_object(self.IOST_Objs[window_name]["_IP_Enable_USB_CB"])

        for i in range(0, self.IOST_Data["USB_PortNum"]):
            self.IOST_Objs[window_name][window_name+"_Config_USB"+str(i)+"_CB"] = self.IOST_USB_Builder.get_object(self.IOST_Objs[window_name]["_Config_USB"+str(i)+"_CB"])
            self.IOST_Objs[window_name][window_name+"_Config_USB"+str(i)+"_B"] = self.IOST_USB_Builder.get_object(self.IOST_Objs[window_name]["_Config_USB"+str(i)+"_B"])

        # self.IOST_Objs[window_name][window_name+"_Config_USB0_CB"] = self.IOST_USB_Builder.get_object(self.IOST_Objs[window_name]["_Config_USB0_CB"])
        # self.IOST_Objs[window_name][window_name+"_Config_USB0_B"] = self.IOST_USB_Builder.get_object(self.IOST_Objs[window_name]["_Config_USB0_B"])

        # self.IOST_Objs[window_name][window_name+"_Config_USB1_CB"] = self.IOST_USB_Builder.get_object(self.IOST_Objs[window_name]["_Config_USB1_CB"])
        # self.IOST_Objs[window_name][window_name+"_Config_USB1_B"] = self.IOST_USB_Builder.get_object(self.IOST_Objs[window_name]["_Config_USB1_B"])

    def InitUSB_Obj(self, window_name):
        """
        Init all USB objects when start IOST Wmain program
        """
        # Mirror the saved IOST_Data configuration into the widget states:
        # master USB checkbox first, then each per-port checkbox/button.
        if self.IOST_Data["USB"] == "Enable":
            self.IOST_Objs[window_name][window_name+"_IP_Enable_USB_CB"].set_active(True)
            for i in range(0, self.IOST_Data["USB_PortNum"]):
                if self.IOST_Data["USB"+str(i)][0] == "Disable":
                    self.IOST_Objs[window_name][window_name+"_Config_USB"+str(i)+"_CB"].set_active(False)
                    self.IOST_Objs[window_name][window_name+"_Config_USB"+str(i)+"_B"].set_sensitive(False)
                else:
                    self.IOST_Objs[window_name][window_name+"_Config_USB"+str(i)+"_CB"].set_active(True)
                    self.IOST_Objs[window_name][window_name+"_Config_USB"+str(i)+"_B"].set_sensitive(True)
        else:
            self.IOST_Objs[window_name][window_name+"_IP_Enable_USB_CB"].set_active(False)
            for i in range(0, self.IOST_Data["USB_PortNum"]):
                self.IOST_Objs[window_name][window_name+"_Config_USB"+str(i)+"_CB"].set_sensitive(False)
                self.IOST_Objs[window_name][window_name+"_Config_USB"+str(i)+"_B"].set_sensitive(False)

        # if self.IOST_Data["USB0"][0] == "Disable":
        #     self.IOST_Objs[window_name][window_name+"_Config_USB0_B"].set_sensitive(False)

        # if self.IOST_Data["USB1"][0] == "Disable":
        #     self.IOST_Objs[window_name][window_name+"_Config_USB1_B"].set_sensitive(False)

    #----------------------------------------------------------------------
    # USB
    # NOTE(review): the toggle handlers below are named *_C_toggled while
    # the widget ids end in _CB -- confirm the glade file connects signals
    # to these exact handler names.
    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_USB0_B_clicked(self, object, data=None):
        "Control to ConfigUSB-0 button "

    def on_IOST_WMain_Config_USB0_C_toggled(self, object, data=None):
        "Sync the USB0 checkbox state into IOST_Data and the Config button."
        Res = self.IOST_Objs[self.IOST_WMain_USB_window_name][self.IOST_WMain_USB_window_name+"_Config_USB0_CB"].get_active()
        self.IOST_Objs[self.IOST_WMain_USB_window_name][self.IOST_WMain_USB_window_name+"_Config_USB0_B"].set_sensitive(Res)
        if (Res):
            self.IOST_Data["USB0"][0] = 'Enable'
        else:
            self.IOST_Data["USB0"][0] = 'Disable'
        if IOST_WMain_USB_Debug_Enable:
            print self.IOST_Data["USB0"][0]

    #----------------------------------------------------------------------
    def on_IOST_WMain_Config_USB1_B_clicked(self, object, data=None):
        "Control to ConfigUSB-1 button "

    def on_IOST_WMain_Config_USB1_C_toggled(self, object, data=None):
        "Sync the USB1 checkbox state into IOST_Data and the Config button."
        Res = self.IOST_Objs[self.IOST_WMain_USB_window_name][self.IOST_WMain_USB_window_name+"_Config_USB1_CB"].get_active()
        self.IOST_Objs[self.IOST_WMain_USB_window_name][self.IOST_WMain_USB_window_name+"_Config_USB1_B"].set_sensitive(Res)
        if (Res):
            self.IOST_Data["USB1"][0] = 'Enable'
        else:
            self.IOST_Data["USB1"][0] = 'Disable'
        if IOST_WMain_USB_Debug_Enable:
            print self.IOST_Data["USB1"][0]

    #----------------------------------------------------------------------
    def on_IOST_WMain_IP_Enable_USB_CB_toggled(self, object, data=None):
        # Master USB switch: enable/disable every per-port widget at once.
        Res = self.IOST_Objs[self.IOST_WMain_USB_window_name][self.IOST_WMain_USB_window_name+"_IP_Enable_USB_CB"].get_active()
        self.IOST_WMain_USB_set_sensitive_all(Res)
        if Res:
            self.IOST_Data["USB"] = 'Enable'
        else:
            self.IOST_Data["USB"] = 'Disable'

    #----------------------------------------------------------------------
    def IOST_WMain_USB_set_sensitive_all(self, value):
        # Per-port Config buttons stay sensitive only when both the master
        # switch and the port itself are enabled.
        for i in range(0, self.IOST_Data["USB_PortNum"]):
            self.IOST_Objs[self.IOST_WMain_USB_window_name][self.IOST_WMain_USB_window_name+"_Config_USB"+str(i)+"_CB"].set_sensitive(value)
            if self.IOST_Data["USB"+str(i)][0] == "Enable" and value:
                self.IOST_Objs[self.IOST_WMain_USB_window_name][self.IOST_WMain_USB_window_name+"_Config_USB"+str(i)+"_B"].set_sensitive(value)
            else:
                self.IOST_Objs[self.IOST_WMain_USB_window_name][self.IOST_WMain_USB_window_name+"_Config_USB"+str(i)+"_B"].set_sensitive(False)
| mit |
Limags/MissionPlanner | Lib/smtplib.py | 50 | 31551 | #! /usr/bin/env python
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print s.help()
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
import socket
import re
import email.utils
import base64
import hmac
from email.base64mime import encode as encode_base64
from sys import stderr
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module.
class SMTPException(Exception):
    """Base class for all exceptions raised by this module."""
    # Catch this to handle any error originating from smtplib.
class SMTPServerDisconnected(SMTPException):
    """Not connected to any SMTP server.

    This exception is raised when the server unexpectedly disconnects,
    or when an attempt is made to use the SMTP instance before
    connecting it to a server.
    """
    # No additional state: the condition carries no server response code.
class SMTPResponseException(SMTPException):
    """Base class for all exceptions that include an SMTP error code.

    These exceptions are generated in some instances when the SMTP
    server returns an error code.  The error code is stored in the
    `smtp_code' attribute of the error, and the `smtp_error' attribute
    is set to the error message.
    """

    def __init__(self, code, msg):
        self.smtp_code = code
        self.smtp_error = msg
        # Setting .args makes str()/repr() and pickling show code and msg.
        self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
    """Sender address refused.

    In addition to the attributes set by on all SMTPResponseException
    exceptions, this sets `sender' to the string that the SMTP refused.
    """

    def __init__(self, code, msg, sender):
        self.smtp_code = code
        self.smtp_error = msg
        self.sender = sender
        # Include the refused sender in .args for str()/repr()/pickling.
        self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
    """All recipient addresses refused.

    The errors for each recipient are accessible through the attribute
    'recipients', which is a dictionary of exactly the same sort as
    SMTP.sendmail() returns.
    """

    def __init__(self, recipients):
        # Maps each refused recipient address to its (code, msg) pair.
        self.recipients = recipients
        self.args = (recipients,)
class SMTPDataError(SMTPResponseException):
    """The SMTP server didn't accept the data."""
    # Raised from the DATA phase; carries the server's code/message.
class SMTPConnectError(SMTPResponseException):
    """Error during connection establishment."""
    # Raised when the server's greeting is not a 220 response.
class SMTPHeloError(SMTPResponseException):
    """The server refused our HELO reply."""
    # Raised when neither EHLO nor HELO is accepted by the server.
class SMTPAuthenticationError(SMTPResponseException):
    """Authentication error.

    Most probably the server didn't accept the username/password
    combination provided.
    """
    # Carries the server's authentication failure code/message.
def quoteaddr(addr):
    """Quote a subset of the email addresses defined by RFC 821.

    Should be able to handle anything rfc822.parseaddr can handle.
    """
    m = (None, None)  # sentinel meaning "parse failed / raised"
    try:
        m = email.utils.parseaddr(addr)[1]
    except AttributeError:
        pass
    if m == (None, None):  # Indicates parse failure or AttributeError
        # something weird here.. punt -ddm
        return "<%s>" % addr
    elif m is None:
        # the sender wants an empty return address
        # NOTE(review): parseaddr()[1] is normally a string, so this branch
        # looks unreachable in practice; preserved for compatibility.
        return "<>"
    else:
        return "<%s>" % m
def quotedata(data):
    """Quote data for email.

    Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
    Internet CRLF end-of-line.
    """
    # First normalise every newline convention (CRLF, LF, lone CR) to the
    # Internet CRLF sequence, then apply RFC 821 dot-transparency by
    # doubling any dot that starts a line.
    with_crlf = re.sub(r'(?:\r\n|\n|\r(?!\n))', "\r\n", data)
    return re.sub(r'(?m)^\.', '..', with_crlf)
# Feature-detect SSL support in the interpreter; when absent, _have_ssl is
# False and TLS-dependent functionality is unavailable.
try:
    import ssl
except ImportError:
    _have_ssl = False
else:
    class SSLFakeFile:
        """A fake file like object that really wraps a SSLObject.

        It only supports what is needed in smtplib.
        """
        def __init__(self, sslobj):
            self.sslobj = sslobj

        def readline(self):
            # Read one byte at a time until newline or EOF; SSL objects do
            # not provide a buffered file interface of their own.
            str = ""
            chr = None
            while chr != "\n":
                chr = self.sslobj.read(1)
                if not chr:
                    break
                str += chr
            return str

        def close(self):
            # Closing is handled by the owning socket, not this wrapper.
            pass

    _have_ssl = True
# NOTE(review): Python 2 code throughout this class (`print >> stderr`,
# `raise exc, arg` in connect()); do not expect it to run under Python 3.
class SMTP:
    """This class manages a connection to an SMTP or ESMTP server.
    SMTP Objects:
    SMTP objects have the following attributes:
        helo_resp
            This is the message given by the server in response to the
            most recent HELO command.
        ehlo_resp
            This is the message given by the server in response to the
            most recent EHLO command. This is usually multiline.
        does_esmtp
            This is a True value _after you do an EHLO command_, if the
            server supports ESMTP.
        esmtp_features
            This is a dictionary, which, if the server supports ESMTP,
            will _after you do an EHLO command_, contain the names of the
            SMTP service extensions this server supports, and their
            parameters (if any).
            Note, all extension names are mapped to lower case in the
            dictionary.
    See each method's docstrings for details. In general, there is a
    method of the same name to perform each SMTP command. There is also a
    method called 'sendmail' that will do an entire mail transaction.
    """
    debuglevel = 0
    file = None
    helo_resp = None
    # Verb sent by ehlo(); the LMTP subclass overrides this with "lhlo".
    ehlo_msg = "ehlo"
    ehlo_resp = None
    does_esmtp = 0
    default_port = SMTP_PORT
    def __init__(self, host='', port=0, local_hostname=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        """Initialize a new instance.
        If specified, `host' is the name of the remote host to which to
        connect. If specified, `port' specifies the port to which to connect.
        By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
        if the specified `host' doesn't respond correctly. If specified,
        `local_hostname` is used as the FQDN of the local host. By default,
        the local hostname is found using socket.getfqdn().
        """
        self.timeout = timeout
        self.esmtp_features = {}
        if host:
            (code, msg) = self.connect(host, port)
            if code != 220:
                raise SMTPConnectError(code, msg)
        if local_hostname is not None:
            self.local_hostname = local_hostname
        else:
            # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
            # if that can't be calculated, that we should use a domain literal
            # instead (essentially an encoded IP address like [A.B.C.D]).
            fqdn = socket.getfqdn()
            if '.' in fqdn:
                self.local_hostname = fqdn
            else:
                # We can't find an fqdn hostname, so use a domain literal
                addr = '127.0.0.1'
                try:
                    addr = socket.gethostbyname(socket.gethostname())
                except socket.gaierror:
                    pass
                self.local_hostname = '[%s]' % addr
    def set_debuglevel(self, debuglevel):
        """Set the debug output level.
        A non-false value results in debug messages for connection and for all
        messages sent to and received from the server.
        """
        self.debuglevel = debuglevel
    def _get_socket(self, port, host, timeout):
        # This makes it simpler for SMTP_SSL to use the SMTP connect code
        # and just alter the socket connection bit.
        # NOTE(review): the parameter names here are swapped relative to the
        # call site in connect(), which passes (host, port); so inside this
        # method `port` actually holds the host and `host` the port, and the
        # tuple below ends up as the conventional (host, port).  SMTP_SSL's
        # override uses the normal (host, port) order.
        if self.debuglevel > 0:
            print>>stderr, 'connect:', (host, port)
        return socket.create_connection((port, host), timeout)
    def connect(self, host='localhost', port=0):
        """Connect to a host on a given port.
        If the hostname ends with a colon (`:') followed by a number, and
        there is no port specified, that suffix will be stripped off and the
        number interpreted as the port number to use.
        Note: This method is automatically invoked by __init__, if a host is
        specified during instantiation.
        """
        # Only split on ':' if it appears exactly once (avoids mangling
        # IPv6-style literals containing several colons).
        if not port and (host.find(':') == host.rfind(':')):
            i = host.rfind(':')
            if i >= 0:
                host, port = host[:i], host[i + 1:]
                try:
                    port = int(port)
                except ValueError:
                    raise socket.error, "nonnumeric port"
        if not port:
            port = self.default_port
        if self.debuglevel > 0:
            print>>stderr, 'connect:', (host, port)
        self.sock = self._get_socket(host, port, self.timeout)
        (code, msg) = self.getreply()
        if self.debuglevel > 0:
            print>>stderr, "connect:", msg
        return (code, msg)
    def send(self, str):
        """Send `str' to the server."""
        if self.debuglevel > 0:
            print>>stderr, 'send:', repr(str)
        if hasattr(self, 'sock') and self.sock:
            try:
                self.sock.sendall(str)
            except socket.error:
                self.close()
                raise SMTPServerDisconnected('Server not connected')
        else:
            raise SMTPServerDisconnected('please run connect() first')
    def putcmd(self, cmd, args=""):
        """Send a command to the server."""
        if args == "":
            str = '%s%s' % (cmd, CRLF)
        else:
            str = '%s %s%s' % (cmd, args, CRLF)
        self.send(str)
    def getreply(self):
        """Get a reply from the server.
        Returns a tuple consisting of:
          - server response code (e.g. '250', or such, if all goes well)
            Note: returns -1 if it can't read response code.
          - server response string corresponding to response code (multiline
            responses are converted to a single, multiline string).
        Raises SMTPServerDisconnected if end-of-file is reached.
        """
        resp = []
        if self.file is None:
            self.file = self.sock.makefile('rb')
        while 1:
            try:
                line = self.file.readline()
            except socket.error:
                line = ''
            if line == '':
                self.close()
                raise SMTPServerDisconnected("Connection unexpectedly closed")
            if self.debuglevel > 0:
                print>>stderr, 'reply:', repr(line)
            # Strip the 3-digit code and the separator ("250-" / "250 ").
            resp.append(line[4:].strip())
            code = line[:3]
            # Check that the error code is syntactically correct.
            # Don't attempt to read a continuation line if it is broken.
            try:
                errcode = int(code)
            except ValueError:
                errcode = -1
                break
            # Check if multiline response.
            if line[3:4] != "-":
                break
        errmsg = "\n".join(resp)
        if self.debuglevel > 0:
            print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode, errmsg)
        return errcode, errmsg
    def docmd(self, cmd, args=""):
        """Send a command, and return its response code."""
        self.putcmd(cmd, args)
        return self.getreply()
    # std smtp commands
    def helo(self, name=''):
        """SMTP 'helo' command.
        Hostname to send for this command defaults to the FQDN of the local
        host.
        """
        self.putcmd("helo", name or self.local_hostname)
        (code, msg) = self.getreply()
        self.helo_resp = msg
        return (code, msg)
    def ehlo(self, name=''):
        """ SMTP 'ehlo' command.
        Hostname to send for this command defaults to the FQDN of the local
        host.
        """
        self.esmtp_features = {}
        self.putcmd(self.ehlo_msg, name or self.local_hostname)
        (code, msg) = self.getreply()
        # According to RFC1869 some (badly written)
        # MTA's will disconnect on an ehlo. Toss an exception if
        # that happens -ddm
        if code == -1 and len(msg) == 0:
            self.close()
            raise SMTPServerDisconnected("Server not connected")
        self.ehlo_resp = msg
        if code != 250:
            return (code, msg)
        self.does_esmtp = 1
        #parse the ehlo response -ddm
        resp = self.ehlo_resp.split('\n')
        del resp[0]
        for each in resp:
            # To be able to communicate with as many SMTP servers as possible,
            # we have to take the old-style auth advertisement into account,
            # because:
            # 1) Else our SMTP feature parser gets confused.
            # 2) There are some servers that only advertise the auth methods we
            # support using the old style.
            auth_match = OLDSTYLE_AUTH.match(each)
            if auth_match:
                # This doesn't remove duplicates, but that's no problem
                self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
                    + " " + auth_match.groups(0)[0]
                continue
            # RFC 1869 requires a space between ehlo keyword and parameters.
            # It's actually stricter, in that only spaces are allowed between
            # parameters, but were not going to check for that here. Note
            # that the space isn't present if there are no parameters.
            m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
            if m:
                feature = m.group("feature").lower()
                params = m.string[m.end("feature"):].strip()
                if feature == "auth":
                    self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
                        + " " + params
                else:
                    self.esmtp_features[feature] = params
        return (code, msg)
    def has_extn(self, opt):
        """Does the server support a given SMTP service extension?"""
        return opt.lower() in self.esmtp_features
    def help(self, args=''):
        """SMTP 'help' command.
        Returns help text from server."""
        self.putcmd("help", args)
        return self.getreply()[1]
    def rset(self):
        """SMTP 'rset' command -- resets session."""
        return self.docmd("rset")
    def noop(self):
        """SMTP 'noop' command -- doesn't do anything :>"""
        return self.docmd("noop")
    def mail(self, sender, options=[]):
        """SMTP 'mail' command -- begins mail xfer session."""
        # NOTE: the mutable default is safe here because it is never mutated.
        optionlist = ''
        if options and self.does_esmtp:
            optionlist = ' ' + ' '.join(options)
        self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
        return self.getreply()
    def rcpt(self, recip, options=[]):
        """SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
        optionlist = ''
        if options and self.does_esmtp:
            optionlist = ' ' + ' '.join(options)
        self.putcmd("rcpt", "TO:%s%s" % (quoteaddr(recip), optionlist))
        return self.getreply()
    def data(self, msg):
        """SMTP 'DATA' command -- sends message data to server.
        Automatically quotes lines beginning with a period per rfc821.
        Raises SMTPDataError if there is an unexpected reply to the
        DATA command; the return value from this method is the final
        response code received when the all data is sent.
        """
        self.putcmd("data")
        (code, repl) = self.getreply()
        if self.debuglevel > 0:
            print>>stderr, "data:", (code, repl)
        if code != 354:
            raise SMTPDataError(code, repl)
        else:
            # Dot-stuff / CRLF-normalize, then terminate with "." on its own line.
            q = quotedata(msg)
            if q[-2:] != CRLF:
                q = q + CRLF
            q = q + "." + CRLF
            self.send(q)
            (code, msg) = self.getreply()
            if self.debuglevel > 0:
                print>>stderr, "data:", (code, msg)
            return (code, msg)
    def verify(self, address):
        """SMTP 'verify' command -- checks for address validity."""
        self.putcmd("vrfy", quoteaddr(address))
        return self.getreply()
    # a.k.a.
    vrfy = verify
    def expn(self, address):
        """SMTP 'expn' command -- expands a mailing list."""
        self.putcmd("expn", quoteaddr(address))
        return self.getreply()
    # some useful methods
    def ehlo_or_helo_if_needed(self):
        """Call self.ehlo() and/or self.helo() if needed.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.
        This method may raise the following exceptions:
         SMTPHeloError The server didn't reply properly to
         the helo greeting.
        """
        if self.helo_resp is None and self.ehlo_resp is None:
            if not (200 <= self.ehlo()[0] <= 299):
                (code, resp) = self.helo()
                if not (200 <= code <= 299):
                    raise SMTPHeloError(code, resp)
    def login(self, user, password):
        """Log in on an SMTP server that requires authentication.
        The arguments are:
            - user: The user name to authenticate with.
            - password: The password for the authentication.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.
        This method will return normally if the authentication was successful.
        This method may raise the following exceptions:
         SMTPHeloError The server didn't reply properly to
         the helo greeting.
         SMTPAuthenticationError The server didn't accept the username/
         password combination.
         SMTPException No suitable authentication method was
         found.
        """
        def encode_cram_md5(challenge, user, password):
            # Challenge arrives base64-encoded; reply is "user hex-hmac".
            challenge = base64.decodestring(challenge)
            response = user + " " + hmac.HMAC(password, challenge).hexdigest()
            return encode_base64(response, eol="")
        def encode_plain(user, password):
            return encode_base64("\0%s\0%s" % (user, password), eol="")
        AUTH_PLAIN = "PLAIN"
        AUTH_CRAM_MD5 = "CRAM-MD5"
        AUTH_LOGIN = "LOGIN"
        self.ehlo_or_helo_if_needed()
        if not self.has_extn("auth"):
            raise SMTPException("SMTP AUTH extension not supported by server.")
        # Authentication methods the server supports:
        authlist = self.esmtp_features["auth"].split()
        # List of authentication methods we support: from preferred to
        # less preferred methods. Except for the purpose of testing the weaker
        # ones, we prefer stronger methods like CRAM-MD5:
        preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
        # Determine the authentication method we'll use
        authmethod = None
        for method in preferred_auths:
            if method in authlist:
                authmethod = method
                break
        if authmethod == AUTH_CRAM_MD5:
            (code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
            if code == 503:
                # 503 == 'Error: already authenticated'
                return (code, resp)
            (code, resp) = self.docmd(encode_cram_md5(resp, user, password))
        elif authmethod == AUTH_PLAIN:
            (code, resp) = self.docmd("AUTH",
                AUTH_PLAIN + " " + encode_plain(user, password))
        elif authmethod == AUTH_LOGIN:
            (code, resp) = self.docmd("AUTH",
                "%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
            if code != 334:
                raise SMTPAuthenticationError(code, resp)
            (code, resp) = self.docmd(encode_base64(password, eol=""))
        elif authmethod is None:
            raise SMTPException("No suitable authentication method found.")
        if code not in (235, 503):
            # 235 == 'Authentication successful'
            # 503 == 'Error: already authenticated'
            raise SMTPAuthenticationError(code, resp)
        return (code, resp)
    def starttls(self, keyfile=None, certfile=None):
        """Puts the connection to the SMTP server into TLS mode.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first.
        If the server supports TLS, this will encrypt the rest of the SMTP
        session. If you provide the keyfile and certfile parameters,
        the identity of the SMTP server and client can be checked. This,
        however, depends on whether the socket module really checks the
        certificates.
        This method may raise the following exceptions:
         SMTPHeloError The server didn't reply properly to
         the helo greeting.
        """
        self.ehlo_or_helo_if_needed()
        if not self.has_extn("starttls"):
            raise SMTPException("STARTTLS extension not supported by server.")
        (resp, reply) = self.docmd("STARTTLS")
        if resp == 220:
            if not _have_ssl:
                raise RuntimeError("No SSL support included in this Python")
            self.sock = ssl.wrap_socket(self.sock, keyfile, certfile)
            self.file = SSLFakeFile(self.sock)
            # RFC 3207:
            # The client MUST discard any knowledge obtained from
            # the server, such as the list of SMTP service extensions,
            # which was not obtained from the TLS negotiation itself.
            self.helo_resp = None
            self.ehlo_resp = None
            self.esmtp_features = {}
            self.does_esmtp = 0
        return (resp, reply)
    def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
                 rcpt_options=[]):
        """This command performs an entire mail transaction.
        The arguments are:
            - from_addr : The address sending this mail.
            - to_addrs : A list of addresses to send this mail to. A bare
            string will be treated as a list with 1 address.
            - msg : The message to send.
            - mail_options : List of ESMTP options (such as 8bitmime) for the
            mail command.
            - rcpt_options : List of ESMTP options (such as DSN commands) for
            all the rcpt commands.
        If there has been no previous EHLO or HELO command this session, this
        method tries ESMTP EHLO first. If the server does ESMTP, message size
        and each of the specified options will be passed to it. If EHLO
        fails, HELO will be tried and ESMTP options suppressed.
        This method will return normally if the mail is accepted for at least
        one recipient. It returns a dictionary, with one entry for each
        recipient that was refused. Each entry contains a tuple of the SMTP
        error code and the accompanying error message sent by the server.
        This method may raise the following exceptions:
         SMTPHeloError The server didn't reply properly to
         the helo greeting.
         SMTPRecipientsRefused The server rejected ALL recipients
         (no mail was sent).
         SMTPSenderRefused The server didn't accept the from_addr.
         SMTPDataError The server replied with an unexpected
         error code (other than a refusal of
         a recipient).
        Note: the connection will be open even after an exception is raised.
        Example:
         >>> import smtplib
         >>> s=smtplib.SMTP("localhost")
         >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
         >>> msg = '''\\
         ... From: Me@my.org
         ... Subject: testin'...
         ...
         ... This is a test '''
         >>> s.sendmail("me@my.org",tolist,msg)
         { "three@three.org" : ( 550 ,"User unknown" ) }
         >>> s.quit()
        In the above example, the message was accepted for delivery to three
        of the four addresses, and one was rejected, with the error code
        550. If all addresses are accepted, then the method will return an
        empty dictionary.
        """
        self.ehlo_or_helo_if_needed()
        esmtp_opts = []
        if self.does_esmtp:
            # Hmmm? what's this? -ddm
            # self.esmtp_features['7bit']=""
            if self.has_extn('size'):
                esmtp_opts.append("size=%d" % len(msg))
            for option in mail_options:
                esmtp_opts.append(option)
        (code, resp) = self.mail(from_addr, esmtp_opts)
        if code != 250:
            self.rset()
            raise SMTPSenderRefused(code, resp, from_addr)
        senderrs = {}
        if isinstance(to_addrs, basestring):
            to_addrs = [to_addrs]
        for each in to_addrs:
            (code, resp) = self.rcpt(each, rcpt_options)
            if (code != 250) and (code != 251):
                senderrs[each] = (code, resp)
        if len(senderrs) == len(to_addrs):
            # the server refused all our recipients
            self.rset()
            raise SMTPRecipientsRefused(senderrs)
        (code, resp) = self.data(msg)
        if code != 250:
            self.rset()
            raise SMTPDataError(code, resp)
        #if we got here then somebody got our mail
        return senderrs
    def close(self):
        """Close the connection to the SMTP server."""
        if self.file:
            self.file.close()
        self.file = None
        if self.sock:
            self.sock.close()
        self.sock = None
    def quit(self):
        """Terminate the SMTP session."""
        res = self.docmd("quit")
        self.close()
        return res
if _have_ssl:
    class SMTP_SSL(SMTP):
        """ This is a subclass derived from SMTP that connects over an SSL encrypted
        socket (to use this class you need a socket module that was compiled with SSL
        support). If host is not specified, '' (the local host) is used. If port is
        omitted, the standard SMTP-over-SSL port (465) is used. keyfile and certfile
        are also optional - they can contain a PEM formatted private key and
        certificate chain file for the SSL connection.
        """
        default_port = SMTP_SSL_PORT
        def __init__(self, host='', port=0, local_hostname=None,
                     keyfile=None, certfile=None,
                     timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
            self.keyfile = keyfile
            self.certfile = certfile
            SMTP.__init__(self, host, port, local_hostname, timeout)
        def _get_socket(self, host, port, timeout):
            # NOTE(review): unlike SMTP._get_socket, this override takes its
            # arguments in the conventional (host, port) order, matching the
            # (host, port) call in SMTP.connect().
            if self.debuglevel > 0:
                print>>stderr, 'connect:', (host, port)
            new_socket = socket.create_connection((host, port), timeout)
            new_socket = ssl.wrap_socket(new_socket, self.keyfile, self.certfile)
            # Wrap so getreply() can call readline() on the SSL object.
            self.file = SSLFakeFile(new_socket)
            return new_socket
    __all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003
class LMTP(SMTP):
    """LMTP - Local Mail Transfer Protocol
    The LMTP protocol, which is very similar to ESMTP, is heavily based
    on the standard SMTP client. It's common to use Unix sockets for LMTP,
    so our connect() method must support that as well as a regular
    host:port server. To specify a Unix socket, you must use an absolute
    path as the host, starting with a '/'.
    Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP generally don't support or require any
    authentication, but your mileage might vary."""
    # LMTP greets with LHLO instead of EHLO; SMTP.ehlo() sends this verb.
    ehlo_msg = "lhlo"
    def __init__(self, host='', port=LMTP_PORT, local_hostname=None):
        """Initialize a new instance."""
        SMTP.__init__(self, host, port, local_hostname)
    def connect(self, host='localhost', port=0):
        """Connect to the LMTP daemon, on either a Unix or a TCP socket."""
        if host[0] != '/':
            return SMTP.connect(self, host, port)
        # Handle Unix-domain sockets.
        try:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(host)
        except socket.error, msg:
            if self.debuglevel > 0:
                print>>stderr, 'connect fail:', host
            if self.sock:
                self.sock.close()
            self.sock = None
            raise socket.error, msg
        (code, msg) = self.getreply()
        if self.debuglevel > 0:
            print>>stderr, "connect:", msg
        return (code, msg)
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
    import sys
    def prompt(prompt):
        # NOTE: the parameter deliberately shadows the function name;
        # harmless because the function never calls itself.
        sys.stdout.write(prompt + ": ")
        return sys.stdin.readline().strip()
    fromaddr = prompt("From")
    toaddrs = prompt("To").split(',')
    print "Enter message, end with ^D:"
    msg = ''
    while 1:
        line = sys.stdin.readline()
        if not line:
            break
        msg = msg + line
    print "Message length is %d" % len(msg)
    server = SMTP('localhost')
    server.set_debuglevel(1)
    server.sendmail(fromaddr, toaddrs, msg)
    server.quit()
| gpl-3.0 |
xsixing/blaze | blaze/io/server/app.py | 10 | 6092 | from __future__ import absolute_import, division, print_function
import sys
import os
import flask
from flask import request, Response
import blaze
import datashape
from dynd import nd, ndt
from blaze.catalog.array_provider import json_array_provider
from blaze.catalog.blaze_url import (split_array_base, add_indexers_to_url,
slice_as_string, index_tuple_as_string)
from blaze.py2help import _inttypes, _strtypes
from .datashape_html import render_datashape
from .compute_session import compute_session
from .crossdomain import crossdomain
app = flask.Flask('blaze.server')
# Registry of active compute sessions, keyed by session path; populated by
# the 'create_session' branch and looked up against request.path in handle().
app.sessions = {}
def indexers_navigation_html(base_url, array_name, indexers):
    """Build an HTML breadcrumb of links, one per indexer applied in order.

    Each step extends the URL with the indexer's textual form (field name,
    [int], slice, or tuple) and appends a matching anchor tag.
    """
    url = base_url + array_name
    html = '<a href="' + url + '">' + array_name + '</a>'
    for idx in indexers:
        if isinstance(idx, _strtypes):
            url += '.' + idx
            html += ' . <a href="' + url + '">' + idx + '</a>'
        elif isinstance(idx, _inttypes):
            url += '[' + str(idx) + ']'
            html += ' <a href="' + url + '">[' + str(idx) + ']</a>'
        elif isinstance(idx, slice):
            text = slice_as_string(idx)
            url += text
            html += ' <a href="' + url + '">' + text + '</a>'
        elif isinstance(idx, tuple):
            text = index_tuple_as_string(idx)
            url += text
            html += ' <a href="' + url + '">' + text + '</a>'
        else:
            raise IndexError('Invalid indexer %r' % idx)
    return html
def get_array(array_name, indexers):
    """Fetch `array_name` from the Blaze catalog and apply each indexer.

    Indexers that are slice/int/tuple are applied positionally; strings are
    treated as record field names.  Raises if a named field is missing.
    """
    arr = blaze.catalog.get(array_name)
    for i in indexers:
        if type(i) in [slice, int, tuple]:
            arr = arr[i]
        else:
            ds = arr.dshape
            if isinstance(ds, datashape.DataShape):
                # The measure (last element) carries the record type, if any.
                ds = ds[-1]
            if isinstance(ds, datashape.Record) and i in ds.names:
                arr = getattr(arr, i)
            else:
                raise Exception('Blaze array does not have field ' + i)
    return arr
def html_array(arr, base_url, array_name, indexers):
    """Render a minimal HTML page for an array: breadcrumb links, a JSON
    download link, and the rendered datashape."""
    array_url = add_indexers_to_url(base_url + array_name, indexers)
    print(array_url)
    nav_html = indexers_navigation_html(base_url, array_name, indexers)
    datashape_html = render_datashape(array_url, arr.dshape)
    parts = [
        '<html><head><title>Blaze Array</title></head>\n',
        '<body>\n',
        'Blaze Array > ', nav_html, '\n<p />\n',
        '<a href="', array_url, '?r=data.json">JSON</a>\n<p />\n',
        datashape_html,
        '\n</body></html>',
    ]
    return ''.join(parts)
@app.route("/favicon.ico")
def favicon():
    """Placeholder favicon handler; serves plain text instead of an icon."""
    return 'no icon'
@app.route("/<path:path>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin="*", automatic_options=False, automatic_headers=True)
def handle(path):
    """Catch-all route: dispatch to session handling if the path was
    registered via 'create_session', otherwise treat it as an array path."""
    if request.path in app.sessions:
        return handle_session_query()
    else:
        return handle_array_query()
def handle_session_query():
    """Dispatch a session-scoped request (selected by the 'r' query value)
    to the matching operation on the session object."""
    session = app.sessions[request.path]
    q_req = request.values['r']
    if q_req == 'close_session':
        content_type, body = session.close()
        return Response(body, mimetype='application/json')
    # The remaining operations all take a JSON payload and return
    # (content_type, body) the same way.
    json_ops = {
        'add_computed_fields': session.add_computed_fields,
        'sort': session.sort,
        'groupby': session.groupby,
    }
    if q_req in json_ops:
        content_type, body = json_ops[q_req](request.values['json'])
        return Response(body, mimetype='application/json')
    return 'something with session ' + session.session_name
def handle_array_query():
    """Serve an array request: an HTML page when no query parameters are
    given, otherwise one of the 'r=' sub-requests (data.json, datashape,
    dyndtype, dynddebug, create_session)."""
    array_name, indexers = split_array_base(request.path.rstrip('/'))
    arr = get_array(array_name, indexers)
    base_url = request.url_root[:-1]
    #no query params
    # NOTE: len(request.values) was failing within werkzeug
    if len(list(request.values)) == 0:
        return html_array(arr, base_url, array_name, indexers)
    q_req = request.values['r']
    if q_req == 'data.json':
        dat = arr._data.dynd_arr()
        return Response(nd.as_py(nd.format_json(dat).view_scalars(ndt.bytes)),
                        mimetype='application/json')
    elif q_req == 'datashape':
        # The old code assigned a `content_type` local and never used it;
        # attach it to the response so the client actually sees text/plain.
        return Response(str(arr.dshape),
                        content_type='text/plain; charset=utf-8')
    elif q_req == 'dyndtype':
        return Response(str(arr.dtype), mimetype='application/json')
    elif q_req == 'dynddebug':
        return arr.debug_repr()
    elif q_req == 'create_session':
        session = compute_session(base_url,
                                  add_indexers_to_url(array_name, indexers))
        app.sessions[session.session_name] = session
        content_type, body = session.creation_response()
        return Response(body, mimetype='application/json')
    else:
        # Bug fix: `abort` was referenced without being imported (NameError
        # at runtime); use the function from the imported flask module.
        flask.abort(400, "Unknown Blaze server request %s" % q_req)
if __name__ == "__main__":
    # Root of the array catalog: first CLI argument, or ./arrays by default.
    if len(sys.argv) > 1:
        root_path = sys.argv[1]
    else:
        # Bug fix: os.getcwdu() only exists on Python 2; this file otherwise
        # supports Python 3 (see the py2help import), so fall back to
        # os.getcwd() when getcwdu is unavailable.
        getcwd = getattr(os, 'getcwdu', os.getcwd)
        root_path = os.path.join(getcwd(), 'arrays')
    array_provider = json_array_provider(root_path)
    app.array_provider = array_provider
    app.run(debug=True, port=8080, use_reloader=True)
| bsd-3-clause |
johanvdw/python-shapely | tests/test_affinity.py | 7 | 11312 | from . import unittest
from math import pi
from shapely import affinity
from shapely.wkt import loads as load_wkt
from shapely.geometry import Point
class AffineTestCase(unittest.TestCase):
    """Tests for shapely.affinity.affine_transform: argument validation,
    per-geometry-type identity transforms, and 2D/3D matrix handling."""
    def test_affine_params(self):
        g = load_wkt('LINESTRING(2.4 4.1, 2.4 3, 3 3)')
        self.assertRaises(
            TypeError, affinity.affine_transform, g, None)
        self.assertRaises(
            TypeError, affinity.affine_transform, g, '123456')
        self.assertRaises(ValueError, affinity.affine_transform, g,
                          [1, 2, 3, 4, 5, 6, 7, 8, 9])
        self.assertRaises(AttributeError, affinity.affine_transform, None,
                          [1, 2, 3, 4, 5, 6])
    def test_affine_geom_types(self):
        # identity matrices, which should result with no transformation
        matrix2d = (1, 0,
                    0, 1,
                    0, 0)
        matrix3d = (1, 0, 0,
                    0, 1, 0,
                    0, 0, 1,
                    0, 0, 0)
        # empty in, empty out
        empty2d = load_wkt('MULTIPOLYGON EMPTY')
        self.assertTrue(affinity.affine_transform(empty2d, matrix2d).is_empty)
        def test_geom(g2, g3=None):
            # Identity transform must preserve geometry and dimensionality.
            self.assertFalse(g2.has_z)
            a2 = affinity.affine_transform(g2, matrix2d)
            self.assertFalse(a2.has_z)
            self.assertTrue(g2.equals(a2))
            if g3 is not None:
                self.assertTrue(g3.has_z)
                a3 = affinity.affine_transform(g3, matrix3d)
                self.assertTrue(a3.has_z)
                self.assertTrue(g3.equals(a3))
            return
        pt2d = load_wkt('POINT(12.3 45.6)')
        pt3d = load_wkt('POINT(12.3 45.6 7.89)')
        test_geom(pt2d, pt3d)
        ls2d = load_wkt('LINESTRING(0.9 3.4, 0.7 2, 2.5 2.7)')
        ls3d = load_wkt('LINESTRING(0.9 3.4 3.3, 0.7 2 2.3, 2.5 2.7 5.5)')
        test_geom(ls2d, ls3d)
        lr2d = load_wkt('LINEARRING(0.9 3.4, 0.7 2, 2.5 2.7, 0.9 3.4)')
        lr3d = load_wkt(
            'LINEARRING(0.9 3.4 3.3, 0.7 2 2.3, 2.5 2.7 5.5, 0.9 3.4 3.3)')
        test_geom(lr2d, lr3d)
        test_geom(load_wkt('POLYGON((0.9 2.3, 0.5 1.1, 2.4 0.8, 0.9 2.3), '
                           '(1.1 1.7, 0.9 1.3, 1.4 1.2, 1.1 1.7), '
                           '(1.6 1.3, 1.7 1, 1.9 1.1, 1.6 1.3))'))
        test_geom(load_wkt(
            'MULTIPOINT ((-300 300), (700 300), (-800 -1100), (200 -300))'))
        test_geom(load_wkt(
            'MULTILINESTRING((0 0, -0.7 -0.7, 0.6 -1), '
            '(-0.5 0.5, 0.7 0.6, 0 -0.6))'))
        test_geom(load_wkt(
            'MULTIPOLYGON(((900 4300, -1100 -400, 900 -800, 900 4300)), '
            '((1200 4300, 2300 4400, 1900 1000, 1200 4300)))'))
        test_geom(load_wkt('GEOMETRYCOLLECTION(POINT(20 70),'
                           ' POLYGON((60 70, 13 35, 60 -30, 60 70)),'
                           ' LINESTRING(60 70, 50 100, 80 100))'))
    def test_affine_2d(self):
        g = load_wkt('LINESTRING(2.4 4.1, 2.4 3, 3 3)')
        # custom scale and translate
        expected2d = load_wkt('LINESTRING(-0.2 14.35, -0.2 11.6, 1 11.6)')
        matrix2d = (2, 0,
                    0, 2.5,
                    -5, 4.1)
        a2 = affinity.affine_transform(g, matrix2d)
        self.assertTrue(a2.almost_equals(expected2d))
        self.assertFalse(a2.has_z)
        # Make sure a 3D matrix does not make a 3D shape from a 2D input
        matrix3d = (2, 0, 0,
                    0, 2.5, 0,
                    0, 0, 10,
                    -5, 4.1, 100)
        a3 = affinity.affine_transform(g, matrix3d)
        self.assertTrue(a3.almost_equals(expected2d))
        self.assertFalse(a3.has_z)
    def test_affine_3d(self):
        g2 = load_wkt('LINESTRING(2.4 4.1, 2.4 3, 3 3)')
        g3 = load_wkt('LINESTRING(2.4 4.1 100.2, 2.4 3 132.8, 3 3 128.6)')
        # custom scale and translate
        matrix2d = (2, 0,
                    0, 2.5,
                    -5, 4.1)
        matrix3d = (2, 0, 0,
                    0, 2.5, 0,
                    0, 0, 0.3048,
                    -5, 4.1, 100)
        # Combinations of 2D and 3D geometries and matrices
        a22 = affinity.affine_transform(g2, matrix2d)
        a23 = affinity.affine_transform(g2, matrix3d)
        a32 = affinity.affine_transform(g3, matrix2d)
        a33 = affinity.affine_transform(g3, matrix3d)
        # Check dimensions
        self.assertFalse(a22.has_z)
        self.assertFalse(a23.has_z)
        self.assertTrue(a32.has_z)
        self.assertTrue(a33.has_z)
        # 2D equality checks
        expected2d = load_wkt('LINESTRING(-0.2 14.35, -0.2 11.6, 1 11.6)')
        expected3d = load_wkt('LINESTRING(-0.2 14.35 130.54096, '
                              '-0.2 11.6 140.47744, 1 11.6 139.19728)')
        expected32 = load_wkt('LINESTRING(-0.2 14.35 100.2, '
                              '-0.2 11.6 132.8, 1 11.6 128.6)')
        self.assertTrue(a22.almost_equals(expected2d))
        self.assertTrue(a23.almost_equals(expected2d))
        # Do explicit 3D check of coordinate values
        for a, e in zip(a32.coords, expected32.coords):
            for ap, ep in zip(a, e):
                self.assertAlmostEqual(ap, ep)
        for a, e in zip(a33.coords, expected3d.coords):
            for ap, ep in zip(a, e):
                self.assertAlmostEqual(ap, ep)
class TransformOpsTestCase(unittest.TestCase):
def test_rotate(self):
ls = load_wkt('LINESTRING(240 400, 240 300, 300 300)')
# counter-clockwise degrees
rls = affinity.rotate(ls, 90)
els = load_wkt('LINESTRING(220 320, 320 320, 320 380)')
self.assertTrue(rls.equals(els))
# retest with named parameters for the same result
rls = affinity.rotate(geom=ls, angle=90, origin='center')
self.assertTrue(rls.equals(els))
# clockwise radians
rls = affinity.rotate(ls, -pi/2, use_radians=True)
els = load_wkt('LINESTRING(320 380, 220 380, 220 320)')
self.assertTrue(rls.equals(els))
## other `origin` parameters
# around the centroid
rls = affinity.rotate(ls, 90, origin='centroid')
els = load_wkt('LINESTRING(182.5 320, 282.5 320, 282.5 380)')
self.assertTrue(rls.equals(els))
# around the second coordinate tuple
rls = affinity.rotate(ls, 90, origin=ls.coords[1])
els = load_wkt('LINESTRING(140 300, 240 300, 240 360)')
self.assertTrue(rls.equals(els))
# around the absolute Point of origin
rls = affinity.rotate(ls, 90, origin=Point(0, 0))
els = load_wkt('LINESTRING(-400 240, -300 240, -300 300)')
self.assertTrue(rls.equals(els))
def test_scale(self):
ls = load_wkt('LINESTRING(240 400 10, 240 300 30, 300 300 20)')
# test defaults of 1.0
sls = affinity.scale(ls)
self.assertTrue(sls.equals(ls))
# different scaling in different dimensions
sls = affinity.scale(ls, 2, 3, 0.5)
els = load_wkt('LINESTRING(210 500 5, 210 200 15, 330 200 10)')
self.assertTrue(sls.equals(els))
# Do explicit 3D check of coordinate values
for a, b in zip(sls.coords, els.coords):
for ap, bp in zip(a, b):
self.assertEqual(ap, bp)
# retest with named parameters for the same result
sls = affinity.scale(geom=ls, xfact=2, yfact=3, zfact=0.5,
origin='center')
self.assertTrue(sls.equals(els))
## other `origin` parameters
# around the centroid
sls = affinity.scale(ls, 2, 3, 0.5, origin='centroid')
els = load_wkt('LINESTRING(228.75 537.5, 228.75 237.5, 348.75 237.5)')
self.assertTrue(sls.equals(els))
# around the second coordinate tuple
sls = affinity.scale(ls, 2, 3, 0.5, origin=ls.coords[1])
els = load_wkt('LINESTRING(240 600, 240 300, 360 300)')
self.assertTrue(sls.equals(els))
# around some other 3D Point of origin
sls = affinity.scale(ls, 2, 3, 0.5, origin=Point(100, 200, 1000))
els = load_wkt('LINESTRING(380 800 505, 380 500 515, 500 500 510)')
self.assertTrue(sls.equals(els))
# Do explicit 3D check of coordinate values
for a, b in zip(sls.coords, els.coords):
for ap, bp in zip(a, b):
self.assertEqual(ap, bp)
def test_skew(self):
ls = load_wkt('LINESTRING(240 400 10, 240 300 30, 300 300 20)')
# test default shear angles of 0.0
sls = affinity.skew(ls)
self.assertTrue(sls.equals(ls))
# different shearing in x- and y-directions
sls = affinity.skew(ls, 15, -30)
els = load_wkt('LINESTRING (253.39745962155615 417.3205080756888, '
'226.60254037844385 317.3205080756888, '
'286.60254037844385 282.67949192431126)')
self.assertTrue(sls.almost_equals(els))
# retest with radians for the same result
sls = affinity.skew(ls, pi/12, -pi/6, use_radians=True)
self.assertTrue(sls.almost_equals(els))
# retest with named parameters for the same result
sls = affinity.skew(geom=ls, xs=15, ys=-30,
origin='center', use_radians=False)
self.assertTrue(sls.almost_equals(els))
## other `origin` parameters
# around the centroid
sls = affinity.skew(ls, 15, -30, origin='centroid')
els = load_wkt('LINESTRING(258.42150697963973 406.49519052838332, '
'231.6265877365273980 306.4951905283833185, '
'291.6265877365274264 271.8541743770057337)')
self.assertTrue(sls.almost_equals(els))
# around the second coordinate tuple
sls = affinity.skew(ls, 15, -30, origin=ls.coords[1])
els = load_wkt('LINESTRING(266.7949192431123038 400, 240 300, '
'300 265.3589838486224153)')
self.assertTrue(sls.almost_equals(els))
# around the absolute Point of origin
sls = affinity.skew(ls, 15, -30, origin=Point(0, 0))
els = load_wkt('LINESTRING(347.179676972449101 261.435935394489832, '
'320.3847577293367976 161.4359353944898317, '
'380.3847577293367976 126.7949192431122754)')
self.assertTrue(sls.almost_equals(els))
def test_translate(self):
    """affinity.translate: zero offsets, full 3D offsets, and keyword form."""
    ls = load_wkt('LINESTRING(240 400 10, 240 300 30, 300 300 20)')
    # test default offset of 0.0
    tls = affinity.translate(ls)
    self.assertTrue(tls.equals(ls))
    # test all offsets
    tls = affinity.translate(ls, 100, 400, -10)
    els = load_wkt('LINESTRING(340 800 0, 340 700 20, 400 700 10)')
    self.assertTrue(tls.equals(els))
    # Do explicit 3D check of coordinate values
    # (presumably because equals() ignores Z -- hence the per-coordinate loop)
    for a, b in zip(tls.coords, els.coords):
        for ap, bp in zip(a, b):
            self.assertEqual(ap, bp)
    # retest with named parameters for the same result
    tls = affinity.translate(geom=ls, xoff=100, yoff=400, zoff=-10)
    self.assertTrue(tls.equals(els))
def test_suite():
    """Build the suite containing every affinity-transform test case."""
    load = unittest.TestLoader().loadTestsFromTestCase
    cases = (AffineTestCase, TransformOpsTestCase)
    return unittest.TestSuite([load(case) for case in cases])
| bsd-3-clause |
black9/Nyan-Tuna-JB | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Global state shared by the perf callbacks and handlers below.
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget (max length) of rx_skb_list, tx_queue_list
                       # and tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which were freed
# options (filled in by trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Convert an interval between two nanosecond timestamps into milliseconds.
def diff_msec(src, dst):
    """Return the time from src to dst (both in nsec) as a float in msec."""
    delta_nsecs = dst - src
    return delta_nsecs / 1e6
# Display a process of transmitting a packet
def print_transmit(hunk):
    # Skip hunks for other devices when a "dev=" filter is active.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # Columns: device, length, queue timestamp, Qdisc latency, device latency.
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing (used by print_receive below)
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    # All offsets below are printed relative to the first irq entry time.
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed (honor the "dev=" filter)
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # Hard irq entries, plus any netif_rx events recorded while inside them.
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # The NET_RX softirq entry that drained those irqs.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    # Events recorded during the softirq: napi polls and received skbs.
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' means the skb was copied to user space;
            # 'handle' means it was freed/consumed in the kernel.
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse command line options (tx, rx, dev=<name>, debug) into the
    module-level option globals before tracing starts."""
    global show_tx
    global show_rx
    global dev
    global debug
    # Skip argv[0] (the script name) and scan the remaining options.
    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.startswith('dev='):
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # With no explicit chart selection, show both tx and rx.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Sort all buffered events, dispatch them to handlers, then print."""
    # order all events in time (Python 2 cmp-style sort)
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
        b[EINFO_IDX_TIME]))
    # process all events, dispatching on the tracepoint name
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    # with "debug" option, report buffer occupancy and overflow counters
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    """perf callback: buffer a softirq-entry event (NET_RX vectors only)."""
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    """perf callback: buffer a softirq-exit event (NET_RX vectors only)."""
    # NOTE(review): the symbol table is looked up under the *entry* event name,
    # presumably because the vec enum is shared across entry/exit -- confirm.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    """perf callback: buffer a softirq-raise event (NET_RX vectors only)."""
    # NOTE(review): symbol table looked up under the *entry* event name, as in
    # irq__softirq_exit -- confirm the vec enum is shared.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
        irq, irq_name):
    """perf callback: buffer a hard-irq handler-entry event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        irq, irq_name)
    all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    """perf callback: buffer a hard-irq handler-exit event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    """perf callback: buffer a napi poll event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        napi, dev_name)
    all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
        skblen, dev_name):
    """perf callback: buffer a netif_receive_skb event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
        skblen, dev_name):
    """perf callback: buffer a netif_rx event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
        skbaddr, skblen, dev_name):
    """perf callback: buffer a net_dev_queue (Qdisc enqueue) event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, dev_name)
    all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
        skbaddr, skblen, rc, dev_name):
    """perf callback: buffer a net_dev_xmit event (rc is the driver result)."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
        skbaddr, protocol, location):
    """perf callback: buffer a kfree_skb (packet drop/free) event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, protocol, location)
    all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    """perf callback: buffer a consume_skb (normal free) event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr)
    all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
        skbaddr, skblen):
    """perf callback: buffer a skb_copy_datagram_iovec (copy to user) event."""
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
        skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    """Push a new irq record onto this cpu's stack of in-flight hard irqs."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
    """Close the topmost irq record for this cpu; keep it only if it
    recorded NET_RX-related work in its 'event_list'."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    # Mismatched irq number: a nesting we cannot pair, so discard silently.
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Attach a 'sirq_raise' marker to the irq on top of this cpu's stack."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    """Open a fresh NET_RX softirq context for this cpu."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
    """Close this cpu's NET_RX softirq context and merge it with the irqs
    that raised it into one 'receive hunk' for later display."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    # Need both the raising irqs and the softirq context to form a hunk.
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
        'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    """Record a napi poll inside this cpu's open NET_RX softirq context."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
            'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)
def handle_netif_rx(event_info):
    """Attach a netif_rx event to the irq on top of this cpu's stack."""
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
       or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Record a received skb in the softirq context and in rx_skb_list,
    where a later copy/free event can be matched against it."""
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
            'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # Enforce the buffer budget: drop the oldest entry and count it.
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Record a packet entering the Qdisc (dev_queue_xmit)."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    # Enforce the buffer budget: drop the oldest entry and count it.
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """On a successful driver transmit, move the matching skb from the
    Qdisc list to the xmit list and stamp its xmit time."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                # Enforce the buffer budget on the xmit list.
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    """Match a kfree_skb against the tx queue (drop it), the tx xmit list
    (record its free time), or the rx list (annotate the drop)."""
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    # Freed while still queued in the Qdisc: it never reached the device.
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # Freed after transmit: complete the tx record.
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # Received skb dropped in the kernel: annotate for print_receive.
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    """A transmitted skb was consumed normally: record its free time."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """A received skb was copied to user space: annotate it with the
    consuming process and remove it from the match list."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
obiben/profitpy | profit/workbench/accountdisplay.py | 18 | 4727 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <troy@gci.net>
# Distributed under the terms of the GNU General Public License v2
from PyQt4.QtCore import QAbstractTableModel, QSize, QVariant, Qt
from PyQt4.QtGui import QFrame, QStandardItemModel, QStandardItem
from profit.lib import BasicHandler, Signals, valueAlign
from profit.lib.gui import colorIcon, complementColor
from profit.lib.widgets.plot import PlotCurve, ControlTreeValueItem
from profit.workbench.widgets.ui_accountdisplay import Ui_AccountDisplay
class AccountTableModel(QStandardItemModel):
    """ AccountTableModel -> item model of latest account data.
    """
    columnTitles = ['Item', 'Currency', 'Value', 'Account', ]
    def __init__(self, session, parent=None):
        """ Initializer.
        @param session Session instance
        @param parent ancestor object
        """
        QStandardItemModel.__init__(self, parent)
        self.setHorizontalHeaderLabels(self.columnTitles)
        # maps (key, currency, accountName) -> row items; populated lazily
        # by AccountDisplay.updateModelItems as rows are inserted.
        self.items = {}
        self.session = session
        # Replay the account values already recorded in the session so the
        # model starts out current, then register for live updates.
        fillSlot = self.on_session_UpdateAccountValue
        for mrec in session.iterMessageTypes('UpdateAccountValue'):
            fillSlot(mrec[1])
        session.registerMeta(self)
    def on_session_UpdateAccountValue(self, message):
        """ Changes model items to match latest account data.
        @param message message instance
        @return None
        """
        key = (message.key, message.currency, message.accountName)
        try:
            items = self.items[key]
        except (KeyError, ):
            # No row registered for this key yet; nothing to update.
            pass
        else:
            # Column 2 is the 'Value' column (see columnTitles).
            items[2].setText(message.value)
class AccountDisplay(QFrame, Ui_AccountDisplay, BasicHandler):
    """ AccountDisplay -> displays account data and associated plot controls.
    """
    def __init__(self, parent=None):
        """ Initializer.
        @param parent ancestor object
        """
        QFrame.__init__(self, parent)
        self.setupUi(self)
        # requestSession eventually calls setSession (BasicHandler wiring);
        # resize once here so the widget is laid out before data arrives.
        self.requestSession()
        self.resizePlotControls()
    def setSession(self, session):
        """ Configures this instance for a session.
        @param session Session instance
        @return None
        """
        self.session = session
        self.dataModel = model = AccountTableModel(session, self)
        plot = self.plot
        plot.plotButton.setVisible(False)
        plot.setSessionPlot(session, session.maps.account, 'account')
        plot.controlsTreeModel = model
        plot.controlsTree.setModel(model)
        plot.controlsTree.header().show()
        # Seed one plot series per known account data key, using the most
        # recent value when available.
        for key, series in session.maps.account.items():
            value = session.maps.account.last.get(key, None)
            self.newPlotSeries(key, series, value)
        connect = self.connect
        connect(session, Signals.createdAccountData, self.newPlotSeries)
        connect(model, Signals.standardItemChanged, plot.on_controlsTree_itemChanged)
        connect(model, Signals.rowsInserted, self.updateModelItems)
        plot.loadSelections()
        self.resizePlotControls()
    def newPlotSeries(self, key, series, value):
        """ Called when the session creates a new series for account data.
        @param key triple of account data key, currency, and account name
        @param value value for account data key; may be float or string
        @return None
        """
        cols = range(len(self.dataModel.columnTitles))
        items = [ControlTreeValueItem('') for i in cols[1:]]
        items[0].setText(key[1])
        items[1].setText(str(value))
        items[2].setText(key[2])
        # Only numeric values can be plotted, so only those get a checkbox.
        try:
            value = float(value)
            checkable = True
        except (TypeError, ValueError, ):
            checkable = False
        self.plot.addSeries(key, series, items=items, checkable=checkable)
    def resizePlotControls(self):
        """ Adjusts column sizes and sort order.
        @return None
        """
        for i in range(3): ## why 3 and not 4?
            self.plot.controlsTree.resizeColumnToContents(i)
        self.plot.controlsTree.sortByColumn(0, Qt.AscendingOrder)
    def updateModelItems(self, parent, start, end):
        """ Called when rows are inserted into the item model.
        @param parent QModelIndex instance
        @param start first row number
        @param end last row number
        @return None
        """
        model = self.dataModel
        item = model.itemFromIndex(parent)
        if item:
            # Register the new row's items under (item, currency, account)
            # so on_session_UpdateAccountValue can update them in place.
            others = [model.item(item.row(), i) for i in range(1,4)]
            key = tuple(str(i.text()) for i in (item, others[0], others[2]))
            model.items[key] = [item, ] + others
        self.resizePlotControls()
| gpl-2.0 |
gacomm/VELVEEVA | lib/readconfig.py | 2 | 1157 | #!/usr/bin/env python3
import activate_venv
import json, sys, os
from functools import reduce
from veevutils import CONFIG_FILENAME
def focus(acc, new_key):
    """Descend one level into *acc* (the reduce accumulator) using *new_key*.

    Lists are indexed with int(new_key); dicts are looked up with the key
    itself.

    Raises:
        KeyError: when the key is missing, the list index is out of range,
            or the list index is not a valid integer.
        TypeError: when *acc* is neither a list nor a dict.
    """
    try:
        if isinstance(acc, list):
            focused = acc[int(new_key)]
        elif isinstance(acc, dict):
            focused = acc[new_key]
        else:
            raise TypeError("Cannot get subkey value for non object or array types")
    except (KeyError, IndexError, ValueError):
        # Normalize every lookup failure (missing dict key, out-of-range list
        # index, non-numeric list index) into the KeyError main() reports;
        # previously IndexError/ValueError escaped with raw messages.
        raise KeyError("Could not find key " + new_key)
    return focused
def main():
    """Load the config file and print either all of it, or the value at the
    dotted key path given in argv[1]. Returns 0 on success."""
    if not os.path.exists(CONFIG_FILENAME):
        raise IOError(CONFIG_FILENAME + " does not exist")
    with open(CONFIG_FILENAME) as handle:
        config = json.load(handle)
    if len(sys.argv) < 2:
        # No key path given: dump the whole configuration.
        print(config, file=sys.stdout)
        return 0
    dotted = sys.argv[1]
    try:
        value = reduce(focus, dotted.split("."), config)
    except KeyError:
        raise KeyError("Could not find key " + dotted + " in " + CONFIG_FILENAME)
    print(value, file=sys.stdout)
    return 0
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1) | bsd-3-clause |
jolove/monmale | machineLearningLibrary.py | 1 | 14267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.cross_validation import train_test_split
from sklearn import linear_model
from sklearn import mixture
from sklearn import metrics
import logging
import sys, traceback, os
import uuid
import psutil
import getpass
import usefulLibraryFiles # Libreria propia del APP
import usefulLibrary # Libreria propia del APP
import numpy as np
from datetime import datetime
# Module-wide root logger; INFO level so the per-step traces below are emitted.
log = logging.getLogger()
log.setLevel('INFO')
def applyRegression(dicWrongValues,L2,dicL,L_train,coefVaration,dicAlarmsRegr,score,testSize,analysis,procID):
    # Goals of this function:
    # 1. Replace the "wrong" values of list L2 with values predicted from the remaining features.
    #    NOTE: take care when more than one feature is "wrong" for the same sample.
    # 2. Validate every value of list L2 one by one, i.e. check that the real value is "the same"
    #    one obtained when predicting it. If it is not (it differs enough) an alarm is generated.
    #
    # Ideally the array fetched from the DB is joined with the data set under analysis (dropping
    # the samples whose coordinates hold odd values), and that joined list is then split into
    # train and test; this only applies to STEP 1.
    # NOTE: a "regressionMinScore" variable could be defined so the regression is only applied
    # when the score is above that value.
    features=[]
    for col in sorted(dicL.keys()):
        features.append(dicL[col])
    feature_names = np.array(features)
    log.info(procID+" <applyRegression> Features: "+str(feature_names))
    if (len(dicWrongValues.keys())>0):
        L_full = usefulLibrary.unionListsWrong(dicWrongValues,L2,L_train,procID) # L_full holds the union of the
        # training array and the file array, without the zeroed samples that contained strings
        log.info(procID+" <applyRegression> Num columns L_full: "+str(len(L_full[0])))
        # Walk the array: STEP 1 --> looking for the "0" placeholders put on the bad values.
        # - this time we do not iterate record by record but per column, since the algorithm is applied per column.
        # The linear regression can only be applied when the size of L_full satisfies:
        # 1. It is greater than the value defined by the test_size variable.
        # 2. If greater, it is split until an array with exactly "test_size" remains, taking random samples.
        # NOTE(review): under Python 2 these "/" divisions are integral (perCsplit would be 0);
        # presumably this runs under Python 3 -- confirm.
        percentSizeTrain=(len(L_full)*100)/(len(L_full)+len(dicWrongValues.keys()))
        if int(percentSizeTrain) >= int(testSize):
            log.info(procID+" <applyRegression> STEP 1: Train array is upper to test_size "+str(testSize)+", the Lineal Regression will be executed.")
            analysis=True
            values_X, values_Y = [], []
            columns=[]
            # Collect the distinct columns that contain wrong values.
            for wrong in sorted(dicWrongValues.keys()):
                log.info(procID+" <applyRegression> WrongDict key= "+str(wrong))
                if dicWrongValues[wrong]["y"] not in columns:
                    columns.append(dicWrongValues[wrong]["y"])
            log.info(procID+" <applyRegression> Num columns of wrong values= "+str(len(columns)))
            for col in columns:
                log.info(procID+" <applyRegression> Col= "+str(col))
                values_Y, values_X = usefulLibrary.extractColumnArray(L_full,col,procID)
                log.info(procID+" <applyRegression> Num rows values_Y: "+str(len(values_Y)))
                log.info(procID+" <applyRegression> "+str(values_Y))
                log.info(procID+" <applyRegression> Num columns values_X: "+str(len(values_X[0])))
                values_X = np.array(values_X)
                values_Y = np.array(values_Y)
                # Before splitting, compute the % size corresponding to test_size.
                #
                perCsplit=(percentSizeTrain-int(testSize))/100 # E.g.: 91% - 80% = 11% / 100 --> 0.11 of the array used as test and 0.89 for train,
                # matching exactly the 80% defined by the variable
                log.info(procID+" <applyRegression> test_size= "+str(perCsplit))
                X_train, X_test, y_train, y_test =train_test_split(values_X, values_Y, test_size=perCsplit,random_state=33)
                log.debug(procID+" <applyRegression> X_train size: before= "+str(len(values_X))+" | now= "+str(len(X_train)))
                # Create linear regression object
                regr = linear_model.LinearRegression()
                # Train the model using the training sets
                regr.fit(X_train, y_train)
                # Explained variance score: 1 is perfect prediction
                log.info(procID+" <applyRegression> Variance score: "+str(regr.score(X_test,y_test)))
                # Now estimate and substitute the wrong values of this column.
                for reg in dicWrongValues.keys():
                    x=dicWrongValues[reg]["x"]
                    y=dicWrongValues[reg]["y"]
                    if L2[x][y] == 0 and y == col: # we set this placeholder ourselves, so this check is safe
                        y_pred=regr.predict(usefulLibrary.extractColumnList(L2[x],y,procID))
                        log.info(procID+" <applyRegression> Value predict for wrong value in coordinates "+str(x)+","+str(y)+": "+str(y_pred))
                        aprox=round(y_pred,4)
                        log.info(procID+" <applyRegression> Aproximate predict value: "+str(aprox))
                        # Substitute it into the final list.
                        L2[x][y]=aprox
        else:
            log.info(procID+" <applyRegression> STEP1: Train array is lower to test_size "+str(testSize)+", the Lineal Regression will not be executed.")
    # For STEP 2 the list fetched from the DB cannot be joined with the file list, since every
    # value of the file array will be predicted and its own values must not be used for training.
    log.info(procID+" <applyRegression> Num columns L_train: "+str(len(L_train[0])))
    percentSizeTrain=(len(L_train)*100)/(len(L_train)+len(L2))
    if int(percentSizeTrain) >= int(testSize):
        log.info(procID+" <applyRegression> STEP 2: Train array is upper to test_size "+str(testSize)+", the Lineal Regression will be executed.")
        analysis=True
        values_X, values_Y = [], []
        # Walk the whole file array predicting every value, column by column.
        for colum in range(len(feature_names)):
            log.info(procID+" <applyRegression> Predict values of Colum= "+str(colum))
            values_Y, values_X = usefulLibrary.extractColumnArray(L_train,colum,procID)
            values_X = np.array(values_X)
            values_Y = np.array(values_Y)
            # Before splitting, compute the % size corresponding to test_size.
            #
            perCsplit=(percentSizeTrain-int(testSize))/100 # E.g.: 91% - 80% = 11% / 100 --> 0.11 of the array used as test and 0.89 for train,
            # matching exactly the 80% defined by the variable
            log.info(procID+" <applyRegression> test_size= "+str(perCsplit))
            X_train, X_test, y_train, y_test =train_test_split(values_X, values_Y, test_size=perCsplit,random_state=33)
            log.debug(procID+" <applyRegression> X_train size: before= "+str(len(values_X))+" | now= "+str(len(X_train)))
            # Create linear regression object
            regr = linear_model.LinearRegression()
            # Train the model using the training sets
            regr.fit(X_train, y_train)
            # Explained variance score: 1 is perfect prediction
            score=regr.score(X_test,y_test)
            log.info(procID+" - Variance score: "+str(score))
            # Once the estimator is trained, start predicting.
            for row in range(len(L2)):
                subDalarm={}
                newL = usefulLibrary.extractColumnList(L2[row],colum,procID)
                log.info(procID+" <applyRegression> List of features to predict: "+str(newL))
                y_pred=regr.predict(newL)
                log.info(procID+" <applyRegression> Value predict for coordinates row,colum "+str(row)+","+str(colum)+" -> REAL: "+str(L2[row][colum])+" PRED: "+str(y_pred))
                aprox=round(y_pred,4)
                log.info(procID+" <applyRegression> Aproximate predict value: "+str(aprox))
                coefV = usefulLibrary.desviacionTipica(L2[row][colum],aprox,procID)
                if coefV > int(coefVaration):
                    # The coefficient of variation exceeds the allowed limit, so an
                    # alarm is generated as a possible anomaly.
                    subDalarm["x"]=row
                    subDalarm["y"]=colum
                    dicAlarmsRegr[len(dicAlarmsRegr.keys())]=subDalarm
                    log.info(procID+" <applyRegression> Alarm generated...[coefV= "+str(coefV)+"]")
                else:
                    # The coefficient of variation is within the allowed limit, so the real
                    # and predicted values are considered similar and no alarm is raised.
                    log.info(procID+" <applyRegression> Element with value between interval...[coefV= "+str(coefV)+"]")
    else:
        log.info(procID+" <applyRegression> STEP2: Train array is lower to test_size "+str(testSize)+", the Lineal Regression will not be executed.")
    # Once the array has been walked and all possible alarms obtained, we are done.
def applyClusteringTotal(L_predict,features,L_train,dicDBvariables,dicAlarmsClus,score,procID):
    # This procedure applies the Gaussian Mixture Models (GMM) clustering algorithm a number of
    # times (per the proofGroup variable), doubling the number of target groups on each
    # iteration (2^x). Each iteration records the group every sample of the prediction array
    # (L_predict) belongs to.
    # Applied per sample (row).
    log.info(procID+" <applyClusteringTotal> Features: "+str(features))
    percentSizeTrain=(len(L_train)*100)/(len(L_train)+len(L_predict))
    # Before splitting, compute the % size corresponding to test_size.
    #
    perCsplit=(percentSizeTrain-int(dicDBvariables["test_size"]))/100 # E.g.: 91% - 80% = 11% / 100 --> 0.11 of the array used as test and 0.89 for train,
    # matching exactly the 80% defined by the variable
    log.debug(procID+" <applyClusteringTotal> test_size= "+str(perCsplit))
    X_train, X_test, y_train, y_test =train_test_split(L_train, L_train, test_size=perCsplit,random_state=33)
    log.debug(procID+" <applyClusteringTotal> X_train size: before= "+str(len(L_train))+" | now= "+str(len(X_train)))
    nComp=2
    dicResultClusSample={}
    dicResultClusGroup={}
    for proof in range(int(dicDBvariables['proofGroup'])):
        log.info(procID+" <applyClusteringTotal> Proof level:"+str(proof)+" - n_components: "+str(nComp))
        gm = mixture.GMM(n_components=nComp,covariance_type='tied', random_state=42)
        gm.fit(X_train)
        y_pred = gm.predict(L_predict)
        usefulLibrary.saveResult(y_pred,dicResultClusSample,dicResultClusGroup,'I'+str(proof),procID)
        # Double the number of clusters for the next iteration.
        nComp=nComp*2
    log.debug(dicResultClusSample)
    log.debug(dicResultClusGroup)
    usefulLibrary.applyClusteringAlarm(dicResultClusSample,dicResultClusGroup,dicAlarmsClus,dicDBvariables['clustGroup'],procID)
    for alarm in sorted(dicAlarmsClus.keys()):
        log.info(procID+" <applyClusteringTotal> Row:"+str(L_predict[alarm])+" - level: "+str(dicAlarmsClus[alarm]))
def applyClusteringPartial(L_predict,features,L_train,dicDBvariables,dicAlarmsClusTotal,score,procID):
    # This procedure applies the Gaussian Mixture Models (GMM) clustering algorithm a number of
    # times (per the proofGroup variable), doubling the number of target groups on each
    # iteration (2^x). Each iteration records the group every sample of the prediction array
    # (L_predict) belongs to.
    # Applied per column.
    log.info(procID+" <applyClusteringPartial> Features: "+str(features))
    percentSizeTrain=(len(L_train)*100)/(len(L_train)+len(L_predict))
    # Before splitting, compute the % size corresponding to test_size.
    #
    perCsplit=(percentSizeTrain-int(dicDBvariables["test_size"]))/100 # E.g.: 91% - 80% = 11% / 100 --> 0.11 of the array used as test and 0.89 for train,
    # matching exactly the 80% defined by the variable
    log.debug(procID+" <applyClusteringPartial> test_size= "+str(perCsplit))
    X_train, X_test, y_train, y_test =train_test_split(L_train, L_train, test_size=perCsplit,random_state=33)
    log.debug(procID+" <applyClusteringPartial> X_train size: before= "+str(len(L_train))+" | now= "+str(len(X_train)))
    for col in range(len(features)):
        dicAlarmsClus={}
        nComp=2
        dicResultClusSample={}
        dicResultClusGroup={}
        L_1colTR, L_restColTR = usefulLibrary.extractColumnArray(L_train,col,procID)
        L_1colPR, L_restColPR = usefulLibrary.extractColumnArray(L_predict,col,procID)
        for proof in range(int(dicDBvariables['proofGroup'])):
            log.info(procID+" <applyClusteringPartial> Proof level:"+str(proof)+" - n_components: "+str(nComp)+" - COLUMN= "+str(col))
            gm = mixture.GMM(n_components=nComp,covariance_type='tied', random_state=42)
            gm.fit(L_1colTR)
            y_pred = gm.predict(L_1colPR)
            usefulLibrary.saveResult(y_pred,dicResultClusSample,dicResultClusGroup,'I'+str(proof),procID)
            # Double the number of clusters for the next iteration.
            nComp=nComp*2
        log.debug(dicResultClusSample)
        log.debug(dicResultClusGroup)
        usefulLibrary.applyClusteringAlarm(dicResultClusSample,dicResultClusGroup,dicAlarmsClus,dicDBvariables['clustGroup'],procID)
        dicAlarmsClusTotal[col]=dicAlarmsClus
        for alarm in sorted(dicAlarmsClus.keys()):
            log.info(procID+" <applyClusteringPartial> COLUMN= "+str(col)+" Value:"+str(L_predict[alarm][col])+" - level: "+str(dicAlarmsClus[alarm])) | apache-2.0 |
NoneGG/aredis | aredis/commands/server.py | 1 | 11696 | import datetime
from aredis.exceptions import RedisError
from aredis.utils import (b, bool_ok,
nativestr, dict_merge,
string_keys_to_dict,
list_keys_to_dict,
pairs_to_dict,
NodeFlag)
def parse_slowlog_get(response, **options):
    """Convert the raw SLOWLOG GET reply into a list of entry dicts."""
    def _entry(item):
        # Each raw item is (id, unix start time, duration in usec, arg list);
        # the command arguments are re-joined into a single byte string.
        return {
            'id': item[0],
            'start_time': int(item[1]),
            'duration': int(item[2]),
            'command': b(' ').join(item[3])
        }
    return [_entry(item) for item in response]
def parse_client_list(response, **options):
    """Parse the CLIENT LIST reply into a list of per-client dicts."""
    lines = nativestr(response).splitlines()
    # Split each pair on the first '=' only, since values may contain '='.
    return [
        dict(pair.split('=', 1) for pair in line.split(' '))
        for line in lines
    ]
def parse_config_get(response, **options):
    """Parse the CONFIG GET reply into a key/value dictionary."""
    decoded = []
    for item in response:
        # None entries are preserved; everything else is decoded to str.
        decoded.append(None if item is None else nativestr(item))
    if not decoded:
        return {}
    return pairs_to_dict(decoded) or {}
def timestamp_to_datetime(response):
    """Converts a unix timestamp to a Python datetime object.

    Returns ``None`` for empty/zero replies and for replies that are not
    convertible to an integer.
    """
    if not response:
        return None
    try:
        seconds = int(response)
    except ValueError:
        # Non-numeric reply -- nothing sensible to convert.
        return None
    return datetime.datetime.fromtimestamp(seconds)
def parse_debug_object(response):
    """
    Parses the results of Redis's DEBUG OBJECT command into a Python dict
    """
    # The reply begins with the object's type, which carries no field name
    # of its own; label it explicitly so it survives the key:value split.
    text = 'type:' + nativestr(response)
    parsed = {}
    for token in text.split():
        key, value = token.split(':')
        parsed[key] = value
    # Coerce the fields known to be numeric. This command isn't spec'd, so
    # these fields may be absent in some redis versions.
    for field in ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle'):
        if field in parsed:
            parsed[field] = int(parsed[field])
    return parsed
def parse_info(response):
    """Parses the result of Redis's INFO command into a Python dict"""
    def _coerce(value):
        # Plain scalar: try float (if it has a dot) or int, else keep str.
        if ',' not in value or '=' not in value:
            try:
                return float(value) if '.' in value else int(value)
            except ValueError:
                return value
        # 'a=1,b=2' style values become nested dictionaries.
        nested = {}
        for piece in value.split(','):
            key, raw = piece.rsplit('=', 1)
            nested[key] = _coerce(raw)
        return nested

    info = {}
    for line in nativestr(response).splitlines():
        if not line or line.startswith('#'):
            continue
        if ':' in line:
            key, value = line.split(':', 1)
            info[key] = _coerce(value)
        else:
            # Unsplittable lines are preserved verbatim under "__raw__".
            info.setdefault('__raw__', []).append(line)
    return info
def parse_role(response):
    """Parse the ROLE reply according to the instance's replication role."""
    role = nativestr(response[0])

    def _master(payload):
        offset, slaves = payload[1:]
        parsed = {'role': role, 'offset': offset, 'slaves': []}
        for host, port, slave_offset in slaves:
            parsed['slaves'].append({
                'host': host,
                'port': int(port),
                'offset': int(slave_offset)
            })
        return parsed

    def _slave(payload):
        host, port, status, offset = payload[1:]
        return {
            'role': role,
            'host': host,
            'port': port,
            'status': status,
            'offset': offset
        }

    def _sentinel(payload):
        return {'role': role, 'masters': payload[1:]}

    # Dispatch on the reported role; unknown roles raise KeyError, as before.
    dispatch = {
        'master': _master,
        'slave': _slave,
        'sentinel': _sentinel
    }
    return dispatch[role](response)
class ServerCommandMixin:
    """Mixin implementing Redis server-administration commands
    (BGSAVE, CONFIG, CLIENT, SLOWLOG, INFO, ROLE, ...). Each method is a
    thin async wrapper around ``execute_command``; replies are
    post-processed by the callbacks declared below.
    """
    # Maps command names to callables that convert the raw wire reply
    # into Python-friendly values (see the parse_* helpers above).
    RESPONSE_CALLBACKS = dict_merge(
        string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True),
        string_keys_to_dict(
            'FLUSHALL FLUSHDB SAVE '
            'SHUTDOWN SLAVEOF', bool_ok
        ),
        {
            'ROLE': parse_role,
            'SLOWLOG GET': parse_slowlog_get,
            'SLOWLOG LEN': int,
            'SLOWLOG RESET': bool_ok,
            'CLIENT GETNAME': lambda r: r and nativestr(r),
            'CLIENT KILL': bool_ok,
            'CLIENT LIST': parse_client_list,
            'CLIENT SETNAME': bool_ok,
            'CLIENT PAUSE': bool_ok,
            'CONFIG GET': parse_config_get,
            'CONFIG RESETSTAT': bool_ok,
            'CONFIG SET': bool_ok,
            'DEBUG OBJECT': parse_debug_object,
            'INFO': parse_info,
            'LASTSAVE': timestamp_to_datetime,
            'TIME': lambda x: (int(x[0]), int(x[1])),
        }
    )

    async def bgrewriteaof(self):
        """Tell the Redis server to rewrite the AOF file from data in memory"""
        return await self.execute_command('BGREWRITEAOF')

    async def bgsave(self):
        """
        Tells the Redis server to save its data to disk. Unlike save(),
        this method is asynchronous and returns immediately.
        """
        return await self.execute_command('BGSAVE')

    async def client_kill(self, address):
        """Disconnects the client at ``address`` (ip:port)"""
        return await self.execute_command('CLIENT KILL', address)

    async def client_list(self):
        """Returns a list of currently connected clients"""
        return await self.execute_command('CLIENT LIST')

    async def client_getname(self):
        """Returns the current connection name"""
        return await self.execute_command('CLIENT GETNAME')

    async def client_setname(self, name):
        """Sets the current connection name"""
        return await self.execute_command('CLIENT SETNAME', name)

    async def client_pause(self, timeout=0):
        """
        Suspends all the Redis clients for the specified amount of time
        (in milliseconds).
        """
        return await self.execute_command('CLIENT PAUSE', timeout)

    async def config_get(self, pattern="*"):
        """Returns a dictionary of configuration based on the ``pattern``"""
        return await self.execute_command('CONFIG GET', pattern)

    async def config_set(self, name, value):
        """Sets config item ``name`` to ``value``"""
        return await self.execute_command('CONFIG SET', name, value)

    async def config_resetstat(self):
        """Resets runtime statistics"""
        return await self.execute_command('CONFIG RESETSTAT')

    async def config_rewrite(self):
        """
        Rewrites config file with the minimal change to reflect running config
        """
        return await self.execute_command('CONFIG REWRITE')

    async def dbsize(self):
        """Returns the number of keys in the current database"""
        return await self.execute_command('DBSIZE')

    async def debug_object(self, key):
        """Returns version specific meta information about a given key"""
        return await self.execute_command('DEBUG OBJECT', key)

    async def flushall(self):
        """Deletes all keys in all databases on the current host"""
        return await self.execute_command('FLUSHALL')

    async def flushdb(self):
        """Deletes all keys in the current database"""
        return await self.execute_command('FLUSHDB')

    async def info(self, section=None):
        """
        Returns a dictionary containing information about the Redis server
        The ``section`` option can be used to select a specific section
        of information
        The section option is not supported by older versions of Redis Server,
        and will generate ResponseError
        """
        if section is None:
            return await self.execute_command('INFO')
        else:
            return await self.execute_command('INFO', section)

    async def lastsave(self):
        """
        Returns a Python datetime object representing the last time the
        Redis database was saved to disk
        """
        return await self.execute_command('LASTSAVE')

    async def save(self):
        """
        Tells the Redis server to save its data to disk,
        blocking until the save is complete
        """
        return await self.execute_command('SAVE')

    async def shutdown(self):
        """Stops Redis server"""
        try:
            await self.execute_command('SHUTDOWN')
        except ConnectionError:
            # a ConnectionError here is expected: the server closes the
            # connection as part of shutting down
            return
        raise RedisError("SHUTDOWN seems to have failed.")

    async def slaveof(self, host=None, port=None):
        """
        Sets the server to be a replicated slave of the instance identified
        by the ``host`` and ``port``. If called without arguments, the
        instance is promoted to a master instead.
        """
        if host is None and port is None:
            return await self.execute_command('SLAVEOF', b('NO'), b('ONE'))
        return await self.execute_command('SLAVEOF', host, port)

    async def slowlog_get(self, num=None):
        """
        Gets the entries from the slowlog. If ``num`` is specified, get the
        most recent ``num`` items.
        """
        args = ['SLOWLOG GET']
        if num is not None:
            args.append(num)
        return await self.execute_command(*args)

    async def slowlog_len(self):
        """Gets the number of items in the slowlog"""
        return await self.execute_command('SLOWLOG LEN')

    async def slowlog_reset(self):
        """Removes all items in the slowlog"""
        return await self.execute_command('SLOWLOG RESET')

    async def time(self):
        """
        Returns the server time as a 2-item tuple of ints:
        (seconds since epoch, microseconds into this second).
        """
        return await self.execute_command('TIME')

    async def role(self):
        """
        Provides information on the role of a Redis instance in the context of replication,
        by returning if the instance is currently a master, slave, or sentinel.
        The command also returns additional information about the state of the replication
        (if the role is master or slave)
        or the list of monitored master names (if the role is sentinel).
        :return:
        """
        return await self.execute_command('ROLE')
class ClusterServerCommandMixin(ServerCommandMixin):
    """Cluster-aware variant of :class:`ServerCommandMixin`.

    Declares how each admin command is routed in cluster mode and how the
    per-node results are merged. The method implementations are inherited
    unchanged from the base mixin.
    """
    # Routing table: BLOCKED commands are refused in cluster mode; the
    # others fan out either to all master nodes or to every node.
    NODES_FLAGS = dict_merge(
        list_keys_to_dict(
            ['SHUTDOWN', 'SLAVEOF', 'CLIENT SETNAME'],
            NodeFlag.BLOCKED
        ),
        list_keys_to_dict(
            ['FLUSHALL', 'FLUSHDB'],
            NodeFlag.ALL_MASTERS
        ),
        list_keys_to_dict(
            ['SLOWLOG LEN', 'SLOWLOG RESET', 'SLOWLOG GET',
             'TIME', 'SAVE', 'LASTSAVE', 'DBSIZE',
             'CONFIG RESETSTAT', 'CONFIG REWRITE',
             'CONFIG GET', 'CONFIG SET', 'CLIENT KILL',
             'CLIENT LIST', 'CLIENT GETNAME', 'INFO',
             'BGSAVE', 'BGREWRITEAOF'],
            NodeFlag.ALL_NODES
        )
    )

    # Identity merge: fanned-out commands return their per-node result
    # mapping unchanged.
    RESULT_CALLBACKS = dict_merge(
        list_keys_to_dict(
            ['CONFIG GET', 'CONFIG SET', 'SLOWLOG GET',
             'CLIENT KILL', 'INFO', 'BGREWRITEAOF',
             'BGSAVE', 'CLIENT LIST', 'CLIENT GETNAME',
             'CONFIG RESETSTAT', 'CONFIG REWRITE', 'DBSIZE',
             'LASTSAVE', 'SAVE', 'SLOWLOG LEN',
             'SLOWLOG RESET', 'TIME', 'FLUSHALL',
             'FLUSHDB'],
            lambda res: res
        )
    )
| mit |
squidsoup/snapcraft | snapcraft/tests/commands/test_update.py | 9 | 3639 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import logging
import os
import fixtures
import yaml
from xdg import BaseDirectory
from snapcraft import main, tests
class UpdateCommandTestCase(tests.TestWithFakeRemoteParts):
    """Tests for ``snapcraft update`` against the fake remote parts server."""

    def setUp(self):
        super().setUp()
        # Parts metadata is cached under the XDG data home.
        self.parts_dir = os.path.join(BaseDirectory.xdg_data_home, 'snapcraft')
        self.parts_yaml = os.path.join(self.parts_dir, 'parts.yaml')
        self.headers_yaml = os.path.join(self.parts_dir, 'headers.yaml')

    def test_update(self):
        main.main(['update'])
        self.assertTrue(os.path.exists(self.parts_yaml))
        self.assertTrue(os.path.exists(self.headers_yaml))
        # Expected contents mirror what the fake remote serves; OrderedDict
        # preserves the ordering the remote emits.
        expected_parts = OrderedDict()
        expected_parts['curl'] = p = OrderedDict()
        p['plugin'] = 'autotools'
        p['source'] = 'http://curl.org'
        p['description'] = 'test entry for curl'
        p['maintainer'] = 'none'
        expected_parts['part1'] = p = OrderedDict()
        p['plugin'] = 'go'
        p['source'] = 'http://source.tar.gz'
        p['description'] = 'test entry for part1'
        p['maintainer'] = 'none'
        expected_parts['long-described-part'] = p = OrderedDict()
        p['plugin'] = 'go'
        p['source'] = 'http://source.tar.gz'
        p['description'] = 'this is a repetitive description ' * 3
        p['maintainer'] = 'none'
        expected_parts['multiline-part'] = p = OrderedDict()
        p['plugin'] = 'go'
        p['source'] = 'http://source.tar.gz'
        p['description'] = 'this is a multiline description\n' * 3
        p['maintainer'] = 'none'
        expected_headers = {
            'If-Modified-Since': 'Thu, 07 Jul 2016 10:00:20 GMT',
        }
        with open(self.parts_yaml) as parts_file:
            parts = yaml.load(parts_file)
        with open(self.headers_yaml) as headers_file:
            headers = yaml.load(headers_file)
        self.assertEqual(parts, expected_parts)
        self.assertEqual(headers, expected_headers)

    def test_update_with_unchanged_date_does_not_download_again(self):
        fake_logger = fixtures.FakeLogger(level=logging.INFO)
        self.useFixture(fake_logger)
        main.main(['update'])
        main.main(['update'])
        # The second run must be served from the cache.
        self.assertEqual(
            'The parts cache is already up to date.\n',
            fake_logger.output)

    def test_update_with_changed_date_downloads_again(self):
        fake_logger = fixtures.FakeLogger(level=logging.INFO)
        self.useFixture(fake_logger)
        # Pre-seed a stale If-Modified-Since header to force a re-download.
        os.makedirs(self.parts_dir)
        with open(self.headers_yaml, 'w') as headers_file:
            yaml.dump(
                {'If-Modified-Since': 'Fri, 01 Jan 2016 12:00:00 GMT'},
                headers_file)
        main.main(['update'])
        # No "already up to date" message means a download happened.
        self.assertEqual('', fake_logger.output)

    def test_update_with_no_content_length_is_supported(self):
        self.useFixture(fixtures.EnvironmentVariable('NO_CONTENT_LENGTH', '1'))
        main.main(['update'])
| gpl-3.0 |
Unsettled/eve-wspace | evewspace/Map/tasks.py | 10 | 4215 | # Eve W-Space
# Copyright 2014 Andrew Austin and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
from celery import task
from Map.models import System, KSystem, Signature
from core.models import Faction
import eveapi
from API import cache_handler as handler
from django.core.cache import cache
import pytz
@task()
def update_system_stats():
    """
    Updates the System Statistics (jumps, kills) from the API.
    """
    connection = eveapi.EVEAPIConnection(cacheHandler=handler)
    jump_data = connection.map.Jumps()
    kill_data = connection.map.Kills()
    # Zero everything first so systems absent from the feeds read as 0.
    System.objects.all().update(shipkills=0, podkills=0, npckills=0)
    KSystem.objects.all().update(jumps=0)
    # Update jumps from the Jumps API for K-space systems.
    for record in jump_data.solarSystems:
        try:
            KSystem.objects.filter(pk=record.solarSystemID).update(
                jumps=record.shipJumps)
        except Exception:
            # Best effort: skip systems we do not track.
            pass
    # Update kills from the Kills API.
    for record in kill_data.solarSystems:
        try:
            System.objects.filter(pk=record.solarSystemID).update(
                shipkills=record.shipKills,
                podkills=record.podKills,
                npckills=record.factionKills
            )
        except Exception:
            pass
@task()
def update_system_sov():
    """
    Updates the Sov for K-Space systems. If any exceptions are raised
    (e.g. Alliance record doesn't exist), sov is just marked "None."
    """
    connection = eveapi.EVEAPIConnection(cacheHandler=handler)
    sov_data = connection.map.Sovereignty()
    # Build an allianceID -> alliance name map for quick lookups below.
    alliance_names = {}
    for alliance in connection.eve.AllianceList().alliances:
        alliance_names[alliance.allianceID] = alliance.name
    KSystem.objects.all().update(sov="None")
    for system in sov_data.solarSystems:
        try:
            if system.factionID:
                KSystem.objects.filter(pk=system.solarSystemID).update(
                    sov=Faction.objects.get(pk=system.factionID).name)
            elif system.allianceID and system.allianceID in alliance_names:
                KSystem.objects.filter(pk=system.solarSystemID).update(
                    sov=alliance_names[system.allianceID])
        except Exception:
            # Best effort: leave sov as "None" on any lookup failure.
            pass
@task()
def fill_jumps_cache():
    """
    Ensures that the jumps cache is filled.
    """
    if cache.get('route_graph'):
        return
    from Map.utils import RouteFinder
    finder = RouteFinder()
    # Constructing a RouteFinder should cache the graph as a side effect;
    # if for some reason it did not, cache it explicitly.
    if not cache.get('route_graph'):
        finder._cache_graph()
@task()
def check_server_status():
    """
    Checks the server status, if it detects the server is down,
    set updated=False on all signatures. This is deprecated as of Hyperion and
    is maintained to prevent older configuration files from breaking on upgrade.
    """
    # Deprecated no-op: kept only so schedules that still reference this
    # task by name do not error after upgrading.
    return None
@task()
def downtime_site_update():
    """
    This task should be run during the scheduled EVE downtime.
    It triggers the increment_downtime function on all signatures
    that have been activated.
    """
    for sig in Signature.objects.all():
        # Only signatures whose downtime counter has been initialised get
        # incremented. The original test, ``sig.downtimes or
        # sig.downtimes == 0``, expressed exactly "not None" but obscurely
        # (assumes ``downtimes`` is an int or None -- TODO confirm against
        # the Signature model).
        if sig.downtimes is not None:
            sig.increment_downtime()
@task()
def clear_stale_records():
    """
    This task will clear any user location records older than 15 minutes.
    """
    cutoff = datetime.now(pytz.utc) - timedelta(minutes=15)
    stale = Signature.objects.filter(owned_time__isnull=False,
                                     owned_time__lt=cutoff)
    # Drop both the ownership timestamp and the owner reference.
    stale.update(owned_time=None, owned_by=None)
tfgraph/tfgraph | tfgraph/graph/graph.py | 1 | 12058 | import numpy as np
import tensorflow as tf
from tfgraph.utils.callbacks.update_edge_notifier import UpdateEdgeNotifier
from tfgraph.utils.tensorflow_object import TensorFlowObject, TF_type
# NOTE(review): this module-level ``__str__`` appears to be a stray copy of
# ``Graph.__str__`` below -- it references ``self`` but is never bound to a
# class, so it is dead code. Candidate for removal.
def __str__(self) -> str:
    return str(self.run_tf(self.L_tf))
class Graph(TensorFlowObject, UpdateEdgeNotifier):
  """ Graph class implemented in the top of TensorFlow.

  The class codifies the graph using an square matrix of 2-D shape and
  provides functionality operating with this matrix.

  Attributes:
    sess (:obj:`tf.Session`): This attribute represents the session that runs
      the TensorFlow operations.
    name (str): This attribute represents the name of the object in TensorFlow's
      op Graph.
    writer (:obj:`tf.summary.FileWriter`): This attribute represents a
      TensorFlow's Writer, that is used to obtain stats. The default value is
      `None`.
    _listeners (:obj:`set`): The set of objects that will be notified when an
      edge modifies it weight.
    n (int): Represents the cardinality of the vertex set as Python `int`.
    n_tf (:obj:`tf.Tensor`): Represents the cardinality of the vertex set as 0-D
      Tensor.
    m (int): Represents the cardinality of the edge set as Python `int`.
    A_tf (:obj:`tf.Tensor`): Represents the Adjacency matrix of the graph as
      2-D Tensor with shape [n,n].
    out_degrees_tf (:obj:`tf.Tensor`): Represents the out-degrees of the
      vertices of the graph as 2-D Tensor with shape [n, 1]
    in_degrees_tf (:obj:`tf.Tensor`): Represents the in-degrees of the vertices
      of the graph as 2-D Tensor with shape [1, n]
  """

  def __init__(self, sess: tf.Session, name: str,
               writer: tf.summary.FileWriter = None,
               edges_np: np.ndarray = None, n: int = None,
               is_sparse: bool = False) -> None:
    """ Class Constructor of the Graph

    This method is called to construct a Graph object. This block of code
    initializes all the variables necessaries for this class to properly works.

    This class can be initialized using an edge list, that fill the graph at
    this moment, or can be construct it from the cardinality of vertices set
    given by `n` parameter.

    Args:
      sess (:obj:`tf.Session`): This attribute represents the session that runs
        the TensorFlow operations.
      name (str): This attribute represents the name of the object in
        TensorFlow's op Graph.
      writer (:obj:`tf.summary.FileWriter`, optional): This attribute represents
        a TensorFlow's Writer, that is used to obtain stats. The default value
        is `None`.
      edges_np (:obj:`np.ndarray`, optional): The edge set of the graph codifies
        as `edges_np[:,0]` represents the sources and `edges_np[:,1]` the
        destinations of the edges. The default value is `None`.
      n (int, optional): Represents the cardinality of the vertex set. The
        default value is `None`.
      is_sparse (bool, optional): Use sparse Tensors if it's set to `True`. The
        default value is False` Not implemented yet. Show the Todo for more
        information.

    Todo:
      * Implement variables as sparse when it's possible. Waiting to
        TensorFlow for it.
    """
    TensorFlowObject.__init__(self, sess, name, writer, is_sparse)
    UpdateEdgeNotifier.__init__(self)

    if edges_np is not None:
      # n may be given explicitly AND derivable from the edge list; take
      # the larger so every referenced vertex fits in the matrix.
      if n is not None:
        self.n = max(n, int(edges_np.max(axis=0).max() + 1))
      else:
        self.n = int(edges_np.max(axis=0).max() + 1)
      self.m = int(edges_np.shape[0])
      A_init = tf.scatter_nd(edges_np.tolist(), self.m * [1.0],
                             [self.n, self.n])
    elif n is not None:
      self.n = n
      self.m = 0
      A_init = tf.zeros([self.n, self.n])
    else:
      raise ValueError('Graph constructor must have edges or n')

    self.n_tf = tf.Variable(float(self.n), tf.float32,
                            name=self.name + "_n")
    self.A_tf = tf.Variable(A_init, tf.float64,
                            name=self.name + "_A")
    # Degree vectors are derived from A once and then kept in sync
    # incrementally by append()/remove().
    self.out_degrees_tf = tf.Variable(
      tf.reduce_sum(self.A_tf, 1, keep_dims=True),
      name=self.name + "_d_out")
    self.in_degrees_tf = tf.Variable(
      tf.reduce_sum(self.A_tf, 0, keep_dims=True),
      name=self.name + "_d_in")
    # A and n must be initialized before the degree variables, which read A.
    self.run_tf(tf.variables_initializer([self.A_tf, self.n_tf]))
    self.run_tf(tf.variables_initializer([
      self.out_degrees_tf, self.in_degrees_tf]))

  def __str__(self) -> str:
    """ Transforms the graph to a string.

    This method is used to print the graph on the command line. It codifies the
    laplacian matrix of the graph as string.

    Returns:
      (str): representing the laplacian matrix to visualize it.
    """
    return str(self.run_tf(self.L_tf))

  @property
  def L_tf(self) -> tf.Tensor:
    """ This method returns the Laplacian of the graph.

    The method generates a 2-D Array containing the laplacian matrix of the
    graph

    Returns:
      (:obj:`tf.Tensor`): A 2-D Tensor with [n,n] shape where n is the
        cardinality of the vertex set
    """
    return tf.diag(self.out_degrees_tf_vector) - self.A_tf

  @property
  def is_not_sink_tf(self) -> tf.Tensor:
    """ This method returns if a vertex is a sink vertex as vector.

    The method generates a 1-D Tensor containing the boolean values that
    indicates if the vertex at position `i` is a sink vertex.

    Returns:
      (:obj:`tf.Tensor`): A 1-D Tensor with the same length as cardinality
        of the vertex set.
    """
    return tf.not_equal(self.out_degrees_tf_vector, 0)

  def is_not_sink_tf_vertex(self, vertex: int) -> TF_type:
    """ This method returns if a vertex is a sink vertex as vector.

    The method generates a 1-D Tensor containing the boolean values that
    indicates if the vertex at position `i` is a sink vertex.

    Args:
      vertex (int): The index of the vertex that wants to know if is sink.

    Returns:
      (:obj:`tf.Tensor`): A 0-D Tensor that represents if a vertex is a sink
        vertex
    """
    return tf.not_equal(
      tf.reshape([self.out_degrees_tf_vertex(vertex)], [1]), 0)

  @property
  def out_degrees_np(self) -> np.ndarray:
    """ This method returns the degree of all vertex as vector.

    The method generates a 1-D Array containing the out-degree of the vertex `i`
    at position `i`

    Returns:
      (:obj:`np.ndarray`): A 1-D Array with the same length as cardinality of the
        vertex set.
    """
    return self.run_tf(self.out_degrees_tf)

  def out_degrees_tf_vertex(self, vertex: int) -> tf.Tensor:
    """ This method returns the degree of all vertex as vector.

    The method generates a 0-D Array containing the out-degree of the vertex i.

    Args:
      vertex (int): The index of the vertex that wants the degree.

    Returns:
      (:obj:`np.ndarray`): A 1-D Array with the same length as cardinality of the
        vertex set.
    """
    return tf.gather(self.out_degrees_tf, [vertex])

  @property
  def edge_list_tf(self) -> tf.Tensor:
    """ Method that returns the edge set of the graph as list.

    This method return all the edges of the graph codified as 2-D matrix in
    which the first dimension represents each edge and second dimension the
    source and destination vertices of each edge.

    Returns:
      (:obj:`tf.Tensor`): A 2-D Tensor with the he same length as cardinality of
        the edge set in the first dimension and 2 in the second.
    """
    return tf.cast(tf.where(tf.not_equal(self.A_tf, 0)), tf.int64)

  @property
  def edge_list_np(self) -> np.ndarray:
    """ Method that returns the edge set of the graph as list.

    This method return all the edges of the graph codified as 2-D matrix in
    which the first dimension represents each edge and second dimension the
    source and destination vertices of each edge.

    Returns:
      (:obj:`np.ndarray`): A 2-D Array with the he same length as cardinality of
        the edge set in the first dimension and 2 in the second.
    """
    return self.run_tf(self.edge_list_tf)

  @property
  def L_pseudo_inverse_tf(self) -> tf.Tensor:
    """ Method that returns the pseudo inverse of the Laplacian matrix.

    This method calculates the pseudo inverse matrix of the Laplacian of the
    Graph. It generates a matrix of the same shape as the Laplacian matrix, i.e.
    [n, n] where n is the cardinality of the vertex set.

    Returns:
      (:obj:`tf.Tensor`): A 2-D square Tensor with the he same length as
        cardinality of the vertex set representing the laplacian pseudo inverse.
    """
    # np.linalg.pinv is wrapped with py_func because TF 1.x has no native op.
    return tf.py_func(np.linalg.pinv, [self.L_tf], tf.float32)

  def A_tf_vertex(self, vertex: int) -> tf.Tensor:
    """ Method that returns the adjacency of an individual vertex.

    This method extracts the corresponding row referred to the `vertex` passed
    as parameter. It constructs a vector that contains the weight of the edge
    between `vertex` (obtained as parameter) and the vertex at position `i` in
    the vector.

    Args:
      vertex (int): The index of the vertex that wants the degree.

    Returns:
      (:obj:`tf.Tensor`): A 1-D Tensor with the same length as the cardinality
        of the vertex set.
    """
    return tf.gather(self.A_tf, [vertex])

  @property
  def in_degrees_np(self) -> np.ndarray:
    """ This method returns the in-degree of all vertex as vector.

    The method generates a 1-D Array containing the in-degree of the vertex `i`
    at position `i`

    Returns:
      (:obj:`np.ndarray`): A 1-D Array with the same length as cardinality of the
        vertex set.
    """
    return self.run_tf(self.in_degrees_tf)

  @property
  def in_degrees_tf_vector(self):
    """ The in-degrees of the vertices of the graph

    Method that returns the in-degrees of the vertices of the graph as 1-D
    Tensor with shape [n]

    Returns:
      (:obj:`tf.Tensor`): A 1-D Tensor with the same length as the cardinality
        of the vertex set.
    """
    return tf.reshape(self.in_degrees_tf, [self.n])

  @property
  def out_degrees_tf_vector(self):
    """ The out-degrees of the vertices of the graph

    Method that returns the out-degrees of the vertices of the graph as 1-D
    Tensor with shape [n]

    Returns:
      (:obj:`tf.Tensor`): A 1-D Tensor with the same length as the cardinality
        of the vertex set.
    """
    return tf.reshape(self.out_degrees_tf, [self.n])

  def append(self, src: int, dst: int) -> None:
    """ Append an edge to the graph.

    This method process an input edge adding it to the graph updating all the
    variables necessaries to maintain the graph in correct state.

    Args:
      src (int): The id of the source vertex of the edge.
      dst (int): The id of the destination vertex of the edge.

    Returns:
      This method returns nothing.

    Raises:
      ValueError: If either `src` or `dst` is None.
    """
    # Bug fix: the original test `src and dst is None` parsed as
    # `src and (dst is None)`, so a missing `src` was never detected.
    if src is None or dst is None:
      raise ValueError(
        "src and dst must not be None")

    self.run_tf([tf.scatter_nd_add(self.A_tf, [[src, dst]], [1.0]),
                 tf.scatter_nd_add(self.out_degrees_tf, [[src, 0]], [1.0]),
                 tf.scatter_nd_add(self.in_degrees_tf, [[0, dst]], [1.0])])
    self.m += 1
    self._notify(np.array([src, dst]), 1)

  def remove(self, src: int, dst: int) -> None:
    """ Remove an edge to the graph.

    This method process an input edge deleting it to the graph updating all the
    variables necessaries to maintain the graph in correct state.

    Args:
      src (int): The id of the source vertex of the edge.
      dst (int): The id of the destination vertex of the edge.

    Returns:
      This method returns nothing.

    Raises:
      ValueError: If either `src` or `dst` is None.
    """
    # Bug fix: same operator-precedence problem as in append().
    if src is None or dst is None:
      raise ValueError(
        "src and dst must not be None")

    self.run_tf([tf.scatter_nd_add(self.A_tf, [[src, dst]], [-1.0]),
                 tf.scatter_nd_add(self.out_degrees_tf, [[src, 0]], [-1.0]),
                 tf.scatter_nd_add(self.in_degrees_tf, [[0, dst]], [-1.0])])
    self.m -= 1
    self._notify(np.array([src, dst]), -1)
jlegendary/pybrain | pybrain/tools/plotting/fitnesslandscapes.py | 31 | 3853 | # some utility code for nicely plotting 3D images of function fitness landscapes.
__author__ = 'Tom Schaul, tom@idsia.ch'
from scipy import zeros, r_, cos, sin, pi, array, dot, sqrt, diag
from scipy.linalg import svd
from pylab import figure, plot, show, meshgrid, contour, savefig, colorbar
from pybrain.rl.environments.functions import FunctionEnvironment
from inspect import isclass
def plotCovEllipse(emat, center, segments=50, color='y', transp=1.):
    """ Plots a covariance ellipse. """
    # Matrix square root of the covariance, used to deform the unit circle.
    u, s, d = svd(emat)
    sqrt_cov = dot(d, dot(diag(sqrt(s)), u))
    # Sample segments+1 points along the deformed circle (closed curve).
    xs = zeros(segments + 1)
    ys = zeros(segments + 1)
    for i in range(segments + 1):
        angle = (2 * pi * i) / float(segments)
        cx, cy = cos(angle), sin(angle)
        xs[i] = center[0] + sqrt_cov[0, 0] * cx + sqrt_cov[0, 1] * cy
        ys[i] = center[1] + sqrt_cov[1, 0] * cx + sqrt_cov[1, 1] * cy
    # Mark the center, then draw the ellipse outline.
    plot([center[0]], [center[1]], '+', color=color, alpha=transp)
    plot(xs, ys, '-', color=color, alpha=transp)
    return xs, ys
class FitnessPlotter:
    """ plot the function's values in the rectangular region specified by ranges. By default, plot in [-1,1] """

    def __init__(self, f, xmin= -1, xmax=1, ymin= -1, ymax=1, precision=50, newfig=True,
                 colorbar=False, cblabel=None):
        """ :key precision: how many steps along every dimension """
        # Accept a 2-D FunctionEnvironment instance, a FunctionEnvironment
        # subclass (instantiated here with xdim=2), or a plain f(x, y).
        if isinstance(f, FunctionEnvironment):
            assert f.xdim == 2
            self.f = lambda x, y: f(array([x, y]))
        elif isclass(f) and issubclass(f, FunctionEnvironment):
            tmp = f(2)
            self.f = lambda x, y: tmp(array([x, y]))
        else:
            self.f = f
        self.precision = precision
        self.colorbar = colorbar
        self.cblabel = cblabel
        # Grid coordinates and precomputed fitness values over the grid.
        self.xs = r_[xmin:xmax:self.precision * 1j]
        self.ys = r_[ymin:ymax:self.precision * 1j]
        self.zs = self._generateValMap()
        if newfig:
            self.fig = figure()

    def _generateValMap(self):
        """ generate the function fitness values for the current grid of x and y """
        vals = zeros((len(self.xs), len(self.ys)))
        for i, x in enumerate(self.xs):
            for j, y in enumerate(self.ys):
                # Note: stored transposed (row=j, col=i) to suit contour().
                vals[j, i] = self.f(x, y)
        return vals

    def plotAll(self, levels=50, popup=True):
        """ :key levels: how many fitness levels should be drawn."""
        tmp = contour(self.xs, self.ys, self.zs, levels)
        if self.colorbar:
            cb = colorbar(tmp)
            if self.cblabel != None:
                cb.set_label(self.cblabel)
        if popup: show()

    def addSamples(self, samples, rescale=True, color=''):
        """plot some sample points on the fitness landscape.

        :key rescale: should the plotting ranges be adjusted? """
        # split samples into x and y
        sx = zeros(len(samples))
        sy = zeros(len(samples))
        for i, s in enumerate(samples):
            sx[i] = s[0]
            sy[i] = s[1]
        if rescale:
            self._rescale(min(sx), max(sx), min(sy), max(sy))
        plot(sx, sy, color + '+')

    def _rescale(self, xmin, xmax, ymin, ymax):
        # Grow (never shrink) the grid by 10% margins and recompute values.
        self.xs = r_[min(xmin * 1.1, min(self.xs)):max(xmax * 1.1, max(self.xs)):self.precision * 1j]
        self.ys = r_[min(ymin * 1.1, min(self.ys)):max(ymax * 1.1, max(self.ys)):self.precision * 1j]
        self.zs = self._generateValMap()

    def addCovEllipse(self, emat, center, segments=50, rescale=True, color='c', transp=1.):
        """plot a covariance ellipse """
        ex, ey = plotCovEllipse(emat, center, segments, color, transp)
        if rescale:
            self._rescale(min(ex), max(ex), min(ey), max(ey))

    def saveAs(self, filename, format='.jpg'):
        savefig(filename + format)
ManiacTwister/dionaea | modules/python/scripts/test.py | 11 | 2040 | #********************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
#* Copyright (C) 2009 Paul Baecher & Markus Koetter
#*
#* This program is free software; you can redistribute it and/or
#* modify it under the terms of the GNU General Public License
#* as published by the Free Software Foundation; either version 2
#* of the License, or (at your option) any later version.
#*
#* This program is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with this program; if not, write to the Free Software
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#*
#*
#* contact nepenthesdev@gmail.com
#*
#*******************************************************************************/
from dionaea.core import ihandler, incident, g_dionaea
from dionaea.core import connection
import logging
import json
global p
logger = logging.getLogger('test')
logger.setLevel(logging.DEBUG)
class uniquedownloadihandler(ihandler):
    """Incident handler that submits captured files to configured URLs."""

    def __init__(self, path):
        logger.debug("%s ready!" % (self.__class__.__name__))
        ihandler.__init__(self, path)

    def handle_incident(self, icd):
        """Submit the incident's file to every configured 'submit' target."""
        logger.debug("submitting file")
        try:
            tos = g_dionaea.config()['submit']
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; missing/broken config is still a silent no-op.
        except Exception:
            return
        for to in tos:
            if 'urls' not in tos[to]:
                logger.warn("your configuration lacks urls to submit to %s" % to)
                continue
            for url in tos[to]['urls']:
                i = incident("dionaea.upload.request")
                i._url = url
                # copy all values for this url
                for key in tos[to]:
                    if key == 'urls':
                        continue
                    if key == 'file_fieldname':
                        i.set("file://" + tos[to][key], icd.file)
                        continue
                    i.set(key, tos[to][key])
                i.report()
dongweiming/code | tests/docs/test_docs.py | 3 | 9129 | from vilya.models.project import CodeDoubanProject
from vilya.models.sphinx_docs import SphinxDocs
import nose
from tests.base import TestCase
from tests.utils import delete_project
base_yaml_conf_old = """
sphinx_docs:
dir: ""
"""
base_yaml_conf = """
docs:
docs:
dir: ""
builder: pickle
"""
base_index_rst = """
Unit testing sphinx docs
========================
.. toctree::
:glob:
*
"""
base_document1_rst = """
Test doc1
=========
Something here
"""
base_document2_rst = """
Test doc2
=========
Something here
"""
class TestDocsHelpers(TestCase):
    """Shared fixture helpers: create a throwaway project and commit files."""

    html1 = '<h1>TITLE1**</h1>'

    def _prj(self):
        # Drop any leftover 'test' project, then create a fresh one.
        delete_project('test')
        return CodeDoubanProject.add('test', 'owner', create_trac=False)

    def _add(self, prj, fn, content):
        # Commit a single file to the project's git repo as a fresh user.
        user = self.addUser()
        prj.git.commit_one_file(fn, content, 'add %s' % fn, user)
class TestDocs(TestDocsHelpers):
    # End-to-end tests of SphinxDocs building from committed fixtures.

    @nose.tools.raises(Exception)
    def test_create_wrong(self):
        # Constructing docs for a project that does not exist must raise.
        sd = SphinxDocs('unexisting_project')
        # Not reached when the expected exception fires (see decorator).
        assert sd.enabled is False

    def test_create_disabled(self):
        prj = self._prj()
        # Old-style empty 'sphinx_docs' entry alongside a new-style 'docs'
        # section; docs must still count as enabled.
        conf = """
sphinx_docs: ""
docs:
    docs:
        builder: pickle
"""
        self._add(prj, 'code_config.yaml', conf)
        sd = SphinxDocs(prj.name)
        assert sd.enabled is True, "should be enabled by default"

    def test_create_enabled(self):
        prj = self._prj()
        self._add(prj, 'code_config.yaml', base_yaml_conf)
        sd = SphinxDocs(prj.name)
        assert sd.enabled is True

    def test_create_with_index_and_doc(self):
        prj = self._prj()
        self._add(prj, 'code_config.yaml', base_yaml_conf)
        self._add(prj, 'index.rst', base_index_rst)
        self._add(prj, 'doc1.rst', base_document1_rst)
        sd = SphinxDocs(prj.name)
        sd.build_all()
        builder = sd.get_builder('docs')
        # Pickle builder exposes parsed template data; '' is the index page.
        doc = builder.template_data('', {})
        assert doc['title'] == 'Unit testing sphinx docs'

    def test_build_info(self):
        prj = self._prj()
        self._add(prj, 'code_config.yaml', base_yaml_conf)
        self._add(prj, 'index.rst', base_index_rst)
        self._add(prj, 'doc1.rst', base_document1_rst)
        sd = SphinxDocs(prj.name)
        sd.build_all()
        # A successful build is recorded in the last-build metadata.
        bi = sd.last_build_info()
        assert bi['status'] == 'success'

    def test_need_rebuild(self):
        prj = self._prj()
        self._add(prj, 'code_config.yaml', base_yaml_conf)
        self._add(prj, 'index.rst', base_index_rst)
        sd = SphinxDocs(prj.name)
        # Fresh project: nothing built yet, so a rebuild is needed.
        assert sd.need_rebuild()
        sd.build_all()
        assert not sd.need_rebuild()
        # A new commit invalidates the previous build.
        self._add(prj, 'doc1.rst', base_document1_rst)
        sd = SphinxDocs(prj.name)  # Bad, should not have to refresh object
        assert sd.need_rebuild()
        sd.build_all()
        assert not sd.need_rebuild()

    def test_create_with_index_and_doc_and_get_again(self):
        prj = self._prj()
        self._add(prj, 'code_config.yaml', base_yaml_conf)
        self._add(prj, 'index.rst', base_index_rst)
        self._add(prj, 'doc1.rst', base_document1_rst)
        sd = SphinxDocs(prj.name)
        sd.build_all()
        # A second SphinxDocs instance must see the persisted build output.
        sd2 = SphinxDocs(prj.name)
        builder = sd2.get_builder('docs')
        assert builder.template
        doc = builder.template_data('', {})
        assert doc['title'] == 'Unit testing sphinx docs'

    def test_create_with_index_and_doc_and_two_builders(self):
        prj = self._prj()
        # One html builder ('docs') and one pickle builder ('docs2'),
        # both rooted at the repository top level.
        base_yaml_conf_two_builders = """
docs:
    docs:
        builder: html
        dir: ""
        html_theme: default
        html_short_title: testsub
    docs2:
        dir: ""
        builder: pickle
"""
        self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
        self._add(prj, 'index.rst', base_index_rst)
        self._add(prj, 'doc1.rst', base_document1_rst)
        sd = SphinxDocs(prj.name)
        sd.build_all()
        assert sd.builders == ['docs', 'docs2']
        # Pickle output is template data; html output is raw content.
        pickle_builder = sd.get_builder('docs2')
        assert pickle_builder.template
        doc = pickle_builder.template_data('', {})
        assert doc['title'] == 'Unit testing sphinx docs'
        html_builder = sd.get_builder('docs')
        assert not html_builder.template
        raw = html_builder.raw_content('index.html', {})
        assert "<h1>Unit testing sphinx docs" in raw
class TestDocsPages(TestDocsHelpers):
    # Inline config: a single 'pages' builder in raw (passthrough) mode.
    conf = 'docs: {"pages": {"builder": "raw"}}'
    builder = 'raw'

    def test_pages_mode(self):
        prj = self._prj()
        self._add(prj, 'code_config.yaml', self.conf)
        self._add(prj, 'pages/index.html', self.html1)
        sd = SphinxDocs(prj.name)
        assert sd.builders == ['pages']
        # No build has run yet, so there is no build info.
        assert sd.last_build_info() is None
        sd.build_all()
        assert sd.last_build_info()['status'] == 'success'
        builder = sd.get_builder(sd.builders[0])
        # The raw builder serves the committed file verbatim.
        assert builder.raw_content('index.html', {}) == self.html1

    def test_pages_no_docsdir(self):
        prj = self._prj()
        self._add(prj, 'code_config.yaml', self.conf)
        # File committed under a directory the config does not point to.
        self._add(prj, 'pagesNOT_THE_SAME/index.html', self.html1)
        sd = SphinxDocs(prj.name)
        sd.build_all()
        assert sd.last_build_info()['status'] == 'no_doc_dir_found'
        builder = sd.get_builder(sd.builders[0])
        # Missing content is reported as False, not an exception.
        assert builder.raw_content('index.html', {}) is False

    def test_html_and_raw_builders(self):
        prj = self._prj()
        # Two builders ('docs' html, 'pages' raw) sharing one 'docs' dir.
        base_yaml_conf_two_builders = """
docs:
    docs:
        builder: html
        html_short_title: testsub
        dir: docs
        html_theme: default
    pages:
        builder: raw
        dir: docs
"""
        self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
        self._add(prj, 'docs/index.rst', base_index_rst)
        self._add(prj, 'docs/index.html', self.html1)
        self._add(prj, 'docs/doc1.rst', base_document1_rst)
        sd = SphinxDocs(prj.name)
        sd.build_all()
        assert sd.builders == ['docs', 'pages']
        # Raw builder returns the committed html file untouched...
        raw_builder = sd.get_builder('pages')
        doc = raw_builder.raw_content('index.html', {})
        assert doc == self.html1
        # ...while the html builder returns Sphinx-rendered output.
        html_builder = sd.get_builder('docs')
        assert not html_builder.template
        raw = html_builder.raw_content('index.html', {})
        assert "<h1>Unit testing sphinx docs" in raw

    def test_html_and_raw_builders_in_different_dirs(self):
        prj = self._prj()
        # Same two builders, each rooted in its own directory.
        base_yaml_conf_two_builders = """
docs:
    docs:
        builder: html
        html_short_title: testsub
        dir: html_docs
        html_theme: default
    pages:
        builder: raw
        dir: pages
"""
        self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
        self._add(prj, 'html_docs/index.rst', base_index_rst)
        self._add(prj, 'html_docs/doc1.rst', base_document1_rst)
        self._add(prj, 'pages/index.html', self.html1)
        sd = SphinxDocs(prj.name)
        sd.build_all()
        assert sd.builders == ['docs', 'pages']
        raw_builder = sd.get_builder('pages')
        doc = raw_builder.raw_content('index.html', {})
        assert doc == self.html1
        html_builder = sd.get_builder('docs')
        assert not html_builder.template
        raw = html_builder.raw_content('index.html', {})
        assert "<h1>Unit testing sphinx docs" in raw
class TestDocsPagesNewConf(TestDocsHelpers):
    # Variants of the builder config format, including explicit sort keys.

    def test_two_builders_with_other_config_fmt(self):
        prj = self._prj()
        base_yaml_conf_two_builders = """
docs:
    docs:
        builder: html
        html_theme: default
        html_short_title: testsub
        dir: html_docs
    pages:
        builder: raw
"""
        self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
        self._add(prj, 'html_docs/index.rst', base_index_rst)
        self._add(prj, 'html_docs/doc1.rst', base_document1_rst)
        self._add(prj, 'pages/index.html', self.html1)
        sd = SphinxDocs(prj.name)
        sd.build_all()
        assert sd.builders == ['docs', 'pages']  # noqa Sorted alphabetically by default
        raw_builder = sd.get_builder('pages')
        doc = raw_builder.raw_content('index.html', {})
        assert doc == self.html1
        html_builder = sd.get_builder('docs')
        assert not html_builder.template
        raw = html_builder.raw_content('index.html', {})
        assert "<h1>Unit testing sphinx docs" in raw

    def test_sort_key(self):
        prj = self._prj()
        # 'sort' keys override the default alphabetical builder ordering.
        # NOTE(review): YAML nesting reconstructed from a whitespace-mangled
        # source -- confirm 'docs'/'pages' sit under the inner 'docs' key.
        base_yaml_conf_two_builders = """
sphinx_docs:
    docs:
        docs:
            builder: html
            html_theme: default
            html_short_title: testsub
            sort: 2
        pages:
            builder: raw
            sort: 1
"""
        self._add(prj, 'code_config.yaml', base_yaml_conf_two_builders)
        sd = SphinxDocs(prj.name)
        # pages (sort: 1) must come before docs (sort: 2).
        assert sd.builders == ['pages', 'docs']
| bsd-3-clause |
nelmiux/CarnotKE | jyhton/lib-python/2.7/lib-tk/tkFont.py | 146 | 6104 | # Tkinter font wrapper
#
# written by Fredrik Lundh, February 1998
#
# FIXME: should add 'displayof' option where relevant (actual, families,
# measure, and metrics)
#
__version__ = "0.9"
import Tkinter
# weight/slant
NORMAL = "normal"
ROMAN = "roman"
BOLD = "bold"
ITALIC = "italic"
def nametofont(name):
    """Given the name of a tk named font, returns a Font representation.

    Raises Tkinter._tkinter.TclError if no named font by this name exists
    (see Font's 'exists' constructor option).
    """
    return Font(name=name, exists=True)
class Font:
"""Represents a named font.
Constructor options are:
font -- font specifier (name, system font, or (family, size, style)-tuple)
name -- name to use for this font configuration (defaults to a unique name)
exists -- does a named font by this name already exist?
Creates a new named font if False, points to the existing font if True.
Raises _Tkinter.TclError if the assertion is false.
the following are ignored if font is specified:
family -- font 'family', e.g. Courier, Times, Helvetica
size -- font size in points
weight -- font thickness: NORMAL, BOLD
slant -- font slant: ROMAN, ITALIC
underline -- font underlining: false (0), true (1)
overstrike -- font strikeout: false (0), true (1)
"""
def _set(self, kw):
options = []
for k, v in kw.items():
options.append("-"+k)
options.append(str(v))
return tuple(options)
def _get(self, args):
options = []
for k in args:
options.append("-"+k)
return tuple(options)
def _mkdict(self, args):
options = {}
for i in range(0, len(args), 2):
options[args[i][1:]] = args[i+1]
return options
def __init__(self, root=None, font=None, name=None, exists=False, **options):
if not root:
root = Tkinter._default_root
if font:
# get actual settings corresponding to the given font
font = root.tk.splitlist(root.tk.call("font", "actual", font))
else:
font = self._set(options)
if not name:
name = "font" + str(id(self))
self.name = name
if exists:
self.delete_font = False
# confirm font exists
if self.name not in root.tk.call("font", "names"):
raise Tkinter._tkinter.TclError, "named font %s does not already exist" % (self.name,)
# if font config info supplied, apply it
if font:
root.tk.call("font", "configure", self.name, *font)
else:
# create new font (raises TclError if the font exists)
root.tk.call("font", "create", self.name, *font)
self.delete_font = True
# backlinks!
self._root = root
self._split = root.tk.splitlist
self._call = root.tk.call
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name and isinstance(other, Font)
def __getitem__(self, key):
return self.cget(key)
def __setitem__(self, key, value):
self.configure(**{key: value})
def __del__(self):
try:
if self.delete_font:
self._call("font", "delete", self.name)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
def copy(self):
"Return a distinct copy of the current font"
return Font(self._root, **self.actual())
def actual(self, option=None):
"Return actual font attributes"
if option:
return self._call("font", "actual", self.name, "-"+option)
else:
return self._mkdict(
self._split(self._call("font", "actual", self.name))
)
def cget(self, option):
"Get font attribute"
return self._call("font", "config", self.name, "-"+option)
def config(self, **options):
"Modify font attributes"
if options:
self._call("font", "config", self.name,
*self._set(options))
else:
return self._mkdict(
self._split(self._call("font", "config", self.name))
)
configure = config
def measure(self, text):
"Return text width"
return int(self._call("font", "measure", self.name, text))
def metrics(self, *options):
"""Return font metrics.
For best performance, create a dummy widget
using this font before calling this method."""
if options:
return int(
self._call("font", "metrics", self.name, self._get(options))
)
else:
res = self._split(self._call("font", "metrics", self.name))
options = {}
for i in range(0, len(res), 2):
options[res[i][1:]] = int(res[i+1])
return options
def families(root=None):
    """Return the available font families as a tuple."""
    tk_root = root or Tkinter._default_root
    return tk_root.tk.splitlist(tk_root.tk.call("font", "families"))
def names(root=None):
    """Return the names of all defined named fonts as a tuple."""
    tk_root = root or Tkinter._default_root
    return tk_root.tk.splitlist(tk_root.tk.call("font", "names"))
# --------------------------------------------------------------------
# test stuff
# Manual smoke test (Python 2 syntax): exercises Font creation, metrics
# and widget styling; run this module directly to see it in action.

if __name__ == "__main__":

    root = Tkinter.Tk()

    # create a font
    f = Font(family="times", size=30, weight=NORMAL)

    print f.actual()
    print f.actual("family")
    print f.actual("weight")

    print f.config()
    print f.cget("family")
    print f.cget("weight")

    print names()

    print f.measure("hello"), f.metrics("linespace")

    print f.metrics()

    # font specified as a (family, size, style) tuple
    f = Font(font=("Courier", 20, "bold"))
    print f.measure("hello"), f.metrics("linespace")

    w = Tkinter.Label(root, text="Hello, world", font=f)
    w.pack()

    w = Tkinter.Button(root, text="Quit!", command=root.destroy)
    w.pack()

    # copy the button's font and bold it
    fb = Font(font=w["font"]).copy()
    fb.config(weight=BOLD)

    w.config(font=fb)

    Tkinter.mainloop()
| apache-2.0 |
Arcanemagus/SickRage | lib/chardet/hebrewprober.py | 289 | 13838 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
class HebrewProber(CharSetProber):
    """Disambiguates logical Hebrew (windows-1255) from visual Hebrew
    (ISO-8859-8).

    This prober never identifies a charset by itself: feed() accumulates
    final-letter evidence, and charset_name combines that evidence with the
    two windows-1255 model probers' confidences.  See the module comment
    above for the full background.
    """
    # windows-1255 / ISO-8859-8 code points of interest
    FINAL_KAF = 0xea
    NORMAL_KAF = 0xeb
    FINAL_MEM = 0xed
    NORMAL_MEM = 0xee
    FINAL_NUN = 0xef
    NORMAL_NUN = 0xf0
    FINAL_PE = 0xf3
    NORMAL_PE = 0xf4
    FINAL_TSADI = 0xf5
    NORMAL_TSADI = 0xf6

    # Membership is tested once per byte in feed(); build these frozensets
    # once at class-creation time instead of constructing and scanning a
    # fresh list on every is_final()/is_non_final() call.
    FINAL_LETTERS = frozenset([FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                               FINAL_TSADI])
    # The normal Tsadi is not a good Non-Final letter due to words like
    # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
    # apostrophe is converted to a space in FilterWithoutEnglishLetters
    # causing the Non-Final tsadi to appear at an end of a word even
    # though this is not the case in the original text.
    # The letters Pe and Kaf rarely display a related behavior of not being
    # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
    # for example legally end with a Non-Final Pe or Kaf. However, the
    # benefit of these letters as Non-Final letters outweighs the damage
    # since these words are quite rare.
    NON_FINAL_LETTERS = frozenset([NORMAL_KAF, NORMAL_MEM, NORMAL_NUN,
                                   NORMAL_PE])

    # Minimum Visual vs Logical final letter score difference.
    # If the difference is below this, don't rely solely on the final letter
    # score distance.
    MIN_FINAL_CHAR_DISTANCE = 5

    # Minimum Visual vs Logical model score difference.
    # If the difference is below this, don't rely at all on the model score
    # distance.
    MIN_MODEL_DISTANCE = 0.01

    VISUAL_HEBREW_NAME = "ISO-8859-8"
    LOGICAL_HEBREW_NAME = "windows-1255"

    def __init__(self):
        super(HebrewProber, self).__init__()
        self._final_char_logical_score = None
        self._final_char_visual_score = None
        self._prev = None
        self._before_prev = None
        self._logical_prober = None
        self._visual_prober = None
        self.reset()

    def reset(self):
        """Reset accumulated final-letter scores and the character context."""
        self._final_char_logical_score = 0
        self._final_char_visual_score = 0
        # The two last characters seen in the previous buffer,
        # mPrev and mBeforePrev are initialized to space in order to simulate
        # a word delimiter at the beginning of the data
        self._prev = ' '
        self._before_prev = ' '

    def set_model_probers(self, logicalProber, visualProber):
        """Attach the two model probers (owned by the group prober)."""
        self._logical_prober = logicalProber
        self._visual_prober = visualProber

    def is_final(self, c):
        """Return True if *c* is a final (word-ending) Hebrew letter."""
        return c in self.FINAL_LETTERS

    def is_non_final(self, c):
        """Return True if *c* is a non-final form used as evidence.

        Tsadi is deliberately excluded -- see the NON_FINAL_LETTERS comment.
        """
        return c in self.NON_FINAL_LETTERS

    def feed(self, byte_str):
        # Final letter analysis for logical-visual decision.
        # Look for evidence that the received buffer is either logical Hebrew
        # or visual Hebrew.
        # The following cases are checked:
        # 1) A word longer than 1 letter, ending with a final letter. This is
        #    an indication that the text is laid out "naturally" since the
        #    final letter really appears at the end. +1 for logical score.
        # 2) A word longer than 1 letter, ending with a Non-Final letter. In
        #    normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
        #    should not end with the Non-Final form of that letter. Exceptions
        #    to this rule are mentioned above in isNonFinal(). This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score
        # 3) A word longer than 1 letter, starting with a final letter. Final
        #    letters should not appear at the beginning of a word. This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score.
        #
        # The visual score and logical score are accumulated throughout the
        # text and are finally checked against each other in GetCharSetName().
        # No checking for final letters in the middle of words is done since
        # that case is not an indication for either Logical or Visual text.
        #
        # We automatically filter out all 7-bit characters (replace them with
        # spaces) so the word boundary detection works properly. [MAP]
        if self.state == ProbingState.NOT_ME:
            # Both model probers say it's not them. No reason to continue.
            return ProbingState.NOT_ME

        byte_str = self.filter_high_byte_only(byte_str)

        for cur in byte_str:
            if cur == ' ':
                # We stand on a space - a word just ended
                if self._before_prev != ' ':
                    # next-to-last char was not a space so self._prev is not a
                    # 1 letter word
                    if self.is_final(self._prev):
                        # case (1) [-2:not space][-1:final letter][cur:space]
                        self._final_char_logical_score += 1
                    elif self.is_non_final(self._prev):
                        # case (2) [-2:not space][-1:Non-Final letter][
                        #  cur:space]
                        self._final_char_visual_score += 1
            else:
                # Not standing on a space
                if ((self._before_prev == ' ') and
                        (self.is_final(self._prev)) and (cur != ' ')):
                    # case (3) [-2:space][-1:final letter][cur:not space]
                    self._final_char_visual_score += 1
            self._before_prev = self._prev
            self._prev = cur

        # Forever detecting, till the end or until both model probers return
        # ProbingState.NOT_ME (handled above)
        return ProbingState.DETECTING

    @property
    def charset_name(self):
        # Make the decision: is it Logical or Visual?
        # If the final letter score distance is dominant enough, rely on it.
        finalsub = self._final_char_logical_score - self._final_char_visual_score
        if finalsub >= self.MIN_FINAL_CHAR_DISTANCE:
            return self.LOGICAL_HEBREW_NAME
        if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE:
            return self.VISUAL_HEBREW_NAME

        # It's not dominant enough, try to rely on the model scores instead.
        modelsub = (self._logical_prober.get_confidence()
                    - self._visual_prober.get_confidence())
        if modelsub > self.MIN_MODEL_DISTANCE:
            return self.LOGICAL_HEBREW_NAME
        if modelsub < -self.MIN_MODEL_DISTANCE:
            return self.VISUAL_HEBREW_NAME

        # Still no good, back to final letter distance, maybe it'll save the
        # day.
        if finalsub < 0.0:
            return self.VISUAL_HEBREW_NAME

        # (finalsub > 0 - Logical) or (don't know what to do) default to
        # Logical.
        return self.LOGICAL_HEBREW_NAME

    @property
    def language(self):
        return 'Hebrew'

    @property
    def state(self):
        # Remain active as long as any of the model probers are active.
        if (self._logical_prober.state == ProbingState.NOT_ME) and \
                (self._visual_prober.state == ProbingState.NOT_ME):
            return ProbingState.NOT_ME
        return ProbingState.DETECTING
| gpl-3.0 |
PDuckworth/strands_qsr_lib | qsr_lib/src/qsrlib_qsrs/__init__.py | 7 | 1298 | from qsr_rcc2 import QSR_RCC2
from qsr_rcc3_rectangle_bounding_boxes_2d import QSR_RCC3_Rectangle_Bounding_Boxes_2D
from qsr_rcc4 import QSR_RCC4
from qsr_rcc5 import QSR_RCC5
from qsr_rcc8 import QSR_RCC8
from qsr_cardinal_direction import QSR_Cardinal_Direction
from qsr_qtc_b_simplified import QSR_QTC_B_Simplified
from qsr_qtc_c_simplified import QSR_QTC_C_Simplified
from qsr_qtc_bc_simplified import QSR_QTC_BC_Simplified
from qsr_arg_relations_distance import QSR_Arg_Relations_Distance
from qsr_arg_prob_relations_distance import QSR_Arg_Prob_Relations_Distance
from qsr_moving_or_stationary import QSR_Moving_or_Stationary
from qsr_new_mwe import QSR_MWE
from qsr_ra import QSR_RA
from qsr_tpcc import QSR_TPCC
# register new qsrs by class name below
# qsrs_registry is the lookup table through which qsrlib discovers the
# available QSR implementations; add any new QSR class to this tuple.
qsrs_registry = (QSR_RCC2,
                 QSR_RCC3_Rectangle_Bounding_Boxes_2D,
                 QSR_RCC4,
                 QSR_RCC5,
                 QSR_RCC8,
                 QSR_Cardinal_Direction,
                 QSR_QTC_B_Simplified,
                 QSR_QTC_C_Simplified,
                 QSR_QTC_BC_Simplified,
                 QSR_Arg_Relations_Distance,
                 QSR_Arg_Prob_Relations_Distance,
                 QSR_Moving_or_Stationary,
                 QSR_MWE,
                 QSR_RA,
                 QSR_TPCC)
| mit |
dxmahata/TwitterSentimentAnalysis | bson/dbref.py | 17 | 4971 | # Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating DBRefs (references to MongoDB documents)."""
from copy import deepcopy
from bson.son import SON
class DBRef(object):
    """A reference to a document stored in MongoDB.
    """

    # DBRef isn't actually a BSON "type" so this number was arbitrarily chosen.
    _type_marker = 100

    def __init__(self, collection, id, database=None, _extra=None, **kwargs):
        """Initialize a new :class:`DBRef`.

        Raises :class:`TypeError` if `collection` or `database` is not
        an instance of :class:`basestring` (:class:`str` in python 3).
        `database` is optional and allows references to documents to work
        across databases. Any additional keyword arguments will create
        additional fields in the resultant embedded document.

        :Parameters:
          - `collection`: name of the collection the document is stored in
          - `id`: the value of the document's ``"_id"`` field
          - `database` (optional): name of the database to reference
          - `**kwargs` (optional): additional keyword arguments will
            create additional, custom fields

        .. versionchanged:: 1.8
           Now takes keyword arguments to specify additional fields.
        .. versionadded:: 1.1.1
           The `database` parameter.

        .. mongodoc:: dbrefs
        """
        if not isinstance(collection, basestring):
            raise TypeError("collection must be an "
                            "instance of %s" % (basestring.__name__,))
        if database is not None and not isinstance(database, basestring):
            raise TypeError("database must be an "
                            "instance of %s" % (basestring.__name__,))

        self.__collection = collection
        self.__id = id
        self.__database = database
        # _extra previously defaulted to a shared mutable {} (classic
        # mutable-default pitfall); use None as the sentinel instead.
        # Passing an explicit dict (or {}) behaves exactly as before.
        kwargs.update(_extra or {})
        self.__kwargs = kwargs

    @property
    def collection(self):
        """Get the name of this DBRef's collection as unicode.
        """
        return self.__collection

    @property
    def id(self):
        """Get this DBRef's _id.
        """
        return self.__id

    @property
    def database(self):
        """Get the name of this DBRef's database.

        Returns None if this DBRef doesn't specify a database.

        .. versionadded:: 1.1.1
        """
        return self.__database

    def __getattr__(self, key):
        # Expose the extra/custom fields as attributes.
        try:
            return self.__kwargs[key]
        except KeyError:
            raise AttributeError(key)

    # Have to provide __setstate__ to avoid
    # infinite recursion since we override
    # __getattr__.
    def __setstate__(self, state):
        self.__dict__.update(state)

    def as_doc(self):
        """Get the SON document representation of this DBRef.

        Generally not needed by application developers
        """
        doc = SON([("$ref", self.collection),
                   ("$id", self.id)])
        if self.database is not None:
            doc["$db"] = self.database
        doc.update(self.__kwargs)
        return doc

    def __repr__(self):
        extra = "".join([", %s=%r" % (k, v)
                         for k, v in self.__kwargs.iteritems()])
        if self.database is None:
            return "DBRef(%r, %r%s)" % (self.collection, self.id, extra)
        return "DBRef(%r, %r, %r%s)" % (self.collection, self.id,
                                        self.database, extra)

    def __eq__(self, other):
        if isinstance(other, DBRef):
            us = (self.__database, self.__collection,
                  self.__id, self.__kwargs)
            them = (other.__database, other.__collection,
                    other.__id, other.__kwargs)
            return us == them
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        """Get a hash value for this :class:`DBRef`.

        .. versionadded:: 1.1
        """
        return hash((self.__collection, self.__id, self.__database,
                     tuple(sorted(self.__kwargs.items()))))

    def __deepcopy__(self, memo):
        """Support function for `copy.deepcopy()`.

        .. versionadded:: 1.10
        """
        return DBRef(deepcopy(self.__collection, memo),
                     deepcopy(self.__id, memo),
                     deepcopy(self.__database, memo),
                     deepcopy(self.__kwargs, memo))
rodrigolucianocosta/ProjectParking | ProjectParking/Parking/django-localflavor-1.1/tests/test_generic.py | 1 | 12114 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError, ImproperlyConfigured
from django.test import SimpleTestCase, TestCase
from django.utils import formats
from localflavor.generic.models import BICField, IBANField
from localflavor.generic.validators import BICValidator, IBANValidator
from localflavor.generic.forms import DateField, DateTimeField, SplitDateTimeField, BICFormField, IBANFormField
class DateTimeFieldTestCase(SimpleTestCase):
    # Base class for the field tests below: holds the expected default
    # input formats and a sequence-comparison helper.

    # Expected default date input formats of the generic localflavor fields.
    default_date_input_formats = (
        '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', '%b %d %Y', '%b %d, %Y',
        '%d %b %Y', '%d %b, %Y', '%B %d %Y', '%B %d, %Y', '%d %B %Y',
        '%d %B, %Y',
    )

    # Expected default datetime input formats.
    default_datetime_input_formats = (
        '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%Y-%m-%d', '%d/%m/%Y %H:%M:%S',
        '%d/%m/%Y %H:%M', '%d/%m/%Y', '%d/%m/%y %H:%M:%S', '%d/%m/%y %H:%M',
        '%d/%m/%y',
    )

    def assertInputFormats(self, field, formats):
        # Compare as sequences so tuples and lazy format lists interoperate.
        self.assertSequenceEqual(field.input_formats, formats)
class DateFieldTests(DateTimeFieldTestCase):
    """DateField: input_formats handling at construction time."""

    def setUp(self):
        self.default_input_formats = self.default_date_input_formats

    def test_init_no_input_formats(self):
        # Omitting input_formats falls back to the package defaults.
        self.assertInputFormats(DateField(), self.default_input_formats)

    def test_init_empty_input_formats(self):
        # An empty tuple is treated the same as "not given".
        self.assertInputFormats(DateField(input_formats=()),
                                self.default_input_formats)

    def test_init_custom_input_formats(self):
        custom = ('%m/%d/%Y', '%m/%d/%y')
        self.assertInputFormats(DateField(input_formats=custom), custom)
class DateTimeFieldTests(DateTimeFieldTestCase):
    """DateTimeField: input_formats handling at construction time."""

    def setUp(self):
        self.default_input_formats = self.default_datetime_input_formats

    def test_init_no_input_formats(self):
        # Omitting input_formats falls back to the package defaults.
        self.assertInputFormats(DateTimeField(), self.default_input_formats)

    def test_init_empty_input_formats(self):
        # An empty tuple is treated the same as "not given".
        self.assertInputFormats(DateTimeField(input_formats=()),
                                self.default_input_formats)

    def test_init_custom_input_formats(self):
        custom = ('%m/%d/%Y %H:%M', '%m/%d/%y %H:%M')
        self.assertInputFormats(DateTimeField(input_formats=custom), custom)
class SplitDateTimeFieldTests(DateTimeFieldTestCase):
    """Format handling of ``SplitDateTimeField`` (separate date/time sub-fields)."""
    # Lazily resolved so Django's TIME_INPUT_FORMATS setting is read at
    # access time rather than at import time.
    default_time_input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
    def test_init_no_input_formats(self):
        """Without arguments both sub-fields use their defaults."""
        field = SplitDateTimeField()
        date_field, time_field = field.fields
        self.assertInputFormats(date_field, self.default_date_input_formats)
        self.assertInputFormats(time_field, self.default_time_input_formats)
    def test_init_empty_input_formats(self):
        """An empty date-format tuple falls back to the defaults, while an
        empty time-format tuple is kept as-is (asymmetry under test)."""
        field = SplitDateTimeField(input_date_formats=(),
                                   input_time_formats=())
        date_field, time_field = field.fields
        self.assertInputFormats(date_field, self.default_date_input_formats)
        self.assertInputFormats(time_field, ())
    def test_init_custom_input_formats(self):
        """Explicit format tuples are passed through to both sub-fields."""
        date_input_formats = ('%m/%d/%Y', '%m/%d/%y')
        time_input_formats = ('%H:%M', '%H:%M:%S')
        field = SplitDateTimeField(input_date_formats=date_input_formats,
                                   input_time_formats=time_input_formats)
        date_field, time_field = field.fields
        self.assertInputFormats(date_field, date_input_formats)
        self.assertInputFormats(time_field, time_input_formats)
class IBANTests(TestCase):
    """Validation, normalisation and country restriction of IBAN fields."""
    def test_iban_validator(self):
        """Valid IBANs (any case, with spaces/dashes) pass; bad ones raise."""
        valid = [
            'GB82WeST12345698765432',
            'GB82 WEST 1234 5698 7654 32',
            'GR1601101250000000012300695',
            'GR16-0110-1250-0000-0001-2300-695',
            'GB29NWBK60161331926819',
            'GB29N-WB K6016-13319-26819',
            'SA0380000000608010167519',
            'SA0380 0 0000 06 0 8 0 1 0 1 6 7 519 ',
            'CH9300762011623852957',
            'IL620108000000099999999',
            'EE982200221111099080',
        ]
        # Maps bad IBAN -> expected validation message (length, country,
        # character and checksum failures).
        invalid = {
            'GB82WEST1234569876543': 'GB IBANs must contain 22 characters.',
            'CA34CIBC123425345': 'CA is not a valid country code for IBAN.',
            'GB29ÉWBK60161331926819': 'is not a valid character for IBAN.',
            'SA0380000000608019167519': 'Not a valid IBAN.',
            'EE012200221111099080': 'Not a valid IBAN.',
        }
        for iban in valid:
            IBANValidator(iban)
        for iban in invalid:
            self.assertRaisesMessage(ValidationError, invalid[iban], IBANValidator(), iban)
    def test_iban_fields(self):
        """ Test the IBAN model and form field. """
        # Maps raw input -> normalised (upper-case, separator-free) value.
        valid = {
            'NL02ABNA0123456789': 'NL02ABNA0123456789',
            'Nl02aBNa0123456789': 'NL02ABNA0123456789',
            'NL02 ABNA 0123 4567 89': 'NL02ABNA0123456789',
            'NL02-ABNA-0123-4567-89': 'NL02ABNA0123456789',
            'NL91ABNA0417164300': 'NL91ABNA0417164300',
            'NL91 ABNA 0417 1643 00': 'NL91ABNA0417164300',
            'NL91-ABNA-0417-1643-00': 'NL91ABNA0417164300',
            'MU17BOMM0101101030300200000MUR': 'MU17BOMM0101101030300200000MUR',
            'MU17 BOMM 0101 1010 3030 0200 000M UR': 'MU17BOMM0101101030300200000MUR',
            'MU 17BO MM01011010 3030-02 000-00M UR': 'MU17BOMM0101101030300200000MUR',
            'BE68539007547034': 'BE68539007547034',
            'BE68 5390 0754 7034': 'BE68539007547034',
            'BE-685390075470 34': 'BE68539007547034',
        }
        invalid = {
            'NL02ABNA012345678999': ['NL IBANs must contain 18 characters.'],
            'NL02 ABNA 0123 4567 8999': ['NL IBANs must contain 18 characters.'],
            'NL91ABNB0417164300': ['Not a valid IBAN.'],
            'NL91 ABNB 0417 1643 00': ['Not a valid IBAN.'],
            'MU17BOMM0101101030300200000MUR12345': [
                'MU IBANs must contain 30 characters.',
                'Ensure this value has at most 34 characters (it has 35).'],
            'MU17 BOMM 0101 1010 3030 0200 000M UR12 345': [
                'MU IBANs must contain 30 characters.',
                'Ensure this value has at most 34 characters (it has 35).'],
            # This IBAN should only be valid only if the Nordea extensions are turned on.
            'EG1100006001880800100014553': ['EG is not a valid country code for IBAN.'],
            'EG11 0000 6001 8808 0010 0014 553': ['EG is not a valid country code for IBAN.']
        }
        self.assertFieldOutput(IBANFormField, valid=valid, invalid=invalid)
        # Test valid inputs for model field.
        iban_model_field = IBANField()
        for input, output in valid.items():
            self.assertEqual(iban_model_field.clean(input, None), output)
        # Invalid inputs for model field.
        for input, errors in invalid.items():
            with self.assertRaises(ValidationError) as context_manager:
                iban_model_field.clean(input, None)
            # The error messages for models are in a different order.
            # NOTE(review): reverse() mutates the fixture list in place;
            # harmless here because each test call rebuilds the dict.
            errors.reverse()
            self.assertEqual(context_manager.exception.messages, errors)
    def test_nordea_extensions(self):
        """ Test a valid IBAN in the Nordea extensions. """
        iban_validator = IBANValidator(use_nordea_extensions=True)
        # Run the validator to ensure there are no ValidationErrors raised.
        iban_validator('Eg1100006001880800100014553')
    def test_form_field_formatting(self):
        """prepare_value() renders IBANs in display form (4-char groups)."""
        iban_form_field = IBANFormField()
        self.assertEqual(iban_form_field.prepare_value('NL02ABNA0123456789'), 'NL02 ABNA 0123 4567 89')
        self.assertEqual(iban_form_field.prepare_value('NL02 ABNA 0123 4567 89'), 'NL02 ABNA 0123 4567 89')
        self.assertIsNone(iban_form_field.prepare_value(None))
    def test_include_countries(self):
        """ Test the IBAN model and form include_countries feature. """
        include_countries = ('NL', 'BE', 'LU')
        valid = {
            'NL02ABNA0123456789': 'NL02ABNA0123456789',
            'BE68539007547034': 'BE68539007547034',
            'LU280019400644750000': 'LU280019400644750000'
        }
        invalid = {
            # This IBAN is valid but not for the configured countries.
            'GB82WEST12345698765432': ['GB IBANs are not allowed in this field.']
        }
        self.assertFieldOutput(IBANFormField, field_kwargs={'include_countries': include_countries},
                               valid=valid, invalid=invalid)
        # Test valid inputs for model field.
        iban_model_field = IBANField(include_countries=include_countries)
        for input, output in valid.items():
            self.assertEqual(iban_model_field.clean(input, None), output)
        # Invalid inputs for model field.
        for input, errors in invalid.items():
            with self.assertRaises(ValidationError) as context_manager:
                iban_model_field.clean(input, None)
            # The error messages for models are in a different order.
            # NOTE(review): reverse() mutates the fixture list in place;
            # harmless here because each test call rebuilds the dict.
            errors.reverse()
            self.assertEqual(context_manager.exception.messages, errors)
    def test_misconfigured_include_countries(self):
        """ Test that an IBAN field or model raises an error when asked to validate a country not part of IBAN.
        """
        # Test an unassigned ISO 3166-1 country code so that the tests will work even if a country joins IBAN.
        self.assertRaises(ImproperlyConfigured, IBANValidator, include_countries=('JJ',))
        self.assertRaises(ImproperlyConfigured, IBANValidator, use_nordea_extensions=True, include_countries=('JJ',))
        # Test a Nordea IBAN when Nordea extensions are turned off.
        self.assertRaises(ImproperlyConfigured, IBANValidator, include_countries=('AO',))
class BICTests(TestCase):
    """Validation and normalisation of BIC (SWIFT) codes."""
    def test_bic_validator(self):
        """Valid BICs in any case pass; None is accepted by the validator."""
        valid = [
            'DEUTDEFF',
            'deutdeff',
            'NEDSZAJJXXX',
            'NEDSZAJJxxx',
            'DABADKKK',
            'daBadKkK',
            'UNCRIT2B912',
            'DSBACNBXSHA',
            None,
        ]
        # Maps bad BIC -> expected validation message (length, country,
        # institution-code failures).
        invalid = {
            'NEDSZAJJXX': 'BIC codes have either 8 or 11 characters.',
            '': 'BIC codes have either 8 or 11 characters.',
            'CIBCJJH2': 'JJ is not a valid country code.',
            'DÉUTDEFF': 'is not a valid institution code.'
        }
        bic_validator = BICValidator()
        for bic in valid:
            bic_validator(bic)
        for bic in invalid:
            self.assertRaisesMessage(ValidationError, invalid[bic], BICValidator(), bic)
    def test_form_field_formatting(self):
        """The form field upper-cases input and maps None to ''."""
        bic_form_field = BICFormField()
        self.assertEqual(bic_form_field.prepare_value('deutdeff'), 'DEUTDEFF')
        self.assertIsNone(bic_form_field.prepare_value(None))
        self.assertEqual(bic_form_field.to_python(None), '')
    def test_bic_model_field(self):
        """Model field cleaning mirrors form-field validation."""
        valid = {
            'DEUTDEFF': 'DEUTDEFF',
            'NEDSZAJJXXX': 'NEDSZAJJXXX',
            'DABADKKK': 'DABADKKK',
            'UNCRIT2B912': 'UNCRIT2B912',
            'DSBACNBXSHA': 'DSBACNBXSHA'
        }
        invalid = {
            'NEDSZAJJXX': ['BIC codes have either 8 or 11 characters.'],
            'CIBCJJH2': ['JJ is not a valid country code.'],
            'D3UTDEFF': ['D3UT is not a valid institution code.']
        }
        self.assertFieldOutput(BICFormField, valid=valid, invalid=invalid)
        bic_model_field = BICField()
        # Test valid inputs for model field.
        for input, output in valid.items():
            self.assertEqual(bic_model_field.clean(input, None), output)
        self.assertIsNone(bic_model_field.to_python(None))
        # Invalid inputs for model field.
        for input, errors in invalid.items():
            with self.assertRaises(ValidationError) as context_manager:
                bic_model_field.clean(input, None)
            self.assertEqual(errors, context_manager.exception.messages)
| mpl-2.0 |
noobcoderT/ryu-3.21 | ryu/app/ofctl/event.py | 37 | 1645 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from ryu.controller import event
# base classes
class _RequestBase(event.EventRequestBase):
    """Base class for requests addressed to the ofctl service."""
    def __init__(self):
        # Chain up so EventRequestBase performs its own initialisation;
        # the original skipped this, leaving base-class state unset.
        super(_RequestBase, self).__init__()
        self.dst = 'ofctl_service'
class _ReplyBase(event.EventReplyBase):
    """Base class for replies from the ofctl service; adds no extra state."""
    pass
# get datapath
class GetDatapathRequest(_RequestBase):
    """Request resolution of a datapath from its datapath id (dpid)."""
    def __init__(self, dpid):
        # Reject non-integer dpids early; the service keys datapaths by int.
        assert isinstance(dpid, numbers.Integral)
        super(GetDatapathRequest, self).__init__()
        self.dpid = dpid
# send msg
class SendMsgRequest(_RequestBase):
    """Request that the ofctl service send the OpenFlow message ``msg``."""
    def __init__(self, msg, reply_cls=None, reply_multi=False):
        super(SendMsgRequest, self).__init__()
        self.msg = msg
        # Expected class of the reply message, or None when no reply is awaited.
        self.reply_cls = reply_cls
        # True when several reply messages are expected (presumably
        # multipart replies) -- confirm against the ofctl service.
        self.reply_multi = reply_multi
# generic reply
class Reply(_ReplyBase):
    """Generic reply carrying either a result or a deferred exception."""
    def __init__(self, result=None, exception=None):
        self.result = result
        self.exception = exception
    def __call__(self):
        # Re-raise a stored exception in the caller's context; otherwise
        # hand back the stored result.
        if not self.exception:
            return self.result
        raise self.exception
| apache-2.0 |
nprapps/dailygraphics | fabfile/flat.py | 1 | 2586 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
import copy
from fnmatch import fnmatch
import hashlib
import mimetypes
import os
from boto.s3.key import Key
import app_config
import utils
def deploy_file(src, dst, headers={}):
    """
    Deploy a single file to S3, if the local version is different.

    src is a local path, dst the destination key; ``headers`` are extra
    HTTP headers (Content-Type is guessed when absent).
    """
    bucket = utils.get_bucket(app_config.S3_BUCKET['bucket_name'])
    k = bucket.get_key(dst)
    s3_md5 = None
    if k:
        # NOTE(review): boto's etag equals the MD5 only for non-multipart
        # uploads -- presumably all uploads here are single-part; confirm.
        s3_md5 = k.etag.strip('"')
    else:
        k = Key(bucket)
        k.key = dst
    # Copy so neither the caller's dict nor the mutable default is mutated.
    file_headers = copy.copy(headers)
    if app_config.S3_BUCKET == app_config.STAGING_S3_BUCKET:
        policy = 'private'
    else:
        policy = 'public-read'
    if 'Content-Type' not in headers:
        file_headers['Content-Type'] = mimetypes.guess_type(src)[0]
        if file_headers['Content-Type'] == 'text/html':
            # Force character encoding header
            file_headers['Content-Type'] = '; '.join([
                file_headers['Content-Type'],
                'charset=utf-8'])
    # Hash the local file to decide whether an upload is needed at all.
    with open(src, 'rb') as f:
        local_md5 = hashlib.md5()
        local_md5.update(f.read())
        local_md5 = local_md5.hexdigest()
    if local_md5 == s3_md5:
        print 'Skipping %s (has not changed)' % src
    else:
        print 'Uploading %s --> %s' % (src, dst)
        k.set_contents_from_filename(src, file_headers, policy=policy)
def deploy_folder(src, dst, headers=None, ignore=()):
    """
    Deploy a folder to S3, checking each file to see if it has changed.

    Hidden files (leading dot) and paths matching any fnmatch pattern in
    ``ignore`` are skipped.
    """
    # Immutable defaults: the old ``headers={}``/``ignore=[]`` defaults were
    # shared between calls (mutable-default pitfall).
    headers = {} if headers is None else headers
    to_deploy = []
    for local_path, subdirs, filenames in os.walk(src, topdown=True):
        rel_path = os.path.relpath(local_path, src)
        for name in filenames:
            if name.startswith('.'):
                continue
            src_path = os.path.join(local_path, name)
            if any(fnmatch(src_path, pattern) for pattern in ignore):
                continue
            if rel_path == '.':
                dst_path = os.path.join(dst, name)
            else:
                dst_path = os.path.join(dst, rel_path, name)
            to_deploy.append((src_path, dst_path))
    # Distinct names so the loop no longer shadows the src/dst parameters.
    for src_path, dst_path in to_deploy:
        deploy_file(src_path, dst_path, headers)
def delete_folder(dst):
    """
    Delete a folder from S3.

    Removes every key under the ``dst/`` prefix in the configured bucket.
    """
    bucket = utils.get_bucket(app_config.S3_BUCKET['bucket_name'])
    # The trailing slash limits the listing to keys inside the folder.
    for key in bucket.list(prefix='%s/' % dst):
        print 'Deleting %s' % (key.key)
        key.delete()
| mit |
resmo/ansible | lib/ansible/modules/remote_management/ucs/ucs_ntp_server.py | 64 | 4688 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible metadata describing the module's support status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: ucs_ntp_server
short_description: Configures NTP server on Cisco UCS Manager
extends_documentation_fragment:
- ucs
description:
- Configures NTP server on Cisco UCS Manager.
- Examples can be used with the L(UCS Platform Emulator,https://communities.cisco.com/ucspe).
options:
state:
description:
- If C(absent), will remove an NTP server.
- If C(present), will add or update an NTP server.
choices: [absent, present]
default: present
ntp_server:
description:
- NTP server IP address or hostname.
- Enter up to 63 characters that form a valid hostname.
- Enter a valid IPV4 Address.
aliases: [ name ]
default: ""
description:
description:
- A user-defined description of the NTP server.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
default: ""
requirements:
- ucsmsdk
author:
- John McDonough (@movinalot)
- CiscoUcs (@CiscoUcs)
version_added: "2.7"
'''
EXAMPLES = r'''
- name: Configure NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: 10.10.10.10
description: Internal NTP Server by IP address
state: present
- name: Configure NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: pool.ntp.org
description: External NTP Server by hostname
state: present
- name: Remove NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: 10.10.10.10
state: absent
- name: Remove NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: pool.ntp.org
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def run_module():
    """Build the argument spec, apply the requested NTP-server state on
    UCS Manager and exit with the module result."""
    # Copy the shared base spec before extending it: calling .update() on
    # ucs_argument_spec itself would mutate the dict imported by every
    # other UCS module in the same process.
    argument_spec = dict(ucs_argument_spec)
    argument_spec.update(
        ntp_server=dict(type='str', aliases=['name']),
        description=dict(type='str', aliases=['descr'], default=''),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'present', ['ntp_server']],
        ],
    )
    # UCSModule verifies ucsmsdk is present and exits on failure. Imports are below ucs object creation.
    ucs = UCSModule(module)
    err = False
    from ucsmsdk.mometa.comm.CommNtpProvider import CommNtpProvider
    changed = False
    try:
        mo_exists = False
        props_match = False
        # Distinguished name of the NTP provider managed object.
        dn = 'sys/svc-ext/datetime-svc/ntp-' + module.params['ntp_server']
        mo = ucs.login_handle.query_dn(dn)
        if mo:
            mo_exists = True
        if module.params['state'] == 'absent':
            if mo_exists:
                if not module.check_mode:
                    ucs.login_handle.remove_mo(mo)
                    ucs.login_handle.commit()
                changed = True
        else:
            if mo_exists:
                # check top-level mo props
                kwargs = dict(descr=module.params['description'])
                if mo.check_prop_match(**kwargs):
                    props_match = True
            if not props_match:
                if not module.check_mode:
                    # update/add mo
                    mo = CommNtpProvider(parent_mo_or_dn='sys/svc-ext/datetime-svc',
                                         name=module.params['ntp_server'],
                                         descr=module.params['description'])
                    ucs.login_handle.add_mo(mo, modify_present=True)
                    ucs.login_handle.commit()
                changed = True
    except Exception as e:
        err = True
        ucs.result['msg'] = "setup error: %s " % str(e)
    ucs.result['changed'] = changed
    if err:
        module.fail_json(**ucs.result)
    module.exit_json(**ucs.result)
def main():
    """Module entry point used by Ansible."""
    run_module()
if __name__ == '__main__':
    main()
| gpl-3.0 |
Ziqi-Li/bknqgis | numpy/numpy/core/tests/test_ufunc.py | 8 | 55288 | from __future__ import division, absolute_import, print_function
import warnings
import itertools
import numpy as np
import numpy.core.umath_tests as umt
import numpy.core.operand_flag_tests as opflag_tests
from numpy.core.test_rational import rational, test_add, test_add_rationals
from numpy.testing import (
run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_no_warnings, assert_allclose,
)
class TestUfuncKwargs(object):
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
assert_raises(TypeError, np.add, 1, 2, subokx=False)
assert_raises(TypeError, np.add, 1, 2, wherex=[True])
def test_sig_signature(self):
assert_raises(ValueError, np.add, 1, 2, sig='ii->i',
signature='ii->i')
def test_sig_dtype(self):
assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i',
dtype=int)
assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i',
dtype=int)
class TestUfunc(object):
    def test_pickle(self):
        """Ufuncs pickle by name and unpickle to the very same object."""
        import pickle
        assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin)
        # Check that ufunc not defined in the top level numpy namespace such as
        # numpy.core.test_rational.test_add can also be pickled
        assert_(pickle.loads(pickle.dumps(test_add)) is test_add)
    def test_pickle_withstring(self):
        """A protocol-0 pickle stream written by an older numpy must still
        unpickle to the np.cos singleton (via _ufunc_reconstruct)."""
        import pickle
        astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
                   b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
        assert_(pickle.loads(astring) is np.cos)
def test_reduceat_shifting_sum(self):
L = 6
x = np.arange(L)
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
    def test_generic_loops(self):
        """Test generic loops.
        The loops to be tested are:
            PyUFunc_ff_f_As_dd_d
            PyUFunc_ff_f
            PyUFunc_dd_d
            PyUFunc_gg_g
            PyUFunc_FF_F_As_DD_D
            PyUFunc_DD_D
            PyUFunc_FF_F
            PyUFunc_GG_G
            PyUFunc_OO_O
            PyUFunc_OO_O_method
            PyUFunc_f_f_As_d_d
            PyUFunc_d_d
            PyUFunc_f_f
            PyUFunc_g_g
            PyUFunc_F_F_As_D_D
            PyUFunc_F_F
            PyUFunc_D_D
            PyUFunc_G_G
            PyUFunc_O_O
            PyUFunc_O_O_method
            PyUFunc_On_Om
        Where:
            f -- float
            d -- double
            g -- long double
            F -- complex float
            D -- complex double
            G -- complex long double
            O -- python object
        It is difficult to assure that each of these loops is entered from the
        Python level as the special cased loops are a moving target and the
        corresponding types are architecture dependent. We probably need to
        define C level testing ufuncs to get at them. For the time being, I've
        just looked at the signatures registered in the build directory to find
        relevant functions.
        Fixme, currently untested:
            PyUFunc_ff_f_As_dd_d
            PyUFunc_FF_F_As_DD_D
            PyUFunc_f_f_As_d_d
            PyUFunc_F_F_As_D_D
            PyUFunc_On_Om
        """
        # Representative unary/binary callables; with zero/one inputs both
        # evaluate to 1 (exp(0) == 1, 1**1 == 1).
        fone = np.exp
        ftwo = lambda x, y: x**y
        fone_val = 1
        ftwo_val = 1
        # The [0::2] slices below make the operands non-contiguous,
        # presumably to drive the generic strided inner loops rather than
        # any contiguous fast path -- confirm.
        # check unary PyUFunc_f_f.
        msg = "PyUFunc_f_f"
        x = np.zeros(10, dtype=np.single)[0::2]
        assert_almost_equal(fone(x), fone_val, err_msg=msg)
        # check unary PyUFunc_d_d.
        msg = "PyUFunc_d_d"
        x = np.zeros(10, dtype=np.double)[0::2]
        assert_almost_equal(fone(x), fone_val, err_msg=msg)
        # check unary PyUFunc_g_g.
        msg = "PyUFunc_g_g"
        x = np.zeros(10, dtype=np.longdouble)[0::2]
        assert_almost_equal(fone(x), fone_val, err_msg=msg)
        # check unary PyUFunc_F_F.
        msg = "PyUFunc_F_F"
        x = np.zeros(10, dtype=np.csingle)[0::2]
        assert_almost_equal(fone(x), fone_val, err_msg=msg)
        # check unary PyUFunc_D_D.
        msg = "PyUFunc_D_D"
        x = np.zeros(10, dtype=np.cdouble)[0::2]
        assert_almost_equal(fone(x), fone_val, err_msg=msg)
        # check unary PyUFunc_G_G.
        msg = "PyUFunc_G_G"
        x = np.zeros(10, dtype=np.clongdouble)[0::2]
        assert_almost_equal(fone(x), fone_val, err_msg=msg)
        # check binary PyUFunc_ff_f.
        msg = "PyUFunc_ff_f"
        x = np.ones(10, dtype=np.single)[0::2]
        assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
        # check binary PyUFunc_dd_d.
        msg = "PyUFunc_dd_d"
        x = np.ones(10, dtype=np.double)[0::2]
        assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
        # check binary PyUFunc_gg_g.
        msg = "PyUFunc_gg_g"
        x = np.ones(10, dtype=np.longdouble)[0::2]
        assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
        # check binary PyUFunc_FF_F.
        msg = "PyUFunc_FF_F"
        x = np.ones(10, dtype=np.csingle)[0::2]
        assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
        # check binary PyUFunc_DD_D.
        msg = "PyUFunc_DD_D"
        x = np.ones(10, dtype=np.cdouble)[0::2]
        assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
        # check binary PyUFunc_GG_G.
        msg = "PyUFunc_GG_G"
        x = np.ones(10, dtype=np.clongdouble)[0::2]
        assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
        # class to use in testing object method loops
        class foo(object):
            def conjugate(self):
                return np.bool_(1)
            def logical_xor(self, obj):
                return np.bool_(1)
        # check unary PyUFunc_O_O
        msg = "PyUFunc_O_O"
        x = np.ones(10, dtype=object)[0::2]
        assert_(np.all(np.abs(x) == 1), msg)
        # check unary PyUFunc_O_O_method
        msg = "PyUFunc_O_O_method"
        x = np.zeros(10, dtype=object)[0::2]
        for i in range(len(x)):
            x[i] = foo()
        assert_(np.all(np.conjugate(x) == True), msg)
        # check binary PyUFunc_OO_O
        msg = "PyUFunc_OO_O"
        x = np.ones(10, dtype=object)[0::2]
        assert_(np.all(np.add(x, x) == 2), msg)
        # check binary PyUFunc_OO_O_method
        msg = "PyUFunc_OO_O_method"
        x = np.zeros(10, dtype=object)[0::2]
        for i in range(len(x)):
            x[i] = foo()
        assert_(np.all(np.logical_xor(x, x)), msg)
        # check PyUFunc_On_Om
        # fixme -- I don't know how to do this yet
    def test_all_ufunc(self):
        """Try to check presence and results of all ufuncs.
        The list of ufuncs comes from generate_umath.py and is as follows:
        =====  ====  =============  ===============  ========================
        done   args  function       types            notes
        =====  ====  =============  ===============  ========================
        n      1     conjugate      nums + O
        n      1     absolute       nums + O         complex -> real
        n      1     negative       nums + O
        n      1     sign           nums + O         -> int
        n      1     invert         bool + ints + O  flts raise an error
        n      1     degrees        real + M         cmplx raise an error
        n      1     radians        real + M         cmplx raise an error
        n      1     arccos         flts + M
        n      1     arccosh        flts + M
        n      1     arcsin         flts + M
        n      1     arcsinh        flts + M
        n      1     arctan         flts + M
        n      1     arctanh        flts + M
        n      1     cos            flts + M
        n      1     sin            flts + M
        n      1     tan            flts + M
        n      1     cosh           flts + M
        n      1     sinh           flts + M
        n      1     tanh           flts + M
        n      1     exp            flts + M
        n      1     expm1          flts + M
        n      1     log            flts + M
        n      1     log10          flts + M
        n      1     log1p          flts + M
        n      1     sqrt           flts + M         real x < 0 raises error
        n      1     ceil           real + M
        n      1     trunc          real + M
        n      1     floor          real + M
        n      1     fabs           real + M
        n      1     rint           flts + M
        n      1     isnan          flts             -> bool
        n      1     isinf          flts             -> bool
        n      1     isfinite       flts             -> bool
        n      1     signbit        real             -> bool
        n      1     modf           real             -> (frac, int)
        n      1     logical_not    bool + nums + M  -> bool
        n      2     left_shift     ints + O         flts raise an error
        n      2     right_shift    ints + O         flts raise an error
        n      2     add            bool + nums + O  boolean + is ||
        n      2     subtract       bool + nums + O  boolean - is ^
        n      2     multiply       bool + nums + O  boolean * is &
        n      2     divide         nums + O
        n      2     floor_divide   nums + O
        n      2     true_divide    nums + O         bBhH -> f, iIlLqQ -> d
        n      2     fmod           nums + M
        n      2     power          nums + O
        n      2     greater        bool + nums + O  -> bool
        n      2     greater_equal  bool + nums + O  -> bool
        n      2     less           bool + nums + O  -> bool
        n      2     less_equal     bool + nums + O  -> bool
        n      2     equal          bool + nums + O  -> bool
        n      2     not_equal      bool + nums + O  -> bool
        n      2     logical_and    bool + nums + M  -> bool
        n      2     logical_or     bool + nums + M  -> bool
        n      2     logical_xor    bool + nums + M  -> bool
        n      2     maximum        bool + nums + O
        n      2     minimum        bool + nums + O
        n      2     bitwise_and    bool + ints + O  flts raise an error
        n      2     bitwise_or     bool + ints + O  flts raise an error
        n      2     bitwise_xor    bool + ints + O  flts raise an error
        n      2     arctan2        real + M
        n      2     remainder      ints + real + O
        n      2     hypot          real + M
        =====  ====  =============  ===============  ========================
        Types other than those listed will be accepted, but they are cast to
        the smallest compatible type for which the function is defined. The
        casting rules are:
        bool -> int8 -> float32
        ints -> double
        """
        # NOTE: placeholder -- the table above only documents intended
        # coverage ('done' is 'n' everywhere); nothing is executed yet.
        pass
def test_signature(self):
# the arguments to test_signature are: nin, nout, core_signature
# pass
assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1)
# pass. empty core signature; treat as plain ufunc (with trivial core)
assert_equal(umt.test_signature(2, 1, "(),()->()"), 0)
# in the following calls, a ValueError should be raised because
# of error in core signature
# FIXME These should be using assert_raises
# error: extra parenthesis
msg = "core_sig: extra parenthesis"
try:
ret = umt.test_signature(2, 1, "((i)),(i)->()")
assert_equal(ret, None, err_msg=msg)
except ValueError:
pass
# error: parenthesis matching
msg = "core_sig: parenthesis matching"
try:
ret = umt.test_signature(2, 1, "(i),)i(->()")
assert_equal(ret, None, err_msg=msg)
except ValueError:
pass
# error: incomplete signature. letters outside of parenthesis are ignored
msg = "core_sig: incomplete signature"
try:
ret = umt.test_signature(2, 1, "(i),->()")
assert_equal(ret, None, err_msg=msg)
except ValueError:
pass
# error: incomplete signature. 2 output arguments are specified
msg = "core_sig: incomplete signature"
try:
ret = umt.test_signature(2, 2, "(i),(i)->()")
assert_equal(ret, None, err_msg=msg)
except ValueError:
pass
# more complicated names for variables
assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1)
    def test_get_signature(self):
        # The core signature is exposed verbatim via the .signature attribute.
        assert_equal(umt.inner1d.signature, "(i),(i)->()")
    def test_forced_sig(self):
        """Forcing a loop via sig= must select that inner loop, for both
        plain calls and the out= variants."""
        a = 0.5*np.arange(3, dtype='f8')
        assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
        # With the int32 loop forced, both inputs are (unsafely) cast to
        # int first, so the 0.5 fractions truncate away: [0, 0, 1].
        assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
                            casting='unsafe'), [0, 0, 1])
        # Same expectations when writing into an explicit output array.
        b = np.zeros((3,), dtype='f8')
        np.add(a, 0.5, out=b)
        assert_equal(b, [0.5, 1, 1.5])
        b[:] = 0
        np.add(a, 0.5, sig='i', out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
    def test_true_divide(self):
        """Result values and dtypes of true_divide across all combinations
        of numeric input codes and forced output dtypes."""
        a = np.array(10)
        b = np.array(20)
        tgt = np.array(0.5)
        for tc in 'bhilqBHILQefdgFDG':
            dt = np.dtype(tc)
            aa = a.astype(dt)
            bb = b.astype(dt)
            # Check result value and dtype.
            for x, y in itertools.product([aa, -aa], [bb, -bb]):
                # Check with no output type specified
                if tc in 'FDG':
                    tgt = complex(x)/complex(y)
                else:
                    tgt = float(x)/float(y)
                res = np.true_divide(x, y)
                rtol = max(np.finfo(res).resolution, 1e-15)
                assert_allclose(res, tgt, rtol=rtol)
                # Integer inputs promote to float64; float/complex keep
                # their own precision.
                if tc in 'bhilqBHILQ':
                    assert_(res.dtype.name == 'float64')
                else:
                    assert_(res.dtype.name == dt.name )
                # Check with output type specified. This also checks for the
                # incorrect casts in issue gh-3484 because the unary '-' does
                # not change types, even for unsigned types, Hence casts in the
                # ufunc from signed to unsigned and vice versa will lead to
                # errors in the values.
                for tcout in 'bhilqBHILQ':
                    dtout = np.dtype(tcout)
                    assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
                for tcout in 'efdg':
                    dtout = np.dtype(tcout)
                    if tc in 'FDG':
                        # Casting complex to float is not allowed
                        assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
                    else:
                        tgt = float(x)/float(y)
                        rtol = max(np.finfo(dtout).resolution, 1e-15)
                        atol = max(np.finfo(dtout).tiny, 3e-308)
                        # Some test values result in invalid for float16.
                        with np.errstate(invalid='ignore'):
                            res = np.true_divide(x, y, dtype=dtout)
                        if not np.isfinite(res) and tcout == 'e':
                            continue
                        assert_allclose(res, tgt, rtol=rtol, atol=atol)
                        assert_(res.dtype.name == dtout.name)
                for tcout in 'FDG':
                    dtout = np.dtype(tcout)
                    tgt = complex(x)/complex(y)
                    rtol = max(np.finfo(dtout).resolution, 1e-15)
                    atol = max(np.finfo(dtout).tiny, 3e-308)
                    res = np.true_divide(x, y, dtype=dtout)
                    if not np.isfinite(res):
                        continue
                    assert_allclose(res, tgt, rtol=rtol, atol=atol)
                    assert_(res.dtype.name == dtout.name)
        # Check booleans
        a = np.ones((), dtype=np.bool_)
        res = np.true_divide(a, a)
        assert_(res == 1.0)
        assert_(res.dtype.name == 'float64')
        res = np.true_divide(~a, a)
        assert_(res == 0.0)
        assert_(res.dtype.name == 'float64')
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
a = np.ones(500, dtype=np.float64)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
    def test_sum(self):
        """np.sum of 1..v (triangular numbers) for several dtypes, plus
        strided/reversed slices and a length-1 reduction."""
        for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
            for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
                      128, 1024, 1235):
                # Expected value: v*(v+1)/2, evaluated in the target dtype.
                tgt = dt(v * (v + 1) / 2)
                d = np.arange(1, v + 1, dtype=dt)
                # warning if sum overflows, which it does in float16
                overflow = not np.isfinite(tgt)
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter("always")
                    assert_almost_equal(np.sum(d), tgt)
                    assert_equal(len(w), 1 * overflow)
                    assert_almost_equal(np.sum(d[::-1]), tgt)
                    assert_equal(len(w), 2 * overflow)
            # 500 ones sliced with step 2 -> 250 elements; step 3 -> 167.
            d = np.ones(500, dtype=dt)
            assert_almost_equal(np.sum(d[::2]), 250.)
            assert_almost_equal(np.sum(d[1::2]), 250.)
            assert_almost_equal(np.sum(d[::3]), 167.)
            assert_almost_equal(np.sum(d[1::3]), 167.)
            assert_almost_equal(np.sum(d[::-2]), 250.)
            assert_almost_equal(np.sum(d[-1::-2]), 250.)
            assert_almost_equal(np.sum(d[::-3]), 167.)
            assert_almost_equal(np.sum(d[-1::-3]), 167.)
            # sum with first reduction entry != 0
            d = np.ones((1,), dtype=dt)
            d += d
            assert_almost_equal(d, 2.)
    def test_sum_complex(self):
        """Same triangular-number sums as test_sum but for complex dtypes,
        with conjugate-symmetric real/imag parts."""
        for dt in (np.complex64, np.complex128, np.clongdouble):
            for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
                      128, 1024, 1235):
                # real part sums to v*(v+1)/2, imag to the negative of that.
                tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j)
                d = np.empty(v, dtype=dt)
                d.real = np.arange(1, v + 1)
                d.imag = -np.arange(1, v + 1)
                assert_almost_equal(np.sum(d), tgt)
                assert_almost_equal(np.sum(d[::-1]), tgt)
            # 500 (1+1j) values sliced with step 2 -> 250; step 3 -> 167.
            d = np.ones(500, dtype=dt) + 1j
            assert_almost_equal(np.sum(d[::2]), 250. + 250j)
            assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
            assert_almost_equal(np.sum(d[::3]), 167. + 167j)
            assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
            assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
            assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
            assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
            assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
            # sum with first reduction entry != 0
            d = np.ones((1,), dtype=dt) + 1j
            d += d
            assert_almost_equal(d, 2. + 2j)
    def test_inner1d(self):
        # inner1d computes the inner product over the last axis; compare
        # against an explicit elementwise product + sum for 2-D and 1-D input.
        a = np.arange(6).reshape((2, 3))
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
        a = np.arange(6)
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
    def test_broadcast(self):
        """Loop dimensions of a gufunc broadcast; core dimensions must not."""
        msg = "broadcast"
        a = np.arange(4).reshape((2, 1, 2))
        b = np.arange(4).reshape((1, 2, 2))
        assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
        msg = "extend & broadcast loop dimensions"
        b = np.arange(4).reshape((2, 2))
        assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
        # Broadcast in core dimensions should fail
        a = np.arange(8).reshape((4, 2))
        b = np.arange(4).reshape((4, 1))
        assert_raises(ValueError, umt.inner1d, a, b)
        # Extend core dimensions should fail
        a = np.arange(8).reshape((4, 2))
        b = np.array(7)
        assert_raises(ValueError, umt.inner1d, a, b)
        # Broadcast should fail
        a = np.arange(2).reshape((2, 1, 1))
        b = np.arange(3).reshape((3, 1, 1))
        assert_raises(ValueError, umt.inner1d, a, b)
    def test_type_cast(self):
        """Inputs are cast to a common type before the gufunc loop runs."""
        msg = "type cast"
        a = np.arange(6, dtype='short').reshape((2, 3))
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
                           err_msg=msg)
        msg = "type cast on one argument"
        a = np.arange(6).reshape((2, 3))
        b = a + 0.1
        assert_array_almost_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1),
                                  err_msg=msg)
    def test_endian(self):
        """Byte-swapped inputs work, and results are always native-endian."""
        msg = "big endian"
        a = np.arange(6, dtype='>i4').reshape((2, 3))
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
                           err_msg=msg)
        msg = "little endian"
        a = np.arange(6, dtype='<i4').reshape((2, 3))
        assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
                           err_msg=msg)
        # Output should always be native-endian
        Ba = np.arange(1, dtype='>f8')
        La = np.arange(1, dtype='<f8')
        assert_equal((Ba+Ba).dtype, np.dtype('f8'))
        assert_equal((Ba+La).dtype, np.dtype('f8'))
        assert_equal((La+Ba).dtype, np.dtype('f8'))
        assert_equal((La+La).dtype, np.dtype('f8'))
        assert_equal(np.absolute(La).dtype, np.dtype('f8'))
        assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
        assert_equal(np.negative(La).dtype, np.dtype('f8'))
        assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
    def test_incontiguous_array(self):
        """inner1d works on non-contiguous views (strided and transposed)."""
        msg = "incontiguous memory layout of array"
        x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
        # a and b are strided views into x, not copies.
        a = x[:, 0,:, 0,:, 0]
        b = x[:, 1,:, 1,:, 1]
        a[0, 0, 0] = -1
        msg2 = "make sure it references to the original array"
        # Writing through the view must show up in x, proving no copy was made.
        assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
        assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
        # Same check with a transposed (also non-contiguous) view.
        x = np.arange(24).reshape(2, 3, 4)
        a = x.T
        b = x.T
        a[0, 0, 0] = -1
        assert_equal(x[0, 0, 0], -1, err_msg=msg2)
        assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
    def test_output_argument(self):
        """inner1d writes into an out array, positional or keyword, with or
        without casting, and into non-contiguous outputs."""
        msg = "output argument"
        a = np.arange(12).reshape((2, 3, 2))
        b = np.arange(4).reshape((2, 1, 2)) + 1
        c = np.zeros((2, 3), dtype='int')
        # Positional output argument.
        umt.inner1d(a, b, c)
        assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
        c[:] = -1
        # Keyword output argument must behave identically.
        umt.inner1d(a, b, out=c)
        assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
        msg = "output argument with type cast"
        c = np.zeros((2, 3), dtype='int16')
        umt.inner1d(a, b, c)
        assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
        c[:] = -1
        umt.inner1d(a, b, out=c)
        assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
        msg = "output argument with incontiguous layout"
        # c[..., 0] is a strided (non-contiguous) slice of c.
        c = np.zeros((2, 3, 4), dtype='int16')
        umt.inner1d(a, b, c[..., 0])
        assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
        c[:] = -1
        umt.inner1d(a, b, out=c[..., 0])
        assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
a = np.arange(100, 124).reshape((2, 3, 4))
b = np.arange(200, 224).reshape((2, 3, 4))
w = np.arange(300, 324).reshape((2, 3, 4))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_innerwt_empty(self):
"""Test generalized ufunc with zero-sized operands"""
a = np.array([], dtype='f8')
b = np.array([], dtype='f8')
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
    def test_matrix_multiply(self):
        """Exercise the matrix-multiply gufunc comparison for two dtypes."""
        self.compare_matrix_multiply_results(np.long)
        self.compare_matrix_multiply_results(np.double)
    def test_matrix_multiply_umath_empty(self):
        """Matrix multiply with zero-sized core dimensions."""
        # (0,10) @ (10,0) -> empty (0,0) result.
        res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0)))
        assert_array_equal(res, np.zeros((0, 0)))
        # (10,0) @ (0,10) -> (10,10); the empty inner dimension sums to zero.
        res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10)))
        assert_array_equal(res, np.zeros((10, 10)))
    def compare_matrix_multiply_results(self, tp):
        """Compare umt.matrix_multiply against an explicit broadcasted sum
        for many permuted/sliced (hence variously strided) operand views.

        `tp` is the dtype to test with.
        """
        d1 = np.array(np.random.rand(2, 3, 4), dtype=tp)
        d2 = np.array(np.random.rand(2, 3, 4), dtype=tp)
        msg = "matrix multiply on type %s" % d1.dtype.name

        def permute_n(n):
            # All permutations of range(n), built recursively, each as a list.
            if n == 1:
                return ([0],)
            ret = ()
            base = permute_n(n-1)
            for perm in base:
                for i in range(n):
                    new = perm + [n-1]
                    new[n-1] = new[i]
                    new[i] = n-1
                    ret += (new,)
            return ret

        def slice_n(n):
            # All n-tuples mixing full slices and length-1 slices per axis.
            if n == 0:
                return ((),)
            ret = ()
            base = slice_n(n-1)
            for sl in base:
                ret += (sl+(slice(None),),)
                ret += (sl+(slice(0, 1),),)
            return ret

        def broadcastable(s1, s2):
            # Two loop-dimension lengths are compatible if equal or either is 1.
            return s1 == s2 or s1 == 1 or s2 == 1

        permute_3 = permute_n(3)
        # Also include a fully reversed (negative-stride) slicing.
        slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)

        ref = True
        for p1 in permute_3:
            for p2 in permute_3:
                for s1 in slice_3:
                    for s2 in slice_3:
                        a1 = d1.transpose(p1)[s1]
                        a2 = d2.transpose(p2)[s2]
                        # Every operand must be a view (share memory with its
                        # base), otherwise the striding being tested is lost.
                        ref = ref and a1.base is not None
                        ref = ref and a2.base is not None
                        # Only multiply when the core dimensions line up and
                        # the loop dimension can broadcast.
                        if (a1.shape[-1] == a2.shape[-2] and
                                broadcastable(a1.shape[0], a2.shape[0])):
                            assert_array_almost_equal(
                                umt.matrix_multiply(a1, a2),
                                np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
                                       a1[..., np.newaxis,:], axis=-1),
                                err_msg=msg + ' %s %s' % (str(a1.shape),
                                                          str(a2.shape)))

        assert_equal(ref, True, err_msg="reference check")
    def test_euclidean_pdist(self):
        """Pairwise-distance gufunc with signature (n,d)->(p)."""
        a = np.arange(12, dtype=float).reshape(4, 3)
        # p = n*(n-1)/2 condensed distances.
        out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
        umt.euclidean_pdist(a, out)
        # Reference: strict upper triangle of the full distance matrix.
        b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
        b = b[~np.tri(a.shape[0], dtype=bool)]
        assert_almost_equal(out, b)
        # An output array is required to determine p with signature (n,d)->(p)
        assert_raises(ValueError, umt.euclidean_pdist, a)
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
np.array([x or None for x in a], dtype=object))
assert_equal(np.logical_or(a, True),
np.array([x or True for x in a], dtype=object))
assert_equal(np.logical_or(a, 12),
np.array([x or 12 for x in a], dtype=object))
assert_equal(np.logical_or(a, "blah"),
np.array([x or "blah" for x in a], dtype=object))
assert_equal(np.logical_and(a, None),
np.array([x and None for x in a], dtype=object))
assert_equal(np.logical_and(a, True),
np.array([x and True for x in a], dtype=object))
assert_equal(np.logical_and(a, 12),
np.array([x and 12 for x in a], dtype=object))
assert_equal(np.logical_and(a, "blah"),
np.array([x and "blah" for x in a], dtype=object))
assert_equal(np.logical_not(a),
np.array([not x for x in a], dtype=object))
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.sum(a), 'abc')
assert_equal(np.max(a), 'c')
assert_equal(np.min(a), 'a')
a = np.array([True, False, True], dtype=object)
assert_equal(np.sum(a), 2)
assert_equal(np.prod(a), 0)
assert_equal(np.any(a), True)
assert_equal(np.all(a), False)
assert_equal(np.max(a), True)
assert_equal(np.min(a), False)
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
arr = np.ones(4, dtype=object)
arr[:] = [[1] for i in range(4)]
# Twice reproduced also for tuples:
np.add.accumulate(arr, out=arr)
np.add.accumulate(arr, out=arr)
assert_array_equal(arr, np.array([[1]*i for i in [1, 3, 6, 10]]))
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
np.add.accumulate(arr, out=arr, axis=-1)
np.add.accumulate(arr, out=arr, axis=-1)
assert_array_equal(arr[0, :], np.array([[2]*i for i in [1, 3, 6, 10]]))
def test_object_array_reduceat_inplace(self):
# Checks that in-place reduceats work, see also gh-7465
arr = np.empty(4, dtype=object)
arr[:] = [[1] for i in range(4)]
out = np.empty(4, dtype=object)
out[:] = [[1] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr)
np.add.reduceat(arr, np.arange(4), out=arr)
assert_array_equal(arr, out)
# And the same if the axis argument is used
arr = np.ones((2, 4), dtype=object)
arr[0, :] = [[2] for i in range(4)]
out = np.ones((2, 4), dtype=object)
out[0, :] = [[2] for i in range(4)]
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
np.add.reduceat(arr, np.arange(4), out=arr, axis=-1)
assert_array_equal(arr, out)
    def test_object_scalar_multiply(self):
        """Scalar * object matrix works in either operand order."""
        # Tickets #2469 and #4482
        arr = np.matrix([1, 2], dtype=object)
        desired = np.matrix([[3, 6]], dtype=object)
        assert_equal(np.multiply(arr, 3), desired)
        assert_equal(np.multiply(3, arr), desired)
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sum(a), 0)
assert_equal(np.prod(a), 1)
assert_equal(np.any(a), False)
assert_equal(np.all(a), True)
assert_raises(ValueError, np.max, a)
assert_raises(ValueError, np.min, a)
    def test_axis_out_of_bounds(self):
        """all/any raise AxisError for axes outside the array's ndim."""
        a = np.array([False, False])
        assert_raises(np.AxisError, a.all, axis=1)
        a = np.array([False, False])
        assert_raises(np.AxisError, a.all, axis=-2)
        a = np.array([False, False])
        assert_raises(np.AxisError, a.any, axis=1)
        a = np.array([False, False])
        assert_raises(np.AxisError, a.any, axis=-2)
    def test_scalar_reduction(self):
        """Reductions accept scalars (with axis=0) and preserve scalar types."""
        # The functions 'sum', 'prod', etc allow specifying axis=0
        # even for scalars
        assert_equal(np.sum(3, axis=0), 3)
        assert_equal(np.prod(3.5, axis=0), 3.5)
        assert_equal(np.any(True, axis=0), True)
        assert_equal(np.all(False, axis=0), False)
        assert_equal(np.max(3, axis=0), 3)
        assert_equal(np.min(2.5, axis=0), 2.5)
        # Check scalar behaviour for ufuncs without an identity
        assert_equal(np.power.reduce(3), 3)
        # Make sure that scalars are coming out from this operation
        assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
        assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
        assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
        assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
        # check if scalars/0-d arrays get cast
        assert_(type(np.any(0, axis=0)) is np.bool_)
        # assert that 0-d arrays get wrapped
        class MyArray(np.ndarray):
            # Trivial ndarray subclass used only to observe wrapping.
            pass
        a = np.array(1).view(MyArray)
        assert_(type(np.any(a)) is MyArray)
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200, 100), np.int64)
b = np.ones((200, 100), np.int64)
c = np.ones((200, 100), np.float64)
np.add(a, b, out=c)
assert_equal(c, 2)
a = np.zeros(65536)
b = np.zeros(65536, dtype=np.float32)
np.subtract(a, 0, out=b)
assert_equal(b, 0)
def test_where_param(self):
# Test that the where= ufunc parameter works with regular arrays
a = np.arange(7)
b = np.ones(7)
c = np.zeros(7)
np.add(a, b, out=c, where=(a % 2 == 1))
assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
a = np.arange(4).reshape(2, 2) + 2
np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
assert_equal(a, [[2, 27], [16, 5]])
# Broadcasting the where= parameter
np.subtract(a, 2, out=a, where=[True, False])
assert_equal(a, [[0, 27], [14, 5]])
    def test_where_param_buffer_output(self):
        """where= combined with a casting (buffered) output."""
        # This test is temporarily skipped because it requires
        # adding masking features to the nditer to work properly
        # NOTE(review): the comment above looks stale -- the body below does
        # execute; confirm against the test runner before relying on it.
        # With casting on output
        a = np.ones(10, np.int64)
        b = np.ones(10, np.int64)
        c = 1.5 * np.ones(10, np.float64)
        # Masked-out positions must keep their original 1.5 values.
        np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
        assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
def test_where_param_alloc(self):
# With casting and allocated output
a = np.array([1], dtype=np.int64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
# No casting and allocated output
a = np.array([1], dtype=np.float64)
m = np.array([True], dtype=bool)
assert_equal(np.sqrt(a, where=m), [1])
    def check_identityless_reduction(self, a):
        """Check np.minimum.reduce (an identityless reduction) on `a`.

        `a` must be a (2, 3, 4) array. A single zero is planted at a
        different position each round to verify that every axis combination
        visits every element regardless of memory order.
        """
        # np.minimum.reduce is a identityless reduction
        # Verify that it sees the zero at various positions
        a[...] = 1
        a[1, 0, 0] = 0
        assert_equal(np.minimum.reduce(a, axis=None), 0)
        assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
        assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
        assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
        assert_equal(np.minimum.reduce(a, axis=0),
                     [[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
        assert_equal(np.minimum.reduce(a, axis=1),
                     [[1, 1, 1, 1], [0, 1, 1, 1]])
        assert_equal(np.minimum.reduce(a, axis=2),
                     [[1, 1, 1], [0, 1, 1]])
        # axis=() reduces over no axes: the result equals the input.
        assert_equal(np.minimum.reduce(a, axis=()), a)

        a[...] = 1
        a[0, 1, 0] = 0
        assert_equal(np.minimum.reduce(a, axis=None), 0)
        assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
        assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
        assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
        assert_equal(np.minimum.reduce(a, axis=0),
                     [[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
        assert_equal(np.minimum.reduce(a, axis=1),
                     [[0, 1, 1, 1], [1, 1, 1, 1]])
        assert_equal(np.minimum.reduce(a, axis=2),
                     [[1, 0, 1], [1, 1, 1]])
        assert_equal(np.minimum.reduce(a, axis=()), a)

        a[...] = 1
        a[0, 0, 1] = 0
        assert_equal(np.minimum.reduce(a, axis=None), 0)
        assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
        assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
        assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
        assert_equal(np.minimum.reduce(a, axis=0),
                     [[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
        assert_equal(np.minimum.reduce(a, axis=1),
                     [[1, 0, 1, 1], [1, 1, 1, 1]])
        assert_equal(np.minimum.reduce(a, axis=2),
                     [[0, 1, 1], [1, 1, 1]])
        assert_equal(np.minimum.reduce(a, axis=()), a)
def test_identityless_reduction_corder(self):
a = np.empty((2, 3, 4), order='C')
self.check_identityless_reduction(a)
def test_identityless_reduction_forder(self):
a = np.empty((2, 3, 4), order='F')
self.check_identityless_reduction(a)
def test_identityless_reduction_otherorder(self):
a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig(self):
a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig_unaligned(self):
a = np.empty((3*4*5*8 + 1,), dtype='i1')
a = a[1:].view(dtype='f8')
a.shape = (3, 4, 5)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
    def test_reduce_zero_axis(self):
        """Reductions over zero-length axes: identityless ufuncs must raise
        exactly when a 0-element reduction is requested."""
        # If we have a n x m array and do a reduction with axis=1, then we are
        # doing n reductions, and each reduction takes an m-element array. For
        # a reduction operation without an identity, then:
        #   n > 0, m > 0: fine
        #   n = 0, m > 0: fine, doing 0 reductions of m-element arrays
        #   n > 0, m = 0: can't reduce a 0-element array, ValueError
        #   n = 0, m = 0: can't reduce a 0-element array, ValueError (for
        #     consistency with the above case)
        # This test doesn't actually look at return values, it just checks to
        # make sure that error we get an error in exactly those cases where we
        # expect one, and assumes the calculations themselves are done
        # correctly.

        def ok(f, *args, **kwargs):
            # Expectation: the call succeeds.
            f(*args, **kwargs)

        def err(f, *args, **kwargs):
            # Expectation: the call raises ValueError.
            assert_raises(ValueError, f, *args, **kwargs)

        def t(expect, func, n, m):
            # Apply `expect` (ok or err) across many shape/axis layouts built
            # from the loop count n and reduced length m.
            expect(func, np.zeros((n, m)), axis=1)
            expect(func, np.zeros((m, n)), axis=0)
            expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
            expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
            expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
            expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
            expect(func, np.zeros((m // 3, m // 3, m // 3,
                                   n // 2, n // 2)),
                   axis=(0, 1, 2))
            # Check what happens if the inner (resp. outer) dimensions are a
            # mix of zero and non-zero:
            expect(func, np.zeros((10, m, n)), axis=(0, 1))
            expect(func, np.zeros((10, n, m)), axis=(0, 2))
            expect(func, np.zeros((m, 10, n)), axis=0)
            expect(func, np.zeros((10, m, n)), axis=1)
            expect(func, np.zeros((10, n, m)), axis=2)

        # np.maximum is just an arbitrary ufunc with no reduction identity
        assert_equal(np.maximum.identity, None)
        t(ok, np.maximum.reduce, 30, 30)
        t(ok, np.maximum.reduce, 0, 30)
        t(err, np.maximum.reduce, 30, 0)
        t(err, np.maximum.reduce, 0, 0)
        err(np.maximum.reduce, [])
        np.maximum.reduce(np.zeros((0, 0)), axis=())
        # all of the combinations are fine for a reduction that has an
        # identity
        t(ok, np.add.reduce, 30, 30)
        t(ok, np.add.reduce, 0, 30)
        t(ok, np.add.reduce, 30, 0)
        t(ok, np.add.reduce, 0, 0)
        np.add.reduce([])
        np.add.reduce(np.zeros((0, 0)), axis=())
        # OTOH, accumulate always makes sense for any combination of n and m,
        # because it maps an m-element array to an m-element array. These
        # tests are simpler because accumulate doesn't accept multiple axes.
        for uf in (np.maximum, np.add):
            uf.accumulate(np.zeros((30, 0)), axis=0)
            uf.accumulate(np.zeros((0, 30)), axis=0)
            uf.accumulate(np.zeros((30, 30)), axis=0)
            uf.accumulate(np.zeros((0, 0)), axis=0)
def test_safe_casting(self):
# In old versions of numpy, in-place operations used the 'unsafe'
# casting rules. In versions >= 1.10, 'same_kind' is the
# default and an exception is raised instead of a warning.
# when 'same_kind' is not satisfied.
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_raises(TypeError, np.add, a, 1.1, out=a)
def add_inplace(a, b):
a += b
assert_raises(TypeError, add_inplace, a, 1.1)
# Make sure that explicitly overriding the exception is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [2, 3, 4])
    def test_ufunc_custom_out(self):
        """Ufunc with built-in input dtypes and a custom output dtype."""
        # `rational` and `test_add` come from the test extension module
        # imported at the top of this file -- TODO confirm against imports.
        a = np.array([0, 1, 2], dtype='i8')
        b = np.array([0, 1, 2], dtype='i8')
        c = np.empty(3, dtype=rational)
        # Output must be specified so numpy knows what
        # ufunc signature to look for
        result = test_add(a, b, c)
        assert_equal(result, np.array([0, 2, 4], dtype=rational))
        # no output type should raise TypeError
        assert_raises(TypeError, test_add, a, b)
    def test_operand_flags(self):
        """Custom ufunc whose first operand is flagged read-write (in-place)."""
        a = np.arange(16, dtype='l').reshape(4, 4)
        b = np.arange(9, dtype='l').reshape(3, 3)
        # Adds b into the top-left 3x3 sub-view of a, in place.
        opflag_tests.inplace_add(a[:-1, :-1], b)
        assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
                     [14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
        # In-place add also works on 0-d arrays, including with broadcasting.
        a = np.array(0)
        opflag_tests.inplace_add(a, 3)
        assert_equal(a, 3)
        opflag_tests.inplace_add(a, [3, 4])
        assert_equal(a, 10)
    def test_struct_ufunc(self):
        """Ufunc registered for a structured (record) dtype."""
        import numpy.core.struct_ufunc_test as struct_ufunc

        a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
        b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
        # Adds field-by-field on the structured dtype.
        result = struct_ufunc.add_triplet(a, b)
        assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
    def test_custom_ufunc(self):
        """Ufunc defined entirely for a user-defined dtype (rational)."""
        a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
                     dtype=rational)
        b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
                     dtype=rational)
        result = test_add_rationals(a, b)
        # Element-wise exact rational sums: 1/2+1/2, 1/3+1/3, 1/4+1/4.
        expected = np.array([rational(1), rational(2, 3), rational(1, 2)],
                            dtype=rational)
        assert_equal(result, expected)
    def test_custom_ufunc_forced_sig(self):
        """Forcing an unsupported signature must raise, not hang."""
        # gh-9351 - looking for a non-first userloop would previously hang
        assert_raises(TypeError,
                      np.multiply, rational(1), 1, signature=(rational, int, None))
    def test_custom_array_like(self):
        """A high-priority array-like defers scalar*obj to its __rmul__.

        Also checks numpy does not greedily index the object while deciding
        how to handle it (at most two __getitem__ calls).
        """
        class MyThing(object):
            __array_priority__ = 1000

            # Class-level counters record how numpy interacted with us.
            rmul_count = 0
            getitem_count = 0

            def __init__(self, shape):
                self.shape = shape

            def __len__(self):
                return self.shape[0]

            def __getitem__(self, i):
                MyThing.getitem_count += 1
                if not isinstance(i, tuple):
                    i = (i,)
                if len(i) > self.ndim:
                    raise IndexError("boo")

                return MyThing(self.shape[len(i):])

            def __rmul__(self, other):
                MyThing.rmul_count += 1
                return self

        np.float64(5)*MyThing((3, 3))
        assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
        assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
    def test_inplace_fancy_indexing(self):
        """ufunc.at: unbuffered in-place operation at fancy-indexed positions.

        Repeated indices (e.g. [2, 5, 2]) must each contribute, unlike
        plain fancy-index assignment.
        """
        # Repeated index 2 is incremented twice.
        a = np.arange(10)
        np.add.at(a, [2, 5, 2], 1)
        assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])

        # Array-valued second operand, still with a repeated index.
        a = np.arange(10)
        b = np.array([100, 100, 100])
        np.add.at(a, [2, 5, 2], b)
        assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])

        # Fancy index in the last dimension of a 2-D array.
        a = np.arange(9).reshape(3, 3)
        b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
        np.add.at(a, (slice(None), [1, 2, 1]), b)
        assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])

        a = np.arange(27).reshape(3, 3, 3)
        b = np.array([100, 200, 300])
        np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
        assert_equal(a,
            [[[0, 401, 202],
              [3, 404, 205],
              [6, 407, 208]],

             [[9, 410, 211],
              [12, 413, 214],
              [15, 416, 217]],

             [[18, 419, 220],
              [21, 422, 223],
              [24, 425, 226]]])

        # Fancy index in the first dimension.
        a = np.arange(9).reshape(3, 3)
        b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
        np.add.at(a, ([1, 2, 1], slice(None)), b)
        assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])

        a = np.arange(27).reshape(3, 3, 3)
        b = np.array([100, 200, 300])
        np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
        assert_equal(a,
            [[[0, 1, 2],
              [203, 404, 605],
              [106, 207, 308]],

             [[9, 10, 11],
              [212, 413, 614],
              [115, 216, 317]],

             [[18, 19, 20],
              [221, 422, 623],
              [124, 225, 326]]])

        # Mixing a scalar index with a fancy index.
        a = np.arange(9).reshape(3, 3)
        b = np.array([100, 200, 300])
        np.add.at(a, (0, [1, 2, 1]), b)
        assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])

        a = np.arange(27).reshape(3, 3, 3)
        b = np.array([100, 200, 300])
        np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
        assert_equal(a,
            [[[0, 1, 2],
              [3, 4, 5],
              [6, 7, 8]],

             [[209, 410, 611],
              [12, 13, 14],
              [15, 16, 17]],

             [[118, 219, 320],
              [21, 22, 23],
              [24, 25, 26]]])

        # All-slice index: behaves like broadcast in-place addition.
        a = np.arange(27).reshape(3, 3, 3)
        b = np.array([100, 200, 300])
        np.add.at(a, (slice(None), slice(None), slice(None)), b)
        assert_equal(a,
            [[[100, 201, 302],
              [103, 204, 305],
              [106, 207, 308]],

             [[109, 210, 311],
              [112, 213, 314],
              [115, 216, 317]],

             [[118, 219, 320],
              [121, 222, 323],
              [124, 225, 326]]])

        # Unary ufunc: negating index 2 twice cancels out.
        a = np.arange(10)
        np.negative.at(a, [2, 5, 2])
        assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])

        # Test 0-dim array
        a = np.array(0)
        np.add.at(a, (), 1)
        assert_equal(a, 1)

        assert_raises(IndexError, np.add.at, a, 0, 1)
        assert_raises(IndexError, np.add.at, a, [], 1)

        # Test mixed dtypes
        a = np.arange(10)
        np.power.at(a, [1, 2, 3, 2], 3.5)
        assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))

        # Test boolean indexing and boolean ufuncs
        a = np.arange(10)
        index = a % 2 == 0
        np.equal.at(a, index, [0, 2, 4, 6, 8])
        assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])

        # Test unary operator
        a = np.arange(10, dtype='u4')
        np.invert.at(a, [2, 5, 2])
        assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])

        # Test empty subspace
        orig = np.arange(4)
        a = orig[:, None][:, 0:0]
        np.add.at(a, [0, 1], 3)
        assert_array_equal(orig, np.arange(4))

        # Test with swapped byte order
        index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
        values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
        np.add.at(values, index, 3)
        assert_array_equal(values, [1, 8, 6, 4])

        # Test exception thrown
        values = np.array(['a', 1], dtype=object)
        assert_raises(TypeError, np.add.at, values, [0, 1], 1)
        assert_array_equal(values, np.array(['a', 1], dtype=object))

        # Test multiple output ufuncs raise error, gh-5665
        assert_raises(ValueError, np.modf.at, np.arange(10), [1])
    def test_reduce_arguments(self):
        """ufunc.reduce argument handling: positional/keyword mixing and the
        TypeErrors raised for invalid axis, dtype, out, and unknown names."""
        f = np.add.reduce
        d = np.ones((5,2), dtype=int)
        # Expected result of reducing over axis 0.
        o = np.ones((2,), dtype=d.dtype)
        r = o * 5

        assert_equal(f(d), r)
        # a, axis=0, dtype=None, out=None, keepdims=False
        assert_equal(f(d, axis=0), r)
        assert_equal(f(d, 0), r)
        assert_equal(f(d, 0, dtype=None), r)
        assert_equal(f(d, 0, dtype='i'), r)
        assert_equal(f(d, 0, 'i'), r)
        assert_equal(f(d, 0, None), r)
        assert_equal(f(d, 0, None, out=None), r)
        assert_equal(f(d, 0, None, out=o), r)
        assert_equal(f(d, 0, None, o), r)
        assert_equal(f(d, 0, None, None), r)
        assert_equal(f(d, 0, None, None, keepdims=False), r)
        assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))

        # multiple keywords
        assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
        assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
        assert_equal(f(d, 0, None, out=None, keepdims=False), r)

        # too little
        assert_raises(TypeError, f)
        # too much
        assert_raises(TypeError, f, d, 0, None, None, False, 1)
        # invalid axis
        assert_raises(TypeError, f, d, "invalid")
        assert_raises(TypeError, f, d, axis="invalid")
        assert_raises(TypeError, f, d, axis="invalid", dtype=None,
                      keepdims=True)
        # invalid dtype
        assert_raises(TypeError, f, d, 0, "invalid")
        assert_raises(TypeError, f, d, dtype="invalid")
        assert_raises(TypeError, f, d, dtype="invalid", out=None)
        # invalid out
        assert_raises(TypeError, f, d, 0, None, "invalid")
        assert_raises(TypeError, f, d, out="invalid")
        assert_raises(TypeError, f, d, out="invalid", dtype=None)
        # keepdims boolean, no invalid value
        # assert_raises(TypeError, f, d, 0, None, None, "invalid")
        # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None)
        # invalid mix
        assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid",
                      out=None)
        # invalid keyword
        assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0)
        assert_raises(TypeError, f, d, invalid=0)
        assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid",
                      out=None)
        assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True,
                      out=None, invalid=0)
        assert_raises(TypeError, f, d, axis=0, dtype=None,
                      out=None, invalid=0)
    def test_structured_equal(self):
        """Comparison of structured arrays via an __array_ufunc__ override."""
        # https://github.com/numpy/numpy/issues/4855

        class MyA(np.ndarray):
            # Delegate every ufunc to plain ndarray views of the inputs.
            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
                return getattr(ufunc, method)(*(input.view(np.ndarray)
                                                for input in inputs), **kwargs)

        a = np.arange(12.).reshape(4,3)
        ra = a.view(dtype=('f8,f8,f8')).squeeze()
        mra = ra.view(MyA)

        # Only the first record equals ra[0].
        target = np.array([ True, False, False, False], dtype=bool)
        assert_equal(np.all(target == (mra == ra[0])), True)
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
binary_funcs = [
np.power, np.add, np.subtract, np.multiply, np.divide,
np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.logical_and, np.logical_or, np.logical_xor, np.maximum,
np.minimum, np.mod
]
# These functions still return NotImplemented. Will be fixed in
# future.
# bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]
a = np.array('1')
b = 1
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
# appropriately.
#
# gh-8036
x = np.arange(7*13*8, dtype=np.int16).reshape(7, 13, 8)
x = x[4:6,1:11:6,1:5].transpose(1, 2, 0)
y_base = np.arange(4*4, dtype=np.int16).reshape(4, 4)
y = y_base[::2,:]
y_base_copy = y_base.copy()
r0 = np.add.reduce(x, out=y.copy(), axis=2)
r1 = np.add.reduce(x, out=y, axis=2)
# The results should match, and y_base shouldn't get clobbered
assert_equal(r0, r1)
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
    def test_no_doc_string(self):
        """A ufunc created without a docstring gets a single-line __doc__."""
        # gh-9337
        assert_('\n' not in umt.inner1d_no_doc.__doc__)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    run_module_suite()
| gpl-2.0 |
justin-ho/passwd-mng | pycrypto-2.6.1/lib/Crypto/Protocol/KDF.py | 123 | 5071 | #
# KDF.py : a collection of Key Derivation Functions
#
# Part of the Python Cryptography Toolkit
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""This file contains a collection of standard key derivation functions.
A key derivation function derives one or more secondary secret keys from
one primary secret (a master key or a pass phrase).
This is typically done to insulate the secondary keys from each other,
to avoid that leakage of a secondary key compromises the security of the
master key, or to thwart attacks on pass phrases (e.g. via rainbow tables).
:undocumented: __revision__
"""
__revision__ = "$Id$"
import math
import struct
from Crypto.Util.py3compat import *
from Crypto.Hash import SHA as SHA1, HMAC
from Crypto.Util.strxor import strxor
def PBKDF1(password, salt, dkLen, count=1000, hashAlgo=None):
    """Derive one key from a password (or passphrase).

    This function performs key derivation according an old version of
    the PKCS#5 standard (v1.5).

    This algorithm is called ``PBKDF1``. Even though it is still described
    in the latest version of the PKCS#5 standard (version 2, or RFC2898),
    newer applications should use the more secure and versatile `PBKDF2` instead.

    :Parameters:
     password : string
        The secret password or pass phrase to generate the key from.
     salt : byte string
        An 8 byte string to use for better protection from dictionary attacks.
        This value does not need to be kept secret, but it should be randomly
        chosen for each derivation.
     dkLen : integer
        The length of the desired key. It must not exceed the digest size
        of the chosen hash algorithm.
     count : integer
        The number of iterations to carry out. It's recommended to use at least 1000.
     hashAlgo : module
        The hash algorithm to use, as a module or an object from the `Crypto.Hash` package.
        The digest length must be no shorter than ``dkLen``.
        The default algorithm is `SHA1`.

    :Return: A byte string of length `dkLen` that can be used as key.
    """
    if not hashAlgo:
        hashAlgo = SHA1
    password = tobytes(password)
    # T_1 = Hash(password || salt)
    pHash = hashAlgo.new(password+salt)
    digest = pHash.digest_size
    # PBKDF1 cannot produce more bytes than a single digest provides.
    if dkLen>digest:
        raise ValueError("Selected hash algorithm has a too short digest (%d bytes)." % digest)
    if len(salt)!=8:
        raise ValueError("Salt is not 8 bytes long.")
    # T_i = Hash(T_{i-1}); iterate `count` times in total (one hash was
    # already applied above).
    for i in xrange(count-1):
        pHash = pHash.new(pHash.digest())
    # The derived key is the leading dkLen bytes of the final digest.
    return pHash.digest()[:dkLen]
def PBKDF2(password, salt, dkLen=16, count=1000, prf=None):
    """Derive one or more keys from a password (or passphrase).

    This performs key derivation according to the PKCS#5 standard (v2.0),
    by means of the ``PBKDF2`` algorithm.

    :Parameters:
     password : string
        The secret password or pass phrase to generate the key from.
     salt : string
        A string to use for better protection from dictionary attacks.
        This value does not need to be kept secret, but it should be randomly
        chosen for each derivation. It is recommended to be at least 8 bytes long.
     dkLen : integer
        The cumulative length of the desired keys. Default is 16 bytes, suitable for instance for `Crypto.Cipher.AES`.
     count : integer
        The number of iterations to carry out. It's recommended to use at least 1000.
     prf : callable
        A pseudorandom function. It must be a function that returns a pseudorandom string
        from two parameters: a secret and a salt. If not specified, HMAC-SHA1 is used.

    :Return: A byte string of length `dkLen` that can be used as key material.
        If you wanted multiple keys, just break up this string into segments of the desired length.
    """
    password = tobytes(password)
    if prf is None:
        # RFC 2898 default PRF: HMAC with SHA-1.
        prf = lambda p,s: HMAC.new(p,s,SHA1).digest()
    key = b('')
    i = 1
    # Produce successive blocks T_i = F(password, salt, count, i) and
    # concatenate them until enough key material is available.
    while len(key)<dkLen:
        # U_1 = PRF(password, salt || INT(i)), big-endian 32-bit i.
        U = previousU = prf(password,salt+struct.pack(">I", i))
        # U_j = PRF(password, U_{j-1}); T_i is the XOR of all U_j.
        for j in xrange(count-1):
            previousU = t = prf(password,previousU)
            U = strxor(U,t)
        key += U
        i = i + 1
    # Truncate the concatenated blocks to the requested length.
    return key[:dkLen]
| gpl-3.0 |
ceci/pygments-hack | pygments/styles/tango.py | 75 | 7096 | # -*- coding: utf-8 -*-
"""
pygments.styles.tango
~~~~~~~~~~~~~~~~~~~~~
The Crunchy default Style inspired from the color palette from
the Tango Icon Theme Guidelines.
http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines
Butter: #fce94f #edd400 #c4a000
Orange: #fcaf3e #f57900 #ce5c00
Chocolate: #e9b96e #c17d11 #8f5902
Chameleon: #8ae234 #73d216 #4e9a06
Sky Blue: #729fcf #3465a4 #204a87
Plum: #ad7fa8 #75507b #5c35cc
Scarlet Red:#ef2929 #cc0000 #a40000
Aluminium: #eeeeec #d3d7cf #babdb6
#888a85 #555753 #2e3436
Not all of the above colors are used; other colors added:
very light grey: #f8f8f8 (for background)
This style can be used as a template as it includes all the known
Token types, unlike most (if not all) of the styles included in the
Pygments distribution.
However, since Crunchy is intended to be used by beginners, we have strived
to create a style that gloss over subtle distinctions between different
categories.
Taking Python for example, comments (Comment.*) and docstrings (String.Doc)
have been chosen to have the same style. Similarly, keywords (Keyword.*),
and Operator.Word (and, or, in) have been assigned the same style.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class TangoStyle(Style):
    """
    The Crunchy default Style inspired from the color palette from
    the Tango Icon Theme Guidelines.
    """

    # work in progress...

    # Very light grey page background (not part of the Tango palette itself).
    background_color = "#f8f8f8"
    default_style = ""

    # Maps Pygments token types to style strings; the trailing comment on each
    # entry records the CSS class Pygments emits for that token.
    styles = {
        # No corresponding class for the following:
        #Text: "", # class: ''
        Whitespace: "underline #f8f8f8", # class: 'w'
        Error: "#a40000 border:#ef2929", # class: 'err'
        Other: "#000000", # class 'x'
        Comment: "italic #8f5902", # class: 'c'
        Comment.Multiline: "italic #8f5902", # class: 'cm'
        Comment.Preproc: "italic #8f5902", # class: 'cp'
        Comment.Single: "italic #8f5902", # class: 'c1'
        Comment.Special: "italic #8f5902", # class: 'cs'
        Keyword: "bold #204a87", # class: 'k'
        Keyword.Constant: "bold #204a87", # class: 'kc'
        Keyword.Declaration: "bold #204a87", # class: 'kd'
        Keyword.Namespace: "bold #204a87", # class: 'kn'
        Keyword.Pseudo: "bold #204a87", # class: 'kp'
        Keyword.Reserved: "bold #204a87", # class: 'kr'
        Keyword.Type: "bold #204a87", # class: 'kt'
        Operator: "bold #ce5c00", # class: 'o'
        Operator.Word: "bold #204a87", # class: 'ow' - like keywords
        Punctuation: "bold #000000", # class: 'p'
        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name: "#000000", # class: 'n'
        Name.Attribute: "#c4a000", # class: 'na' - to be revised
        Name.Builtin: "#204a87", # class: 'nb'
        Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
        Name.Class: "#000000", # class: 'nc' - to be revised
        Name.Constant: "#000000", # class: 'no' - to be revised
        Name.Decorator: "bold #5c35cc", # class: 'nd' - to be revised
        Name.Entity: "#ce5c00", # class: 'ni'
        Name.Exception: "bold #cc0000", # class: 'ne'
        Name.Function: "#000000", # class: 'nf'
        Name.Property: "#000000", # class: 'py'
        Name.Label: "#f57900", # class: 'nl'
        Name.Namespace: "#000000", # class: 'nn' - to be revised
        Name.Other: "#000000", # class: 'nx'
        Name.Tag: "bold #204a87", # class: 'nt' - like a keyword
        Name.Variable: "#000000", # class: 'nv' - to be revised
        Name.Variable.Class: "#000000", # class: 'vc' - to be revised
        Name.Variable.Global: "#000000", # class: 'vg' - to be revised
        Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
        # since the tango light blue does not show up well in text, we choose
        # a pure blue instead.
        Number: "bold #0000cf", # class: 'm'
        Number.Float: "bold #0000cf", # class: 'mf'
        Number.Hex: "bold #0000cf", # class: 'mh'
        Number.Integer: "bold #0000cf", # class: 'mi'
        Number.Integer.Long: "bold #0000cf", # class: 'il'
        Number.Oct: "bold #0000cf", # class: 'mo'
        Literal: "#000000", # class: 'l'
        Literal.Date: "#000000", # class: 'ld'
        String: "#4e9a06", # class: 's'
        String.Backtick: "#4e9a06", # class: 'sb'
        String.Char: "#4e9a06", # class: 'sc'
        String.Doc: "italic #8f5902", # class: 'sd' - like a comment
        String.Double: "#4e9a06", # class: 's2'
        String.Escape: "#4e9a06", # class: 'se'
        String.Heredoc: "#4e9a06", # class: 'sh'
        String.Interpol: "#4e9a06", # class: 'si'
        String.Other: "#4e9a06", # class: 'sx'
        String.Regex: "#4e9a06", # class: 'sr'
        String.Single: "#4e9a06", # class: 's1'
        String.Symbol: "#4e9a06", # class: 'ss'
        Generic: "#000000", # class: 'g'
        Generic.Deleted: "#a40000", # class: 'gd'
        Generic.Emph: "italic #000000", # class: 'ge'
        Generic.Error: "#ef2929", # class: 'gr'
        Generic.Heading: "bold #000080", # class: 'gh'
        Generic.Inserted: "#00A000", # class: 'gi'
        Generic.Output: "italic #000000", # class: 'go'
        Generic.Prompt: "#8f5902", # class: 'gp'
        Generic.Strong: "bold #000000", # class: 'gs'
        Generic.Subheading: "bold #800080", # class: 'gu'
        Generic.Traceback: "bold #a40000", # class: 'gt'
    }
| bsd-2-clause |
lzw120/django | django/contrib/gis/tests/geo3d/models.py | 404 | 1835 | from django.contrib.gis.db import models
class City3D(models.Model):
    """City whose location is a 3D (x, y, z) point; dim=3 enables the Z coordinate."""
    name = models.CharField(max_length=30)
    point = models.PointField(dim=3)
    objects = models.GeoManager()

    def __unicode__(self):
        # Python 2 text representation: the city name.
        return self.name
class Interstate2D(models.Model):
    """Interstate as a 2D linestring in NAD83 geographic coordinates (SRID 4269)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=4269)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Interstate3D(models.Model):
    """3D variant of Interstate2D (dim=3 adds the Z coordinate), same SRID 4269."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=4269)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class InterstateProj2D(models.Model):
    """Interstate as a 2D linestring in a projected coordinate system (SRID 32140)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class InterstateProj3D(models.Model):
    """3D variant of InterstateProj2D (dim=3), same projected SRID 32140."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Polygon2D(models.Model):
    """Named 2D polygon in the projected coordinate system SRID 32140."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Polygon3D(models.Model):
    """3D variant of Polygon2D (dim=3), same projected SRID 32140."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(dim=3, srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Point2D(models.Model):
    """Bare 2D point with the default SRID (no explicit srid argument)."""
    point = models.PointField()
    objects = models.GeoManager()
class Point3D(models.Model):
    """Bare 3D point (dim=3 enables the Z coordinate)."""
    point = models.PointField(dim=3)
    objects = models.GeoManager()
class MultiPoint3D(models.Model):
    """Collection of 3D points (dim=3 enables the Z coordinate)."""
    mpoint = models.MultiPointField(dim=3)
    objects = models.GeoManager()
| bsd-3-clause |
twz915/django | django/core/checks/security/sessions.py | 134 | 2558 | from django.conf import settings
from .. import Tags, Warning, register
def add_session_cookie_message(message):
    """Append the rationale for enabling SESSION_COOKIE_SECURE to *message*."""
    rationale = (
        " Using a secure-only session cookie makes it more difficult for "
        "network traffic sniffers to hijack user sessions."
    )
    return message + rationale
# Deployment-check warnings for SESSION_COOKIE_SECURE, one per detection
# context: sessions app installed (W010), session middleware enabled (W011),
# or both (W012, emitted instead of the other two).
W010 = Warning(
    add_session_cookie_message(
        "You have 'django.contrib.sessions' in your INSTALLED_APPS, "
        "but you have not set SESSION_COOKIE_SECURE to True."
    ),
    id='security.W010',
)

W011 = Warning(
    add_session_cookie_message(
        "You have 'django.contrib.sessions.middleware.SessionMiddleware' "
        "in your MIDDLEWARE, but you have not set "
        "SESSION_COOKIE_SECURE to True."
    ),
    id='security.W011',
)

W012 = Warning(
    add_session_cookie_message("SESSION_COOKIE_SECURE is not set to True."),
    id='security.W012',
)
def add_httponly_message(message):
    """Append the rationale for enabling SESSION_COOKIE_HTTPONLY to *message*."""
    rationale = (
        " Using an HttpOnly session cookie makes it more difficult for "
        "cross-site scripting attacks to hijack user sessions."
    )
    return message + rationale
# Deployment-check warnings for SESSION_COOKIE_HTTPONLY, mirroring W010-W012:
# sessions app installed (W013), session middleware enabled (W014), or both
# (W015, emitted instead of the other two).
W013 = Warning(
    add_httponly_message(
        "You have 'django.contrib.sessions' in your INSTALLED_APPS, "
        "but you have not set SESSION_COOKIE_HTTPONLY to True.",
    ),
    id='security.W013',
)

W014 = Warning(
    add_httponly_message(
        "You have 'django.contrib.sessions.middleware.SessionMiddleware' "
        "in your MIDDLEWARE, but you have not set "
        "SESSION_COOKIE_HTTPONLY to True."
    ),
    id='security.W014',
)

W015 = Warning(
    add_httponly_message("SESSION_COOKIE_HTTPONLY is not set to True."),
    id='security.W015',
)
@register(Tags.security, deploy=True)
def check_session_cookie_secure(app_configs, **kwargs):
    """
    Warn when SESSION_COOKIE_SECURE is off while the sessions app and/or
    session middleware are in use.  If both apply, the two specific warnings
    collapse into the generic W012.
    """
    if settings.SESSION_COOKIE_SECURE:
        return []
    errors = []
    if _session_app():
        errors.append(W010)
    if _session_middleware():
        errors.append(W011)
    return [W012] if len(errors) > 1 else errors
@register(Tags.security, deploy=True)
def check_session_cookie_httponly(app_configs, **kwargs):
    """
    Warn when SESSION_COOKIE_HTTPONLY is off while the sessions app and/or
    session middleware are in use.  If both apply, the two specific warnings
    collapse into the generic W015.
    """
    if settings.SESSION_COOKIE_HTTPONLY:
        return []
    errors = []
    if _session_app():
        errors.append(W013)
    if _session_middleware():
        errors.append(W014)
    return [W015] if len(errors) > 1 else errors
def _session_middleware():
    """Return True if SessionMiddleware is listed in settings.MIDDLEWARE."""
    middleware_path = 'django.contrib.sessions.middleware.SessionMiddleware'
    return middleware_path in settings.MIDDLEWARE
def _session_app():
    """Return True if the sessions app is listed in settings.INSTALLED_APPS."""
    app_path = "django.contrib.sessions"
    return app_path in settings.INSTALLED_APPS
| bsd-3-clause |
rbarbe/qBittorrent | src/searchengine/nova/engines/kickasstorrents.py | 7 | 3165 | #VERSION: 1.28
#AUTHORS: Christophe Dumez (chris@qbittorrent.org)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
import json
class kickasstorrents(object):
url = 'https://kat.cr'
name = 'Kickass Torrents'
supported_categories = {'all': '', 'movies': 'Movies', 'tv': 'TV', 'music': 'Music', 'games': 'Games', 'software': 'Applications'}
def __init__(self):
pass
def download_torrent(self, info):
print download_file(info, info)
def search(self, what, cat='all'):
i = 1
while True and i < 11:
json_data = retrieve_url(self.url+'/json.php?q=%s&page=%d'%(what, i))
try:
json_dict = json.loads(json_data)
except:
i += 1
continue
if int(json_dict['total_results']) <= 0:
return
for r in json_dict['list']:
try:
if cat != 'all' and self.supported_categories[cat] != r['category']:
continue
res_dict = dict()
res_dict['name'] = r['title']
res_dict['size'] = str(r['size'])
res_dict['seeds'] = r['seeds']
res_dict['leech'] = r['leechs']
res_dict['link'] = r['torrentLink']
res_dict['desc_link'] = r['link'].replace('http://', 'https://')
res_dict['engine_url'] = self.url
prettyPrinter(res_dict)
except:
pass
i += 1
| gpl-2.0 |
industrydive/mezzanine | mezzanine/blog/management/commands/import_blogger.py | 8 | 4375 | from __future__ import unicode_literals
from datetime import datetime, timedelta
from optparse import make_option
from time import timezone
import re
from django.core.management.base import CommandError
from mezzanine.blog.management.base import BaseImporterCommand
# TODO: update this to use v3 of the blogger API.
class Command(BaseImporterCommand):
    """
    Implements a Blogger importer. Takes a Blogger ID in order to be able to
    determine which blog it should point to and harvest the XML from.
    """

    option_list = BaseImporterCommand.option_list + (
        make_option("-b", "--blogger-id", dest="blog_id",
                    help="Blogger Blog ID from blogger dashboard"),
    )

    def handle_import(self, options):
        """
        Gets posts from Blogger.

        Pages through the blog's feed 500 posts at a time via the (legacy)
        gdata client, creating a local post plus its comments for each entry.
        Raises CommandError when no blog ID is given, when gdata is missing,
        or when the service reports an error.
        """
        blog_id = options.get("blog_id")
        if blog_id is None:
            raise CommandError("Usage is import_blogger %s" % self.args)
        try:
            from gdata import service
        except ImportError:
            raise CommandError("Could not import the gdata library.")
        blogger = service.GDataService()
        blogger.service = "blogger"
        blogger.server = "www.blogger.com"
        start_index = 1
        processed_posts = []
        # Loop until a page yields no new (non-duplicate) posts.
        new_posts = 1
        while new_posts:
            new_posts = 0
            query = service.Query()
            query.feed = "/feeds/%s/posts/full" % blog_id
            query.max_results = 500
            query.start_index = start_index
            try:
                feed = blogger.Get(query.ToUri())
            except service.RequestError as err:
                message = "There was a service error. The response was: " \
                          "%(status)s %(reason)s - %(body)s" % err.message
                raise CommandError(message, blogger.server + query.feed,
                                   err.message["status"])
            for (i, entry) in enumerate(feed.entry):
                # this basically gets the unique post ID from the URL to itself
                # and pulls the ID off the end.
                post_id = entry.GetSelfLink().href.split("/")[-1]
                # Skip duplicate posts. Important for the last query.
                if post_id in processed_posts:
                    continue
                title = entry.title.text
                content = entry.content.text
                # this strips off the time zone info off the end as we want UTC
                clean_date = entry.published.text[:re.search(r"\.\d{3}",
                    entry.published.text).end()]
                # `timezone` is time.timezone: the local UTC offset in seconds,
                # used to shift the parsed local time to UTC.
                published_date = datetime.strptime(clean_date,
                    "%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
                # TODO - issues with content not generating correct <P> tags
                tags = [tag.term for tag in entry.category]
                post = self.add_post(title=title, content=content,
                                     pub_date=published_date, tags=tags)
                # get the comments from the post feed and then add them to
                # the post details
                comment_url = "/feeds/%s/%s/comments/full?max-results=1000"
                comments = blogger.Get(comment_url % (blog_id, post_id))
                for comment in comments.entry:
                    email = comment.author[0].email.text
                    author_name = comment.author[0].name.text
                    # Strip off the time zone info off the end as we want UTC
                    clean_date = comment.published.text[:re.search(r"\.\d{3}",
                        comment.published.text).end()]
                    comment_date = datetime.strptime(clean_date,
                        "%Y-%m-%dT%H:%M:%S.%f") - timedelta(seconds=timezone)
                    website = ""
                    if comment.author[0].uri:
                        website = comment.author[0].uri.text
                    body = comment.content.text
                    # add the comment as a dict to the end of the comments list
                    self.add_comment(post=post, name=author_name, email=email,
                                     body=body, website=website,
                                     pub_date=comment_date)
                processed_posts.append(post_id)
                new_posts += 1
            start_index += 500
mathspace/django | django/utils/feedgenerator.py | 22 | 17939 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
import warnings
from django.utils import datetime_safe, six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.timezone import utc
from django.utils.xmlutils import SimplerXMLGenerator
def rfc2822_date(date):
    """
    Format *date* as an RFC 2822 date string (used e.g. for RSS <pubDate>).

    Naive datetimes are historically treated as UTC and rendered with the
    '-0000' offset; aware datetimes use their own UTC offset.
    """
    # We can't use strftime() because it produces locale-dependent results, so
    # we have to map english month and day names manually
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    # We do this ourselves to be timezone aware, email.Utils is not tz aware.
    dow = days[date.weekday()]
    month = months[date.month - 1]
    time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
    if six.PY2:             # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')
    offset = date.utcoffset()
    # Historically, this function assumes that naive datetimes are in UTC.
    if offset is None:
        return time_str + '-0000'
    else:
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        hour, minute = divmod(timezone, 60)
        return time_str + '%+03d%02d' % (hour, minute)
def rfc3339_date(date):
    """
    Format *date* as an RFC 3339 date string (used e.g. for Atom timestamps).

    Naive datetimes are historically treated as UTC and rendered with 'Z';
    aware datetimes use their own UTC offset.
    """
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
    if six.PY2:             # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')
    offset = date.utcoffset()
    # Historically, this function assumes that naive datetimes are in UTC.
    if offset is None:
        return time_str + 'Z'
    else:
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        hour, minute = divmod(timezone, 60)
        return time_str + '%+03d:%02d' % (hour, minute)
def get_tag_uri(url, date):
    """
    Creates a TagURI.

    See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    parsed = urlparse(url)
    if date is None:
        date_part = ''
    else:
        date_part = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
    return 'tag:%s%s:%s/%s' % (parsed.hostname, date_part, parsed.path, parsed.fragment)
class SyndicationFeed(object):
    "Base class for all syndication feeds. Subclasses should provide write()"

    def __init__(self, title, link, description, language=None, author_email=None,
                 author_name=None, author_link=None, subtitle=None, categories=None,
                 feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        # Normalize all channel-level metadata to text/IRIs and store it in
        # self.feed; extra keyword arguments are merged in verbatim so
        # subclasses can carry custom channel fields.
        def to_unicode(s):
            return force_text(s, strings_only=True)
        if categories:
            categories = [force_text(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        self.feed = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'language': to_unicode(language),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_unicode(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_unicode(feed_copyright),
            'id': feed_guid or link,
            'ttl': ttl,
        }
        self.feed.update(kwargs)
        self.items = []

    def add_item(self, title, link, description, author_email=None,
                 author_name=None, author_link=None, pubdate=None, comments=None,
                 unique_id=None, unique_id_is_permalink=None, enclosure=None,
                 categories=(), item_copyright=None, ttl=None, updateddate=None,
                 enclosures=None, **kwargs):
        """
        Adds an item to the feed. All args are expected to be Python Unicode
        objects except pubdate and updateddate, which are datetime.datetime
        objects, and enclosures, which is an iterable of instances of the
        Enclosure class.
        """
        def to_unicode(s):
            return force_text(s, strings_only=True)
        if categories:
            categories = [to_unicode(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        if enclosure is None:
            enclosures = [] if enclosures is None else enclosures
        else:
            # Legacy single-enclosure argument: deprecated in favor of the
            # `enclosures` list, but still accepted for backward compatibility.
            warnings.warn(
                "The enclosure keyword argument is deprecated, "
                "use enclosures instead.",
                RemovedInDjango20Warning,
                stacklevel=2,
            )
            enclosures = [enclosure]
        item = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'updateddate': updateddate,
            'comments': to_unicode(comments),
            'unique_id': to_unicode(unique_id),
            'unique_id_is_permalink': unique_id_is_permalink,
            'enclosures': enclosures,
            'categories': categories or (),
            'item_copyright': to_unicode(item_copyright),
            'ttl': ttl,
        }
        item.update(kwargs)
        self.items.append(item)

    def num_items(self):
        # Number of items added so far.
        return len(self.items)

    def root_attributes(self):
        """
        Return extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}

    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass

    def item_attributes(self, item):
        """
        Return extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}

    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass

    def write(self, outfile, encoding):
        """
        Outputs the feed in the given encoding to outfile, which is a file-like
        object. Subclasses should override this.
        """
        raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')

    def writeString(self, encoding):
        """
        Returns the feed in the given encoding as a string.
        """
        s = StringIO()
        self.write(s, encoding)
        return s.getvalue()

    def latest_post_date(self):
        """
        Returns the latest item's pubdate or updateddate. If no items
        have either of these attributes this returns the current UTC date/time.
        """
        latest_date = None
        date_keys = ('updateddate', 'pubdate')
        for item in self.items:
            for date_key in date_keys:
                item_date = item.get(date_key)
                if item_date:
                    if latest_date is None or item_date > latest_date:
                        latest_date = item_date
        # datetime.now(tz=utc) is slower, as documented in django.utils.timezone.now
        return latest_date or datetime.datetime.utcnow().replace(tzinfo=utc)
class Enclosure(object):
    "Represents an RSS enclosure"

    def __init__(self, url, length, mime_type):
        "All args are expected to be Python Unicode objects"
        self.length, self.mime_type = length, mime_type
        # URL is converted from IRI to a URI suitable for XML output.
        self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
    # Common base for RSS feeds; subclasses set _version and item rendering.
    content_type = 'application/rss+xml; charset=utf-8'

    def write(self, outfile, encoding):
        # Serialize the full <rss><channel>...</channel></rss> document.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")

    def rss_attributes(self):
        # Root attributes: the RSS version plus the Atom namespace used for
        # the <atom:link rel="self"> element.
        return {"version": self._version,
                "xmlns:atom": "http://www.w3.org/2005/Atom"}

    def write_items(self, handler):
        # Emit one <item> element per feed item.
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")

    def add_root_elements(self, handler):
        # Channel-level metadata; optional fields are emitted only when set.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", self.feed['link'])
        handler.addQuickElement("description", self.feed['description'])
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("atom:link", None, {"rel": "self", "href": self.feed['feed_url']})
        if self.feed['language'] is not None:
            handler.addQuickElement("language", self.feed['language'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", cat)
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", self.feed['feed_copyright'])
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
        if self.feed['ttl'] is not None:
            handler.addQuickElement("ttl", self.feed['ttl'])

    def endChannelElement(self, handler):
        handler.endElement("channel")

    @property
    def mime_type(self):
        # Deprecated alias for content_type.
        warnings.warn(
            'The mime_type attribute of RssFeed is deprecated. '
            'Use content_type instead.',
            RemovedInDjango20Warning, stacklevel=2
        )
        return self.content_type
class RssUserland091Feed(RssFeed):
    # Minimal RSS 0.91 items: title, link and an optional description only.
    _version = "0.91"

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
    # Spec: http://blogs.law.harvard.edu/tech/rss
    _version = "2.0"

    def add_item_elements(self, handler, item):
        # Render one RSS 2.0 <item>; optional fields are emitted only when set.
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])

        # Author information.
        if item["author_name"] and item["author_email"]:
            handler.addQuickElement("author", "%s (%s)" % (item['author_email'], item['author_name']))
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            # Name without email: fall back to the Dublin Core creator element.
            handler.addQuickElement(
                "dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"}
            )

        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            guid_attrs = {}
            if isinstance(item.get('unique_id_is_permalink'), bool):
                guid_attrs['isPermaLink'] = str(item['unique_id_is_permalink']).lower()
            handler.addQuickElement("guid", item['unique_id'], guid_attrs)
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])

        # Enclosure.
        if item['enclosures']:
            enclosures = list(item['enclosures'])
            if len(enclosures) > 1:
                # The RSS 2.0 profile allows at most one enclosure per item.
                raise ValueError(
                    "RSS feed items may only have one enclosure, see "
                    "http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
                )
            enclosure = enclosures[0]
            handler.addQuickElement('enclosure', '', {
                'url': enclosure.url,
                'length': enclosure.length,
                'type': enclosure.mime_type,
            })

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
    # Spec: https://tools.ietf.org/html/rfc4287
    content_type = 'application/atom+xml; charset=utf-8'
    ns = "http://www.w3.org/2005/Atom"

    def write(self, outfile, encoding):
        # Serialize the full <feed>...</feed> document.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement('feed', self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        handler.endElement("feed")

    def root_attributes(self):
        # Atom namespace, plus xml:lang when a feed language is set.
        if self.feed['language'] is not None:
            return {"xmlns": self.ns, "xml:lang": self.feed['language']}
        else:
            return {"xmlns": self.ns}

    def add_root_elements(self, handler):
        # Feed-level metadata; optional fields are emitted only when set.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
        handler.addQuickElement("id", self.feed['id'])
        handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
        if self.feed['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", self.feed['author_name'])
            if self.feed['author_email'] is not None:
                handler.addQuickElement("email", self.feed['author_email'])
            if self.feed['author_link'] is not None:
                handler.addQuickElement("uri", self.feed['author_link'])
            handler.endElement("author")
        if self.feed['subtitle'] is not None:
            handler.addQuickElement("subtitle", self.feed['subtitle'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("rights", self.feed['feed_copyright'])

    def write_items(self, handler):
        # Emit one <entry> element per feed item.
        for item in self.items:
            handler.startElement("entry", self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("entry")

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})

        if item['pubdate'] is not None:
            handler.addQuickElement('published', rfc3339_date(item['pubdate']))

        if item['updateddate'] is not None:
            handler.addQuickElement('updated', rfc3339_date(item['updateddate']))

        # Author information.
        if item['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", item['author_name'])
            if item['author_email'] is not None:
                handler.addQuickElement("email", item['author_email'])
            if item['author_link'] is not None:
                handler.addQuickElement("uri", item['author_link'])
            handler.endElement("author")

        # Unique ID.
        if item['unique_id'] is not None:
            unique_id = item['unique_id']
        else:
            # Derive a tag: URI from the item link and publication date.
            unique_id = get_tag_uri(item['link'], item['pubdate'])
        handler.addQuickElement("id", unique_id)

        # Summary.
        if item['description'] is not None:
            handler.addQuickElement("summary", item['description'], {"type": "html"})

        # Enclosures.
        for enclosure in item['enclosures']:
            handler.addQuickElement('link', '', {
                'rel': 'enclosure',
                'href': enclosure.url,
                'length': enclosure.length,
                'type': enclosure.mime_type,
            })

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", "", {"term": cat})

        # Rights.
        if item['item_copyright'] is not None:
            handler.addQuickElement("rights", item['item_copyright'])

    @property
    def mime_type(self):
        # Deprecated alias for content_type.
        warnings.warn(
            'The mime_type attribute of Atom1Feed is deprecated. '
            'Use content_type instead.',
            RemovedInDjango20Warning, stacklevel=2
        )
        return self.content_type
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| bsd-3-clause |
DolphinDream/sverchok | nodes/CAD/merge_mesh_2d.py | 2 | 4660 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, throttle_and_update_node
from sverchok.utils.geom_2d.merge_mesh import merge_mesh
class SvMergeMesh2D(bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Merge two 2d meshes

    Each mesh can have disjoint parts
    Only X and Y coordinate takes in account
    """
    bl_idname = 'SvMergeMesh2D'
    bl_label = 'Merge mesh 2D'
    bl_icon = 'AUTOMERGE_ON'

    @throttle_and_update_node
    def update_sockets(self, context):
        # Rebuild the optional output sockets when the mask toggles change,
        # preserving existing links by name where possible.
        links = {sock.name: [link.to_socket for link in sock.links] for sock in self.outputs}
        # The first two outputs (Verts/Faces) are permanent; drop the rest.
        [self.outputs.remove(sock) for sock in self.outputs[2:]]
        new_socks = []
        if self.simple_mask:
            new_socks.append(self.outputs.new('SvStringsSocket', 'Mask A'))
            new_socks.append(self.outputs.new('SvStringsSocket', 'Mask B'))
        if self.index_mask:
            new_socks.append(self.outputs.new('SvStringsSocket', 'Face index A'))
            new_socks.append(self.outputs.new('SvStringsSocket', 'Face index B'))
        # Re-create any links that pointed at sockets with the same names.
        [[self.id_data.links.new(sock, link) for link in links[sock.name]]
         for sock in new_socks if sock.name in links]

    simple_mask: bpy.props.BoolProperty(name='Simple mask', update=update_sockets, default=True,
                                        description='Switching between two type of masks')
    index_mask: bpy.props.BoolProperty(name="Index mask", update=update_sockets,
                                       description="Mask of output mesh represented indexes"
                                                   " of faces from mesh A and Mesh B")
    accuracy: bpy.props.IntProperty(name='Accuracy', update=updateNode, default=5, min=3, max=12,
                                    description='Some errors of the node can be fixed by changing this value')

    def draw_buttons_ext(self, context, layout):
        # Extended (N-panel) UI: mask toggles and the accuracy setting.
        col = layout.column(align=True)
        col.prop(self, 'simple_mask', toggle=True)
        col.prop(self, 'index_mask', toggle=True)
        col.prop(self, 'accuracy')

    def sv_init(self, context):
        # Declare the node's fixed input and output sockets.
        self.inputs.new('SvVerticesSocket', 'Verts A')
        self.inputs.new('SvStringsSocket', 'Faces A')
        self.inputs.new('SvVerticesSocket', 'Verts B')
        self.inputs.new('SvStringsSocket', 'Faces B')
        self.outputs.new('SvVerticesSocket', 'Verts')
        self.outputs.new('SvStringsSocket', 'Faces')
        self.outputs.new('SvStringsSocket', 'Mask A')
        self.outputs.new('SvStringsSocket', 'Mask B')

    def process(self):
        # Merge each pair of input meshes; unpack only the outputs the current
        # mask options produce.
        if not all([sock.is_linked for sock in self.inputs]):
            return
        out = []
        for sv_verts_a, sv_faces_a, sv_verts_b, sv_faces_b in zip(self.inputs['Verts A'].sv_get(),
                                                                  self.inputs['Faces A'].sv_get(),
                                                                  self.inputs['Verts B'].sv_get(),
                                                                  self.inputs['Faces B'].sv_get()):
            out.append(merge_mesh(sv_verts_a, sv_faces_a, sv_verts_b, sv_faces_b, self.simple_mask, self.index_mask,
                                  self.accuracy))
        if self.simple_mask and self.index_mask:
            out_verts, out_faces, mask_a, mask_b, face_index_a, face_index_b = zip(*out)
            self.outputs['Mask A'].sv_set(mask_a)
            self.outputs['Mask B'].sv_set(mask_b)
            self.outputs['Face index A'].sv_set(face_index_a)
            self.outputs['Face index B'].sv_set(face_index_b)
        elif self.simple_mask:
            out_verts, out_faces, mask_a, mask_b = zip(*out)
            self.outputs['Mask A'].sv_set(mask_a)
            self.outputs['Mask B'].sv_set(mask_b)
        elif self.index_mask:
            out_verts, out_faces, face_index_a, face_index_b = zip(*out)
            self.outputs['Face index A'].sv_set(face_index_a)
            self.outputs['Face index B'].sv_set(face_index_b)
        else:
            out_verts, out_faces = zip(*out)
        self.outputs['Verts'].sv_set(out_verts)
        self.outputs['Faces'].sv_set(out_faces)
def register():
    # Register the node class with Blender (called on add-on load).
    bpy.utils.register_class(SvMergeMesh2D)
def unregister():
    # Unregister the node class (called on add-on unload).
    bpy.utils.unregister_class(SvMergeMesh2D)
| gpl-3.0 |
alhashash/odoo | addons/google_calendar/google_calendar.py | 4 | 50219 | # -*- coding: utf-8 -*-
import operator
import simplejson
import urllib2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, exception_to_unicode
from openerp.tools.translate import _
from openerp.http import request
from datetime import datetime, timedelta
from dateutil import parser
import pytz
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
def status_response(status, substr=False):
    """Inspect an HTTP status code.

    With ``substr=True`` return the leading digit of *status* as an int;
    otherwise return True when the code is a 2xx (success) code.
    """
    leading_digit = int(str(status)[0])
    if substr:
        return leading_digit
    return leading_digit == 2
class Meta(type):
    """Metaclass that turns a class body into a record/structure.

    Non-callable class attributes become per-instance defaults copied by a
    generated ``__init__`` (instead of shared, mutable class-level "static"
    state).  Keyword arguments to ``__init__`` may override any declared
    attribute; unknown keywords are rejected by an assert.
    """
    def __new__(typ, name, parents, attrs):
        # Split the class body: callables stay as methods, everything else
        # becomes an instance-level default assigned in the generated __init__.
        methods = dict((k, v) for k, v in attrs.iteritems()
                       if callable(v))
        attrs = dict((k, v) for k, v in attrs.iteritems()
                     if not callable(v))

        def init(self, **kw):
            # Copy the declared defaults onto the instance...
            for k, v in attrs.iteritems():
                setattr(self, k, v)
            # ...then apply caller overrides (declared names only).
            for k, v in kw.iteritems():
                assert k in attrs
                setattr(self, k, v)

        methods['__init__'] = init
        # Allow dict-style access: obj['field'] is obj.field
        methods['__getitem__'] = getattr
        return type.__new__(typ, name, parents, methods)
class Struct(object):
    # Base class for plain record types: subclasses declare their fields as
    # class attributes and Meta generates __init__/__getitem__ from them
    # (Python 2 metaclass hook).
    __metaclass__ = Meta
class OpenerpEvent(Struct):
    # Snapshot of the Odoo side of one event during synchronization.
    # (Deliberately no docstring: Meta would copy __doc__ into instances
    # like any other non-callable class attribute.)
    event = False          # calendar.event browse record
    found = False          # True when the event exists on the Odoo side
    event_id = False       # database id of the calendar.event
    isRecurrence = False   # True when the event carries a recurrence rule
    isInstance = False     # True when the event is one occurrence of a recurrence
    update = False         # last modification date (oe_update_date)
    status = False         # active flag of the event
    attendee_id = False    # id of the current user's calendar.attendee record
    synchro = False        # last synchronization date (oe_synchro_date)
class GmailEvent(Struct):
    # Snapshot of the Google side of one event during synchronization.
    # (Deliberately no docstring: Meta would copy __doc__ into instances.)
    event = False          # raw event payload dict from the Google API
    found = False          # True when the event exists on the Google side
    isRecurrence = False   # True when the payload has a 'recurrence' rule
    isInstance = False     # True when the payload has a 'recurringEventId'
    update = False         # 'updated' timestamp, normalized to 'Y-m-d H:M:S.f'
    status = False         # True unless the Google status is 'cancelled'
class SyncEvent(object):
    """Pairs the Odoo (`OE`) and Google (`GG`) views of one event and
    decides, in :meth:`compute_OP`, which operation (`OP`) reconciles them."""

    def __init__(self):
        self.OE = OpenerpEvent()
        self.GG = GmailEvent()
        self.OP = None

    def __getitem__(self, key):
        # Allows self['OE'] / self['GG'] so compute_OP can pick a side by name.
        return getattr(self, key)

    def compute_OP(self, modeFull=True):
        """Set ``self.OP`` to the SyncOperation to perform.

        :param modeFull: True during a full synchronization, False during an
            incremental (since-last-sync) one.
        """
        #If event are already in Gmail and in OpenERP
        if self.OE.found and self.GG.found:
            #If the event has been deleted from one side, we delete on other side !
            if self.OE.status != self.GG.status:
                self.OP = Delete((self.OE.status and "OE") or (self.GG.status and "GG"),
                                 'The event has been deleted from one side, we delete on other side !')
            #If event is not deleted !
            elif self.OE.status and self.GG.status:
                # Compare timestamps without their sub-second part.
                if self.OE.update.split('.')[0] != self.GG.update.split('.')[0]:
                    if self.OE.update < self.GG.update:
                        tmpSrc = 'GG'
                    elif self.OE.update > self.GG.update:
                        tmpSrc = 'OE'
                    assert tmpSrc in ['GG', 'OE']

                    #if self.OP.action == None:
                    if self[tmpSrc].isRecurrence:
                        if self[tmpSrc].status:
                            self.OP = Update(tmpSrc, 'Only need to update, because i\'m active')
                        else:
                            self.OP = Exclude(tmpSrc, 'Need to Exclude (Me = First event from recurrence) from recurrence')

                    elif self[tmpSrc].isInstance:
                        self.OP = Update(tmpSrc, 'Only need to update, because already an exclu')
                    else:
                        self.OP = Update(tmpSrc, 'Simply Update... I\'m a single event')
                else:
                    if not self.OE.synchro or self.OE.synchro.split('.')[0] < self.OE.update.split('.')[0]:
                        self.OP = Update('OE', 'Event already updated by another user, but not synchro with my google calendar')
                    else:
                        self.OP = NothingToDo("", 'Not update needed')
            else:
                self.OP = NothingToDo("", "Both are already deleted")

        # New in openERP... Create on create_events of synchronize function
        elif self.OE.found and not self.GG.found:
            if self.OE.status:
                self.OP = Delete('OE', 'Update or delete from GOOGLE')
            else:
                if not modeFull:
                    self.OP = Delete('GG', 'Deleted from Odoo, need to delete it from Gmail if already created')
                else:
                    self.OP = NothingToDo("", "Already Deleted in gmail and unlinked in Odoo")
        elif self.GG.found and not self.OE.found:
            tmpSrc = 'GG'
            if not self.GG.status and not self.GG.isInstance:
                # don't need to make something... because event has been created and deleted before the synchronization
                self.OP = NothingToDo("", 'Nothing to do... Create and Delete directly')
            else:
                if self.GG.isInstance:
                    if self[tmpSrc].status:
                        self.OP = Exclude(tmpSrc, 'Need to create the new exclu')
                    else:
                        self.OP = Exclude(tmpSrc, 'Need to copy and Exclude')
                else:
                    self.OP = Create(tmpSrc, 'New EVENT CREATE from GMAIL')

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Verbose multi-line dump used by the synchronization debug logging.
        myPrint = "\n\n---- A SYNC EVENT ---"
        myPrint += "\n ID OE: %s " % (self.OE.event and self.OE.event.id)
        myPrint += "\n ID GG: %s " % (self.GG.event and self.GG.event.get('id', False))
        myPrint += "\n Name OE: %s " % (self.OE.event and self.OE.event.name.encode('utf8'))
        myPrint += "\n Name GG: %s " % (self.GG.event and self.GG.event.get('summary', '').encode('utf8'))
        myPrint += "\n Found OE:%5s vs GG: %5s" % (self.OE.found, self.GG.found)
        myPrint += "\n Recurrence OE:%5s vs GG: %5s" % (self.OE.isRecurrence, self.GG.isRecurrence)
        myPrint += "\n Instance OE:%5s vs GG: %5s" % (self.OE.isInstance, self.GG.isInstance)
        myPrint += "\n Synchro OE: %10s " % (self.OE.synchro)
        myPrint += "\n Update OE: %10s " % (self.OE.update)
        myPrint += "\n Update GG: %10s " % (self.GG.update)
        myPrint += "\n Status OE:%5s vs GG: %5s" % (self.OE.status, self.GG.status)
        if (self.OP is None):
            myPrint += "\n Action %s" % "---!!!---NONE---!!!---"
        else:
            myPrint += "\n Action %s" % type(self.OP).__name__
            myPrint += "\n Source %s" % (self.OP.src)
            myPrint += "\n comment %s" % (self.OP.info)
        return myPrint
class SyncOperation(object):
    """Base class for one synchronization action.

    Records which side is the source of truth (`src`, "OE" or "GG") and a
    human-readable reason (`info`); extra keyword arguments become
    attributes.
    """

    def __init__(self, src, info, **kw):
        self.src = src
        self.info = info
        for attr_name, attr_value in kw.items():
            setattr(self, attr_name, attr_value)

    def __str__(self):
        return 'in__STR__'
class Create(SyncOperation):
    """Create the event on the side opposite to `src`."""
    pass
class Update(SyncOperation):
    """Propagate changes from `src` to the other side."""
    pass
class Delete(SyncOperation):
    """Delete the event on the side opposite to `src`."""
    pass
class NothingToDo(SyncOperation):
    """Both sides already agree; no action required."""
    pass
class Exclude(SyncOperation):
    """Turn one occurrence of a recurrence into an exception ("exclu")."""
    pass
class google_calendar(osv.AbstractModel):
    # Abstract model holding the Odoo <-> Google Calendar synchronization
    # engine (no table of its own).
    STR_SERVICE = 'calendar'
    _name = 'google.%s' % STR_SERVICE  # -> 'google.calendar'
    def generate_data(self, cr, uid, event, isCreating=False, context=None):
        """Convert an Odoo calendar.event into a Google API event payload.

        :param event: calendar.event browse record.
        :param isCreating: True when the payload is for an insert (affects
            the end date of all-day events).
        :return: dict ready to be JSON-encoded for the Google Calendar API.
        """
        if event.allday:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T').split('T')[0]
            # +1 day only on creation — presumably Google's exclusive
            # end-date convention for all-day events; TODO confirm.
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=event.duration) + timedelta(days=isCreating and 1 or 0), context=context).isoformat('T').split('T')[0]
            type = 'date'
            vstype = 'dateTime'
        else:
            start_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            final_date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(event.stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context).isoformat('T')
            type = 'dateTime'
            vstype = 'date'
        attendee_list = []
        for attendee in event.attendee_ids:
            attendee_list.append({
                'email': attendee.email or 'NoEmail@mail.com',
                'displayName': attendee.partner_id.name,
                'responseStatus': attendee.state or 'needsAction',
            })

        reminders = []
        for alarm in event.alarm_ids:
            reminders.append({
                "method": "email" if alarm.type == "email" else "popup",
                "minutes": alarm.duration_minutes
            })
        data = {
            "summary": event.name or '',
            "description": event.description or '',
            "start": {
                # The unused variant ('date' vs 'dateTime') is sent as None —
                # presumably so a PATCH clears any previous value; TODO confirm.
                type: start_date,
                vstype: None,
                'timeZone': 'UTC'
            },
            "end": {
                type: final_date,
                vstype: None,
                'timeZone': 'UTC'
            },
            "attendees": attendee_list,
            "reminders": {
                "overrides": reminders,
                "useDefault": "false"
            },
            "location": event.location or '',
            "visibility": event['class'] or 'public',
        }
        if event.recurrency and event.rrule:
            data["recurrence"] = ["RRULE:" + event.rrule]

        if not event.active:
            data["state"] = "cancelled"

        if not self.get_need_synchro_attendee(cr, uid, context=context):
            data.pop("attendees")
        return data
def create_an_event(self, cr, uid, event, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event, isCreating=True, context=context)
url = "/calendar/v3/calendars/%s/events?fields=%s&access_token=%s" % ('primary', urllib2.quote('id,updated'), self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='POST', context=context)
def delete_an_event(self, cr, uid, event_id, context=None):
gs_pool = self.pool['google.service']
params = {
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event_id)
return gs_pool._do_request(cr, uid, url, params, headers, type='DELETE', context=context)
    def get_calendar_primary_id(self, cr, uid, context=None):
        """Return ``(success, calendar_id, ask_time)`` for the user's primary
        Google calendar.

        On a 401 the stored token is wiped (through a separate cursor) and a
        configuration warning is raised.
        """
        params = {
            'fields': 'id',
            'access_token': self.get_token(cr, uid, context)
        }
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        url = "/calendar/v3/calendars/primary"

        try:
            st, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
        except Exception, e:
            if (e.code == 401):  # Token invalid / Acces unauthorized
                error_msg = "Your token is invalid or has been revoked !"

                registry = openerp.modules.registry.RegistryManager.get(request.session.db)
                with registry.cursor() as cur:
                    self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_token': False, 'google_calendar_token_validity': False}, context=context)

                raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)
            raise

        return (status_response(st), content['id'] or False, ask_time)
    def get_event_synchro_dict(self, cr, uid, lastSync=False, token=False, nextPageToken=False, context=None):
        """Fetch events from the primary Google calendar, following pagination.

        Incremental mode (*lastSync* set) asks only for events updated since
        then, including deletions; otherwise all events after get_minTime()
        are retrieved.

        :return: dict mapping google event id -> raw event payload.
        """
        if not token:
            token = self.get_token(cr, uid, context)

        params = {
            'fields': 'items,nextPageToken',
            'access_token': token,
            'maxResults': 1000,
            #'timeMin': self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz"),
        }

        if lastSync:
            params['updatedMin'] = lastSync.strftime("%Y-%m-%dT%H:%M:%S.%fz")
            params['showDeleted'] = True
        else:
            params['timeMin'] = self.get_minTime(cr, uid, context=context).strftime("%Y-%m-%dT%H:%M:%S.%fz")

        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

        url = "/calendar/v3/calendars/%s/events" % 'primary'
        if nextPageToken:
            params['pageToken'] = nextPageToken

        status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)

        google_events_dict = {}
        for google_event in content['items']:
            google_events_dict[google_event['id']] = google_event

        if content.get('nextPageToken'):
            # Recurse to fetch the next page and merge it in.
            google_events_dict.update(
                self.get_event_synchro_dict(cr, uid, lastSync=lastSync, token=token, nextPageToken=content['nextPageToken'], context=context)
            )

        return google_events_dict
def get_one_event_synchro(self, cr, uid, google_id, context=None):
token = self.get_token(cr, uid, context)
params = {
'access_token': token,
'maxResults': 1000,
'showDeleted': True,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', google_id)
try:
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, params, headers, type='GET', context=context)
except:
_logger.info("Calendar Synchro - In except of get_one_event_synchro")
pass
return status_response(status) and content or False
def update_to_google(self, cr, uid, oe_event, google_event, context):
calendar_event = self.pool['calendar.event']
url = "/calendar/v3/calendars/%s/events/%s?fields=%s&access_token=%s" % ('primary', google_event['id'], 'id,updated', self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = self.generate_data(cr, uid, oe_event, context)
data['sequence'] = google_event.get('sequence', 0)
data_json = simplejson.dumps(data)
status, content, ask_time = self.pool['google.service']._do_request(cr, uid, url, data_json, headers, type='PATCH', context=context)
update_date = datetime.strptime(content['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
calendar_event.write(cr, uid, [oe_event.id], {'oe_update_date': update_date})
if context['curr_attendee']:
self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date}, context)
    def update_an_event(self, cr, uid, event, context=None):
        # NOTE(review): despite its name this issues a GET on the event URL,
        # sending the generated payload (plus the token) as query parameters,
        # and returns the raw response — it does not PATCH/PUT anything.
        # The TO_CHECK below is from the original author.
        data = self.generate_data(cr, uid, event, context=context)

        url = "/calendar/v3/calendars/%s/events/%s" % ('primary', event.google_internal_event_id)
        headers = {}
        data['access_token'] = self.get_token(cr, uid, context)

        status, response, ask_time = self.pool['google.service']._do_request(cr, uid, url, data, headers, type='GET', context=context)
        #TO_CHECK : , if http fail, no event, do DELETE ?
        return response
def update_recurrent_event_exclu(self, cr, uid, instance_id, event_ori_google_id, event_new, context=None):
gs_pool = self.pool['google.service']
data = self.generate_data(cr, uid, event_new, context=context)
data['recurringEventId'] = event_ori_google_id
data['originalStartTime'] = event_new.recurrent_id_date
url = "/calendar/v3/calendars/%s/events/%s?access_token=%s" % ('primary', instance_id, self.get_token(cr, uid, context))
headers = {'Content-type': 'application/json'}
data['sequence'] = self.get_sequence(cr, uid, instance_id, context)
data_json = simplejson.dumps(data)
return gs_pool._do_request(cr, uid, url, data_json, headers, type='PUT', context=context)
    def update_from_google(self, cr, uid, event, single_event_dict, type, context):
        """Write/copy/create an Odoo event from a Google event payload.

        :param event: target Odoo event (browse record or dict with 'id'),
            or False when type == "create".
        :param single_event_dict: the Google event as returned by the API.
        :param type: one of "write", "copy", "create".
        :return: result of the corresponding ORM write/create call.
        """
        if context is None:
            # NOTE(review): an empty list, not a dict; it is rebuilt as a real
            # dict below (`dict(context or {})`) before any key access —
            # confirm the intermediate ORM calls tolerate a list context.
            context = []

        calendar_event = self.pool['calendar.event']
        res_partner_obj = self.pool['res.partner']
        calendar_attendee_obj = self.pool['calendar.attendee']
        calendar_alarm_obj = self.pool['calendar.alarm']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context).partner_id.id
        attendee_record = []
        alarm_record = []
        partner_record = [(4, myPartnerID)]
        result = {}

        if self.get_need_synchro_attendee(cr, uid, context=context):
            # Map Google attendees onto partners, creating missing partners.
            for google_attendee in single_event_dict.get('attendees', []):
                if type == "write":
                    for oe_attendee in event['attendee_ids']:
                        if oe_attendee.email == google_attendee['email']:
                            calendar_attendee_obj.write(cr, uid, [oe_attendee.id], {'state': google_attendee['responseStatus']}, context=context)
                            google_attendee['found'] = True
                            continue

                if google_attendee.get('found'):
                    continue

                attendee_id = res_partner_obj.search(cr, uid, [('email', '=', google_attendee['email'])], context=context)
                if not attendee_id:
                    data = {
                        'email': google_attendee['email'],
                        'customer': False,
                        'name': google_attendee.get("displayName", False) or google_attendee['email']
                    }
                    attendee_id = [res_partner_obj.create(cr, uid, data, context=context)]
                attendee = res_partner_obj.read(cr, uid, attendee_id[0], ['email'], context=context)
                partner_record.append((4, attendee.get('id')))
                attendee['partner_id'] = attendee.pop('id')
                attendee['state'] = google_attendee['responseStatus']
                attendee_record.append((0, 0, attendee))

        # Reuse an existing matching calendar.alarm or create a new one.
        for google_alarm in single_event_dict.get('reminders', {}).get('overrides', []):
            alarm_id = calendar_alarm_obj.search(
                cr,
                uid,
                [
                    ('type', '=', google_alarm['method'] if google_alarm['method'] == 'email' else 'notification'),
                    ('duration_minutes', '=', google_alarm['minutes'])
                ],
                context=context
            )
            if not alarm_id:
                data = {
                    'type': google_alarm['method'] if google_alarm['method'] == 'email' else 'notification',
                    'duration': google_alarm['minutes'],
                    'interval': 'minutes',
                    'name': "%s minutes - %s" % (google_alarm['minutes'], google_alarm['method'])
                }
                alarm_id = [calendar_alarm_obj.create(cr, uid, data, context=context)]
            alarm_record.append(alarm_id[0])

        UTC = pytz.timezone('UTC')
        if single_event_dict.get('start') and single_event_dict.get('end'):  # If not cancelled
            if single_event_dict['start'].get('dateTime', False) and single_event_dict['end'].get('dateTime', False):
                # Timed event: normalize both bounds to naive-UTC strings.
                date = parser.parse(single_event_dict['start']['dateTime'])
                stop = parser.parse(single_event_dict['end']['dateTime'])
                date = str(date.astimezone(UTC))[:-6]
                stop = str(stop.astimezone(UTC))[:-6]
                allday = False
            else:
                # All-day event: the end date is shifted back one day —
                # presumably because Google's all-day end date is exclusive
                # while Odoo's is inclusive; TODO confirm.
                date = (single_event_dict['start']['date'])
                stop = (single_event_dict['end']['date'])
                d_end = datetime.strptime(stop, DEFAULT_SERVER_DATE_FORMAT)
                allday = True
                d_end = d_end + timedelta(days=-1)
                stop = d_end.strftime(DEFAULT_SERVER_DATE_FORMAT)

            update_date = datetime.strptime(single_event_dict['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
            result.update({
                'start': date,
                'stop': stop,
                'allday': allday
            })
        result.update({
            'attendee_ids': attendee_record,
            'partner_ids': list(set(partner_record)),
            'alarm_ids': [(6, 0, alarm_record)],
            'name': single_event_dict.get('summary', 'Event'),
            'description': single_event_dict.get('description', False),
            'location': single_event_dict.get('location', False),
            'class': single_event_dict.get('visibility', 'public'),
            'oe_update_date': update_date,
        })

        if single_event_dict.get("recurrence", False):
            # Strip the leading "RRULE:" prefix before storing.
            rrule = [rule for rule in single_event_dict["recurrence"] if rule.startswith("RRULE:")][0][6:]
            result['rrule'] = rrule

        context = dict(context or {}, no_mail_to_attendees=True)
        if type == "write":
            res = calendar_event.write(cr, uid, event['id'], result, context=context)
        elif type == "copy":
            result['recurrence'] = True
            res = calendar_event.write(cr, uid, [event['id']], result, context=context)
        elif type == "create":
            res = calendar_event.create(cr, uid, result, context=context)

        if context['curr_attendee']:
            self.pool['calendar.attendee'].write(cr, uid, [context['curr_attendee']], {'oe_synchro_date': update_date, 'google_internal_event_id': single_event_dict.get('id', False)}, context)
        return res
def remove_references(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
reset_data = {
'google_calendar_rtoken': False,
'google_calendar_token': False,
'google_calendar_token_validity': False,
'google_calendar_last_sync_date': False,
'google_calendar_cal_id': False,
}
all_my_attendees = self.pool['calendar.attendee'].search(cr, uid, [('partner_id', '=', current_user.partner_id.id)], context=context)
self.pool['calendar.attendee'].write(cr, uid, all_my_attendees, {'oe_synchro_date': False, 'google_internal_event_id': False}, context=context)
current_user.write(reset_data)
return True
    def synchronize_events_cron(self, cr, uid, context=None):
        """Scheduled job: run the synchronization for every user that has
        already synchronized at least once; failures are logged, not raised."""
        ids = self.pool['res.users'].search(cr, uid, [('google_calendar_last_sync_date', '!=', False)], context=context)
        _logger.info("Calendar Synchro - Started by cron")

        for user_to_sync in ids:
            _logger.info("Calendar Synchro - Starting synchronization for a new user [%s] " % user_to_sync)
            try:
                resp = self.synchronize_events(cr, user_to_sync, False, lastSync=True, context=None)
                if resp.get("status") == "need_reset":
                    _logger.info("[%s] Calendar Synchro - Failed - NEED RESET !" % user_to_sync)
                else:
                    _logger.info("[%s] Calendar Synchro - Done with status : %s !" % (user_to_sync, resp.get("status")))
            except Exception, e:
                _logger.info("[%s] Calendar Synchro - Exception : %s !" % (user_to_sync, exception_to_unicode(e)))
        _logger.info("Calendar Synchro - Ended by cron")
    def synchronize_events(self, cr, uid, ids, lastSync=True, context=None):
        """Entry point of one user's full or incremental synchronization.

        :param ids: list whose first element is the user id to synchronize
            (falls back to `uid` when empty/False).
        :param lastSync: when True, attempt an incremental sync since the
            stored last-sync date; otherwise force a full synchronization.
        :return: dict with a "status" key ("need_reset", "need_refresh" or
            "no_new_event_from_google") for the caller/UI.
        """
        if context is None:
            context = {}

        # def isValidSync(syncToken):
        #     gs_pool = self.pool['google.service']
        #     params = {
        #         'maxResults': 1,
        #         'fields': 'id',
        #         'access_token': self.get_token(cr, uid, context),
        #         'syncToken': syncToken,
        #     }
        #     url = "/calendar/v3/calendars/primary/events"
        #     status, response = gs_pool._do_request(cr, uid, url, params, type='GET', context=context)
        #     return int(status) != 410

        user_to_sync = ids and ids[0] or uid
        current_user = self.pool['res.users'].browse(cr, SUPERUSER_ID, user_to_sync, context=context)

        st, current_google, ask_time = self.get_calendar_primary_id(cr, user_to_sync, context=context)

        if current_user.google_calendar_cal_id:
            if current_google != current_user.google_calendar_cal_id:
                # The Google account behind the token changed: ask for a reset.
                return {
                    "status": "need_reset",
                    "info": {
                        "old_name": current_user.google_calendar_cal_id,
                        "new_name": current_google
                    },
                    "url": ''
                }

            if lastSync and self.get_last_sync_date(cr, user_to_sync, context=context) and not self.get_disable_since_synchro(cr, user_to_sync, context=context):
                lastSync = self.get_last_sync_date(cr, user_to_sync, context)
                _logger.info("[%s] Calendar Synchro - MODE SINCE_MODIFIED : %s !" % (user_to_sync, lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT)))
            else:
                lastSync = False
                _logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO FORCED" % user_to_sync)
        else:
            # First synchronization for this user: remember the calendar id.
            current_user.write({'google_calendar_cal_id': current_google})
            lastSync = False
            _logger.info("[%s] Calendar Synchro - MODE FULL SYNCHRO - NEW CAL ID" % user_to_sync)

        new_ids = []
        new_ids += self.create_new_events(cr, user_to_sync, context=context)
        new_ids += self.bind_recurring_events_to_google(cr, user_to_sync, context)

        res = self.update_events(cr, user_to_sync, lastSync, context)

        current_user.write({'google_calendar_last_sync_date': ask_time})
        return {
            "status": res and "need_refresh" or "no_new_event_from_google",
            "url": ''
        }
    def create_new_events(self, cr, uid, context=None):
        """Push to Google every not-yet-synchronized event (excluding
        recurrence instances) the current user attends.

        :return: list of the created Google event ids.
        """
        if context is None:
            context = {}

        new_ids = []
        ev_obj = self.pool['calendar.event']
        att_obj = self.pool['calendar.attendee']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id

        context_norecurrent = context.copy()
        context_norecurrent['virtual_id'] = False
        my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID),
                                              ('google_internal_event_id', '=', False),
                                              '|',
                                              ('event_id.stop', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                                              ('event_id.final_date', '>', self.get_minTime(cr, uid, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                                              ], context=context_norecurrent)
        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            # Recurrence instances (recurrent_id set) are handled by
            # bind_recurring_events_to_google, not here.
            if not att.event_id.recurrent_id or att.event_id.recurrent_id == 0:
                st, response, ask_time = self.create_an_event(cr, uid, att.event_id, context=context)
                if status_response(st):
                    update_date = datetime.strptime(response['updated'], "%Y-%m-%dT%H:%M:%S.%fz")
                    ev_obj.write(cr, uid, att.event_id.id, {'oe_update_date': update_date})
                    new_ids.append(response['id'])
                    att_obj.write(cr, uid, [att.id], {'google_internal_event_id': response['id'], 'oe_synchro_date': update_date})
                    # Commit per event so a later failure does not lose the link.
                    cr.commit()
                else:
                    _logger.warning("Impossible to create event %s. [%s] Enable DEBUG for response detail.", att.event_id.id, st)
                    _logger.debug("Response : %s" % response)
        return new_ids
def get_context_no_virtual(self, context):
context_norecurrent = context.copy()
context_norecurrent['virtual_id'] = False
context_norecurrent['active_test'] = False
return context_norecurrent
    def bind_recurring_events_to_google(self, cr, uid, context=None):
        """For each local exception of a recurrence, derive its Google
        instance id from the parent's and push it as an exception occurrence.

        :return: list of the new Google instance ids successfully bound.
        """
        if context is None:
            context = {}

        new_ids = []
        ev_obj = self.pool['calendar.event']
        att_obj = self.pool['calendar.attendee']
        user_obj = self.pool['res.users']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id

        context_norecurrent = self.get_context_no_virtual(context)
        my_att_ids = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('google_internal_event_id', '=', False)], context=context_norecurrent)

        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            if att.event_id.recurrent_id and att.event_id.recurrent_id > 0:
                new_google_internal_event_id = False
                source_event_record = ev_obj.browse(cr, uid, att.event_id.recurrent_id, context)
                source_attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', source_event_record.id)], context=context)
                if not source_attendee_record_id:
                    continue
                source_attendee_record = att_obj.browse(cr, uid, source_attendee_record_id, context)[0]

                # Instance id = "<parent google id>_<start>": date-only for
                # all-day parents, compact "...Z" datetime otherwise.
                if att.event_id.recurrent_id_date and source_event_record.allday and source_attendee_record.google_internal_event_id:
                    new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.split(' ')[0].replace('-', '')
                elif att.event_id.recurrent_id_date and source_attendee_record.google_internal_event_id:
                    new_google_internal_event_id = source_attendee_record.google_internal_event_id + '_' + att.event_id.recurrent_id_date.replace('-', '').replace(' ', 'T').replace(':', '') + 'Z'

                if new_google_internal_event_id:
                    #TODO WARNING, NEED TO CHECK THAT EVENT and ALL instance NOT DELETE IN GMAIL BEFORE !
                    try:
                        st, response, ask_time = self.update_recurrent_event_exclu(cr, uid, new_google_internal_event_id, source_attendee_record.google_internal_event_id, att.event_id, context=context)
                        if status_response(st):
                            att_obj.write(cr, uid, [att.id], {'google_internal_event_id': new_google_internal_event_id}, context=context)
                            new_ids.append(new_google_internal_event_id)
                            # Commit per instance so partial progress survives.
                            cr.commit()
                        else:
                            _logger.warning("Impossible to create event %s. [%s]" % (att.event_id.id, st))
                            _logger.debug("Response : %s" % response)
                    except:
                        # Best-effort: a failure on one occurrence must not
                        # abort the whole binding pass.
                        pass
        return new_ids
    def update_events(self, cr, uid, lastSync=False, context=None):
        """Core two-way synchronization pass.

        Builds, for every known Google event id, a SyncEvent pairing the Odoo
        and Google states, computes the operation to apply (compute_OP), then
        executes it: create / update / exclude / delete on the proper side.

        :param lastSync: datetime of the previous sync for incremental mode,
            or False to force a full synchronization.
        :return: True
        """
        context = dict(context or {})

        calendar_event = self.pool['calendar.event']
        user_obj = self.pool['res.users']
        att_obj = self.pool['calendar.attendee']
        myPartnerID = user_obj.browse(cr, uid, uid, context=context).partner_id.id
        context_novirtual = self.get_context_no_virtual(context)

        if lastSync:
            try:
                all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=lastSync, context=context)
            except urllib2.HTTPError, e:
                if e.code == 410:  # GONE, Google is lost.
                    # we need to force the rollback from this cursor, because it locks my res_users but I need to write in this tuple before to raise.
                    cr.rollback()
                    registry = openerp.modules.registry.RegistryManager.get(request.session.db)
                    with registry.cursor() as cur:
                        self.pool['res.users'].write(cur, SUPERUSER_ID, [uid], {'google_calendar_last_sync_date': False}, context=context)
                error_key = simplejson.loads(str(e))
                error_key = error_key.get('error', {}).get('message', 'nc')
                error_msg = "Google is lost... the next synchro will be a full synchro. \n\n %s" % error_key
                raise self.pool.get('res.config.settings').get_config_warning(cr, _(error_msg), context=context)

            # Attendees whose Google event was reported as changed...
            my_google_att_ids = att_obj.search(cr, uid, [
                ('partner_id', '=', myPartnerID),
                ('google_internal_event_id', 'in', all_event_from_google.keys())
            ], context=context_novirtual)

            # ...plus attendees whose Odoo event changed since the last sync.
            my_openerp_att_ids = att_obj.search(cr, uid, [
                ('partner_id', '=', myPartnerID),
                ('event_id.oe_update_date', '>', lastSync and lastSync.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                ('google_internal_event_id', '!=', False),
            ], context=context_novirtual)

            my_openerp_googleinternal_ids = att_obj.read(cr, uid, my_openerp_att_ids, ['google_internal_event_id', 'event_id'], context=context_novirtual)

            if self.get_print_log(cr, uid, context=context):
                _logger.info("Calendar Synchro - \n\nUPDATE IN GOOGLE\n%s\n\nRETRIEVE FROM OE\n%s\n\nUPDATE IN OE\n%s\n\nRETRIEVE FROM GG\n%s\n\n" % (all_event_from_google, my_google_att_ids, my_openerp_att_ids, my_openerp_googleinternal_ids))

            # Odoo-modified events missing from the incremental Google result
            # are fetched individually so both sides can be compared.
            for giid in my_openerp_googleinternal_ids:
                active = True  # if not sure, we request google
                if giid.get('event_id'):
                    active = calendar_event.browse(cr, uid, int(giid.get('event_id')[0]), context=context_novirtual).active

                if giid.get('google_internal_event_id') and not all_event_from_google.get(giid.get('google_internal_event_id')) and active:
                    one_event = self.get_one_event_synchro(cr, uid, giid.get('google_internal_event_id'), context=context)
                    if one_event:
                        all_event_from_google[one_event['id']] = one_event

            my_att_ids = list(set(my_google_att_ids + my_openerp_att_ids))
        else:
            domain = [
                ('partner_id', '=', myPartnerID),
                ('google_internal_event_id', '!=', False),
                '|',
                ('event_id.stop', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                ('event_id.final_date', '>', self.get_minTime(cr, uid, context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
            ]

            # Select all events from OpenERP which have been already synchronized in gmail
            my_att_ids = att_obj.search(cr, uid, domain, context=context_novirtual)
            all_event_from_google = self.get_event_synchro_dict(cr, uid, lastSync=False, context=context)

        # Pair both sides: {base google id: {google id: SyncEvent}} — the base
        # id groups a recurrence with its instances ("<base>_<start>").
        event_to_synchronize = {}
        for att in att_obj.browse(cr, uid, my_att_ids, context=context):
            event = att.event_id

            base_event_id = att.google_internal_event_id.rsplit('_', 1)[0]

            if base_event_id not in event_to_synchronize:
                event_to_synchronize[base_event_id] = {}

            if att.google_internal_event_id not in event_to_synchronize[base_event_id]:
                event_to_synchronize[base_event_id][att.google_internal_event_id] = SyncEvent()

            ev_to_sync = event_to_synchronize[base_event_id][att.google_internal_event_id]

            ev_to_sync.OE.attendee_id = att.id
            ev_to_sync.OE.event = event
            ev_to_sync.OE.found = True
            ev_to_sync.OE.event_id = event.id
            ev_to_sync.OE.isRecurrence = event.recurrency
            ev_to_sync.OE.isInstance = bool(event.recurrent_id and event.recurrent_id > 0)
            ev_to_sync.OE.update = event.oe_update_date
            ev_to_sync.OE.status = event.active
            ev_to_sync.OE.synchro = att.oe_synchro_date

        for event in all_event_from_google.values():
            event_id = event.get('id')
            base_event_id = event_id.rsplit('_', 1)[0]

            if base_event_id not in event_to_synchronize:
                event_to_synchronize[base_event_id] = {}

            if event_id not in event_to_synchronize[base_event_id]:
                event_to_synchronize[base_event_id][event_id] = SyncEvent()

            ev_to_sync = event_to_synchronize[base_event_id][event_id]

            ev_to_sync.GG.event = event
            ev_to_sync.GG.found = True
            ev_to_sync.GG.isRecurrence = bool(event.get('recurrence', ''))
            ev_to_sync.GG.isInstance = bool(event.get('recurringEventId', 0))
            ev_to_sync.GG.update = event.get('updated', None)  # if deleted, no date without browse event
            if ev_to_sync.GG.update:
                ev_to_sync.GG.update = ev_to_sync.GG.update.replace('T', ' ').replace('Z', '')
            ev_to_sync.GG.status = (event.get('status') != 'cancelled')

        ######################
        #   PRE-PROCESSING   #
        ######################
        for base_event in event_to_synchronize:
            for current_event in event_to_synchronize[base_event]:
                event_to_synchronize[base_event][current_event].compute_OP(modeFull=not lastSync)
            if self.get_print_log(cr, uid, context=context):
                if not isinstance(event_to_synchronize[base_event][current_event].OP, NothingToDo):
                    _logger.info(event_to_synchronize[base_event])

        ######################
        #     DO ACTION      #
        ######################
        for base_event in event_to_synchronize:
            # Sort instances by google id so the base event comes first.
            event_to_synchronize[base_event] = sorted(event_to_synchronize[base_event].iteritems(), key=operator.itemgetter(0))
            for current_event in event_to_synchronize[base_event]:
                cr.commit()
                event = current_event[1]  # event is an Sync Event !
                actToDo = event.OP
                actSrc = event.OP.src

                context['curr_attendee'] = event.OE.attendee_id

                if isinstance(actToDo, NothingToDo):
                    continue
                elif isinstance(actToDo, Create):
                    context_tmp = context.copy()
                    context_tmp['NewMeeting'] = True
                    if actSrc == 'GG':
                        res = self.update_from_google(cr, uid, False, event.GG.event, "create", context=context_tmp)
                        event.OE.event_id = res
                        meeting = calendar_event.browse(cr, uid, res, context=context)
                        attendee_record_id = att_obj.search(cr, uid, [('partner_id', '=', myPartnerID), ('event_id', '=', res)], context=context)
                        self.pool['calendar.attendee'].write(cr, uid, attendee_record_id, {'oe_synchro_date': meeting.oe_update_date, 'google_internal_event_id': event.GG.event['id']}, context=context_tmp)
                    elif actSrc == 'OE':
                        raise "Should be never here, creation for OE is done before update !"
                    #TODO Add to batch
                elif isinstance(actToDo, Update):
                    if actSrc == 'GG':
                        self.update_from_google(cr, uid, event.OE.event, event.GG.event, 'write', context)
                    elif actSrc == 'OE':
                        self.update_to_google(cr, uid, event.OE.event, event.GG.event, context)
                elif isinstance(actToDo, Exclude):
                    if actSrc == 'OE':
                        self.delete_an_event(cr, uid, current_event[0], context=context)
                    elif actSrc == 'GG':
                        # Rebuild the Odoo virtual id suffix from the google
                        # instance id suffix ("<start>" part).
                        new_google_event_id = event.GG.event['id'].rsplit('_', 1)[1]
                        if 'T' in new_google_event_id:
                            new_google_event_id = new_google_event_id.replace('T', '')[:-1]
                        else:
                            new_google_event_id = new_google_event_id + "000000"

                        if event.GG.status:
                            parent_event = {}
                            if not event_to_synchronize[base_event][0][1].OE.event_id:
                                main_ev = att_obj.search_read(cr, uid, [('google_internal_event_id', '=', event.GG.event['id'].rsplit('_', 1)[0])], fields=['event_id'], context=context_novirtual)
                                event_to_synchronize[base_event][0][1].OE.event_id = main_ev[0].get('event_id')[0]

                            parent_event['id'] = "%s-%s" % (event_to_synchronize[base_event][0][1].OE.event_id, new_google_event_id)
                            res = self.update_from_google(cr, uid, parent_event, event.GG.event, "copy", context)
                        else:
                            parent_oe_id = event_to_synchronize[base_event][0][1].OE.event_id
                            if parent_oe_id:
                                calendar_event.unlink(cr, uid, "%s-%s" % (parent_oe_id, new_google_event_id), can_be_deleted=True, context=context)
                elif isinstance(actToDo, Delete):
                    if actSrc == 'GG':
                        try:
                            self.delete_an_event(cr, uid, current_event[0], context=context)
                        except Exception, e:
                            error = simplejson.loads(e.read())
                            error_nr = error.get('error', {}).get('code')
                            # if already deleted from gmail or never created
                            if error_nr in (404, 410,):
                                pass
                            else:
                                raise e
                    elif actSrc == 'OE':
                        calendar_event.unlink(cr, uid, event.OE.event_id, can_be_deleted=False, context=context)
        return True
def check_and_sync(self, cr, uid, oe_event, google_event, context):
if datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") > datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_to_google(cr, uid, oe_event, google_event, context)
elif datetime.strptime(oe_event.oe_update_date, "%Y-%m-%d %H:%M:%S.%f") < datetime.strptime(google_event['updated'], "%Y-%m-%dT%H:%M:%S.%fz"):
self.update_from_google(cr, uid, oe_event, google_event, 'write', context)
def get_sequence(self, cr, uid, instance_id, context=None):
gs_pool = self.pool['google.service']
params = {
'fields': 'sequence',
'access_token': self.get_token(cr, uid, context)
}
headers = {'Content-type': 'application/json'}
url = "/calendar/v3/calendars/%s/events/%s" % ('primary', instance_id)
st, content, ask_time = gs_pool._do_request(cr, uid, url, params, headers, type='GET', context=context)
return content.get('sequence', 0)
    ##################################
    ##  MANAGE CONNECTION TO GMAIL  ##
    ##################################
def get_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if not current_user.google_calendar_token_validity or \
datetime.strptime(current_user.google_calendar_token_validity.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT) < (datetime.now() + timedelta(minutes=1)):
self.do_refresh_token(cr, uid, context=context)
current_user.refresh()
return current_user.google_calendar_token
def get_last_sync_date(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_last_sync_date and datetime.strptime(current_user.google_calendar_last_sync_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(minutes=0) or False
def do_refresh_token(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
gs_pool = self.pool['google.service']
all_token = gs_pool._refresh_google_token_json(cr, uid, current_user.google_calendar_rtoken, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def need_authorize(self, cr, uid, context=None):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
return current_user.google_calendar_rtoken is False
def get_calendar_scope(self, RO=False):
readonly = RO and '.readonly' or ''
return 'https://www.googleapis.com/auth/calendar%s' % (readonly)
def authorize_google_uri(self, cr, uid, from_url='http://www.openerp.com', context=None):
url = self.pool['google.service']._get_authorize_uri(cr, uid, from_url, self.STR_SERVICE, scope=self.get_calendar_scope(), context=context)
return url
def can_authorize_google(self, cr, uid, context=None):
return self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager')
def set_all_tokens(self, cr, uid, authorization_code, context=None):
gs_pool = self.pool['google.service']
all_token = gs_pool._get_google_token_json(cr, uid, authorization_code, self.STR_SERVICE, context=context)
vals = {}
vals['google_%s_rtoken' % self.STR_SERVICE] = all_token.get('refresh_token')
vals['google_%s_token_validity' % self.STR_SERVICE] = datetime.now() + timedelta(seconds=all_token.get('expires_in'))
vals['google_%s_token' % self.STR_SERVICE] = all_token.get('access_token')
self.pool['res.users'].write(cr, SUPERUSER_ID, uid, vals, context=context)
def get_minTime(self, cr, uid, context=None):
number_of_week = self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.week_synchro', default=13)
return datetime.now() - timedelta(weeks=number_of_week)
def get_need_synchro_attendee(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_synchro_attendee', default=True)
def get_disable_since_synchro(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_since_synchro', default=False)
def get_print_log(self, cr, uid, context=None):
return self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.debug_print', default=False)
class res_users(osv.Model):
    """Extends res.users with the per-user Google Calendar OAuth tokens and
    synchronization bookkeeping used by the calendar synchro code above."""
    _inherit = 'res.users'
    _columns = {
        # Long-lived OAuth2 refresh token; absence means the user has not
        # authorized Google Calendar access yet (see need_authorize()).
        'google_calendar_rtoken': fields.char('Refresh Token'),
        # Short-lived OAuth2 access token (see get_token()).
        'google_calendar_token': fields.char('User token'),
        # Expiry datetime of the access token above.
        'google_calendar_token_validity': fields.datetime('Token Validity'),
        # Timestamp of the last successful synchronization run.
        'google_calendar_last_sync_date': fields.datetime('Last synchro date'),
        'google_calendar_cal_id': fields.char('Calendar ID', help='Last Calendar ID who has been synchronized. If it is changed, we remove \
all links between GoogleID and Odoo Google Internal ID')
    }
class calendar_event(osv.Model):
    """Extends calendar.event with the ``oe_update_date`` stamp used by the
    Google synchronization to decide which side holds the newest data."""
    _inherit = "calendar.event"

    def get_fields_need_update_google(self, cr, uid, context=None):
        """Fields whose modification must be pushed to Google Calendar."""
        return ['name', 'description', 'allday', 'date', 'date_end', 'stop', 'attendee_ids', 'alarm_ids', 'location', 'class', 'active']

    def write(self, cr, uid, ids, vals, context=None):
        """Stamp ``oe_update_date`` whenever a Google-relevant field changes,
        unless the caller already provided it or the write originates from
        the synchronization itself (``NewMeeting`` in context)."""
        if context is None:
            context = {}
        google_fields = set(self.get_fields_need_update_google(cr, uid, context))
        touched = set(vals.keys()) & google_fields
        if touched and 'oe_update_date' not in vals and 'NewMeeting' not in context:
            vals['oe_update_date'] = datetime.now()
        return super(calendar_event, self).write(cr, uid, ids, vals, context=context)

    def copy(self, cr, uid, id, default=None, context=None):
        """Set (or clear) ``oe_update_date`` on the duplicate depending on
        why the copy is being made."""
        default = default or {}
        if default.get('write_type', False):
            # Internal marker only — never persisted on the copy.
            del default['write_type']
        elif default.get('recurrent_id', False):
            # Detaching a recurrence instance counts as a fresh local change.
            default['oe_update_date'] = datetime.now()
        else:
            default['oe_update_date'] = False
        return super(calendar_event, self).copy(cr, uid, id, default, context)

    def unlink(self, cr, uid, ids, can_be_deleted=False, context=None):
        """Expose the ``can_be_deleted`` knob consumed by the synchro code."""
        return super(calendar_event, self).unlink(cr, uid, ids, can_be_deleted=can_be_deleted, context=context)

    _columns = {
        # Last local modification relevant to Google, set by write()/copy().
        'oe_update_date': fields.datetime('Odoo Update Date'),
    }
class calendar_attendee(osv.Model):
    """Extends calendar.attendee with the Google event id and synchro stamp
    used to pair attendees with their Google Calendar counterparts."""
    _inherit = 'calendar.attendee'

    _columns = {
        # Google-side identifier of the event this attendee belongs to.
        'google_internal_event_id': fields.char('Google Calendar Event Id'),
        # When this attendee was last synchronized with Google.
        'oe_synchro_date': fields.datetime('Odoo Synchro Date'),
    }

    _sql_constraints = [('google_id_uniq', 'unique(google_internal_event_id,partner_id,event_id)', 'Google ID should be unique!')]

    def write(self, cr, uid, ids, vals, context=None):
        """Bump the parent event's ``oe_update_date`` so the next synchro
        picks up attendee changes — except when the write comes from the
        synchronization itself (``curr_attendee``/``NewMeeting`` in context)."""
        if context is None:
            context = {}
        for attendee_id in ids:
            # Fall back to the attendee's current event when the write does
            # not move it to another one. (The browse runs unconditionally,
            # matching the eager evaluation of dict.get's default argument.)
            current_event_id = self.browse(cr, uid, attendee_id, context=context).event_id.id
            ref = vals.get('event_id', current_event_id)
            if not context.get('curr_attendee', False) and not context.get('NewMeeting', False):
                self.pool['calendar.event'].write(cr, uid, ref, {'oe_update_date': datetime.now()}, context)
        return super(calendar_attendee, self).write(cr, uid, ids, vals, context=context)
| agpl-3.0 |
zhaishaomin/LDS-prefetcher-research | gem5_src/arch/x86/isa/insts/simd128/floating_point/arithmetic/horizontal_subtraction.py | 91 | 2160 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Microcode for the SSE3 horizontal-subtraction instructions; the mnemonics
# below are placeholders only — no implementation is provided yet.
microcode = '''
# HSUBPS
# HSUBPD
'''
| apache-2.0 |
pavelchristof/gomoku-ai | tensorflow/python/ops/parsing_ops.py | 2 | 49651 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Parsing Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_parsing_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_parsing_ops import *
# pylint: enable=wildcard-import,undefined-variable
from tensorflow.python.platform import tf_logging
# These ops have no meaningful gradient; registering them as
# non-differentiable keeps the gradient machinery from erroring on them.
ops.NotDifferentiable("DecodeRaw")
ops.NotDifferentiable("ParseTensor")
ops.NotDifferentiable("StringToNumber")
class VarLenFeature(collections.namedtuple("VarLenFeature", ["dtype"])):
  """Configuration for parsing a variable-length input feature.

  Parsing with this config yields a `SparseTensor` holding one ragged row
  of values per input example.

  Fields:
    dtype: Data type of input.
  """
class SparseFeature(
    collections.namedtuple(
        "SparseFeature",
        ["index_key", "value_key", "dtype", "size", "already_sorted"])):
  """Configuration for parsing a sparse input feature from an `Example`.

  Note, preferably use `VarLenFeature` (possibly in combination with a
  `SequenceExample`) in order to parse out `SparseTensor`s instead of
  `SparseFeature` due to its simplicity.

  Parsing an `Example` with this config produces a `SparseTensor` whose
  `values` come from the feature named `value_key` and whose `indices` come
  from the features named in `index_key` — one index feature per dimension
  of the resulting `dense_shape`, which is given by `size`. For example,
  the 2D `SparseTensor`

  ```python
  SparseTensor(indices=[[3, 1], [20, 0]],
               values=[0.5, -1.0]
               dense_shape=[100, 3])
  ```

  is produced from an `Example` carrying features `"val": [0.5, -1.0]`,
  `"ix0": [3, 20]`, `"ix1": [1, 0]` and the config

  ```python
  SparseFeature(index_key=["ix0", "ix1"],
                value_key="val",
                dtype=tf.float32,
                size=[100, 3])
  ```

  Fields:
    index_key: A single string name or a list of string names of index
      features. Each named feature must be `int64` and its length must
      always match that of the `value_key` feature. Use a list of length
      `rank` to represent `SparseTensor`s of `dense_shape` rank higher
      than 1.
    value_key: Name of value feature. The underlying feature's type must be
      `dtype` and its length must always match that of all the
      `index_key`s' features.
    dtype: Data type of the `value_key` feature.
    size: A Python int or list thereof specifying the dense shape. Should
      be a list if and only if `index_key` is a list, and then of the same
      length. All values in the `index_key`[i] feature must lie in
      `[0, size[i])`.
    already_sorted: A Python boolean to specify whether the values in
      `value_key` are already sorted by their index position. If so skip
      sorting. False by default (optional).
  """

  def __new__(cls, index_key, value_key, dtype, size, already_sorted=False):
    # Thin wrapper whose only purpose is defaulting `already_sorted`.
    return super(SparseFeature, cls).__new__(
        cls, index_key, value_key, dtype, size, already_sorted)
class FixedLenFeature(collections.namedtuple(
    "FixedLenFeature", ["shape", "dtype", "default_value"])):
  """Configuration for parsing a fixed-length input feature.

  Provide a `default_value` to treat sparse input as dense; without one,
  the parse functions fail on any example missing this feature.

  Fields:
    shape: Shape of input data.
    dtype: Data type of input.
    default_value: Value to be used if an example is missing this feature.
      It must be compatible with `dtype` and of the specified `shape`.
  """

  def __new__(cls, shape, dtype, default_value=None):
    # Thin wrapper whose only purpose is defaulting `default_value`.
    return super(FixedLenFeature, cls).__new__(
        cls, shape, dtype, default_value)
class FixedLenSequenceFeature(collections.namedtuple(
    "FixedLenSequenceFeature",
    ["shape", "dtype", "allow_missing", "default_value"])):
  """Configuration for parsing a variable-length input feature into a `Tensor`.

  Parsing a single `SequenceExample` or `Example` yields a `Tensor` of
  static shape `[None] + shape` and the specified `dtype`; parsing
  `batch_size` many `Example`s yields `[batch_size, None] + shape`. Batch
  entries from different `Example`s are padded with `default_value` up to
  the maximum length present in the batch.

  Provide `allow_missing=True` to treat a sparse input as dense; otherwise
  the parse functions fail on any example missing this feature.

  Fields:
    shape: Shape of input data for dimension 2 and higher. First dimension
      is of variable length `None`.
    dtype: Data type of input.
    allow_missing: Whether to allow this feature to be missing from a
      feature list item. Is available only for parsing `SequenceExample`
      not for parsing `Examples`.
    default_value: Scalar value to be used to pad multiple `Example`s to
      their maximum length. Irrelevant for parsing a single `Example` or
      `SequenceExample`. Defaults to "" for dtype string and 0 otherwise
      (optional).
  """

  def __new__(cls, shape, dtype, allow_missing=False, default_value=None):
    # Thin wrapper whose only purpose is defaulting the last two fields.
    return super(FixedLenSequenceFeature, cls).__new__(
        cls, shape, dtype, allow_missing, default_value)
def _features_to_raw_params(features, types):
  """Split feature tuples into raw params used by `gen_parsing_ops`.

  Args:
    features: A `dict` mapping feature keys to objects of a type in `types`.
    types: Type of features to allow, among `FixedLenFeature`, `VarLenFeature`,
      `SparseFeature`, and `FixedLenSequenceFeature`.

  Returns:
    Tuple of `sparse_keys`, `sparse_types`, `dense_keys`, `dense_types`,
    `dense_defaults`, `dense_shapes`.

  Raises:
    ValueError: if `features` contains an item not in `types`, or an invalid
      feature.
  """
  sparse_keys = []
  sparse_types = []
  dense_keys = []
  dense_types = []
  dense_defaults = {}
  dense_shapes = []
  if features:
    # NOTE: We iterate over sorted keys to keep things deterministic.
    for key in sorted(features.keys()):
      feature = features[key]
      if isinstance(feature, VarLenFeature):
        # VarLenFeature: one sparse output keyed directly by `key`.
        if VarLenFeature not in types:
          raise ValueError("Unsupported VarLenFeature %s." % feature)
        if not feature.dtype:
          raise ValueError("Missing type for feature %s." % key)
        sparse_keys.append(key)
        sparse_types.append(feature.dtype)
      elif isinstance(feature, SparseFeature):
        # SparseFeature: expands into one int64 sparse output per index key
        # plus one sparse output for the value key. Several SparseFeatures
        # may share keys, so conflicting dtypes are rejected here.
        if SparseFeature not in types:
          raise ValueError("Unsupported SparseFeature %s." % feature)
        if not feature.index_key:
          raise ValueError(
              "Missing index_key for SparseFeature %s." % feature)
        if not feature.value_key:
          raise ValueError(
              "Missing value_key for SparseFeature %s." % feature)
        if not feature.dtype:
          raise ValueError("Missing type for feature %s." % key)
        index_keys = feature.index_key
        if isinstance(index_keys, str):
          # Normalize the single-string form to a one-element list.
          index_keys = [index_keys]
        elif len(index_keys) > 1:
          tf_logging.warning("SparseFeature is a complicated feature config "
                             "and should only be used after careful "
                             "consideration of VarLenFeature.")
        for index_key in sorted(index_keys):
          if index_key in sparse_keys:
            # Key already registered (e.g. shared with another feature):
            # only check dtype consistency, do not append again.
            dtype = sparse_types[sparse_keys.index(index_key)]
            if dtype != dtypes.int64:
              raise ValueError("Conflicting type %s vs int64 for feature %s." %
                               (dtype, index_key))
          else:
            sparse_keys.append(index_key)
            sparse_types.append(dtypes.int64)
        if feature.value_key in sparse_keys:
          dtype = sparse_types[sparse_keys.index(feature.value_key)]
          if dtype != feature.dtype:
            raise ValueError("Conflicting type %s vs %s for feature %s." % (
                dtype, feature.dtype, feature.value_key))
        else:
          sparse_keys.append(feature.value_key)
          sparse_types.append(feature.dtype)
      elif isinstance(feature, FixedLenFeature):
        # FixedLenFeature: dense output; the shape must be fully defined.
        if FixedLenFeature not in types:
          raise ValueError("Unsupported FixedLenFeature %s." % feature)
        if not feature.dtype:
          raise ValueError("Missing type for feature %s." % key)
        if feature.shape is None:
          raise ValueError("Missing shape for feature %s." % key)
        feature_tensor_shape = tensor_shape.as_shape(feature.shape)
        if (feature.shape and feature_tensor_shape.ndims and
            feature_tensor_shape.dims[0].value is None):
          raise ValueError("First dimension of shape for feature %s unknown. "
                           "Consider using FixedLenSequenceFeature." % key)
        if (feature.shape is not None and
            not feature_tensor_shape.is_fully_defined()):
          raise ValueError("All dimensions of shape for feature %s need to be "
                           "known but received %s." % (key, str(feature.shape)))
        dense_keys.append(key)
        dense_shapes.append(feature.shape)
        dense_types.append(feature.dtype)
        if feature.default_value is not None:
          dense_defaults[key] = feature.default_value
      elif isinstance(feature, FixedLenSequenceFeature):
        # FixedLenSequenceFeature: dense output with a variable leading dim;
        # allow_missing is recorded as a None default (possibly overridden
        # by an explicit default_value below).
        if FixedLenSequenceFeature not in types:
          raise ValueError("Unsupported FixedLenSequenceFeature %s." % feature)
        if not feature.dtype:
          raise ValueError("Missing type for feature %s." % key)
        if feature.shape is None:
          raise ValueError("Missing shape for feature %s." % key)
        dense_keys.append(key)
        dense_shapes.append(feature.shape)
        dense_types.append(feature.dtype)
        if feature.allow_missing:
          dense_defaults[key] = None
        if feature.default_value is not None:
          dense_defaults[key] = feature.default_value
      else:
        raise ValueError("Invalid feature %s:%s." % (key, feature))
  return (
      sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
      dense_shapes)
def _construct_sparse_tensors_for_sparse_features(features, tensor_dict):
  """Merges SparseTensors of indices and values of SparseFeatures.

  For every `SparseFeature` in `features`, looks up the `SparseTensor`s
  parsed for its `index_key`(s) and `value_key` in `tensor_dict` and merges
  them into a single `SparseTensor` stored under the feature's own key.
  Entries for other feature types are copied through unchanged, and the
  helper index/value tensors are dropped from the result.

  Args:
    features: A `dict` mapping feature keys to `SparseFeature` values.
      Values of other types will be ignored.
    tensor_dict: A `dict` mapping feature keys to `Tensor` and
      `SparseTensor` values, expected to contain the `SparseFeature`s'
      `index_key`s and `value_key`s mapped to `SparseTensor`s.

  Returns:
    A `dict` like `tensor_dict`, with each `SparseFeature` represented by a
    single merged `SparseTensor`.
  """
  merged = dict(tensor_dict)  # Leave the caller's dict untouched.
  for feature_key in sorted(features.keys()):
    config = features[feature_key]
    if not isinstance(config, SparseFeature):
      continue
    if isinstance(config.index_key, str):
      ids = merged[config.index_key]
    else:
      ids = [merged[index_key] for index_key in config.index_key]
    merged[feature_key] = sparse_ops.sparse_merge(
        ids,
        merged[config.value_key],
        vocab_size=config.size,
        already_sorted=config.already_sorted)
  # Drop the raw index/value tensors that only existed to build the merged
  # SparseTensors above.
  for leftover_key in set(merged) - set(features):
    del merged[leftover_key]
  return merged
def _prepend_none_dimension(features):
  """Returns a copy of `features` with a leading `None` dimension prepended
  to the shape of every `FixedLenSequenceFeature`; other entries — and a
  falsy `features` argument — are returned unchanged.

  Raises:
    ValueError: if a `FixedLenSequenceFeature` has `allow_missing=False`.
  """
  if not features:
    return features
  adjusted = dict(features)  # Never mutate the caller's dict.
  for name, spec in features.items():
    if not isinstance(spec, FixedLenSequenceFeature):
      continue
    if not spec.allow_missing:
      raise ValueError("Unsupported: FixedLenSequenceFeature requires "
                       "allow_missing to be True.")
    adjusted[name] = FixedLenSequenceFeature(
        [None] + list(spec.shape),
        spec.dtype,
        spec.allow_missing,
        spec.default_value)
  return adjusted
def parse_example(serialized, features, name=None, example_names=None):
  # pylint: disable=line-too-long
  """Parses `Example` protos into a `dict` of tensors.

  Parses a batch of serialized
  [`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto)
  protos given in `serialized`, producing a dictionary that maps each key
  of `features` to a `Tensor` or `SparseTensor`:

  * `VarLenFeature` maps to a `SparseTensor` of the specified type
    representing a ragged matrix with indices `[batch, index]`, where
    `batch` identifies the example and `index` the value's position in the
    example's value list.
  * `SparseFeature` maps to a `SparseTensor` of `dense_shape`
    `[batch_size] + SparseFeature.size`, with `values` taken from the
    feature named `value_key` and per-dimension indices taken from the
    features named in `index_key`. Due to its complexity, prefer
    `VarLenFeature` over `SparseFeature` whenever possible.
  * `FixedLenFeature` `df` maps to a dense `Tensor` of shape
    `(serialized.size(),) + df.shape`. Entries with a `default_value` are
    optional; with no default value, parsing fails if the feature is
    missing from any example.
  * `FixedLenSequenceFeature` `df` maps to a dense `Tensor` of shape
    `(serialized.size(), None) + df.shape`; all examples are padded with
    `default_value` along the second dimension.

  For example, three serialized `Example`s whose `tf.float32`
  `VarLenFeature` `ft` holds `[1.0, 2.0]`, nothing, and `[3.0]` produce:

  ```python
  {"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
                      values=[1.0, 2.0, 3.0],
                      dense_shape=(3, 2))}
  ```

  `example_names` may contain descriptive names for the corresponding
  serialized protos. They are useful for debugging only and have no effect
  on the output; if not `None`, the vector must be the same length as
  `serialized`.

  Args:
    serialized: A vector (1-D Tensor) of strings, a batch of binary
      serialized `Example` protos.
    features: A `dict` mapping feature keys to `FixedLenFeature`,
      `VarLenFeature`, and `SparseFeature` values.
    name: A name for this operation (optional).
    example_names: A vector (1-D Tensor) of strings (optional), the names
      of the serialized protos in the batch.

  Returns:
    A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.

  Raises:
    ValueError: if any feature is invalid.
  """
  if not features:
    raise ValueError("Missing: features was %s." % features)
  features = _prepend_none_dimension(features)
  raw_params = _features_to_raw_params(
      features,
      [VarLenFeature, SparseFeature, FixedLenFeature, FixedLenSequenceFeature])
  (sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
   dense_shapes) = raw_params
  raw_outputs = _parse_example_raw(
      serialized, example_names, sparse_keys, sparse_types, dense_keys,
      dense_types, dense_defaults, dense_shapes, name)
  # Collapse the per-key index/value SparseTensors of any SparseFeatures
  # into the single merged SparseTensors the caller expects.
  return _construct_sparse_tensors_for_sparse_features(features, raw_outputs)
def _parse_example_raw(serialized,
                       names=None,
                       sparse_keys=None,
                       sparse_types=None,
                       dense_keys=None,
                       dense_types=None,
                       dense_defaults=None,
                       dense_shapes=None,
                       name=None):
  """Parses `Example` protos.

  Args:
    serialized: A vector (1-D Tensor) of strings, a batch of binary
      serialized `Example` protos.
    names: A vector (1-D Tensor) of strings (optional), the names of
      the serialized protos.
    sparse_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `SparseTensor` objects.
    sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    dense_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `Tensor`s
    dense_types: A list of DTypes of the same length as `dense_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    dense_defaults: A dict mapping string keys to `Tensor`s.
      The keys of the dict must match the dense_keys of the feature.
    dense_shapes: A list of tuples with the same length as `dense_keys`.
      The shape of the data for each dense feature referenced by `dense_keys`.
      Required for any input tensors identified by `dense_keys`. Must be
      either fully defined, or may contain an unknown first dimension.
      An unknown first dimension means the feature is treated as having
      a variable number of blocks, and the output shape along this dimension
      is considered unknown at graph build time. Padding is applied for
      minibatch elements smaller than the maximum number of blocks for the
      given feature along this dimension.
    name: A name for this operation (optional).

  Returns:
    A `dict` mapping keys to `Tensor`s and `SparseTensor`s.

  Raises:
    ValueError: If sparse and dense key sets intersect, or input lengths do not
      match up.
  """
  with ops.name_scope(name, "ParseExample", [serialized, names]):
    names = [] if names is None else names
    dense_defaults = {} if dense_defaults is None else dense_defaults
    sparse_keys = [] if sparse_keys is None else sparse_keys
    sparse_types = [] if sparse_types is None else sparse_types
    dense_keys = [] if dense_keys is None else dense_keys
    dense_types = [] if dense_types is None else dense_types
    # Default each dense feature to a scalar shape.
    dense_shapes = (
        [[]] * len(dense_keys) if dense_shapes is None else dense_shapes)

    num_dense = len(dense_keys)
    num_sparse = len(sparse_keys)

    if len(dense_shapes) != num_dense:
      raise ValueError("len(dense_shapes) != len(dense_keys): %d vs. %d"
                       % (len(dense_shapes), num_dense))
    if len(dense_types) != num_dense:
      # Fixed message: it previously read "len(num_dense)", which names no
      # argument of this function.
      raise ValueError("len(dense_types) != len(dense_keys): %d vs. %d"
                       % (len(dense_types), num_dense))
    if len(sparse_types) != num_sparse:
      raise ValueError("len(sparse_types) != len(sparse_keys): %d vs. %d"
                       % (len(sparse_types), num_sparse))
    if num_dense + num_sparse == 0:
      raise ValueError("Must provide at least one sparse key or dense key")
    if not set(dense_keys).isdisjoint(set(sparse_keys)):
      raise ValueError(
          "Dense and sparse keys must not intersect; intersection: %s" %
          set(dense_keys).intersection(set(sparse_keys)))

    # Convert dense_shapes to TensorShape object.
    dense_shapes = [tensor_shape.as_shape(shape) for shape in dense_shapes]

    dense_defaults_vec = []
    for i, key in enumerate(dense_keys):
      default_value = dense_defaults.get(key)
      dense_shape = dense_shapes[i]
      if (dense_shape.ndims is not None and dense_shape.ndims > 0 and
          dense_shape[0].value is None):
        # Variable stride dense shape, the default value should be a
        # scalar padding value
        if default_value is None:
          default_value = ops.convert_to_tensor(
              "" if dense_types[i] == dtypes.string else 0,
              dtype=dense_types[i])
        else:
          # Reshape to a scalar to ensure user gets an error if they
          # provide a tensor that's not intended to be a padding value
          # (0 or 2+ elements).
          key_name = "padding_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
          default_value = ops.convert_to_tensor(
              default_value, dtype=dense_types[i], name=key_name)
          default_value = array_ops.reshape(default_value, [])
      else:
        if default_value is None:
          default_value = constant_op.constant([], dtype=dense_types[i])
        elif not isinstance(default_value, ops.Tensor):
          # Keys may contain characters that are illegal in op names;
          # sanitize them before using the key as a name hint.
          key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
          default_value = ops.convert_to_tensor(
              default_value, dtype=dense_types[i], name=key_name)
          default_value = array_ops.reshape(default_value, dense_shape)

      dense_defaults_vec.append(default_value)

    # Finally, convert dense_shapes to TensorShapeProto
    dense_shapes = [shape.as_proto() for shape in dense_shapes]

    # pylint: disable=protected-access
    outputs = gen_parsing_ops._parse_example(
        serialized=serialized,
        names=names,
        dense_defaults=dense_defaults_vec,
        sparse_keys=sparse_keys,
        sparse_types=sparse_types,
        dense_keys=dense_keys,
        dense_shapes=dense_shapes,
        name=name)
    # pylint: enable=protected-access

    (sparse_indices, sparse_values, sparse_shapes, dense_values) = outputs

    sparse_tensors = [
        sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
        in zip(sparse_indices, sparse_values, sparse_shapes)]

    return dict(zip(sparse_keys + dense_keys, sparse_tensors + dense_values))
def parse_single_example(serialized, features, name=None, example_names=None):
  """Parses a single `Example` proto.

  Behaves like `parse_example` applied to a batch of size one, except that
  the outputs carry no batch dimension: dense values have exactly the shape
  given in `dense_shape`, and for `SparseTensor`s the leading (batch) column
  of the indices matrix and the leading entry of the dense shape vector are
  removed.

  Batching `Example` protos and calling `parse_example` is usually faster
  than calling this function repeatedly.

  Args:
    serialized: A scalar string Tensor, a single serialized Example.
      See `_parse_single_example_raw` documentation for more details.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    name: A name for this operation (optional).
    example_names: (Optional) A scalar string Tensor, the associated name.
      See `_parse_single_example_raw` documentation for more details.

  Returns:
    A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.

  Raises:
    ValueError: if any feature is invalid.
  """
  if not features:
    raise ValueError("Missing features.")
  # Normalize feature shapes before translating the feature configuration
  # into the flat parameter lists expected by the raw parsing helper.
  features = _prepend_none_dimension(features)
  raw_params = _features_to_raw_params(
      features,
      [VarLenFeature, FixedLenFeature, FixedLenSequenceFeature, SparseFeature])
  (sparse_keys, sparse_types, dense_keys, dense_types, dense_defaults,
   dense_shapes) = raw_params
  raw_outputs = _parse_single_example_raw(
      serialized, example_names, sparse_keys, sparse_types, dense_keys,
      dense_types, dense_defaults, dense_shapes, name)
  # Re-assemble any SparseFeature outputs from their component tensors.
  return _construct_sparse_tensors_for_sparse_features(features, raw_outputs)
def _ensure_scalar_input(tensor, error_msg, assert_name, deps_name):
  """Validates that `tensor` is a scalar, statically when possible.

  If the rank is statically known, raises ValueError on a non-scalar and
  returns the tensor untouched otherwise.  If the rank is unknown at graph
  construction time, attaches a runtime rank-0 Assert instead.

  Args:
    tensor: The `Tensor` to check.
    error_msg: Message for both the static ValueError and the runtime Assert.
    assert_name: Name for the Assert op.
    deps_name: Name for the `with_dependencies` identity op.

  Returns:
    `tensor`, possibly gated on the runtime rank assertion.

  Raises:
    ValueError: if `tensor` is statically known to be non-scalar.
  """
  shape = tensor.get_shape()
  if shape.ndims is not None:
    if shape.ndims != 0:
      raise ValueError(error_msg)
    return tensor
  return control_flow_ops.with_dependencies(
      [control_flow_ops.Assert(
          math_ops.equal(array_ops.rank(tensor), 0),
          [error_msg],
          name=assert_name)],
      tensor,
      name=deps_name)


def _parse_single_example_raw(serialized,
                              names=None,
                              sparse_keys=None,
                              sparse_types=None,
                              dense_keys=None,
                              dense_types=None,
                              dense_defaults=None,
                              dense_shapes=None,
                              name=None):
  """Parses a single `Example` proto.

  Args:
    serialized: A scalar string Tensor, a single serialized Example.
      See `_parse_example_raw` documentation for more details.
    names: (Optional) A scalar string Tensor, the associated name.
      See `_parse_example_raw` documentation for more details.
    sparse_keys: See `_parse_example_raw` documentation for more details.
    sparse_types: See `_parse_example_raw` documentation for more details.
    dense_keys: See `_parse_example_raw` documentation for more details.
    dense_types: See `_parse_example_raw` documentation for more details.
    dense_defaults: See `_parse_example_raw` documentation for more details.
    dense_shapes: See `_parse_example_raw` documentation for more details.
    name: A name for this operation (optional).

  Returns:
    A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.

  Raises:
    ValueError: if any feature is invalid.
  """
  with ops.name_scope(name, "ParseSingleExample", [serialized, names]):
    # Both inputs must be scalars; the batched _parse_example_raw below
    # expects vectors, so each is expanded to a length-1 batch.  The
    # previously duplicated validation logic now lives in
    # _ensure_scalar_input.
    serialized = ops.convert_to_tensor(serialized)
    serialized = _ensure_scalar_input(
        serialized, "Input serialized must be a scalar",
        "SerializedIsScalar", "SerializedDependencies")
    serialized = array_ops.expand_dims(serialized, 0)
    if names is not None:
      names = ops.convert_to_tensor(names)
      names = _ensure_scalar_input(
          names, "Input names must be a scalar",
          "NamesIsScalar", "NamesDependencies")
      names = array_ops.expand_dims(names, 0)
    outputs = _parse_example_raw(
        serialized,
        names=names,
        sparse_keys=sparse_keys,
        sparse_types=sparse_types,
        dense_keys=dense_keys,
        dense_types=dense_types,
        dense_defaults=dense_defaults,
        dense_shapes=dense_shapes,
        name=name)
    # Strip the synthetic batch dimension from every output: squeeze dense
    # values, and drop the leading (batch) indices column and shape entry
    # from sparse values.
    if dense_keys is not None:
      for d in dense_keys:
        d_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", d)
        outputs[d] = array_ops.squeeze(
            outputs[d], [0], name="Squeeze_%s" % d_name)
    if sparse_keys is not None:
      for s in sparse_keys:
        s_name = re.sub("[^A-Za-z0-9_.\\-/]", "_", s)
        outputs[s] = sparse_tensor.SparseTensor(
            array_ops.slice(outputs[s].indices,
                            [0, 1], [-1, -1], name="Slice_Indices_%s" % s_name),
            outputs[s].values,
            array_ops.slice(outputs[s].dense_shape,
                            [1], [-1], name="Squeeze_Shape_%s" % s_name))
    return outputs
def parse_single_sequence_example(
    serialized, context_features=None, sequence_features=None,
    example_name=None, name=None):
  """Parses a single `SequenceExample` proto.

  The serialized proto is parsed into a tuple of two dictionaries: the first
  maps the keys of `context_features` (features associated with the example
  as a whole), the second maps the keys of `sequence_features` (per-frame
  features from the proto's `FeatureList` section).  At least one of the two
  feature dicts must be provided and non-empty.

  In `context_features`, each `VarLenFeature` becomes a `SparseTensor` and
  each `FixedLenFeature` becomes a `Tensor` of the configured type, shape,
  and default value.

  In `sequence_features`, each `VarLenFeature` becomes a `SparseTensor`
  representing a ragged vector with indices `[time, index]`, and each
  `FixedLenSequenceFeature` `df` becomes a `Tensor` of static shape
  `(None,) + df.dense_shape` and dynamic shape `(T,) + df.dense_shape`,
  where `T` is the length of the associated `FeatureList`.

  `FixedLenFeature` entries with a `default_value` and
  `FixedLenSequenceFeature` entries with `allow_missing=True` are optional;
  otherwise parsing fails if the `Feature` or `FeatureList` is missing.

  Args:
    serialized: A scalar (0-D Tensor) of type string, a single binary
      serialized `SequenceExample` proto.
    context_features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values. These features are associated with a
      `SequenceExample` as a whole.
    sequence_features: A `dict` mapping feature keys to
      `FixedLenSequenceFeature` or `VarLenFeature` values. These features are
      associated with data within the `FeatureList` section of the
      `SequenceExample` proto.
    example_name: A scalar (0-D Tensor) of strings (optional), the name of
      the serialized proto; useful for debugging, has no effect on output.
    name: A name for this operation (optional).

  Returns:
    A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
    The first dict contains the context key/values.
    The second dict contains the feature_list key/values.

  Raises:
    ValueError: if any feature is invalid.
  """
  if not (context_features or sequence_features):
    raise ValueError("Missing features.")
  # Translate both feature dicts into the flat parameter lists expected by
  # the raw parsing helper.
  (ctx_sparse_keys, ctx_sparse_types, ctx_dense_keys, ctx_dense_types,
   ctx_dense_defaults, ctx_dense_shapes) = _features_to_raw_params(
       context_features, [VarLenFeature, FixedLenFeature])
  (fl_sparse_keys, fl_sparse_types, fl_dense_keys, fl_dense_types,
   fl_dense_defaults, fl_dense_shapes) = _features_to_raw_params(
       sequence_features, [VarLenFeature, FixedLenSequenceFeature])
  return _parse_single_sequence_example_raw(
      serialized, ctx_sparse_keys, ctx_sparse_types, ctx_dense_keys,
      ctx_dense_types, ctx_dense_defaults, ctx_dense_shapes,
      fl_sparse_keys, fl_sparse_types, fl_dense_keys, fl_dense_types,
      fl_dense_shapes, fl_dense_defaults, example_name, name)
def _parse_single_sequence_example_raw(serialized,
                                       context_sparse_keys=None,
                                       context_sparse_types=None,
                                       context_dense_keys=None,
                                       context_dense_types=None,
                                       context_dense_defaults=None,
                                       context_dense_shapes=None,
                                       feature_list_sparse_keys=None,
                                       feature_list_sparse_types=None,
                                       feature_list_dense_keys=None,
                                       feature_list_dense_types=None,
                                       feature_list_dense_shapes=None,
                                       feature_list_dense_defaults=None,
                                       debug_name=None,
                                       name=None):
  """Parses a single `SequenceExample` proto.

  Args:
    serialized: A scalar (0-D Tensor) of type string, a single binary
      serialized `SequenceExample` proto.
    context_sparse_keys: A list of string keys in the `SequenceExample`'s
      features.  The results for these keys will be returned as
      `SparseTensor` objects.
    context_sparse_types: A list of `DTypes`, the same length as `sparse_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    context_dense_keys: A list of string keys in the examples' features.
      The results for these keys will be returned as `Tensor`s
    context_dense_types: A list of DTypes, same length as `context_dense_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    context_dense_defaults: A dict mapping string keys to `Tensor`s.
      The keys of the dict must match the context_dense_keys of the feature.
    context_dense_shapes: A list of tuples, same length as `context_dense_keys`.
      The shape of the data for each context_dense feature referenced by
      `context_dense_keys`.  Required for any input tensors identified by
      `context_dense_keys` whose shapes are anything other than `[]` or `[1]`.
    feature_list_sparse_keys: A list of string keys in the `SequenceExample`'s
      feature_lists.  The results for these keys will be returned as
      `SparseTensor` objects.
    feature_list_sparse_types: A list of `DTypes`, same length as `sparse_keys`.
      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
      and `tf.string` (`BytesList`) are supported.
    feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
      features_lists. The results for these keys will be returned as `Tensor`s.
    feature_list_dense_types: A list of `DTypes`, same length as
      `feature_list_dense_keys`.  Only `tf.float32` (`FloatList`),
      `tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
    feature_list_dense_shapes: A list of tuples, same length as
      `feature_list_dense_keys`.  The shape of the data for each
      `FeatureList` feature referenced by `feature_list_dense_keys`.
    feature_list_dense_defaults: A dict mapping key strings to values.
      The only currently allowed value is `None`.  Any key appearing
      in this dict with value `None` is allowed to be missing from the
      `SequenceExample`.  If missing, the key is treated as zero-length.
    debug_name: A scalar (0-D Tensor) of strings (optional), the name of
      the serialized proto.
    name: A name for this operation (optional).

  Returns:
    A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
    The first dict contains the context key/values.
    The second dict contains the feature_list key/values.

  Raises:
    ValueError: If context_sparse and context_dense key sets intersect,
      if input lengths do not match up, or if a value in
      feature_list_dense_defaults is not None.
    TypeError: if feature_list_dense_defaults is not either None or a dict.
  """
  with ops.name_scope(name, "ParseSingleSequenceExample", [serialized]):
    # Normalize all optional arguments to empty containers (or, for shapes,
    # to per-key scalar shapes) so the validation below is uniform.
    context_dense_defaults = (
        {} if context_dense_defaults is None else context_dense_defaults)
    context_sparse_keys = (
        [] if context_sparse_keys is None else context_sparse_keys)
    context_sparse_types = (
        [] if context_sparse_types is None else context_sparse_types)
    context_dense_keys = (
        [] if context_dense_keys is None else context_dense_keys)
    context_dense_types = (
        [] if context_dense_types is None else context_dense_types)
    context_dense_shapes = (
        [[]] * len(context_dense_keys)
        if context_dense_shapes is None else context_dense_shapes)
    feature_list_sparse_keys = (
        [] if feature_list_sparse_keys is None else feature_list_sparse_keys)
    feature_list_sparse_types = (
        [] if feature_list_sparse_types is None else feature_list_sparse_types)
    feature_list_dense_keys = (
        [] if feature_list_dense_keys is None else feature_list_dense_keys)
    feature_list_dense_types = (
        [] if feature_list_dense_types is None else feature_list_dense_types)
    feature_list_dense_shapes = (
        [[]] * len(feature_list_dense_keys)
        if feature_list_dense_shapes is None else feature_list_dense_shapes)
    feature_list_dense_defaults = (
        dict() if feature_list_dense_defaults is None
        else feature_list_dense_defaults)
    debug_name = "" if debug_name is None else debug_name

    # Internal
    feature_list_dense_missing_assumed_empty = []

    num_context_dense = len(context_dense_keys)
    num_feature_list_dense = len(feature_list_dense_keys)
    num_context_sparse = len(context_sparse_keys)
    num_feature_list_sparse = len(feature_list_sparse_keys)

    # Cross-check that every parallel list has a consistent length.
    if len(context_dense_shapes) != num_context_dense:
      raise ValueError(
          "len(context_dense_shapes) != len(context_dense_keys): %d vs. %d"
          % (len(context_dense_shapes), num_context_dense))
    if len(context_dense_types) != num_context_dense:
      # BUG FIX: the message previously read "len(num_context_dense)",
      # naming an int rather than the offending keys list.
      raise ValueError(
          "len(context_dense_types) != len(context_dense_keys): %d vs. %d"
          % (len(context_dense_types), num_context_dense))
    if len(feature_list_dense_shapes) != num_feature_list_dense:
      raise ValueError(
          "len(feature_list_dense_shapes) != len(feature_list_dense_keys): "
          "%d vs. %d" % (len(feature_list_dense_shapes),
                         num_feature_list_dense))
    if len(feature_list_dense_types) != num_feature_list_dense:
      # BUG FIX: same wrong name as above, plus a missing space before the
      # "%d vs. %d" counts.
      raise ValueError(
          "len(feature_list_dense_types) != len(feature_list_dense_keys): "
          "%d vs. %d" % (len(feature_list_dense_types),
                         num_feature_list_dense))
    if len(context_sparse_types) != num_context_sparse:
      raise ValueError(
          "len(context_sparse_types) != len(context_sparse_keys): %d vs. %d"
          % (len(context_sparse_types), num_context_sparse))
    if len(feature_list_sparse_types) != num_feature_list_sparse:
      raise ValueError(
          "len(feature_list_sparse_types) != len(feature_list_sparse_keys): "
          "%d vs. %d"
          % (len(feature_list_sparse_types), num_feature_list_sparse))
    if (num_context_dense + num_context_sparse
        + num_feature_list_dense + num_feature_list_sparse) == 0:
      # BUG FIX: removed a stray ", " that doubled the comma in the message.
      raise ValueError(
          "Must provide at least one context_sparse key, context_dense key, "
          "feature_list_sparse key, or feature_list_dense key")
    if not set(context_dense_keys).isdisjoint(set(context_sparse_keys)):
      raise ValueError(
          "context_dense and context_sparse keys must not intersect; "
          "intersection: %s" %
          set(context_dense_keys).intersection(set(context_sparse_keys)))
    if not set(feature_list_dense_keys).isdisjoint(
        set(feature_list_sparse_keys)):
      raise ValueError(
          "feature_list_dense and feature_list_sparse keys must not intersect; "
          "intersection: %s" %
          set(feature_list_dense_keys).intersection(
              set(feature_list_sparse_keys)))
    if not isinstance(feature_list_dense_defaults, dict):
      raise TypeError("feature_list_dense_defaults must be a dict")
    for k, v in feature_list_dense_defaults.items():
      if v is not None:
        raise ValueError("Value feature_list_dense_defaults[%s] must be None"
                         % k)
      feature_list_dense_missing_assumed_empty.append(k)

    # Build the vector of context dense default values, converting and
    # reshaping non-Tensor defaults to the declared context dense shapes.
    context_dense_defaults_vec = []
    for i, key in enumerate(context_dense_keys):
      default_value = context_dense_defaults.get(key)
      if default_value is None:
        default_value = constant_op.constant([], dtype=context_dense_types[i])
      elif not isinstance(default_value, ops.Tensor):
        key_name = "key_" + re.sub("[^A-Za-z0-9_.\\-/]", "_", key)
        default_value = ops.convert_to_tensor(
            default_value, dtype=context_dense_types[i], name=key_name)
        default_value = array_ops.reshape(
            default_value, context_dense_shapes[i])

      context_dense_defaults_vec.append(default_value)

    # The generated op expects shapes as TensorShapeProtos.
    context_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
                            for shape in context_dense_shapes]
    feature_list_dense_shapes = [tensor_shape.as_shape(shape).as_proto()
                                 for shape in feature_list_dense_shapes]

    # pylint: disable=protected-access
    outputs = gen_parsing_ops._parse_single_sequence_example(
        serialized=serialized,
        debug_name=debug_name,
        context_dense_defaults=context_dense_defaults_vec,
        context_sparse_keys=context_sparse_keys,
        context_sparse_types=context_sparse_types,
        context_dense_keys=context_dense_keys,
        context_dense_shapes=context_dense_shapes,
        feature_list_sparse_keys=feature_list_sparse_keys,
        feature_list_sparse_types=feature_list_sparse_types,
        feature_list_dense_keys=feature_list_dense_keys,
        feature_list_dense_types=feature_list_dense_types,
        feature_list_dense_shapes=feature_list_dense_shapes,
        feature_list_dense_missing_assumed_empty=(
            feature_list_dense_missing_assumed_empty),
        name=name)
    # pylint: enable=protected-access

    (context_sparse_indices, context_sparse_values,
     context_sparse_shapes, context_dense_values,
     feature_list_sparse_indices, feature_list_sparse_values,
     feature_list_sparse_shapes, feature_list_dense_values) = outputs

    # Reassemble the component tensors into SparseTensors and key both
    # result dicts back by feature name.
    context_sparse_tensors = [
        sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
        in zip(context_sparse_indices,
               context_sparse_values,
               context_sparse_shapes)]

    feature_list_sparse_tensors = [
        sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
        in zip(feature_list_sparse_indices,
               feature_list_sparse_values,
               feature_list_sparse_shapes)]

    context_output = dict(
        zip(context_sparse_keys + context_dense_keys,
            context_sparse_tensors + context_dense_values))
    feature_list_output = dict(
        zip(feature_list_sparse_keys + feature_list_dense_keys,
            feature_list_sparse_tensors + feature_list_dense_values))

    return (context_output, feature_list_output)
| apache-2.0 |
prutseltje/ansible | lib/ansible/modules/cloud/amazon/ecs_task.py | 18 | 13622 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_task
short_description: run, start or stop a task in ecs
description:
- Creates or deletes instances of task definitions.
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, botocore, boto3 ]
options:
operation:
description:
- Which task operation to execute
required: True
choices: ['run', 'start', 'stop']
cluster:
description:
- The name of the cluster to run the task on
required: False
task_definition:
description:
- The task definition to start or run
required: False
overrides:
description:
- A dictionary of values to pass to the new instances
required: False
count:
description:
- How many new instances to start
required: False
task:
description:
- The task to stop
required: False
container_instances:
description:
- The list of container instances on which to deploy the task
required: False
started_by:
description:
- A value showing who or what started the task (for informational purposes)
required: False
network_configuration:
description:
- network configuration of the service. Only applicable for task definitions created with C(awsvpc) I(network_mode).
- I(network_configuration) has two keys, I(subnets), a list of subnet IDs to which the task is attached and I(security_groups),
a list of group names or group IDs for the task
version_added: 2.6
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple example of run task
- name: Run task
ecs_task:
operation: run
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
count: 1
started_by: ansible_user
register: task_output
# Simple example of start task
- name: Start a task
ecs_task:
operation: start
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
container_instances:
- arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
started_by: ansible_user
network_configuration:
subnets:
- subnet-abcd1234
security_groups:
- sg-aaaa1111
- my_security_group
register: task_output
- name: Stop a task
ecs_task:
operation: stop
cluster: console-sample-app-static-cluster
task_definition: console-sample-app-static-taskdef
task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
'''
# Documentation of the module's return values, rendered by ansible-doc.
# Typos fixed: "tast" -> "task", "of the of the" -> "of the",
# "The used who" -> "The user who".
RETURN = '''
task:
    description: details about the task that was started
    returned: success
    type: complex
    contains:
        taskArn:
            description: The Amazon Resource Name (ARN) that identifies the task.
            returned: always
            type: string
        clusterArn:
            description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
            returned: only when details is true
            type: string
        taskDefinitionArn:
            description: The Amazon Resource Name (ARN) of the task definition.
            returned: only when details is true
            type: string
        containerInstanceArn:
            description: The Amazon Resource Name (ARN) of the container running the task.
            returned: only when details is true
            type: string
        overrides:
            description: The container overrides set for this task.
            returned: only when details is true
            type: list of complex
        lastStatus:
            description: The last recorded status of the task.
            returned: only when details is true
            type: string
        desiredStatus:
            description: The desired status of the task.
            returned: only when details is true
            type: string
        containers:
            description: The container details.
            returned: only when details is true
            type: list of complex
        startedBy:
            description: The user who started the task.
            returned: only when details is true
            type: string
        stoppedReason:
            description: The reason why the task was stopped.
            returned: only when details is true
            type: string
        createdAt:
            description: The timestamp of when the task was created.
            returned: only when details is true
            type: string
        startedAt:
            description: The timestamp of when the task was started.
            returned: only when details is true
            type: string
        stoppedAt:
            description: The timestamp of when the task was stopped.
            returned: only when details is true
            type: string
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_ec2_security_group_ids_from_names
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
class EcsExecManager:
    """Thin wrapper around the boto3 ECS client for task operations."""

    def __init__(self, module):
        self.module = module
        self.ecs = module.client('ecs')
        # The EC2 client is only needed to resolve security group names to IDs.
        self.ec2 = module.client('ec2')

    def format_network_configuration(self, network_config):
        """Translate the module's network_configuration dict into the
        awsvpcConfiguration structure expected by the ECS API.

        Fails the module if no subnets are given or a security group name
        cannot be resolved.
        """
        result = dict()
        if 'subnets' in network_config:
            result['subnets'] = network_config['subnets']
        else:
            self.module.fail_json(msg="Network configuration must include subnets")
        if 'security_groups' in network_config:
            groups = network_config['security_groups']
            if any(not sg.startswith('sg-') for sg in groups):
                # Group names were given instead of IDs; resolve them within
                # the VPC of the first subnet.
                try:
                    vpc_id = self.ec2.describe_subnets(SubnetIds=[result['subnets'][0]])['Subnets'][0]['VpcId']
                    groups = get_ec2_security_group_ids_from_names(groups, self.ec2, vpc_id)
                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                    self.module.fail_json_aws(e, msg="Couldn't look up security groups")
            result['securityGroups'] = groups
        return dict(awsvpcConfiguration=result)

    def list_tasks(self, cluster_name, service_name, status):
        """Return the ARN of the first task in `service_name`'s family with
        the given desired status, or None when there is no such task."""
        response = self.ecs.list_tasks(
            cluster=cluster_name,
            family=service_name,
            desiredStatus=status
        )
        if len(response['taskArns']) > 0:
            for c in response['taskArns']:
                if c.endswith(service_name):
                    return c
        return None

    def run_task(self, cluster, task_definition, overrides, count, startedBy):
        """Run `count` instances of a task definition; returns the started tasks."""
        if overrides is None:
            overrides = dict()
        params = dict(cluster=cluster, taskDefinition=task_definition,
                      overrides=overrides, count=count, startedBy=startedBy)
        if self.module.params['network_configuration']:
            params['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
        try:
            response = self.ecs.run_task(**params)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't run task")
        # include tasks and failures
        return response['tasks']

    def start_task(self, cluster, task_definition, overrides, container_instances, startedBy):
        """Start a task definition on specific container instances."""
        args = dict()
        if cluster:
            args['cluster'] = cluster
        if task_definition:
            args['taskDefinition'] = task_definition
        if overrides:
            args['overrides'] = overrides
        if container_instances:
            args['containerInstances'] = container_instances
        if startedBy:
            args['startedBy'] = startedBy
        if self.module.params['network_configuration']:
            args['networkConfiguration'] = self.format_network_configuration(self.module.params['network_configuration'])
        try:
            response = self.ecs.start_task(**args)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't start task")
        # include tasks and failures
        return response['tasks']

    def stop_task(self, cluster, task):
        """Stop a running task; returns its final description.

        CONSISTENCY FIX: run_task/start_task already translate AWS errors
        into fail_json_aws; stop_task previously let them escape as raw
        tracebacks.
        """
        try:
            response = self.ecs.stop_task(cluster=cluster, task=task)
        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
            self.module.fail_json_aws(e, msg="Couldn't stop task")
        return response['task']

    def ecs_api_handles_network_configuration(self):
        # Returns True when the installed botocore accepts the
        # networkConfiguration keyword argument.
        from distutils.version import LooseVersion
        # There doesn't seem to be a nice way to inspect botocore to look
        # for attributes (and networkConfiguration is not an explicit argument
        # to e.g. ecs.run_task, it's just passed as a keyword argument)
        return LooseVersion(botocore.__version__) >= LooseVersion('1.7.44')
def main():
    """Entry point: validate module parameters and dispatch the requested
    ECS task operation (run/start/stop)."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=False, type='str'),  # R S P
        task_definition=dict(required=False, type='str'),  # R* S*
        overrides=dict(required=False, type='dict'),  # R S
        count=dict(required=False, type='int'),  # R
        task=dict(required=False, type='str'),  # P*
        container_instances=dict(required=False, type='list'),  # S*
        started_by=dict(required=False, type='str'),  # R S
        network_configuration=dict(required=False, type='dict')
    ))

    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)

    # Validate Inputs.
    # BUG FIX: AnsibleModule populates every declared option in
    # module.params (defaulting to None), so the original check
    # `'x' not in module.params and module.params['x'] is None` was always
    # False and the validation never fired. Checking for None directly
    # restores the intended behavior.
    if module.params['operation'] == 'run':
        if module.params.get('task_definition') is None:
            module.fail_json(msg="To run a task, a task_definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if module.params['operation'] == 'start':
        if module.params.get('task_definition') is None:
            module.fail_json(msg="To start a task, a task_definition must be specified")
        if module.params.get('container_instances') is None:
            module.fail_json(msg="To start a task, container instances must be specified")
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if module.params['operation'] == 'stop':
        if module.params.get('task') is None:
            module.fail_json(msg="To stop a task, a task must be specified")
        if module.params.get('task_definition') is None:
            module.fail_json(msg="To stop a task, a task definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)

    if module.params['network_configuration'] and not service_mgr.ecs_api_handles_network_configuration():
        module.fail_json(msg='botocore needs to be version 1.7.44 or higher to use network configuration')

    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)

    results = dict(changed=False)
    if module.params['operation'] == 'run':
        if existing:
            # A matching RUNNING task already exists; report it unchanged.
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'])
            results['changed'] = True

    elif module.params['operation'] == 'start':
        if existing:
            # A matching RUNNING task already exists; report it unchanged.
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by']
                )
            results['changed'] = True

    elif module.params['operation'] == 'stop':
        if existing:
            # A matching task is already STOPPED; nothing to do.
            results['task'] = existing
        else:
            if not module.check_mode:
                # Stop the running task and report the change.
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'],
                    module.params['task']
                )
            results['changed'] = True

    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
nels83/android_kernel_common | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """distutils build_ext that writes its outputs where the perf Makefile
    asked, via the module-level build_lib/build_tmp paths taken from the
    PYTHON_EXTBUILD_* environment variables."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Override distutils' computed defaults with the Makefile-provided
        # output locations.
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """distutils install_lib that installs from the Makefile-provided build
    directory instead of distutils' default location."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        # Install from the same directory build_ext wrote into.
        self.build_dir = build_lib
# Base compiler flags; any extra CFLAGS handed down from the environment
# (set by the perf Makefile) are appended.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Output locations are dictated by the perf build system via environment
# variables rather than distutils defaults (consumed by the classes above).
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# util/python-ext-sources lists one C source file per line; blank lines and
# '#'-comment lines are skipped.  NOTE: `file()` is Python 2 only.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
                if len(f.strip()) > 0 and f[0] != '#']

# The 'perf' extension module exposes the perf infrastructure to Python.
perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
		 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
tarc/gyp | tools/graphviz.py | 2679 | 2878 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
  """Split a 'build_file:target_name#suffix' spec into its three parts.

  Missing pieces come back as empty strings.
  """
  rest, _, suffix = target.partition('#')
  build_file, _, target_name = rest.partition(':')
  return build_file, target_name, suffix
def LoadEdges(filename, targets):
  """Load the edges map from |filename| (a dump-dependency-json dump), and
  filter it to only show targets in |targets| and their dependents."""

  # BUG FIX: the |filename| argument was previously ignored in favor of a
  # hard-coded 'dump.json' (and the handle, bound to the shadowed builtin
  # name `file`, leaked if json.load raised).  Honor the caller's path and
  # let `with` guarantee the file is closed.
  with open(filename) as f:
    edges = json.load(f)

  # Copy out only the edges we're interested in from the full edge list,
  # walking outward from |targets| through their dependencies.
  target_edges = {}
  to_visit = targets[:]
  while to_visit:
    src = to_visit.pop()
    if src in target_edges:
      continue
    target_edges[src] = edges[src]
    to_visit.extend(edges[src])
  return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
  # With no targets given, print the module docstring as usage and fail.
  if len(sys.argv) < 2:
    print >>sys.stderr, __doc__
    print >>sys.stderr
    print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
    return 1
  # The dump-dependency-json generator always writes 'dump.json'.
  edges = LoadEdges('dump.json', sys.argv[1:])
  WriteGraph(edges)
  return 0
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
eneldoserrata/marcos_openerp | oemedical/oemedical_his/__init__.py | 10 | 1141 | # -*- coding: utf-8 -*-
#/#############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.techreceptives.com>)
# Special Credit and Thanks to Thymbra Latinoamericana S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
import models
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jballanc/openmicroscopy | components/tools/OmeroPy/test/unit/tablestest/test_servants.py | 3 | 11478 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test of the Tables facility independent of Ice.
Copyright 2009 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import pytest
import Ice
import omero, omero.tables
import omero_ext.uuid as uuid # see ticket:3774
import sys, os, logging
import library as lib
from omero.columns import *
from path import path
logging.basicConfig(level=logging.DEBUG)
class communicator_provider(object):
    """Callable stub that always hands back one fixed communicator."""

    def __init__(self, ic=None):
        self.ic = ic

    def __call__(self, *args):
        # Whatever arguments the caller passes are ignored; the canned
        # communicator set at construction time is returned unchanged.
        return self.ic
class mock_communicator(object):
    """Wraps a real Ice communicator so the omero column object factories
    get registered, while letting tests override proxy handling."""
    def __init__(self):
        self.delegate = Ice.initialize()
        for of in ObjectFactories.values():
            of.register(self.delegate) # Columns
    # Delegated
    def getProperties(self):
        return self.delegate.getProperties()
    def findObjectFactory(self, s):
        return self.delegate.findObjectFactory(s)
    # Overridden
    def stringToProxy(self, arg):
        # Tests treat proxy strings as the proxies themselves (no lookup).
        return arg
class mock_current(object):
    """Stand-in for Ice.Current: supplies an adapter and an empty call
    context dict."""
    def __init__(self, communicator):
        self.adapter = mock_adapter(communicator)
        self.ctx = {}
class mock_adapter(object):
    """Fake Ice object adapter: servant registration is an identity op."""
    def __init__(self, communicator):
        self.ic = communicator
    def addWithUUID(self, arg):
        # Pretend to register the servant; just echo it back.
        return arg
    def add(self, arg, id):
        # Same for identity-based registration; the id is ignored.
        return arg
    def getCommunicator(self):
        return self.ic
class mocked_internal_service_factory(object):
    """Factory callable returning a canned service factory.

    With no argument a default mocked_service_factory is built lazily.
    Passing a falsy non-None value makes every call raise, simulating a
    failure to connect to the server.
    """

    def __init__(self, sf=None):
        # Idiom fix: identity test (`is None`) instead of `== None`.
        if sf is None:
            sf = mocked_service_factory()
        self.sf = sf

    def __call__(self, *args, **kwargs):
        if not self.sf:
            raise Exception("Mock error connecting to server")
        return self.sf
class mocked_service_factory(object):
    """Canned server-side service factory: a fresh database uuid plus a
    shared queue of scripted return values consumed by the stub services."""
    def __init__(self):
        self.db_uuid = str(uuid.uuid4())
        self.return_values = []
    def keepAlive(self, *args):
        pass
    def getAdminService(self):
        # Admin service that always permits file updates.
        return mocked_admin_service(True)
    def getConfigService(self):
        # Shares self.return_values, so tests script responses centrally.
        return mocked_config_service(self.db_uuid, self.return_values)
    def getQueryService(self):
        return mocked_query_service(self.return_values)
    def destroy(self):
        pass
class mocked_admin_service(object):
    """Admin-service stub whose canUpdate() answer is fixed at creation."""

    def __init__(self, can_update):
        self.can_update = can_update

    def canUpdate(self, file_obj, call_context=None):
        # The file object and call context play no part in the decision.
        return self.can_update
class mocked_config_service(object):
    """Config-service stub fed by a scripted list of return values."""
    def __init__(self, db_uuid, return_values):
        self.db_uuid = db_uuid
        self.return_values = return_values
    def getDatabaseUuid(self):
        return self.db_uuid
    def getConfigValue(self, str):
        # NOTE(review): the parameter shadows the builtin 'str'.
        # Pop the next scripted value; a ServerError instance is raised
        # instead of returned, letting tests script server failures.
        rv = self.return_values.pop(0)
        if isinstance(rv, omero.ServerError):
            raise rv
        else:
            return rv
class mocked_query_service(object):
    """Query-service stub; findByQuery and get share one scripted queue."""
    def __init__(self, return_values):
        self.return_values = return_values
    def findByQuery(self, *args):
        # Pop-or-raise protocol: ServerError instances are raised.
        rv = self.return_values.pop(0)
        if isinstance(rv, omero.ServerError):
            raise rv
        else:
            return rv
    def get(self, *args):
        # NOTE(review): identical body to findByQuery; candidate for a
        # shared private helper.
        rv = self.return_values.pop(0)
        if isinstance(rv, omero.ServerError):
            raise rv
        else:
            return rv
class mock_internal_repo(object):
    """Internal-repository stub rooted at *dir*, acting as its own proxy.

    *dir* must support the '/' join operator (a path-like object)."""

    def __init__(self, dir):
        self.path = dir / "mock.h5"

    def __call__(self, *args):
        # Calling the stub as a factory yields the stub itself.
        return self

    def getProxy(self):
        return self

    def getFilePath(self, *args):
        # Any arguments are ignored; the path is fixed at construction.
        return self.path
class mock_table(object):
    """Table-factory stub: remembers the first call argument as .table and
    returns itself, so it can stand in for both factory and product."""

    def __call__(self, *args):
        self.table = args[0]
        return self
class mock_storage(object):
    """Records whether the storage ref-count was ever touched.

    Both flags latch to True on first use and are never reset."""

    def __init__(self):
        self.up = False
        self.down = False

    def incr(self, *args):
        self.up = True

    def decr(self, *args):
        self.down = True
class TestTables(lib.TestCase):
    """Unit tests for the omero.tables servants, run entirely against the
    mock Ice/session plumbing defined above (no server required)."""

    def setup_method(self, method):
        lib.TestCase.setup_method(self, method)
        # Session: route omero.util's internal factory through our mock.
        self.sf_provider = mocked_internal_service_factory()
        omero.util.internal_service_factory = self.sf_provider
        self.sf = self.sf_provider()
        # Context
        serverid = "mock_table"
        self.communicator = mock_communicator()
        self.communicator_provider = communicator_provider(self.communicator)
        self.stop_event = omero.util.concurrency.get_event()
        self.ctx = omero.util.ServerContext(serverid, self.communicator, self.stop_event)
        self.current = mock_current(self.communicator)
        self.__tables = []

    def teardown_method(self, method):
        """
        To prevent cleanup from taking place, we hold on to all the tables
        until the end. This is caused by the reuse of TableI instances after
        the Tables go out of scope.
        """
        for t in self.__tables:
            t.__del__()

    def tablesI(self, internal_repo=None):
        """Build a TablesI servant over a (mock) internal repository and
        remember it for explicit disposal in teardown_method."""
        if internal_repo is None:
            internal_repo = mock_internal_repo(self.tmp)
        t = omero.tables.TablesI(self.ctx, mock_table(), internal_repo)
        self.__tables.append(t)
        return t

    def repouuid(self):
        """
        Returns a string similar to that written by
        RandomAccessFile.writeUTF() in Java.
        """
        return "XX%s" % uuid.uuid4()

    def repodir(self, make=True):
        """Point omero.repo.dir at a temp dir, optionally creating the
        .omero/repository tree, and return the repository path."""
        self.tmp = path(self.tmpdir())
        self.communicator.getProperties().setProperty("omero.repo.dir", str(self.tmp))
        repo = self.tmp / ".omero" / "repository"
        if make:
            repo.makedirs()
        return str(repo)

    def repofile(self, db_uuid, repo_uuid=None):
        """Write a repo_uuid marker file for db_uuid under the repository."""
        if repo_uuid is None:  # idiom fix: was '== None'
            repo_uuid = self.repouuid()
        f = self.repodir()
        f = path(f) / db_uuid
        f.makedirs()
        f = f / "repo_uuid"
        f.write_lines([repo_uuid])

    # Note: some of the following method were added as __init__ called
    # first _get_dir() and then _get_uuid(), so the naming is off

    def testTablesIGetDirNoRepoSet(self):
        self.sf.return_values.append(self.tmpdir())
        pytest.raises(omero.ResourceError, omero.tables.TablesI, self.ctx)

    def testTablesIGetDirNoRepoCreated(self):
        self.repodir(False)
        pytest.raises(omero.ResourceError, omero.tables.TablesI, self.ctx)

    def testTablesIGetDirGetsRepoThenNoSF(self):
        self.repodir()
        omero.util.internal_service_factory = mocked_internal_service_factory(None)
        pytest.raises(Exception, omero.tables.TablesI, self.ctx)

    def testTablesIGetDirGetsRepoGetsSFCantFindRepoFile(self):
        self.repodir()
        pytest.raises(IOError, omero.tables.TablesI, self.ctx)

    def testTablesIGetDirGetsRepoGetsSFCantFindRepoObject(self):
        self.repofile(self.sf.db_uuid)
        self.sf.return_values.append(omero.ApiUsageException(None, None, "Can't Find"))
        pytest.raises(omero.ApiUsageException, omero.tables.TablesI, self.ctx)

    def testTablesIGetDirGetsRepoGetsSFGetsRepo(self):
        self.repofile(self.sf.db_uuid)
        self.sf.return_values.append(omero.model.OriginalFileI(1, False))
        tables = self.tablesI()

    def testTables(self, newfile=True):
        if newfile:
            self.repofile(self.sf.db_uuid)
        f = omero.model.OriginalFileI(1, True)
        f.details.group = omero.model.ExperimenterGroupI(1, False)
        self.sf.return_values.append(f)
        tables = self.tablesI()
        table = tables.getTable(f, self.sf, self.current)
        assert table
        assert table.table
        assert table.table.storage
        return table

    def testTableOriginalFileLoaded(self):
        f1 = omero.model.OriginalFileI(1, False)
        f2 = omero.model.OriginalFileI(1, True)
        f2.details.group = omero.model.ExperimenterGroupI(1, False)
        self.sf.return_values.append(f2)
        storage = mock_storage()
        self.ctx.newSession()
        table = omero.tables.TableI(self.ctx, f1, self.sf, storage)
        assert table.file_obj.details.group

    def testTableIncrDecr(self):
        f = omero.model.OriginalFileI(1, True)
        f.details.group = omero.model.ExperimenterGroupI(1, False)
        storage = mock_storage()
        table = omero.tables.TableI(self.ctx, f, self.sf, storage)
        assert storage.up
        table.cleanup()
        assert storage.down

    def testTablePreInitialized(self):
        f = omero.model.OriginalFileI(1, True)
        f.details.group = omero.model.ExperimenterGroupI(1, False)
        mocktable = self.testTables()
        table1 = mocktable.table
        storage = table1.storage
        storage.initialize([LongColumnI("a", None, [])])
        table2 = omero.tables.TableI(self.ctx, f, self.sf, storage)
        table2.cleanup()
        table1.cleanup()

    def testTableModifications(self):
        # Bug fix: this method was previously defined twice with identical
        # bodies; the redundant second definition has been removed.
        mocktable = self.testTables()
        table = mocktable.table
        storage = table.storage
        storage.initialize([LongColumnI("a", None, [])])
        assert storage.uptodate(table.stamp)
        storage._stamp += 1  # Not really allowed
        assert not storage.uptodate(table.stamp)
        table.cleanup()

    def testTableAddData(self, newfile=True, cleanup=True):
        mocktable = self.testTables(newfile)
        table = mocktable.table
        storage = table.storage
        assert storage
        table.initialize([LongColumnI("a", None, []), DoubleColumnI("b", None, [])])
        template = table.getHeaders(self.current)
        template[0].values = [1] * 5
        template[1].values = [2.0] * 5
        table.addData(template)
        if cleanup:
            table.cleanup()
        return table

    def testTableSearch(self):
        table = self.testTableAddData(True, False)
        rv = list(table.getWhereList('(a==1)', None, None, None, None, None))
        assert range(5) == rv
        data = table.readCoordinates(rv, self.current)
        assert 2 == len(data.columns)
        for i in range(5):
            assert 1 == data.columns[0].values[i]
            assert 2.0 == data.columns[1].values[i]
        table.cleanup()

    def testErrorInStorage(self):
        self.repofile(self.sf.db_uuid)
        of = omero.model.OriginalFileI(1, False)
        self.sf.return_values.append(of)
        internal_repo = mock_internal_repo(self.tmp)
        # Corrupt backing file: getTable must surface a ValidationException.
        f = open(internal_repo.path, "w")
        f.write("this file is not HDF")
        f.close()
        tables = self.tablesI(internal_repo)
        pytest.raises(omero.ValidationException, tables.getTable, of, self.sf, self.current)

    def testErrorInGet(self):
        self.repofile(self.sf.db_uuid)
        f = omero.model.OriginalFileI(1, True)
        f.details.group = omero.model.ExperimenterGroupI(1, False)
        self.sf.return_values.append(f)
        tables = self.tablesI()
        table = tables.getTable(f, self.sf, self.current).table  # From mock
        cols = [omero.columns.LongColumnI('name', 'desc', None)]
        table.initialize(cols)
        cols[0].values = [1, 2, 3, 4]
        table.addData(cols)
        table.getWhereList('(name==1)', None, 0, 0, 0, self.current)
| gpl-2.0 |
jcpowermac/ansible | lib/ansible/modules/database/proxysql/proxysql_replication_hostgroups.py | 42 | 13461 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_replication_hostgroups
version_added: "2.3"
author: "Ben Mildren (@bmildren)"
short_description: Manages replication hostgroups using the proxysql admin
interface.
description:
- Each row in mysql_replication_hostgroups represent a pair of
writer_hostgroup and reader_hostgroup. ProxySQL will monitor the value of
read_only for all the servers in specified hostgroups, and based on the
value of read_only will assign the server to the writer or reader
hostgroups.
options:
writer_hostgroup:
description:
- Id of the writer hostgroup.
required: True
reader_hostgroup:
description:
- Id of the reader hostgroup.
required: True
comment:
description:
- Text field that can be used for any purposed defined by the user.
state:
description:
- When C(present) - adds the replication hostgroup, when C(absent) -
removes the replication hostgroup.
choices: [ "present", "absent" ]
default: present
extends_documentation_fragment:
- proxysql.managing_config
- proxysql.connectivity
'''
EXAMPLES = '''
---
# This example adds a replication hostgroup, it saves the mysql server config
# to disk, but avoids loading the mysql server config to runtime (this might be
# because several replication hostgroup are being added and the user wants to
# push the config to runtime in a single batch using the
# M(proxysql_manage_config) module). It uses supplied credentials to connect
# to the proxysql admin interface.
- proxysql_replication_hostgroups:
login_user: 'admin'
login_password: 'admin'
writer_hostgroup: 1
reader_hostgroup: 2
state: present
load_to_runtime: False
# This example removes a replication hostgroup, saves the mysql server config
# to disk, and dynamically loads the mysql server config to runtime. It uses
# credentials in a supplied config file to connect to the proxysql admin
# interface.
- proxysql_replication_hostgroups:
config_file: '~/proxysql.cnf'
writer_hostgroup: 3
reader_hostgroup: 4
state: absent
'''
RETURN = '''
stdout:
description: The replication hostgroup modified or removed from proxysql
returned: On create/update will return the newly modified group, on delete
it will return the deleted record.
type: dict
"sample": {
"changed": true,
"msg": "Added server to mysql_hosts",
"repl_group": {
"comment": "",
"reader_hostgroup": "1",
"writer_hostgroup": "2"
},
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.mysql import mysql_connect
from ansible.module_utils._text import to_native
try:
    import MySQLdb
    import MySQLdb.cursors
except ImportError:
    MYSQLDB_FOUND = False
else:
    MYSQLDB_FOUND = True

# ===========================================
# proxysql module specific support methods.
#


def perform_checks(module):
    """Validate module parameters before touching the ProxySQL admin
    interface, calling module.fail_json on the first problem found."""
    if module.params["login_port"] < 0 \
            or module.params["login_port"] > 65535:
        module.fail_json(
            msg="login_port must be a valid unix port number (0-65535)"
        )

    if not module.params["writer_hostgroup"] >= 0:
        module.fail_json(
            msg="writer_hostgroup must be a integer greater than or equal to 0"
        )

    if not module.params["reader_hostgroup"] == \
            module.params["writer_hostgroup"]:
        if not module.params["reader_hostgroup"] > 0:
            # Bug fix: this message previously named writer_hostgroup and
            # the wrong bound; the check actually requires reader > 0.
            module.fail_json(
                msg="reader_hostgroup must be an integer greater than 0"
            )
    else:
        module.fail_json(
            msg="reader_hostgroup cannot equal writer_hostgroup"
        )

    if not MYSQLDB_FOUND:
        module.fail_json(
            msg="the python mysqldb module is required"
        )
def save_config_to_disk(cursor):
    # Persist the current in-memory mysql servers configuration to disk.
    cursor.execute("SAVE MYSQL SERVERS TO DISK")
    return True
def load_config_to_runtime(cursor):
    # Push the in-memory mysql servers configuration into the live runtime.
    cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
    return True
class ProxySQLReplicationHostgroup(object):
    """One (writer_hostgroup, reader_hostgroup) pair of ProxySQL's
    mysql_replication_hostgroups table, plus the CRUD operations on it."""
    def __init__(self, module):
        self.state = module.params["state"]
        self.save_to_disk = module.params["save_to_disk"]
        self.load_to_runtime = module.params["load_to_runtime"]
        self.writer_hostgroup = module.params["writer_hostgroup"]
        self.reader_hostgroup = module.params["reader_hostgroup"]
        self.comment = module.params["comment"]
    def check_repl_group_config(self, cursor, keys):
        """Return True if a matching row exists.  With keys=False the
        comment must match too (used to detect whether an update is needed)."""
        query_string = \
            """SELECT count(*) AS `repl_groups`
               FROM mysql_replication_hostgroups
               WHERE writer_hostgroup = %s
                 AND reader_hostgroup = %s"""
        query_data = \
            [self.writer_hostgroup,
             self.reader_hostgroup]
        if self.comment and not keys:
            query_string += "\n AND comment = %s"
            query_data.append(self.comment)
        cursor.execute(query_string, query_data)
        check_count = cursor.fetchone()
        return (int(check_count['repl_groups']) > 0)
    def get_repl_group_config(self, cursor):
        """Fetch the full row for this writer/reader pair (or None)."""
        query_string = \
            """SELECT *
               FROM mysql_replication_hostgroups
               WHERE writer_hostgroup = %s
                 AND reader_hostgroup = %s"""
        query_data = \
            [self.writer_hostgroup,
             self.reader_hostgroup]
        cursor.execute(query_string, query_data)
        repl_group = cursor.fetchone()
        return repl_group
    def create_repl_group_config(self, cursor):
        """INSERT the replication hostgroup row (empty comment by default)."""
        query_string = \
            """INSERT INTO mysql_replication_hostgroups (
               writer_hostgroup,
               reader_hostgroup,
               comment)
               VALUES (%s, %s, %s)"""
        query_data = \
            [self.writer_hostgroup,
             self.reader_hostgroup,
             self.comment or '']
        cursor.execute(query_string, query_data)
        return True
    def update_repl_group_config(self, cursor):
        """UPDATE the row's comment in place."""
        query_string = \
            """UPDATE mysql_replication_hostgroups
               SET comment = %s
               WHERE writer_hostgroup = %s
                 AND reader_hostgroup = %s"""
        query_data = \
            [self.comment,
             self.writer_hostgroup,
             self.reader_hostgroup]
        cursor.execute(query_string, query_data)
        return True
    def delete_repl_group_config(self, cursor):
        """DELETE the row for this writer/reader pair."""
        query_string = \
            """DELETE FROM mysql_replication_hostgroups
               WHERE writer_hostgroup = %s
                 AND reader_hostgroup = %s"""
        query_data = \
            [self.writer_hostgroup,
             self.reader_hostgroup]
        cursor.execute(query_string, query_data)
        return True
    def manage_config(self, cursor, state):
        # Only persist/activate the config when the prior operation
        # actually changed something.
        if state:
            if self.save_to_disk:
                save_config_to_disk(cursor)
            if self.load_to_runtime:
                load_config_to_runtime(cursor)
    def create_repl_group(self, check_mode, result, cursor):
        """Create the hostgroup pair (honours check mode), filling result."""
        if not check_mode:
            result['changed'] = \
                self.create_repl_group_config(cursor)
            # NOTE(review): message says "mysql_hosts" but this module works
            # on mysql_replication_hostgroups; kept for output compatibility.
            result['msg'] = "Added server to mysql_hosts"
            result['repl_group'] = \
                self.get_repl_group_config(cursor)
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("Repl group would have been added to" +
                             " mysql_replication_hostgroups, however" +
                             " check_mode is enabled.")
    def update_repl_group(self, check_mode, result, cursor):
        """Update the pair's comment (honours check mode), filling result."""
        if not check_mode:
            result['changed'] = \
                self.update_repl_group_config(cursor)
            # NOTE(review): "mysql_hosts" wording kept, as above.
            result['msg'] = "Updated server in mysql_hosts"
            result['repl_group'] = \
                self.get_repl_group_config(cursor)
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("Repl group would have been updated in" +
                             " mysql_replication_hostgroups, however" +
                             " check_mode is enabled.")
    def delete_repl_group(self, check_mode, result, cursor):
        """Delete the pair (honours check mode), capturing the row first."""
        if not check_mode:
            result['repl_group'] = \
                self.get_repl_group_config(cursor)
            result['changed'] = \
                self.delete_repl_group_config(cursor)
            # NOTE(review): "mysql_hosts" wording kept, as above.
            result['msg'] = "Deleted server from mysql_hosts"
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("Repl group would have been deleted from" +
                             " mysql_replication_hostgroups, however" +
                             " check_mode is enabled.")
# ===========================================
# Module execution.
#
def main():
    """Ansible entry point: parse params, connect to the ProxySQL admin
    interface, then converge the replication hostgroup to the desired
    state (present/absent)."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None, type='str'),
            login_password=dict(default=None, no_log=True, type='str'),
            login_host=dict(default="127.0.0.1"),
            login_unix_socket=dict(default=None),
            login_port=dict(default=6032, type='int'),
            config_file=dict(default="", type='path'),
            writer_hostgroup=dict(required=True, type='int'),
            reader_hostgroup=dict(required=True, type='int'),
            comment=dict(type='str'),
            state=dict(default='present', choices=['present',
                                                   'absent']),
            save_to_disk=dict(default=True, type='bool'),
            load_to_runtime=dict(default=True, type='bool')
        ),
        supports_check_mode=True
    )
    perform_checks(module)
    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    config_file = module.params["config_file"]
    cursor = None
    try:
        # DictCursor so rows come back keyed by column name.
        cursor = mysql_connect(module,
                               login_user,
                               login_password,
                               config_file,
                               cursor_class=MySQLdb.cursors.DictCursor)
    except MySQLdb.Error as e:
        module.fail_json(
            msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
        )
    proxysql_repl_group = ProxySQLReplicationHostgroup(module)
    result = {}
    result['state'] = proxysql_repl_group.state
    if proxysql_repl_group.state == "present":
        try:
            # keys=True: does the writer/reader pair exist at all?
            if not proxysql_repl_group.check_repl_group_config(cursor,
                                                               keys=True):
                proxysql_repl_group.create_repl_group(module.check_mode,
                                                      result,
                                                      cursor)
            else:
                # keys=False: pair exists; does the comment also match?
                if not proxysql_repl_group.check_repl_group_config(cursor,
                                                                   keys=False):
                    proxysql_repl_group.update_repl_group(module.check_mode,
                                                          result,
                                                          cursor)
                else:
                    result['changed'] = False
                    result['msg'] = ("The repl group already exists in" +
                                     " mysql_replication_hostgroups and" +
                                     " doesn't need to be updated.")
                    result['repl_group'] = \
                        proxysql_repl_group.get_repl_group_config(cursor)
        except MySQLdb.Error as e:
            module.fail_json(
                msg="unable to modify replication hostgroup.. %s" % to_native(e)
            )
    elif proxysql_repl_group.state == "absent":
        try:
            if proxysql_repl_group.check_repl_group_config(cursor,
                                                           keys=True):
                proxysql_repl_group.delete_repl_group(module.check_mode,
                                                      result,
                                                      cursor)
            else:
                result['changed'] = False
                result['msg'] = ("The repl group is already absent from the" +
                                 " mysql_replication_hostgroups memory" +
                                 " configuration")
        except MySQLdb.Error as e:
            module.fail_json(
                msg="unable to delete replication hostgroup.. %s" % to_native(e)
            )
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
Diblo/Pikaptcha | pikaptcha/console.py | 1 | 9224 | import argparse
import sys
import pikaptcha
from pikaptcha.ptcexceptions import *
from pikaptcha.tos import *
from pikaptcha.gmailv import *
from pikaptcha.url import *
from pgoapi.exceptions import AuthException, ServerSideRequestThrottlingException, NotLoggedInException
import pprint
import threading
import getopt
import urllib2
import imaplib
import string
import re
def _str2bool(value):
    """Parse a boolean-ish command-line value.

    'true', 'yes', 'y', '1' and 'on' (any case) are True; anything else is
    False.  argparse's type=bool treats any non-empty string -- including
    'False' -- as True, so the previous '-av False' silently enabled the
    feature; this helper fixes that while keeping '-av True' working.
    """
    return str(value).strip().lower() in ("1", "true", "yes", "y", "on")


def parse_arguments(args):
    """Parse the command line arguments for the console commands.
    Args:
        args (List[str]): List of string arguments to be parsed.
    Returns:
        Namespace: Namespace with the parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Pokemon Trainer Club Account Creator'
    )
    parser.add_argument(
        '-u', '--username', type=str, default=None,
        help='Username for the new account (defaults to random string).'
    )
    parser.add_argument(
        '-p', '--password', type=str, default=None,
        help='Password for the new account (defaults to random string).'
    )
    parser.add_argument(
        '-e', '--email', type=str, default=None,
        help='Email for the new account (defaults to random email-like string).'
    )
    parser.add_argument(
        '-m', '--plusmail', type=str, default=None,
        help='Email template for the new account. Use something like aaaa@gmail.com (defaults to nothing).'
    )
    parser.add_argument(
        # Bug fix: was type=bool, which made '-av False' parse as True.
        '-av', '--autoverify', type=_str2bool, default=False,
        help='Append the argument -av True if you want to use autoverify with +mail.'
    )
    parser.add_argument(
        '-b', '--birthday', type=str, default=None,
        help='Birthday for the new account. Must be YYYY-MM-DD. (defaults to a random birthday).'
    )
    parser.add_argument(
        '-c', '--count', type=int, default=1,
        help='Number of accounts to generate.'
    )
    parser.add_argument(
        '-r', '--recaptcha', type=str, default=None,
        help='Your 2captcha key from settings'
    )
    parser.add_argument(
        '-gm', '--googlemail', type=str, default=None,
        help='This is the mail for the google account when auto verify is activate (Only required if plus mail is different from google mail)'
    )
    parser.add_argument(
        '-gp', '--googlepass', type=str, default=None,
        help='This is the password for the google account and is require to activate auto verify when using the plus mail'
    )
    parser.add_argument(
        '-t', '--textfile', type=str, default="usernames.txt",
        help='This is the location you want to save usernames.txt'
    )
    parser.add_argument(
        '-of', '--outputformat', type=str, default="compact",
        help='If you choose compact, you get user:pass. If you choose pkgo, you get -u user -p pass'
    )
    parser.add_argument(
        '-it', '--inputtext', type=str, default=None,
        help='This is the location you want to read usernames in the format user:pass'
    )
    parser.add_argument(
        '-sn', '--startnum', type=int, default=None,
        help='If you specify both -u and -c, it will append a number to the end. This allows you to choose where to start from'
    )
    parser.add_argument(
        '-ct', '--captchatimeout', type=int, default=1000,
        help='Allows you to set the time to timeout captcha and forget that account (and forgeit $0.003).'
    )
    parser.add_argument(
        '-l', '--location', type=str, default="40.7127837,-74.005941",
        help='This is the location that will be spoofed when we verify TOS'
    )
    parser.add_argument(
        '-px', '--proxy', type=str, default=None,
        help='Proxy to be used when accepting the Terms of Services. Must be host:port (ex. 1.1.1.1:80). Must be a HTTPS proxy.'
    )
    return parser.parse_args(args)
def _verify_autoverify_email(settings):
    # Autoverification needs a mailbox to poll: require either --plusmail
    # or --googlemail whenever a google password was supplied.
    if (settings['args'].googlepass is not None and settings['args'].plusmail == None and settings['args'].googlemail == None):
        raise PTCInvalidEmailException("You have to specify a plusmail (--plusmail or -m) or a google email (--googlemail or -gm) to use autoverification.")
def _verify_plusmail_format(settings):
    # Loose sanity pattern; only checks the general user@host.tld shape.
    if (settings['args'].plusmail != None and not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", settings['args'].plusmail)):
        raise PTCInvalidEmailException("Invalid email format to use with plusmail.")
def _verify_twocaptcha_balance(settings):
    # settings['balance'] is the raw 2captcha API response (or error token).
    if (settings['args'].recaptcha != None and settings['balance'] == 'ERROR_KEY_DOES_NOT_EXIST'):
        raise PTCTwocaptchaException("2captcha key does not exist.")
    # Each solve costs ~$0.003; require enough balance for the whole run.
    if (settings['args'].recaptcha != None and float(settings['balance']) < float(settings['args'].count)*0.003):
        raise PTCTwocaptchaException("It does not seem like you have enough balance for this run. Lower the count or increase your balance.")
def _verify_settings(settings):
    # Run every pre-flight verification; print the failure reason and abort
    # the whole process on the first PTCException raised.
    # (Python 2 'except X, e' syntax -- this module targets Python 2.)
    verifications=[_verify_autoverify_email, _verify_plusmail_format, _verify_twocaptcha_balance]
    for verification in verifications:
        try:
            verification(settings)
        except PTCException, e:
            print e.message
            print "Terminating."
            sys.exit()
    return True
def entry():
    """Main entry point for the package console commands"""
    args = parse_arguments(sys.argv[1:])
    captchabal = None
    if args.recaptcha != None:
        # Poll 2captcha until the balance request stops failing, then show
        # the estimated cost of the run (~$0.003 per account).
        captchabal = "Failed"
        while(captchabal == "Failed"):
            captchabal = openurl("http://2captcha.com/res.php?key=" + args.recaptcha + "&action=getbalance")
        print("Your 2captcha balance is: " + captchabal)
        print("This run will cost you approximately: " + str(float(args.count)*0.003))
    username = args.username
    if args.inputtext != None:
        # Batch mode: read pre-made "user:pass" lines and override --count.
        print("Reading accounts from: " + args.inputtext)
        lines = [line.rstrip('\n') for line in open(args.inputtext, "r")]
        args.count = len(lines)
    if _verify_settings({'args':args, 'balance':captchabal}):
        if (args.googlepass is not None):
            with open(args.textfile, "a") as ulist:
                ulist.write("The following accounts use the email address: " + args.plusmail + "\n")
                ulist.close()
        for x in range(0,args.count):
            print("Making account #" + str(x+1))
            # Derive this iteration's username: numbered suffix when
            # generating multiple accounts, or taken from the input file.
            if ((args.username != None) and (args.count != 1) and (args.inputtext == None)):
                if(args.startnum == None):
                    username = args.username + str(x+1)
                else:
                    username = args.username + str(args.startnum+x)
            if (args.inputtext != None):
                username = ((lines[x]).split(":"))[0]
                args.password = ((lines[x]).split(":"))[1]
            error_msg = None
            try:
                try:
                    account_info = pikaptcha.random_account(username, args.password, args.email, args.birthday, args.plusmail, args.recaptcha, args.captchatimeout)
                    print(' Username: {}'.format(account_info["username"]))
                    print(' Password: {}'.format(account_info["password"]))
                    print(' Email : {}'.format(account_info["email"]))
                    # Accept Terms Service
                    accept_tos(account_info["username"], account_info["password"], args.location, args.proxy)
                    # Verify email
                    if (args.googlepass is not None):
                        if (args.googlemail is not None):
                            email_verify(args.googlemail, args.googlepass)
                        else:
                            email_verify(args.plusmail, args.googlepass)
                    # Append usernames
                    with open(args.textfile, "a") as ulist:
                        if args.outputformat == "pkgo":
                            ulist.write(" -u " + account_info["username"]+" -p "+account_info["password"]+"")
                        elif args.outputformat == "pkgocsv":
                            ulist.write("ptc,"+account_info["username"]+","+account_info["password"]+"\n")
                        else:
                            ulist.write(account_info["username"]+":"+account_info["password"]+"\n")
                        ulist.close()
                # Handle account creation failure exceptions
                except PTCInvalidPasswordException as err:
                    error_msg = 'Invalid password: {}'.format(err)
                except (PTCInvalidEmailException, PTCInvalidNameException) as err:
                    error_msg = 'Failed to create account! {}'.format(err)
                except PTCException as err:
                    error_msg = 'Failed to create account! General error: {}'.format(err)
            except Exception:
                import traceback
                error_msg = "Generic Exception: " + traceback.format_exc()
            if error_msg:
                # A single-account run aborts; batch runs log and continue.
                if args.count == 1:
                    sys.exit(error_msg)
                print(error_msg)
        with open(args.textfile, "a") as ulist:
            ulist.write("\n")
            ulist.close()
| gpl-3.0 |
csaez/mauto | mauto/tests/main_tests.py | 1 | 1059 | import mauto
from nose import with_setup
def setup():
    # Fixture: create (and persist) the macro used by most tests below.
    return mauto.new_macro("testsuite")
def setup_in_memory():
    # Fixture: create the macro without writing it to disk.
    return mauto.new_macro("testsuite", save=False)
def teardown():
    # Fixture cleanup: drop the macro created by either setup variant.
    mauto.remove_macro("testsuite")
@with_setup(setup, teardown)
def test_list_macros():
    # Bug fix: this test previously *returned* its condition instead of
    # asserting it, so it could never fail.
    assert len(mauto.list_macros()) >= 1
@with_setup(setup, teardown)
def test_new_macro():
    # Remove the fixture macro first so new_macro exercises a clean create.
    mauto.remove_macro("testsuite")
    assert mauto.new_macro("testsuite")
@with_setup(setup, teardown)
def test_get_macro():
    # Bug fix: previously returned the macro instead of asserting it, so
    # the test could never fail.
    assert mauto.get_macro("testsuite")
@with_setup(setup, teardown)
def test_remove_macro():
    # Removal should not raise; teardown removes again afterwards, so a
    # second removal must also be tolerated by the API.
    mauto.remove_macro("testsuite")
@with_setup(setup, teardown)
def test_save_macro():
    # Persisting an existing macro should report success (truthy).
    assert mauto.save_macro("testsuite")
def test_show():
    # The package is expected to expose a 'show' attribute.
    assert mauto.show is not None
@with_setup(setup, teardown)
def test_get_filepath():
    # A saved macro's backing file should be named after it.
    # (Dropped the stray Python-2 debug `print fp`; the assertion below
    # carries the test.)
    fp = mauto.get_filepath("testsuite")
    assert "testsuite.json" in fp
@with_setup(setup_in_memory, teardown)
def test_get_filepath2():
    # Macros created with save=False have no backing file on disk.
    fp = mauto.get_filepath("testsuite")
    assert fp is None
| mit |
40223151/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/xml/dom/expatbuilder.py | 733 | 35733 | """Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The later is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
# Node-type codes used in hot callback paths, hoisted to module level so
# lookups are plain globals rather than attribute accesses.
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE

# Short aliases for the DOMBuilderFilter decision codes.
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT

# Shared DOMImplementation used to create the documents being built.
theDOMImplementation = minidom.getDOMImplementation()

# Expat typename -> TypeInfo
_typeinfo_map = {
    "CDATA": minidom.TypeInfo(None, "cdata"),
    "ENUM": minidom.TypeInfo(None, "enumeration"),
    "ENTITY": minidom.TypeInfo(None, "entity"),
    "ENTITIES": minidom.TypeInfo(None, "entities"),
    "ID": minidom.TypeInfo(None, "id"),
    "IDREF": minidom.TypeInfo(None, "idref"),
    "IDREFS": minidom.TypeInfo(None, "idrefs"),
    "NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
    "NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
    }
class ElementInfo(object):
    """Per-element-type DTD information (attribute declarations and the
    expat content model) collected from declaration callbacks."""

    __slots__ = '_attr_info', '_model', 'tagName'

    def __init__(self, tagName, model=None):
        self.tagName = tagName
        self._attr_info = []
        self._model = model

    def __getstate__(self):
        return self._attr_info, self._model, self.tagName

    def __setstate__(self, state):
        self._attr_info, self._model, self.tagName = state

    def getAttributeType(self, aname):
        """Return the TypeInfo for attribute *aname* (no-type if unknown)."""
        for decl in self._attr_info:
            if decl[1] != aname:
                continue
            decl_type = decl[-2]
            # A parenthesised declared type is an enumeration of tokens.
            if decl_type[0] == "(":
                return _typeinfo_map["ENUM"]
            return _typeinfo_map[decl_type]
        return minidom._no_type

    def getAttributeTypeNS(self, namespaceURI, localName):
        # No namespaced attribute type information is recorded.
        return minidom._no_type

    def isElementContent(self):
        """True if the content model forbids character data."""
        model = self._model
        if not model:
            return False
        return model[0] not in (expat.model.XML_CTYPE_ANY,
                                expat.model.XML_CTYPE_MIXED)

    def isEmpty(self):
        """True if the element is declared EMPTY."""
        model = self._model
        return bool(model) and model[0] == expat.model.XML_CTYPE_EMPTY

    def isId(self, aname):
        """True if attribute *aname* is declared of type ID."""
        for decl in self._attr_info:
            if decl[1] == aname:
                return decl[-2] == "ID"
        return False

    def isIdNS(self, euri, ename, auri, aname):
        # not sure this is meaningful; mirrors isId keyed on (uri, localname)
        return self.isId((auri, aname))
def _intern(builder, s):
return builder._intern_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
class ExpatBuilder:
    """Document builder that uses Expat to build a ParsedXML.DOM document
    instance."""

    def __init__(self, options=None):
        # options: an xmlbuilder.Options instance; a default one is used
        # when the caller does not supply any.
        if options is None:
            options = xmlbuilder.Options()
        self._options = options
        if self._options.filter is not None:
            self._filter = FilterVisibilityController(self._options.filter)
        else:
            self._filter = None
            # This *really* doesn't do anything in this case, so
            # override it with something fast & minimal.
            self._finish_start_element = id
        self._parser = None
        self.reset()

    def createParser(self):
        """Create a new parser object."""
        return expat.ParserCreate()

    def getParser(self):
        """Return the parser object, creating a new one if needed."""
        if not self._parser:
            self._parser = self.createParser()
            # Bound setdefault of the parser's intern dict; used by _intern().
            self._intern_setdefault = self._parser.intern.setdefault
            self._parser.buffer_text = True
            self._parser.ordered_attributes = True
            self._parser.specified_attributes = True
            self.install(self._parser)
        return self._parser

    def reset(self):
        """Free all data structures used during DOM construction."""
        self.document = theDOMImplementation.createDocument(
            EMPTY_NAMESPACE, None, None)
        self.curNode = self.document
        self._elem_info = self.document._elem_info
        self._cdata = False

    def install(self, parser):
        """Install the callbacks needed to build the DOM into the parser."""
        # This creates circular references!
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        # first_element_handler rebinds itself to start_element_handler once
        # the first element is seen (skips per-call filter/info checks).
        parser.StartElementHandler = self.first_element_handler
        parser.EndElementHandler = self.end_element_handler
        parser.ProcessingInstructionHandler = self.pi_handler
        if self._options.entities:
            parser.EntityDeclHandler = self.entity_decl_handler
        parser.NotationDeclHandler = self.notation_decl_handler
        if self._options.comments:
            parser.CommentHandler = self.comment_handler
        if self._options.cdata_sections:
            parser.StartCdataSectionHandler = self.start_cdata_section_handler
            parser.EndCdataSectionHandler = self.end_cdata_section_handler
            parser.CharacterDataHandler = self.character_data_handler_cdata
        else:
            parser.CharacterDataHandler = self.character_data_handler
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
        parser.XmlDeclHandler = self.xml_decl_handler
        parser.ElementDeclHandler = self.element_decl_handler
        parser.AttlistDeclHandler = self.attlist_decl_handler

    def parseFile(self, file):
        """Parse a document from a file object, returning the document
        node."""
        parser = self.getParser()
        first_buffer = True
        try:
            while 1:
                # Feed the parser in 16K chunks.
                buffer = file.read(16*1024)
                if not buffer:
                    break
                parser.Parse(buffer, 0)
                # Once the document element exists, the internal subset (if
                # any) is fully contained in what has been read so far.
                if first_buffer and self.document.documentElement:
                    self._setup_subset(buffer)
                first_buffer = False
            parser.Parse("", True)
        except ParseEscape:
            # Raised by a filter returning FILTER_INTERRUPT: stop early.
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc

    def parseString(self, string):
        """Parse a document from a string, returning the document node."""
        parser = self.getParser()
        try:
            parser.Parse(string, True)
            self._setup_subset(string)
        except ParseEscape:
            pass
        doc = self.document
        self.reset()
        self._parser = None
        return doc

    def _setup_subset(self, buffer):
        """Load the internal subset if there might be one."""
        if self.document.doctype:
            extractor = InternalSubsetExtractor()
            extractor.parseString(buffer)
            subset = extractor.getSubset()
            self.document.doctype.internalSubset = subset

    def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
                                   has_internal_subset):
        doctype = self.document.implementation.createDocumentType(
            doctypeName, publicId, systemId)
        doctype.ownerDocument = self.document
        _append_child(self.document, doctype)
        self.document.doctype = doctype
        if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
            # Filter rejected the doctype: drop it and all dtd callbacks.
            self.document.doctype = None
            del self.document.childNodes[-1]
            doctype = None
            self._parser.EntityDeclHandler = None
            self._parser.NotationDeclHandler = None
        if has_internal_subset:
            if doctype is not None:
                doctype.entities._seq = []
                doctype.notations._seq = []
            # Comments/PIs inside the internal subset do not belong in the
            # document tree; suppress them until the doctype ends.
            self._parser.CommentHandler = None
            self._parser.ProcessingInstructionHandler = None
            self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler

    def end_doctype_decl_handler(self):
        # Restore the handlers suppressed by start_doctype_decl_handler.
        if self._options.comments:
            self._parser.CommentHandler = self.comment_handler
        self._parser.ProcessingInstructionHandler = self.pi_handler
        if not (self._elem_info or self._filter):
            # No DTD info and no filter: end-of-element bookkeeping is a
            # no-op, so replace it with the cheapest possible callable.
            self._finish_end_element = id

    def pi_handler(self, target, data):
        node = self.document.createProcessingInstruction(target, data)
        _append_child(self.curNode, node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            self.curNode.removeChild(node)

    def character_data_handler_cdata(self, data):
        childNodes = self.curNode.childNodes
        if self._cdata:
            # Merge consecutive expat callbacks for one CDATA section.
            if (self._cdata_continue
                and childNodes[-1].nodeType == CDATA_SECTION_NODE):
                childNodes[-1].appendData(data)
                return
            node = self.document.createCDATASection(data)
            self._cdata_continue = True
        elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
            # Append to an existing trailing Text node (normalization pass
            # becomes unnecessary; see module header).
            node = childNodes[-1]
            value = node.data + data
            node.data = value
            return
        else:
            node = minidom.Text()
            node.data = data
            node.ownerDocument = self.document
        _append_child(self.curNode, node)

    def character_data_handler(self, data):
        childNodes = self.curNode.childNodes
        if childNodes and childNodes[-1].nodeType == TEXT_NODE:
            node = childNodes[-1]
            node.data = node.data + data
            return
        node = minidom.Text()
        # NOTE(review): node.data on a fresh Text is presumably empty, making
        # the concatenation redundant; upstream CPython assigns ``data``
        # directly here -- confirm before changing.
        node.data = node.data + data
        node.ownerDocument = self.document
        _append_child(self.curNode, node)

    def entity_decl_handler(self, entityName, is_parameter_entity, value,
                            base, systemId, publicId, notationName):
        if is_parameter_entity:
            # we don't care about parameter entities for the DOM
            return
        if not self._options.entities:
            return
        node = self.document._create_entity(entityName, publicId,
                                            systemId, notationName)
        if value is not None:
            # internal entity
            # node *should* be readonly, but we'll cheat
            child = self.document.createTextNode(value)
            node.childNodes.append(child)
        self.document.doctype.entities._seq.append(node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            del self.document.doctype.entities._seq[-1]

    def notation_decl_handler(self, notationName, base, systemId, publicId):
        node = self.document._create_notation(notationName, publicId, systemId)
        self.document.doctype.notations._seq.append(node)
        # NOTE(review): this tests FILTER_ACCEPT where entity_decl_handler
        # tests FILTER_REJECT; it looks inverted but matches upstream CPython
        # -- confirm against upstream before changing.
        if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT:
            del self.document.doctype.notations._seq[-1]

    def comment_handler(self, data):
        node = self.document.createComment(data)
        _append_child(self.curNode, node)
        if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
            self.curNode.removeChild(node)

    def start_cdata_section_handler(self):
        self._cdata = True
        self._cdata_continue = False

    def end_cdata_section_handler(self):
        self._cdata = False
        self._cdata_continue = False

    def external_entity_ref_handler(self, context, base, systemId, publicId):
        # External entities are not expanded; returning 1 tells expat to
        # continue parsing.
        return 1

    def first_element_handler(self, name, attributes):
        # With no filter and no element info, end-of-element bookkeeping is
        # a no-op for the whole document; short-circuit it, then hand all
        # further elements straight to start_element_handler.
        if self._filter is None and not self._elem_info:
            self._finish_end_element = id
        self.getParser().StartElementHandler = self.start_element_handler
        self.start_element_handler(name, attributes)

    def start_element_handler(self, name, attributes):
        node = self.document.createElement(name)
        _append_child(self.curNode, node)
        self.curNode = node

        if attributes:
            # ordered_attributes mode: flat [name1, value1, name2, value2...]
            for i in range(0, len(attributes), 2):
                a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
                                 None, EMPTY_PREFIX)
                value = attributes[i+1]
                a.value = value
                a.ownerDocument = self.document
                _set_attribute_node(node, a)

        if node is not self.document.documentElement:
            self._finish_start_element(node)

    def _finish_start_element(self, node):
        if self._filter:
            # To be general, we'd have to call isSameNode(), but this
            # is sufficient for minidom:
            if node is self.document.documentElement:
                return
            filt = self._filter.startContainer(node)
            if filt == FILTER_REJECT:
                # ignore this node & all descendents
                Rejecter(self)
            elif filt == FILTER_SKIP:
                # ignore this node, but make it's children become
                # children of the parent node
                Skipper(self)
            else:
                return
            self.curNode = node.parentNode
            node.parentNode.removeChild(node)
            node.unlink()

    # If this ever changes, Namespaces.end_element_handler() needs to
    # be changed to match.
    #
    def end_element_handler(self, name):
        curNode = self.curNode
        self.curNode = curNode.parentNode
        self._finish_end_element(curNode)

    def _finish_end_element(self, curNode):
        info = self._elem_info.get(curNode.tagName)
        if info:
            self._handle_white_text_nodes(curNode, info)
        if self._filter:
            if curNode is self.document.documentElement:
                return
            if self._filter.acceptNode(curNode) == FILTER_REJECT:
                self.curNode.removeChild(curNode)
                curNode.unlink()

    def _handle_white_text_nodes(self, node, info):
        if (self._options.whitespace_in_element_content
            or not info.isElementContent()):
            return

        # We have element type information and should remove ignorable
        # whitespace; identify for text nodes which contain only
        # whitespace.
        L = []
        for child in node.childNodes:
            if child.nodeType == TEXT_NODE and not child.data.strip():
                L.append(child)

        # Remove ignorable whitespace from the tree.
        for child in L:
            node.removeChild(child)

    def element_decl_handler(self, name, model):
        info = self._elem_info.get(name)
        if info is None:
            self._elem_info[name] = ElementInfo(name, model)
        else:
            # ATTLIST seen first: the placeholder must not yet have a model.
            assert info._model is None
            info._model = model

    def attlist_decl_handler(self, elem, name, type, default, required):
        info = self._elem_info.get(elem)
        if info is None:
            info = ElementInfo(elem)
            self._elem_info[elem] = info
        info._attr_info.append(
            [None, name, None, None, default, 0, type, required])

    def xml_decl_handler(self, version, encoding, standalone):
        self.document.version = version
        self.document.encoding = encoding
        # This is still a little ugly, thanks to the pyexpat API. ;-(
        if standalone >= 0:
            if standalone:
                self.document.standalone = True
            else:
                self.document.standalone = False
# Return codes a DOMBuilderFilter may legally produce from its callbacks.
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
    """Wrapper around a DOMBuilderFilter which implements the checks
    to make the whatToShow filter attribute work."""

    __slots__ = 'filter',

    def __init__(self, filter):
        self.filter = filter

    def startContainer(self, node):
        mask = self._nodetype_mask[node.nodeType]
        if not (self.filter.whatToShow & mask):
            # Node type is outside the filter's declared interest: keep it.
            return FILTER_ACCEPT
        verdict = self.filter.startContainer(node)
        if verdict == FILTER_INTERRUPT:
            raise ParseEscape
        if verdict not in _ALLOWED_FILTER_RETURNS:
            raise ValueError(
                "startContainer() returned illegal value: " + repr(verdict))
        return verdict

    def acceptNode(self, node):
        mask = self._nodetype_mask[node.nodeType]
        if not (self.filter.whatToShow & mask):
            return FILTER_ACCEPT
        verdict = self.filter.acceptNode(node)
        if verdict == FILTER_INTERRUPT:
            raise ParseEscape
        if verdict == FILTER_SKIP:
            # move all child nodes to the parent, and remove this node
            parent = node.parentNode
            for child in node.childNodes[:]:
                parent.appendChild(child)
            # node is handled by the caller
            return FILTER_REJECT
        if verdict not in _ALLOWED_FILTER_RETURNS:
            raise ValueError(
                "acceptNode() returned illegal value: " + repr(verdict))
        return verdict

    # Maps each DOM node-type code to its whatToShow bit.
    _nodetype_mask = {
        Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
        Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
        Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
        Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
        Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
        Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
        Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
        Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
        Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
        Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
        Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
        Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
        }
class FilterCrutch(object):
    """Base for temporary shims that hijack the expat element handlers
    while a filtered subtree is being consumed."""

    __slots__ = '_builder', '_level', '_old_start', '_old_end'

    def __init__(self, builder):
        self._builder = builder
        self._level = 0
        expat_parser = builder._parser
        # Remember the real handlers, then splice ourselves in.
        self._old_start = expat_parser.StartElementHandler
        self._old_end = expat_parser.EndElementHandler
        expat_parser.StartElementHandler = self.start_element_handler
        expat_parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
    """Consume (and discard) everything inside a rejected element."""

    __slots__ = ()

    def __init__(self, builder):
        FilterCrutch.__init__(self, builder)
        # While rejecting, no content callbacks should fire at all.
        parser = builder._parser
        for handler_name in ("ProcessingInstructionHandler",
                             "CommentHandler",
                             "CharacterDataHandler",
                             "StartCdataSectionHandler",
                             "EndCdataSectionHandler",
                             "ExternalEntityRefHandler",
                             ):
            setattr(parser, handler_name, None)

    def start_element_handler(self, *args):
        # Track how deep we are inside the rejected subtree.
        self._level += 1

    def end_element_handler(self, *args):
        if self._level:
            self._level -= 1
        else:
            # Leaving the rejected element itself: restore everything.
            parser = self._builder._parser
            self._builder.install(parser)
            parser.StartElementHandler = self._old_start
            parser.EndElementHandler = self._old_end
class Skipper(FilterCrutch):
    """Drop a skipped element but keep its children, reparented to the
    skipped element's parent."""

    __slots__ = ()

    def start_element_handler(self, *args):
        before = self._builder.curNode
        self._old_start(*args)
        if self._builder.curNode is not before:
            # The real handler pushed a new element: one level deeper.
            self._level += 1

    def end_element_handler(self, *args):
        if self._level == 0:
            # We're popping back out of the node we're skipping, so we
            # shouldn't need to do anything but reset the handlers.
            self._builder._parser.StartElementHandler = self._old_start
            self._builder._parser.EndElementHandler = self._old_end
            self._builder = None
        else:
            self._level -= 1
            self._old_end(*args)
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
    """Builder which constructs document fragments given XML source
    text and a context node.

    The context node is expected to provide information about the
    namespace declarations which are in scope at the start of the
    fragment.
    """

    def __init__(self, context, options=None):
        if context.nodeType == DOCUMENT_NODE:
            self.originalDocument = context
            self.context = context
        else:
            self.originalDocument = context.ownerDocument
            self.context = context
        ExpatBuilder.__init__(self, options)

    def reset(self):
        ExpatBuilder.reset(self)
        self.fragment = None

    def parseFile(self, file):
        """Parse a document fragment from a file object, returning the
        fragment node."""
        return self.parseString(file.read())

    def parseString(self, string):
        """Parse a document fragment from a string, returning the
        fragment node."""
        self._source = string
        parser = self.getParser()
        doctype = self.originalDocument.doctype
        ident = ""
        if doctype:
            subset = doctype.internalSubset or self._getDeclarations()
            if doctype.publicId:
                ident = ('PUBLIC "%s" "%s"'
                         % (doctype.publicId, doctype.systemId))
            elif doctype.systemId:
                ident = 'SYSTEM "%s"' % doctype.systemId
        else:
            subset = ""
        nsattrs = self._getNSattrs()  # get ns decls from node's ancestors
        # Wrap the fragment source in a synthetic document so that the
        # doctype and in-scope namespaces apply while parsing it.
        document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
        try:
            parser.Parse(document, 1)
        except:
            # Parsing failed: drop the partially built state, re-raise.
            self.reset()
            raise
        fragment = self.fragment
        self.reset()
##        self._parser = None
        return fragment

    def _getDeclarations(self):
        """Re-create the internal subset from the DocumentType node.

        This is only needed if we don't already have the
        internalSubset as a string.
        """
        doctype = self.context.ownerDocument.doctype
        s = ""
        if doctype:
            for i in range(doctype.notations.length):
                notation = doctype.notations.item(i)
                if s:
                    s = s + "\n  "
                s = "%s<!NOTATION %s" % (s, notation.nodeName)
                if notation.publicId:
                    s = '%s PUBLIC "%s"\n "%s">' \
                        % (s, notation.publicId, notation.systemId)
                else:
                    s = '%s SYSTEM "%s">' % (s, notation.systemId)
            for i in range(doctype.entities.length):
                entity = doctype.entities.item(i)
                if s:
                    s = s + "\n  "
                s = "%s<!ENTITY %s" % (s, entity.nodeName)
                if entity.publicId:
                    s = '%s PUBLIC "%s"\n "%s"' \
                        % (s, entity.publicId, entity.systemId)
                elif entity.systemId:
                    s = '%s SYSTEM "%s"' % (s, entity.systemId)
                else:
                    # Internal entity: inline its replacement text.
                    s = '%s "%s"' % (s, entity.firstChild.data)
                if entity.notationName:
                    s = "%s NOTATION %s" % (s, entity.notationName)
                s = s + ">"
        return s

    def _getNSattrs(self):
        # Overridden by FragmentBuilderNS to emit xmlns attributes.
        return ""

    def external_entity_ref_handler(self, context, base, systemId, publicId):
        if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
            # this entref is the one that we made to put the subtree
            # in; all of our given input is parsed in here.
            old_document = self.document
            old_cur_node = self.curNode
            parser = self._parser.ExternalEntityParserCreate(context)
            # put the real document back, parse into the fragment to return
            self.document = self.originalDocument
            self.fragment = self.document.createDocumentFragment()
            self.curNode = self.fragment
            try:
                parser.Parse(self._source, 1)
            finally:
                self.curNode = old_cur_node
                self.document = old_document
                self._source = None
            return -1
        else:
            return ExpatBuilder.external_entity_ref_handler(
                self, context, base, systemId, publicId)
class Namespaces:
    """Mix-in class for builders; adds support for namespaces."""

    def _initNamespaces(self):
        # list of (prefix, uri) ns declarations.  Namespace attrs are
        # constructed from this and added to the element's attrs.
        self._ns_ordered_prefixes = []

    def createParser(self):
        """Create a new namespace-handling parser."""
        # The space separator makes expat report "uri localname prefix"
        # names, which _parse_ns_name() splits apart.
        parser = expat.ParserCreate(namespace_separator=" ")
        parser.namespace_prefixes = True
        return parser

    def install(self, parser):
        """Insert the namespace-handlers onto the parser."""
        ExpatBuilder.install(self, parser)
        if self._options.namespace_declarations:
            parser.StartNamespaceDeclHandler = (
                self.start_namespace_decl_handler)

    def start_namespace_decl_handler(self, prefix, uri):
        """Push this namespace declaration on our storage."""
        self._ns_ordered_prefixes.append((prefix, uri))

    def start_element_handler(self, name, attributes):
        if ' ' in name:
            uri, localname, prefix, qname = _parse_ns_name(self, name)
        else:
            # Unqualified element name.
            uri = EMPTY_NAMESPACE
            qname = name
            localname = None
            prefix = EMPTY_PREFIX
        node = minidom.Element(qname, uri, prefix, localname)
        node.ownerDocument = self.document
        _append_child(self.curNode, node)
        self.curNode = node

        if self._ns_ordered_prefixes:
            # Materialize pending xmlns declarations as attributes of this
            # (first) element in their scope.
            for prefix, uri in self._ns_ordered_prefixes:
                if prefix:
                    a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
                                     XMLNS_NAMESPACE, prefix, "xmlns")
                else:
                    a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
                                     "xmlns", EMPTY_PREFIX)
                a.value = uri
                a.ownerDocument = self.document
                _set_attribute_node(node, a)
            del self._ns_ordered_prefixes[:]

        if attributes:
            # ordered_attributes mode: flat [name1, value1, name2, value2...]
            node._ensure_attributes()
            _attrs = node._attrs
            _attrsNS = node._attrsNS
            for i in range(0, len(attributes), 2):
                aname = attributes[i]
                value = attributes[i+1]
                if ' ' in aname:
                    uri, localname, prefix, qname = _parse_ns_name(self, aname)
                    a = minidom.Attr(qname, uri, localname, prefix)
                    _attrs[qname] = a
                    _attrsNS[(uri, localname)] = a
                else:
                    a = minidom.Attr(aname, EMPTY_NAMESPACE,
                                     aname, EMPTY_PREFIX)
                    _attrs[aname] = a
                    _attrsNS[(EMPTY_NAMESPACE, aname)] = a
                a.ownerDocument = self.document
                a.value = value
                a.ownerElement = node

    if __debug__:
        # This only adds some asserts to the original
        # end_element_handler(), so we only define this when -O is not
        # used.  If changing one, be sure to check the other to see if
        # it needs to be changed as well.
        #
        def end_element_handler(self, name):
            curNode = self.curNode
            if ' ' in name:
                uri, localname, prefix, qname = _parse_ns_name(self, name)
                assert (curNode.namespaceURI == uri
                        and curNode.localName == localname
                        and curNode.prefix == prefix), \
                       "element stack messed up! (namespace)"
            else:
                assert curNode.nodeName == name, \
                       "element stack messed up - bad nodeName"
                assert curNode.namespaceURI == EMPTY_NAMESPACE, \
                       "element stack messed up - bad namespaceURI"
            self.curNode = curNode.parentNode
            self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
    """Document builder that supports namespaces."""

    def reset(self):
        ExpatBuilder.reset(self)
        # (Re)initialize the pending xmlns declaration list.
        self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
    """Fragment builder that supports namespaces."""

    def reset(self):
        FragmentBuilder.reset(self)
        self._initNamespaces()

    def _getNSattrs(self):
        """Return string of namespace attributes from this element and
        ancestors."""
        # XXX This needs to be re-written to walk the ancestors of the
        # context to build up the namespace information from
        # declarations, elements, and attributes found in context.
        # Otherwise we have to store a bunch more data on the DOM
        # (though that *might* be more reliable -- not clear).
        attrs = ""
        context = self.context
        L = []  # prefixes already declared (innermost declaration wins)
        while context:
            # NOTE(review): _ns_prefix_uri is not set by minidom itself;
            # presumably populated elsewhere -- confirm before relying on it.
            if hasattr(context, '_ns_prefix_uri'):
                for prefix, uri in context._ns_prefix_uri.items():
                    # add every new NS decl from context to L and attrs string
                    if prefix in L:
                        continue
                    L.append(prefix)
                    if prefix:
                        declname = "xmlns:" + prefix
                    else:
                        declname = "xmlns"
                    if attrs:
                        attrs = "%s\n %s='%s'" % (attrs, declname, uri)
                    else:
                        attrs = " %s='%s'" % (declname, uri)
            context = context.parentNode
        return attrs
class ParseEscape(Exception):
    """Exception raised to short-circuit parsing in InternalSubsetExtractor."""
class InternalSubsetExtractor(ExpatBuilder):
    """XML processor which can rip out the internal document type subset."""

    subset = None

    def getSubset(self):
        """Return the internal subset as a string."""
        return self.subset

    def parseFile(self, file):
        try:
            ExpatBuilder.parseFile(self, file)
        except ParseEscape:
            pass

    def parseString(self, string):
        try:
            ExpatBuilder.parseString(self, string)
        except ParseEscape:
            pass

    def install(self, parser):
        # Only the doctype declaration and the first element start matter;
        # every other callback is left unset.
        parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
        parser.StartElementHandler = self.start_element_handler

    def start_doctype_decl_handler(self, name, publicId, systemId,
                                   has_internal_subset):
        if not has_internal_subset:
            raise ParseEscape()
        # Collect the raw subset text until the doctype declaration ends.
        parser = self.getParser()
        self.subset = []
        parser.DefaultHandler = self.subset.append
        parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler

    def end_doctype_decl_handler(self):
        # Normalize all line endings to '\n', then stop parsing.
        text = ''.join(self.subset)
        self.subset = text.replace('\r\n', '\n').replace('\r', '\n')
        raise ParseEscape()

    def start_element_handler(self, name, attrs):
        # Document content reached: no internal subset can follow.
        raise ParseEscape()
def parse(file, namespaces=True):
    """Parse a document, returning the resulting Document node.

    'file' may be either a file name or an open file object.
    """
    builder = ExpatBuilderNS() if namespaces else ExpatBuilder()
    if isinstance(file, str):
        # Treat a string as a file name; the context manager guarantees
        # the handle is closed even if parsing raises.
        with open(file, 'rb') as fp:
            return builder.parseFile(fp)
    return builder.parseFile(file)
def parseString(string, namespaces=True):
    """Parse a document from a string, returning the resulting
    Document node.
    """
    builder = ExpatBuilderNS() if namespaces else ExpatBuilder()
    return builder.parseString(string)
def parseFragment(file, context, namespaces=True):
    """Parse a fragment of a document, given the context from which it
    was originally extracted.  context should be the parent of the
    node(s) which are in the fragment.

    'file' may be either a file name or an open file object.
    """
    if namespaces:
        builder = FragmentBuilderNS(context)
    else:
        builder = FragmentBuilder(context)
    if isinstance(file, str):
        # Treat a string as a file name; close the handle on any outcome.
        with open(file, 'rb') as fp:
            return builder.parseFile(fp)
    return builder.parseFile(file)
def parseFragmentString(string, context, namespaces=True):
    """Parse a fragment of a document from a string, given the context
    from which it was originally extracted.  context should be the
    parent of the node(s) which are in the fragment.
    """
    builder_class = FragmentBuilderNS if namespaces else FragmentBuilder
    return builder_class(context).parseString(string)
def makeBuilder(options):
    """Create a builder based on an Options object."""
    builder_class = ExpatBuilderNS if options.namespaces else ExpatBuilder
    return builder_class(options)
| gpl-3.0 |
eviljeff/olympia | src/olympia/stats/migrations/0001_initial.py | 6 | 3759 | # Generated by Django 2.2.5 on 2019-09-12 13:50
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields.json
import olympia.amo.fields
import olympia.stats.models
class Migration(migrations.Migration):
    """Initial schema for the stats app: per-addon daily update and
    download counters, plus the indexes the reporting queries rely on."""

    initial = True

    dependencies = [
        ('addons', '0001_initial'),
    ]

    operations = [
        # Daily update pings, with JSON breakdowns stored in legacy
        # singular-named columns (version/status/application/os/locale).
        migrations.CreateModel(
            name='UpdateCount',
            fields=[
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('count', models.PositiveIntegerField()),
                ('date', models.DateField()),
                ('versions', django_extensions.db.fields.json.JSONField(db_column='version', default=dict, null=True)),
                ('statuses', django_extensions.db.fields.json.JSONField(db_column='status', default=dict, null=True)),
                ('applications', django_extensions.db.fields.json.JSONField(db_column='application', default=dict, null=True)),
                ('oses', django_extensions.db.fields.json.JSONField(db_column='os', default=dict, null=True)),
                ('locales', django_extensions.db.fields.json.JSONField(db_column='locale', default=dict, null=True)),
                ('addon', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='addons.Addon')),
            ],
            options={
                'db_table': 'update_counts',
            },
            bases=(olympia.amo.models.SearchMixin, models.Model),
        ),
        # Daily downloads, with a JSON per-source breakdown (legacy 'src'
        # column name).
        migrations.CreateModel(
            name='DownloadCount',
            fields=[
                ('id', olympia.amo.fields.PositiveAutoField(primary_key=True, serialize=False)),
                ('count', models.PositiveIntegerField()),
                ('date', models.DateField()),
                ('sources', django_extensions.db.fields.json.JSONField(db_column='src', default=dict, null=True)),
                ('addon', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='addons.Addon')),
            ],
            options={
                'db_table': 'download_counts',
            },
            bases=(olympia.amo.models.SearchMixin, models.Model),
        ),
        # NOTE(review): index names are repeated across the two tables
        # ('count', 'addon_id', 'addon_and_count', 'addon_date_idx').  Index
        # names are per-table on MySQL but schema-wide on PostgreSQL --
        # confirm the target backend before reusing this migration.
        migrations.AddIndex(
            model_name='updatecount',
            index=models.Index(fields=['count'], name='count'),
        ),
        migrations.AddIndex(
            model_name='updatecount',
            index=models.Index(fields=['addon'], name='addon_id'),
        ),
        migrations.AddIndex(
            model_name='updatecount',
            index=models.Index(fields=['date'], name='date'),
        ),
        migrations.AddIndex(
            model_name='updatecount',
            index=models.Index(fields=['addon', 'count'], name='addon_and_count'),
        ),
        migrations.AddIndex(
            model_name='updatecount',
            index=models.Index(fields=['addon', 'date'], name='addon_date_idx'),
        ),
        migrations.AddIndex(
            model_name='downloadcount',
            index=models.Index(fields=['count'], name='count'),
        ),
        migrations.AddIndex(
            model_name='downloadcount',
            index=models.Index(fields=['addon'], name='addon_id'),
        ),
        migrations.AddIndex(
            model_name='downloadcount',
            index=models.Index(fields=['addon', 'count'], name='addon_and_count'),
        ),
        migrations.AddIndex(
            model_name='downloadcount',
            index=models.Index(fields=['addon', 'date'], name='addon_date_idx'),
        ),
        # One row per (date, addon) for downloads.
        migrations.AddConstraint(
            model_name='downloadcount',
            constraint=models.UniqueConstraint(fields=('date', 'addon'), name='date_2'),
        ),
    ]
ScottBuchanan/eden | modules/s3db/deploy.py | 1 | 114755 | # -*- coding: utf-8 -*-
""" Sahana Eden Deployments Model
@copyright: 2011-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Public API of this module (the star-import surface used by s3db).
__all__ = ("S3DeploymentModel",
           "S3DeploymentAlertModel",
           "deploy_rheader",
           "deploy_apply",
           "deploy_alert_select_recipients",
           "deploy_Inbox",
           "deploy_response_select_mission",
           )
try:
# try stdlib (Python 2.6)
import json
except ImportError:
try:
# try external module
import simplejson as json
except:
# fallback to pure-Python module
import gluon.contrib.simplejson as json
from gluon import *
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3DeploymentModel(S3Model):
names = ("deploy_mission",
"deploy_mission_id",
"deploy_mission_document",
"deploy_application",
"deploy_assignment",
"deploy_assignment_appraisal",
"deploy_assignment_experience",
)
    def model(self):
        """
            Define the deployment data model:
              - deploy_mission (with profile page widgets)
              - deploy_mission_document (link table)
              - deploy_application (roster membership)
              - deploy_assignment (+ links to appraisals/experience)

            @return: dict of names to pass to global scope (s3.*)
        """
        T = current.T
        db = current.db
        add_components = self.add_components
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link
        messages = current.messages
        NONE = messages["NONE"]
        UNKNOWN_OPT = messages.UNKNOWN_OPT
        human_resource_id = self.hrm_human_resource_id
        # ---------------------------------------------------------------------
        # Mission
        #
        mission_status_opts = {1 : T("Closed"),
                               2 : T("Open")
                               }
        tablename = "deploy_mission"
        define_table(tablename,
                     super_link("doc_id", "doc_entity"),
                     Field("name",
                           label = T("Name"),
                           represent = self.deploy_mission_name_represent,
                           requires = IS_NOT_EMPTY(),
                           ),
                     # @ToDo: Link to location via link table
                     # link table could be event_event_location for IFRC (would still allow 1 multi-country event to have multiple missions)
                     self.gis_location_id(),
                     # @ToDo: Link to event_type via event_id link table instead of duplicating
                     self.event_type_id(),
                     self.org_organisation_id(),
                     Field("code", length = 24,
                           represent = lambda v: s3_unicode(v) if v else NONE,
                           ),
                     Field("status", "integer",
                           default = 2,
                           label = T("Status"),
                           represent = lambda opt: \
                                       mission_status_opts.get(opt,
                                                               UNKNOWN_OPT),
                           requires = IS_IN_SET(mission_status_opts),
                           ),
                     # @todo: change into real fields written onaccept?
                     # (virtual fields, computed per-row by the functions below)
                     Field.Method("hrquantity",
                                  deploy_mission_hrquantity),
                     Field.Method("response_count",
                                  deploy_mission_response_count),
                     s3_comments(),
                     *s3_meta_fields())
        # CRUD Form
        crud_form = S3SQLCustomForm("name",
                                    "event_type_id",
                                    "location_id",
                                    "code",
                                    "status",
                                    # Files
                                    S3SQLInlineComponent(
                                        "document",
                                        name = "file",
                                        label = T("Files"),
                                        fields = ["file", "comments"],
                                        filterby = dict(field = "file",
                                                        options = "",
                                                        invert = True,
                                                        )
                                    ),
                                    # Links
                                    S3SQLInlineComponent(
                                        "document",
                                        name = "url",
                                        label = T("Links"),
                                        fields = ["url", "comments"],
                                        filterby = dict(field = "url",
                                                        options = None,
                                                        invert = True,
                                                        )
                                    ),
                                    #S3SQLInlineComponent("document",
                                                         #name = "file",
                                                         #label = T("Attachments"),
                                                         #fields = ["file",
                                                                   #"comments",
                                                                  #],
                                                         #),
                                    "comments",
                                    "created_on",
                                    )
        # Profile
        # Shared renderer for the datalist-based profile widgets below
        list_layout = deploy_MissionProfileLayout()
        alert_widget = dict(label = "Alerts",
                            insert = lambda r, list_id, title, url: \
                                     A(title,
                                       _href=r.url(component="alert",
                                                   method="create"),
                                       _class="action-btn profile-add-btn"),
                            label_create = "Create Alert",
                            type = "datalist",
                            list_fields = ["modified_on",
                                           "mission_id",
                                           "message_id",
                                           "subject",
                                           "body",
                                           ],
                            tablename = "deploy_alert",
                            context = "mission",
                            list_layout = list_layout,
                            pagesize = 10,
                            )
        list_fields = ["created_on",
                       "mission_id",
                       "comments",
                       "human_resource_id$id",
                       "human_resource_id$person_id",
                       "human_resource_id$organisation_id",
                       "message_id$body",
                       "message_id$from_address",
                       "message_id$attachment.document_id$file",
                       ]
        response_widget = dict(label = "Responses",
                               insert = False,
                               type = "datalist",
                               tablename = "deploy_response",
                               # Can't be 'response' as this clobbers web2py global
                               function = "response_message",
                               list_fields = list_fields,
                               context = "mission",
                               list_layout = list_layout,
                               # The popup datalist isn't currently functional (needs card layout applying) and not ideal UX anyway
                               #pagesize = 10,
                               pagesize = None,
                               )
        hr_label = current.deployment_settings.get_deploy_hr_label()
        # NOTE(review): if the deployment setting returns anything other than
        # "Member"/"Staff"/"Volunteer", label/label_create stay unbound and the
        # assignment_widget construction below would raise NameError — confirm
        # the setting is restricted to these three values
        if hr_label == "Member":
            label = "Members Deployed"
            label_create = "Deploy New Member"
        elif hr_label == "Staff":
            label = "Staff Deployed"
            label_create = "Deploy New Staff"
        elif hr_label == "Volunteer":
            label = "Volunteers Deployed"
            label_create = "Deploy New Volunteer"
        assignment_widget = dict(label = label,
                                 insert = lambda r, list_id, title, url: \
                                          A(title,
                                            _href=r.url(component="assignment",
                                                        method="create"),
                                            _class="action-btn profile-add-btn"),
                                 label_create = label_create,
                                 tablename = "deploy_assignment",
                                 type = "datalist",
                                 #type = "datatable",
                                 #actions = dt_row_actions,
                                 list_fields = [
                                     "human_resource_id$id",
                                     "human_resource_id$person_id",
                                     "human_resource_id$organisation_id",
                                     "start_date",
                                     "end_date",
                                     "job_title_id",
                                     "job_title",
                                     "appraisal.rating",
                                     "mission_id",
                                 ],
                                 context = "mission",
                                 list_layout = list_layout,
                                 pagesize = None, # all records
                                 )
        docs_widget = dict(label = "Documents & Links",
                           label_create = "Add New Document / Link",
                           type = "datalist",
                           tablename = "doc_document",
                           context = ("~.doc_id", "doc_id"),
                           icon = "attachment",
                           # Default renderer:
                           #list_layout = s3db.doc_document_list_layouts,
                           )
        # Table configuration
        profile = URL(c="deploy", f="mission", args=["[id]", "profile"])
        configure(tablename,
                  create_next = profile,
                  crud_form = crud_form,
                  delete_next = URL(c="deploy", f="mission", args="summary"),
                  filter_widgets = [
                      S3TextFilter(["name",
                                    "code",
                                    "event_type_id$name",
                                    ],
                                   label=T("Search")
                                   ),
                      S3LocationFilter("location_id",
                                       label=messages.COUNTRY,
                                       widget="multiselect",
                                       levels=["L0"],
                                       hidden=True
                                       ),
                      S3OptionsFilter("event_type_id",
                                      widget="multiselect",
                                      hidden=True
                                      ),
                      S3OptionsFilter("status",
                                      options=mission_status_opts,
                                      hidden=True
                                      ),
                      S3DateFilter("created_on",
                                   hide_time=True,
                                   hidden=True
                                   ),
                  ],
                  list_fields = ["name",
                                 (T("Date"), "created_on"),
                                 "event_type_id",
                                 (T("Country"), "location_id"),
                                 "code",
                                 (T("Responses"), "response_count"),
                                 (T(label), "hrquantity"),
                                 "status",
                                 ],
                  orderby = "deploy_mission.created_on desc",
                  profile_cols = 1,
                  profile_header = lambda r: \
                                   deploy_rheader(r, profile=True),
                  profile_widgets = [alert_widget,
                                     response_widget,
                                     assignment_widget,
                                     docs_widget,
                                     ],
                  summary = [{"name": "rheader",
                              "common": True,
                              "widgets": [{"method": self.add_button}]
                              },
                             {"name": "table",
                              "label": "Table",
                              "widgets": [{"method": "datatable"}]
                              },
                             {"name": "report",
                              "label": "Report",
                              "widgets": [{"method": "report",
                                           "ajax_init": True}],
                              },
                             {"name": "map",
                              "label": "Map",
                              "widgets": [{"method": "map",
                                           "ajax_init": True}],
                              },
                             ],
                  super_entity = "doc_entity",
                  update_next = profile,
                  )
        # Components
        add_components(tablename,
                       deploy_assignment = "mission_id",
                       deploy_alert = "mission_id",
                       deploy_response = "mission_id",
                       )
        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("Create Mission"),
            title_display = T("Mission"),
            title_list = T("Missions"),
            title_update = T("Edit Mission Details"),
            title_upload = T("Import Missions"),
            label_list_button = T("List Missions"),
            label_delete_button = T("Delete Mission"),
            msg_record_created = T("Mission added"),
            msg_record_modified = T("Mission Details updated"),
            msg_record_deleted = T("Mission deleted"),
            msg_list_empty = T("No Missions currently registered"))
        # Reusable field
        represent = S3Represent(lookup = tablename,
                                linkto = URL(f="mission",
                                             args=["[id]", "profile"]),
                                show_link = True)
        mission_id = S3ReusableField("mission_id", "reference %s" % tablename,
                                     label = T("Mission"),
                                     ondelete = "CASCADE",
                                     represent = represent,
                                     requires = IS_ONE_OF(db,
                                                          "deploy_mission.id",
                                                          represent),
                                     )
        # ---------------------------------------------------------------------
        # Link table to link documents to missions, responses or assignments
        #
        tablename = "deploy_mission_document"
        define_table(tablename,
                     mission_id(),
                     self.msg_message_id(),
                     self.doc_document_id(),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        # Application of human resources
        # - agreement that an HR is generally available for assignments
        # - can come with certain restrictions
        #
        tablename = "deploy_application"
        define_table(tablename,
                     human_resource_id(empty = False,
                                       label = T(hr_label)),
                     Field("active", "boolean",
                           default = True,
                           label = T("Roster Status"),
                           represent = lambda opt: T("active") if opt else T("inactive"),
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  delete_next = URL(c="deploy", f="human_resource", args="summary"),
                  )
        # ---------------------------------------------------------------------
        # Assignment of human resources
        # - actual assignment of an HR to a mission
        #
        tablename = "deploy_assignment"
        define_table(tablename,
                     mission_id(),
                     human_resource_id(empty = False,
                                       label = T(hr_label)),
                     self.hrm_job_title_id(),
                     Field("job_title",
                           label = T("Position"),
                           ),
                     # These get copied to hrm_experience
                     # rest of fields may not be filled-out, but are in attachments
                     s3_date("start_date", # Only field visible when deploying from Mission profile
                             label = T("Start Date"),
                             ),
                     s3_date("end_date",
                             label = T("End Date"),
                             start_field = "deploy_assignment_start_date",
                             default_interval = 12,
                             ),
                     *s3_meta_fields())
        # Table configuration
        configure(tablename,
                  context = {"mission": "mission_id",
                             },
                  onaccept = self.deploy_assignment_onaccept,
                  filter_widgets = [
                      S3TextFilter(["human_resource_id$person_id$first_name",
                                    "human_resource_id$person_id$middle_name",
                                    "human_resource_id$person_id$last_name",
                                    "mission_id$code",
                                    ],
                                   label=T("Search")
                                   ),
                      S3OptionsFilter("mission_id$event_type_id",
                                      widget="multiselect",
                                      hidden=True
                                      ),
                      S3LocationFilter("mission_id$location_id",
                                       label=messages.COUNTRY,
                                       widget="multiselect",
                                       levels=["L0"],
                                       hidden=True
                                       ),
                      S3OptionsFilter("job_title_id",
                                      widget="multiselect",
                                      hidden=True,
                                      ),
                      S3DateFilter("start_date",
                                   hide_time=True,
                                   hidden=True,
                                   ),
                  ],
                  summary = [
                      {"name": "table",
                       "label": "Table",
                       "widgets": [{"method": "datatable"}]
                       },
                      {"name": "report",
                       "label": "Report",
                       "widgets": [{"method": "report",
                                    "ajax_init": True}]
                       },
                  ],
                  )
        # Components
        add_components(tablename,
                       hrm_appraisal = {"name": "appraisal",
                                        "link": "deploy_assignment_appraisal",
                                        "joinby": "assignment_id",
                                        "key": "appraisal_id",
                                        "autodelete": False,
                                        },
                       )
        assignment_id = S3ReusableField("assignment_id",
                                        "reference %s" % tablename,
                                        ondelete = "CASCADE")
        # ---------------------------------------------------------------------
        # Link Assignments to Appraisals
        #
        tablename = "deploy_assignment_appraisal"
        define_table(tablename,
                     assignment_id(empty = False),
                     Field("appraisal_id", self.hrm_appraisal),
                     *s3_meta_fields())
        configure(tablename,
                  ondelete_cascade = \
                      self.deploy_assignment_appraisal_ondelete_cascade,
                  )
        # ---------------------------------------------------------------------
        # Link Assignments to Experience
        #
        tablename = "deploy_assignment_experience"
        define_table(tablename,
                     assignment_id(empty = False),
                     Field("experience_id", self.hrm_experience),
                     *s3_meta_fields())
        configure(tablename,
                  ondelete_cascade = \
                      self.deploy_assignment_experience_ondelete_cascade,
                  )
        # ---------------------------------------------------------------------
        # Assignment of assets
        #
        # @todo: deploy_asset_assignment
        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict(deploy_mission_id = mission_id,
                    )
# -------------------------------------------------------------------------
def defaults(self):
"""
Safe defaults for model-global names in case module is disabled
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(deploy_mission_id = lambda **attr: dummy("mission_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def add_button(r, widget_id=None, visible=True, **attr):
# Check permission only here, i.e. when the summary is
# actually being rendered:
if current.auth.s3_has_permission("create", r.tablename):
return A(S3Method.crud_string(r.tablename,
"label_create"),
_href=r.url(method="create", id=0, vars={}),
_class="action-btn",
)
else:
return ""
# -------------------------------------------------------------------------
@staticmethod
def deploy_mission_name_represent(name):
table = current.s3db.deploy_mission
mission = current.db(table.name == name).select(table.id,
limitby=(0, 1)
).first()
if not mission:
return name
return A(name,
_href=URL(c="deploy", f="mission",
args=[mission.id, "profile"]))
# -------------------------------------------------------------------------
    @staticmethod
    def deploy_assignment_onaccept(form):
        """
            Create/update linked hrm_experience record for assignment

            @param form: the form (form.vars must contain the record id)
        """
        db = current.db
        s3db = current.s3db
        form_vars = form.vars
        assignment_id = form_vars.id
        # Fields to copy from the assignment into hrm_experience
        fields = ("human_resource_id",
                  "mission_id",
                  "job_title",
                  "job_title_id",
                  "start_date",
                  "end_date",
                  )
        if any(key not in form_vars for key in fields):
            # Need to reload the record
            atable = db.deploy_assignment
            query = (atable.id == assignment_id)
            qfields = [atable[f] for f in fields]
            row = db(query).select(limitby=(0, 1), *qfields).first()
            if row:
                data = dict((k, row[k]) for k in fields)
            else:
                # No such record
                return
        else:
            # Can use form vars
            data = dict((k, form_vars[k]) for k in fields)
        hr = mission = None
        # Lookup person details
        # (human_resource_id/mission_id are popped because hrm_experience
        # stores person/mission details, not the foreign keys themselves)
        human_resource_id = data.pop("human_resource_id")
        if human_resource_id:
            hrtable = s3db.hrm_human_resource
            hr = db(hrtable.id == human_resource_id).select(hrtable.person_id,
                                                            hrtable.type,
                                                            limitby=(0, 1)
                                                            ).first()
            if hr:
                data["person_id"] = hr.person_id
                data["employment_type"] = hr.type
        # Lookup mission details
        mission_id = data.pop("mission_id")
        if mission_id:
            mtable = db.deploy_mission
            mission = db(mtable.id == mission_id).select(mtable.location_id,
                                                         mtable.organisation_id,
                                                         limitby=(0, 1)
                                                         ).first()
            if mission:
                data["location_id"] = mission.location_id
                data["organisation_id"] = mission.organisation_id
        # Only create/update experience when both lookups succeeded
        if hr and mission:
            etable = s3db.hrm_experience
            # Lookup experience record for this assignment
            ltable = s3db.deploy_assignment_experience
            query = ltable.assignment_id == assignment_id
            link = db(query).select(ltable.experience_id,
                                    limitby=(0, 1)
                                    ).first()
            if link:
                # Update experience
                db(etable.id == link.experience_id).update(**data)
            else:
                # Create experience record
                experience_id = etable.insert(**data)
                # Create link
                ltable = db.deploy_assignment_experience
                ltable.insert(assignment_id = assignment_id,
                              experience_id = experience_id,
                              )
        return
# -------------------------------------------------------------------------
@staticmethod
def deploy_assignment_experience_ondelete_cascade(row, tablename=None):
"""
Remove linked hrm_experience record
@param row: the link to be deleted
@param tablename: the tablename (ignored)
"""
s3db = current.s3db
# Lookup experience ID
table = s3db.deploy_assignment_experience
link = current.db(table.id == row.id).select(table.id,
table.experience_id,
limitby=(0, 1)).first()
if not link:
return
else:
# Prevent infinite cascade
link.update_record(experience_id=None)
s3db.resource("hrm_experience", id=link.experience_id).delete()
# -------------------------------------------------------------------------
@staticmethod
def deploy_assignment_appraisal_ondelete_cascade(row, tablename=None):
"""
Remove linked hrm_appraisal record
@param row: the link to be deleted
@param tablename: the tablename (ignored)
"""
s3db = current.s3db
# Lookup experience ID
table = s3db.deploy_assignment_appraisal
link = current.db(table.id == row.id).select(table.id,
table.appraisal_id,
limitby=(0, 1)).first()
if not link:
return
else:
# Prevent infinite cascade
link.update_record(appraisal_id=None)
s3db.resource("hrm_appraisal", id=link.appraisal_id).delete()
# =============================================================================
class S3DeploymentAlertModel(S3Model):
names = ("deploy_alert",
"deploy_alert_recipient",
"deploy_response",
)
    def model(self):
        """
            Define the alerting data model:
              - deploy_alert (also the PE representing its recipients)
              - deploy_alert_recipient
              - deploy_response

            @return: dict of names to pass to global scope (s3.*)
        """
        T = current.T
        db = current.db
        add_components = self.add_components
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        NONE = current.messages["NONE"]
        human_resource_id = self.hrm_human_resource_id
        message_id = self.msg_message_id
        mission_id = self.deploy_mission_id
        hr_label = current.deployment_settings.get_deploy_hr_label()
        # How the alert is delivered (9 = both email and SMS)
        contact_method_opts = {1: T("Email"),
                               2: T("SMS"),
                               #3: T("Twitter"),
                               #9: T("All"),
                               9: T("Both"),
                               }
        # ---------------------------------------------------------------------
        # Alert
        #  - also the PE representing its Recipients
        #
        tablename = "deploy_alert"
        define_table(tablename,
                     self.super_link("pe_id", "pr_pentity"),
                     mission_id(
                        # Only open missions can be alerted about
                        requires = IS_ONE_OF(db,
                                             "deploy_mission.id",
                                             S3Represent(lookup="deploy_mission"),
                                             filterby="status",
                                             filter_opts=(2,),
                                             ),
                        ),
                     Field("contact_method", "integer",
                           default = 1,
                           label = T("Send By"),
                           represent = lambda opt: \
                                       contact_method_opts.get(opt, NONE),
                           requires = IS_IN_SET(contact_method_opts),
                           ),
                     Field("subject", length=78,    # RFC 2822
                           label = T("Subject"),
                           # Not used by SMS
                           #requires = IS_NOT_EMPTY(),
                           ),
                     Field("body", "text",
                           label = T("Message"),
                           represent = lambda v: v or NONE,
                           requires = IS_NOT_EMPTY(),
                           ),
                     # Link to the Message once sent
                     message_id(readable = False),
                     *s3_meta_fields())
        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("Create Alert"),
            title_display = T("Alert Details"),
            title_list = T("Alerts"),
            title_update = T("Edit Alert Details"),
            title_upload = T("Import Alerts"),
            label_list_button = T("List Alerts"),
            label_delete_button = T("Delete Alert"),
            msg_record_created = T("Alert added"),
            msg_record_modified = T("Alert Details updated"),
            msg_record_deleted = T("Alert deleted"),
            msg_list_empty = T("No Alerts currently registered"))
        # CRUD Form
        crud_form = S3SQLCustomForm("mission_id",
                                    "contact_method",
                                    "subject",
                                    "body",
                                    "modified_on",
                                    )
        # Table Configuration
        configure(tablename,
                  super_entity = "pr_pentity",
                  context = {"mission": "mission_id"},
                  crud_form = crud_form,
                  list_fields = ["mission_id",
                                 "contact_method",
                                 "subject",
                                 "body",
                                 "alert_recipient.human_resource_id",
                                 ],
                  )
        # Components
        add_components(tablename,
                       deploy_alert_recipient = {"name": "recipient",
                                                 "joinby": "alert_id",
                                                 },
                       # Used to select existing HRs as recipients
                       hrm_human_resource = {"name": "select",
                                             "link": "deploy_alert_recipient",
                                             "joinby": "alert_id",
                                             "key": "human_resource_id",
                                             "autodelete": False,
                                             },
                       )
        # Custom method to send alerts
        self.set_method("deploy", "alert",
                        method = "send",
                        action = self.deploy_alert_send)
        # Reusable field
        represent = S3Represent(lookup=tablename)
        alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
                                   label = T("Alert"),
                                   ondelete = "CASCADE",
                                   represent = represent,
                                   requires = IS_ONE_OF(db, "deploy_alert.id",
                                                        represent),
                                   )
        # ---------------------------------------------------------------------
        # Recipients of the Alert
        #
        tablename = "deploy_alert_recipient"
        define_table(tablename,
                     alert_id(),
                     human_resource_id(empty = False,
                                       label = T(hr_label)),
                     *s3_meta_fields())
        # CRUD Strings
        crud_strings[tablename] = Storage(
            label_create = T("Add Recipient"),
            title_display = T("Recipient Details"),
            title_list = T("Recipients"),
            title_update = T("Edit Recipient Details"),
            title_upload = T("Import Recipients"),
            label_list_button = T("List Recipients"),
            label_delete_button = T("Delete Recipient"),
            msg_record_created = T("Recipient added"),
            msg_record_modified = T("Recipient Details updated"),
            msg_record_deleted = T("Recipient deleted"),
            msg_list_empty = T("No Recipients currently defined"))
        # ---------------------------------------------------------------------
        # Responses to Alerts
        #
        tablename = "deploy_response"
        define_table(tablename,
                     mission_id(),
                     human_resource_id(label = T(hr_label)),
                     message_id(label = T("Message"),
                                writable = False),
                     s3_comments(),
                     *s3_meta_fields())
        crud_form = S3SQLCustomForm("mission_id",
                                    "human_resource_id",
                                    "message_id",
                                    "comments",
                                    # @todo:
                                    #S3SQLInlineComponent("document"),
                                    )
        # Table Configuration
        configure(tablename,
                  context = {"mission": "mission_id"},
                  crud_form = crud_form,
                  #editable = False,
                  insertable = False,
                  update_onaccept = self.deploy_response_update_onaccept,
                  )
        # CRUD Strings
        NO_MESSAGES = T("No Messages found")
        crud_strings[tablename] = Storage(
            title_display = T("Response Message"),
            title_list = T("Response Messages"),
            title_update = T("Edit Response Details"),
            label_list_button = T("All Response Messages"),
            label_delete_button = T("Delete Message"),
            msg_record_deleted = T("Message deleted"),
            msg_no_match = NO_MESSAGES,
            msg_list_empty = NO_MESSAGES)
        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict()
# -------------------------------------------------------------------------
    @staticmethod
    def deploy_alert_send(r, **attr):
        """
            Custom Method to send an Alert

            @param r: the S3Request for the alert record
            @param attr: controller attributes (unused)
        """
        alert_id = r.id
        if r.representation != "html" or not alert_id or r.component:
            # NOTE(review): BADMETHOD is not defined in this module —
            # presumably a controller/framework global; confirm it resolves
            # here, otherwise this raises NameError instead of HTTP 501
            raise HTTP(501, BADMETHOD)
        # Must have permission to update the alert in order to send it
        authorised = current.auth.s3_has_permission("update", "deploy_alert",
                                                    record_id = alert_id)
        if not authorised:
            r.unauthorised()
        T = current.T
        record = r.record
        # Always redirect to the Mission Profile
        mission_id = record.mission_id
        next_url = URL(f="mission", args=[mission_id, "profile"])
        # Check whether the alert has already been sent
        # - alerts should be read-only after creation
        if record.message_id:
            current.session.error = T("This Alert has already been sent!")
            redirect(next_url)
        db = current.db
        s3db = current.s3db
        table = s3db.deploy_alert
        contact_method = record.contact_method
        # Check whether there are recipients
        ltable = db.deploy_alert_recipient
        query = (ltable.alert_id == alert_id) & \
                (ltable.deleted == False)
        if contact_method == 9:
            # Save a subsequent query
            # ("Both": full recipient list needed again below for the
            # duplicate SMS alert)
            recipients = db(query).select(ltable.human_resource_id)
        else:
            recipients = db(query).select(ltable.id,
                                          limitby=(0, 1)).first()
        if not recipients:
            current.session.error = T("This Alert has no Recipients yet!")
            redirect(next_url)
        # Send Message
        message = record.body
        msg = current.msg
        if contact_method == 2:
            # Send SMS
            message_id = msg.send_by_pe_id(record.pe_id,
                                           contact_method = "SMS",
                                           message=message,
                                           )
        elif contact_method == 9:
            # Send both
            # Create separate alert for this
            # (note: "id" shadows the Python builtin here)
            id = table.insert(body = message,
                              contact_method = 2,
                              mission_id = mission_id,
                              created_by = record.created_by,
                              created_on = record.created_on,
                              )
            new_alert = dict(id=id)
            s3db.update_super(table, new_alert)
            # Add Recipients
            for row in recipients:
                ltable.insert(alert_id = id,
                              human_resource_id = row.human_resource_id,
                              )
            # Send SMS
            message_id = msg.send_by_pe_id(new_alert["pe_id"],
                                           contact_method = "SMS",
                                           message=message,
                                           )
            # Update the Alert to show it's been Sent
            db(table.id == id).update(message_id=message_id)
        if contact_method in (1, 9):
            # Send Email
            # Embed the mission_id to parse replies
            # = @ToDo: Use a Message Template to add Footer (very simple one for RDRT)
            message = "%s\n:mission_id:%s:" % (message, mission_id)
            # Lookup from_address
            # @ToDo: Allow multiple channels to be defined &
            #        select the appropriate one for this mission
            ctable = s3db.msg_email_channel
            channel = db(ctable.deleted == False).select(ctable.username,
                                                         ctable.server,
                                                         limitby = (0, 1)
                                                         ).first()
            if not channel:
                current.session.error = T("Need to configure an Email Address!")
                redirect(URL(f="email_channel"))
            from_address = "%s@%s" % (channel.username, channel.server)
            message_id = msg.send_by_pe_id(record.pe_id,
                                           subject=record.subject,
                                           message=message,
                                           from_address=from_address,
                                           )
        # Update the Alert to show it's been Sent
        data = dict(message_id=message_id)
        if contact_method == 2:
            # Clear the Subject
            data["subject"] = None
        elif contact_method == 9:
            # Also modify the contact_method to show that this is the email one
            data["contact_method"] = 1
        db(table.id == alert_id).update(**data)
        # Return to the Mission Profile
        current.session.confirmation = T("Alert Sent")
        redirect(next_url)
# -------------------------------------------------------------------------
@staticmethod
def deploy_response_update_onaccept(form):
"""
Update the doc_id in all attachments (doc_document) to the
hrm_human_resource the response is linked to.
@param form: the form
"""
db = current.db
s3db = current.s3db
data = form.vars
if not data or "id" not in data:
return
# Get message ID and human resource ID
if "human_resource_id" not in data or "message_id" not in data:
rtable = s3db.deploy_response
response = db(rtable.id == data.id).select(rtable.human_resource_id,
rtable.message_id,
limitby=(0, 1)
).first()
if not response:
return
human_resource_id = response.human_resource_id
message_id = response.message_id
else:
human_resource_id = data.human_resource_id
message_id = data.message_id
# Update doc_id in all attachments (if any)
dtable = s3db.doc_document
ltable = s3db.deploy_mission_document
query = (ltable.message_id == response.message_id) & \
(dtable.id == ltable.document_id) & \
(ltable.deleted == False) & \
(dtable.deleted == False)
attachments = db(query).select(dtable.id)
if attachments:
# Get the doc_id from the hrm_human_resource
doc_id = None
if human_resource_id:
htable = s3db.hrm_human_resource
hr = db(htable.id == human_resource_id).select(htable.doc_id,
limitby=(0, 1)
).first()
if hr:
doc_id = hr.doc_id
db(dtable.id.belongs(attachments)).update(doc_id=doc_id)
return
# =============================================================================
def deploy_rheader(r, tabs=[], profile=False):
    """
        Deployment Resource Headers

        @param r: the S3Request
        @param tabs: unused (tabs are built per-resource below)
        @param profile: whether the rheader is rendered on a profile page

        @return: the rheader HTML, or None for non-interactive formats
    """
    if r.representation != "html":
        # RHeaders only used in interactive views
        return None
    record = r.record
    if not record:
        # List or Create form: rheader makes no sense here
        return None
    has_permission = current.auth.s3_has_permission
    T = current.T
    table = r.table
    tablename = r.tablename
    rheader = None
    resourcename = r.name
    if resourcename == "alert":
        alert_id = r.id
        db = current.db
        # Count recipients for the tab label
        ltable = db.deploy_alert_recipient
        query = (ltable.alert_id == alert_id) & \
                (ltable.deleted == False)
        recipients = db(query).count()
        # Alerts are read-only once sent (message_id set)
        unsent = not r.record.message_id
        authorised = has_permission("update", tablename, record_id=alert_id)
        if unsent and authorised:
            # Disable the send-button unless there are recipients
            send_button = BUTTON(T("Send Alert"), _class="alert-send-btn")
            if recipients:
                send_button.update(_onclick="window.location.href='%s'" %
                                            URL(c="deploy",
                                                f="alert",
                                                args=[alert_id, "send"]))
            else:
                send_button.update(_disabled="disabled")
        else:
            send_button = ""
        # Tabs
        tabs = [(T("Message"), None),
                (T("Recipients (%(number)s Total)") %
                    dict(number=recipients),
                 "recipient"),
                ]
        if unsent and authorised:
            # Insert tab to select recipients
            tabs.insert(1, (T("Select Recipients"), "select"))
        rheader_tabs = s3_rheader_tabs(r, tabs)
        rheader = DIV(TABLE(TR(TH("%s: " % table.mission_id.label),
                               table.mission_id.represent(record.mission_id),
                               send_button,
                               ),
                            TR(TH("%s: " % table.subject.label),
                               record.subject
                               ),
                            ), rheader_tabs, _class="alert-rheader")
    elif resourcename == "mission":
        if not profile and not r.component:
            rheader = ""
        else:
            crud_string = S3Method.crud_string
            record = r.record
            title = crud_string(r.tablename, "title_display")
            if record:
                title = "%s: %s" % (title, record.name)
                edit_btn = ""
                if profile and \
                   current.auth.s3_has_permission("update",
                                                  "deploy_mission",
                                                  record_id=r.id):
                    crud_button = S3CRUD.crud_button
                    edit_btn = crud_button(T("Edit"),
                                           _href=r.url(method="update"))
                # Helpers to render one label/value table cell per field
                label = lambda f, table=table, record=record, **attr: \
                               TH("%s: " % table[f].label, **attr)
                value = lambda f, table=table, record=record, **attr: \
                               TD(table[f].represent(record[f]), **attr)
                rheader = DIV(H2(title),
                              TABLE(TR(label("event_type_id"),
                                       value("event_type_id"),
                                       label("location_id"),
                                       value("location_id"),
                                       label("code"),
                                       value("code"),
                                       ),
                                    TR(label("created_on"),
                                       value("created_on"),
                                       label("status"),
                                       value("status"),
                                       ),
                                    TR(label("comments"),
                                       value("comments",
                                             _class="mission-comments",
                                             _colspan="6",
                                             ),
                                       ),
                                    ),
                              _class="mission-rheader"
                              )
                if edit_btn:
                    # Append the edit-button to the first cell of the last row
                    rheader[-1][0].append(edit_btn)
            else:
                rheader = H2(title)
    return rheader
# =============================================================================
def deploy_mission_hrquantity(row):
    """
        Virtual field: number of human resources deployed to a mission
    """
    # Unwrap joined Rows
    if hasattr(row, "deploy_mission"):
        row = row.deploy_mission
    try:
        mission_id = row.id
    except AttributeError:
        return 0
    db = current.db
    atable = db.deploy_assignment
    total = atable.id.count()
    result = db(atable.mission_id == mission_id).select(total).first()
    return result[total] if result else 0
# =============================================================================
def deploy_mission_response_count(row):
    """
        Virtual field: number of responses to a mission
    """
    # Unwrap joined Rows
    if hasattr(row, "deploy_mission"):
        row = row.deploy_mission
    try:
        mission_id = row.id
    except AttributeError:
        return 0
    db = current.db
    rtable = db.deploy_response
    total = rtable.id.count()
    result = db(rtable.mission_id == mission_id).select(total).first()
    return result[total] if result else 0
# =============================================================================
def deploy_member_filter(status=False):
    """
        Filter widgets for members (hrm_human_resource), used in
        custom methods for member selection, e.g. deploy_apply
        or deploy_alert_select_recipients

        @param status: add a roster-status filter (only used when
                       selecting alert recipients)

        @return: list of filter widgets
    """
    T = current.T
    widgets = [S3TextFilter(["person_id$first_name",
                             "person_id$middle_name",
                             "person_id$last_name",
                             ],
                            label=T("Name"),
                            ),
               S3OptionsFilter("organisation_id",
                               filter=True,
                               hidden=True,
                               ),
               S3OptionsFilter("credential.job_title_id",
                               # @ToDo: Label setting
                               label = T("Sector"),
                               hidden=True,
                               ),
               ]
    settings = current.deployment_settings
    if settings.get_org_regions():
        # Region filter: hierarchical or flat depending on deployment settings
        if settings.get_org_regions_hierarchical():
            widgets.insert(1, S3HierarchyFilter("organisation_id$region_id",
                                                lookup="org_region",
                                                hidden=True,
                                                none=T("No Region"),
                                                ))
        else:
            widgets.insert(1, S3OptionsFilter("organisation_id$region_id",
                                              widget="multiselect",
                                              filter=True,
                                              ))
    if status:
        # Additional filter for roster status (default=active), allows
        # to explicitly include inactive roster members when selecting
        # alert recipients (only used there)
        widgets.insert(1, S3OptionsFilter("application.active",
                                          cols = 2,
                                          default = True,
                                          # Don't hide otherwise default
                                          # doesn't apply:
                                          #hidden = False,
                                          label = T("Status"),
                                          options = {"True": T("active"),
                                                     "False": T("inactive"),
                                                     },
                                          ))
    return widgets
# =============================================================================
class deploy_Inbox(S3Method):
    """
        Custom method for the email inbox: renders a datatable of
        received messages with a bulk-delete option
    """
    def apply_method(self, r, **attr):
        """
            Custom method for email inbox, provides a datatable with bulk-delete
            option

            @param r: the S3Request
            @param attr: the controller attributes
        """
        T = current.T
        s3db = current.s3db
        response = current.response
        s3 = response.s3
        resource = self.resource
        if r.http == "POST":
            # Bulk-delete the selected messages
            deleted = 0
            post_vars = r.post_vars
            if all([n in post_vars for n in ("delete", "selected", "mode")]):
                selected = post_vars.selected
                if selected:
                    selected = selected.split(",")
                else:
                    selected = []
                if selected:
                    # Handle exclusion filter
                    # ("Exclusive" mode = all filtered rows EXCEPT the selected ones)
                    if post_vars.mode == "Exclusive":
                        if "filterURL" in post_vars:
                            filters = S3URLQuery.parse_url(post_vars.ajaxURL)
                        else:
                            filters = None
                        query = ~(FS("id").belongs(selected))
                        mresource = s3db.resource("msg_email",
                                                  filter=query,
                                                  vars=filters,
                                                  )
                        if response.s3.filter:
                            mresource.add_filter(response.s3.filter)
                        rows = mresource.select(["id"], as_rows=True)
                        selected = [str(row.id) for row in rows]
                        query = (FS("id").belongs(selected))
                        mresource = s3db.resource("msg_email", filter=query)
                    else:
                        mresource = resource
                    # Delete the messages
                    deleted = mresource.delete(format=r.representation)
                    if deleted:
                        response.confirmation = T("%(number)s messages deleted") % \
                                                dict(number=deleted)
                    else:
                        response.warning = T("No messages could be deleted")
        # List fields
        list_fields = ["id",
                       "date",
                       "from_address",
                       "subject",
                       "body",
                       (T("Attachments"), "attachment.document_id"),
                       ]
        # Truncate message body
        table = resource.table
        table.body.represent = lambda body: DIV(XML(body),
                                                _class="s3-truncate")
        s3_trunk8()
        # Data table filter & sorting
        get_vars = r.get_vars
        totalrows = resource.count()
        if "pageLength" in get_vars:
            display_length = get_vars["pageLength"]
            if display_length == "None":
                display_length = None
            else:
                display_length = int(display_length)
        else:
            display_length = 25
        if display_length:
            # Pre-fetch a few pages ahead for client-side paging
            limit = 4 * display_length
        else:
            limit = None
        # NOTE: "filter" shadows the Python builtin here
        filter, orderby, left = resource.datatable_filter(list_fields, get_vars)
        resource.add_filter(filter)
        # Extract the data
        data = resource.select(list_fields,
                               start=0,
                               limit=limit,
                               orderby=orderby,
                               left=left,
                               count=True,
                               represent=True)
        # Instantiate the data table
        filteredrows = data["numrows"]
        dt = S3DataTable(data["rfields"], data["rows"])
        dt_id = "datatable"
        # Bulk actions
        # @todo: user confirmation
        dt_bulk_actions = [(T("Delete"), "delete")]
        if r.representation == "html":
            # Full-page render
            # Action buttons
            s3.actions = [{"label": str(T("Link to Mission")),
                           "_class": "action-btn link",
                           "url": URL(f="email_inbox", args=["[id]", "select"]),
                           },
                          ]
            S3CRUD.action_buttons(r,
                                  editable=False,
                                  read_url = r.url(method="read", id="[id]"),
                                  delete_url = r.url(method="delete", id="[id]"),
                                  )
            # Export not needed
            s3.no_formats = True
            # Render data table
            items = dt.html(totalrows,
                            filteredrows,
                            dt_id,
                            dt_ajax_url=URL(c = "deploy",
                                            f = "email_inbox",
                                            extension = "aadata",
                                            vars = {},
                                            ),
                            dt_bulk_actions = dt_bulk_actions,
                            dt_pageLength = display_length,
                            dt_pagination = "true",
                            dt_searching = "true",
                            )
            response.view = "list_filter.html"
            return {"items": items,
                    "title": S3CRUD.crud_string(resource.tablename, "title_list"),
                    }
        elif r.representation == "aadata":
            # Ajax refresh
            echo = int(get_vars.draw) if "draw" in get_vars else None
            response = current.response
            response.headers["Content-Type"] = "application/json"
            return dt.json(totalrows,
                           filteredrows,
                           dt_id,
                           echo,
                           dt_bulk_actions = dt_bulk_actions)
        else:
            r.error(405, current.ERROR.BAD_FORMAT)
# =============================================================================
def deploy_apply(r, **attr):
    """
        Custom method to select new RDRT members

        POST: process a bulk-selection from the datatable and create/activate
              deploy_application records for the selected human resources
        GET:  render the member selection datatable (html) or serve an Ajax
              refresh for it (aadata)

        @param r: the S3Request
        @param attr: controller attributes

        @todo: make workflow re-usable for manual assignments
    """

    # Requires permission to create deploy_application
    authorised = current.auth.s3_has_permission("create", "deploy_application")
    if not authorised:
        r.unauthorised()

    T = current.T
    s3db = current.s3db

    get_vars = r.get_vars
    response = current.response
    #settings = current.deployment_settings

    if r.http == "POST":

        added = 0
        post_vars = r.post_vars
        if all(n in post_vars for n in ("add", "selected", "mode")):
            selected = post_vars.selected
            if selected:
                selected = selected.split(",")
            else:
                selected = []

            db = current.db
            atable = s3db.deploy_application
            if selected:
                # Handle exclusion filter ("all except the de-selected rows")
                if post_vars.mode == "Exclusive":
                    if "filterURL" in post_vars:
                        # FIX: was post_vars.ajaxURL, which the client does
                        # not send - the filter URL is in post_vars.filterURL
                        # (cf. deploy_alert_select_recipients)
                        filters = S3URLQuery.parse_url(post_vars.filterURL)
                    else:
                        filters = None
                    query = ~(FS("id").belongs(selected))
                    hresource = s3db.resource("hrm_human_resource",
                                              filter=query, vars=filters)
                    rows = hresource.select(["id"], as_rows=True)
                    selected = [str(row.id) for row in rows]

                # Existing applications for the selected HRs (incl. inactive)
                query = (atable.human_resource_id.belongs(selected)) & \
                        (atable.deleted != True)
                rows = db(query).select(atable.id,
                                        atable.active)
                rows = dict((row.id, row) for row in rows)
                for human_resource_id in selected:
                    try:
                        hr_id = int(human_resource_id.strip())
                    except ValueError:
                        # Skip any non-numeric selection tokens
                        continue
                    if hr_id in rows:
                        # Re-activate an existing application if required
                        row = rows[hr_id]
                        if not row.active:
                            row.update_record(active=True)
                            added += 1
                    else:
                        atable.insert(human_resource_id=human_resource_id,
                                      active=True)
                        added += 1

        # @ToDo: Move 'RDRT' label to settings
        current.session.confirmation = T("%(number)s RDRT members added") % \
                                       dict(number=added)
        if added > 0:
            redirect(URL(f="human_resource", args=["summary"], vars={}))
        else:
            redirect(URL(f="application", vars={}))

    elif r.http == "GET":

        # Filter widgets
        filter_widgets = deploy_member_filter()

        # List fields
        list_fields = ["id",
                       "person_id",
                       "job_title_id",
                       "organisation_id",
                       ]

        # Data table
        resource = r.resource
        totalrows = resource.count()
        if "pageLength" in get_vars:
            display_length = get_vars["pageLength"]
            if display_length == "None":
                display_length = None
            else:
                display_length = int(display_length)
        else:
            display_length = 25
        if display_length:
            # Serve a few pages ahead for client-side pagination
            limit = 4 * display_length
        else:
            limit = None
        filter, orderby, left = resource.datatable_filter(list_fields, get_vars)
        resource.add_filter(filter)
        data = resource.select(list_fields,
                               start=0,
                               limit=limit,
                               orderby=orderby,
                               left=left,
                               count=True,
                               represent=True)
        filteredrows = data["numrows"]
        dt = S3DataTable(data["rfields"], data["rows"])
        dt_id = "datatable"

        # Bulk actions
        # @todo: generalize label
        dt_bulk_actions = [(T("Add as RDRT Members"), "add")]

        if r.representation == "html":
            # Page load
            resource.configure(deletable = False)

            #dt.defaultActionButtons(resource)
            profile_url = URL(f = "human_resource",
                              args = ["[id]", "profile"])
            S3CRUD.action_buttons(r,
                                  deletable = False,
                                  read_url = profile_url,
                                  update_url = profile_url)
            response.s3.no_formats = True

            # Data table (items)
            items = dt.html(totalrows,
                            filteredrows,
                            dt_id,
                            dt_pageLength=display_length,
                            dt_ajax_url=URL(c="deploy",
                                            f="application",
                                            extension="aadata",
                                            vars={},
                                            ),
                            dt_searching="false",
                            dt_pagination="true",
                            dt_bulk_actions=dt_bulk_actions,
                            )

            # Filter form
            if filter_widgets:

                # Where to retrieve filtered data from:
                _vars = resource.crud._remove_filters(r.get_vars)
                filter_submit_url = r.url(vars=_vars)

                # Where to retrieve updated filter options from:
                filter_ajax_url = URL(f="human_resource",
                                      args=["filter.options"],
                                      vars={})

                get_config = resource.get_config
                filter_clear = get_config("filter_clear", True)
                filter_formstyle = get_config("filter_formstyle", None)
                filter_submit = get_config("filter_submit", True)
                filter_form = S3FilterForm(filter_widgets,
                                           clear=filter_clear,
                                           formstyle=filter_formstyle,
                                           submit=filter_submit,
                                           ajax=True,
                                           url=filter_submit_url,
                                           ajaxurl=filter_ajax_url,
                                           _class="filter-form",
                                           _id="datatable-filter-form",
                                           )
                fresource = current.s3db.resource(resource.tablename)
                alias = resource.alias if r.component else None
                ff = filter_form.html(fresource,
                                      r.get_vars,
                                      target="datatable",
                                      alias=alias)
            else:
                ff = ""

            output = dict(items = items,
                          # @todo: generalize
                          title = T("Add RDRT Members"),
                          list_filter_form = ff)

            response.view = "list_filter.html"
            return output

        elif r.representation == "aadata":
            # Ajax refresh
            if "draw" in get_vars:
                echo = int(get_vars.draw)
            else:
                echo = None
            items = dt.json(totalrows,
                            filteredrows,
                            dt_id,
                            echo,
                            dt_bulk_actions=dt_bulk_actions)
            response.headers["Content-Type"] = "application/json"
            return items

        else:
            r.error(501, current.ERROR.BAD_FORMAT)

    else:
        r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
def deploy_alert_select_recipients(r, **attr):
    """
        Custom method to select Recipients for an Alert

        POST: add the bulk-selected human resources as recipients of the
              alert, then fall through to re-render the selection page
        GET:  render the member selection datatable (html) or serve an
              Ajax refresh for it (aadata)

        @param r: the S3Request (must target an alert record with component)
        @param attr: controller attributes (may contain an "rheader" callable)
    """

    alert_id = r.id
    if r.representation not in ("html", "aadata") or \
       not alert_id or \
       not r.component:
        r.error(405, current.ERROR.BAD_METHOD)

    # Must have permission to update the alert in order to add recipients
    authorised = current.auth.s3_has_permission("update", "deploy_alert",
                                                record_id = alert_id)
    if not authorised:
        r.unauthorised()

    T = current.T
    s3db = current.s3db

    response = current.response
    # Only HRs with an active RDRT application qualify as recipients
    member_query = FS("application.active") != None

    if r.http == "POST":

        added = 0
        post_vars = r.post_vars
        if all([n in post_vars for n in ("select", "selected", "mode")]):
            selected = post_vars.selected
            if selected:
                selected = selected.split(",")
            else:
                selected = []

            db = current.db
            # Handle exclusion filter ("all except the de-selected rows")
            if post_vars.mode == "Exclusive":
                if "filterURL" in post_vars:
                    filters = S3URLQuery.parse_url(post_vars.filterURL)
                else:
                    filters = None
                query = member_query & \
                        (~(FS("id").belongs(selected)))
                hresource = s3db.resource("hrm_human_resource",
                                          filter=query, vars=filters)
                rows = hresource.select(["id"], as_rows=True)
                selected = [str(row.id) for row in rows]

            # Skip HRs which are already recipients of this alert
            rtable = s3db.deploy_alert_recipient
            query = (rtable.alert_id == alert_id) & \
                    (rtable.human_resource_id.belongs(selected)) & \
                    (rtable.deleted != True)
            rows = db(query).select(rtable.human_resource_id)
            skip = set(row.human_resource_id for row in rows)

            for human_resource_id in selected:
                try:
                    hr_id = int(human_resource_id.strip())
                except ValueError:
                    # Skip any non-numeric selection tokens
                    continue
                if hr_id in skip:
                    continue
                rtable.insert(alert_id=alert_id,
                              human_resource_id=human_resource_id,
                              )
                added += 1

        # NOTE(review): 'selected' is unbound here if the POST lacked the
        # select/selected/mode keys - confirm clients always send them
        if not selected:
            response.warning = T("No Recipients Selected!")
        else:
            response.confirmation = T("%(number)s Recipients added to Alert") % \
                                    dict(number=added)
        # No redirect: fall through to re-render the selection page

    get_vars = r.get_vars or {}
    representation = r.representation

    settings = current.deployment_settings
    resource = s3db.resource("hrm_human_resource",
                             filter=member_query, vars=r.get_vars)

    # Filter widgets (including roster status)
    filter_widgets = deploy_member_filter(status=True)

    if filter_widgets and representation == "html":
        # Apply filter defaults
        resource.configure(filter_widgets = filter_widgets)
        S3FilterForm.apply_filter_defaults(r, resource)

    # List fields
    list_fields = ["id",
                   "person_id",
                   "job_title_id",
                   "organisation_id",
                   ]

    # Data table
    totalrows = resource.count()
    if "pageLength" in get_vars:
        display_length = get_vars["pageLength"]
        if display_length == "None":
            display_length = None
        else:
            display_length = int(display_length)
    else:
        display_length = 25
    if display_length:
        # Serve a few pages ahead for client-side pagination
        limit = 4 * display_length
    else:
        limit = None
    filter, orderby, left = resource.datatable_filter(list_fields, get_vars)
    resource.add_filter(filter)
    data = resource.select(list_fields,
                           start=0,
                           limit=limit,
                           orderby=orderby,
                           left=left,
                           count=True,
                           represent=True)
    filteredrows = data["numrows"]
    dt = S3DataTable(data["rfields"], data["rows"])
    dt_id = "datatable"

    # Bulk actions
    dt_bulk_actions = [(T("Select as Recipients"), "select")]

    if representation == "html":
        # Page load
        resource.configure(deletable = False)

        #dt.defaultActionButtons(resource)
        response.s3.no_formats = True

        # Data table (items)
        items = dt.html(totalrows,
                        filteredrows,
                        dt_id,
                        dt_ajax_url=r.url(representation="aadata"),
                        dt_bulk_actions=dt_bulk_actions,
                        dt_pageLength=display_length,
                        dt_pagination="true",
                        dt_searching="false",
                        )

        # Filter form
        if filter_widgets:

            # Where to retrieve filtered data from:
            _vars = resource.crud._remove_filters(r.get_vars)
            filter_submit_url = r.url(vars=_vars)

            # Where to retrieve updated filter options from:
            filter_ajax_url = URL(f="human_resource",
                                  args=["filter.options"],
                                  vars={})

            get_config = resource.get_config
            filter_clear = get_config("filter_clear", True)
            filter_formstyle = get_config("filter_formstyle", None)
            filter_submit = get_config("filter_submit", True)
            filter_form = S3FilterForm(filter_widgets,
                                       clear=filter_clear,
                                       formstyle=filter_formstyle,
                                       submit=filter_submit,
                                       ajax=True,
                                       url=filter_submit_url,
                                       ajaxurl=filter_ajax_url,
                                       _class="filter-form",
                                       _id="datatable-filter-form",
                                       )
            fresource = current.s3db.resource(resource.tablename)
            alias = resource.alias if r.component else None
            ff = filter_form.html(fresource,
                                  r.get_vars,
                                  target="datatable",
                                  alias=alias)
        else:
            ff = ""

        output = dict(items=items,
                      title=T("Select Recipients"),
                      list_filter_form=ff)

        # Maintain RHeader for consistency
        if attr.get("rheader"):
            rheader = attr["rheader"](r)
            if rheader:
                output["rheader"] = rheader

        response.view = "list_filter.html"
        return output

    elif representation == "aadata":
        # Ajax refresh
        if "draw" in get_vars:
            echo = int(get_vars.draw)
        else:
            echo = None
        items = dt.json(totalrows,
                        filteredrows,
                        dt_id,
                        echo,
                        dt_bulk_actions=dt_bulk_actions)
        response.headers["Content-Type"] = "application/json"
        return items

    else:
        r.error(501, current.ERROR.BAD_FORMAT)
# =============================================================================
def deploy_response_select_mission(r, **attr):
    """
        Custom method to Link a Response to a Mission &/or Human Resource

        With ?mission_id= (and optionally ?hr_id=): create the link(s),
        attach any message attachments to the mission, then redirect to
        the email inbox. Otherwise: render a datatable of open missions
        to select from (html), or serve its Ajax refresh (aadata).

        @param r: the S3Request (must target a message record with component)
        @param attr: controller attributes
    """

    message_id = r.record.message_id if r.record else None
    if r.representation not in ("html", "aadata") or not message_id or not r.component:
        r.error(405, current.ERROR.BAD_METHOD)

    T = current.T
    db = current.db
    s3db = current.s3db

    # Attachments of the message (if any)
    atable = s3db.msg_attachment
    dtable = db.doc_document
    query = (atable.message_id == message_id) & \
            (atable.document_id == dtable.id)
    atts = db(query).select(dtable.id,
                            dtable.file,
                            dtable.name,
                            )

    response = current.response
    # Only open missions (status 2) are offered for linking
    mission_query = FS("mission.status") == 2

    get_vars = r.get_vars or {}
    mission_id = get_vars.get("mission_id", None)
    if mission_id:
        # Create the link(s) and redirect back to the inbox
        hr_id = get_vars.get("hr_id", None)
        if not hr_id:
            # @ToDo: deployment_setting for 'Member' label
            current.session.warning = T("No Member Selected!")
            # Can still link to the mission, member can be set
            # manually in the mission profile
            s3db.deploy_response.insert(message_id = message_id,
                                        mission_id = mission_id,
                                        )
        else:
            s3db.deploy_response.insert(message_id = message_id,
                                        mission_id = mission_id,
                                        human_resource_id = hr_id,
                                        )
        # Are there any attachments?
        if atts:
            ltable = s3db.deploy_mission_document
            if hr_id:
                # Set documents to the Member's doc_id
                hrtable = s3db.hrm_human_resource
                doc_id = db(hrtable.id == hr_id).select(hrtable.doc_id,
                                                        limitby=(0, 1)
                                                        ).first().doc_id
            for a in atts:
                # Link to Mission
                document_id = a.id
                ltable.insert(mission_id = mission_id,
                              message_id = message_id,
                              document_id = document_id)
                if hr_id:
                    db(dtable.id == document_id).update(doc_id = doc_id)

        #mission = XML(A(T("Mission"),
        #                _href=URL(c="deploy", f="mission",
        #                          args=[mission_id, "profile"])))
        #current.session.confirmation = T("Response linked to %(mission)s") % \
        #                                    dict(mission=mission)
        current.session.confirmation = T("Response linked to Mission")
        redirect(URL(c="deploy", f="email_inbox"))

    resource = s3db.resource("deploy_mission",
                             filter=mission_query, vars=r.get_vars)

    # Filter widgets
    filter_widgets = s3db.get_config("deploy_mission", "filter_widgets")

    # List fields
    list_fields = s3db.get_config("deploy_mission", "list_fields")
    # NOTE(review): this mutates the configured list in place - verify
    # repeated requests do not accumulate duplicate "id" entries
    list_fields.insert(0, "id")

    # Data table
    totalrows = resource.count()
    if "pageLength" in get_vars:
        display_length = get_vars["pageLength"]
        if display_length == "None":
            display_length = None
        else:
            display_length = int(display_length)
    else:
        display_length = 25
    if display_length:
        # Serve a few pages ahead for client-side pagination
        limit = 4 * display_length
    else:
        limit = None
    filter, orderby, left = resource.datatable_filter(list_fields, get_vars)
    if not orderby:
        # Most recent missions on top
        orderby = "deploy_mission.created_on desc"
    resource.add_filter(filter)
    data = resource.select(list_fields,
                           start=0,
                           limit=limit,
                           orderby=orderby,
                           left=left,
                           count=True,
                           represent=True)
    filteredrows = data["numrows"]
    dt = S3DataTable(data["rfields"], data["rows"])
    dt_id = "datatable"

    if r.representation == "html":
        # Page load
        resource.configure(deletable = False)

        record = r.record
        action_vars = dict(mission_id="[id]")

        # Can we identify the Member from the sender address?
        from ..s3.s3parser import S3Parsing
        from_address = record.from_address
        hr_id = S3Parsing().lookup_human_resource(from_address)
        if hr_id:
            action_vars["hr_id"] = hr_id

        s3 = response.s3
        s3.actions = [dict(label=str(T("Select Mission")),
                           _class="action-btn",
                           url=URL(f="email_inbox",
                                   args=[r.id, "select"],
                                   vars=action_vars,
                                   )),
                      ]
        s3.no_formats = True

        # Data table (items)
        items = dt.html(totalrows,
                        filteredrows,
                        dt_id,
                        dt_ajax_url=r.url(representation="aadata"),
                        dt_pageLength=display_length,
                        dt_pagination="true",
                        dt_searching="false",
                        )

        # Filter form
        if filter_widgets:

            # Where to retrieve filtered data from:
            _vars = resource.crud._remove_filters(r.get_vars)
            filter_submit_url = r.url(vars=_vars)

            # Where to retrieve updated filter options from:
            filter_ajax_url = URL(f="mission",
                                  args=["filter.options"],
                                  vars={})

            get_config = resource.get_config
            filter_clear = get_config("filter_clear", True)
            filter_formstyle = get_config("filter_formstyle", None)
            filter_submit = get_config("filter_submit", True)
            filter_form = S3FilterForm(filter_widgets,
                                       clear=filter_clear,
                                       formstyle=filter_formstyle,
                                       submit=filter_submit,
                                       ajax=True,
                                       url=filter_submit_url,
                                       ajaxurl=filter_ajax_url,
                                       _class="filter-form",
                                       _id="datatable-filter-form",
                                       )
            fresource = s3db.resource(resource.tablename)
            alias = resource.alias if r.component else None
            ff = filter_form.html(fresource,
                                  r.get_vars,
                                  target="datatable",
                                  alias=alias)
        else:
            ff = ""

        output = dict(items=items,
                      title=T("Select Mission"),
                      list_filter_form=ff)

        # Add RHeader
        if hr_id:
            # Member identified: link the sender address to their profile
            from_address = A(from_address,
                             _href=URL(c="deploy", f="human_resource",
                                       args=[hr_id, "profile"],
                                       )
                             )
            row = ""
        else:
            # Member unknown: render an autocomplete to select one manually
            id = "deploy_response_human_resource_id__row"
            # @ToDo: deployment_setting for 'Member' label
            title = T("Select Member")
            label = "%s:" % title
            field = s3db.deploy_response.human_resource_id
            # @ToDo: Get fancier & auto-click if there is just a single Mission
            script = \
'''S3.update_links=function(){
 var value=$('#deploy_response_human_resource_id').val()
 if(value){
  $('.action-btn.link').each(function(){
   var url=this.href
   var posn=url.indexOf('&hr_id=')
   if(posn>0){url=url.split('&hr_id=')[0]+'&hr_id='+value
   }else{url+='&hr_id='+value}
   $(this).attr('href',url)})}}'''
            s3.js_global.append(script)
            post_process = '''S3.update_links()'''
            widget = S3HumanResourceAutocompleteWidget(post_process=post_process)
            widget = widget(field, None)
            comment = DIV(_class="tooltip",
                          _title="%s|%s" % (title,
                                            current.messages.AUTOCOMPLETE_HELP))
            # @ToDo: Handle non-callable formstyles
            row = s3.crud.formstyle(id, label, widget, comment)
            if isinstance(row, tuple):
                row = TAG[""](row[0],
                              row[1],
                              )

        # Any attachments?
        if atts:
            attachments = TABLE(TR(TH("%s: " % T("Attachments"))))
            for a in atts:
                url = URL(c="default", f="download",
                          args=a.file)
                attachments.append(TR(TD(A(ICON("attachment"),
                                           a.name,
                                           _href=url))))
        else:
            attachments = ""

        # @ToDo: Add Reply button
        rheader = DIV(row,
                      TABLE(TR(TH("%s: " % T("From")),
                               from_address,
                               ),
                            TR(TH("%s: " % T("Date")),
                               record.created_on,
                               ),
                            TR(TH("%s: " % T("Subject")),
                               record.subject,
                               ),
                            TR(TH("%s: " % T("Message Text")),
                               ),
                            ),
                      DIV(record.body, _class="message-body s3-truncate"),
                      attachments,
                      )
        output["rheader"] = rheader
        s3_trunk8(lines=5)

        response.view = "list_filter.html"
        return output

    elif r.representation == "aadata":
        # Ajax refresh
        if "draw" in get_vars:
            echo = int(get_vars.draw)
        else:
            echo = None
        # FIX: no bulk actions are defined in this method, so passing the
        # undefined name dt_bulk_actions raised a NameError here
        items = dt.json(totalrows,
                        filteredrows,
                        dt_id,
                        echo)
        response.headers["Content-Type"] = "application/json"
        return items

    else:
        r.error(501, current.ERROR.BAD_FORMAT)
# =============================================================================
class deploy_MissionProfileLayout(S3DataListLayout):
""" DataList layout for Mission Profile """
# -------------------------------------------------------------------------
def __init__(self, profile="deploy_mission"):
""" Constructor """
super(deploy_MissionProfileLayout, self).__init__(profile=profile)
self.dcount = {}
self.avgrat = {}
self.deployed = set()
self.appraisals = {}
self.use_regions = current.deployment_settings.get_org_regions()
# -------------------------------------------------------------------------
def prep(self, resource, records):
"""
Bulk lookups for cards
@param resource: the resource
@param records: the records as returned from S3Resource.select
"""
db = current.db
s3db = current.s3db
tablename = resource.tablename
if tablename == "deploy_alert":
# Recipients, aggregated by alert
record_ids = set(record["_row"]["deploy_alert.id"] for record in records)
htable = s3db.hrm_human_resource
number_of_recipients = htable.id.count()
rtable = s3db.deploy_alert_recipient
alert_id = rtable.alert_id
use_regions = self.use_regions
if use_regions:
otable = s3db.org_organisation
region_id = otable.region_id
fields = [alert_id, region_id, number_of_recipients]
left = [htable.on(htable.id==rtable.human_resource_id),
otable.on(otable.id==htable.organisation_id),
]
groupby = [alert_id, region_id]
else:
fields = [alert_id, number_of_recipients]
left = [htable.on(htable.id==rtable.human_resource_id)]
groupby = [alert_id]
query = (alert_id.belongs(record_ids)) & \
(rtable.deleted != True)
rows = current.db(query).select(left=left,
groupby=groupby,
*fields)
recipient_numbers = {}
for row in rows:
alert = row[alert_id]
if alert in recipient_numbers:
recipient_numbers[alert].append(row)
else:
recipient_numbers[alert] = [row]
self.recipient_numbers = recipient_numbers
# Representations of the region_ids
if use_regions:
# not needed with regions = False
represent = otable.region_id.represent
represent.none = current.T("No Region")
region_ids = [row[region_id] for row in rows]
self.region_names = represent.bulk(region_ids)
else:
self.region_names = {}
elif tablename == "deploy_response":
dcount = self.dcount
avgrat = self.avgrat
deployed = self.deployed
mission_id = None
for record in records:
raw = record["_row"]
human_resource_id = raw["hrm_human_resource.id"]
if human_resource_id:
dcount[human_resource_id] = 0
avgrat[human_resource_id] = None
if not mission_id:
# Should be the same for all rows
mission_id = raw["deploy_response.mission_id"]
hr_ids = dcount.keys()
if hr_ids:
# Number of previous deployments
table = s3db.deploy_assignment
human_resource_id = table.human_resource_id
deployment_count = table.id.count()
query = (human_resource_id.belongs(hr_ids)) & \
(table.deleted != True)
rows = db(query).select(human_resource_id,
deployment_count,
groupby = human_resource_id,
)
for row in rows:
dcount[row[human_resource_id]] = row[deployment_count]
# Members deployed for this mission
query = (human_resource_id.belongs(hr_ids)) & \
(table.mission_id == mission_id) & \
(table.deleted != True)
rows = db(query).select(human_resource_id)
for row in rows:
deployed.add(row[human_resource_id])
# Average appraisal rating
atable = s3db.hrm_appraisal
htable = s3db.hrm_human_resource
human_resource_id = htable.id
average_rating = atable.rating.avg()
query = (human_resource_id.belongs(hr_ids)) & \
(htable.person_id == atable.person_id) & \
(atable.deleted != True) & \
(atable.rating != None) & \
(atable.rating > 0)
rows = db(query).select(human_resource_id,
average_rating,
groupby = human_resource_id,
)
for row in rows:
avgrat[row[human_resource_id]] = row[average_rating]
elif tablename == "deploy_assignment":
record_ids = set(record["_row"]["deploy_assignment.id"]
for record in records)
atable = s3db.hrm_appraisal
ltable = s3db.deploy_assignment_appraisal
query = (ltable.assignment_id.belongs(record_ids)) & \
(ltable.deleted != True) & \
(atable.id == ltable.appraisal_id)
rows = current.db(query).select(ltable.assignment_id,
ltable.appraisal_id,
)
appraisals = {}
for row in rows:
appraisals[row.assignment_id] = row.appraisal_id
self.appraisals = appraisals
return
# -------------------------------------------------------------------------
def render_header(self, list_id, item_id, resource, rfields, record):
"""
Render the card header
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
# No card header in this layout
return None
# -------------------------------------------------------------------------
def render_body(self, list_id, item_id, resource, rfields, record):
"""
Render the card body
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
db = current.db
s3db = current.s3db
has_permission = current.auth.s3_has_permission
table = resource.table
tablename = resource.tablename
T = current.T
pkey = str(resource._id)
raw = record["_row"]
record_id = raw[pkey]
# Specific contents and workflow
contents = workflow = None
if tablename == "deploy_alert":
# Message subject as title
subject = record["deploy_alert.subject"]
total_recipients = 0
rows = self.recipient_numbers.get(record_id)
if rows:
# Labels
hr_label = current.deployment_settings.get_deploy_hr_label()
HR_LABEL = T(hr_label)
if hr_label == "Member":
HRS_LABEL = T("Members")
elif hr_label == "Staff":
HRS_LABEL = HR_LABEL
elif hr_label == "Volunteer":
HRS_LABEL = T("Volunteers")
htable = s3db.hrm_human_resource
rcount = htable.id.count()
if not self.use_regions:
total_recipients = rows[0][rcount]
label = HR_LABEL if total_recipients == 1 else HRS_LABEL
link = URL(f = "alert", args = [record_id, "recipient"])
recipients = SPAN(A("%s %s" % (total_recipients, label),
_href=link,
),
)
else:
region = s3db.org_organisation.region_id
region_names = self.region_names
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
recipients = []
no_region = None
for row in rows:
# Region
region_id = row[region]
region_name = region_names.get(region_id, UNKNOWN_OPT)
region_filter = {
"recipient.human_resource_id$" \
"organisation_id$region_id__belongs": region_id
}
# Number of recipients
num = row[rcount]
total_recipients += num
label = HR_LABEL if num == 1 else HRS_LABEL
# Link
link = URL(f = "alert",
args = [record_id, "recipient"],
vars = region_filter)
# Recipient list item
recipient = SPAN("%s (" % region_name,
A("%s %s" % (num, label),
_href=link,
),
")",
)
if region_id:
recipients.extend([recipient, ", "])
else:
no_region = [recipient, ", "]
# Append "no region" at the end of the list
if no_region:
recipients.extend(no_region)
recipients = TAG[""](recipients[:-1])
else:
recipients = T("No Recipients Selected")
# Modified-date corresponds to sent-date
modified_on = record["deploy_alert.modified_on"]
# Has this alert been sent?
sent = True if raw["deploy_alert.message_id"] else False
if sent:
status = SPAN(ICON("sent"),
T("sent"), _class="alert-status")
else:
status = SPAN(ICON("unsent"),
T("not sent"), _class="red alert-status")
# Message
message = record["deploy_alert.body"]
# Contents
contents = DIV(
DIV(
DIV(subject,
_class="card-title"),
DIV(recipients,
_class="card-category"),
_class="media-heading"
),
DIV(modified_on, status, _class="card-subtitle"),
DIV(message, _class="message-body s3-truncate"),
_class="media-body",
)
# Workflow
if not sent and total_recipients and \
has_permission("update", table, record_id=record_id):
send = A(ICON("mail"),
SPAN(T("Send this Alert"),
_class="card-action"),
_onclick="window.location.href='%s'" %
URL(c="deploy", f="alert",
args=[record_id, "send"]),
_class="action-lnk",
)
workflow = [send]
elif tablename == "deploy_response":
human_resource_id = raw["hrm_human_resource.id"]
# Title linked to member profile
if human_resource_id:
person_id = record["hrm_human_resource.person_id"]
profile_url = URL(f="human_resource", args=[human_resource_id, "profile"])
profile_title = T("Open Member Profile (in a new tab)")
person = A(person_id,
_href=profile_url,
_target="_blank",
_title=profile_title)
else:
person_id = "%s (%s)" % \
(T("Unknown"), record["msg_message.from_address"])
person = person_id
# Organisation
organisation = record["hrm_human_resource.organisation_id"]
# Created_on corresponds to received-date
created_on = record["deploy_response.created_on"]
# Message Data
message = record["msg_message.body"]
# Dropdown of available documents
documents = raw["doc_document.file"]
if documents:
if not isinstance(documents, list):
documents = [documents]
bootstrap = current.response.s3.formstyle == "bootstrap"
if bootstrap:
docs = UL(_class="dropdown-menu",
_role="menu",
)
else:
docs = SPAN(_id="attachments",
_class="profile-data-value",
)
retrieve = db.doc_document.file.retrieve
for doc in documents:
try:
doc_name = retrieve(doc)[0]
except (IOError, TypeError):
doc_name = current.messages["NONE"]
doc_url = URL(c="default", f="download",
args=[doc])
if bootstrap:
doc_item = LI(A(ICON("file"),
" ",
doc_name,
_href=doc_url,
),
_role="menuitem",
)
else:
doc_item = A(ICON("file"),
" ",
doc_name,
_href=doc_url,
)
docs.append(doc_item)
docs.append(", ")
if bootstrap:
docs = DIV(A(ICON("attachment"),
SPAN(_class="caret"),
_class="btn dropdown-toggle",
_href="#",
**{"_data-toggle": "dropdown"}
),
doc_list,
_class="btn-group attachments dropdown pull-right",
)
else:
# Remove final comma
docs.components.pop()
docs = DIV(LABEL("%s:" % T("Attachments"),
_class = "profile-data-label",
_for="attachments",
),
docs,
_class = "profile-data",
)
else:
docs = ""
# Number of previous deployments and average rating
# (looked up in-bulk in self.prep)
if hasattr(self, "dcount"):
dcount = self.dcount.get(human_resource_id, 0)
if hasattr(self, "avgrat"):
avgrat = self.avgrat.get(human_resource_id)
dcount_id = "profile-data-dcount-%s" % record_id
avgrat_id = "profile-data-avgrat-%s" % record_id
dinfo = DIV(LABEL("%s:" % T("Previous Deployments"),
_for=dcount_id,
_class="profile-data-label"),
SPAN(dcount,
_id=dcount_id,
_class="profile-data-value"),
LABEL("%s:" % T("Average Rating"),
_for=avgrat_id,
_class="profile-data-label"),
SPAN(avgrat,
_id=avgrat_id,
_class="profile-data-value"),
_class="profile-data",
)
# Comments
comments_id = "profile-data-comments-%s" % record_id
comments = DIV(LABEL("%s:" % T("Comments"),
_for=comments_id,
_class="profile-data-label"),
SPAN(record["deploy_response.comments"],
_id=comments_id,
_class="profile-data-value s3-truncate"),
_class="profile-data",
)
# Contents
contents = DIV(
DIV(
DIV(person,
_class="card-title"),
DIV(organisation,
_class="card-category"),
_class="media-heading",
),
DIV(created_on, _class="card-subtitle"),
DIV(message, _class="message-body s3-truncate"),
docs,
dinfo,
comments,
_class="media-body",
)
# Workflow
if human_resource_id:
if hasattr(self, "deployed") and human_resource_id in self.deployed:
deploy = A(ICON("deployed"),
SPAN(T("Member Deployed"),
_class="card-action"),
_class="action-lnk",
)
elif has_permission("create", "deploy_assignment"):
mission_id = raw["deploy_response.mission_id"]
url = URL(f="mission",
args=[mission_id, "assignment", "create"],
vars={"member_id": human_resource_id})
deploy = A(ICON("deploy"),
SPAN(T("Deploy this Member"),
_class="card-action"),
_href=url,
_class="action-lnk"
)
else:
deploy = None
if deploy:
workflow = [deploy]
elif tablename == "deploy_assignment":
human_resource_id = raw["hrm_human_resource.id"]
# Title linked to member profile
profile_url = URL(f="human_resource", args=[human_resource_id, "profile"])
profile_title = T("Open Member Profile (in a new tab)")
person = A(record["hrm_human_resource.person_id"],
_href=profile_url,
_target="_blank",
_title=profile_title)
# Organisation
organisation = record["hrm_human_resource.organisation_id"]
fields = dict((rfield.colname, rfield) for rfield in rfields)
render = lambda colname: self.render_column(item_id,
fields[colname],
record)
# Contents
contents = DIV(
DIV(
DIV(person,
_class="card-title"),
DIV(organisation,
_class="card-category"),
_class="media-heading"),
render("deploy_assignment.start_date"),
render("deploy_assignment.end_date"),
render("deploy_assignment.job_title_id"),
render("deploy_assignment.job_title"),
render("hrm_appraisal.rating"),
_class="media-body",
)
# Workflow actions
appraisal = self.appraisals.get(record_id)
person_id = raw["hrm_human_resource.person_id"]
if appraisal and \
has_permission("update", "hrm_appraisal", record_id=appraisal.id):
# Appraisal already uploaded => edit
EDIT_APPRAISAL = T("Open Appraisal")
url = URL(c="deploy", f="person",
args=[person_id,
"appraisal",
appraisal.id,
"update.popup"
],
vars={"refresh": list_id,
"record": record_id
})
edit = A(ICON("attachment"),
SPAN(EDIT_APPRAISAL, _class="card-action"),
_href=url,
_class="s3_modal action-lnk",
_title=EDIT_APPRAISAL,
)
workflow = [edit]
elif has_permission("update", table, record_id=record_id):
# No appraisal uploaded yet => upload
# Currently we assume that anyone who can edit the
# assignment can upload the appraisal
_class = "action-lnk"
UPLOAD_APPRAISAL = T("Upload Appraisal")
mission_id = raw["deploy_assignment.mission_id"]
url = URL(c="deploy", f="person",
args=[person_id,
"appraisal",
"create.popup"
],
vars={"mission_id": mission_id,
"refresh": list_id,
"record": record_id,
})
upload = A(ICON("upload"),
SPAN(UPLOAD_APPRAISAL, _class="card-action"),
_href=url,
_class="s3_modal action-lnk",
_title=UPLOAD_APPRAISAL,
)
workflow = [upload]
body = DIV(_class="media")
# Body icon
icon = self.render_icon(list_id, resource)
if icon:
body.append(icon)
# Toolbox and workflow actions
toolbox = self.render_toolbox(list_id, resource, record)
if toolbox:
if workflow:
toolbox.insert(0, DIV(workflow, _class="card-actions"))
body.append(toolbox)
# Contents
if contents:
body.append(contents)
return body
# -------------------------------------------------------------------------
def render_icon(self, list_id, resource):
"""
Render the body icon
@param list_id: the list ID
@param resource: the S3Resource
"""
tablename = resource.tablename
if tablename == "deploy_alert":
icon = "alert.png"
elif tablename == "deploy_response":
icon = "email.png"
elif tablename == "deploy_assignment":
icon = "member.png"
else:
return None
return A(IMG(_src=URL(c="static", f="themes",
args=["IFRC", "img", icon]),
_class="media-object",
),
_class="pull-left",
)
# -------------------------------------------------------------------------
def render_toolbox(self, list_id, resource, record):
"""
Render the toolbox
@param list_id: the HTML ID of the list
@param resource: the S3Resource to render
@param record: the record as dict
"""
table = resource.table
tablename = resource.tablename
record_id = record[str(resource._id)]
open_url = update_url = None
if tablename == "deploy_alert":
open_url = URL(f="alert", args=[record_id])
elif tablename == "deploy_response":
update_url = URL(f="response_message",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id,
"profile": self.profile,
},
)
elif tablename == "deploy_assignment":
update_url = URL(c="deploy", f="assignment",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id,
"profile": self.profile,
},
)
has_permission = current.auth.s3_has_permission
crud_string = S3Method.crud_string
toolbox = DIV(_class="edit-bar fright")
if update_url and \
has_permission("update", table, record_id=record_id):
btn = A(ICON("edit"),
_href=update_url,
_class="s3_modal",
_title=crud_string(tablename, "title_update"))
toolbox.append(btn)
elif open_url:
btn = A(ICON("file-alt"),
_href=open_url,
_title=crud_string(tablename, "title_display"))
toolbox.append(btn)
if has_permission("delete", table, record_id=record_id):
btn = A(ICON("delete"),
_class="dl-item-delete",
_title=crud_string(tablename, "label_delete_button"))
toolbox.append(btn)
return toolbox
# -------------------------------------------------------------------------
def render_column(self, item_id, rfield, record):
    """
    Render a data column as label/value pair.

    @param item_id: the HTML element ID of the item
    @param rfield: the S3ResourceField for the column
    @param record: the record (from S3Resource.select)

    @return: LABEL+SPAN markup, or None if the column is not
             present in the record
    """
    colname = rfield.colname
    if colname not in record:
        return None
    # Unique element ID so the label can reference the value
    element_id = "%s-%s" % (item_id, colname.replace(".", "_"))
    return TAG[""](LABEL("%s:" % rfield.label,
                         _for = element_id,
                         _class = "profile-data-label"),
                   SPAN(record[colname],
                        _id = element_id,
                        _class = "profile-data-value"))
# END =========================================================================
| mit |
jayme-github/CouchPotatoServer | libs/chardet/mbcssm.py | 215 | 18214 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = ( \
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0) # f8 - ff
BIG5_st = ( \
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart)#10-17
# Total byte length of a character whose lead byte falls in each class
# (index = class number; 0 = invalid/control)
Big5CharLenTable = (0, 1, 1, 2, 0)

# State machine model consumed by the coding state machine to verify
# that a byte stream is legal Big5
Big5SMModel = {'classTable': BIG5_cls,
               'classFactor': 5,    # number of byte classes
               'stateTable': BIG5_st,
               'charLenTable': Big5CharLenTable,
               'name': 'Big5'}
# EUC-JP
EUCJP_cls = ( \
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5) # f8 - ff
EUCJP_st = ( \
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart)#20-27
# Total byte length of a character by the class of its lead byte
# (class 3 = the 0x8F lead byte, which starts a 3-byte sequence)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)

# State machine model for verifying EUC-JP byte sequences
EUCJPSMModel = {'classTable': EUCJP_cls,
                'classFactor': 6,    # number of byte classes
                'stateTable': EUCJP_st,
                'charLenTable': EUCJPCharLenTable,
                'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = ( \
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0) # f8 - ff
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart)#08-0f
# Total byte length of a character by the class of its lead byte
EUCKRCharLenTable = (0, 1, 2, 0)

# State machine model for verifying EUC-KR byte sequences
EUCKRSMModel = {'classTable': EUCKR_cls,
                'classFactor': 4,    # number of byte classes
                'stateTable': EUCKR_st,
                'charLenTable': EUCKRCharLenTable,
                'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = ( \
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0) # f8 - ff
EUCTW_st = ( \
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart)#28-2f
# Total byte length of a character by the class of its lead byte
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)

# State machine model for verifying EUC-TW byte sequences
EUCTWSMModel = {'classTable': EUCTW_cls,
                'classFactor': 7,    # number of byte classes
                'stateTable': EUCTW_st,
                'charLenTable': EUCTWCharLenTable,
                'name': 'x-euc-tw'}
# GB2312
GB2312_cls = ( \
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0) # f8 - ff
GB2312_st = ( \
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart)#28-2f
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)

# State machine model for verifying GB2312 byte sequences
GB2312SMModel = {'classTable': GB2312_cls,
                 'classFactor': 7,    # number of byte classes
                 'stateTable': GB2312_st,
                 'charLenTable': GB2312CharLenTable,
                 'name': 'GB2312'}
# Shift_JIS
SJIS_cls = ( \
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages does
#contain such byte. We need to be more error forgiven.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0) # f8 - ff
SJIS_st = ( \
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart)#10-17
# Total byte length of a character by the class of its lead byte
SJISCharLenTable = (0, 1, 1, 2, 0, 0)

# State machine model for verifying Shift_JIS byte sequences
SJISSMModel = {'classTable': SJIS_cls,
               'classFactor': 6,    # number of byte classes
               'stateTable': SJIS_st,
               'charLenTable': SJISCharLenTable,
               'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = ( \
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5) # f8 - ff
UCS2BE_st = ( \
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart)#30-37
# UTF-16 code units are 2 bytes each; class 3 contributes no length
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)

# State machine model for verifying big-endian UTF-16 byte sequences
UCS2BESMModel = {'classTable': UCS2BE_cls,
                 'classFactor': 6,    # number of byte classes
                 'stateTable': UCS2BE_st,
                 'charLenTable': UCS2BECharLenTable,
                 'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = ( \
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5) # f8 - ff
UCS2LE_st = ( \
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart)#30-37
# UTF-16 code units are 2 bytes each
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)

# State machine model for verifying little-endian UTF-16 byte sequences
UCS2LESMModel = {'classTable': UCS2LE_cls,
                 'classFactor': 6,    # number of byte classes
                 'stateTable': UCS2LE_st,
                 'charLenTable': UCS2LECharLenTable,
                 'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = ( \
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0) # f8 - ff
UTF8_st = ( \
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError)#c8-cf
# Total byte length of a UTF-8 sequence by the class of its lead byte
# (classes 6..15 cover the multi-byte lead-byte ranges, 2..6 bytes)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)

# State machine model for verifying UTF-8 byte sequences
UTF8SMModel = {'classTable': UTF8_cls,
               'classFactor': 16,    # number of byte classes
               'stateTable': UTF8_st,
               'charLenTable': UTF8CharLenTable,
               'name': 'UTF-8'}
| gpl-3.0 |
rjshaver/bitcoin | qa/rpc-tests/test_framework/blockstore.py | 95 | 4447 | # BlockStore: a helper class that keeps a map of blocks and implements
# helper functions for responding to getheaders and getdata,
# and for constructing a getheaders message
#
from mininode import *
import dbm
class BlockStore(object):
    """
    Map of block hash -> serialized block, backed by a dbm database,
    with helpers for answering getheaders/getdata requests and for
    constructing block locators.

    NOTE(review): Python 2 code (long literal 0L, print statement,
    cStringIO) -- keep under a Python 2 interpreter.
    """
    def __init__(self, datadir):
        # 'c' = open for read/write, creating the database if necessary
        self.blockDB = dbm.open(datadir + "/blocks", 'c')
        # Hash of the current tip (0 = no blocks stored yet)
        self.currentBlock = 0L
        # In-memory cache: block hash -> CBlockHeader
        self.headers_map = dict()

    def close(self):
        """Close the underlying dbm database."""
        self.blockDB.close()

    def get(self, blockhash):
        """Return the CBlock with the given hash, or None if unknown."""
        serialized_block = None
        try:
            serialized_block = self.blockDB[repr(blockhash)]
        except KeyError:
            return None
        f = cStringIO.StringIO(serialized_block)
        ret = CBlock()
        ret.deserialize(f)
        ret.calc_sha256()
        return ret

    def get_header(self, blockhash):
        """Return the cached CBlockHeader for blockhash, or None."""
        try:
            return self.headers_map[blockhash]
        except KeyError:
            return None

    # Note: this pulls full blocks out of the database just to retrieve
    # the headers -- perhaps we could keep a separate data structure
    # to avoid this overhead.
    def headers_for(self, locator, hash_stop, current_tip=None):
        """
        Build a msg_headers response for a getheaders request: walk
        back from current_tip to the first hash in the locator, then
        return up to 2000 headers, stopping early at hash_stop.
        Returns None if the tip is unknown.
        """
        if current_tip is None:
            current_tip = self.currentBlock
        current_block_header = self.get_header(current_tip)
        if current_block_header is None:
            return None

        response = msg_headers()
        headersList = [ current_block_header ]
        maxheaders = 2000
        # NOTE(review): the loop collects the entire branch back to the
        # locator before truncating -- O(chain length) per request
        while (headersList[0].sha256 not in locator.vHave):
            prevBlockHash = headersList[0].hashPrevBlock
            prevBlockHeader = self.get_header(prevBlockHash)
            if prevBlockHeader is not None:
                headersList.insert(0, prevBlockHeader)
            else:
                break
        headersList = headersList[:maxheaders] # truncate if we have too many
        hashList = [x.sha256 for x in headersList]
        index = len(headersList)
        if (hash_stop in hashList):
            index = hashList.index(hash_stop)+1
        response.headers = headersList[:index]
        return response

    def add_block(self, block):
        """Store a block, advance the tip and cache its header."""
        block.calc_sha256()
        try:
            self.blockDB[repr(block.sha256)] = bytes(block.serialize())
        except TypeError as e:
            print "Unexpected error: ", sys.exc_info()[0], e.args
        self.currentBlock = block.sha256
        self.headers_map[block.sha256] = CBlockHeader(block)

    def add_header(self, header):
        """Cache a header without storing a full block."""
        self.headers_map[header.sha256] = header

    def get_blocks(self, inv):
        """Return msg_block responses for the MSG_BLOCK entries in inv."""
        responses = []
        for i in inv:
            if (i.type == 2): # MSG_BLOCK
                block = self.get(i.hash)
                if block is not None:
                    responses.append(msg_block(block))
        return responses

    def get_locator(self, current_tip=None):
        """
        Build a CBlockLocator walking back from current_tip, doubling
        the step size after the first 10 entries so locator length is
        logarithmic in chain height.
        """
        if current_tip is None:
            current_tip = self.currentBlock
        r = []
        counter = 0
        step = 1
        lastBlock = self.get(current_tip)
        while lastBlock is not None:
            r.append(lastBlock.hashPrevBlock)
            for i in range(step):
                lastBlock = self.get(lastBlock.hashPrevBlock)
                if lastBlock is None:
                    break
            counter += 1
            if counter > 10:
                step *= 2
        locator = CBlockLocator()
        locator.vHave = r
        return locator
class TxStore(object):
    """
    Map of tx hash -> serialized transaction, backed by a dbm
    database, for answering getdata requests.

    NOTE(review): Python 2 code (print statement, cStringIO).
    """
    def __init__(self, datadir):
        # 'c' = open for read/write, creating the database if necessary
        self.txDB = dbm.open(datadir + "/transactions", 'c')

    def close(self):
        """Close the underlying dbm database."""
        self.txDB.close()

    def get(self, txhash):
        """Return the CTransaction with the given hash, or None."""
        serialized_tx = None
        try:
            serialized_tx = self.txDB[repr(txhash)]
        except KeyError:
            return None
        f = cStringIO.StringIO(serialized_tx)
        ret = CTransaction()
        ret.deserialize(f)
        ret.calc_sha256()
        return ret

    def add_transaction(self, tx):
        """Serialize and store a transaction keyed by its hash."""
        tx.calc_sha256()
        try:
            self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
        except TypeError as e:
            print "Unexpected error: ", sys.exc_info()[0], e.args

    def get_transactions(self, inv):
        """Return msg_tx responses for the MSG_TX entries in inv."""
        responses = []
        for i in inv:
            if (i.type == 1): # MSG_TX
                tx = self.get(i.hash)
                if tx is not None:
                    responses.append(msg_tx(tx))
        return responses
| mit |
minesense/VisTrails | contrib/titan/vtkviewcell.py | 6 | 38658 | ############################################################################
##
## Copyright (C) 2006-2010 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
################################################################################
# File QVTKViewWidget.py
# File for displaying a vtkRenderWindow in a Qt's QWidget ported from
# VTK/GUISupport/QVTK. Combine altogether to a single class: QVTKViewWidget
################################################################################
import vtk
from PyQt4 import QtCore, QtGui
import sip
from core import system
from core.modules.module_registry import get_module_registry
from packages.spreadsheet.basic_widgets import SpreadsheetCell, CellLocation
from packages.spreadsheet.spreadsheet_cell import QCellWidget, QCellToolBar
import vtkcell_rc
import gc
from gui.qt import qt_super
import core.db.action
from core.vistrail.action import Action
from core.vistrail.port import Port
from core.vistrail import module
from core.vistrail import connection
from core.vistrail.module_function import ModuleFunction
from core.vistrail.module_param import ModuleParam
from core.vistrail.location import Location
from core.modules.vistrails_module import ModuleError
import copy
################################################################################
class VTKViewCell(SpreadsheetCell):
    """
    VTKViewCell is a VisTrails Module that can display a vtkRenderView
    inside a spreadsheet cell.
    """
    def __init__(self):
        SpreadsheetCell.__init__(self)
        self.cellWidget = None

    def compute(self):
        """ compute() -> None
        Dispatch the vtkRenderView to the actual rendering widget

        Raises ModuleError if no 'SetRenderView' input is connected.
        """
        renderView = self.force_get_input('SetRenderView')
        # Fix: compare with None by identity, not equality (the input
        # object may define __eq__)
        if renderView is None:
            raise ModuleError(self, 'A vtkRenderView input is required.')
        self.cellWidget = self.displayAndWait(QVTKViewWidget, (renderView,))
AsciiToKeySymTable = ( None, None, None, None, None, None, None,
None, None,
"Tab", None, None, None, None, None, None,
None, None, None, None, None, None,
None, None, None, None, None, None,
None, None, None, None,
"space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright",
"parenleft", "parenright", "asterisk", "plus",
"comma", "minus", "period", "slash",
"0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", "colon", "semicolon", "less", "equal",
"greater", "question",
"at", "A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N", "O",
"P", "Q", "R", "S", "T", "U", "V", "W",
"X", "Y", "Z", "bracketleft",
"backslash", "bracketright", "asciicircum",
"underscore",
"quoteleft", "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o",
"p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z", "braceleft", "bar", "braceright",
"asciitilde", "Delete",
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None)
class QVTKViewWidget(QCellWidget):
"""
QVTKViewWidget is the actual rendering widget that can display
vtkRenderer inside a Qt QWidget
"""
def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
    """ QVTKViewWidget(parent: QWidget, f: WindowFlags) -> QVTKViewWidget
    Initialize QVTKViewWidget with a toolbar with its own device
    context
    """
    # MSWindowsOwnDC gives the widget its own device context, which the
    # embedded OpenGL render window needs on Windows
    QCellWidget.__init__(self, parent, f | QtCore.Qt.MSWindowsOwnDC)
    self.interacting = None    # renderer currently under interaction
    self.mRenWin = None        # the embedded vtkRenderWindow
    # VTK paints straight to the screen; stop Qt from erasing/buffering
    self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
    self.setAttribute(QtCore.Qt.WA_PaintOnScreen)
    self.setMouseTracking(True)    # receive move events without buttons held
    self.setFocusPolicy(QtCore.Qt.StrongFocus)
    self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
                                         QtGui.QSizePolicy.Expanding))
    # NOTE(review): QVTKViewWidgetToolBar is presumably defined elsewhere
    # in this file -- confirm
    self.toolBarType = QVTKViewWidgetToolBar
    self.setAnimationEnabled(True)
def removeObserversFromInteractorStyle(self):
    """ removeObserversFromInteractorStyle() -> None
    Detach all Python observers from the interactor style so the
    cell can be freed safely.
    """
    iren = self.mRenWin.GetInteractor()
    if not iren:
        return
    style = iren.GetInteractorStyle()
    for event_name in ("InteractionEvent",
                       "EndPickEvent",
                       "CharEvent",
                       "MouseWheelForwardEvent",
                       "MouseWheelBackwardEvent"):
        style.RemoveObservers(event_name)
def addObserversToInteractorStyle(self):
    """ addObserversToInteractorStyle() -> None
    Attach this widget's handlers to the current interactor style.
    """
    iren = self.mRenWin.GetInteractor()
    if not iren:
        return
    style = iren.GetInteractorStyle()
    # Event name -> handler; all interaction-like events share a handler
    for event_name, handler in (("InteractionEvent", self.interactionEvent),
                                ("EndPickEvent", self.interactionEvent),
                                ("CharEvent", self.charEvent),
                                ("MouseWheelForwardEvent", self.interactionEvent),
                                ("MouseWheelBackwardEvent", self.interactionEvent)):
        style.AddObserver(event_name, handler)
def deleteLater(self):
    """ deleteLater() -> None
    Make sure to free render window resource when
    deallocating. Overriding PyQt deleteLater to free up
    resources
    """
    # NOTE(review): renderer_maps and getRendererList are not defined in
    # this chunk -- presumably declared elsewhere in this class; confirm
    self.renderer_maps = {}
    # Detach every renderer before dropping the window
    for ren in self.getRendererList():
        self.mRenWin.RemoveRenderer(ren)

    # Break Python observer cycles, then release the render window
    self.removeObserversFromInteractorStyle()
    self.SetRenderWindow(None)

    QCellWidget.deleteLater(self)
def updateContents(self, inputPorts):
    """ updateContents(inputPorts: tuple)
    Updates the cell contents with a new vtkRenderView

    @param inputPorts: 1-tuple containing the render view module
    """
    (renderView, ) = inputPorts
    # Adopt the render window owned by the vtkRenderView
    renWin = renderView.vtkInstance.GetRenderWindow()
    renWin.DoubleBufferOn()
    self.SetRenderWindow(renWin)
    renderView.vtkInstance.ResetCamera()
    self.addObserversToInteractorStyle()

    # renWin = self.GetRenderWindow()
    # renderers = [renderView.vtkInstance.GetRenderer()]
    # iren = renWin.GetInteractor()

    # Update interactor style
    # self.removeObserversFromInteractorStyle()
    # if renderView==None:
    #     if iStyle==None:
    #         iStyleInstance = vtk.vtkInteractorStyleTrackballCamera()
    #     else:
    #         iStyleInstance = iStyle.vtkInstance
    #     iren.SetInteractorStyle(iStyleInstance)
    # self.addObserversToInteractorStyle()

    # Capture window into history for playback
    # Call this at the end to capture the image after rendering
    QCellWidget.updateContents(self, inputPorts)
def GetRenderWindow(self):
    """ GetRenderWindow() -> vtkRenderWindow
    Return the associated vtkRenderWindow, lazily creating and
    adopting a double-buffered one on first use.
    """
    if not self.mRenWin:
        window = vtk.vtkRenderWindow()
        window.DoubleBufferOn()
        self.SetRenderWindow(window)
    return self.mRenWin
def SetRenderWindow(self,w):
    """ SetRenderWindow(w: vtkRenderWindow)
    Set a new render window to QVTKViewWidget and initialize the
    interactor as well

    @param w: the new render window (may be None to release)
    """
    if w == self.mRenWin:
        return

    # Unmap the old window before replacing it
    if self.mRenWin:
        if self.mRenWin.GetMapped():
            self.mRenWin.Finalize()

    self.mRenWin = w

    if self.mRenWin:
        self.mRenWin.Register(None)
        if system.systemType=='Linux':
            # On X11, VTK expects the display id as a SWIG-style
            # void-pointer string
            try:
                vp = '_%s_void_p' % (hex(int(QtGui.QX11Info.display()))[2:])
            except TypeError:
                #This was change for PyQt4.2
                if isinstance(QtGui.QX11Info.display(),QtGui.Display):
                    display = sip.unwrapinstance(QtGui.QX11Info.display())
                    vp = '_%s_void_p' % (hex(display)[2:])
            self.mRenWin.SetDisplayId(vp)
            if not self.mRenWin.GetMapped():
                # Initialize() creates a native window; destroy it and
                # re-attach the render window to this Qt widget instead
                self.mRenWin.GetInteractor().Initialize()
                system.XDestroyWindow(self.mRenWin.GetGenericDisplayId(),
                                      self.mRenWin.GetGenericWindowId())
                self.mRenWin.Finalize()
                self.mRenWin.SetWindowInfo(str(int(self.winId())))
        else:
            self.mRenWin.SetWindowInfo(str(int(self.winId())))
        if self.isVisible():
            self.mRenWin.Start()
def GetInteractor(self):
    """ GetInteractor() -> vtkRenderWindowInteractor
    Return the interactor controlling this QVTKViewWidget
    (creating the render window first if necessary)
    """
    return self.GetRenderWindow().GetInteractor()
def event(self, e):
    """ event(e: QEvent) -> depends on event type
    Process window and interaction events
    """
    if e.type()==QtCore.QEvent.ParentAboutToChange:
        # About to be re-parented: unmap the native GL window first
        if self.mRenWin:
            if self.mRenWin.GetMapped():
                self.mRenWin.Finalize()
    else:
        if e.type()==QtCore.QEvent.ParentChange:
            # Re-parented: attach the render window to the new native id
            if self.mRenWin:
                self.mRenWin.SetWindowInfo(str(int(self.winId())))
                if self.isVisible():
                    self.mRenWin.Start()

    if QtCore.QObject.event(self,e):
        return 1

    if e.type() == QtCore.QEvent.KeyPress:
        # Forward key presses to our handler before the default path
        self.keyPressEvent(e)
        if e.isAccepted():
            return e.isAccepted()

    return qt_super(QVTKViewWidget, self).event(e)
    # return QtGui.QWidget.event(self,e)
    # Was this right? Wasn't this supposed to be QCellWidget.event()?
def resizeWindow(self, width, height):
    """ resizeWindow(width: int, height: int) -> None
    Work around vtk bugs for resizing window

    @param width: new client width in pixels
    @param height: new client height in pixels
    """
    ########################################################
    # VTK - BUGGGGGGGGG - GRRRRRRRRR
    # This is a 'bug' in vtkWin32OpenGLRenderWindow(.cxx)
    # If a render window is mapped to screen, the actual
    # window size is the client area of the window in Win32.
    # However, this real window size is only updated through
    # vtkWin32OpenGLRenderWindow::GetSize(). So this has to
    # be called here to get the cell size correctly. This
    # invalidates the condition in the next SetSize().
    # We can use self.mRenWin.SetSize(0,0) here but it will
    # cause flickering and decrease performance!
    # SetPosition(curX,curY) also works here but slower.
    self.mRenWin.GetSize()

    self.mRenWin.SetSize(width, height)
    # Keep the interactor's notion of the size in sync
    if self.mRenWin.GetInteractor():
        self.mRenWin.GetInteractor().SetSize(width, height)
def resizeEvent(self, e):
    """ resizeEvent(e: QResizeEvent) -> None
    Propagate a Qt resize to the embedded vtkRenderWindow and
    re-render.
    """
    qt_super(QVTKViewWidget, self).resizeEvent(e)
    if self.mRenWin:
        self.resizeWindow(self.width(), self.height())
        self.mRenWin.Render()
def moveEvent(self, e):
    """ moveEvent(e: QMoveEvent) -> None
    Keep the vtkRenderWindow position in sync with the widget.
    """
    qt_super(QVTKViewWidget, self).moveEvent(e)
    if self.mRenWin:
        self.mRenWin.SetPosition(self.x(), self.y())
def paintEvent(self, e):
    """ paintEvent(e: QPaintEvent) -> None
    Paint the QVTKViewWidget with vtkRenderWindow
    """
    iren = None
    if self.mRenWin:
        iren = self.mRenWin.GetInteractor()
    # Only paint when there is an enabled interactor
    if (not iren) or (not iren.GetEnabled()):
        return

    # Not all VTK render window classes provide UpdateGLRegion
    if hasattr(self.mRenWin, 'UpdateGLRegion'):
        self.mRenWin.UpdateGLRegion()
    self.mRenWin.Render()
def SelectActiveRenderer(self,iren):
    """ SelectActiveRenderer(iren: vtkRenderWindowInteractor) -> None
    Only make the vtkRenderer below the mouse cursor active
    """
    epos = iren.GetEventPosition()
    rens = iren.GetRenderWindow().GetRenderers()
    rens.InitTraversal()
    # A renderer is interactive iff the event position falls inside
    # its viewport (Python 2: xrange)
    for i in xrange(rens.GetNumberOfItems()):
        ren = rens.GetNextItem()
        ren.SetInteractive(ren.IsInViewport(epos[0], epos[1]))
def mousePressEvent(self,e):
    """ mousePressEvent(e: QMouseEvent) -> None
    Echo mouse event to vtkRenderWindowInteractor
    """
    iren = None
    if self.mRenWin:
        iren = self.mRenWin.GetInteractor()
    if (not iren) or (not iren.GetEnabled()):
        return

    ctrl = (e.modifiers()&QtCore.Qt.ControlModifier)
    isDoubleClick = e.type()==QtCore.QEvent.MouseButtonDblClick
    # FlipY: Qt's origin is top-left, VTK's is bottom-left
    iren.SetEventInformationFlipY(e.x(),e.y(),
                                  ctrl,
                                  (e.modifiers()&QtCore.Qt.ShiftModifier),
                                  chr(0),
                                  isDoubleClick,
                                  None)
    invoke = {QtCore.Qt.LeftButton:"LeftButtonPressEvent",
              QtCore.Qt.MidButton:"MiddleButtonPressEvent",
              QtCore.Qt.RightButton:"RightButtonPressEvent"}

    self.SelectActiveRenderer(iren)

    if ctrl:
        # Ctrl+click is reserved for the spreadsheet itself
        e.ignore()
        return

    # NOTE(review): getActiveRenderer is not defined in this chunk --
    # presumably declared elsewhere in this class; confirm
    self.interacting = self.getActiveRenderer(iren)

    if e.button() in invoke:
        iren.InvokeEvent(invoke[e.button()])
def mouseMoveEvent(self,e):
    """ mouseMoveEvent(e: QMouseEvent) -> None
    Echo mouse event to vtkRenderWindowInteractor
    """
    iren = None
    if self.mRenWin:
        iren = self.mRenWin.GetInteractor()
    if (not iren) or (not iren.GetEnabled()):
        return

    # FlipY: Qt's origin is top-left, VTK's is bottom-left
    iren.SetEventInformationFlipY(e.x(),e.y(),
                                  (e.modifiers()&QtCore.Qt.ControlModifier),
                                  (e.modifiers()&QtCore.Qt.ShiftModifier),
                                  chr(0), 0, None)

    iren.InvokeEvent("MouseMoveEvent")
def enterEvent(self,e):
""" enterEvent(e: QEvent) -> None
Echo mouse event to vtkRenderWindowwInteractor
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.InvokeEvent("EnterEvent")
def leaveEvent(self,e):
""" leaveEvent(e: QEvent) -> None
Echo mouse event to vtkRenderWindowwInteractor
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.InvokeEvent("LeaveEvent")
def mouseReleaseEvent(self,e):
""" mouseReleaseEvent(e: QEvent) -> None
Echo mouse event to vtkRenderWindowwInteractor
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.SetEventInformationFlipY(e.x(),e.y(),
(e.modifiers()&QtCore.Qt.ControlModifier),
(e.modifiers()&QtCore.Qt.ShiftModifier),
chr(0),0,None)
invoke = {QtCore.Qt.LeftButton:"LeftButtonReleaseEvent",
QtCore.Qt.MidButton:"MiddleButtonReleaseEvent",
QtCore.Qt.RightButton:"RightButtonReleaseEvent"}
self.interacting = None
if e.button() in invoke:
iren.InvokeEvent(invoke[e.button()])
    def keyPressEvent(self,e):
        """ keyPressEvent(e: QKeyEvent) -> None
        Disallow 'quit' key in vtkRenderWindowwInteractor and sync the others
        """
        iren = None
        if self.mRenWin:
            iren = self.mRenWin.GetInteractor()
        if (not iren) or (not iren.GetEnabled()):
            return
        # Extract the ASCII character for this key, NUL if there is none
        ascii_key = None
        if e.text().length()>0:
            ascii_key = e.text().toLatin1()[0]
        else:
            ascii_key = chr(0)
        # Prefer the keysym derived from the ASCII table; fall back to the
        # Qt key-code mapping for non-printable keys
        keysym = self.ascii_to_key_sym(ord(ascii_key))
        if not keysym:
            keysym = self.qt_key_to_key_sym(e.key())
        # Ignore 'q' or 'e' or Ctrl-anykey
        ctrl = (e.modifiers()&QtCore.Qt.ControlModifier)
        shift = (e.modifiers()&QtCore.Qt.ShiftModifier)
        if (keysym in ['q', 'e'] or ctrl):
            # 'q'/'e' would terminate the VTK interactor loop; Ctrl combos
            # are reserved for the application, so let Qt handle them
            e.ignore()
            return
        iren.SetKeyEventInformation(ctrl,shift,ascii_key, e.count(), keysym)
        iren.InvokeEvent("KeyPressEvent")
        if ascii_key:
            # Printable keys also trigger VTK's character handling
            iren.InvokeEvent("CharEvent")
    def keyReleaseEvent(self,e):
        """ keyReleaseEvent(e: QKeyEvent) -> None
        Disallow 'quit' key in vtkRenderWindowwInteractor and sync the others
        """
        iren = None
        if self.mRenWin:
            iren = self.mRenWin.GetInteractor()
        if (not iren) or (not iren.GetEnabled()):
            return
        # Extract the ASCII character for this key, NUL if there is none
        ascii_key = None
        if e.text().length()>0:
            ascii_key = e.text().toLatin1()[0]
        else:
            ascii_key = chr(0)
        # Prefer the keysym derived from the ASCII table; fall back to the
        # Qt key-code mapping for non-printable keys
        keysym = self.ascii_to_key_sym(ord(ascii_key))
        if not keysym:
            keysym = self.qt_key_to_key_sym(e.key())
        # Ignore 'q' or 'e' or Ctrl-anykey
        ctrl = (e.modifiers()&QtCore.Qt.ControlModifier)
        shift = (e.modifiers()&QtCore.Qt.ShiftModifier)
        if (keysym in ['q','e'] or ctrl):
            # Mirror keyPressEvent: suppress the quit keys and Ctrl combos
            e.ignore()
            return
        iren.SetKeyEventInformation(ctrl, shift, ascii_key, e.count(), keysym)
        iren.InvokeEvent("KeyReleaseEvent")
def wheelEvent(self,e):
""" wheelEvent(e: QWheelEvent) -> None
Zoom in/out while scrolling the mouse
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.SetEventInformationFlipY(e.x(),e.y(),
(e.modifiers()&QtCore.Qt.ControlModifier),
(e.modifiers()&QtCore.Qt.ShiftModifier),
chr(0),0,None)
self.SelectActiveRenderer(iren)
if e.delta()>0:
iren.InvokeEvent("MouseWheelForwardEvent")
else:
iren.InvokeEvent("MouseWheelBackwardEvent")
def focusInEvent(self,e):
""" focusInEvent(e: QFocusEvent) -> None
Ignore focus event
"""
pass
def focusOutEvent(self,e):
""" focusOutEvent(e: QFocusEvent) -> None
Ignore focus event
"""
pass
def contextMenuEvent(self,e):
""" contextMenuEvent(e: QContextMenuEvent) -> None
Make sure to get the right mouse position for the context menu
event, i.e. also the right click
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
ctrl = int(e.modifiers()&QtCore.Qt.ControlModifier)
shift = int(e.modifiers()&QtCore.Qt.ShiftModifier)
iren.SetEventInformationFlipY(e.x(),e.y(),ctrl,shift,chr(0),0,None)
iren.InvokeEvent("ContextMenuEvent")
def ascii_to_key_sym(self,i):
""" ascii_to_key_sym(i: int) -> str
Convert ASCII code into key name
"""
global AsciiToKeySymTable
return AsciiToKeySymTable[i]
def qt_key_to_key_sym(self,i):
""" qt_key_to_key_sym(i: QtCore.Qt.Keycode) -> str
Convert Qt key code into key name
"""
handler = {QtCore.Qt.Key_Backspace:"BackSpace",
QtCore.Qt.Key_Tab:"Tab",
QtCore.Qt.Key_Backtab:"Tab",
QtCore.Qt.Key_Return:"Return",
QtCore.Qt.Key_Enter:"Return",
QtCore.Qt.Key_Shift:"Shift_L",
QtCore.Qt.Key_Control:"Control_L",
QtCore.Qt.Key_Alt:"Alt_L",
QtCore.Qt.Key_Pause:"Pause",
QtCore.Qt.Key_CapsLock:"Caps_Lock",
QtCore.Qt.Key_Escape:"Escape",
QtCore.Qt.Key_Space:"space",
QtCore.Qt.Key_End:"End",
QtCore.Qt.Key_Home:"Home",
QtCore.Qt.Key_Left:"Left",
QtCore.Qt.Key_Up:"Up",
QtCore.Qt.Key_Right:"Right",
QtCore.Qt.Key_Down:"Down",
QtCore.Qt.Key_SysReq:"Snapshot",
QtCore.Qt.Key_Insert:"Insert",
QtCore.Qt.Key_Delete:"Delete",
QtCore.Qt.Key_Help:"Help",
QtCore.Qt.Key_0:"0",
QtCore.Qt.Key_1:"1",
QtCore.Qt.Key_2:"2",
QtCore.Qt.Key_3:"3",
QtCore.Qt.Key_4:"4",
QtCore.Qt.Key_5:"5",
QtCore.Qt.Key_6:"6",
QtCore.Qt.Key_7:"7",
QtCore.Qt.Key_8:"8",
QtCore.Qt.Key_9:"9",
QtCore.Qt.Key_A:"a",
QtCore.Qt.Key_B:"b",
QtCore.Qt.Key_C:"c",
QtCore.Qt.Key_D:"d",
QtCore.Qt.Key_E:"e",
QtCore.Qt.Key_F:"f",
QtCore.Qt.Key_G:"g",
QtCore.Qt.Key_H:"h",
QtCore.Qt.Key_I:"i",
QtCore.Qt.Key_J:"h",
QtCore.Qt.Key_K:"k",
QtCore.Qt.Key_L:"l",
QtCore.Qt.Key_M:"m",
QtCore.Qt.Key_N:"n",
QtCore.Qt.Key_O:"o",
QtCore.Qt.Key_P:"p",
QtCore.Qt.Key_Q:"q",
QtCore.Qt.Key_R:"r",
QtCore.Qt.Key_S:"s",
QtCore.Qt.Key_T:"t",
QtCore.Qt.Key_U:"u",
QtCore.Qt.Key_V:"v",
QtCore.Qt.Key_W:"w",
QtCore.Qt.Key_X:"x",
QtCore.Qt.Key_Y:"y",
QtCore.Qt.Key_Z:"z",
QtCore.Qt.Key_Asterisk:"asterisk",
QtCore.Qt.Key_Plus:"plus",
QtCore.Qt.Key_Minus:"minus",
QtCore.Qt.Key_Period:"period",
QtCore.Qt.Key_Slash:"slash",
QtCore.Qt.Key_F1:"F1",
QtCore.Qt.Key_F2:"F2",
QtCore.Qt.Key_F3:"F3",
QtCore.Qt.Key_F4:"F4",
QtCore.Qt.Key_F5:"F5",
QtCore.Qt.Key_F6:"F6",
QtCore.Qt.Key_F7:"F7",
QtCore.Qt.Key_F8:"F8",
QtCore.Qt.Key_F9:"F9",
QtCore.Qt.Key_F10:"F10",
QtCore.Qt.Key_F11:"F11",
QtCore.Qt.Key_F12:"F12",
QtCore.Qt.Key_F13:"F13",
QtCore.Qt.Key_F14:"F14",
QtCore.Qt.Key_F15:"F15",
QtCore.Qt.Key_F16:"F16",
QtCore.Qt.Key_F17:"F17",
QtCore.Qt.Key_F18:"F18",
QtCore.Qt.Key_F19:"F19",
QtCore.Qt.Key_F20:"F20",
QtCore.Qt.Key_F21:"F21",
QtCore.Qt.Key_F22:"F22",
QtCore.Qt.Key_F23:"F23",
QtCore.Qt.Key_F24:"F24",
QtCore.Qt.Key_NumLock:"Num_Lock",
QtCore.Qt.Key_ScrollLock:"Scroll_Lock"}
if i in handler:
return handler[i]
else:
return "None"
def getRendererList(self):
""" getRendererList() -> list
Return a list of vtkRenderer running in this QVTKViewWidget
"""
result = []
renWin = self.GetRenderWindow()
renderers = renWin.GetRenderers()
renderers.InitTraversal()
for i in xrange(renderers.GetNumberOfItems()):
result.append(renderers.GetNextItem())
return result
def getActiveRenderer(self, iren):
""" getActiveRenderer(iren: vtkRenderWindowwInteractor) -> vtkRenderer
Return the active vtkRenderer under mouse
"""
epos = list(iren.GetEventPosition())
if epos[1]<0:
epos[1] = -epos[1]
rens = iren.GetRenderWindow().GetRenderers()
rens.InitTraversal()
for i in xrange(rens.GetNumberOfItems()):
ren = rens.GetNextItem()
if ren.IsInViewport(epos[0], epos[1]):
return ren
return None
def findSheetTabWidget(self):
""" findSheetTabWidget() -> QTabWidget
Find and return the sheet tab widget
"""
p = self.parent()
while p:
if hasattr(p, 'isSheetTabWidget'):
if p.isSheetTabWidget()==True:
return p
p = p.parent()
return None
def getRenderersInCellList(self, sheet, cells):
""" isRendererIn(sheet: spreadsheet.StandardWidgetSheet,
cells: [(int,int)]) -> bool
Get the list of renderers in side a list of (row, column)
cells.
"""
rens = []
for (row, col) in cells:
cell = sheet.getCell(row, col)
if hasattr(cell, 'getRendererList'):
rens += cell.getRendererList()
return rens
    def getSelectedCellWidgets(self):
        """ getSelectedCellWidgets() -> [QWidget]
        Return the selected spreadsheet cell widgets that expose a
        renderer list, but only when the renderer currently being
        interacted with belongs to one of the selected cells; otherwise
        return [].
        """
        sheet = self.findSheetTabWidget()
        if sheet:
            iren = self.mRenWin.GetInteractor()
            ren = self.interacting
            if not ren: ren = self.getActiveRenderer(iren)
            if ren:
                cells = sheet.getSelectedLocations()
                # Only sync when the interacting renderer is part of the
                # current selection
                if (ren in self.getRenderersInCellList(sheet, cells)):
                    return [sheet.getCell(row, col)
                            for (row, col) in cells
                            if hasattr(sheet.getCell(row, col),
                                       'getRendererList')]
        return []
    def interactionEvent(self, istyle, name):
        """ interactionEvent(istyle: vtkInteractorStyle, name: str) -> None
        Make sure interactions sync across selected renderers
        """
        # Apply the wheel zoom locally first
        if name=='MouseWheelForwardEvent':
            istyle.OnMouseWheelForward()
        if name=='MouseWheelBackwardEvent':
            istyle.OnMouseWheelBackward()
        ren = self.interacting
        if not ren:
            ren = self.getActiveRenderer(istyle.GetInteractor())
        if ren:
            # Copy this renderer's camera onto every renderer of the
            # other selected cells so their views stay in lockstep
            cam = ren.GetActiveCamera()
            cpos = cam.GetPosition()
            cfol = cam.GetFocalPoint()
            cup = cam.GetViewUp()
            for cell in self.getSelectedCellWidgets():
                if cell!=self and hasattr(cell, 'getRendererList'):
                    rens = cell.getRendererList()
                    for r in rens:
                        if r!=ren:
                            dcam = r.GetActiveCamera()
                            dcam.SetPosition(cpos)
                            dcam.SetFocalPoint(cfol)
                            dcam.SetViewUp(cup)
                            # Keep depth clipping valid for the new camera
                            r.ResetCameraClippingRange()
                    cell.update()
    def charEvent(self, istyle, name):
        """ charEvent(istyle: vtkInteractorStyle, name: str) -> None
        Make sure key presses also sync across selected renderers
        """
        iren = istyle.GetInteractor()
        ren = self.interacting
        if not ren: ren = self.getActiveRenderer(iren)
        if ren:
            keyCode = iren.GetKeyCode()
            # Only mirror the view-affecting keys to the other cells
            if keyCode in ['w','W','s','S','r','R','p','P']:
                for cell in self.getSelectedCellWidgets():
                    if hasattr(cell, 'GetInteractor'):
                        selectedIren = cell.GetInteractor()
                        selectedIren.SetKeyCode(keyCode)
                        selectedIren.GetInteractorStyle().OnChar()
                        selectedIren.Render()
        # Always process the key locally as well
        istyle.OnChar()
    def saveToPNG(self, filename):
        """ saveToPNG(filename: str) -> filename or vtkUnsignedCharArray
        Save the current widget contents to an image file. If
        str==None, then it returns the vtkUnsignedCharArray containing
        the PNG image. Otherwise, the filename is returned.
        """
        w2i = vtk.vtkWindowToImageFilter()
        # Capture the back buffer so overlapping windows don't show up
        w2i.ReadFrontBufferOff()
        w2i.SetInput(self.mRenWin)
        # Render twice to get a clean image on the back buffer
        self.mRenWin.Render()
        self.mRenWin.Render()
        w2i.Update()
        writer = vtk.vtkPNGWriter()
        writer.SetInputConnection(w2i.GetOutputPort())
        if filename!=None:
            writer.SetFileName(filename)
        else:
            # No filename: encode the PNG into memory instead
            writer.WriteToMemoryOn()
        writer.Write()
        if filename:
            return filename
        else:
            return writer.GetResult()
def captureWindow(self):
""" captureWindow() -> None
Capture the window contents to file
"""
fn = QtGui.QFileDialog.getSaveFileName(None,
"Save file as...",
"screenshot.png",
"Images (*.png)")
if fn.isNull():
return
self.saveToPNG(str(fn))
def grabWindowPixmap(self):
""" grabWindowImage() -> QPixmap
Widget special grabbing function
"""
uchar = self.saveToPNG(None)
ba = QtCore.QByteArray()
buf = QtCore.QBuffer(ba)
buf.open(QtCore.QIODevice.WriteOnly)
for i in xrange(uchar.GetNumberOfTuples()):
c = uchar.GetValue(i)
buf.putChar(chr(c))
buf.close()
pixmap = QtGui.QPixmap()
pixmap.loadFromData(ba, 'PNG')
return pixmap
def dumpToFile(self, filename):
"""dumpToFile() -> None
Dumps itself as an image to a file, calling saveToPNG
"""
self.saveToPNG(filename)
class QVTKViewWidgetCapture(QtGui.QAction):
    """
    Toolbar action that captures the vtk rendering window of the
    snapped cell to an image file.

    """
    def __init__(self, parent=None):
        """ QVTKViewWidgetCapture(parent: QWidget) -> QVTKViewWidgetCapture
        Set up the icon, caption and status tip of the action.

        """
        QtGui.QAction.__init__(self,
                               QtGui.QIcon(":/images/camera.png"),
                               "&Capture image to file",
                               parent)
        self.setStatusTip("Capture the rendered image to a file")

    def triggeredSlot(self, checked=False):
        """ triggeredSlot(checked: boolean) -> None
        Ask the snapped cell widget to capture its window.

        """
        self.toolBar.getSnappedWidget().captureWindow()
class QVTKViewWidgetSaveCamera(QtGui.QAction):
    """
    QVTKViewWidgetSaveCamera is the action to capture the current camera
    of the vtk renderers and save it back to the pipeline
    """
    def __init__(self, parent=None):
        """ QVTKViewWidgetSaveCamera(parent: QWidget) -> QVTKViewWidgetSaveCamera
        Setup the image, status tip, etc. of the action
        """
        QtGui.QAction.__init__(self,
                               "Save &Camera",
                               parent)
        self.setStatusTip("Save current camera views to the pipeline")

    def setCamera(self, controller):
        """ setCamera(controller: VistrailController) -> None
        Record the camera of every renderer in the snapped cell back
        into the vistrail pipeline as a single undoable action.
        """
        ops = []
        pipeline = controller.current_pipeline
        cellWidget = self.toolBar.getSnappedWidget()
        renderers = cellWidget.getRendererList()
        for ren in renderers:
            cam = ren.GetActiveCamera()
            cpos = cam.GetPosition()
            cfol = cam.GetFocalPoint()
            cup = cam.GetViewUp()
            # Map the live renderer back to its pipeline module id
            rendererId = cellWidget.renderer_maps[ren]
            # Looking for SetActiveCamera()
            camera = None
            renderer = pipeline.modules[rendererId]
            for c in pipeline.connections.values():
                if c.destination.moduleId==rendererId:
                    if c.destination.name=='SetActiveCamera':
                        camera = pipeline.modules[c.source.moduleId]
                        break

            if not camera:
                # Create camera
                vtk_package = 'edu.utah.sci.vistrails.vtk'
                camera = controller.create_module(vtk_package, 'vtkCamera', '',
                                                  0.0, 0.0)
                ops.append(('add', camera))

                # Connect camera to renderer
                camera_conn = controller.create_connection(camera, 'self',
                                                           renderer,
                                                           'SetActiveCamera')
                ops.append(('add', camera_conn))
            # update functions
            def convert_to_str(arglist):
                # Pipeline function parameters are stored as strings
                new_arglist = []
                for arg in arglist:
                    new_arglist.append(str(arg))
                return new_arglist
            functions = [('SetPosition', convert_to_str(cpos)),
                         ('SetFocalPoint', convert_to_str(cfol)),
                         ('SetViewUp', convert_to_str(cup))]
            ops.extend(controller.update_functions_ops(camera, functions))

        # Commit all camera changes as one action so it can be undone
        action = core.db.action.create_action(ops)
        controller.add_new_action(action)
        controller.perform_action(action)
        controller.select_latest_version()

    def triggeredSlot(self, checked=False):
        """ triggeredSlot(checked: boolean) -> None
        Execute the action when the button is clicked
        """
        # Find the vistrail/version that produced this cell, open its
        # controller, and save the camera into that version
        visApp = QtCore.QCoreApplication.instance()
        if hasattr(visApp, 'builderWindow'):
            builderWindow = visApp.builderWindow
            if builderWindow:
                info = self.toolBar.sheet.getCellPipelineInfo(
                    self.toolBar.row, self.toolBar.col)
                if info:
                    info = info[0]
                    viewManager = builderWindow.viewManager
                    view = viewManager.ensureVistrail(info['locator'])
                    if view:
                        controller = view.controller
                        controller.change_selected_version(info['version'])
                        self.setCamera(controller)
class QVTKViewWidgetToolBar(QCellToolBar):
    """
    QVTKViewWidgetToolBar derives from QCellToolBar to give the
    VTKViewCell a customizable toolbar.

    """
    def createToolBar(self):
        """ createToolBar() -> None
        Called once at start-up to populate the toolbar with the
        capture, animation and save-camera controls.

        """
        self.appendAction(QVTKViewWidgetCapture(self))
        self.addAnimationButtons()
        self.appendAction(QVTKViewWidgetSaveCamera(self))
def registerSelf():
""" registerSelf() -> None
Registry module with the registry
"""
identifier = 'edu.utah.sci.vistrails.vtk'
registry = get_module_registry()
registry.add_module(VTKViewCell)
registry.add_input_port(VTKViewCell, "Location", CellLocation)
import core.debug
for (port,module) in [("SetRenderView",'vtkRenderView')]:
try:
registry.add_input_port(VTKViewCell, port,'(%s:%s)'%(identifier,module))
except Exception, e:
core.debug.warning(str(e))
registry.add_output_port(VTKViewCell, "self", VTKViewCell)
| bsd-3-clause |
tarasane/h2o-3 | h2o-py/h2o/h2o_model_builder.py | 8 | 5491 | from connection import H2OConnection
from frame import H2OFrame
from job import H2OJob
from model.model_future import H2OModelFuture
from model.dim_reduction import H2ODimReductionModel
from model.autoencoder import H2OAutoEncoderModel
from model.multinomial import H2OMultinomialModel
from model.regression import H2ORegressionModel
from model.binomial import H2OBinomialModel
from model.clustering import H2OClusteringModel
def supervised_model_build(x=None,y=None,vx=None,vy=None,algo="",offsets=None,weights=None,fold_column=None,kwargs=None):
  """Validate response-column requirements, then delegate to _model_build.

  Autoencoders must not have a response; every other supervised build
  must, and a validation frame needs its own response too.
  """
  autoencoder = kwargs is not None and "autoencoder" in kwargs and kwargs["autoencoder"] is not None
  if autoencoder:
    if y is not None: raise ValueError("y should not be specified for autoencoder.")
  elif y is None:
    raise ValueError("Missing response")
  if vx is not None and vy is None: raise ValueError("Missing response validating a supervised model")
  return _model_build(x,y,vx,vy,algo,offsets,weights,fold_column,kwargs)
def supervised(kwargs):
  """Resolve frames/columns from the raw kwargs and launch a supervised build."""
  training   = kwargs["training_frame"]
  validation = kwargs["validation_frame"]
  x  = _frame_helper(kwargs["x"],            training)
  y  = _frame_helper(kwargs["y"],            training)
  vx = _frame_helper(kwargs["validation_x"], validation)
  vy = _frame_helper(kwargs["validation_y"], validation)
  # _ow also rewrites kwargs[name] to the resolved column name
  offsets     = _ow("offset_column",  kwargs)
  weights     = _ow("weights_column", kwargs)
  fold_column = _ow("fold_column",    kwargs)
  algo = kwargs["algo"]
  excluded = ["x","y","validation_x","validation_y","algo"]
  parms = {k:v for k,v in kwargs.items()
           if (k not in excluded and v is not None) or k=="validation_frame"}
  return supervised_model_build(x,y,vx,vy,algo,offsets,weights,fold_column,parms)
def unsupervised_model_build(x,validation_x,algo_url,kwargs):
  """Build an unsupervised model: no response column and no offsets/weights/folds."""
  return _model_build(x,None,validation_x,None,algo_url,None,None,None,kwargs)
def unsupervised(kwargs):
  """Resolve frames from the raw kwargs and launch an unsupervised build."""
  x  = _frame_helper(kwargs["x"],            kwargs["training_frame"])  # y is just None
  vx = _frame_helper(kwargs["validation_x"], kwargs["validation_frame"])
  algo = kwargs["algo"]
  parms = {k:v for k,v in kwargs.items()
           if k not in ["x","validation_x","algo"] and v is not None}
  return unsupervised_model_build(x,vx,algo,parms)
def _frame_helper(col,fr):
  """Resolve `col` to an H2OFrame: pass frames through, index column
  selections into `fr`, and map None to None."""
  if col is None: return None
  if isinstance(col,H2OFrame): return col
  # col is a column selector; it must be looked up in the frame
  if fr is None: raise ValueError("Missing training_frame")
  return fr[col]
def _ow(name,kwargs): # for checking offsets and weights, c is column, fr is frame
  """Resolve the offsets/weights/fold column selected by kwargs[name].

  Side effect: rewrites kwargs[name] to the resolved *column name* (or
  None), so the REST parameters carry a string rather than a frame.
  Returns the resolved column frame, or None.
  """
  c=kwargs[name]
  fr=kwargs["training_frame"]
  if c is None or isinstance(c,H2OFrame): res=c
  else:
    if fr is None: raise ValueError("offsets/weights/fold given, but missing training_frame")
    res=fr[c]
  kwargs[name] = None if res is None else res.col_names[0]
  if res is not None and kwargs["validation_x"] is not None and kwargs["validation_frame"] is None: # validation frame must have any offsets, weights, folds, etc.
    raise ValueError("offsets/weights/fold given, but missing validation_frame")
  return res
def _check_frame(x,y,response): # y and response are only ever different for validation
  """Eagerly evaluate x (and y), then attach y to x as a column named
  after response's first column.  Mutates and returns x; returns None
  when x is None.
  """
  if x is None: return None
  x._eager()
  if y is not None:
    y._eager()
    response._eager()
    # The response column in x is named after `response`, not `y`
    x[response._col_names[0]] = y
  return x
def _check_col(x,vx,vfr,col):
  """Attach column `col` to the training frame x, and the same-named
  column of the validation frame vfr to vx (skipped when vfr is None).
  Returns the (x, vx) pair.
  """
  x=_check_frame(x,col,col)
  # NOTE(review): uses col.names here while siblings use _col_names -- confirm both accessors exist
  vx= None if vfr is None else _check_frame(vx,vfr[col.names[0]],vfr[col.names[0]])
  return x,vx
def _model_build(x,y,vx,vy,algo,offsets,weights,fold_column,kwargs):
  """Assemble REST parameters and POST the model-build job.

  Attaches the response and any offsets/weights/fold columns to the
  training (and validation) frames, converts frame-valued kwargs to
  frame ids, then launches the build.  Returns an H2OModelFuture when
  kwargs["do_future"] is truthy, otherwise the resolved model.
  """
  if x is None: raise ValueError("Missing features")
  x =_check_frame(x,y,y)
  # Validation response column is named after y's column
  vx=_check_frame(vx,vy,y)
  if offsets is not None: x,vx=_check_col(x,vx,kwargs["validation_frame"],offsets)
  if weights is not None: x,vx=_check_col(x,vx,kwargs["validation_frame"],weights)
  if fold_column is not None: x,vx=_check_col(x,vx,kwargs["validation_frame"],fold_column)
  kwargs['training_frame']=x._id
  if vx is not None: kwargs['validation_frame']=vx._id
  if y is not None: kwargs['response_column']=y._col_names[0]
  # The REST layer wants frame ids, not H2OFrame objects; drop Nones
  kwargs = dict([(k, kwargs[k]._frame()._id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if kwargs[k] is not None])
  do_future = kwargs.pop("do_future") if "do_future" in kwargs else False
  future_model = H2OModelFuture(H2OJob(H2OConnection.post_json("ModelBuilders/"+algo, **kwargs), job_type=(algo+" Model Build")), x)
  return future_model if do_future else _resolve_model(future_model, **kwargs)
def _resolve_model(future_model, **kwargs):
  """Block until the model-build job finishes, fetch the model JSON,
  and wrap it in the model class matching its category.

  Raises NotImplementedError for an unknown model category.
  """
  future_model.poll()
  # `in kwargs` instead of `in kwargs.keys()` (same semantics, idiomatic)
  if '_rest_version' in kwargs:
    model_json = H2OConnection.get_json("Models/"+future_model.job.dest_key, _rest_version=kwargs['_rest_version'])["models"][0]
  else:
    model_json = H2OConnection.get_json("Models/"+future_model.job.dest_key)["models"][0]
  model_type = model_json["output"]["model_category"]
  # Dispatch table replaces the long if/elif chain
  model_classes = {"Binomial":     H2OBinomialModel,
                   "Clustering":   H2OClusteringModel,
                   "Regression":   H2ORegressionModel,
                   "Multinomial":  H2OMultinomialModel,
                   "AutoEncoder":  H2OAutoEncoderModel,
                   "DimReduction": H2ODimReductionModel}
  if model_type not in model_classes: raise NotImplementedError(model_type)
  return model_classes[model_type](future_model.job.dest_key, model_json)
| apache-2.0 |
apporc/cinder | cinder/keymgr/conf_key_mgr.py | 4 | 4816 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
An implementation of a key manager that reads its key from the project's
configuration options.
This key manager implementation provides limited security, assuming that the
key remains secret. Using the volume encryption feature as an example,
encryption provides protection against a lost or stolen disk, assuming that
the configuration file that contains the key is not stored on the disk.
Encryption also protects the confidentiality of data as it is transmitted via
iSCSI from the compute host to the storage host (again assuming that an
attacker who intercepts the data does not know the secret key).
Because this implementation uses a single, fixed key, it proffers no
protection once that key is compromised. In particular, different volumes
encrypted with a key provided by this key manager actually share the same
encryption key so *any* volume can be decrypted once the fixed key is known.
"""
import array
import binascii
from oslo_config import cfg
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LW
from cinder.keymgr import key
from cinder.keymgr import key_mgr
# Configuration group "keymgr": the single fixed AES key, given as a hex string
key_mgr_opts = [
    cfg.StrOpt('fixed_key',
               help='Fixed key returned by key manager, specified in hex'),
]

CONF = cfg.CONF
CONF.register_opts(key_mgr_opts, group='keymgr')

LOG = logging.getLogger(__name__)
class ConfKeyManager(key_mgr.KeyManager):
    """Key Manager that supports one key defined by the fixed_key conf option.
    This key manager implementation supports all the methods specified by the
    key manager interface. This implementation creates a single key in response
    to all invocations of create_key. Side effects (e.g., raising exceptions)
    for each method are handled as specified by the key manager interface.
    """

    def __init__(self):
        super(ConfKeyManager, self).__init__()
        # All operations use this fixed, nil-UUID key id
        self.key_id = '00000000-0000-0000-0000-000000000000'

    def _generate_key(self, **kwargs):
        """Build the AES SymmetricKey from the configured hex string."""
        _hex = self._generate_hex_key(**kwargs)
        key_list = array.array('B', binascii.unhexlify(_hex)).tolist()
        return key.SymmetricKey('AES', key_list)

    def _generate_hex_key(self, **kwargs):
        """Return CONF.keymgr.fixed_key; warn and raise when unset."""
        if CONF.keymgr.fixed_key is None:
            # Log before raising so operators see why operations fail
            LOG.warning(
                _LW('config option keymgr.fixed_key has not been defined:'
                    ' some operations may fail unexpectedly'))
            raise ValueError(_('keymgr.fixed_key not defined'))
        return CONF.keymgr.fixed_key

    def create_key(self, ctxt, **kwargs):
        """Creates a key.
        This implementation returns a UUID for the created key. A
        NotAuthorized exception is raised if the specified context is None.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        return self.key_id

    def store_key(self, ctxt, key, **kwargs):
        """Stores (i.e., registers) a key with the key manager."""
        if ctxt is None:
            raise exception.NotAuthorized()

        # Only the single fixed key can be "stored"; anything else is refused
        if key != self._generate_key():
            raise exception.KeyManagerError(
                reason="cannot store arbitrary keys")

        return self.key_id

    def copy_key(self, ctxt, key_id, **kwargs):
        """Return the fixed key id; there is only one key to copy."""
        if ctxt is None:
            raise exception.NotAuthorized()

        return self.key_id

    def get_key(self, ctxt, key_id, **kwargs):
        """Retrieves the key identified by the specified id.
        This implementation returns the key that is associated with the
        specified UUID. A NotAuthorized exception is raised if the specified
        context is None; a KeyError is raised if the UUID is invalid.
        """
        if ctxt is None:
            raise exception.NotAuthorized()

        if key_id != self.key_id:
            raise KeyError(key_id)

        return self._generate_key()

    def delete_key(self, ctxt, key_id, **kwargs):
        """Pretend to delete: the fixed key can never actually be removed."""
        if ctxt is None:
            raise exception.NotAuthorized()

        if key_id != self.key_id:
            raise exception.KeyManagerError(
                reason="cannot delete non-existent key")

        LOG.warning(_LW("Not deleting key %s"), key_id)
| apache-2.0 |
naslanidis/ansible | lib/ansible/modules/database/misc/redis.py | 25 | 10872 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: redis
short_description: Various redis commands, slave and flush
description:
- Unified utility to interact with redis instances.
'slave' sets a redis instance in slave or master mode.
'flush' flushes all the instance or a specified db.
'config' (new in 1.6), ensures a configuration setting on an instance.
version_added: "1.3"
options:
command:
description:
- The selected redis command
required: true
default: null
choices: [ "slave", "flush", "config" ]
login_password:
description:
- The password used to authenticate with (usually not used)
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 6379
master_host:
description:
- The host of the master instance [slave command]
required: false
default: null
master_port:
description:
- The port of the master instance [slave command]
required: false
default: null
slave_mode:
description:
- the mode of the redis instance [slave command]
required: false
default: slave
choices: [ "master", "slave" ]
db:
description:
- The database to flush (used in db mode) [flush command]
required: false
default: null
flush_mode:
description:
- Type of flush (all the dbs in a redis instance or a specific one)
[flush command]
required: false
default: all
choices: [ "all", "db" ]
name:
version_added: 1.6
description:
- A redis config key.
required: false
default: null
value:
version_added: 1.6
description:
- A redis config value.
required: false
default: null
notes:
- Requires the redis-py Python package on the remote host. You can
install it with pip (pip install redis) or with a package manager.
https://github.com/andymccurdy/redis-py
- If the redis master instance we are making slave of is password protected
this needs to be in the redis.conf in the masterauth variable
requirements: [ redis ]
author: "Xabier Larrakoetxea (@slok)"
'''
EXAMPLES = '''
# Set local redis instance to be slave of melee.island on port 6377
- redis:
command: slave
master_host: melee.island
master_port: 6377
# Deactivate slave mode
- redis:
command: slave
slave_mode: master
# Flush all the redis db
- redis:
command: flush
flush_mode: all
# Flush only one db in a redis instance
- redis:
command: flush
db: 1
flush_mode: db
# Configure local redis to have 10000 max clients
- redis:
command: config
name: maxclients
value: 10000
# Configure local redis to have lua time limit of 100 ms
- redis:
command: config
name: lua-time-limit
value: 100
'''
# Optional dependency: record availability so main() can fail with a
# clear message instead of an ImportError at module load time
try:
    import redis
except ImportError:
    redis_found = False
else:
    redis_found = True
# ===========================================
# Redis module specific support methods.
#
def set_slave_mode(client, master_host, master_port):
    """Put the redis instance behind `client` into slave mode of the
    given master.  Returns the client's response on success, or False
    when the command raises (best-effort, never propagates).
    """
    try:
        return client.slaveof(master_host, master_port)
    except Exception:
        return False
def set_master_mode(client):
    """Promote the redis instance behind `client` back to master
    (slaveof with no arguments).  Returns the client's response on
    success, or False when the command raises.
    """
    try:
        return client.slaveof()
    except Exception:
        return False
def flush(client, db=None):
    """Flush the whole instance (db is not an int) or the db the client
    is already connected to (db is an int).  Returns the client's
    response on success, or False when the command raises.
    """
    try:
        if isinstance(db, int):
            # The passed client has been connected to the database already
            return client.flushdb()
        return client.flushall()
    except Exception:
        return False
# ===========================================
# Module execution.
#
def main():
    """Module entry point: parse the Ansible arguments and dispatch to
    the 'slave', 'flush' or 'config' sub-command.
    """
    module = AnsibleModule(
        argument_spec = dict(
            command=dict(default=None, choices=['slave', 'flush', 'config']),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default='localhost'),
            login_port=dict(default=6379, type='int'),
            master_host=dict(default=None),
            master_port=dict(default=None, type='int'),
            slave_mode=dict(default='slave', choices=['master', 'slave']),
            db=dict(default=None, type='int'),
            flush_mode=dict(default='all', choices=['all', 'db']),
            name=dict(default=None),
            value=dict(default=None)
        ),
        supports_check_mode = True
    )

    if not redis_found:
        module.fail_json(msg="python redis module is required")

    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    command = module.params['command']

    # Slave Command section -----------
    if command == "slave":
        master_host = module.params['master_host']
        master_port = module.params['master_port']
        mode = module.params['slave_mode']

        #Check if we have all the data
        if mode == "slave":  # Only need data if we want to be slave
            if not master_host:
                module.fail_json(
                    msg='In slave mode master host must be provided')

            if not master_port:
                module.fail_json(
                    msg='In slave mode master port must be provided')

        #Connect and check
        r = redis.StrictRedis(host=login_host,
                              port=login_port,
                              password=login_password)
        try:
            r.ping()
        except Exception:
            e = get_exception()
            module.fail_json(msg="unable to connect to database: %s" % e)

        #Check if we are already in the mode that we want
        # (idempotency: report changed=False when nothing needs doing)
        info = r.info()
        if mode == "master" and info["role"] == "master":
            module.exit_json(changed=False, mode=mode)

        elif mode == "slave" and\
             info["role"] == "slave" and\
             info["master_host"] == master_host and\
             info["master_port"] == master_port:
            status = {
                'status': mode,
                'master_host': master_host,
                'master_port': master_port,
            }
            module.exit_json(changed=False, mode=status)
        else:
            # Do the stuff
            # (Check Check_mode before commands so the commands aren't evaluated
            # if not necessary)
            if mode == "slave":
                if module.check_mode or\
                   set_slave_mode(r, master_host, master_port):
                    info = r.info()
                    status = {
                        'status': mode,
                        'master_host': master_host,
                        'master_port': master_port,
                    }
                    module.exit_json(changed=True, mode=status)
                else:
                    module.fail_json(msg='Unable to set slave mode')

            else:
                if module.check_mode or set_master_mode(r):
                    module.exit_json(changed=True, mode=mode)
                else:
                    module.fail_json(msg='Unable to set master mode')

    # flush Command section -----------
    elif command == "flush":
        db = module.params['db']
        mode = module.params['flush_mode']

        #Check if we have all the data
        if mode == "db":
            if db is None:
                module.fail_json(
                    msg="In db mode the db number must be provided")

        #Connect and check
        # (connect directly to the target db so flushdb hits the right one)
        r = redis.StrictRedis(host=login_host,
                              port=login_port,
                              password=login_password,
                              db=db)
        try:
            r.ping()
        except Exception:
            e = get_exception()
            module.fail_json(msg="unable to connect to database: %s" % e)

        # Do the stuff
        # (Check Check_mode before commands so the commands aren't evaluated
        # if not necessary)
        if mode == "all":
            if module.check_mode or flush(r):
                module.exit_json(changed=True, flushed=True)
            else:  # Flush never fails :)
                module.fail_json(msg="Unable to flush all databases")

        else:
            if module.check_mode or flush(r, db):
                module.exit_json(changed=True, flushed=True, db=db)
            else:  # Flush never fails :)
                module.fail_json(msg="Unable to flush '%d' database" % db)

    elif command == 'config':
        name = module.params['name']
        value = module.params['value']

        r = redis.StrictRedis(host=login_host,
                              port=login_port,
                              password=login_password)

        try:
            r.ping()
        except Exception:
            e = get_exception()
            module.fail_json(msg="unable to connect to database: %s" % e)

        try:
            old_value = r.config_get(name)[name]
        except Exception:
            e = get_exception()
            module.fail_json(msg="unable to read config: %s" % e)
        changed = old_value != value

        # Only write when the value actually differs (and never in check mode)
        if module.check_mode or not changed:
            module.exit_json(changed=changed, name=name, value=value)
        else:
            try:
                r.config_set(name, value)
            except Exception:
                e = get_exception()
                module.fail_json(msg="unable to write config: %s" % e)
            module.exit_json(changed=changed, name=name, value=value)
    else:
        module.fail_json(msg='A valid command must be provided')
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
| gpl-3.0 |
gymnasium/edx-platform | common/djangoapps/enrollment/data.py | 6 | 10977 | """
Data Aggregation Layer of the Enrollment API. Collects all enrollment specific data into a single
source to be used throughout the API.
"""
import logging
from django.contrib.auth.models import User
from django.db import transaction
from opaque_keys.edx.keys import CourseKey
from six import text_type
from enrollment.errors import (
CourseEnrollmentClosedError,
CourseEnrollmentExistsError,
CourseEnrollmentFullError,
InvalidEnrollmentAttribute,
UserNotFoundError
)
from enrollment.serializers import CourseEnrollmentSerializer, CourseSerializer
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.lib.exceptions import CourseNotFoundError
from student.models import (
AlreadyEnrolledError,
CourseEnrollment,
CourseEnrollmentAttribute,
CourseFullError,
EnrollmentClosedError,
NonExistentCourseError
)
log = logging.getLogger(__name__)
def get_course_enrollments(user_id):
    """Return aggregated data for every active course enrollment of a user.

    Args:
        user_id (str): The username of the user to retrieve course
            enrollment information for.

    Returns:
        A serializable list of dictionaries of all aggregated enrollment
        data for the user, ordered by enrollment creation time.
    """
    queryset = CourseEnrollment.objects.filter(
        user__username=user_id,
        is_active=True
    ).order_by('created')
    serialized = CourseEnrollmentSerializer(queryset, many=True).data
    # Enrollments whose course was deleted serialize with no course_details;
    # keep only those that reference an existing course.
    valid = [e for e in serialized if e.get("course_details") is not None]
    if len(valid) != len(serialized):
        log.warning(
            (
                u"Course enrollments for user %s reference "
                u"courses that do not exist (this can occur if a course is deleted)."
            ), user_id,
        )
    return valid
def get_course_enrollment(username, course_id):
    """Return aggregated enrollment data for one user in one course.

    Args:
        username (str): The username of the user.
        course_id (str): The course to look up, as a course-key string.

    Returns:
        A serializable dictionary representing the enrollment, or None
        when the user has no enrollment record for the course.
    """
    course_key = CourseKey.from_string(course_id)
    try:
        record = CourseEnrollment.objects.get(
            user__username=username, course_id=course_key
        )
    except CourseEnrollment.DoesNotExist:
        return None
    return CourseEnrollmentSerializer(record).data
def get_user_enrollments(course_key):
    """Return all active user enrollments for the given course.

    Args:
        course_key (CourseKey): Identifier of the course whose
            enrollments are requested.

    Returns:
        A queryset of active CourseEnrollment rows, ordered by creation time.
    """
    active = CourseEnrollment.objects.filter(
        course_id=course_key,
        is_active=True
    )
    return active.order_by('created')
def create_course_enrollment(username, course_id, mode, is_active):
    """Create a new course enrollment for the given user.

    Args:
        username (str): The name of the user to create a new course enrollment for.
        course_id (str): The course to create the course enrollment for.
        mode (str): (Optional) The mode for the new enrollment.
        is_active (boolean): (Optional) Determines if the enrollment is active.

    Returns:
        A serializable dictionary representing the new course enrollment.

    Raises:
        UserNotFoundError
        CourseNotFoundError
        CourseEnrollmentFullError
        CourseEnrollmentClosedError
        CourseEnrollmentExistsError
    """
    course_key = CourseKey.from_string(course_id)
    # Delegate the lookup to the shared helper instead of duplicating it here;
    # this keeps the "user not found" message and logging consistent.
    user = _get_user(username)
    try:
        enrollment = CourseEnrollment.enroll(user, course_key, check_access=True)
        return _update_enrollment(enrollment, is_active=is_active, mode=mode)
    except NonExistentCourseError as err:
        raise CourseNotFoundError(text_type(err))
    except EnrollmentClosedError as err:
        raise CourseEnrollmentClosedError(text_type(err))
    except CourseFullError as err:
        raise CourseEnrollmentFullError(text_type(err))
    except AlreadyEnrolledError as err:
        # Attach the existing enrollment so callers can report it.
        enrollment = get_course_enrollment(username, course_id)
        raise CourseEnrollmentExistsError(text_type(err), enrollment)
def update_course_enrollment(username, course_id, mode=None, is_active=None):
    """Modify a course enrollment for a user.

    Args:
        username (str): The name of the user whose enrollment is modified.
        course_id (str): The course whose enrollment is modified.
        mode (str): (Optional) If specified, modify the mode for this enrollment.
        is_active (boolean): (Optional) Determines if the enrollment is active.

    Returns:
        A serializable dictionary representing the modified course enrollment,
        or None when the user has no enrollment in the course.

    Raises:
        UserNotFoundError
    """
    course_key = CourseKey.from_string(course_id)
    # Delegate the lookup to the shared helper instead of duplicating it here;
    # this keeps the "user not found" message and logging consistent.
    user = _get_user(username)
    try:
        enrollment = CourseEnrollment.objects.get(user=user, course_id=course_key)
        return _update_enrollment(enrollment, is_active=is_active, mode=mode)
    except CourseEnrollment.DoesNotExist:
        return None
def add_or_update_enrollment_attr(user_id, course_id, attributes):
    """Set enrollment attributes for the enrollment of given user in the
    course provided.

    Args:
        user_id (str): The User to set enrollment attributes for.
        course_id (str): The Course to set enrollment attributes for.
        attributes (list): Attributes to be set; each item is a dict with
            "namespace", "name" and "value" keys.

    Raises:
        UserNotFoundError: via _get_user when the user does not exist.
        InvalidEnrollmentAttribute: via _invalid_attribute when an
            attribute dict is missing a required key.

    Example:
        >>>add_or_update_enrollment_attr(
            "Bob",
            "course-v1-edX-DemoX-1T2015",
            [
                {
                    "namespace": "credit",
                    "name": "provider_id",
                    "value": "hogwarts",
                },
            ]
        )
    """
    course_key = CourseKey.from_string(course_id)
    user = _get_user(user_id)
    enrollment = CourseEnrollment.get_enrollment(user, course_key)
    # _invalid_attribute raises on the first malformed attribute, so when it
    # returns normally the attributes are valid and the check below is
    # effectively "enrollment exists".
    if not _invalid_attribute(attributes) and enrollment is not None:
        CourseEnrollmentAttribute.add_enrollment_attr(enrollment, attributes)
def get_enrollment_attributes(user_id, course_id):
    """Retrieve enrollment attributes for given user for provided course.

    Args:
        user_id (str): The User to get enrollment attributes for.
        course_id (str): The Course to get enrollment attributes for.

    Example:
        >>>get_enrollment_attributes("Bob", "course-v1-edX-DemoX-1T2015")
        [
            {
                "namespace": "credit",
                "name": "provider_id",
                "value": "hogwarts",
            },
        ]

    Returns: list

    Raises:
        UserNotFoundError: via _get_user when the user does not exist.
    """
    course_key = CourseKey.from_string(course_id)
    user = _get_user(user_id)
    # NOTE(review): enrollment is None when the user is not enrolled;
    # presumably get_enrollment_attributes tolerates that -- confirm.
    enrollment = CourseEnrollment.get_enrollment(user, course_key)
    return CourseEnrollmentAttribute.get_enrollment_attributes(enrollment)
def unenroll_user_from_all_courses(user_id):
    """
    Set all of a user's enrollments to inactive.

    Note: the docstring previously claimed a list of courses was returned;
    the function actually returns the set of org strings taken from each
    enrollment's course id.

    :param user_id: The user being unenrolled.
    :return: A set of org strings (one per distinct course organization)
        from which the user was unenrolled.
    """
    user = _get_user(user_id)
    enrollments = CourseEnrollment.objects.filter(user=user)
    # Deactivate every enrollment atomically so a failure leaves none
    # half-updated.
    with transaction.atomic():
        for enrollment in enrollments:
            _update_enrollment(enrollment, is_active=False)
    return set([str(enrollment.course_id.org) for enrollment in enrollments])
def _get_user(user_id):
    """Retrieve the user with the provided username.

    Args:
        user_id (str): username of the user to look up.

    Returns:
        User: the matching user object.

    Raises:
        UserNotFoundError: if no user with that username exists.
    """
    try:
        return User.objects.get(username=user_id)
    except User.DoesNotExist:
        # Fixed wording ("Not user" -> "No user") and switched from the
        # deprecated log.warn to log.warning.
        msg = u"No user with username '{username}' found.".format(username=user_id)
        log.warning(msg)
        raise UserNotFoundError(msg)
def _update_enrollment(enrollment, is_active=None, mode=None):
    """Apply mode/active changes to an enrollment, persist it, and return
    the serialized representation."""
    enrollment.update_enrollment(is_active=is_active, mode=mode)
    enrollment.save()
    return CourseEnrollmentSerializer(enrollment).data
def _invalid_attribute(attributes):
"""Validate enrollment attribute
Args:
attributes(dict): dict of attribute
Return:
list of invalid attributes
"""
invalid_attributes = []
for attribute in attributes:
if "namespace" not in attribute:
msg = u"'namespace' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("namespace")
raise InvalidEnrollmentAttribute(msg)
if "name" not in attribute:
msg = u"'name' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("name")
raise InvalidEnrollmentAttribute(msg)
if "value" not in attribute:
msg = u"'value' not in enrollment attribute"
log.warn(msg)
invalid_attributes.append("value")
raise InvalidEnrollmentAttribute(msg)
return invalid_attributes
def get_course_enrollment_info(course_id, include_expired=False):
    """Return all course enrollment information for the given course.

    Args:
        course_id (str): The course to retrieve enrollment information for.
        include_expired (bool): Whether expired course modes should be
            included in the returned data.

    Returns:
        A serializable dictionary representing the course's enrollment
        information.

    Raises:
        CourseNotFoundError: when no course overview exists for course_id.
    """
    course_key = CourseKey.from_string(course_id)
    try:
        overview = CourseOverview.get_from_id(course_key)
    except CourseOverview.DoesNotExist:
        msg = u"Requested enrollment information for unknown course {course}".format(course=course_id)
        log.warning(msg)
        raise CourseNotFoundError(msg)
    return CourseSerializer(overview, include_expired=include_expired).data
| agpl-3.0 |
fengshao0907/twemproxy | tests/test_system/test_reload.py | 36 | 4879 | #!/usr/bin/env python
#coding: utf-8
#file : test_reload.py
#author : ning
#date : 2014-09-03 12:28:16
import os
import sys
import redis
PWD = os.path.dirname(os.path.realpath(__file__))
WORKDIR = os.path.join(PWD,'../')
sys.path.append(os.path.join(WORKDIR,'lib/'))
sys.path.append(os.path.join(WORKDIR,'conf/'))
import conf
from server_modules import *
from utils import *
from nose import with_setup
CLUSTER_NAME = 'ntest'
nc_verbose = int(getenv('T_VERBOSE', 5))
mbuf = int(getenv('T_MBUF', 512))
large = int(getenv('T_LARGE', 1000))
T_RELOAD_DELAY = 3 + 1
all_redis = [
RedisServer('127.0.0.1', 2100, '/tmp/r/redis-2100/', CLUSTER_NAME, 'redis-2100'),
RedisServer('127.0.0.1', 2101, '/tmp/r/redis-2101/', CLUSTER_NAME, 'redis-2101'),
]
nc = NutCracker('127.0.0.1', 4100, '/tmp/r/nutcracker-4100', CLUSTER_NAME,
all_redis, mbuf=mbuf, verbose=nc_verbose)
def _setup():
    """Deploy and (re)start every redis instance plus the nutcracker proxy
    so each test begins from a fresh state."""
    print 'setup(mbuf=%s, verbose=%s)' %(mbuf, nc_verbose)
    for r in all_redis + [nc]:
        r.deploy()
        r.stop()
        r.start()
def _teardown():
    """Verify every process survived the test, then stop it."""
    for r in all_redis + [nc]:
        # A dead process here means the test crashed it -- fail loudly.
        assert(r._alive())
        r.stop()
def get_tcp_conn(host, port):
    """Open a raw TCP connection to (host, port) with a 300ms recv timeout."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s.settimeout(.3)
    return s
def send_cmd(s, req, resp):
    """Send the raw RESP request `req` on socket `s` and assert the reply
    equals `resp` exactly."""
    s.sendall(req)
    # NOTE(review): assumes the whole reply arrives in a single recv() --
    # acceptable for these tiny loopback responses.
    data = s.recv(10000)
    assert(data == resp)
@with_setup(_setup, _teardown)
def test_reload_with_old_conf():
    """Reloading nutcracker must keep old client connections alive for
    T_RELOAD_DELAY seconds, serve new connections from the new process,
    and close the old connections after the grace period."""
    if nc.version() < '0.4.2':
        print 'Ignore test_reload for version %s' % nc.version()
        return
    pid = nc.pid()
    # print 'old pid:', pid
    r = redis.Redis(nc.host(), nc.port())
    r.set('k', 'v')
    conn = get_tcp_conn(nc.host(), nc.port())
    send_cmd(conn, '*2\r\n$3\r\nGET\r\n$1\r\nk\r\n', '$1\r\nv\r\n')
    # nc.reload() is same as nc.stop() and nc.start()
    nc.reload()
    time.sleep(.01) #it need time for the old process fork new process.
    # the old connection is still ok in T_RELOAD_DELAY seconds
    send_cmd(conn, '*2\r\n$3\r\nGET\r\n$1\r\nk\r\n', '$1\r\nv\r\n')
    # conn2 should connect to new instance
    conn2 = get_tcp_conn(nc.host(), nc.port())
    send_cmd(conn2, '*2\r\n$3\r\nGET\r\n$1\r\nk\r\n', '$1\r\nv\r\n')
    # the old connection is still ok in T_RELOAD_DELAY seconds
    send_cmd(conn, '*2\r\n$3\r\nGET\r\n$1\r\nk\r\n', '$1\r\nv\r\n')
    time.sleep(T_RELOAD_DELAY)
    assert(pid != nc.pid())
    # assert the old connection is closed (empty reply == remote close).
    send_cmd(conn, '*2\r\n$3\r\nGET\r\n$1\r\nk\r\n', '')
    # conn2 should survive
    send_cmd(conn2, '*2\r\n$3\r\nGET\r\n$1\r\nk\r\n', '$1\r\nv\r\n')
    r = redis.Redis(nc.host(), nc.port())
    rst = r.set('k', 'v')
    assert(r.get('k') == 'v')
@with_setup(_setup, _teardown)
def test_new_port():
    """Reloading with a config that moves the pool to port 4101 must stop
    serving the old port and serve existing data on the new one."""
    if nc.version() < '0.4.2':
        print 'Ignore test_reload for version %s' % nc.version()
        return
    r = redis.Redis(nc.host(), nc.port())
    r.set('k', 'v')
    content = '''
reload_test:
    listen: 0.0.0.0:4101
    hash: fnv1a_64
    distribution: modula
    redis: true
    timeout: 400
    servers:
    - 127.0.0.1:2100:1 redis-2100
    - 127.0.0.1:2101:1 redis-2101
'''
    nc.set_config(content)
    time.sleep(T_RELOAD_DELAY)
    r1 = redis.Redis(nc.host(), nc.port())
    r2 = redis.Redis(nc.host(), 4101)
    assert_fail('Connection refused', r1.get, 'k')
    assert(r2.get('k') == 'v')
@with_setup(_setup, _teardown)
def test_pool_add_del():
    """Adding a second pool then replacing both with a single pool on a new
    port must open/close listeners accordingly; the final fd count checks
    that no sockets leaked across reloads."""
    if nc.version() < '0.4.2':
        print 'Ignore test_reload for version %s' % nc.version()
        return
    r = redis.Redis(nc.host(), nc.port())
    r.set('k', 'v')
    content = '''
reload_test:
    listen: 0.0.0.0:4100
    hash: fnv1a_64
    distribution: modula
    redis: true
    servers:
    - 127.0.0.1:2100:1 redis-2100
    - 127.0.0.1:2101:1 redis-2101
reload_test2:
    listen: 0.0.0.0:4101
    hash: fnv1a_64
    distribution: modula
    redis: true
    servers:
    - 127.0.0.1:2100:1 redis-2100
    - 127.0.0.1:2101:1 redis-2101
'''
    nc.set_config(content)
    time.sleep(T_RELOAD_DELAY)
    r1 = redis.Redis(nc.host(), nc.port())
    r2 = redis.Redis(nc.host(), 4101)
    assert(r1.get('k') == 'v')
    assert(r2.get('k') == 'v')
    content = '''
reload_test:
    listen: 0.0.0.0:4102
    hash: fnv1a_64
    distribution: modula
    redis: true
    preconnect: true
    servers:
    - 127.0.0.1:2100:1 redis-2100
    - 127.0.0.1:2101:1 redis-2101
'''
    nc.set_config(content)
    time.sleep(T_RELOAD_DELAY)
    pid = nc.pid()
    print system('ls -l /proc/%s/fd/' % pid)
    r3 = redis.Redis(nc.host(), 4102)
    assert_fail('Connection refused', r1.get, 'k')
    assert_fail('Connection refused', r2.get, 'k')
    assert(r3.get('k') == 'v')
    fds = system('ls -l /proc/%s/fd/' % pid)
    sockets = [s for s in fds.split('\n') if strstr(s, 'socket:') ]
    # pool + stat + 2 backend + 1 client
    assert(len(sockets) == 5)
| apache-2.0 |
ASCrookes/django | django/core/management/commands/loaddata.py | 294 | 12977 | from __future__ import unicode_literals
import glob
import gzip
import os
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.glob import glob_escape
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
    """Implementation of ``manage.py loaddata``: locate the named fixture
    files, deserialize them, and install their objects into the selected
    database inside a single transaction."""
    help = 'Installs the named fixture(s) in the database.'
    missing_args_message = ("No database fixture specified. Please provide the "
                            "path of at least one fixture in the command line.")
    def add_arguments(self, parser):
        """Register the command-line arguments for loaddata."""
        parser.add_argument('args', metavar='fixture', nargs='+',
            help='Fixture labels.')
        parser.add_argument('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
            'fixtures into. Defaults to the "default" database.')
        parser.add_argument('--app', action='store', dest='app_label',
            default=None, help='Only look for fixtures in the specified app.')
        parser.add_argument('--ignorenonexistent', '-i', action='store_true',
            dest='ignore', default=False,
            help='Ignores entries in the serialized data for fields that do not '
            'currently exist on the model.')
    def handle(self, *fixture_labels, **options):
        """Entry point: stash options on self and load all fixtures inside
        one atomic transaction."""
        self.ignore = options.get('ignore')
        self.using = options.get('database')
        self.app_label = options.get('app_label')
        self.hide_empty = options.get('hide_empty', False)
        self.verbosity = options.get('verbosity')
        with transaction.atomic(using=self.using):
            self.loaddata(fixture_labels)
        # Close the DB connection -- unless we're still in a transaction. This
        # is required as a workaround for an edge case in MySQL: if the same
        # connection is used to create tables, load data, and query, the query
        # can return incorrect results. See Django #7572, MySQL #37735.
        if transaction.get_autocommit(self.using):
            connections[self.using].close()
    def loaddata(self, fixture_labels):
        """Load every fixture label with constraint checks disabled, then
        re-validate constraints and reset sequences if needed."""
        connection = connections[self.using]
        # Keep a count of the installed objects and fixtures
        self.fixture_count = 0
        self.loaded_object_count = 0
        self.fixture_object_count = 0
        self.models = set()
        self.serialization_formats = serializers.get_public_serializer_formats()
        # Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
        self.compression_formats = {
            None: (open, 'rb'),
            'gz': (gzip.GzipFile, 'rb'),
            'zip': (SingleZipReader, 'r'),
        }
        if has_bz2:
            self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
        with connection.constraint_checks_disabled():
            for fixture_label in fixture_labels:
                self.load_label(fixture_label)
        # Since we disabled constraint checks, we must manually check for
        # any invalid keys that might have been added
        table_names = [model._meta.db_table for model in self.models]
        try:
            connection.check_constraints(table_names=table_names)
        except Exception as e:
            e.args = ("Problem installing fixtures: %s" % e,)
            raise
        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if self.loaded_object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
            if sequence_sql:
                if self.verbosity >= 2:
                    self.stdout.write("Resetting sequences\n")
                with connection.cursor() as cursor:
                    for line in sequence_sql:
                        cursor.execute(line)
        if self.verbosity >= 1:
            if self.fixture_count == 0 and self.hide_empty:
                pass
            elif self.fixture_object_count == self.loaded_object_count:
                self.stdout.write("Installed %d object(s) from %d fixture(s)" %
                    (self.loaded_object_count, self.fixture_count))
            else:
                self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" %
                    (self.loaded_object_count, self.fixture_object_count, self.fixture_count))
    def load_label(self, fixture_label):
        """
        Loads fixtures files for a given label.
        """
        show_progress = self.verbosity >= 3
        for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
            _, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
            open_method, mode = self.compression_formats[cmp_fmt]
            fixture = open_method(fixture_file, mode)
            try:
                self.fixture_count += 1
                objects_in_fixture = 0
                loaded_objects_in_fixture = 0
                if self.verbosity >= 2:
                    self.stdout.write("Installing %s fixture '%s' from %s." %
                        (ser_fmt, fixture_name, humanize(fixture_dir)))
                objects = serializers.deserialize(ser_fmt, fixture,
                    using=self.using, ignorenonexistent=self.ignore)
                for obj in objects:
                    objects_in_fixture += 1
                    # Only save objects routed to the target database.
                    if router.allow_migrate_model(self.using, obj.object.__class__):
                        loaded_objects_in_fixture += 1
                        self.models.add(obj.object.__class__)
                        try:
                            obj.save(using=self.using)
                            if show_progress:
                                self.stdout.write(
                                    '\rProcessed %i object(s).' % loaded_objects_in_fixture,
                                    ending=''
                                )
                        except (DatabaseError, IntegrityError) as e:
                            e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
                                'app_label': obj.object._meta.app_label,
                                'object_name': obj.object._meta.object_name,
                                'pk': obj.object.pk,
                                'error_msg': force_text(e)
                            },)
                            raise
                if objects and show_progress:
                    self.stdout.write('')  # add a newline after progress indicator
                self.loaded_object_count += loaded_objects_in_fixture
                self.fixture_object_count += objects_in_fixture
            except Exception as e:
                if not isinstance(e, CommandError):
                    e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
                raise
            finally:
                fixture.close()
            # Warn if the fixture we loaded contains 0 objects.
            if objects_in_fixture == 0:
                warnings.warn(
                    "No fixture data found for '%s'. (File format may be "
                    "invalid.)" % fixture_name,
                    RuntimeWarning
                )
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # the command instance alive for the cache's lifetime; acceptable here
    # because a management command is short-lived.
    @lru_cache.lru_cache(maxsize=None)
    def find_fixtures(self, fixture_label):
        """
        Finds fixture files for a given label.
        """
        fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
        databases = [self.using, None]
        cmp_fmts = list(self.compression_formats.keys()) if cmp_fmt is None else [cmp_fmt]
        ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
        if self.verbosity >= 2:
            self.stdout.write("Loading '%s' fixtures..." % fixture_name)
        if os.path.isabs(fixture_name):
            fixture_dirs = [os.path.dirname(fixture_name)]
            fixture_name = os.path.basename(fixture_name)
        else:
            fixture_dirs = self.fixture_dirs
            if os.path.sep in os.path.normpath(fixture_name):
                fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
                                for dir_ in fixture_dirs]
                fixture_name = os.path.basename(fixture_name)
        # Candidate file names: name[.database][.serializer][.compression]
        suffixes = ('.'.join(ext for ext in combo if ext)
                    for combo in product(databases, ser_fmts, cmp_fmts))
        targets = set('.'.join((fixture_name, suffix)) for suffix in suffixes)
        fixture_files = []
        for fixture_dir in fixture_dirs:
            if self.verbosity >= 2:
                self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
            fixture_files_in_dir = []
            path = os.path.join(fixture_dir, fixture_name)
            for candidate in glob.iglob(glob_escape(path) + '*'):
                if os.path.basename(candidate) in targets:
                    # Save the fixture_dir and fixture_name for future error messages.
                    fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
            if self.verbosity >= 2 and not fixture_files_in_dir:
                self.stdout.write("No fixture '%s' in %s." %
                                  (fixture_name, humanize(fixture_dir)))
            # Check kept for backwards-compatibility; it isn't clear why
            # duplicates are only allowed in different directories.
            if len(fixture_files_in_dir) > 1:
                raise CommandError(
                    "Multiple fixtures named '%s' in %s. Aborting." %
                    (fixture_name, humanize(fixture_dir)))
            fixture_files.extend(fixture_files_in_dir)
        if not fixture_files:
            # Warning kept for backwards-compatibility; why not an exception?
            warnings.warn("No fixture named '%s' found." % fixture_name)
        return fixture_files
    @cached_property
    def fixture_dirs(self):
        """
        Return a list of fixture directories.
        The list contains the 'fixtures' subdirectory of each installed
        application, if it exists, the directories in FIXTURE_DIRS, and the
        current directory.
        """
        dirs = []
        fixture_dirs = settings.FIXTURE_DIRS
        if len(fixture_dirs) != len(set(fixture_dirs)):
            raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
        for app_config in apps.get_app_configs():
            app_label = app_config.label
            app_dir = os.path.join(app_config.path, 'fixtures')
            if app_dir in fixture_dirs:
                raise ImproperlyConfigured(
                    "'%s' is a default fixture directory for the '%s' app "
                    "and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
                )
            if self.app_label and app_label != self.app_label:
                continue
            if os.path.isdir(app_dir):
                dirs.append(app_dir)
        dirs.extend(list(fixture_dirs))
        dirs.append('')
        dirs = [upath(os.path.abspath(os.path.realpath(d))) for d in dirs]
        return dirs
    def parse_name(self, fixture_name):
        """
        Splits fixture name in name, serialization format, compression format.
        """
        parts = fixture_name.rsplit('.', 2)
        if len(parts) > 1 and parts[-1] in self.compression_formats:
            cmp_fmt = parts[-1]
            parts = parts[:-1]
        else:
            cmp_fmt = None
        if len(parts) > 1:
            if parts[-1] in self.serialization_formats:
                ser_fmt = parts[-1]
                parts = parts[:-1]
            else:
                raise CommandError(
                    "Problem installing fixture '%s': %s is not a known "
                    "serialization format." % (''.join(parts[:-1]), parts[-1]))
        else:
            ser_fmt = None
        name = '.'.join(parts)
        return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
    """Zip archive reader that requires the archive to hold exactly one
    member file and reads that member directly."""
    def __init__(self, *args, **kwargs):
        super(SingleZipReader, self).__init__(*args, **kwargs)
        if len(self.namelist()) != 1:
            raise ValueError("Zip-compressed fixtures must contain one file.")
    def read(self):
        # Delegate to ZipFile.read() with the archive's only member name.
        return super(SingleZipReader, self).read(self.namelist()[0])
def humanize(dirname):
    """Return *dirname* single-quoted for display, or the placeholder
    'absolute path' when it is empty."""
    if dirname:
        return "'%s'" % dirname
    return 'absolute path'
| bsd-3-clause |
SublimeText/Pywin32 | lib/x32/win32com/demos/eventsApartmentThreaded.py | 10 | 3752 | # A sample originally provided by Richard Bell, and modified by Mark Hammond.
# This sample demonstrates how to use COM events in an aparment-threaded
# world. In this world, COM itself ensures that all calls to and events
# from an object happen on the same thread that created the object, even
# if they originated from different threads. For this cross-thread
# marshalling to work, this main thread *must* run a "message-loop" (ie,
# a loop fetching and dispatching Windows messages). Without such message
# processing, dead-locks can occur.
# See also eventsFreeThreaded.py for how to do this in a free-threaded
# world where these marshalling considerations do not exist.
# NOTE: This example uses Internet Explorer, but it should not be considerd
# a "best-practices" for writing against IE events, but for working with
# events in general. For example:
# * The first OnDocumentComplete event is not a reliable indicator that the
# URL has completed loading
# * As we are demonstrating the most efficient way of handling events, when
# running this sample you will see an IE Windows briefly appear, but
# vanish without ever being repainted.
import sys
import os
import win32com.client
import win32api
import win32event
# sys.coinit_flags not set, so pythoncom initializes apartment-threaded.
import pythoncom
import time
class ExplorerEvents:
    """Event sink for InternetExplorer.Application COM events.

    Signals a win32 auto-reset event when OnDocumentComplete or OnQuit
    fires, so the main thread (which pumps messages) can wait on it.
    """
    def __init__(self):
        # Auto-reset, initially unsignalled event object.
        self.event = win32event.CreateEvent(None, 0, 0, None)
    def OnDocumentComplete(self,
                           pDisp=pythoncom.Empty,
                           URL=pythoncom.Empty):
        thread = win32api.GetCurrentThreadId()
        print("OnDocumentComplete event processed on thread %d"%thread)
        # Set the event our main thread is waiting on.
        win32event.SetEvent(self.event)
    def OnQuit(self):
        thread = win32api.GetCurrentThreadId()
        print("OnQuit event processed on thread %d"%thread)
        win32event.SetEvent(self.event)
def WaitWhileProcessingMessages(event, timeout = 2):
    """Pump Windows messages until `event` is signalled or `timeout`
    seconds elapse.

    Message pumping is required in an apartment-threaded world so that
    cross-thread COM events can be delivered to this thread.

    Returns:
        True when the event was signalled, False on timeout.
    """
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # time.monotonic() is the correct way to measure an elapsed interval.
    start = time.monotonic()
    while True:
        # Wake 4 times a second - we can't just specify the
        # full timeout here, as then it would reset for every
        # message we process.
        rc = win32event.MsgWaitForMultipleObjects( (event,), 0,
                                                   250,
                                                   win32event.QS_ALLEVENTS)
        if rc == win32event.WAIT_OBJECT_0:
            # event signalled - stop now!
            return True
        if (time.monotonic() - start) > timeout:
            # Timeout expired.
            return False
        # must be a message.
        pythoncom.PumpWaitingMessages()
def TestExplorerEvents():
    """Drive IE through a navigate/quit cycle, asserting each COM event is
    delivered to this (apartment) thread while messages are pumped."""
    iexplore = win32com.client.DispatchWithEvents(
        "InternetExplorer.Application", ExplorerEvents)
    thread = win32api.GetCurrentThreadId()
    print('TestExplorerEvents created IE object on thread %d'%thread)
    iexplore.Visible = 1
    try:
        iexplore.Navigate(win32api.GetFullPathName('..\\readme.htm'))
    except pythoncom.com_error as details:
        print("Warning - could not open the test HTML file", details)
    # Wait for the event to be signalled while pumping messages.
    if not WaitWhileProcessingMessages(iexplore.event):
        print("Document load event FAILED to fire!!!")
    iexplore.Quit()
    #
    # Give IE a chance to shutdown, else it can get upset on fast machines.
    # Note, Quit generates events.  Although this test does NOT catch them
    # it is NECESSARY to pump messages here instead of a sleep so that the Quit
    # happens properly!
    if not WaitWhileProcessingMessages(iexplore.event):
        print("OnQuit event FAILED to fire!!!")
    # Drop our reference so IE can fully release.
    iexplore = None
if __name__=='__main__':
TestExplorerEvents()
| bsd-3-clause |
ryangallen/django | django/contrib/gis/db/models/fields.py | 310 | 17126 | from django.contrib.gis import forms
from django.contrib.gis.db.models.lookups import gis_lookups
from django.contrib.gis.db.models.proxy import SpatialProxy
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.core.exceptions import ImproperlyConfigured
from django.db.models.expressions import Expression
from django.db.models.fields import Field
from django.utils import six
from django.utils.translation import ugettext_lazy as _
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
    """
    Return the units, unit name, and spheroid WKT associated with the
    given SRID from the `spatial_ref_sys` (or equivalent) spatial database
    table for the given database connection. Results are cached per
    database alias so the table is queried at most once per SRID.
    """
    try:
        # The SpatialRefSys model for the spatial backend.
        SpatialRefSys = connection.ops.spatial_ref_sys()
    except NotImplementedError:
        # No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
        return None, None, None
    # No `global` needed: the module-level dict is mutated, never rebound.
    alias_cache = _srid_cache.setdefault(connection.alias, {})
    if srid not in alias_cache:
        # Use `SpatialRefSys` model to query for spatial reference info.
        sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
        units, units_name = sr.units
        spheroid = SpatialRefSys.get_spheroid(sr.wkt)
        alias_cache[srid] = (units, units_name, spheroid)
    return alias_cache[srid]
class GeoSelectFormatMixin(object):
    """Mixin supplying backend-aware SELECT formatting for geometry columns."""
    def select_format(self, compiler, sql, params):
        """
        Returns the selection format string, depending on the requirements
        of the spatial backend. For example, Oracle and MySQL require custom
        selection formats in order to retrieve geometries in OGC WKT. For all
        other fields a simple '%s' format string is returned.
        """
        connection = compiler.connection
        srid = compiler.query.get_context('transformed_srid')
        if srid:
            # Wrap the column in the backend's transform() to the target SRID.
            sel_fmt = '%s(%%s, %s)' % (connection.ops.transform, srid)
        else:
            sel_fmt = '%s'
        ops_select = connection.ops.select
        if ops_select:
            # Backends like Oracle/MySQL wrap the selection to return
            # geometries as WKT; also used by the `transform` method.
            sel_fmt = ops_select % sel_fmt
        return sel_fmt % sql, params
class BaseSpatialField(Field):
    """
    Base class shared by GeometryField and RasterField.

    Holds the characteristics common to every GIS field, chiefly the
    spatial reference system (SRID) of the column and whether a spatial
    index should be created for it.
    """
    description = _("The base GIS field.")
    # Unit names that denote a geodetic (angular) coordinate system.
    geodetic_units = ('decimal degree', 'degree')

    def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):
        """
        Initialize a base spatial field.  Keyword arguments:

        srid:
            The spatial reference system identifier, an OGC standard.
            Defaults to 4326 (WGS84).

        spatial_index:
            Indicates whether to create a spatial index.  Defaults to True.
            Set this instead of 'db_index' for geographic fields since index
            creation is different for geometry columns.
        """
        self.spatial_index = spatial_index
        # Keep the SRID on the instance: unit information must be readily
        # available for distance queries.
        self.srid = srid
        # Behave like a regular field: the first positional argument is the
        # verbose name.
        kwargs['verbose_name'] = verbose_name
        super(BaseSpatialField, self).__init__(**kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(BaseSpatialField, self).deconstruct()
        # Always serialize the SRID for less fragility; only serialize the
        # spatial index flag when it differs from the default.
        kwargs['srid'] = self.srid
        if self.spatial_index is not True:
            kwargs['spatial_index'] = self.spatial_index
        return name, path, args, kwargs

    def db_type(self, connection):
        # Delegate to the backend, which knows its own geometry column type.
        return connection.ops.geo_db_type(self)

    # Lazy accessors for the units, unit name, and spheroid corresponding to
    # this field's SRID.  The values are looked up once per field instance
    # and cached as private attributes.
    def _get_srid_info(self, connection):
        self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)

    def spheroid(self, connection):
        try:
            return self._spheroid
        except AttributeError:
            self._get_srid_info(connection)
            return self._spheroid

    def units(self, connection):
        try:
            return self._units
        except AttributeError:
            self._get_srid_info(connection)
            return self._units

    def units_name(self, connection):
        try:
            return self._units_name
        except AttributeError:
            self._get_srid_info(connection)
            return self._units_name

    def geodetic(self, connection):
        """
        Return True if this field's SRID corresponds with a coordinate
        system that uses non-projected units (e.g., latitude/longitude).
        """
        units_name = self.units_name(connection)
        if units_name:
            return units_name.lower() in self.geodetic_units
        # Some backends (e.g. MySQL) cannot determine the units name; fall
        # back to treating WGS84 (SRID 4326) as geodetic, even though this
        # is an over-simplification.
        return self.srid == 4326

    def get_placeholder(self, value, compiler, connection):
        """
        Return the placeholder for the spatial column for the given value.
        """
        return connection.ops.get_geom_placeholder(self, value, compiler)
class GeometryField(GeoSelectFormatMixin, BaseSpatialField):
    """
    The base Geometry field -- maps to the OpenGIS Specification Geometry type.
    """
    description = _("The base Geometry field -- maps to the OpenGIS Specification Geometry type.")
    form_class = forms.GeometryField
    # The OpenGIS Geometry name.
    geom_type = 'GEOMETry'.upper()  # noqa -- see below; kept literal in code
    def __init__(self, verbose_name=None, dim=2, geography=False, **kwargs):
        """
        The initialization function for geometry fields. In addition to the
        parameters from BaseSpatialField, it takes the following as keyword
        arguments:

        dim:
         The number of dimensions for this geometry. Defaults to 2.

        extent:
         Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
         geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
         to (-180.0, -90.0, 180.0, 90.0).

        tolerance:
         Define the tolerance, in meters, to use for the geometry field
         entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
        """
        # Setting the dimension of the geometry field.
        self.dim = dim
        # Is this a geography rather than a geometry column?
        self.geography = geography
        # Oracle-specific private attributes for creating the entry in
        # `USER_SDO_GEOM_METADATA`
        self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
        self._tolerance = kwargs.pop('tolerance', 0.05)
        super(GeometryField, self).__init__(verbose_name=verbose_name, **kwargs)
    def deconstruct(self):
        name, path, args, kwargs = super(GeometryField, self).deconstruct()
        # Include kwargs if they're not the default values.
        if self.dim != 2:
            kwargs['dim'] = self.dim
        if self.geography is not False:
            kwargs['geography'] = self.geography
        return name, path, args, kwargs
    # ### Routines specific to GeometryField ###
    def get_distance(self, value, lookup_type, connection):
        """
        Returns a distance number in units of the field. For example, if
        `D(km=1)` was passed in and the units of the field were in meters,
        then 1000 would be returned.
        """
        return connection.ops.get_distance(self, value, lookup_type)
    def get_prep_value(self, value):
        """
        Spatial lookup values are either a parameter that is (or may be
        converted to) a geometry, or a sequence of lookup values that
        begins with a geometry. This routine will setup the geometry
        value properly, and preserve any other lookup parameters before
        returning to the caller.
        """
        value = super(GeometryField, self).get_prep_value(value)
        if isinstance(value, Expression):
            return value
        elif isinstance(value, (tuple, list)):
            # Sequence lookup: the geometry is the first element, the rest
            # (e.g. a distance) is preserved and re-attached below.
            geom = value[0]
            seq_value = True
        else:
            geom = value
            seq_value = False
        # When the input is not a GEOS geometry, attempt to construct one
        # from the given string input.
        if isinstance(geom, Geometry):
            pass
        elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
            try:
                geom = Geometry(geom)
            except GeometryException:
                raise ValueError('Could not create geometry from lookup value.')
        else:
            raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
        # Assigning the SRID value.
        geom.srid = self.get_srid(geom)
        if seq_value:
            lookup_val = [geom]
            lookup_val.extend(value[1:])
            return tuple(lookup_val)
        else:
            return geom
    def from_db_value(self, value, expression, connection, context):
        # Coerce the raw database value into a Geometry, leaving NULLs and
        # already-converted values untouched.
        if value and not isinstance(value, Geometry):
            value = Geometry(value)
        return value
    def get_srid(self, geom):
        """
        Returns the default SRID for the given geometry, taking into account
        the SRID set for the field. For example, if the input geometry
        has no SRID, then that of the field will be returned.
        """
        gsrid = geom.srid  # SRID of given geometry.
        if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
            return self.srid
        else:
            return gsrid
    # ### Routines overloaded from Field ###
    def contribute_to_class(self, cls, name, **kwargs):
        super(GeometryField, self).contribute_to_class(cls, name, **kwargs)
        # Setup for lazy-instantiated Geometry object.
        setattr(cls, self.attname, SpatialProxy(Geometry, self))
    def formfield(self, **kwargs):
        defaults = {'form_class': self.form_class,
                    'geom_type': self.geom_type,
                    'srid': self.srid,
                    }
        defaults.update(kwargs)
        # Fall back to a plain Textarea for 3D geometries unless the caller
        # supplied a widget or the form widget advertises 3D support.
        if (self.dim > 2 and 'widget' not in kwargs and
                not getattr(defaults['form_class'].widget, 'supports_3d', False)):
            defaults['widget'] = forms.Textarea
        return super(GeometryField, self).formfield(**defaults)
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        """
        Prepare for the database lookup, and return any spatial parameters
        necessary for the query. This includes wrapping any geometry
        parameters with a backend-specific adapter and formatting any distance
        parameters into the correct units for the coordinate system of the
        field.
        """
        # special case for isnull lookup
        if lookup_type == 'isnull':
            return []
        elif lookup_type in self.class_lookups:
            # Populating the parameters list, and wrapping the Geometry
            # with the Adapter of the spatial backend.
            if isinstance(value, (tuple, list)):
                params = [connection.ops.Adapter(value[0])]
                if self.class_lookups[lookup_type].distance:
                    # Getting the distance parameter in the units of the field.
                    params += self.get_distance(value[1:], lookup_type, connection)
                elif lookup_type in connection.ops.truncate_params:
                    # Lookup is one where SQL parameters aren't needed from the
                    # given lookup value.
                    pass
                else:
                    params += value[1:]
            elif isinstance(value, Expression):
                params = []
            else:
                params = [connection.ops.Adapter(value)]
            return params
        else:
            raise ValueError('%s is not a valid spatial lookup for %s.' %
                             (lookup_type, self.__class__.__name__))
    def get_prep_lookup(self, lookup_type, value):
        if lookup_type == 'contains':
            # 'contains' name might conflict with the "normal" contains lookup,
            # for which the value is not prepared, but left as-is.
            return self.get_prep_value(value)
        return super(GeometryField, self).get_prep_lookup(lookup_type, value)
    def get_db_prep_save(self, value, connection):
        "Prepares the value for saving in the database."
        # Falsy values (None, empty string) are stored as NULL.
        if not value:
            return None
        else:
            return connection.ops.Adapter(self.get_prep_value(value))
# Register every GIS-specific lookup class on GeometryField so they are
# available to queryset filters on it and all of its subclasses.
for klass in gis_lookups.values():
    GeometryField.register_lookup(klass)
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
    """Maps to the OpenGIS POINT geometry type."""
    geom_type = 'POINT'
    form_class = forms.PointField
    description = _("Point")
class LineStringField(GeometryField):
    """Maps to the OpenGIS LINESTRING geometry type."""
    geom_type = 'LINESTRING'
    form_class = forms.LineStringField
    description = _("Line string")
class PolygonField(GeometryField):
    """Maps to the OpenGIS POLYGON geometry type."""
    geom_type = 'POLYGON'
    form_class = forms.PolygonField
    description = _("Polygon")
class MultiPointField(GeometryField):
    """Maps to the OpenGIS MULTIPOINT geometry type."""
    geom_type = 'MULTIPOINT'
    form_class = forms.MultiPointField
    description = _("Multi-point")
class MultiLineStringField(GeometryField):
    """Maps to the OpenGIS MULTILINESTRING geometry type."""
    geom_type = 'MULTILINESTRING'
    form_class = forms.MultiLineStringField
    description = _("Multi-line string")
class MultiPolygonField(GeometryField):
    """Maps to the OpenGIS MULTIPOLYGON geometry type."""
    geom_type = 'MULTIPOLYGON'
    form_class = forms.MultiPolygonField
    description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
    """Maps to the OpenGIS GEOMETRYCOLLECTION geometry type."""
    geom_type = 'GEOMETRYCOLLECTION'
    form_class = forms.GeometryCollectionField
    description = _("Geometry collection")
class ExtentField(GeoSelectFormatMixin, Field):
    "Used as a return value from an extent aggregate"
    description = _("Extent Aggregate Field")
    def get_internal_type(self):
        # Report a custom internal type so backends can special-case the
        # bounding-box value this pseudo-field represents.
        return "ExtentField"
class RasterField(BaseSpatialField):
    """
    Raster field for GeoDjango -- evaluates into GDALRaster objects.
    """
    description = _("Raster Field")
    geom_type = 'RASTER'

    def __init__(self, *args, **kwargs):
        # GDAL is mandatory: raster data cannot be represented without it.
        if not HAS_GDAL:
            raise ImproperlyConfigured('RasterField requires GDAL.')
        super(RasterField, self).__init__(*args, **kwargs)

    def _check_connection(self, connection):
        # Raster fields only make sense on spatial backends that support
        # rasters; reject any other database loudly.
        supported = connection.features.gis_enabled and connection.features.supports_raster
        if not supported:
            raise ImproperlyConfigured('Raster fields require backends with raster support.')

    def db_type(self, connection):
        self._check_connection(connection)
        return super(RasterField, self).db_type(connection)

    def from_db_value(self, value, expression, connection, context):
        # Let the backend turn the raw database value into a raster object.
        return connection.ops.parse_raster(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        self._check_connection(connection)
        if not prepared:
            # Deconstruct the raster into a database-writable representation.
            value = connection.ops.deconstruct_raster(value)
        return super(RasterField, self).get_db_prep_value(value, connection, prepared)

    def contribute_to_class(self, cls, name, **kwargs):
        super(RasterField, self).contribute_to_class(cls, name, **kwargs)
        # Importing GDALRaster raises an exception on systems without gdal,
        # hence the deferred import here.
        from django.contrib.gis.gdal import GDALRaster
        # Lazy-instantiate the raster: building GDALRaster objects for every
        # row of a large queryset is potentially expensive, so the proxy
        # delays it until the attribute is actually accessed.
        setattr(cls, self.attname, SpatialProxy(GDALRaster, self))
| bsd-3-clause |
laborautonomo/poedit | deps/boost/tools/build/v2/test/conditionals_multiple.py | 38 | 13574 | #!/usr/bin/python
# Copyright 2008 Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests that properties conditioned on more than one other property work as
# expected.
import BoostBuild
###############################################################################
#
# test_multiple_conditions()
# --------------------------
#
###############################################################################
def test_multiple_conditions():
    """Basic tests for properties conditioned on multiple other properties."""
    t = BoostBuild.Tester(["--user-config=", "--ignore-site-config",
        "toolset=testToolset"], pass_toolset=False, use_test_config=False)
    t.write("testToolset.jam", """\
import feature ;
feature.extend toolset : testToolset ;
rule init ( ) { }
""")
    t.write("testToolset.py", """\
from b2.build import feature
feature.extend('toolset', ["testToolset"])
def init ( ): pass
""")
    t.write("jamroot.jam", """\
import feature ;
import notfile ;
import toolset ;
feature.feature description : : free incidental ;
feature.feature aaa : 1 0 : incidental ;
feature.feature bbb : 1 0 : incidental ;
feature.feature ccc : 1 0 : incidental ;
rule buildRule ( name : targets ? : properties * )
{
    for local description in [ feature.get-values description : $(properties) ]
    {
        ECHO "description:" /$(description)/ ;
    }
}
notfile testTarget1 : @buildRule : :
    <description>d
    <aaa>0:<description>a0
    <aaa>1:<description>a1
    <aaa>0,<bbb>0:<description>a0-b0
    <aaa>0,<bbb>1:<description>a0-b1
    <aaa>1,<bbb>0:<description>a1-b0
    <aaa>1,<bbb>1:<description>a1-b1
    <aaa>0,<bbb>0,<ccc>0:<description>a0-b0-c0
    <aaa>0,<bbb>0,<ccc>1:<description>a0-b0-c1
    <aaa>0,<bbb>1,<ccc>1:<description>a0-b1-c1
    <aaa>1,<bbb>0,<ccc>1:<description>a1-b0-c1
    <aaa>1,<bbb>1,<ccc>0:<description>a1-b1-c0
    <aaa>1,<bbb>1,<ccc>1:<description>a1-b1-c1 ;
""")
    # Every conditional <description> value the jamfile can emit, in the
    # order each one is checked for after a build.
    descriptions = ["d", "a0", "a1", "a0-b0", "a0-b1", "a1-b0", "a1-b1",
        "a0-b0-c0", "a0-b0-c1", "a0-b1-c1", "a1-b0-c1", "a1-b1-c0",
        "a1-b1-c1"]
    # Pairs of (build request, descriptions expected in the output); all
    # other descriptions must be absent.
    scenarios = [
        (["aaa=1", "bbb=1", "ccc=1"], {"d", "a1", "a1-b1", "a1-b1-c1"}),
        (["aaa=0", "bbb=0", "ccc=1"], {"d", "a0", "a0-b0", "a0-b0-c1"}),
        (["aaa=0", "bbb=0", "ccc=0"], {"d", "a0", "a0-b0", "a0-b0-c0"}),
    ]
    for build_request, expected in scenarios:
        t.run_build_system(build_request)
        for description in descriptions:
            line = "description: /%s/" % (description,)
            if description in expected:
                t.expect_output_lines(line)
            else:
                t.expect_output_lines(line, False)
    t.cleanup()
###############################################################################
#
# test_multiple_conditions_with_toolset_version()
# -----------------------------------------------
#
###############################################################################
def test_multiple_conditions_with_toolset_version():
    """
    Regression tests for properties conditioned on the toolset version
    subfeature and some additional properties.
    """
    toolset = "testToolset"
    t = BoostBuild.Tester(["--user-config=", "--ignore-site-config"],
        pass_toolset=False, use_test_config=False)
    t.write(toolset + ".jam", """\
import feature ;
feature.extend toolset : %(toolset)s ;
feature.subfeature toolset %(toolset)s : version : 0 1 ;
rule init ( version ? ) { }
""" % {"toolset": toolset})
    t.write("testToolset.py", """\
from b2.build import feature
feature.extend('toolset', ["%(toolset)s"])
feature.subfeature('toolset', "%(toolset)s", "version", ['0','1'])
def init ( version ): pass
""" % {"toolset": toolset})
    t.write("jamroot.jam", """\
import feature ;
import notfile ;
import toolset ;
toolset.using testToolset ;
feature.feature description : : free incidental ;
feature.feature aaa : 0 1 : incidental ;
feature.feature bbb : 0 1 : incidental ;
feature.feature ccc : 0 1 : incidental ;
rule buildRule ( name : targets ? : properties * )
{
    local ttt = [ feature.get-values toolset : $(properties) ] ;
    local vvv = [ feature.get-values toolset-testToolset:version : $(properties) ] ;
    local aaa = [ feature.get-values aaa : $(properties) ] ;
    local bbb = [ feature.get-values bbb : $(properties) ] ;
    local ccc = [ feature.get-values ccc : $(properties) ] ;
    ECHO "toolset:" /$(ttt)/ "version:" /$(vvv)/ "aaa/bbb/ccc:" /$(aaa)/$(bbb)/$(ccc)/ ;
    for local description in [ feature.get-values description : $(properties) ]
    {
        ECHO "description:" /$(description)/ ;
    }
}
notfile testTarget1 : @buildRule : :
    <toolset>testToolset,<aaa>0:<description>t-a0
    <toolset>testToolset,<aaa>1:<description>t-a1
    <toolset>testToolset-0,<aaa>0:<description>t0-a0
    <toolset>testToolset-0,<aaa>1:<description>t0-a1
    <toolset>testToolset-1,<aaa>0:<description>t1-a0
    <toolset>testToolset-1,<aaa>1:<description>t1-a1
    <toolset>testToolset,<aaa>0,<bbb>0:<description>t-a0-b0
    <toolset>testToolset,<aaa>0,<bbb>1:<description>t-a0-b1
    <toolset>testToolset,<aaa>1,<bbb>0:<description>t-a1-b0
    <toolset>testToolset,<aaa>1,<bbb>1:<description>t-a1-b1
    <aaa>0,<toolset>testToolset,<bbb>0:<description>a0-t-b0
    <aaa>0,<toolset>testToolset,<bbb>1:<description>a0-t-b1
    <aaa>1,<toolset>testToolset,<bbb>0:<description>a1-t-b0
    <aaa>1,<toolset>testToolset,<bbb>1:<description>a1-t-b1
    <aaa>0,<bbb>0,<toolset>testToolset:<description>a0-b0-t
    <aaa>0,<bbb>1,<toolset>testToolset:<description>a0-b1-t
    <aaa>1,<bbb>0,<toolset>testToolset:<description>a1-b0-t
    <aaa>1,<bbb>1,<toolset>testToolset:<description>a1-b1-t
    <toolset>testToolset-0,<aaa>0,<bbb>0:<description>t0-a0-b0
    <toolset>testToolset-0,<aaa>0,<bbb>1:<description>t0-a0-b1
    <toolset>testToolset-0,<aaa>1,<bbb>0:<description>t0-a1-b0
    <toolset>testToolset-0,<aaa>1,<bbb>1:<description>t0-a1-b1
    <toolset>testToolset-1,<aaa>0,<bbb>0:<description>t1-a0-b0
    <toolset>testToolset-1,<aaa>0,<bbb>1:<description>t1-a0-b1
    <toolset>testToolset-1,<aaa>1,<bbb>0:<description>t1-a1-b0
    <toolset>testToolset-1,<aaa>1,<bbb>1:<description>t1-a1-b1
    <aaa>0,<toolset>testToolset-1,<bbb>0:<description>a0-t1-b0
    <aaa>0,<toolset>testToolset-1,<bbb>1:<description>a0-t1-b1
    <aaa>1,<toolset>testToolset-0,<bbb>0:<description>a1-t0-b0
    <aaa>1,<toolset>testToolset-0,<bbb>1:<description>a1-t0-b1
    <bbb>0,<aaa>1,<toolset>testToolset-0:<description>b0-a1-t0
    <bbb>0,<aaa>0,<toolset>testToolset-1:<description>b0-a0-t1
    <bbb>0,<aaa>1,<toolset>testToolset-1:<description>b0-a1-t1
    <bbb>1,<aaa>0,<toolset>testToolset-1:<description>b1-a0-t1
    <bbb>1,<aaa>1,<toolset>testToolset-0:<description>b1-a1-t0
    <bbb>1,<aaa>1,<toolset>testToolset-1:<description>b1-a1-t1 ;
""")
    # Every conditional <description> value the jamfile can emit, in the
    # order each one is checked for after a build.
    descriptions = [
        "t-a0", "t-a1", "t0-a0", "t0-a1", "t1-a0", "t1-a1",
        "t-a0-b0", "t-a0-b1", "t-a1-b0", "t-a1-b1",
        "a0-t-b0", "a0-t-b1", "a1-t-b0", "a1-t-b1",
        "a0-b0-t", "a0-b1-t", "a1-b0-t", "a1-b1-t",
        "t0-a0-b0", "t0-a0-b1", "t0-a1-b0", "t0-a1-b1",
        "t1-a0-b0", "t1-a0-b1", "t1-a1-b0", "t1-a1-b1",
        "a0-t1-b0", "a0-t1-b1", "a1-t0-b0", "a1-t0-b1",
        "b0-a1-t0", "b0-a0-t1", "b0-a1-t1", "b1-a0-t1", "b1-a1-t0",
        "b1-a1-t1"]
    # Pairs of (toolset version, descriptions expected when building with
    # aaa=1 bbb=1 ccc=1 and that toolset version); all other descriptions
    # must be absent.
    scenarios = [
        ("0", {"t-a1", "t0-a1", "t-a1-b1", "a1-t-b1", "a1-b1-t",
            "t0-a1-b1", "a1-t0-b1", "b1-a1-t0"}),
        ("1", {"t-a1", "t1-a1", "t-a1-b1", "a1-t-b1", "a1-b1-t",
            "t1-a1-b1", "b1-a1-t1"}),
    ]
    for version, expected in scenarios:
        t.run_build_system(["aaa=1", "bbb=1", "ccc=1",
            "toolset=%s-%s" % (toolset, version)])
        for description in descriptions:
            line = "description: /%s/" % (description,)
            if description in expected:
                t.expect_output_lines(line)
            else:
                t.expect_output_lines(line, False)
    t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
# Run both scenarios on import/execution; the Tester raises on any
# unexpected build output.
test_multiple_conditions()
test_multiple_conditions_with_toolset_version()
| mit |
lkhomenk/integration_tests | cfme/automate/dialogs/dialog_box.py | 6 | 1921 | import attr
from navmazing import NavigateToAttribute
from cached_property import cached_property
from cfme.modeling.base import BaseCollection, BaseEntity, parent_of_type
from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
from . import AddBoxView, BoxForm
from .dialog_element import ElementCollection
class EditBoxView(BoxForm):
    """View shown while editing an existing box of a service dialog."""
    @property
    def is_displayed(self):
        # Displayed when the customization accordion is open on the dialog
        # editor and the title matches this box's "Box Information" page.
        return (
            self.in_customization and self.service_dialogs.is_opened and
            self.title.text == "Editing Dialog {} [Box Information]".format(self.box_label)
        )
@attr.s
class Box(BaseEntity):
    """A class representing one Box of dialog."""
    # NOTE: attr.ib declaration order defines the generated __init__
    # signature -- box_label must stay first.
    box_label = attr.ib()
    box_desc = attr.ib(default=None)
    _collections = {'elements': ElementCollection}
    @cached_property
    def elements(self):
        # Collection of dialog elements contained in this box.
        return self.collections.elements
    @property
    def tree_path(self):
        # Accordion tree path: the parent's path plus this box's label.
        return self.parent.tree_path + [self.box_label]
    @property
    def tab(self):
        # Deferred import avoids a circular dependency with dialog_tab.
        from .dialog_tab import Tab
        return parent_of_type(self, Tab)
@attr.s
class BoxCollection(BaseCollection):
    # Collection of Box entities belonging to a dialog tab.
    ENTITY = Box
    @property
    def tree_path(self):
        # Boxes are listed under the same tree node as their parent.
        return self.parent.tree_path
    def create(self, box_label=None, box_desc=None):
        """Create a box on the dialog.

        Args:
            box_label: label for the new box.
            box_desc: optional description for the new box.

        Returns:
            The instantiated ``Box`` entity.
        """
        view = navigate_to(self, "Add")
        view.new_box.click()
        # The box must be put into edit mode before its fields accept input.
        view.edit_box.click()
        view.fill({'box_label': box_label, 'box_desc': box_desc})
        view.save_button.click()
        return self.instantiate(box_label=box_label, box_desc=box_desc)
@navigator.register(BoxCollection)
class Add(CFMENavigateStep):
    """Navigation step to the "add box" form of a dialog."""
    VIEW = AddBoxView
    # Reached from the dialog's own "Add" destination two levels up.
    prerequisite = NavigateToAttribute('parent.parent', 'Add')
    def step(self):
        self.prerequisite_view.add_section.click()
| gpl-2.0 |
mrquim/mrquimrepo | repo/plugin.video.salts/salts_lib/cloudflare.py | 7 | 6746 |
#
# Copyright (C) 2015 tknorris (Derived from Mikey1234's & Lambda's)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
# This code is a derivative of the YouTube plugin for XBMC and associated works
# released under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3
import re
import urllib2
import urllib
import urlparse
import log_utils
import xbmc
from constants import USER_AGENT
# Maximum number of challenge-solving attempts before giving up.
MAX_TRIES = 3
logger = log_utils.Logger.get_logger(__name__)
# Logging is disabled by default for this module; callers may re-enable it.
logger.disable()
class NoRedirection(urllib2.HTTPErrorProcessor):
    """Opener handler that passes 3xx responses through untouched so the
    caller can harvest cookies and the Location header from each hop."""
    def http_response(self, request, response):  # @UnusedVariable
        logger.log('Stopping Redirect', log_utils.LOGDEBUG)
        return response
    # HTTPS responses are handled identically.
    https_response = http_response
def solve_equation(equation):
    """Evaluate one CloudFlare JS arithmetic challenge fragment.

    The obfuscated JS builds numbers out of tokens such as ``!+[]`` and
    ``!![]`` (both truthy, i.e. 1) and ``[]`` (i.e. 0); parenthesized
    sub-expressions are stringified like JS does before re-parsing.  Those
    tokens are mapped to their numeric values and the remaining arithmetic
    is evaluated.

    Returns the integer result, or None when the fragment cannot be
    parsed/evaluated (malformed or unrecognized challenge).
    """
    try:
        # A leading '+' is a JS unary plus; strip it so eval sees a plain
        # arithmetic expression.  An empty string raises here and falls
        # through to the failure path.
        offset = 1 if equation[0] == '+' else 0
        return int(eval(equation.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0').replace('(', 'str(')[offset:]))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still
        # propagate; any parse/eval failure signals "unsolvable" explicitly.
        return None
def solve(url, cj, user_agent=None, wait=True, extra_headers=None):
    """Fetch `url` and, when CloudFlare returns its JS challenge page,
    solve the challenge and return the final page body.

    url: page to retrieve.
    cj: optional cookie jar; loaded before the request and saved at the end
        so the clearance cookie persists between runs (may be None).
    user_agent: UA header to present; defaults to USER_AGENT.
    wait: sleep ~5 seconds before submitting the answer, mimicking the
        delay a real browser's JS would introduce.
    extra_headers: additional headers applied on redirect hops only.

    Returns the final HTML string on success, False on failure.

    NOTE(review): the challenge arithmetic is ultimately eval()'d (via
    solve_equation) from remote page content -- treat with care.
    """
    if extra_headers is None: extra_headers = {}
    if user_agent is None: user_agent = USER_AGENT
    headers = {'User-Agent': user_agent, 'Referer': url}
    if cj is not None:
        try: cj.load(ignore_discard=True)
        except: pass
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)
    request = urllib2.Request(url)
    for key in headers: request.add_header(key, headers[key])
    try:
        response = urllib2.urlopen(request)
        html = response.read()
    except urllib2.HTTPError as e:
        # CloudFlare serves the challenge with a 503; the body still holds it.
        html = e.read()
    tries = 0
    while tries < MAX_TRIES:
        # Patterns extracting the JS solver seed, the jschl_vc token, and
        # the pass token from the challenge form.
        solver_pattern = 'var (?:s,t,o,p,b,r,e,a,k,i,n,g|t,r,a),f,\s*([^=]+)={"([^"]+)":([^}]+)};.+challenge-form\'\);.*?\n.*?;(.*?);a\.value'
        vc_pattern = 'input type="hidden" name="jschl_vc" value="([^"]+)'
        pass_pattern = 'input type="hidden" name="pass" value="([^"]+)'
        init_match = re.search(solver_pattern, html, re.DOTALL)
        vc_match = re.search(vc_pattern, html)
        pass_match = re.search(pass_pattern, html)
        if not init_match or not vc_match or not pass_match:
            logger.log("Couldn't find attribute: init: |%s| vc: |%s| pass: |%s| No cloudflare check?" % (init_match, vc_match, pass_match), log_utils.LOGWARNING)
            return False
        init_dict, init_var, init_equation, equations = init_match.groups()
        vc = vc_match.group(1)
        password = pass_match.group(1)
        # logger.log("VC is: %s" % (vc), xbmc.LOGDEBUG)
        varname = (init_dict, init_var)
        # Seed value, then fold each "varname op= expression" statement in.
        result = int(solve_equation(init_equation.rstrip()))
        logger.log('Initial value: |%s| Result: |%s|' % (init_equation, result), log_utils.LOGDEBUG)
        for equation in equations.split(';'):
            equation = equation.rstrip()
            if equation[:len('.'.join(varname))] != '.'.join(varname):
                logger.log('Equation does not start with varname |%s|' % (equation), log_utils.LOGDEBUG)
            else:
                equation = equation[len('.'.join(varname)):]
                expression = equation[2:]
                operator = equation[0]
                if operator not in ['+', '-', '*', '/']:
                    logger.log('Unknown operator: |%s|' % (equation), log_utils.LOGWARNING)
                    continue
                result = int(str(eval(str(result) + operator + str(solve_equation(expression)))))
                logger.log('intermediate: %s = %s' % (equation, result), log_utils.LOGDEBUG)
        scheme = urlparse.urlparse(url).scheme
        domain = urlparse.urlparse(url).hostname
        # CloudFlare's expected answer is the computed value plus the
        # length of the challenged hostname.
        result += len(domain)
        logger.log('Final Result: |%s|' % (result), log_utils.LOGDEBUG)
        if wait:
            logger.log('Sleeping for 5 Seconds', log_utils.LOGDEBUG)
            xbmc.sleep(5000)
        url = '%s://%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s&pass=%s' % (scheme, domain, vc, result, urllib.quote(password))
        logger.log('url: |%s| headers: |%s|' % (url, headers), log_utils.LOGDEBUG)
        request = urllib2.Request(url)
        for key in headers: request.add_header(key, headers[key])
        try:
            # Follow redirects manually (NoRedirection) so cookies can be
            # captured from every intermediate hop.
            opener = urllib2.build_opener(NoRedirection)
            urllib2.install_opener(opener)
            response = urllib2.urlopen(request)
            while response.getcode() in [301, 302, 303, 307]:
                if cj is not None:
                    cj.extract_cookies(response, request)
                redir_url = response.info().getheader('location')
                if not redir_url.startswith('http'):
                    # Resolve relative Location headers against the site root.
                    base_url = '%s://%s' % (scheme, domain)
                    redir_url = urlparse.urljoin(base_url, redir_url)
                request = urllib2.Request(redir_url)
                headers.update(extra_headers)
                for key in headers: request.add_header(key, headers[key])
                if cj is not None:
                    cj.add_cookie_header(request)
                logger.log('redir url: |%s| headers: |%s|' % (redir_url, headers), log_utils.LOGDEBUG)
                response = urllib2.urlopen(request)
            final = response.read()
            if 'cf-browser-verification' in final:
                # Still on the challenge page: retry with the fresh HTML,
                # up to MAX_TRIES attempts.
                logger.log('CF Failure: html: %s url: %s' % (html, url), log_utils.LOGWARNING)
                tries += 1
                html = final
            else:
                break
        except urllib2.HTTPError as e:
            logger.log('CloudFlare HTTP Error: %s on url: %s' % (e.code, url), log_utils.LOGWARNING)
            return False
        except urllib2.URLError as e:
            logger.log('CloudFlare URLError Error: %s on url: %s' % (e, url), log_utils.LOGWARNING)
            return False
    if cj is not None:
        cj.save()
    # If all tries were exhausted this is the last (still-challenged) body.
    return final
| gpl-2.0 |
cesarmarinhorj/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/builders_unittest.py | 124 | 1909 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import builders
import unittest2 as unittest
class BuildersTest(unittest.TestCase):
    """Tests for the builders module's name-sanitizing helpers."""

    def test_path_from_name(self):
        """builder_path_from_name() must map every non-word character to '_'."""
        cases = (
            ('test', 'test'),
            ('Mac 10.6 (dbg)(1)', 'Mac_10_6__dbg__1_'),
            ('(.) ', '____'),
        )
        for raw_name, sanitized in cases:
            self.assertEqual(sanitized, builders.builder_path_from_name(raw_name))
| bsd-3-clause |
KyoungRan/Django_React_ex | Django_React_Workshop-mbrochh/django/myvenv/lib/python3.4/site-packages/django/core/checks/security/base.py | 45 | 6645 | from django.conf import settings
from .. import Tags, Warning, register
from ..utils import patch_middleware_message
# Minimum quality bar that check_secret_key (W009) enforces on SECRET_KEY.
SECRET_KEY_MIN_LENGTH = 50
SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5

# Deployment security warnings. Each check function below returns the
# matching warning (directly or wrapped by patch_middleware_message()) when
# its condition fails; the ids follow the 'security.Wxxx' system-check scheme.
W001 = Warning(
    "You do not have 'django.middleware.security.SecurityMiddleware' "
    "in your MIDDLEWARE so the SECURE_HSTS_SECONDS, "
    "SECURE_CONTENT_TYPE_NOSNIFF, "
    "SECURE_BROWSER_XSS_FILTER, and SECURE_SSL_REDIRECT settings "
    "will have no effect.",
    id='security.W001',
)

W002 = Warning(
    "You do not have "
    "'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
    "MIDDLEWARE, so your pages will not be served with an "
    "'x-frame-options' header. Unless there is a good reason for your "
    "site to be served in a frame, you should consider enabling this "
    "header to help prevent clickjacking attacks.",
    id='security.W002',
)

W004 = Warning(
    "You have not set a value for the SECURE_HSTS_SECONDS setting. "
    "If your entire site is served only over SSL, you may want to consider "
    "setting a value and enabling HTTP Strict Transport Security. "
    "Be sure to read the documentation first; enabling HSTS carelessly "
    "can cause serious, irreversible problems.",
    id='security.W004',
)

W005 = Warning(
    "You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. "
    "Without this, your site is potentially vulnerable to attack "
    "via an insecure connection to a subdomain. Only set this to True if "
    "you are certain that all subdomains of your domain should be served "
    "exclusively via SSL.",
    id='security.W005',
)

W006 = Warning(
    "Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, "
    "so your pages will not be served with an "
    "'x-content-type-options: nosniff' header. "
    "You should consider enabling this header to prevent the "
    "browser from identifying content types incorrectly.",
    id='security.W006',
)

W007 = Warning(
    "Your SECURE_BROWSER_XSS_FILTER setting is not set to True, "
    "so your pages will not be served with an "
    "'x-xss-protection: 1; mode=block' header. "
    "You should consider enabling this header to activate the "
    "browser's XSS filtering and help prevent XSS attacks.",
    id='security.W007',
)

W008 = Warning(
    "Your SECURE_SSL_REDIRECT setting is not set to True. "
    "Unless your site should be available over both SSL and non-SSL "
    "connections, you may want to either set this setting True "
    "or configure a load balancer or reverse-proxy server "
    "to redirect all connections to HTTPS.",
    id='security.W008',
)

# W009's message interpolates the module-level thresholds once, at import time.
W009 = Warning(
    "Your SECRET_KEY has less than %(min_length)s characters or less than "
    "%(min_unique_chars)s unique characters. Please generate a long and random "
    "SECRET_KEY, otherwise many of Django's security-critical features will be "
    "vulnerable to attack." % {
        'min_length': SECRET_KEY_MIN_LENGTH,
        'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS,
    },
    id='security.W009',
)

W018 = Warning(
    "You should not have DEBUG set to True in deployment.",
    id='security.W018',
)

W019 = Warning(
    "You have "
    "'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
    "MIDDLEWARE, but X_FRAME_OPTIONS is not set to 'DENY'. "
    "The default is 'SAMEORIGIN', but unless there is a good reason for "
    "your site to serve other parts of itself in a frame, you should "
    "change it to 'DENY'.",
    id='security.W019',
)

W020 = Warning(
    "ALLOWED_HOSTS must not be empty in deployment.",
    id='security.W020',
)
def _security_middleware():
    """Whether SecurityMiddleware is enabled via either middleware setting."""
    mw_name = "django.middleware.security.SecurityMiddleware"
    return (mw_name in settings.MIDDLEWARE_CLASSES or
            settings.MIDDLEWARE and mw_name in settings.MIDDLEWARE)
def _xframe_middleware():
    """Whether XFrameOptionsMiddleware is enabled via either middleware setting."""
    mw_name = "django.middleware.clickjacking.XFrameOptionsMiddleware"
    return (mw_name in settings.MIDDLEWARE_CLASSES or
            settings.MIDDLEWARE and mw_name in settings.MIDDLEWARE)
@register(Tags.security, deploy=True)
def check_security_middleware(app_configs, **kwargs):
    """Emit W001 unless SecurityMiddleware is installed."""
    if _security_middleware():
        return []
    return [patch_middleware_message(W001)]
@register(Tags.security, deploy=True)
def check_xframe_options_middleware(app_configs, **kwargs):
    """Emit W002 unless XFrameOptionsMiddleware is installed."""
    if _xframe_middleware():
        return []
    return [patch_middleware_message(W002)]
@register(Tags.security, deploy=True)
def check_sts(app_configs, **kwargs):
    """Emit W004 when SecurityMiddleware is active but HSTS is not configured."""
    if not _security_middleware() or settings.SECURE_HSTS_SECONDS:
        return []
    return [W004]
@register(Tags.security, deploy=True)
def check_sts_include_subdomains(app_configs, **kwargs):
    """Emit W005 when HSTS is on but SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True."""
    # Only meaningful when HSTS itself is in effect.
    if not _security_middleware() or not settings.SECURE_HSTS_SECONDS:
        return []
    if settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True:
        return []
    return [W005]
@register(Tags.security, deploy=True)
def check_content_type_nosniff(app_configs, **kwargs):
    """Emit W006 unless the nosniff header is enabled."""
    if not _security_middleware():
        return []
    if settings.SECURE_CONTENT_TYPE_NOSNIFF is True:
        return []
    return [W006]
@register(Tags.security, deploy=True)
def check_xss_filter(app_configs, **kwargs):
    """Emit W007 unless the browser XSS filter header is enabled."""
    if not _security_middleware():
        return []
    if settings.SECURE_BROWSER_XSS_FILTER is True:
        return []
    return [W007]
@register(Tags.security, deploy=True)
def check_ssl_redirect(app_configs, **kwargs):
    """Emit W008 unless SSL redirection is enabled."""
    if not _security_middleware():
        return []
    if settings.SECURE_SSL_REDIRECT is True:
        return []
    return [W008]
@register(Tags.security, deploy=True)
def check_secret_key(app_configs, **kwargs):
    """Emit W009 unless SECRET_KEY exists and is long and varied enough."""
    secret = getattr(settings, 'SECRET_KEY', None)
    strong_enough = (
        secret and
        len(set(secret)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and
        len(secret) >= SECRET_KEY_MIN_LENGTH
    )
    return [] if strong_enough else [W009]
@register(Tags.security, deploy=True)
def check_debug(app_configs, **kwargs):
    """Emit W018 when DEBUG is left on in a deployment check."""
    return [W018] if settings.DEBUG else []
@register(Tags.security, deploy=True)
def check_xframe_deny(app_configs, **kwargs):
    """Emit W019 when clickjacking middleware is on but not set to DENY."""
    if not _xframe_middleware():
        return []
    if settings.X_FRAME_OPTIONS == 'DENY':
        return []
    return [patch_middleware_message(W019)]
@register(Tags.security, deploy=True)
def check_allowed_hosts(app_configs, **kwargs):
    """Emit W020 when ALLOWED_HOSTS is empty."""
    if settings.ALLOWED_HOSTS:
        return []
    return [W020]
| mit |
mantl/mantl | roles/collectd/files/zookeeper-collectd-plugin.py | 36 | 4447 | #! /usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Check Zookeeper Cluster
Zookeeper collectd module adapted from
https://svn.apache.org/repos/asf/zookeeper/trunk/src/contrib/monitoring/check_zookeeper.py
It requires ZooKeeper 3.4.0 or greater. The script needs the 'mntr' 4letter word
command (patch ZOOKEEPER-744) that was now commited to the trunk.
The script also works with ZooKeeper 3.3.x but in a limited way.
"""
import sys
import socket
import re
import collectd
from StringIO import StringIO
# Default ZooKeeper servers to poll; replaced by the 'Hosts' config key in
# configure_callback().
ZK_HOSTS = ["192.168.10.2"]
# Stats dispatched to collectd with type "counter"; everything else is a gauge.
COUNTERS = ["zk_packets_received", "zk_packets_sent"]
class ZooKeeperServer(object):
    """Minimal client for ZooKeeper's 4-letter-word admin protocol ('mntr')."""

    def __init__(self, host='localhost', port='2181', timeout=1):
        self._address = (host, int(port))
        self._timeout = timeout

    def get_stats(self):
        """ Get ZooKeeper server stats as a map """
        data = self._send_cmd('mntr')
        return self._parse(data)

    def _create_socket(self):
        return socket.socket()

    def _send_cmd(self, cmd):
        """ Send a 4letter word command to the server and return the full reply.

        The server closes the connection when the reply is complete, so read
        until EOF: a single recv() call could truncate 'mntr' output, which
        can exceed one receive buffer on busy ensembles.
        """
        s = self._create_socket()
        try:
            s.settimeout(self._timeout)
            s.connect(self._address)
            # sendall() retries short writes; a bare send() may transmit only
            # part of the command.
            s.sendall(cmd)
            chunks = []
            while True:
                chunk = s.recv(4096)
                if not chunk:
                    break
                chunks.append(chunk)
        finally:
            # Always release the socket, even on timeouts/connect failures.
            s.close()
        return ''.join(chunks)

    def _parse(self, data):
        """ Parse the output from the 'mntr' 4letter word command """
        result = {}
        for line in data.splitlines():
            try:
                key, value = self._parse_line(line)
                # Non-numeric status lines are excluded from the stats map.
                if key not in ['zk_server_state', 'zk_version']:
                    result[key] = value
            except ValueError:
                pass  # ignore broken lines
        return result

    def _parse_line(self, line):
        """Split a 'key<TAB>value' line; int-convert the value when possible.

        Raises ValueError for lines without exactly one key/value pair or
        with an empty key.
        """
        try:
            key, value = map(str.strip, line.split('\t'))
        except ValueError:
            raise ValueError('Found invalid line: %s' % line)
        if not key:
            raise ValueError('The key is mandatory and should not be empty')
        try:
            value = int(value)
        except (TypeError, ValueError):
            pass
        return key, value
def read_callback():
    """ Get stats for all the servers in the cluster and dispatch to collectd. """
    for host in ZK_HOSTS:
        try:
            zk = ZooKeeperServer(host)
            stats = zk.get_stats()
            for k, v in stats.items():
                try:
                    val = collectd.Values(plugin='zookeeper', meta={'0': True})
                    val.type = "counter" if k in COUNTERS else "gauge"
                    val.type_instance = k
                    val.values = [v]
                    val.dispatch()
                except (TypeError, ValueError):
                    collectd.error('error dispatching stat; host=%s, key=%s, val=%s' % (host, k, v))
        except socket.error:
            # ignore because the cluster can still work even
            # if some servers fail completely
            # this error should be also visible in a variable
            # exposed by the server in the statistics
            log('unable to connect to server "%s"' % (host))
    # NOTE: collectd ignores the return value of read callbacks. The previous
    # 'return stats' raised NameError whenever every host was unreachable
    # (stats never got assigned), so no value is returned any more.
def configure_callback(conf):
    """Received configuration information"""
    global ZK_HOSTS
    for option in conf.children:
        if option.key != 'Hosts':
            # Unknown keys are reported but otherwise ignored.
            collectd.warning('zookeeper plugin: Unknown config key: %s.'
                             % option.key)
            continue
        ZK_HOSTS = option.values[0].split(',')
    log('Configured with hosts=%s' % (ZK_HOSTS))
def log(msg):
    """Write an informational message through collectd's logger."""
    collectd.info('zookeeper plugin: {0}'.format(msg))
# Hook the plugin into collectd: configuration first, then periodic reads.
collectd.register_config(configure_callback)
collectd.register_read(read_callback)
| apache-2.0 |
DavidNorman/tensorflow | tensorflow/python/ops/parallel_for/array_test.py | 1 | 11942 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of array kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ArrayTest(PForTestCase):
  """Vectorization (pfor) tests for array ops.

  Each test defines a `loop_fn(i)` that typically gathers the i-th slice of a
  random input and applies the op under test; `_test_loop_fn` (inherited from
  PForTestCase) compares the pfor-vectorized result against a plain for-loop
  baseline over the given number of iterations.
  """

  def test_gather(self):
    x = random_ops.random_uniform([3, 3, 3])
    x2 = array_ops.placeholder_with_default(x, shape=None)  # Has dynamic shape.

    def loop_fn(i):
      outputs = []
      x_i = array_ops.gather(x, i)
      # For the already-gathered 2-D slice only axis 0 is valid.
      for y in [x, x2, x_i]:
        axes = [0] if y is x_i else [0, 2, -1]
        for axis in axes:
          outputs.append(array_ops.gather(y, 2, axis=axis))
          outputs.append(array_ops.gather(y, i, axis=axis))
          outputs.append(array_ops.gather(y, [i], axis=axis))
          outputs.append(array_ops.gather(y, [i, 2], axis=axis))
          outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
      return outputs

    self._test_loop_fn(loop_fn, 3)

  def test_gather_nd(self):
    x = random_ops.random_uniform([3, 3, 3])

    def loop_fn(i):
      outputs = []
      x_i = array_ops.gather(x, i)
      outputs.append(array_ops.gather_nd(x_i, [0], batch_dims=0))
      outputs.append(array_ops.gather_nd(x_i, [i], batch_dims=0))
      outputs.append(array_ops.gather_nd(x_i, [[i], [i], [i]], batch_dims=1))
      return outputs

    self._test_loop_fn(loop_fn, 3)

  def test_shape(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)

    self._test_loop_fn(loop_fn, 3)

  def test_size(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)

    self._test_loop_fn(loop_fn, 3)

  def test_rank(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.rank(x_i)

    self._test_loop_fn(loop_fn, 3)

  def test_shape_n(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      y_i = array_ops.gather(y, i)
      return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(
          [x_i, x, y, y_i], out_type=dtypes.int64)

    self._test_loop_fn(loop_fn, 3)

  def test_reshape(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])

    self._test_loop_fn(loop_fn, 3)

  def test_broadcast_to(self):
    x = random_ops.random_uniform([3, 2, 1, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return (array_ops.broadcast_to(x1, [2, 2, 3]),
              array_ops.broadcast_to(x1, [1, 2, 1, 3]))

    self._test_loop_fn(loop_fn, 3)

  def test_expand_dims(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.expand_dims(
          x1, axis=-1), array_ops.expand_dims(
              x1, axis=1)

    self._test_loop_fn(loop_fn, 3)

  def test_one_hot(self):
    indices = random_ops.random_uniform(
        [3, 2, 3], minval=0, maxval=4, dtype=dtypes.int32)

    def loop_fn(i):
      indices_i = array_ops.gather(indices, i)
      return (array_ops.one_hot(indices_i, depth=4, on_value=2., off_value=-2.),
              array_ops.one_hot(indices_i, depth=4, axis=1))

    self._test_loop_fn(loop_fn, 3)

  def test_searchsorted(self):
    # cumsum makes each innermost row sorted, as searchsorted requires.
    sorted_inputs = math_ops.cumsum(random_ops.random_uniform([3, 2, 4]),
                                    axis=-1)
    values = random_ops.random_uniform([2, 3], minval=-1, maxval=4.5)

    def loop_fn(i):
      inputs_i = array_ops.gather(sorted_inputs, i)
      return [array_ops.searchsorted(inputs_i, values, out_type=dtypes.int32,
                                     side="left"),  # creates LowerBound op.
              array_ops.searchsorted(inputs_i, values, out_type=dtypes.int64,
                                     side="right")]  # creates UpperBound op.

    self._test_loop_fn(loop_fn, 3)

  def test_slice(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.slice(x1, begin=(0, 1), size=(2, 1))

    self._test_loop_fn(loop_fn, 3)

  def test_tile(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.tile(x1, [2, 1])

    self._test_loop_fn(loop_fn, 3)

  def test_tile_loop_dependent(self):
    # Tiling by a loop-dependent multiple is unsupported and must raise.
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.tile(x1, [i, 1])

    with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
      pfor_control_flow_ops.pfor(loop_fn, 2)

  def test_pack(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.stack([x1, y], axis=-1)

    self._test_loop_fn(loop_fn, 1)

  def test_unpack(self):
    x = random_ops.random_uniform([3, 2, 3, 4])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.unstack(
          x_i, 4, axis=-1), array_ops.unstack(
              x_i, 3, axis=1)

    self._test_loop_fn(loop_fn, 3)

  def test_pad(self):
    x = random_ops.random_uniform([3, 2, 3])
    padding = constant_op.constant([[1, 2], [3, 4]])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.pad(x1, padding, mode="CONSTANT")

    self._test_loop_fn(loop_fn, 3)

  def test_split(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)

    self._test_loop_fn(loop_fn, 3)

  def test_split_v(self):
    x = random_ops.random_uniform([3, 6, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return (array_ops.split(x1, [2, 1, 3], axis=0),
              array_ops.split(x1, [3], axis=-1))

    self._test_loop_fn(loop_fn, 3)

  def test_squeeze(self):
    x = random_ops.random_uniform([5, 1, 2, 1])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return (array_ops.squeeze(x1, axis=0),
              array_ops.squeeze(x1, axis=-1),
              array_ops.squeeze(x1))

    self._test_loop_fn(loop_fn, 3)

  def test_transpose(self):
    x = random_ops.random_uniform([3, 2, 3, 4])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.transpose(x1, [2, 1, 0])

    self._test_loop_fn(loop_fn, 3)

  def test_zeros_like(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      # NOTE(review): the trailing comma makes z a 1-tuple, so 'z + x1'
      # exercises tuple-to-tensor conversion -- confirm this is intentional.
      z = array_ops.zeros_like(x1),
      return z, z + x1

    self._test_loop_fn(loop_fn, 3)

  def test_concat_v2(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.concat(
          [x1, x1, y], axis=0), array_ops.concat(
              [x1, x1, y], axis=-1)

    self._test_loop_fn(loop_fn, 3)

  def test_unary_cwise_ops(self):
    for op in [array_ops.identity, array_ops.stop_gradient]:
      with backprop.GradientTape(persistent=True) as g:
        x = random_ops.random_uniform([3, 5])
        g.watch(x)

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        with g:
          x1 = array_ops.gather(x, i)
          y = op(x1) + x1
          loss = nn.l2_loss(y)
        return op(x), y, g.gradient(loss, x1)

      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3)

  def test_identity_n(self):
    x = random_ops.random_uniform([3, 4])

    def loop_fn(i):
      return array_ops.identity_n([x, array_ops.gather(x, i)])

    self._test_loop_fn(loop_fn, 3)

  def test_matrix_band_part(self):
    x = random_ops.random_uniform([3, 4, 2, 2])

    for num_lower, num_upper in ((0, -1), (-1, 0), (1, 1)):
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        return array_ops.matrix_band_part(
            array_ops.gather(x, i),
            num_lower=num_lower,
            num_upper=num_upper)

      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3)

  def test_matrix_diag(self):
    x = random_ops.random_uniform([3, 2, 4])

    def loop_fn(i):
      diagonal = array_ops.gather(x, i)
      # The k/num_rows/num_cols arguments only exist past this compat window.
      if compat.forward_compatible(2019, 10, 31):
        return array_ops.matrix_diag(diagonal, k=(0, 1), num_rows=4, num_cols=5)
      return array_ops.matrix_diag(diagonal)

    self._test_loop_fn(loop_fn, 3)

  def test_matrix_diag_part(self):
    x = random_ops.random_uniform([3, 4, 6])

    def loop_fn(i):
      input = array_ops.gather(x, i)  # pylint: disable=redefined-builtin
      if compat.forward_compatible(2019, 10, 31):
        return array_ops.matrix_diag_part(input, k=(-2, 0), padding_value=3)
      return array_ops.matrix_diag_part(input)

    self._test_loop_fn(loop_fn, 3)

  def test_matrix_set_diag(self):
    matrices = random_ops.random_uniform([3, 4, 4])
    diags = random_ops.random_uniform([3, 4])
    if compat.forward_compatible(2019, 10, 31):
      bands = random_ops.random_uniform([3, 3, 4])

    def loop_fn(i):
      matrix_i = array_ops.gather(matrices, i)
      diag_i = array_ops.gather(diags, i)
      results = [
          array_ops.matrix_set_diag(matrix_i, diag_i),
          array_ops.matrix_set_diag(matrices[0, ...], diag_i),
          array_ops.matrix_set_diag(matrix_i, diags[0, ...])
      ]
      # Band (multi-diagonal) variants require the newer k= argument.
      if compat.forward_compatible(2019, 10, 31):
        k = (-1, 1)
        band_i = array_ops.gather(bands, i)
        results.extend([
            array_ops.matrix_set_diag(matrix_i, band_i, k=k),
            array_ops.matrix_set_diag(matrices[0, ...], band_i, k=k),
            array_ops.matrix_set_diag(matrix_i, bands[0, ...], k=k)
        ])
      return results

    self._test_loop_fn(loop_fn, 3)

  def test_strided_slice(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
      g.watch(x)

    def loop_fn(i):
      with g:
        x_i = array_ops.gather(x, i)
        y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
        loss = nn.l2_loss(y)
      return y, g.gradient(loss, x_i)

    self._test_loop_fn(loop_fn, 3)
# Standard TF test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
elsigh/browserscope | third_party/appengine_tools/devappserver2/start_response_utils_test.py | 10 | 2159 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.start_response_utils."""
import unittest
from google.appengine.tools.devappserver2 import start_response_utils
class TestCapturingStartResponse(unittest.TestCase):
  """Tests for start_response_util.CapturingStartResponse."""

  def test_success(self):
    """A normal call records status, headers, and the written body."""
    capture = start_response_utils.CapturingStartResponse()
    body = capture('200 OK', [('header1', 'value1')])
    body.write('Hello World!')

    self.assertEqual('200 OK', capture.status)
    self.assertEqual(None, capture.exc_info)
    self.assertEqual([('header1', 'value1')], capture.response_headers)
    self.assertEqual('Hello World!', capture.response_stream.getvalue())

  def test_exception(self):
    """A second call with exc_info replaces status and headers."""
    exc_info = (object(), object(), object())
    capture = start_response_utils.CapturingStartResponse()
    capture('200 OK', [('header1', 'value1')])
    capture('500 Internal Server Error', [], exc_info)

    self.assertEqual('500 Internal Server Error', capture.status)
    self.assertEqual(exc_info, capture.exc_info)
    self.assertEqual([], capture.response_headers)

  def test_merged_response(self):
    """merged_response() appends the iterable's chunks to the captured body."""
    capture = start_response_utils.CapturingStartResponse()
    body = capture('200 OK', [('header1', 'value1')])
    body.write('Hello World!')

    self.assertEqual('Hello World! Goodbye World!',
                     capture.merged_response([' Goodbye ', 'World!']))
# Standard unittest entry point.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
h3biomed/ansible | lib/ansible/plugins/action/raw.py | 152 | 1823 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Execute a raw (module-less) command on the remote host."""

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        """Run the raw command and return the task result dict."""
        if task_vars is None:
            task_vars = dict()

        env = self._task.environment
        if env and any(env):
            self._display.warning('raw module does not support the environment keyword')

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # in --check mode, always skip this module execution
        if self._play_context.check_mode:
            result['skipped'] = True
            return result

        raw_params = self._task.args.get('_raw_params')
        exe = self._task.args.get('executable', False)
        result.update(self._low_level_execute_command(raw_params, executable=exe))
        result['changed'] = True

        if result.get('rc', 0) != 0:
            result['failed'] = True
            result['msg'] = 'non-zero return code'

        return result
| gpl-3.0 |
kevclarx/ansible | lib/ansible/plugins/connection/paramiko_ssh.py | 11 | 18902 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ---
# The paramiko transport is provided because many distributions, in particular EL6 and before
# do not support ControlPersist in their SSH implementations. This is needed on the Ansible
# control machine to be reasonably efficient with connections. Thus paramiko is faster
# for most users on these platforms. Users with ControlPersist capability can consider
# using -c ssh or configuring the transport in ansible.cfg.
import warnings
import os
import socket
import logging
import tempfile
import traceback
import fcntl
import sys
import re
from termios import tcflush, TCIFLUSH
from binascii import hexlify
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import input
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import makedirs_safe
from ansible.module_utils._text import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Prompt template shown by MyAddPolicy.missing_host_key when an unknown host
# key needs interactive confirmation.
AUTHENTICITY_MSG="""
paramiko: The authenticity of host '%s' can't be established.
The %s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)?
"""

# SSH Options Regex: matches 'key=value' or 'key value' option tokens,
# used by Connection._parse_proxy_command to find ProxyCommand.
SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')

# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
# paramiko is imported optionally; _connect_uncached raises when it's absent.
HAVE_PARAMIKO=False
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    try:
        import paramiko
        HAVE_PARAMIKO=True
        logging.getLogger("paramiko").setLevel(logging.WARNING)
    except ImportError:
        pass
class MyAddPolicy(object):
    """
    Based on AutoAddPolicy in paramiko so we can determine when keys are added

    and also prompt for input.

    Policy for automatically adding the hostname and new host key to the
    local L{HostKeys} object, and saving it.  This is used by L{SSHClient}.
    """

    def __init__(self, new_stdin, connection):
        # new_stdin: the stdin to read the interactive confirmation from
        # (presumably the worker's own stdin rather than the parent's --
        # TODO confirm against the caller in _connect_uncached).
        self._new_stdin = new_stdin
        self.connection = connection

    def missing_host_key(self, client, hostname, key):
        """Prompt the user (when allowed) before accepting an unknown host key."""

        # Only prompt when host key checking is on AND auto-add is off.
        if all((C.HOST_KEY_CHECKING, not C.PARAMIKO_HOST_KEY_AUTO_ADD)):

            # Persistent connections have no interactive terminal to ask on.
            if C.USE_PERSISTENT_CONNECTIONS:
                raise AnsibleConnectionFailure('rejected %s host key for host %s: %s' % (key.get_name(), hostname, hexlify(key.get_fingerprint())))

            # Serialize prompting across connections, then temporarily swap
            # sys.stdin so input() reads from the provided stdin.
            self.connection.connection_lock()

            old_stdin = sys.stdin
            sys.stdin = self._new_stdin

            # clear out any premature input on sys.stdin
            tcflush(sys.stdin, TCIFLUSH)

            fingerprint = hexlify(key.get_fingerprint())
            ktype = key.get_name()

            inp = input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))

            # Restore stdin and release the lock before acting on the answer.
            sys.stdin = old_stdin
            self.connection.connection_unlock()

            if inp not in ['yes','y','']:
                raise AnsibleError("host connection rejected by user")

        # Mark the key so close() can tell which keys this run added.
        key._added_by_ansible_this_time = True

        # existing implementation below:
        client._host_keys.add(hostname, key.get_name(), key)

        # host keys are actually saved in close() function below
        # in order to control ordering.
# keep connection objects on a per host basis to avoid repeated attempts to reconnect
# Both caches are keyed by Connection._cache_key(), i.e. remote address + user.
SSH_CONNECTION_CACHE = {}
SFTP_CONNECTION_CACHE = {}
class Connection(ConnectionBase):
    ''' SSH based connections with Paramiko '''

    # Connection-plugin identifier reported to Ansible.
    transport = 'paramiko'
def transport_test(self, connect_timeout):
''' Test the transport mechanism, if available '''
host = self._play_context.remote_addr
port = int(self._play_context.port or 22)
display.vvv("attempting transport test to %s:%s" % (host, port))
sock = socket.create_connection((host, port), connect_timeout)
sock.close()
def _cache_key(self):
return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
def _connect(self):
cache_key = self._cache_key()
if cache_key in SSH_CONNECTION_CACHE:
self.ssh = SSH_CONNECTION_CACHE[cache_key]
else:
self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
return self
    def _parse_proxy_command(self, port=22):
        """Extract a ProxyCommand from the ssh args settings and return the
        kwargs dict ({'sock': paramiko.ProxyCommand(...)} or {}) to pass to
        SSHClient.connect()."""
        proxy_command = None
        # Parse ansible_ssh_common_args, specifically looking for ProxyCommand
        ssh_args = [
            getattr(self._play_context, 'ssh_extra_args', '') or '',
            getattr(self._play_context, 'ssh_common_args', '') or '',
            getattr(self._play_context, 'ssh_args', '') or '',
        ]
        # NOTE(review): ssh_args is a freshly-built list, so this check is
        # always true -- likely left over from an earlier implementation.
        if ssh_args is not None:
            args = self._split_ssh_args(' '.join(ssh_args))
            for i, arg in enumerate(args):
                if arg.lower() == 'proxycommand':
                    # _split_ssh_args split ProxyCommand from the command itself
                    proxy_command = args[i + 1]
                else:
                    # ProxyCommand and the command itself are a single string
                    match = SETTINGS_REGEX.match(arg)
                    if match:
                        if match.group(1).lower() == 'proxycommand':
                            proxy_command = match.group(2)

                # Stop at the first ProxyCommand found.
                if proxy_command:
                    break

        # Fall back to the global configuration default.
        proxy_command = proxy_command or C.PARAMIKO_PROXY_COMMAND

        sock_kwarg = {}
        if proxy_command:
            # Substitute ssh_config-style tokens: %h host, %p port, %r user.
            replacers = {
                '%h': self._play_context.remote_addr,
                '%p': port,
                '%r': self._play_context.remote_user
            }
            for find, replace in replacers.items():
                proxy_command = proxy_command.replace(find, str(replace))
            try:
                sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
                display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self._play_context.remote_addr)
            except AttributeError:
                # Older paramiko has no ProxyCommand attribute.
                display.warning('Paramiko ProxyCommand support unavailable. '
                                'Please upgrade to Paramiko 1.9.0 or newer. '
                                'Not using configured ProxyCommand')

        return sock_kwarg
    def _connect_uncached(self):
        ''' activates the connection object '''
        # Returns a connected paramiko.SSHClient; raises AnsibleError /
        # AnsibleConnectionFailure on any failure.

        if not HAVE_PARAMIKO:
            raise AnsibleError("paramiko is not installed")

        port = self._play_context.port or 22
        display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr),
                    host=self._play_context.remote_addr)

        ssh = paramiko.SSHClient()

        self.keyfile = os.path.expanduser("~/.ssh/known_hosts")

        if C.HOST_KEY_CHECKING:
            # Load the first system-wide known_hosts file that exists, then
            # the user's own known hosts.
            for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"):
                try:
                    #TODO: check if we need to look at several possible locations, possible for loop
                    ssh.load_system_host_keys(ssh_known_hosts)
                    break
                except IOError:
                    pass # file was not found, but not required to function
            ssh.load_system_host_keys()

        sock_kwarg = self._parse_proxy_command(port)

        # Unknown host keys are handled (and possibly prompted for) by
        # MyAddPolicy above.
        ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))

        # When a password is supplied, don't let the ssh-agent override it.
        allow_agent = True

        if self._play_context.password is not None:
            allow_agent = False

        try:
            key_filename = None
            if self._play_context.private_key_file:
                key_filename = os.path.expanduser(self._play_context.private_key_file)

            ssh.connect(
                self._play_context.remote_addr,
                username=self._play_context.remote_user,
                allow_agent=allow_agent,
                look_for_keys=C.PARAMIKO_LOOK_FOR_KEYS,
                key_filename=key_filename,
                password=self._play_context.password,
                timeout=self._play_context.timeout,
                port=port,
                **sock_kwarg
            )
        except paramiko.ssh_exception.BadHostKeyException as e:
            raise AnsibleConnectionFailure('host key mismatch for %s' % e.hostname)
        except Exception as e:
            # Map a few known paramiko error strings to friendlier messages.
            msg = str(e)
            if "PID check failed" in msg:
                raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
            elif "Private key file is encrypted" in msg:
                msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
                    self._play_context.remote_user, self._play_context.remote_addr, port, msg)
                raise AnsibleConnectionFailure(msg)
            else:
                raise AnsibleConnectionFailure(msg)

        return ssh
def exec_command(self, cmd, in_data=None, sudoable=True):
    '''
    Run a command on the remote host over the established SSH transport.

    Returns a (exit_status, stdout, stderr) tuple (stdout/stderr as bytes).
    Raises AnsibleError for unsupported pipelining or privilege-escalation
    failures, and AnsibleConnectionFailure when a session cannot be opened.
    '''
    super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

    if in_data:
        raise AnsibleError("Internal Error: this module does not support optimized module pipelining")

    bufsize = 4096

    try:
        # Keepalives prevent long-running tasks from dropping idle transports.
        self.ssh.get_transport().set_keepalive(5)
        chan = self.ssh.get_transport().open_session()
    except Exception as e:
        msg = "Failed to open session"
        if len(str(e)) > 0:
            msg += ": %s" % str(e)
        raise AnsibleConnectionFailure(msg)

    # sudo usually requires a PTY (cf. requiretty option), therefore
    # we give it one by default (pty=True in ansible.cfg), and we try
    # to initialise from the calling environment when sudoable is enabled
    if C.PARAMIKO_PTY and sudoable:
        chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))

    display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)

    cmd = to_bytes(cmd, errors='surrogate_or_strict')

    no_prompt_out = b''
    no_prompt_err = b''
    become_output = b''

    try:
        chan.exec_command(cmd)
        if self._play_context.prompt:
            # Privilege escalation is in play: consume output until we either
            # see the become success marker or the password prompt.
            passprompt = False
            become_success = False
            while not (become_success or passprompt):
                display.debug('Waiting for Privilege Escalation input')

                chunk = chan.recv(bufsize)
                display.debug("chunk is: %s" % chunk)
                if not chunk:
                    if b'unknown user' in become_output:
                        raise AnsibleError('user %s does not exist' % self._play_context.become_user)
                    else:
                        break
                        #raise AnsibleError('ssh connection closed waiting for password prompt')
                become_output += chunk

                # need to check every line because we might get lectured
                # and we might get the middle of a line in a chunk
                for line in become_output.splitlines(True):
                    if self.check_become_success(line):
                        become_success = True
                        break
                    elif self.check_password_prompt(line):
                        passprompt = True
                        break

            if passprompt:
                if self._play_context.become and self._play_context.become_pass:
                    chan.sendall(to_bytes(self._play_context.become_pass) + b'\n')
                else:
                    raise AnsibleError("A password is required but none was supplied")
            else:
                # No prompt was ever shown: whatever we swallowed while
                # scanning belongs to the command's own output.
                no_prompt_out += become_output
                no_prompt_err += become_output
    except socket.timeout:
        # NOTE(review): become_output is bytes; this concatenation assumes a
        # str/bytes-compatible runtime (Python 2) -- confirm before porting.
        raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)

    stdout = b''.join(chan.makefile('rb', bufsize))
    stderr = b''.join(chan.makefile_stderr('rb', bufsize))

    # Fixed: stderr previously had no_prompt_out prepended while
    # no_prompt_err was computed but never used.
    return (chan.recv_exit_status(), no_prompt_out + stdout, no_prompt_err + stderr)
def put_file(self, in_path, out_path):
    '''
    Copy a local file to the remote host via SFTP.
    Raises AnsibleFileNotFound when the local file is missing and
    AnsibleError when the SFTP channel or the transfer itself fails.
    '''
    super(Connection, self).put_file(in_path, out_path)

    display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

    b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
    if not os.path.exists(b_in_path):
        raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)

    try:
        self.sftp = self.ssh.open_sftp()
    except Exception as e:
        raise AnsibleError("failed to open a SFTP connection (%s)" % e)

    try:
        self.sftp.put(b_in_path, to_bytes(out_path, errors='surrogate_or_strict'))
    except IOError:
        raise AnsibleError("failed to transfer file to %s" % out_path)
def _connect_sftp(self):
    '''Return an SFTP channel for this host/user, reusing a cached one when possible.'''
    cache_key = "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
    try:
        return SFTP_CONNECTION_CACHE[cache_key]
    except KeyError:
        # No cached channel yet: establish the SSH connection (cached itself)
        # and open a fresh SFTP session on top of it.
        sftp = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
        return sftp
def fetch_file(self, in_path, out_path):
    '''
    Save a remote file to the specified local path via SFTP.
    Raises AnsibleError when the SFTP channel or the transfer fails.
    '''
    super(Connection, self).fetch_file(in_path, out_path)

    display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)

    try:
        self.sftp = self._connect_sftp()
    except Exception as e:
        # Fixed: the exception was previously passed as a second positional
        # argument instead of being %-formatted into the message.
        raise AnsibleError("failed to open a SFTP connection (%s)" % e)

    try:
        self.sftp.get(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
    except IOError:
        raise AnsibleError("failed to transfer file from %s" % in_path)
def _any_keys_added(self):
    '''Return True when any host key was learned during this connection.'''
    # Keys tagged by our missing-host-key policy carry the
    # _added_by_ansible_this_time marker attribute.
    return any(
        getattr(key, '_added_by_ansible_this_time', False)
        for hostname, keys in iteritems(self.ssh._host_keys)
        for keytype, key in iteritems(keys)
    )
def _save_ssh_host_keys(self, filename):
    '''
    not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
    don't complain about it :)

    Returns False (without writing) when nothing new was added this run.
    '''
    if not self._any_keys_added():
        return False

    path = os.path.expanduser("~/.ssh")
    makedirs_safe(path)

    # Two passes over the same key set: pre-existing keys first, then the
    # keys added during this run, so new hosts always land at the bottom.
    # The context manager also guarantees the file is closed on error
    # (the original left the handle open if a write raised).
    with open(filename, 'w') as f:
        for write_added in (False, True):
            for hostname, keys in iteritems(self.ssh._host_keys):
                for keytype, key in iteritems(keys):
                    added_this_time = getattr(key, '_added_by_ansible_this_time', False)
                    if added_this_time == write_added:
                        f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
def close(self):
    '''
    Terminate the connection: drop this connection from the SSH/SFTP
    caches, close the SFTP channel if one was opened, optionally persist
    newly learned host keys to the user's known_hosts, and finally close
    the underlying SSH client.
    '''
    cache_key = self._cache_key()
    SSH_CONNECTION_CACHE.pop(cache_key, None)
    SFTP_CONNECTION_CACHE.pop(cache_key, None)

    if hasattr(self, 'sftp'):
        if self.sftp is not None:
            self.sftp.close()

    if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():

        # add any new SSH host keys -- warning -- this could be slow
        # (This doesn't acquire the connection lock because it needs
        # to exclude only other known_hosts writers, not connections
        # that are starting up.)
        lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock")
        dirname = os.path.dirname(self.keyfile)
        makedirs_safe(dirname)

        # NOTE(review): the lock file descriptor is unlocked below but never
        # explicitly closed -- relies on interpreter cleanup; confirm intended.
        KEY_LOCK = open(lockfile, 'w')
        fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)

        try:
            # just in case any were added recently

            self.ssh.load_system_host_keys()
            self.ssh._host_keys.update(self.ssh._system_host_keys)

            # gather information about the current key file, so
            # we can ensure the new file has the correct mode/owner

            key_dir = os.path.dirname(self.keyfile)
            if os.path.exists(self.keyfile):
                key_stat = os.stat(self.keyfile)
                mode = key_stat.st_mode
                uid = key_stat.st_uid
                gid = key_stat.st_gid
            else:
                # 33188 == 0o100644: a regular file with rw-r--r-- permissions.
                mode = 33188
                uid = os.getuid()
                gid = os.getgid()

            # Save the new keys to a temporary file and move it into place
            # rather than rewriting the file. We set delete=False because
            # the file will be moved into place rather than cleaned up.

            tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
            os.chmod(tmp_keyfile.name, mode & 0o7777)
            os.chown(tmp_keyfile.name, uid, gid)

            self._save_ssh_host_keys(tmp_keyfile.name)
            tmp_keyfile.close()

            # Atomic replace: readers never observe a half-written known_hosts.
            os.rename(tmp_keyfile.name, self.keyfile)

        except:
            # unable to save keys, including scenario when key was invalid
            # and caught earlier
            traceback.print_exc()
            pass
        fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

    self.ssh.close()
| gpl-3.0 |
rbaindourov/v8-inspector | Source/chrome/v8/tools/release/auto_roll.py | 22 | 4282 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
import urllib
from common_includes import *
import chromium_roll
class CheckActiveRoll(Step):
MESSAGE = "Check active roll."
@staticmethod
def ContainsChromiumRoll(changes):
for change in changes:
if change["subject"].startswith("Update V8 to"):
return True
return False
def RunStep(self):
params = {
"closed": 3,
"owner": self._options.author,
"limit": 30,
"format": "json",
}
params = urllib.urlencode(params)
search_url = "https://codereview.chromium.org/search"
result = self.ReadURL(search_url, params, wait_plan=[5, 20])
if self.ContainsChromiumRoll(json.loads(result)["results"]):
print "Stop due to existing Chromium roll."
return True
class DetectLastRoll(Step):
  """Determine which V8 revision Chromium currently pins, and pick the
  newest recent release as the candidate for the next roll. Stops the
  pipeline (returns True) if no newer revision exists."""
  MESSAGE = "Detect commit ID of the last Chromium roll."

  def RunStep(self):
    # The revision that should be rolled. Check for the latest of the most
    # recent releases based on commit timestamp.
    revisions = self.GetRecentReleases(
        max_age=self._options.max_age * DAY_IN_SECONDS)
    assert revisions, "Didn't find any recent release."

    # Interpret the DEPS file to retrieve the v8 revision.
    # TODO(machenbach): This should be part or the roll-deps api of
    # depot_tools.
    # DEPS files call Var(...) and assign a 'vars' dict; the stub lambda
    # lets exec() evaluate the file, after which the local name 'vars'
    # (shadowing the builtin) holds the DEPS variables.
    Var = lambda var: '%s'
    exec(FileToText(os.path.join(self._options.chromium, "DEPS")))

    # The revision rolled last.
    self["last_roll"] = vars['v8_revision']
    last_version = self.GetVersionTag(self["last_roll"])
    assert last_version, "The last rolled v8 revision is not tagged."

    # There must be some progress between the last roll and the new candidate
    # revision (i.e. we don't go backwards). The revisions are ordered newest
    # to oldest. It is possible that the newest timestamp has no progress
    # compared to the last roll, i.e. if the newest release is a cherry-pick
    # on a release branch. Then we look further.
    for revision in revisions:
      version = self.GetVersionTag(revision)
      assert version, "Internal error. All recent releases should have a tag"

      if SortingKey(last_version) < SortingKey(version):
        self["roll"] = revision
        break
    else:
      # for/else: no candidate beat the last roll -- nothing to do.
      print("There is no newer v8 revision than the one in Chromium (%s)."
            % self["last_roll"])
      return True
class RollChromium(Step):
  MESSAGE = "Roll V8 into Chromium."

  def RunStep(self):
    # Hand off to the chromium_roll script, forwarding our own options.
    # No-op unless --roll was requested.
    if not self._options.roll:
      return
    forwarded = [
        "--author", self._options.author,
        "--reviewer", self._options.reviewer,
        "--chromium", self._options.chromium,
        "--last-roll", self["last_roll"],
        "--use-commit-queue",
        self["roll"],
    ]
    if self._options.sheriff:
      forwarded.append("--sheriff")
    if self._options.dry_run:
      forwarded.append("--dry-run")
    if self._options.work_dir:
      forwarded += ["--work-dir", self._options.work_dir]
    self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, forwarded)
class AutoRoll(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
parser.add_argument("--max-age", default=3, type=int,
help="Maximum age in days of the latest release.")
parser.add_argument("--roll", help="Call Chromium roll script.",
default=False, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
if not options.reviewer:
print "A reviewer (-r) is required."
return False
if not options.author:
print "An author (-a) is required."
return False
return True
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
}
def _Steps(self):
return [
CheckActiveRoll,
DetectLastRoll,
RollChromium,
]
# Script entry point: run the auto-roll pipeline and exit with its status.
if __name__ == "__main__": # pragma: no cover
  sys.exit(AutoRoll().Run())
| bsd-3-clause |
highco-groupe/odoo | addons/membership/wizard/__init__.py | 432 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import membership_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mtfelix/ClusType | candidate_generation/FrequentPhraseMining/BitVector.py | 3 | 146371 | #!/usr/bin/env python
__version__ = '3.3.2'
__author__ = "Avinash Kak (kak@purdue.edu)"
__date__ = '2014-March-12'
__url__ = 'https://engineering.purdue.edu/kak/dist/BitVector-3.3.2.html'
__copyright__ = "(C) 2014 Avinash Kak. Python Software Foundation."
__doc__ = '''
BitVector.py
Version: ''' + __version__ + '''
Author: Avinash Kak (kak@purdue.edu)
Date: ''' + __date__ + '''
@title
CHANGE LOG:
Version 3.3.2:
This version fixes a bug in the constructor code for creating a
bit vector from a text string. The bug was triggered by
character escapes in such strings.
Version 3.3.1:
This is a minor upgrade to make the syntax of the API method
declarations more uniform. Previously, while most of the method
names used underscores to connect multiple words, some used
camelcasing. Now all use underscores. For backward
compatibility, the old calls will continue to work.
Version 3.3:
This version includes: (1) One additional constructor mode that
allows a bit vector to be constructed directly from the bytes
type objects in the memory. (2) A bugfix in the slice function
for the case when the upper and the lower bounds of the slice
range are identical. (3) A bugfix for the next_set_bit() method.
Version 3.2:
This version includes support for constructing bit vectors
directly from text strings and hex strings. This version also
includes a safety check on the sizes of the two argument bit
vectors when calculating Jaccard similarity between the two.
Version 3.1.1:
This version includes: (1) a fix to the module test code to
account for how string input is handled in the io.StringIO class
in Python 2.7; (2) some improvements to the documentation.
Version 3.1:
This version includes: (1) Correction for a documentation error;
(2) Fix for a bug in slice assignment when one or both of the
slice limits were left unspecified; (3) The non-circular bit
shift methods now return self so that they can be chained; (4) A
method for testing a bitvector for its primality; and (5) A
method that uses Python's 'random.getrandbits()' to generate
a bitvector that can serve as candidate for primes whose bitfield
size is specified.
Version 3.0:
This is a Python 3.x compliant version of the latest incarnation
of the BitVector module. This version should work with both
Python 2.x and Python 3.x.
Version 2.2:
Fixed a couple of bugs, the most important being in the
bitvector initialization code for the cases when the
user-specified value for size conflicts with the user-specified
int value for the vector. Version 2.2 also includes a new
method runs() that returns a list of strings of the consecutive
runs of 1's and 0's in the bitvector. The implementation of
the circular shift operators has also been improved in Version
2.2. This version allows for a chained invocation of these
operators. Additionally, the circular shift operators now
exhibit expected behavior if the user-specified shift value is
negative.
Version 2.1:
Includes enhanced support for folks who use this class for
computer security and cryptography work. You can now call on
the methods of the BitVector class to do Galois Field GF(2^n)
arithmetic on bit arrays. This should save the users of this
class the bother of having to write their own routines for
finding multiplicative inverses in GF(2^n) finite fields.
Version 2.0.1:
Fixed numerous typos and other errors in the documentation page
for the module. The implementation code remains unchanged.
Version 2.0:
To address the needs of the folks who are using the BitVector
class in data mining research, the new version of the class
includes several additional methods. Since the bitvectors used
by these folks can be extremely long, possibly involving
millions of bits, the new version of the class includes a much
faster method for counting the total number of set bits when a
bitvector is sparse. [But note that this new bit counting
method may perform poorly for dense bitvectors. So the old bit
counting method has been retained.] Also for data mining folks,
the new version of the class is provided with similarity and
distance calculation metrics such as the Jaccard similarity
coefficient, the Jaccard distance, and the Hamming distance.
Again for the same folks, the class now also has a
next_set_bit(from_index) method. Other enhancements to the
class include methods for folks who do research in cryptography.
Now you can directly calculate the greatest common divisor of
two bitvectors, or find the multiplicative inverse of one
bitvector modulo another bitvector.
Version 1.5.1:
Removed a bug from the implementation of the right circular
shift operator.
Version 1.5:
This version should prove to be much more efficient for long
bitvectors. Efficiency in BitVector construction when only its
size is specified was achieved by eliminating calls to
_setbit(). The application of logical operators to two
BitVectors of equal length was also made efficient by
eliminating calls to the padding function. Another feature of
this version is the count_bits() method that returns the total
number of bits set in a BitVector instance. Yet another feature
of this version is the setValue() method that alters the bit
pattern associated with a previously constructed BitVector.
Version 1.4.1:
The reset() method now returns 'self' to allow for cascaded
invocation with the slicing operator. Also removed the
discrepancy between the value of the __copyright__ variable in
the module and the value of license variable in setup.py.
Version 1.4:
This version includes the following two upgrades: 1) code for
slice assignment; and 2) A reset function to reinitialize a
previously constructed BitVector. Additionally, the code was
cleaned up with the help of pychecker.
Version 1.3.2:
Fixed a potentially misleading documentation issue for the
Windows users of the BitVector class. If you are writing an
internally generated BitVector to a disk file, you must open the
file in the binary mode. If you don't, the bit patterns that
correspond to line breaks will be misinterpreted. On a Windows
machine in the text mode, the bit pattern 000001010 ('\\n') will
be written out to the disk as 0000110100001010 ('\\r\\n').
Version 1.3.1:
Removed the inconsistency in the internal representation of
bitvectors produced by logical bitwise operations vis-a-vis the
bitvectors created by the constructor. Previously, the logical
bitwise operations resulted in bitvectors that had their bits
packed into lists of ints, as opposed to arrays of unsigned
shorts.
Version 1.3:
(a) One more constructor mode included: When initializing a new
bitvector with an integer value, you can now also specify a size
for the bitvector. The constructor zero-pads the bitvector
from the left with zeros. (b) The BitVector class now supports
'if x in y' syntax to test if the bit pattern 'x' is contained
in the bit pattern 'y'. (c) Improved syntax to conform to
well-established Python idioms. (d) What used to be a comment
before the beginning of each method definition is now a
docstring.
Version 1.2:
(a) One more constructor mode included: You can now construct a
bitvector directly from a string of 1's and 0's. (b) The class
now constructs a shortest possible bit vector from an integer
value. So the bit vector for the integer value 0 is just one
bit of value 0, and so on. (c) All the rich comparison operators
are now overloaded. (d) The class now includes a new method
'intValue()' that returns the unsigned integer value of a bit
vector. This can also be done through '__int__'. (e) The
package now includes a unittest based framework for testing out
an installation. This is in a separate directory called
"TestBitVector".
Version 1.1.1:
The function that does block reads from a disk file now peeks
ahead at the end of each block to see if there is anything
remaining to be read in the file. If nothing remains, the
more_to_read attribute of the BitVector object is set to False.
This simplifies reading loops. This version also allows
BitVectors of size 0 to be constructed
Version 1.1:
I have changed the API significantly to provide more ways for
constructing a bit vector. As a result, it is now necessary to
supply a keyword argument to the constructor.
@title
INSTALLATION:
The BitVector class was packaged using Distutils. For installation,
execute the following command-line in the source directory (this is
the directory that contains the setup.py file after you have
downloaded and uncompressed the tar archive):
python setup.py install
You have to have root privileges for this to work. On Linux
distributions, this will install the module file at a location that
looks like
/usr/lib/python2.7/dist-packages/
If you do not have root access, you have the option of working
directly off the directory in which you downloaded the software by
simply placing the following statements at the top of your scripts
that use the BitVector class
import sys
sys.path.append( "pathname_to_BitVector_directory" )
To uninstall the module, simply delete the source directory, locate
where BitVector was installed with "locate BitVector" and delete
those files. As mentioned above, the full pathname to the installed
version is likely to look like
/usr/lib/python2.7/dist-packages/BitVector*
If you want to carry out a non-standard install of BitVector, look
up the on-line information on Disutils by pointing your browser to
http://docs.python.org/dist/dist.html
@title
INTRODUCTION:
The BitVector class is for a memory-efficient packed representation
of bit arrays and for logical operations on such arrays. The
operations supported on bit vectors are:
__add__ for concatenation
__and__ for bitwise logical AND
__contains__
__eq__, __ne__, __lt__, __le__, __gt__, __ge__
__getitem__ for indexed access
__getslice__ for slice access
__int__ for returning integer value
__invert__ for inverting the 1's and 0's
__iter__ for iterating through
__len__ for len()
__lshift__ for circular shifts to the left
__or__ for bitwise logical OR
__rshift__ for circular shifts to the right
__setitem__ for indexed and slice setting
__str__ for str()
__xor__ for bitwise logical XOR
count_bits
count_bits_sparse faster for sparse bit vectors
deep_copy
divide_into_two
gcd for greatest common divisor
gen_rand_bits_for_prime
get_hex_string_from_bitvector
get_text_from_bitvector
gf_divide for divisions in GF(2^n)
gf_MI for multiplicative inverse in GF(2^n)
gf_multiply for multiplications in GF(2)
gf_multiply_modular for multiplications in GF(2^n)
hamming_distance
int_val for returning the integer value
is_power_of_2
is_power_of_2_sparse faster for sparse bit vectors
jaccard_distance
jaccard_similarity
length
multiplicative_inverse
next_set_bit
pad_from_left
pad_from_right
permute
rank_of_bit_set_at_index
read_bits_from_file
reset
reverse
runs
shift_left for non-circular left shift
shift_right for non-circular right shift
slice assignment
set_value
test_for_primality
unpermute
write_to_file
write_bits_to_fileobject
@title
CONSTRUCTING BIT VECTORS:
You can construct a bit vector in the following different ways:
@tagC0
(C0) You construct an EMPTY bit vector using the following syntax:
bv = BitVector(size = 0)
@tagC1
(C1) You can construct a bit vector directly from either a tuple
or a list of bits, as in
bv = BitVector(bitlist = [1,0,1,0,0,1,0,1,0,0,1,0,1,0,0,1])
@tagC2
(C2) You can construct a bit vector from an integer by
bv = BitVector(intVal = 56789)
The bits stored now will correspond to the binary
representation of the integer. The resulting bit vector is
the shortest possible bit vector for the integer value
supplied. For example, when intVal is 0, the bit vector
constructed will consist of just the bit 0.
@tagC3
(C3) When initializing a bit vector with an intVal as shown above,
you can also specify a size for the bit vector:
bv = BitVector(intVal = 0, size = 8)
will return the bit vector consisting of the bit pattern
00000000. The zero padding needed for meeting the size
requirement is always on the left. If the size supplied is
smaller than what it takes to create the shortest possible
bit vector for intVal, an exception is thrown.
@tagC4
(C4) You can create a zero-initialized bit vector of a given size by
bv = BitVector(size = 62)
This bit vector will hold exactly 62 bits, all initialized to
the 0 bit value.
@tagC5
(C5) You can construct a bit vector from a disk file by a two-step
procedure. First you construct an instance of bit vector by
bv = BitVector(filename = 'somefile')
This bit vector itself is incapable of holding the bits. To
now create bit vectors that actually hold the bits, you need
to make the following sort of a call on the above variable
bv:
bv1 = bv.read_bits_from_file(64)
bv1 will be a regular bit vector containing 64 bits from the
disk file. If you want to re-read a file from the beginning
for some reason, you must obviously first close the file
object that was acquired with a call to the BitVector
constructor with a filename argument. This can be
accomplished by
bv.close_file_object()
@tagC6
(C6) You can construct a bit vector from a string of 1's and 0's by
bv = BitVector(bitstring = '110011110000')
@tagC7
(C7) Yet another way to construct a bit vector is to read the bits
directly from a file-like object, as in
import io
x = "111100001111"
fp_read = io.StringIO( x )
bv = BitVector(fp = fp_read)
print(bv) # 111100001111
@tagC8
(C8) You can also construct a bit vector directly from a text string
as shown by the example:
bv3 = BitVector(textstring = "hello")
print(bv3) # 0110100001100101011011000110110001101111
mytext = bv3.get_text_from_bitvector()
print mytext # hello
The bit vector is constructed by using the one-byte ASCII
encoding of the characters in the text string.
@tagC9
(C9) You can also construct a bit vector directly from a string
of hex digits as shown by the example:
bv4 = BitVector(hexstring = "68656c6c6f")
print(bv4) # 0110100001100101011011000110110001101111
myhexstring = bv4.get_hex_string_from_bitvector()
print myhexstring # 68656c6c6f
@tagC10
(C10) You can also construct a bit vector directly from a bytes type
object you previously created in your script. This can be
useful when you are trying to recover the integer parameters
stored in public and private keys. A typical usage scenario:
keydata = base64.b64decode(open(sys.argv[1]).read().split(None)[1])
bv = BitVector.BitVector(rawbytes = keydata)
where sys.argv[1] is meant to supply the name of a public key
file (in this case an SSH RSA public key file).
@title
OPERATIONS SUPPORTED BY THE BITVECTOR CLASS:
@title
DISPLAYING BIT VECTORS:
@tag1
(1) Since the BitVector class implements the __str__ method, a bit
vector can be displayed on a terminal by
print(bitvec)
or, for only Python 2.x, by
print bitvec
Basically, you can always obtain the string representation of a
bit vector by
str(bitvec)
and integer value by
int(bitvec)
@title
ACCESSING AND SETTING INDIVIDUAL BITS AND SLICES:
@tag2
(2) Any single bit of a bit vector bv can be set to 1 or 0 by
bv[M] = 1_or_0
print( bv[M] )
or, for just Python 2.x, by
bv[M] = 1_or_0
print bv[M]
for accessing (and setting) the bit at the position that is
indexed M. You can retrieve the bit at position M by bv[M].
Note that the index 0 corresponds to the first bit at the left
end of a bit pattern. This is made possible by the
implementation of the __getitem__ and __setitem__ methods.
@tag3
(3) A slice of a bit vector obtained by
bv[i:j]
is a bit vector constructed from the bits at index positions
from i through j-1. This is made possible by the
implementation of the __getslice__ method.
@tag4
(4) You can also carry out slice assignment:
bv1 = BitVector(size = 25)
bv2 = BitVector(bitstring = '1010001')
bv1[6:9] = bv2[0:3]
bv3 = BitVector(bitstring = '101')
bv1[0:3] = bv3
The first slice assignment will set the 6th, 7th, and the 8th
bits of the bit vector bv1 according to the first three bits of
bv2. The second slice assignment will set the first three bits
of bv1 according to the three bits in bv3. This is made
possible by the slice setting code in the __setitem__ method.
@tag5
(5) You can iterate over a bit vector, as illustrated by
for bit in bitvec:
print(bit)
This is made possible by the override definition for the special
__iter__() method.
@tag6
(6) Negative subscripts for array-like indexing are supported.
Therefore,
bitvec[-i]
is legal assuming that the index range is not violated. A
negative index carries the usual Python interpretation: The
last element of a bit vector is indexed -1 and the first
element -(n+1) if n is the total number of bits in the bit
vector. Negative subscripts are made possible by
special-casing such access in the implementation of the
__getitem__ method (actually it is the _getbit method).
@tag7
(7) You can reset a previously constructed bit vector to either the
all-zeros state or the all-ones state by
bv1 = BitVector(size = 25)
...
...
bv1.reset(1)
...
...
bv1.reset(0)
The first call to reset() will set all the bits of bv1 to 1's
and the second call all the bits to 0's.
@title
LOGICAL OPERATIONS ON BIT VECTORS:
@tag8
(8) Given two bit vectors bv1 and bv2, you can perform bitwise
logical operations on them by
result_bv = bv1 ^ bv2 # for bitwise XOR
result_bv = bv1 & bv2 # for bitwise AND
result_bv = bv1 | bv2 # for bitwise OR
result_bv = ~bv1 # for bitwise negation
These are made possible by implementing the __xor__, __and__,
__or__, and __invert__ methods, respectively.
@title
COMPARING BIT VECTORS:
@tag9
(9) Given two bit vectors bv1 and bv2, you can carry out the
following comparisons that return Boolean values:
bv1 == bv2
bv1 != bv2
bv1 < bv2
bv1 <= bv2
bv1 > bv2
bv1 >= bv2
The equalities and inequalities are determined by the integer
values associated with the bit vectors. These operator
overloadings are made possible by providing implementation code
for __eq__, __ne__, __lt__, __le__, __gt__, and __ge__,
respectively.
@title
OTHER SUPPORTED OPERATIONS:
@tag10
(10) You can permute and unpermute bit vectors:
bv_permuted = bv.permute(permutation_list)
bv_unpermuted = bv.unpermute(permutation_list)
@tag11
(11) Left and right circular rotations can be carried out by
bitvec << N
bitvec >> N
for circular rotations to the left and to the right by N bit
positions. These operator overloadings are made possible by
implementing the __lshift__ and __rshift__ methods,
respectively.
@tag12
(12) If you want to shift a bitvector non-circularly:
bitvec = BitVector(bitstring = '10010000')
bitvec.shift_left(3) # 10000000
bitvec.shift_right(3) # 00010000
Obviously, for a sufficient large left or right non-circular
shift, you will end up with a bitvector that is all zeros.
@tag13
(13) A bit vector containing an even number of bits can be divided
into two equal parts by
[left_half, right_half] = bitvec.divide_into_two()
where left_half and right_half hold references to the two
returned bit vectors.
@tag14
(14) You can find the integer value of a bit array by
bitvec.int_val()
or by
int(bitvec)
@tag15
(15) You can convert a bit vector into its string representation by
str(bitvec)
@tag16
(16) Because __add__ is supplied, you can always join two bit vectors
by
bitvec3 = bitvec1 + bitvec2
bitvec3 is a new bit vector that contains all the bits of
bitvec1 followed by all the bits of bitvec2.
@tag17
(17) You can find the length of a bitvector by
len = bitvec.length()
@tag18
(18) You can make a deep copy of a bitvector by
bitvec_copy = bitvec.deep_copy()
@tag19
(19) You can write a bit vector directly to a file, as illustrated
by the following example that reads one bit vector from a file
and then writes it to another file
bv = BitVector(filename = 'input.txt')
bv1 = bv.read_bits_from_file(64)
print(bv1)
FILEOUT = open('output.bits', 'wb')
bv1.write_to_file(FILEOUT)
FILEOUT.close()
bv = BitVector(filename = 'output.bits')
bv2 = bv.read_bits_from_file(64)
print(bv2)
IMPORTANT: The size of a bit vector must be a multiple of of 8
for this write function to work. If this
condition is not met, the function will throw an
exception.
IMPORTANT FOR WINDOWS USERS: When writing an internally
generated bit vector out to a disk file, it is
important to open the file in the binary mode as
shown. Otherwise, the bit pattern 00001010
('\\n') in your bitstring will be written out as
0000110100001010 ('\\r\\n'), which is the
linebreak on Windows machines.
@tag20
(20) You can also write a bit vector directly to a stream object, as
illustrated by
fp_write = io.StringIO()
bitvec.write_bits_to_fileobject(fp_write)
print(fp_write.getvalue())
@tag21
(21) You can pad a bit vector from the left or from the right with a
designated number of zeros
bitvec.pad_from_left(n)
bitvec.pad_from_right(n)
In the first case, the new bit vector will be the same as the
old bit vector except for the additional n zeros on the left.
The same thing happens in the second case except that now the
additional n zeros will be on the right.
@tag22
(22) You can test if a bit vector x is contained in another bit
vector y by using the syntax 'if x in y'. This is made
possible by the override definition for the special
__contains__ method.
@tag23
(23) You can change the bit pattern associated with a previously
constructed BitVector instance:
bv = BitVector(intVal = 7, size =16)
print(bv) # 0000000000000111
bv.set_value(intVal = 45)
print(bv) # 101101
@tag24
(24) You can count the number of bits set in a BitVector instance by
bv = BitVector(bitstring = '100111')
print(bv.count_bits()) # 4
@tag25
(25) For folks who use bit vectors with millions of bits in them but
with only a few bits set, your bit counting will go much, much
faster if you call count_bits_sparse() instead of count_bits():
# a BitVector with 2 million bits:
bv = BitVector(size = 2000000)
bv[345234] = 1
bv[233]=1
bv[243]=1
bv[18]=1
bv[785] =1
print(bv.count_bits_sparse()) # 5
@tag26
(26) You can calculate the similarity and the distance between two
bit vectors using the Jaccard similarity coefficient and the
Jaccard distance. Also, you can calculate the Hamming distance
between two bit vectors:
bv1 = BitVector(bitstring = '11111111')
bv2 = BitVector(bitstring = '00101011')
print bv1.jaccard_similarity(bv2)
print(str(bv1.jaccard_distance(bv2)))
print(str(bv1.hamming_distance(bv2)))
@tag27
(27) Starting from a given bit position, you can find the position
index of the next set bit:
bv = BitVector(bitstring = '00000000000001')
print(bv.next_set_bit(5)) # 13
since the position index of the SET bit after the bit
whose position index 5 is 13.
@tag28
(28) You can measure the "rank" of a bit that is set at a given
position. Rank is the number of bits that are set up to the
position of the bit you are interested in.
bv = BitVector(bitstring = '01010101011100')
print(bv.rank_of_bit_set_at_index(10)) # 6
@tag29
(29) You can test whether the integer value of a bit vector is a
power of two. The sparse version of this method will work much
faster for very long bit vectors. However, the regular version
may work faster for small bit vectors.
bv = BitVector(bitstring = '10000000001110')
print(bv.is_power_of_2())
print(bv.is_power_of_2_sparse())
@tag30
(30) Given a bit vector, you can construct a bit vector with all the
bits reversed, in the sense that what was left to right before
now becomes right to left.
bv = BitVector(bitstring = '0001100000000000001')
print(str(bv.reverse()))
@tag31
(31) You can find the greatest common divisor of two bit vectors:
bv1 = BitVector(bitstring = '01100110') # int val: 102
bv2 = BitVector(bitstring = '011010') # int val: 26
bv = bv1.gcd(bv2)
print(int(bv)) # 2
@tag32
(32) You can find the multiplicative inverse of a bit vector
vis-a-vis a given modulus:
bv_modulus = BitVector(intVal = 32)
bv = BitVector(intVal = 17)
bv_result = bv.multiplicative_inverse( bv_modulus )
if bv_result is not None:
print(str(int(bv_result))) # 17
else: print "No multiplicative inverse in this case"
This multiplicative inverse is calculated using normal integer
arithmetic. For multiplicative inverses in GF(2^n), use the
gf_MI() method described below.
@tag33
(33) To find the multiplicative inverse of a bit vector in the
Galois Field GF(2^n) with respect to a modulus polynomial, you
can do the following:
modulus = BitVector(bitstring = '100011011')
n = 8
a = BitVector(bitstring = '00110011')
multi_inverse = a.gf_MI(modulus, n)
print multi_inverse # 01101100
@tag34
(34) If you just want to multiply two bit patterns in GF(2):
a = BitVector(bitstring='0110001')
b = BitVector(bitstring='0110')
c = a.gf_multiply(b)
print(c) # 00010100110
@tag35
(35) On the other hand, if you want to carry out modular
multiplications in the Galois Field GF(2^n):
modulus = BitVector(bitstring='100011011') # AES modulus
n = 8
a = BitVector(bitstring='0110001')
b = BitVector(bitstring='0110')
c = a.gf_multiply_modular(b, modulus, n)
print(c) # 10100110
@tag36
(36) To divide by a modulus bitvector in the Galois Field GF(2^n):
mod = BitVector(bitstring='100011011') # AES modulus
n = 8
bitvec = BitVector(bitstring='11100010110001')
quotient, remainder = bitvec.gf_divide(mod, n)
print(quotient) # 00000000111010
print(remainder) # 10001111
@tag37
(37) You can extract from a bit vector the runs of 1's and 0's
in the vector
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(str(bv.runs())) # ['111', '00', '1']
@tag38
(38) You can generate a bit vector with random bits that span in
full the specified width. For example, if you wanted the
random bit vector to fully span 32 bits, you would say
bv = BitVector(intVal = 0)
bv = bv.gen_rand_bits_for_prime(32)
print(bv) # 11011010001111011010011111000101
@tag39
(39) You can test whether a randomly generated bit vector is a prime
number using the probabilistic Miller-Rabin test
bv = BitVector(intVal = 0)
bv = bv.gen_rand_bits_for_prime(32)
check = bv.test_for_primality()
print(check)
@tag40
(40) You can call get_text_from_bitvector() to directly convert a bit
vector into a text string (this is a useful thing to do only if
the length of the vector is an integral multiple of 8 and every
byte in your bitvector has a print representation):
bv = BitVector(textstring = "hello")
print(bv) # 0110100001100101011011000110110001101111
mytext = bv.get_text_from_bitvector()
print mytext # hello
@tag41
(41) You can directly convert a bit vector into a hex string (this
is a useful thing to do only if the length of the vector is an
integral multiple of 4):
bv4 = BitVector(hexstring = "68656c6c6f")
print(bv4) # 0110100001100101011011000110110001101111
myhexstring = bv4.get_hex_string_from_bitvector()
print myhexstring # 68656c6c6f
@title
HOW THE BIT VECTORS ARE STORED:
The bits of a bit vector are stored in 16-bit unsigned ints
following Josiah Carlson's recommendation to that effect on the
Pyrex mailing list. As you can see in the code for `__init__()',
after resolving the argument with which the constructor is called,
the very first thing the constructor does is to figure out how many
of those 2-byte ints it needs for the bits (see how the value is
assigned to the variable `two_byte_ints_needed' toward the end of
`__init__()'). For example, if you wanted to store a 64-bit array,
the variable 'two_byte_ints_needed' would be set to 4. (This does
not mean that the size of a bit vector must be a multiple of 16.
Any sized bit vectors can be constructed --- the constructor will
choose the minimum number of two-byte ints needed.) Subsequently,
the constructor acquires an array of zero-initialized 2-byte ints.
The last thing that is done in the code for `__init__()' is to
shift the bits into the array of two-byte ints.
As mentioned above, note that it is not necessary for the size of a
bit vector to be a multiple of 16 even though we are using C's
unsigned short as a basic unit for storing the bit arrays. The
class BitVector keeps track of the actual number of bits in the bit
vector through the "size" instance variable.
Note that, except for one case, the constructor must be called with
a single keyword argument, which determines how the bit vector will
be constructed. The single exception to this rule is for the
keyword argument `intVal' which can be used along with the `size'
keyword argument. When `intVal' is used without the `size' option,
the bit vector constructed for the integer is the shortest possible
bit vector. On the other hand, when `size' is also specified, the
bit vector is padded with zeroes from the left so that it has the
specified size. The code for `__init__()' begins by making sure
your constructor call only uses the acceptable keywords. The
constraints on how many keywords can be used together in a
constructor call are enforced when we process each keyword option
separately in the rest of the code for `__init__()'.
The first keyword option processed by `__init__()' is for
`filename'. When the constructor is called with the `filename'
keyword, as in
bv = BitVector(filename = 'myfilename')
the call returns a bit vector on which you must subsequently invoke
the `read_bits_from_file()' method to actually obtain a bit vector
consisting of the bits that constitute the information stored in
the file.
The next keyword option considered in `__init__()' is for `fp',
which is for constructing a bit vector by reading off the bits from
a file-like object, as in
x = "111100001111"
fileobj = StringIO.StringIO( x )
bv = BitVector( fp = fileobj )
The keyword option `intVal' considered next is for converting an
integer into a bit vector through a constructor call like
bv = BitVector(intVal = 123456)
The bits stored in the bit vector thus created correspond to the
big-endian binary representation of the integer argument provided
through `intVal' (meaning that the most significant bit will be at
the leftmost position in the bit vector.) THE BIT VECTOR
CONSTRUCTED WITH THE ABOVE CALL IS THE SHORTEST POSSIBLE BIT VECTOR
FOR THE INTEGER SUPPLIED. As a case in point, when `intVal' is set
to 0, the bit vector consists of a single bit whose value is 0. When
constructing a bit vector with the `intVal' option, if you also
want to impose a size condition on the bit vector, you can make a
call like
bv = BitVector(intVal = 46, size = 16)
which returns a bit vector of the indicated size by padding the
shortest possible vector for the `intVal' option with zeros from
the left.
The next option processed by `__init__()' is for the `size' keyword
when this keyword is used all by itself. If you want a bit vector
of just 0's of whatever size, you make a call like
bv = BitVector(size = 61)
This returns a bit vector that will hold exactly 61 bits, all
initialized to the zero value.
The next constructor keyword processed by `__init__()' is
`bitstring'. This is to allow a bit vector to be constructed
directly from a bit string as in
bv = BitVector(bitstring = '00110011111')
The keyword considered next is `bitlist' which allows a bit vector
to be constructed from a list or a tuple of individual bits, as in
bv = BitVector(bitlist = (1, 0, 1, 1, 0, 0, 1))
The last two keyword options considered in `__init__()' are for
keywords `textstring' and `hexstring'. If you want to construct a
bitvector directly from a text string, you call
bv = BitVector(textstring = "hello")
The bit vector created corresponds to the ASCII encodings of the
individual characters in the text string.
And if you want to do the same with a hex string, you call
bv = BitVector(hexstring = "68656c6c6f")
Now, as you would expect, the bits in the bit vector will
correspond directly to the hex digits in your hex string.
@title
ACKNOWLEDGMENTS:
The author is grateful to Oleg Broytmann for suggesting many
improvements that were incorporated in Version 1.1 of this package.
The author would like to thank Kurt Schwehr whose email resulted in
the creation of Version 1.2. Kurt also caught an error in my
earlier version of 'setup.py' and suggested a unittest based
approach to the testing of the package. Kurt also supplied the
Makefile that is included in this distribution. The author would
also like to thank all (Scott Daniels, Blair Houghton, and Steven
D'Aprano) for their responses to my comp.lang.python query
concerning how to make a Python input stream peekable. This
feature was included in Version 1.1.1.
With regard to the changes incorporated in Version 1.3, thanks are
owed to Kurt Schwehr and Gabriel Ricardo for bringing to my
attention the bug related to the intVal method of initializing a
bit vector when the value of intVal exceeded sys.maxint. This
problem is fixed in Version 1.3. Version 1.3 also includes many
other improvements that make the syntax better conform to the
standard idioms of Python. These changes and the addition of the
new constructor mode (that allows a bit vector of a given size to
be constructed from an integer value) are also owing to Kurt's
suggestions.
With regard to the changes incorporated in Version 1.3.1, I would
like to thank Michael Haggerty for noticing that the bitwise
logical operators resulted in bit vectors that had their bits
packed into lists of ints, as opposed to arrays of unsigned shorts.
This inconsistency in representation has been removed in version
1.3.1. Michael has also suggested that since BitVector is mutable,
I should be overloading __iand__(), __ior__(), etc., for in-place
modifications of bit vectors. Michael certainly makes a good
point. But I am afraid that this change will break the code for the
existing users of the BitVector class.
I thank Mathieu Roy for bringing to my attention the problem with
writing bitstrings out to a disk files on Windows machines. This
turned out to be a problem more with the documentation than with
the BitVector class itself. On a Windows machine, it is
particularly important that a file you are writing a bitstring into
be opened in binary mode since otherwise the bit pattern 00001010
('\\n') will be written out as 0000110100001010 ('\\r\\n'). This
documentation fix resulted in Version 1.3.2.
With regard to Version 1.4, the suggestions/bug reports made by
John Kominek, Bob Morse, and Steve Ward contributed to this
version. I wish to thank all three. John wanted me to equip the
class with a reset() method so that a previously constructed class
could be reset to either all 0's or all 1's. Bob spotted loose
local variables in the implementation --- presumably left over from
a debugging phase of the code. Bob recommended that I clean up the
code with pychecker. That has been done. Steve noticed that slice
assignment was not working. It should work now.
Version 1.4.1 was prompted by John Kominek suggesting that if
reset() returned self, then the slice operation could be combined
with the reset operation. Thanks John! Another reason for 1.4.1
was to remove the discrepancy between the value of the
__copyright__ variable in the module and the value of license
variable in setup.py. This discrepancy was brought to my attention
by David Eyk. Thanks David!
Version 1.5 has benefited greatly by the suggestions made by Ryan
Cox. By examining the BitVector execution with cProfile, Ryan
observed that my implementation was making unnecessary method calls
to _setbit() when just the size option is used for constructing a
BitVector instance. Since Python allocates cleaned up memory, it
is unnecessary to set the individual bits of a vector if it is
known in advance that they are all zero. Ryan made a similar
observation for the logical operations applied to two BitVector
instances of equal length. He noticed that I was making
unnecessary calls to _resize_pad_from_left() for the case of equal
arguments to logical operations. Ryan also recommended that I
include a method that returns the total number of bits set in a
BitVector instance. The new method count_bits() does exactly
that. Thanks Ryan for all your suggestions. Version 1.5 also
includes the method setValue() that allows the internally stored
bit pattern associated with a previously constructed BitVector to
be changed. A need for this method was expressed by Aleix
Conchillo. Thanks Aleix.
Version 1.5.1 is a quick release to fix a bug in the right circular
shift operator. This bug was discovered by Jasper Spaans. Thanks
very much Jasper.
Version 2.0 was prompted mostly by the needs of the folks who play
with very long bit vectors that may contain millions of bits. I
believe such bit vectors are encountered in data mining research
and development. Towards that end, among the new methods in
Version 2.0, the count_bits_sparse() was provided by Rhiannon Weaver.
She says when a bit vector contains over 2 million bits and only,
say, five bits are set, her method is faster than the older
count_bits() method by a factor of roughly 18. Thanks
Rhiannon. [The logic of the new implementation works best for very
sparse bit vectors. For very dense vectors, it may perform more
slowly than the regular count_bits() method. For that reason, I
have retained the original method.] Rhiannon's implementation is
based on what has been called the Kernighan way at the web site
http://graphics.stanford.edu/~seander/bithacks.html. Version 2.0
also includes a few additional functions posted at this web site
for extracting information from bit fields. Also included in this
new version is the next_set_bit() method supplied by Jason Allum.
I believe this method is also useful for data mining folks. Thanks
Jason. Additional methods in Version 2.0 include the similarity and
the distance metrics for comparing two bit vectors, method for
finding the greatest common divisor of two bit vectors, and a
method that determines the multiplicative inverse of a bit vector
vis-a-vis a modulus. The last two methods should prove useful to
folks in cryptography.
With regard to Version 2.2, I would like to thank Ethan Price for
bringing to my attention a bug in the BitVector initialization code
for the case when both the int value and the size are user-
specified and the two values happen to be inconsistent. Ethan also
discovered that the circular shift operators did not respond to
negative values for the shift. These and some other shortcomings
discovered by Ethan have been fixed in Version 2.2. Thanks Ethan!
For two of the changes included in Version 3.1, I'd like to thank
Libor Wagner and C. David Stahl. Libor discovered a documentation
error in the listing of the 'count_bits_sparse()' method and David
discovered a bug in slice assignment when one or both of the slice
limits are left unspecified. These errors in Version 3.0 have been
fixed in Version 3.1.
Version 3.1.1 was triggered by two emails, one from John-Mark
Gurney and the other from Nessim Kisserli, both related to the
issue of compilation of the module. John-Mark mentioned that since
this module did not work with Python 2.4.3, the statement that the
module was appropriate for all Python 2.x was not correct, and
Nessim reported that he had run into a problem with the compilation
of the test portion of the code with Python 2.7 where a string of
1's and 0's is supplied to io.StringIO() for the construction of a
memory file. Both these issues have been resolved in 3.1.1.
Version 3.2 was triggered by my own desire to include additional
functionality in the module to make it more useful for
experimenting with hashing functions. While I was at it, I also
included in it a couple of safety checks on the lengths of the two
arguments bit vectors when computing their Jaccard similarity. I
could see the need for these checks after receiving an email from
Patrick Nisch about the error messages he was receiving during
Jaccard similarity calculations. Thanks Patrick!
Version 3.3 includes a correction by John Gleeson for a bug in the
next_set_bit() method. Thanks, John!
Version 3.3.1 resulted from Thor Smith observing that my naming
convention for the API methods was not uniform. Whereas most used
the underscore for joining multiple words, some were based on
camelcasing. Thanks, Thor!
Version 3.3.2 was in response to a bug discovery by Juan Corredor.
The bug related to constructing bit vectors from text strings that
include character escapes. Thanks, Juan!
@title
ABOUT THE AUTHOR:
Avi Kak is the author of "Programming with Objects: A Comparative
Presentation of Object-Oriented Programming with C++ and Java",
published by John-Wiley in 2003. This book presents a new approach
to the combined learning of two large object-oriented languages,
C++ and Java. It is being used as a text in a number of
educational programs around the world. This book has also been
translated into Chinese. Avi Kak is also the author of "Scripting
with Objects: A Comparative Presentation of Object-Oriented
Scripting with Perl and Python," published in 2008 by John-Wiley.
@title
SOME EXAMPLE CODE:
#!/usr/bin/env python
import BitVector
# Construct a bit vector from a list or tuple of bits:
bv = BitVector.BitVector( bitlist = (1, 0, 0, 1) )
print(bv) # 1001
# Construct a bit vector from an integer:
bv = BitVector.BitVector( intVal = 5678 )
print(bv) # 0001011000101110
# Construct a bit vector of a given size from a given
# integer:
bv = BitVector( intVal = 45, size = 16 )
print(bv) # 0000000000101101
# Construct a zero-initialized bit vector of a given size:
bv = BitVector.BitVector( size = 5 )
print(bv) # 00000
# Construct a bit vector from a bit string:
bv = BitVector.BitVector( bitstring = '110001' )
print(bv[0], bv[1], bv[2], bv[3], bv[4], bv[5]) # 1 1 0 0 0 1
print(bv[-1], bv[-2], bv[-3], bv[-4], bv[-5], bv[-6]) # 1 0 0 0 1 1
# Construct a bit vector from a file like object:
import io
x = "111100001111"
fp_read = io.StringIO( x )
bv = BitVector( fp = fp_read )
print(bv) # 111100001111
# Experiments with bitwise logical operations:
bv3 = bv1 | bv2
bv3 = bv1 & bv2
bv3 = bv1 ^ bv2
bv6 = ~bv5
# Find the length of a bit vector
print( str(len( bitvec ) ) )
# Find the integer value of a bit vector
print( bitvec.intValue() )
# Open a file for reading bit vectors from
bv = BitVector.BitVector( filename = 'TestBitVector/testinput1.txt' )
print( bv ) # nothing yet
bv1 = bv.read_bits_from_file(64)
print( bv1 ) # first 64 bits from the file
# Divide a bit vector into two equal sub-vectors:
[bv1, bv2] = bitvec.divide_into_two()
# Permute and Un-Permute a bit vector:
bv2 = bitvec.permute( permutation_list )
bv2 = bitvec.unpermute( permutation_list )
# Try circular shifts to the left and to the right
bitvec << 7
bitvec >> 7
# Try 'if x in y' syntax for bit vectors:
bv1 = BitVector( bitstring = '0011001100' )
bv2 = BitVector( bitstring = '110011' )
if bv2 in bv1:
print( "%s is in %s" % (bv2, bv1) )
else:
print( "%s is not in %s" % (bv2, bv1) )
.....
.....
(For a more complete working example, see the
example code in the BitVectorDemo.py file in the
Examples sub-directory.)
@endofdocs
'''
import array
import operator
import sys
_hexdict = { '0' : '0000', '1' : '0001', '2' : '0010', '3' : '0011',
'4' : '0100', '5' : '0101', '6' : '0110', '7' : '0111',
'8' : '1000', '9' : '1001', 'a' : '1010', 'b' : '1011',
'c' : '1100', 'd' : '1101', 'e' : '1110', 'f' : '1111' }
def _readblock(blocksize, bitvector):
'''
If this function succeeds in reading all blocksize bits, it uses the
tell-read-seek mechanism to peek ahead to see if there is anything more to be
read in the file. If there is nothing further to be read, it sets the more_to_read
attribute of the BitVector instance to False. Obviously, this can only be done for
seekable streams such as those connected with disk files. According to Blair
Houghton, a similar feature could presumably be implemented for socket streams by
using recv() or recvfrom() if you set the flags argument to MSG_PEEK.
'''
global _hexdict
bitstring = ''
i = 0
while ( i < blocksize / 8 ):
i += 1
byte = bitvector.FILEIN.read(1)
if byte == b'':
if len(bitstring) < blocksize:
bitvector.more_to_read = False
return bitstring
if sys.version_info[0] == 3:
hexvalue = '%02x' % byte[0]
else:
hexvalue = hex( ord( byte ) )
hexvalue = hexvalue[2:]
if len( hexvalue ) == 1:
hexvalue = '0' + hexvalue
bitstring += _hexdict[ hexvalue[0] ]
bitstring += _hexdict[ hexvalue[1] ]
file_pos = bitvector.FILEIN.tell()
# peek at the next byte; moves file position only if a
# byte is read
next_byte = bitvector.FILEIN.read(1)
if next_byte:
# pretend we never read the byte
bitvector.FILEIN.seek( file_pos )
else:
bitvector.more_to_read = False
return bitstring
#------------------------------ BitVector Class Definition --------------------------------
class BitVector( object ):
def __init__( self, *args, **kwargs ):
if args:
raise ValueError(
'''BitVector constructor can only be called with
keyword arguments for the following keywords:
filename, fp, size, intVal, bitlist, bitstring,
hexstring, textstring, and rawbytes)''')
allowed_keys = 'bitlist','bitstring','filename','fp','intVal',\
'size','textstring','hexstring','rawbytes'
keywords_used = kwargs.keys()
for keyword in keywords_used:
if keyword not in allowed_keys:
raise ValueError("Wrong keyword used --- check spelling")
filename=fp=intVal=size=bitlist=bitstring=textstring=hexstring=rawbytes=None
if 'filename' in kwargs : filename=kwargs.pop('filename')
if 'fp' in kwargs : fp = kwargs.pop('fp')
if 'size' in kwargs : size = kwargs.pop('size')
if 'intVal' in kwargs : intVal = kwargs.pop('intVal')
if 'bitlist' in kwargs : bitlist = kwargs.pop('bitlist')
if 'bitstring' in kwargs : bitstring = kwargs.pop('bitstring')
if 'hexstring' in kwargs : hexstring = kwargs.pop('hexstring')
if 'textstring' in kwargs : textstring = kwargs.pop('textstring')
if 'rawbytes' in kwargs : rawbytes = kwargs.pop('rawbytes')
self.filename = None
self.size = 0
self.FILEIN = None
self.FILEOUT = None
if filename:
if fp or size or intVal or bitlist or bitstring or hexstring or textstring or rawbytes:
raise ValueError('''When filename is specified, you cannot give values
to any other constructor args''')
self.filename = filename
self.FILEIN = open(filename, 'rb')
self.more_to_read = True
return
elif fp:
if filename or size or intVal or bitlist or bitstring or hexstring or \
textstring or rawbytes:
raise ValueError('''When fileobject is specified, you cannot give
values to any other constructor args''')
bits = self.read_bits_from_fileobject(fp)
bitlist = list(map(int, bits))
self.size = len( bitlist )
elif intVal or intVal == 0:
if filename or fp or bitlist or bitstring or hexstring or textstring or rawbytes:
raise ValueError('''When intVal is specified, you can only give a
value to the 'size' constructor arg''')
if intVal == 0:
bitlist = [0]
if size is None:
self.size = 1
elif size == 0:
raise ValueError('''The value specified for size must be at least
as large as for the smallest bit vector possible
for intVal''')
else:
if size < len(bitlist):
raise ValueError('''The value specified for size must be at least
as large as for the smallest bit vector
possible for intVal''')
n = size - len(bitlist)
bitlist = [0]*n + bitlist
self.size = len(bitlist)
else:
hexVal = hex(intVal).lower().rstrip('l')
hexVal = hexVal[2:]
if len(hexVal) == 1:
hexVal = '0' + hexVal
bitlist = ''.join(map(lambda x: _hexdict[x],hexVal))
bitlist = list(map( int, bitlist))
i = 0
while (i < len(bitlist)):
if bitlist[i] == 1: break
i += 1
del bitlist[0:i]
if size is None:
self.size = len(bitlist)
elif size == 0:
if size < len(bitlist):
raise ValueError('''The value specified for size must be at least
as large as for the smallest bit vector possible
for intVal''')
else:
if size < len(bitlist):
raise ValueError('''The value specified for size must be at least
as large as for the smallest bit vector possible
for intVal''')
n = size - len(bitlist)
bitlist = [0]*n + bitlist
self.size = len( bitlist )
elif size is not None and size >= 0:
if filename or fp or intVal or bitlist or bitstring or hexstring or \
textstring or rawbytes:
raise ValueError('''When size is specified (without an intVal), you cannot
give values to any other constructor args''')
self.size = size
two_byte_ints_needed = (size + 15) // 16
self.vector = array.array('H', [0]*two_byte_ints_needed)
return
elif bitstring or bitstring == '':
if filename or fp or size or intVal or bitlist or hexstring or textstring or rawbytes:
raise ValueError('''When a bitstring is specified, you cannot give
values to any other constructor args''')
bitlist = list(map(int, list(bitstring)))
self.size = len(bitlist)
elif bitlist:
if filename or fp or size or intVal or bitstring or hexstring or textstring or rawbytes:
raise ValueError('''When bits are specified, you cannot give values
to any other constructor args''')
self.size = len(bitlist)
elif textstring or textstring == '':
if filename or fp or size or intVal or bitlist or bitstring or hexstring or rawbytes:
raise ValueError('''When bits are specified through textstring, you
cannot give values to any other constructor args''')
hexlist = ''.join(map(lambda x: x[2:], map(lambda x: hex(x) if len(hex(x)[2:])==2 \
else hex(x)[:2] + '0' + hex(x)[2:], map(ord, list(textstring)))))
bitlist = list(map(int,list(''.join(map(lambda x: _hexdict[x], list(hexlist))))))
self.size = len(bitlist)
elif hexstring or hexstring == '':
if filename or fp or size or intVal or bitlist or bitstring or textstring or rawbytes:
raise ValueError('''When bits are specified through hexstring, you
cannot give values to any other constructor args''')
bitlist = list(map(int,list(''.join(map(lambda x: _hexdict[x], list(hexstring))))))
self.size = len(bitlist)
elif rawbytes:
if filename or fp or size or intVal or bitlist or bitstring or textstring or hexstring:
raise ValueError('''When bits are specified through rawbytes, you
cannot give values to any other constructor args''')
import binascii
hexlist = binascii.hexlify(rawbytes)
if sys.version_info[0] == 3:
bitlist = list(map(int,list(''.join(map(lambda x: _hexdict[x], \
list(map(chr,list(hexlist))))))))
else:
bitlist = list(map(int,list(''.join(map(lambda x: _hexdict[x], list(hexlist))))))
self.size = len(bitlist)
else:
raise ValueError("wrong arg(s) for constructor")
two_byte_ints_needed = (len(bitlist) + 15) // 16
self.vector = array.array( 'H', [0]*two_byte_ints_needed )
list( map( self._setbit, range(len(bitlist)), bitlist) )
def _setbit(self, posn, val):
'Set the bit at the designated position to the value shown'
if val not in (0, 1):
raise ValueError( "incorrect value for a bit" )
if isinstance( posn, (tuple) ):
posn = posn[0]
if posn >= self.size or posn < -self.size:
raise ValueError( "index range error" )
if posn < 0: posn = self.size + posn
block_index = posn // 16
shift = posn & 15
cv = self.vector[block_index]
if ( cv >> shift ) & 1 != val:
self.vector[block_index] = cv ^ (1 << shift)
def _getbit(self, pos):
'Get the bit from the designated position'
if not isinstance( pos, slice ):
if pos >= self.size or pos < -self.size:
raise ValueError( "index range error" )
if pos < 0: pos = self.size + pos
return ( self.vector[pos//16] >> (pos&15) ) & 1
else:
bitstring = ''
if pos.start is None:
start = 0
else:
start = pos.start
if pos.stop is None:
stop = self.size
else:
stop = pos.stop
for i in range( start, stop ):
bitstring += str(self[i])
return BitVector( bitstring = bitstring )
def __xor__(self, other):
'''
Take a bitwise 'XOR' of the bit vector on which the method is invoked with
the argument bit vector. Return the result as a new bit vector. If the two
bit vectors are not of the same size, pad the shorter one with zeros from the
left.
'''
if self.size < other.size:
bv1 = self._resize_pad_from_left(other.size - self.size)
bv2 = other
elif self.size > other.size:
bv1 = self
bv2 = other._resize_pad_from_left(self.size - other.size)
else:
bv1 = self
bv2 = other
res = BitVector( size = bv1.size )
lpb = map(operator.__xor__, bv1.vector, bv2.vector)
res.vector = array.array( 'H', lpb )
return res
def __and__(self, other):
'''
Take a bitwise 'AND' of the bit vector on which the method is invoked with
the argument bit vector. Return the result as a new bit vector. If the two
bit vectors are not of the same size, pad the shorter one with zeros from the
left.
'''
if self.size < other.size:
bv1 = self._resize_pad_from_left(other.size - self.size)
bv2 = other
elif self.size > other.size:
bv1 = self
bv2 = other._resize_pad_from_left(self.size - other.size)
else:
bv1 = self
bv2 = other
res = BitVector( size = bv1.size )
lpb = map(operator.__and__, bv1.vector, bv2.vector)
res.vector = array.array( 'H', lpb )
return res
def __or__(self, other):
'''
Take a bitwise 'OR' of the bit vector on which the method is invoked with the
argument bit vector. Return the result as a new bit vector. If the two bit
vectors are not of the same size, pad the shorter one with zero's from the
left.
'''
if self.size < other.size:
bv1 = self._resize_pad_from_left(other.size - self.size)
bv2 = other
elif self.size > other.size:
bv1 = self
bv2 = other._resize_pad_from_left(self.size - other.size)
else:
bv1 = self
bv2 = other
res = BitVector( size = bv1.size )
lpb = map(operator.__or__, bv1.vector, bv2.vector)
res.vector = array.array( 'H', lpb )
return res
def __invert__(self):
'''
Invert the bits in the bit vector on which the method is invoked
and return the result as a new bit vector.
'''
res = BitVector( size = self.size )
lpb = list(map( operator.__inv__, self.vector ))
res.vector = array.array( 'H' )
for i in range(len(lpb)):
res.vector.append( lpb[i] & 0x0000FFFF )
return res
def __add__(self, other):
'''
Concatenate the argument bit vector with the bit vector on which the method
is invoked. Return the concatenated bit vector as a new BitVector object.
'''
i = 0
outlist = []
while ( i < self.size ):
outlist.append( self[i] )
i += 1
i = 0
while ( i < other.size ):
outlist.append( other[i] )
i += 1
return BitVector( bitlist = outlist )
def _getsize(self):
'Return the number of bits in a bit vector.'
return self.size
def read_bits_from_file(self, blocksize):
'''
Read blocksize bits from a disk file and return a BitVector object containing
the bits. If the file contains fewer bits than blocksize, construct the
BitVector object from however many bits there are in the file. If the file
contains zero bits, return a BitVector object of size attribute set to 0.
'''
error_str = '''You need to first construct a BitVector
object with a filename as argument'''
if not self.filename:
raise SyntaxError( error_str )
if blocksize % 8 != 0:
raise ValueError( "block size must be a multiple of 8" )
bitstr = _readblock( blocksize, self )
if len( bitstr ) == 0:
return BitVector( size = 0 )
else:
return BitVector( bitstring = bitstr )
def read_bits_from_fileobject( self, fp ):
'''
This function is meant to read a bit string from a file like
object.
'''
bitlist = []
while 1:
bit = fp.read()
if bit == '': return bitlist
bitlist += bit
def write_bits_to_fileobject( self, fp ):
'''
This function is meant to write a bit vector directly to a file like object.
Note that whereas 'write_to_file' method creates a memory footprint that
corresponds exactly to the bit vector, the 'write_bits_to_fileobject'
actually writes out the 1's and 0's as individual items to the file object.
That makes this method convenient for creating a string representation of a
bit vector, especially if you use the StringIO class, as shown in the test
code.
'''
for bit_index in range(self.size):
# For Python 3.x:
if sys.version_info[0] == 3:
if self[bit_index] == 0:
fp.write( str('0') )
else:
fp.write( str('1') )
# For Python 2.x:
else:
if self[bit_index] == 0:
fp.write( unicode('0') )
else:
fp.write( unicode('1') )
def divide_into_two(self):
'''
Divides an even-sized bit vector into two and returns the two halves as a
list of two bit vectors.
'''
if self.size % 2 != 0:
raise ValueError( "must have even num bits" )
i = 0
outlist1 = []
while ( i < self.size /2 ):
outlist1.append( self[i] )
i += 1
outlist2 = []
while ( i < self.size ):
outlist2.append( self[i] )
i += 1
return [ BitVector( bitlist = outlist1 ),
BitVector( bitlist = outlist2 ) ]
def permute(self, permute_list):
'''
Permute a bit vector according to the indices shown in the second argument
list. Return the permuted bit vector as a new bit vector.
'''
if max(permute_list) > self.size -1:
raise ValueError( "Bad permutation index" )
outlist = []
i = 0
while ( i < len( permute_list ) ):
outlist.append( self[ permute_list[i] ] )
i += 1
return BitVector( bitlist = outlist )
def unpermute(self, permute_list):
'''
Unpermute the bit vector according to the permutation list supplied as the
second argument. If you first permute a bit vector by using permute() and
then unpermute() it using the same permutation list, you will get back the
original bit vector.
'''
if max(permute_list) > self.size -1:
raise ValueError( "Bad permutation index" )
if self.size != len( permute_list ):
raise ValueError( "Bad size for permute list" )
out_bv = BitVector( size = self.size )
i = 0
while ( i < len(permute_list) ):
out_bv[ permute_list[i] ] = self[i]
i += 1
return out_bv
def write_to_file(self, file_out):
'''
Write the bitvector to the file object file_out. (A file object is returned
by a call to open()). Since all file I/O is byte oriented, the bitvector must
be multiple of 8 bits. Each byte treated as MSB first (0th index).
'''
err_str = '''Only a bit vector whose length is a multiple of 8 can
be written to a file. Use the padding functions to satisfy
this constraint.'''
if not self.FILEOUT:
self.FILEOUT = file_out
if self.size % 8:
raise ValueError( err_str )
for byte in range( int(self.size/8) ):
value = 0
for bit in range(8):
value += (self._getbit( byte*8+(7 - bit) ) << bit )
if sys.version_info[0] == 3:
file_out.write( bytes(chr(value), 'utf-8') )
else:
file_out.write( chr(value) )
def close_file_object(self):
'''
For closing a file object that was used for reading the bits into one or more
BitVector objects.
'''
if not self.FILEIN:
raise SyntaxError( "No associated open file" )
self.FILEIN.close()
def int_val(self):
'Return the integer value of a bitvector'
intVal = 0
for i in range(self.size):
intVal += self[i] * (2 ** (self.size - i - 1))
return intVal
intValue = int_val
def get_text_from_bitvector(self):
'''
Return the text string formed by dividing the bitvector into bytes from the
left and replacing each byte by its ASCII character (this is a useful thing
to do only if the length of the vector is an integral multiple of 8 and every
byte in your bitvector has a print representation)
'''
if self.size % 8:
raise ValueError('''\nThe bitvector for get_text_from_bitvector()
must be an integral multiple of 8 bits''')
return ''.join(map(chr, map(int,[self[i:i+8] for i in range(0,self.size,8)])))
getTextFromBitVector = get_text_from_bitvector
def get_hex_string_from_bitvector(self):
'''
Return a string of hex digits by scanning the bits from the left and
replacing each sequence of 4 bits by its corresponding hex digit (this is a
useful thing to do only if the length of the vector is an integral multiple
of 4)
'''
if self.size % 4:
raise ValueError('''\nThe bitvector for get_hex_string_from_bitvector()
must be an integral multiple of 4 bits''')
return ''.join(map(lambda x: x.replace('0x',''), \
map(hex,map(int,[self[i:i+4] for i in range(0,self.size,4)]))))
getHexStringFromBitVector = get_hex_string_from_bitvector
def __lshift__( self, n ):
'For an in-place left circular shift by n bit positions'
if self.size == 0:
raise ValueError('''Circular shift of an empty vector
makes no sense''')
if n < 0:
return self >> abs(n)
for i in range(n):
self.circular_rotate_left_by_one()
return self
def __rshift__( self, n ):
'For an in-place right circular shift by n bit positions.'
if self.size == 0:
raise ValueError('''Circular shift of an empty vector
makes no sense''')
if n < 0:
return self << abs(n)
for i in range(n):
self.circular_rotate_right_by_one()
return self
    def circular_rotate_left_by_one(self):
        'For a one-bit in-place left circular shift'
        size = len(self.vector)
        # Logical bit 0 lives in the LSB of the first 16-bit word; save it so
        # it can wrap around to the last position at the end.
        bitstring_leftmost_bit = self.vector[0] & 1
        # Collect every word's LSB, then rotate that list left by one so each
        # word can receive the LSB of its successor word.
        left_most_bits = list(map(operator.__and__, self.vector, [1]*size))
        left_most_bits.append(left_most_bits[0])
        del(left_most_bits[0])
        # Shift each word right by one (moves logical bit k to k-1 within the
        # word) and OR the successor's saved LSB into bit position 15.
        self.vector = list(map(operator.__rshift__, self.vector, [1]*size))
        # NOTE: self.vector ends up a plain list here, not an array.array('H').
        self.vector = list(map( operator.__or__, self.vector, \
             list( map(operator.__lshift__, left_most_bits, [15]*size) )))
        # Re-insert the original first bit at the tail of the vector.
        self._setbit(self.size -1, bitstring_leftmost_bit)
    def circular_rotate_right_by_one(self):
        'For a one-bit in-place right circular shift'
        size = len(self.vector)
        # Save the last logical bit so it can wrap around to position 0.
        bitstring_rightmost_bit = self[self.size - 1]
        # Collect bit 15 of every word (the word's highest logical bit).
        right_most_bits = list(map( operator.__and__,
                                    self.vector, [0x8000]*size ))
        self.vector = list(map( operator.__and__, self.vector, [~0x8000]*size ))
        # Rotate the high-bit list right by one so each word receives its
        # predecessor's high bit.
        right_most_bits.insert(0, bitstring_rightmost_bit)
        right_most_bits.pop()
        # Shift each word left by one and OR the predecessor's high bit into
        # the word's LSB.
        self.vector = list(map(operator.__lshift__, self.vector, [1]*size))
        # NOTE: self.vector ends up a plain list here, not an array.array('H').
        self.vector = list(map( operator.__or__, self.vector, \
             list(map(operator.__rshift__, right_most_bits, [15]*size))))
        # The saved last bit wraps around to the front of the vector.
        self._setbit(0, bitstring_rightmost_bit)
    def circular_rot_left(self):
        '''
        This is merely another implementation of the method
        circular_rotate_left_by_one() shown above. This one does NOT use map
        functions. This method carries out a one-bit left circular shift of a bit
        vector.
        '''
        # Index of the last 16-bit word that holds any bits of this vector.
        max_index = (self.size -1)  // 16
        # Logical bit 0 is the LSB of word 0; save it for the wrap-around.
        left_most_bit = self.vector[0] & 1
        self.vector[0] = self.vector[0] >> 1
        for i in range(1, max_index + 1):
            # Carry each word's LSB into bit 15 of the previous word.
            left_bit = self.vector[i] & 1
            self.vector[i] = self.vector[i] >> 1
            self.vector[i-1] |= left_bit << 15
        # The saved original first bit wraps around to the last position.
        self._setbit(self.size -1, left_most_bit)
    def circular_rot_right(self):
        '''
        This is merely another implementation of the method
        circular_rotate_right_by_one() shown above. This one does NOT use map
        functions. This method does a one-bit right circular shift of a bit vector.
        '''
        # Index of the last 16-bit word that holds any bits of this vector.
        max_index = (self.size -1)  // 16
        # Save the last logical bit for the wrap-around to position 0.
        right_most_bit = self[self.size - 1]
        self.vector[max_index] &= ~0x8000
        self.vector[max_index] = self.vector[max_index] << 1
        for i in range(max_index-1, -1, -1):
            # Carry each word's bit 15 into the LSB of the next word.
            right_bit = self.vector[i] & 0x8000
            self.vector[i] &= ~0x8000
            self.vector[i] = self.vector[i] << 1
            self.vector[i+1] |= right_bit >> 15
        # The saved last bit wraps around to the front of the vector.
        self._setbit(0, right_most_bit)
    def shift_left_by_one(self):
        '''
        For a one-bit in-place left non-circular shift.  Note that bitvector size
        does not change.  The leftmost bit that moves past the first element of the
        bitvector is discarded and rightmost bit of the returned vector is set to
        zero.
        '''
        size = len(self.vector)
        # Same word-level carry dance as circular_rotate_left_by_one(): each
        # word's LSB moves into bit 15 of the previous word ...
        left_most_bits = list(map(operator.__and__, self.vector, [1]*size))
        left_most_bits.append(left_most_bits[0])
        del(left_most_bits[0])
        self.vector = list(map(operator.__rshift__, self.vector, [1]*size))
        # NOTE: self.vector ends up a plain list here, not an array.array('H').
        self.vector = list(map( operator.__or__, self.vector, \
             list(map(operator.__lshift__, left_most_bits, [15]*size))))
        # ... but the vacated last position is zero-filled instead of wrapping.
        self._setbit(self.size -1, 0)
    def shift_right_by_one(self):
        '''
        For a one-bit in-place right non-circular shift.  Note that bitvector size
        does not change.  The rightmost bit that moves past the last element of the
        bitvector is discarded and leftmost bit of the returned vector is set to
        zero.
        '''
        size = len(self.vector)
        # Same word-level carry dance as circular_rotate_right_by_one(): each
        # word's bit 15 moves into the LSB of the next word ...
        right_most_bits = list(map( operator.__and__, self.vector, [0x8000]*size ))
        self.vector = list(map( operator.__and__, self.vector, [~0x8000]*size ))
        # ... but a zero is fed in at the front instead of the wrapped bit.
        right_most_bits.insert(0, 0)
        right_most_bits.pop()
        self.vector = list(map(operator.__lshift__, self.vector, [1]*size))
        # NOTE: self.vector ends up a plain list here, not an array.array('H').
        self.vector = list(map( operator.__or__, self.vector, \
             list(map(operator.__rshift__,right_most_bits, [15]*size))))
        # Zero-fill the vacated first position.
        self._setbit(0, 0)
def shift_left( self, n ):
'For an in-place left non-circular shift by n bit positions'
for i in range(n):
self.shift_left_by_one()
return self
def shift_right( self, n ):
'For an in-place right non-circular shift by n bit positions.'
for i in range(n):
self.shift_right_by_one()
return self
# Allow array like subscripting for getting and setting:
__getitem__ = _getbit
def __setitem__(self, pos, item):
'''
This is needed for both slice assignments and for index assignments. It
checks the types of pos and item to see if the call is for slice assignment.
For slice assignment, pos must be of type 'slice' and item of type BitVector.
For index assignment, the argument types are checked in the _setbit() method.
'''
# The following section is for slice assignment:
if isinstance(pos, slice):
if (not isinstance( item, BitVector )):
raise TypeError('''For slice assignment,
the right hand side must be a BitVector''')
if (not pos.start and not pos.stop):
return item.deep_copy()
elif not pos.start:
if (pos.stop != len(item)):
raise ValueError('incompatible lengths for slice assignment')
for i in range(pos.stop):
self[i] = item[ i ]
return
elif not pos.stop:
if ((len(self) - pos.start) != len(item)):
raise ValueError('incompatible lengths for slice assignment')
for i in range(len(item)-1):
self[pos.start + i] = item[ i ]
return
else:
if ( (pos.stop - pos.start) != len(item) ):
raise ValueError('incompatible lengths for slice assignment')
for i in range( pos.start, pos.stop ):
self[i] = item[ i - pos.start ]
return
# For index assignment use _setbit()
self._setbit(pos, item)
def __getslice__(self, i, j):
'Fetch slices with [i:j], [:], etc.'
if self.size == 0:
return BitVector( bitstring = '' )
if i == j:
return BitVector( bitstring = '' )
slicebits = []
if j > self.size: j = self.size
for x in range(i,j):
slicebits.append( self[x] )
return BitVector( bitlist = slicebits )
# Allow len() to work:
__len__ = _getsize
# Allow int() to work:
__int__ = int_val
def __iter__(self):
'''
To allow iterations over a bit vector by supporting the 'for bit in
bit_vector' syntax:
'''
return BitVectorIterator(self)
def __str__(self):
'To create a print representation'
if self.size == 0:
return ''
return ''.join(map(str, self))
# Compare two bit vectors:
def __eq__(self, other):
if self.size != other.size:
return False
i = 0
while ( i < self.size ):
if (self[i] != other[i]): return False
i += 1
return True
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self.intValue() < other.intValue()
def __le__(self, other):
return self.intValue() <= other.intValue()
def __gt__(self, other):
return self.intValue() > other.intValue()
def __ge__(self, other):
return self.intValue() >= other.intValue()
def deep_copy( self ):
'Make a deep copy of a bit vector'
copy = str( self )
return BitVector( bitstring = copy )
_make_deep_copy = deep_copy
def _resize_pad_from_left( self, n ):
'''
Resize a bit vector by padding with n 0's from the left. Return the result as
a new bit vector.
'''
new_str = '0'*n + str( self )
return BitVector( bitstring = new_str )
def _resize_pad_from_right( self, n ):
'''
Resize a bit vector by padding with n 0's from the right. Return the result
as a new bit vector.
'''
new_str = str( self ) + '0'*n
return BitVector( bitstring = new_str )
    def pad_from_left( self, n ):
        'Pad a bit vector with n zeros from the left'
        # Rebuild the packed representation in place for the new length.
        new_str = '0'*n + str( self )
        bitlist =  list(map( int, list(new_str) ))
        self.size = len( bitlist )
        two_byte_ints_needed = (len(bitlist) + 15) // 16
        self.vector = array.array( 'H', [0]*two_byte_ints_needed )
        # NOTE(review): each map() call passes an (index, bit) tuple from
        # enumerate() as _setbit's position argument; this relies on _setbit
        # unpacking tuple positions -- confirm against _setbit's definition.
        list(map( self._setbit, enumerate(bitlist), bitlist))
    def pad_from_right( self, n ):
        'Pad a bit vector with n zeros from the right'
        # Rebuild the packed representation in place for the new length.
        new_str = str( self ) + '0'*n
        bitlist =  list(map( int, list(new_str) ))
        self.size = len( bitlist )
        two_byte_ints_needed = (len(bitlist) + 15) // 16
        self.vector = array.array( 'H', [0]*two_byte_ints_needed )
        # NOTE(review): each map() call passes an (index, bit) tuple from
        # enumerate() as _setbit's position argument; this relies on _setbit
        # unpacking tuple positions -- confirm against _setbit's definition.
        list(map( self._setbit, enumerate(bitlist), bitlist))
def __contains__( self, otherBitVec ):
'''
This supports 'if x in y' and 'if x not in y' syntax for bit vectors.
'''
if self.size == 0:
raise ValueError("First arg bitvec has no bits")
elif self.size < otherBitVec.size:
raise ValueError("First arg bitvec too short")
max_index = self.size - otherBitVec.size + 1
for i in range(max_index):
if self[i:i+otherBitVec.size] == otherBitVec:
return True
return False
    def reset( self, val ):
        '''
        Resets a previously created BitVector to either all zeros or all ones
        depending on the argument val. Returns self to allow for syntax like
               bv = bv1[3:6].reset(1)
        or
               bv = bv1[:].reset(1)
        '''
        if val not in (0,1):
            raise ValueError( "Incorrect reset argument" )
        bitlist = [val for i in range( self.size )]
        # NOTE(review): each map() call passes an (index, bit) tuple from
        # enumerate() as _setbit's position argument; this relies on _setbit
        # unpacking tuple positions -- confirm against _setbit's definition.
        list(map( self._setbit, enumerate(bitlist), bitlist ))
        return self
def count_bits( self ):
'''
Return the number of bits set in a BitVector instance.
'''
from functools import reduce
return reduce( lambda x, y: int(x)+int(y), self )
def set_value(self, *args, **kwargs):
'''
Changes the bit pattern associated with a previously constructed BitVector
instance. The allowable modes for changing the internally stored bit pattern
are the same as for the constructor.
'''
self.__init__( *args, **kwargs )
setValue = set_value
def count_bits_sparse(self):
'''
For sparse bit vectors, this method, contributed by Rhiannon, will be much
faster. She estimates that if a bit vector with over 2 millions bits has
only five bits set, this will return the answer in 1/18 of the time taken by
the count_bits() method. Note however, that count_bits() may work much
faster for dense-packed bit vectors. Rhianon's implementation is based on an
algorithm generally known as the Brian Kernighan's way, although its
antecedents predate its mention by Kernighan and Ritchie.
'''
num = 0
for intval in self.vector:
if intval == 0: continue
c = 0; iv = intval
while iv > 0:
iv = iv & (iv -1)
c = c + 1
num = num + c
return num
def jaccard_similarity(self, other):
'''
Computes the Jaccard similarity coefficient between two bit vectors
'''
assert self.intValue() > 0 or other.intValue() > 0, \
'Jaccard called on two zero vectors --- NOT ALLOWED'
assert self.size == other.size, 'vectors of unequal length'
intersect = self & other
union = self | other
return ( intersect.count_bits_sparse() / float( union.count_bits_sparse() ) )
def jaccard_distance( self, other ):
'''
Computes the Jaccard distance between two bit vectors
'''
assert self.size == other.size, 'vectors of unequal length'
return 1 - self.jaccard_similarity( other )
def hamming_distance( self, other ):
'''
Computes the Hamming distance between two bit vectors
'''
assert self.size == other.size, 'vectors of unequal length'
diff = self ^ other
return diff.count_bits_sparse()
    def next_set_bit(self, from_index=0):
        '''
        This method, contributed originally by Jason Allum and updated subsequently
        by John Gleeson, calculates the position of the next set bit at or after the
        current position index. It returns -1 if there is no next set bit.
        '''
        assert from_index >= 0, 'from_index must be nonnegative'
        i = from_index
        v = self.vector
        l = len(v)
        o = i >> 4          # word offset of the starting bit (16 bits per word)
        s = i & 0x0F        # bit position of the start within that word
        i = o << 4          # bit index of the first bit of that word
        while o < l:
            h = v[o]
            if h:
                # Word is non-zero: scan it from position s upward.
                i += s
                m = 1 << s
                while m != (1 << 0x10):
                    if h & m: return i
                    m <<= 1
                    i += 1
            else:
                # Whole word is zero: skip all 16 of its bits at once.
                i += 0x10
            s = 0           # subsequent words are scanned from their bit 0
            o += 1
        return -1
def rank_of_bit_set_at_index(self, position):
'''
For a bit that is set at the argument 'position', this method returns how
many bits are set to the left of that bit. For example, in the bit pattern
000101100100, a call to this method with position set to 9 will return 4.
'''
assert self[position] == 1, 'the arg bit not set'
bv = self[0:position+1]
return bv.count_bits()
def is_power_of_2( self ):
'''
Determines whether the integer value of a bit vector is a power of
2.
'''
if self.intValue() == 0: return False
bv = self & BitVector( intVal = self.intValue() - 1 )
if bv.intValue() == 0: return True
return False
isPowerOf2 = is_power_of_2
def is_power_of_2_sparse(self):
'''
Faster version of is_power_of2() for sparse bit vectors
'''
if self.count_bits_sparse() == 1: return True
return False
isPowerOf2_sparse = is_power_of_2_sparse
def reverse(self):
'''
Returns a new bit vector by reversing the bits in the bit vector on which the
method is invoked.
'''
reverseList = []
i = 1
while ( i < self.size + 1 ):
reverseList.append( self[ -i ] )
i += 1
return BitVector( bitlist = reverseList )
def gcd(self, other):
'''
Using Euclid's Algorithm, returns the greatest common divisor of the integer
value of the bit vector on which the method is invoked and the integer value
of the argument bit vector.
'''
a = self.intValue(); b = other.intValue()
if a < b: a,b = b,a
while b != 0:
a, b = b, a % b
return BitVector( intVal = a )
    def multiplicative_inverse(self, modulus):
        '''
        Calculates the multiplicative inverse of a bit vector modulo the bit vector
        that is supplied as the argument. Code based on the Extended Euclid's
        Algorithm.  Returns None when no inverse exists (gcd != 1).
        '''
        MOD = mod = modulus.intValue(); num = self.intValue()
        x, x_old = 0, 1
        y, y_old = 1, 0
        # Extended Euclid: maintain coefficients so that at termination
        # x_old * original_num + y_old * original_mod == gcd.
        while mod:
            quotient = num // mod
            num, mod = mod, num % mod
            x, x_old = x_old - x * quotient, x
            y, y_old = y_old - y * quotient, y
        if num != 1:
            # gcd is not 1, so no multiplicative inverse exists.
            return None
        else:
            # Normalize the Bezout coefficient into the range [0, MOD).
            MI = (x_old + MOD) % MOD
            return BitVector( intVal = MI )
def length(self):
return self.size
def gf_multiply(self, b):
'''
In the set of polynomials defined over GF(2), multiplies the bitvector on
which the method is invoked with the bitvector b. Returns the product
bitvector.
'''
a = self.deep_copy()
b_copy = b.deep_copy()
a_highest_power = a.length() - a.next_set_bit(0) - 1
b_highest_power = b.length() - b_copy.next_set_bit(0) - 1
result = BitVector( size = a.length()+b_copy.length() )
a.pad_from_left( result.length() - a.length() )
b_copy.pad_from_left( result.length() - b_copy.length() )
for i,bit in enumerate(b_copy):
if bit == 1:
power = b_copy.length() - i - 1
a_copy = a.deep_copy()
a_copy.shift_left( power )
result ^= a_copy
return result
    def gf_divide(self, mod, n):
        '''
        Carries out modular division of a bitvector by the modulus bitvector mod in
        GF(2^n) finite field. Returns both the quotient and the remainder.
        '''
        num = self
        if mod.length() > n+1:
            raise ValueError("Modulus bit pattern too long")
        quotient = BitVector( intVal = 0, size = num.length() )
        remainder = num.deep_copy()
        i = 0
        # Long division over GF(2): repeatedly xor a shifted copy of the
        # modulus into the remainder until its degree drops below that of
        # the modulus.  The loop counter caps the iterations at num.length().
        while 1:
            i = i+1
            if (i==num.length()): break
            mod_highest_power = mod.length()-mod.next_set_bit(0)-1
            if remainder.next_set_bit(0) == -1:
                remainder_highest_power = 0
            else:
                remainder_highest_power = remainder.length() - remainder.next_set_bit(0) - 1
            if (remainder_highest_power < mod_highest_power) or int(remainder)==0:
                break
            else:
                exponent_shift = remainder_highest_power - mod_highest_power
                # Record this power of x in the quotient.
                quotient[quotient.length()-exponent_shift-1] = 1
                # Subtract (xor, in GF(2)) mod * x^exponent_shift from the remainder.
                quotient_mod_product = mod.deep_copy();
                quotient_mod_product.pad_from_left(remainder.length() - mod.length())
                quotient_mod_product.shift_left(exponent_shift)
                remainder = remainder ^ quotient_mod_product
        # Truncate the remainder to at most n bits (keep the rightmost n).
        if remainder.length() > n:
            remainder = remainder[remainder.length()-n:]
        return quotient, remainder
def gf_multiply_modular(self, b, mod, n):
'''
Multiplies a bitvector with the bitvector b in GF(2^n) finite field with the
modulus bit pattern set to mod
'''
a = self
a_copy = a.deep_copy()
b_copy = b.deep_copy()
product = a_copy.gf_multiply(b_copy)
quotient, remainder = product.gf_divide(mod, n)
return remainder
    def gf_MI(self, mod, n):
        '''
        Returns the multiplicative inverse of a vector in the GF(2^n) finite field
        with the modulus polynomial set to mod
        '''
        num = self
        NUM = num.deep_copy(); MOD = mod.deep_copy()
        # Extended Euclid over GF(2) polynomials: x tracks the Bezout
        # coefficient of the original num.
        x = BitVector( size=mod.length() )
        x_old = BitVector( intVal=1, size=mod.length() )
        y = BitVector( intVal=1, size=mod.length() )
        y_old = BitVector( size=mod.length() )
        while int(mod):
            quotient, remainder = num.gf_divide(mod, n)
            num, mod = mod, remainder
            x, x_old = x_old ^ quotient.gf_multiply(x), x
            y, y_old = y_old ^ quotient.gf_multiply(y), y
        if int(num) != 1:
            # NOTE(review): the comma-separated return below produces a TUPLE
            # of strings, not one message string -- likely unintended, but
            # callers may depend on it; confirm before changing.
            return "NO MI. However, the GCD of ", str(NUM), " and ", \
                                 str(MOD), " is ", str(num)
        else:
            # Reduce the coefficient modulo MOD to get the inverse.
            z = x_old ^ MOD
            quotient, remainder = z.gf_divide(MOD, n)
            return remainder
def runs(self):
'''
Returns a list of the consecutive runs of 1's and 0's in the bit vector.
Each run is either a string of all 1's or a string of all 0's.
'''
if self.size == 0:
raise ValueError('''An empty vector has no runs''')
allruns = []
run = ''
previous_bit = self[0]
if previous_bit == 0:
run = '0'
else:
run = '1'
for bit in list(self)[1:]:
if bit == 0 and previous_bit == 0:
run += '0'
elif bit == 1 and previous_bit == 0:
allruns.append( run )
run = '1'
elif bit == 0 and previous_bit == 1:
allruns.append( run )
run = '0'
else:
run += '1'
previous_bit = bit
allruns.append( run )
return allruns
def test_for_primality(self):
'''
Check if the integer value of the bitvector is a prime through the
Miller-Rabin probabilistic test of primality. If not found to be a
composite, estimate the probability of the bitvector being a prime using this
test.
'''
p = int(self)
probes = [2,3,5,7,11,13,17]
for a in probes:
if a == p: return 1
if any([p % a == 0 for a in probes]): return 0
k, q = 0, p-1
while not q&1:
q >>= 1
k += 1
for a in probes:
a_raised_to_q = pow(a, q, p)
if a_raised_to_q == 1 or a_raised_to_q == p-1: continue
a_raised_to_jq = a_raised_to_q
primeflag = 0
for j in range(k-1):
a_raised_to_jq = pow(a_raised_to_jq, 2, p)
if a_raised_to_jq == p-1:
primeflag = 1
break
if not primeflag: return 0
probability_of_prime = 1 - 1.0/(4 ** len(probes))
return probability_of_prime
    def gen_rand_bits_for_prime(self, width):
        '''
        The bulk of the work here is done by calling random.getrandbits( width) which
        returns an integer whose binary code representation will not be larger than
        the argument 'width'. However, when random numbers are generated as
        candidates for primes, you often want to make sure that the random number
        thus created spans the full width specified by 'width' and that the number is
        odd. This we do by setting the two most significant bits and the least
        significant bit.
        '''
        import random
        candidate = random.getrandbits( width )
        # Force the candidate odd.
        candidate |= 1
        # Set the most significant bit so the value really spans 'width' bits.
        candidate |= (1 << width-1)
        # 2 << (width-3) == 1 << (width-2): sets the second-most-significant bit.
        candidate |= (2 << width-3)
        return BitVector( intVal = candidate )
#----------------------- BitVectorIterator Class -----------------------
class BitVectorIterator:
    '''
    Iterator over the bits of a BitVector, yielding them index-0 first.
    The bits are snapshotted at construction time, so later mutation of
    the vector does not affect an iteration already in progress.
    '''
    def __init__( self, bitvec ):
        self.items = [bitvec._getbit(k) for k in range(bitvec.size)]
        self.index = -1
    def __iter__( self ):
        return self
    def next( self ):
        self.index += 1
        if self.index >= len( self.items ):
            raise StopIteration
        return self.items[ self.index ]
    __next__ = next
#------------------------ End of Class Definition -----------------------
#------------------------ Test Code Follows -----------------------
if __name__ == '__main__':
# Construct an EMPTY bit vector (a bit vector of size 0):
print("\nConstructing an EMPTY bit vector (a bit vector of size 0):")
bv1 = BitVector( size = 0 )
print(bv1) # no output
# Construct a bit vector of size 2:
print("\nConstructing a bit vector of size 2:")
bv2 = BitVector( size = 2 )
print(bv2) # 00
# Joining two bit vectors:
print("\nOutput concatenation of two previous bit vectors:")
result = bv1 + bv2
print(result) # 00
# Construct a bit vector with a tuple of bits:
print("\nThis is a bit vector from a tuple of bits:")
bv = BitVector(bitlist=(1, 0, 0, 1))
print(bv) # 1001
# Construct a bit vector with a list of bits:
print("\nThis is a bit vector from a list of bits:")
bv = BitVector(bitlist=[1, 1, 0, 1])
print(bv) # 1101
# Construct a bit vector from an integer
bv = BitVector(intVal=5678)
print("\nBit vector constructed from integer 5678:")
print(bv) # 1011000101110
print("\nBit vector constructed from integer 0:")
bv = BitVector(intVal=0)
print(bv) # 0
print("\nBit vector constructed from integer 2:")
bv = BitVector(intVal=2)
print(bv) # 10
print("\nBit vector constructed from integer 3:")
bv = BitVector(intVal=3)
print(bv) # 11
print("\nBit vector constructed from integer 123456:")
bv = BitVector(intVal=123456)
print(bv) # 11110001001000000
print("\nInt value of the previous bit vector as computed by int_val():")
print(bv.int_val()) # 123456
print("\nInt value of the previous bit vector as computed by int():")
print(int(bv)) # 123456
# Construct a bit vector from a very large integer:
x = 12345678901234567890123456789012345678901234567890123456789012345678901234567890
bv = BitVector(intVal=x)
print("\nHere is a bit vector constructed from a very large integer:")
print(bv)
print("The integer value of the above bit vector is:%d" % int(bv))
# Construct a bit vector directly from a file-like object:
import io
x = "111100001111"
x = ""
if sys.version_info[0] == 3:
x = "111100001111"
else:
x = unicode("111100001111")
fp_read = io.StringIO(x)
bv = BitVector( fp = fp_read )
print("\nBit vector constructed directed from a file like object:")
print(bv) # 111100001111
# Construct a bit vector directly from a bit string:
bv = BitVector( bitstring = '00110011' )
print("\nBit Vector constructed directly from a bit string:")
print(bv) # 00110011
bv = BitVector(bitstring = '')
print("\nBit Vector constructed directly from an empty bit string:")
print(bv) # nothing
print("\nInteger value of the previous bit vector:")
print(bv.int_val()) # 0
print("\nConstructing a bit vector from the textstring 'hello':")
bv3 = BitVector(textstring = "hello")
print(bv3)
mytext = bv3.get_text_from_bitvector()
print("Text recovered from the previous bitvector: ")
print(mytext) # hello
print("\nConstructing a bit vector from the textstring 'hello\\njello':")
bv3 = BitVector(textstring = "hello\njello")
print(bv3)
mytext = bv3.get_text_from_bitvector()
print("Text recovered from the previous bitvector:")
print(mytext) # hello
# jello
print("\nConstructing a bit vector from the hexstring '68656c6c6f':")
bv4 = BitVector(hexstring = "68656c6c6f")
print(bv4)
myhexstring = bv4.get_hex_string_from_bitvector()
print("Hex string recovered from the previous bitvector: ")
print(myhexstring) # 68656c6c6f
print("\nDemonstrating the raw bytes mode of constructing a bit vector (useful for reading public and private keys):")
mypubkey = 'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5amriY96HQS8Y/nKc8zu3zOylvpOn3vzMmWwrtyDy+aBvns4UC1RXoaD9rDKqNNMCBAQwWDsYwCAFsrBzbxRQONHePX8lRWgM87MseWGlu6WPzWGiJMclTAO9CTknplG9wlNzLQBj3dP1M895iLF6jvJ7GR+V3CRU6UUbMmRvgPcsfv6ec9RRPm/B8ftUuQICL0jt4tKdPG45PBJUylHs71FuE9FJNp01hrj1EMFObNTcsy9zuis0YPyzArTYSOUsGglleExAQYi7iLh17pAa+y6fZrGLsptgqryuftN9Q4NqPuTiFjlqRowCDU7sSxKDgU7bzhshyVx3+pzXO4D2Q== kak@pixie'
import base64
if sys.version_info[0] == 3:
import binascii
keydata = base64.b64decode(bytes(mypubkey.split(None)[1], 'utf-8'))
else:
keydata = base64.b64decode(mypubkey.split(None)[1])
bv = BitVector( rawbytes = keydata )
print(bv)
# Test array-like indexing for a bit vector:
bv = BitVector( bitstring = '110001' )
print("\nPrints out bits individually from bitstring 110001:")
print(bv[0], bv[1], bv[2], bv[3], bv[4], bv[5]) # 1 1 0 0 0 1
print("\nSame as above but using negative array indexing:")
print(bv[-1], bv[-2], bv[-3], bv[-4], bv[-5], bv[-6]) # 1 0 0 0 1 1
# Test setting bit values with positive and negative
# accessors:
bv = BitVector( bitstring = '1111' )
print("\nBitstring for 1111:")
print(bv) # 1111
print("\nReset individual bits of above vector:")
bv[0]=0;bv[1]=0;bv[2]=0;bv[3]=0
print(bv) # 0000
print("\nDo the same as above with negative indices:")
bv[-1]=1;bv[-2]=1;bv[-4]=1
print(bv) # 1011
print("\nCheck equality and inequality ops:")
bv1 = BitVector( bitstring = '00110011' )
bv2 = BitVector( bitlist = [0,0,1,1,0,0,1,1] )
print(bv1 == bv2) # True
print(bv1 != bv2) # False
print(bv1 < bv2) # False
print(bv1 <= bv2) # True
bv3 = BitVector( intVal = 5678 )
print(bv3.int_val()) # 5678
print(bv3) # 10110000101110
print(bv1 == bv3) # False
print(bv3 > bv1) # True
print(bv3 >= bv1) # True
# Write a bit vector to a file like object
fp_write = io.StringIO()
bv.write_bits_to_fileobject( fp_write )
print("\nGet bit vector written out to a file-like object:")
print(fp_write.getvalue()) # 1011
print("\nExperiments with bitwise logical operations:")
bv3 = bv1 | bv2
print(bv3) # 00110011
bv3 = bv1 & bv2
print(bv3) # 00110011
bv3 = bv1 + bv2
print(bv3) # 0011001100110011
bv4 = BitVector( size = 3 )
print(bv4) # 000
bv5 = bv3 + bv4
print(bv5) # 0011001100110011000
bv6 = ~bv5
print(bv6) # 1100110011001100111
bv7 = bv5 & bv6
print(bv7) # 0000000000000000000
bv7 = bv5 | bv6
print(bv7) # 1111111111111111111
print("\nTry logical operations on bit vectors of different sizes:")
print(BitVector( intVal = 6 ) ^ BitVector( intVal = 13 )) # 1011
print(BitVector( intVal = 6 ) & BitVector( intVal = 13 )) # 0100
print(BitVector( intVal = 6 ) | BitVector( intVal = 13 )) # 1111
print(BitVector( intVal = 1 ) ^ BitVector( intVal = 13 )) # 1100
print(BitVector( intVal = 1 ) & BitVector( intVal = 13 )) # 0001
print(BitVector( intVal = 1 ) | BitVector( intVal = 13 )) # 1101
print("\nExperiments with setbit() and len():")
bv7[7] = 0
print(bv7) # 1111111011111111111
print(len( bv7 )) # 19
bv8 = (bv5 & bv6) ^ bv7
print(bv8) # 1111111011111111111
print("\nConstruct a bit vector from what is in the file testinput1.txt:")
bv = BitVector( filename = 'TestBitVector/testinput1.txt' )
#print bv # nothing to show
bv1 = bv.read_bits_from_file(64)
print("\nPrint out the first 64 bits read from the file:")
print(bv1)
# 0100000100100000011010000111010101101110011001110111001001111001
print("\nRead the next 64 bits from the same file:")
bv2 = bv.read_bits_from_file(64)
print(bv2)
# 0010000001100010011100100110111101110111011011100010000001100110
print("\nTake xor of the previous two bit vectors:")
bv3 = bv1 ^ (bv2)
print(bv3)
# 0110000101000010000110100001101000011001000010010101001000011111
print("\nExperiment with dividing an even-sized vector into two:")
[bv4, bv5] = bv3.divide_into_two()
print(bv4) # 01100001010000100001101000011010
print(bv5) # 00011001000010010101001000011111
# Permute a bit vector:
print("\nWe will use this bit vector for experiments with permute()")
bv1 = BitVector( bitlist = [1, 0, 0, 1, 1, 0, 1] )
print(bv1) # 1001101
bv2 = bv1.permute( [6, 2, 0, 1] )
print("\nPermuted and contracted form of the previous bit vector:")
print(bv2) # 1010
print("\nExperiment with writing an internally generated bit vector out to a disk file:")
bv1 = BitVector( bitstring = '00001010' )
FILEOUT = open( 'TestBitVector/test.txt', 'wb' )
bv1.write_to_file( FILEOUT )
FILEOUT.close()
bv2 = BitVector( filename = 'TestBitVector/test.txt' )
bv3 = bv2.read_bits_from_file( 32 )
print("\nDisplay bit vectors written out to file and read back from the file and their respective lengths:")
print( str(bv1) + " " + str(bv3))
print(str(len(bv1)) + " " + str(len(bv3)))
print("\nExperiments with reading a file from the beginning to end:")
bv = BitVector( filename = 'TestBitVector/testinput4.txt' )
print("\nHere are all the bits read from the file:")
while (bv.more_to_read):
bv_read = bv.read_bits_from_file( 64 )
print(bv_read)
print("\n")
print("\nExperiment with closing a file object and start extracting bit vectors from the file from the beginning again:")
bv.close_file_object()
bv = BitVector( filename = 'TestBitVector/testinput4.txt' )
bv1 = bv.read_bits_from_file(64)
print("\nHere are all the first 64 bits read from the file again after the file object was closed and opened again:")
print(bv1)
FILEOUT = open( 'TestBitVector/testinput5.txt', 'wb' )
bv1.write_to_file( FILEOUT )
FILEOUT.close()
print("\nExperiment in 64-bit permutation and unpermutation of the previous 64-bit bitvector:")
print("The permutation array was generated separately by the Fisher-Yates shuffle algorithm:")
bv2 = bv1.permute( [22, 47, 33, 36, 18, 6, 32, 29, 54, 62, 4,
9, 42, 39, 45, 59, 8, 50, 35, 20, 25, 49,
15, 61, 55, 60, 0, 14, 38, 40, 23, 17, 41,
10, 57, 12, 30, 3, 52, 11, 26, 43, 21, 13,
58, 37, 48, 28, 1, 63, 2, 31, 53, 56, 44, 24,
51, 19, 7, 5, 34, 27, 16, 46] )
print("Permuted bit vector:")
print(bv2)
bv3 = bv2.unpermute( [22, 47, 33, 36, 18, 6, 32, 29, 54, 62, 4,
9, 42, 39, 45, 59, 8, 50, 35, 20, 25, 49,
15, 61, 55, 60, 0, 14, 38, 40, 23, 17, 41,
10, 57, 12, 30, 3, 52, 11, 26, 43, 21, 13,
58, 37, 48, 28, 1, 63, 2, 31, 53, 56, 44, 24,
51, 19, 7, 5, 34, 27, 16, 46] )
print("Unpurmute the bit vector:")
print(bv3)
print("\nTry circular shifts to the left and to the right for the following bit vector:")
print(bv3) # 0100000100100000011010000111010101101110011001110111001001111001
print("\nCircular shift to the left by 7 positions:")
bv3 << 7
print(bv3) # 1001000000110100001110101011011100110011101110010011110010100000
print("\nCircular shift to the right by 7 positions:")
bv3 >> 7
print(bv3) # 0100000100100000011010000111010101101110011001110111001001111001
print("Test len() on the above bit vector:")
print(len( bv3 )) # 64
print("\nTest forming a [5:22] slice of the above bit vector:")
bv4 = bv3[5:22]
print(bv4) # 00100100000011010
print("\nTest the iterator:")
for bit in bv4:
print(bit) # 0 0 1 0 0 1 0 0 0 0 0 0 1 1 0 1 0
print("\nDemonstrate padding a bit vector from left:")
bv = BitVector(bitstring = '101010')
bv.pad_from_left(4)
print(bv) # 0000101010
print("\nDemonstrate padding a bit vector from right:")
bv.pad_from_right(4)
print(bv) # 00001010100000
print("\nTest the syntax 'if bit_vector_1 in bit_vector_2' syntax:")
try:
bv1 = BitVector(bitstring = '0011001100')
bv2 = BitVector(bitstring = '110011')
if bv2 in bv1:
print("%s is in %s" % (bv2, bv1))
else:
print("%s is not in %s" % (bv2, bv1))
except ValueError as arg:
print("Error Message: " + str(arg))
print("\nTest the size modifier when a bit vector is initialized with the intVal method:")
bv = BitVector(intVal = 45, size = 16)
print(bv) # 0000000000101101
bv = BitVector(intVal = 0, size = 8)
print(bv) # 00000000
bv = BitVector(intVal = 1, size = 8)
print(bv) # 00000001
print("\nTesting slice assignment:")
bv1 = BitVector( size = 25 )
print("bv1= " + str(bv1)) # 0000000000000000000000000
bv2 = BitVector( bitstring = '1010001' )
print("bv2= " + str(bv2)) # 1010001
bv1[6:9] = bv2[0:3]
print("bv1= " + str(bv1)) # 0000001010000000000000000
bv1[:5] = bv1[5:10]
print("bv1= " + str(bv1)) # 0101001010000000000000000
bv1[20:] = bv1[5:10]
print("bv1= " + str(bv1)) # 0101001010000000000001010
bv1[:] = bv1[:]
print("bv1= " + str(bv1)) # 0101001010000000000001010
bv3 = bv1[:]
print("bv3= " + str(bv3)) # 0101001010000000000001010
print("\nTesting reset function:")
bv1.reset(1)
print("bv1= " + str(bv1)) # 1111111111111111111111111
print(bv1[3:9].reset(0)) # 000000
print(bv1[:].reset(0)) # 0000000000000000000000000
print("\nTesting count_bit():")
bv = BitVector(intVal = 45, size = 16)
y = bv.count_bits()
print(y) # 4
bv = BitVector(bitstring = '100111')
print(bv.count_bits()) # 4
bv = BitVector(bitstring = '00111000')
print(bv.count_bits()) # 3
bv = BitVector(bitstring = '001')
print(bv.count_bits()) # 1
bv = BitVector(bitstring = '00000000000000')
print(bv.count_bits()) # 0
print("\nTest set_value idea:")
bv = BitVector(intVal = 7, size =16)
print(bv) # 0000000000000111
bv.set_value(intVal = 45)
print(bv) # 101101
print("\nTesting count_bits_sparse():")
bv = BitVector(size = 2000000)
bv[345234] = 1
bv[233]=1
bv[243]=1
bv[18]=1
bv[785] =1
print("The number of bits set: " + str(bv.count_bits_sparse())) # 5
print("\nTesting Jaccard similarity and distance and Hamming distance:")
bv1 = BitVector(bitstring = '11111111')
bv2 = BitVector(bitstring = '00101011')
print("Jaccard similarity: " + str(bv1.jaccard_similarity(bv2))) # 0.5
print("Jaccard distance: " + str(bv1.jaccard_distance(bv2))) # 0.5
print("Hamming distance: " + str(bv1.hamming_distance(bv2))) # 4
print("\nTesting next_set_bit():")
bv = BitVector(bitstring = '00000000000001')
print(bv.next_set_bit(5)) # 13
bv = BitVector(bitstring = '000000000000001')
print(bv.next_set_bit(5)) # 14
bv = BitVector(bitstring = '0000000000000001')
print(bv.next_set_bit(5)) # 15
bv = BitVector(bitstring = '00000000000000001')
print(bv.next_set_bit(5)) # 16
print("\nTesting rank_of_bit_set_at_index():")
bv = BitVector(bitstring = '01010101011100')
print(bv.rank_of_bit_set_at_index( 10 )) # 6
print("\nTesting is_power_of_2():")
bv = BitVector(bitstring = '10000000001110')
print("int value: " + str(int(bv))) # 826
print(bv.is_power_of_2()) # False
print("\nTesting is_power_of_2_sparse():")
print(bv.is_power_of_2_sparse()) # False
print("\nTesting reverse():")
bv = BitVector(bitstring = '0001100000000000001')
print("original bv: " + str(bv)) # 0001100000000000001
print("reversed bv: " + str(bv.reverse())) # 1000000000000011000
print("\nTesting Greatest Common Divisor (gcd):")
bv1 = BitVector(bitstring = '01100110')
print("first arg bv: " + str(bv1) + " of int value: " + str(int(bv1))) #102
bv2 = BitVector(bitstring = '011010')
print("second arg bv: " + str(bv2) + " of int value: " + str(int(bv2)))# 26
bv = bv1.gcd(bv2)
print("gcd bitvec is: " + str(bv) + " of int value: " + str(int(bv))) # 2
print("\nTesting multiplicative_inverse:")
bv_modulus = BitVector(intVal = 32)
print("modulus is bitvec: " + str(bv_modulus) + " of int value: " + str(int(bv_modulus)))
bv = BitVector(intVal = 17)
print("bv: " + str(bv) + " of int value: " + str(int(bv)))
result = bv.multiplicative_inverse(bv_modulus)
if result is not None:
print("MI bitvec is: " + str(result) + " of int value: " + str(int(result)))
else: print("No multiplicative inverse in this case")
# 17
print("\nTest multiplication in GF(2):")
a = BitVector(bitstring='0110001')
b = BitVector(bitstring='0110')
c = a.gf_multiply(b)
print("Product of a=" + str(a) + " b=" + str(b) + " is " + str(c))
# 00010100110
print("\nTest division in GF(2^n):")
mod = BitVector(bitstring='100011011') # AES modulus
n = 8
a = BitVector(bitstring='11100010110001')
quotient, remainder = a.gf_divide(mod, n)
print("Dividing a=" + str(a) + " by mod=" + str(mod) + " in GF(2^8) returns the quotient " \
+ str(quotient) + " and the remainder " + str(remainder))
# 10001111
print("\nTest modular multiplication in GF(2^n):")
modulus = BitVector(bitstring='100011011') # AES modulus
n = 8
a = BitVector(bitstring='0110001')
b = BitVector(bitstring='0110')
c = a.gf_multiply_modular(b, modulus, n)
print("Modular product of a=" + str(a) + " b=" + str(b) + " in GF(2^8) is " + str(c))
# 10100110
print("\nTest multiplicative inverses in GF(2^3) with " + \
"modulus polynomial = x^3 + x + 1:")
print("Find multiplicative inverse of a single bit array")
modulus = BitVector(bitstring='100011011') # AES modulus
n = 8
a = BitVector(bitstring='00110011')
mi = a.gf_MI(modulus,n)
print("Multiplicative inverse of " + str(a) + " in GF(2^8) is " + str(mi))
print("\nIn the following three rows shown, the first row shows the " +\
"\nbinary code words, the second the multiplicative inverses," +\
"\nand the third the product of a binary word with its" +\
"\nmultiplicative inverse:\n")
mod = BitVector(bitstring = '1011')
n = 3
bitarrays = [BitVector(intVal=x, size=n) for x in range(1,2**3)]
mi_list = [x.gf_MI(mod,n) for x in bitarrays]
mi_str_list = [str(x.gf_MI(mod,n)) for x in bitarrays]
print("bit arrays in GF(2^3): " + str([str(x) for x in bitarrays]))
print("multiplicati_inverses: " + str(mi_str_list))
products = [ str(bitarrays[i].gf_multiply_modular(mi_list[i], mod, n)) \
for i in range(len(bitarrays)) ]
print("bit_array * multi_inv: " + str(products))
# UNCOMMENT THE FOLLOWING LINES FOR
# DISPLAYING ALL OF THE MULTIPLICATIVE
# INVERSES IN GF(2^8) WITH THE AES MODULUS:
# print("\nMultiplicative inverses in GF(2^8) with " + \
# "modulus polynomial x^8 + x^4 + x^3 + x + 1:")
# print("\n(This may take a few seconds)\n")
# mod = BitVector(bitstring = '100011011')
# n = 8
# bitarrays = [BitVector(intVal=x, size=n) for x in range(1,2**8)]
# mi_list = [x.gf_MI(mod,n) for x in bitarrays]
# mi_str_list = [str(x.gf_MI(mod,n)) for x in bitarrays]
# print("\nMultiplicative Inverses:\n\n" + str(mi_str_list))
# products = [ str(bitarrays[i].gf_multiply_modular(mi_list[i], mod, n)) \
# for i in range(len(bitarrays)) ]
# print("\nShown below is the product of each binary code word " +\
# "in GF(2^3) and its multiplicative inverse:\n\n")
# print(products)
print("\nExperimenting with runs():")
bv = BitVector(bitlist = (1, 0, 0, 1))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
bv = BitVector(bitlist = (1, 0))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
bv = BitVector(bitlist = (0, 1))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
bv = BitVector(bitlist = (0, 0, 0, 1))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
bv = BitVector(bitlist = (0, 1, 1, 0))
print("For bit vector: " + str(bv))
print(" the runs are: " + str(bv.runs()))
print("\nExperiments with chained invocations of circular shifts:")
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(bv)
bv >> 1
print(bv)
bv >> 1 >> 1
print(bv)
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(bv)
bv << 1
print(bv)
bv << 1 << 1
print(bv)
print("\nExperiments with chained invocations of NON-circular shifts:")
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(bv)
bv.shift_right(1)
print(bv)
bv.shift_right(1).shift_right(1)
print(bv)
bv = BitVector(bitlist = (1,1, 1, 0, 0, 1))
print(bv)
bv.shift_left(1)
print(bv)
bv.shift_left(1).shift_left(1)
print(bv)
# UNCOMMENT THE FOLLOWING LINES TO TEST THE
# PRIMALITY TESTING METHOD. IT SHOULD SHOW
# THAT ALL OF THE FOLLOWING NUMBERS ARE PRIME:
# print("\nExperiments with primality testing. If a number is not prime, its primality " +
# "test output must be zero. Otherwise, it should a number very close to 1.0.")
# primes = [179, 233, 283, 353, 419, 467, 547, 607, 661, 739, 811, 877, \
# 947, 1019, 1087, 1153, 1229, 1297, 1381, 1453, 1523, 1597, \
# 1663, 1741, 1823, 1901, 7001, 7109, 7211, 7307, 7417, 7507, \
# 7573, 7649, 7727, 7841]
# for p in primes:
# bv = BitVector(intVal = p)
# check = bv.test_for_primality()
# print("The primality test for " + str(p) + ": " + str(check))
print("\nGenerate 32-bit wide candidate for primality testing:")
bv = BitVector(intVal = 0)
bv = bv.gen_rand_bits_for_prime(32)
print(bv)
check = bv.test_for_primality()
print("The primality test for " + str(int(bv)) + ": " + str(check))
| gpl-3.0 |
ip-tools/ip-navigator | patzilla/util/web/uwsgi/uwsgidecorators.py | 1 | 9668 | # https://github.com/unbit/uwsgi/blob/master/uwsgidecorators.py
from functools import partial
import sys
from threading import Thread
# cPickle is the faster C implementation on Python 2; fall back to the
# pure-Python pickle module everywhere else.
try:
    import cPickle as pickle
except:
    import pickle

import uwsgi

# Signals, spoolers and mules are all managed by the uWSGI master process,
# so nothing in this module works without one.
if uwsgi.masterpid() == 0:
    raise Exception(
        "you have to enable the uWSGI master process to use this module")

spooler_functions = {}   # spool task name -> callable (see manage_spool_request)
mule_functions = {}      # mulefunc name -> callable (see mule_msg_dispatcher)
postfork_chain = []      # callables run in every process right after fork()
def get_free_signal():
    """Return the lowest uWSGI signal number not yet registered.

    Raises Exception when all 256 signal slots are already taken.
    """
    for candidate in range(256):
        if not uwsgi.signal_registered(candidate):
            return candidate
    raise Exception("No free uwsgi signal available")
def manage_spool_request(vars):
    """Dispatch one spooler request to the registered @spool* function.

    ``vars`` is the dict of spool variables handed over by uWSGI.
    Requests created with ``pass_arguments=True`` carry pickled
    ``args``/``kwargs`` and the original call is rebuilt; raw requests
    receive the whole variable dict.  The integer returned tells uWSGI
    whether the task succeeded or must be retried.
    """
    # NOTE: ``vars`` shadows the builtin, but this is the signature uWSGI
    # invokes us with, so the parameter name is kept for compatibility.
    f = spooler_functions[vars['ud_spool_func']]
    if 'args' in vars:
        # Decorator-made request: rebuild and perform the original call.
        args = pickle.loads(vars.pop('args'))
        kwargs = pickle.loads(vars.pop('kwargs'))
        ret = f(*args, **kwargs)
    else:
        ret = f(vars)
    if 'ud_spool_ret' not in vars:
        # Raw @spoolraw request: the function's own return value is the
        # spool status code.
        return ret
    # @spool / @spoolforever pre-set the status code.
    return int(vars['ud_spool_ret'])
def postfork_chain_hook():
    """Run every registered post-fork callable in registration order."""
    for hook in postfork_chain:
        hook()
# Wire the module into uWSGI: every spool request goes through
# manage_spool_request, and each freshly forked process runs the chain.
uwsgi.spooler = manage_spool_request
uwsgi.post_fork_hook = postfork_chain_hook
class postfork(object):
    """Decorator: run the wrapped function in each process after fork().

    Supports both the bare ``@postfork`` form (runs everywhere) and the
    parameterised ``@postfork(wid)`` form (runs only in worker ``wid``).
    """
    def __init__(self, f):
        if callable(f):
            # Bare usage: ``f`` is the decorated function itself.
            self.wid = 0
            self.f = f
        else:
            # Parameterised usage: ``f`` is the worker id; the real
            # function is captured later by __call__.
            self.f = None
            self.wid = f
        postfork_chain.append(self)
    def __call__(self, *args, **kwargs):
        if self.f:
            # Invoked by the post-fork hook: honour the worker-id filter.
            if self.wid > 0 and self.wid != uwsgi.worker_id():
                return
            return self.f()
        # Parameterised usage, first call: capture the decorated function.
        self.f = args[0]
class _spoolraw(object):
    """Base decorator turning a function into a uWSGI spooler task.

    Calling the decorated function enqueues a spool request instead of
    executing it; the spooler process later runs it through
    ``manage_spool_request``.
    """
    def __call__(self, *args, **kwargs):
        arguments = self.base_dict
        if not self.pass_arguments:
            # Legacy mode: first positional argument is a dict of raw
            # spool variables, merged with any keyword arguments.
            if len(args) > 0:
                arguments.update(args[0])
            if kwargs:
                arguments.update(kwargs)
        else:
            # pass_arguments mode: spooler control keys are split off and
            # the remaining call is pickled so it can be rebuilt later.
            spooler_args = {}
            for key in ('message_dict', 'spooler', 'priority', 'at', 'body'):
                if key in kwargs:
                    spooler_args.update({key: kwargs.pop(key)})
            arguments.update(spooler_args)
            arguments.update({'args': pickle.dumps(args), 'kwargs': pickle.dumps(kwargs)})
        return uwsgi.spool(arguments)

    # For backward compatibility (uWSGI < 1.9.13)
    def spool(self, *args, **kwargs):
        return self.__class__.__call__(self, *args, **kwargs)

    def __init__(self, f, pass_arguments):
        if not 'spooler' in uwsgi.opt:
            raise Exception(
                "you have to enable the uWSGI spooler to use @%s decorator" % self.__class__.__name__)
        self.f = f
        # Register under the function name so the spooler can find it.
        spooler_functions[self.f.__name__] = self.f
        # For backward compatibility (uWSGI < 1.9.13)
        self.f.spool = self.__call__
        self.pass_arguments = pass_arguments
        self.base_dict = {'ud_spool_func': self.f.__name__}
class _spool(_spoolraw):
    """Spool decorator whose tasks always report SPOOL_OK (never retried)."""
    def __call__(self, *args, **kwargs):
        self.base_dict.update(ud_spool_ret=str(uwsgi.SPOOL_OK))
        return super(_spool, self).__call__(*args, **kwargs)
class _spoolforever(_spoolraw):
    """Spool decorator whose tasks always report SPOOL_RETRY (re-run forever)."""
    def __call__(self, *args, **kwargs):
        self.base_dict.update(ud_spool_ret=str(uwsgi.SPOOL_RETRY))
        return super(_spoolforever, self).__call__(*args, **kwargs)
def spool_decorate(f=None, pass_arguments=False, _class=_spoolraw):
    """Build a spool decorator of type ``_class``.

    Handles both the bare form (``f`` is the function) and the
    parameterised form (``f`` is None; a partial is returned).
    """
    if f:
        return _class(f, pass_arguments)
    return partial(_class, pass_arguments=pass_arguments)
def spoolraw(f=None, pass_arguments=False):
    """Decorator: spool task whose own return value is the spool code."""
    return spool_decorate(f, pass_arguments)


def spool(f=None, pass_arguments=False):
    """Decorator: spool task that is always considered successful."""
    return spool_decorate(f, pass_arguments, _spool)


def spoolforever(f=None, pass_arguments=False):
    """Decorator: spool task that is retried forever (SPOOL_RETRY)."""
    return spool_decorate(f, pass_arguments, _spoolforever)
class mulefunc(object):
    """Decorator: route calls of the wrapped function to a uWSGI mule.

    Works bare (``@mulefunc``, any mule) or parameterised
    (``@mulefunc(3)``, a specific mule).  The call is pickled into a mule
    message which ``mule_msg_dispatcher`` executes in the mule process.
    """
    def __init__(self, f):
        if callable(f):
            # Bare usage: register the function; mule 0 means "any mule".
            self.fname = f.__name__
            self.mule = 0
            mule_functions[f.__name__] = f
        else:
            # Parameterised usage: ``f`` is the mule number; the real
            # function is captured later by __call__.
            self.mule = f
            self.fname = None
    def real_call(self, *args, **kwargs):
        # Serialise the call and ship it to the target mule.
        uwsgi.mule_msg(pickle.dumps(
            {
                'service': 'uwsgi_mulefunc',
                'func': self.fname,
                'args': args,
                'kwargs': kwargs
            }
        ), self.mule)
    def __call__(self, *args, **kwargs):
        if not self.fname:
            # Parameterised usage, first call: capture the function.
            self.fname = args[0].__name__
            mule_functions[self.fname] = args[0]
            return self.real_call
        return self.real_call(*args, **kwargs)
def mule_msg_dispatcher(message):
    """Decode one mule message and run the registered @mulefunc target."""
    payload = pickle.loads(message)
    if payload['service'] != 'uwsgi_mulefunc':
        return
    return mule_functions[payload['func']](*payload['args'], **payload['kwargs'])


# Every mule message received by uWSGI flows through the dispatcher.
uwsgi.mule_msg_hook = mule_msg_dispatcher
class rpc(object):
    """Decorator: expose the wrapped function as a uWSGI RPC endpoint."""
    def __init__(self, name):
        self.name = name
    def __call__(self, f):
        uwsgi.register_rpc(self.name, f)
        return f
class farm_loop(object):
    """Postfork callable: endless message pump feeding ``f`` in one farm.

    Does nothing outside a mule belonging to the given farm.
    """
    def __init__(self, f, farm):
        self.f = f
        self.farm = farm
    def __call__(self):
        if uwsgi.mule_id() == 0 or not uwsgi.in_farm(self.farm):
            return
        while True:
            message = uwsgi.farm_get_msg()
            if message:
                self.f(message)
class farm(object):
    """Decorator: make the wrapped function consume messages of a farm.

    ``**kwargs`` is accepted but ignored (kept for signature compatibility).
    """
    def __init__(self, name=None, **kwargs):
        self.name = name
    def __call__(self, f):
        # NOTE(review): no ``return f`` -- the decorated name becomes None
        # and the function is only reachable through the postfork chain.
        postfork_chain.append(farm_loop(f, self.name))
class mule_brain(object):
    """Postfork callable: run ``f`` once inside mule number ``num``.

    Any exception is reported through sys.excepthook and terminates the
    mule process with exit status 1.
    """
    def __init__(self, f, num):
        self.f = f
        self.num = num
    def __call__(self):
        if uwsgi.mule_id() == self.num:
            try:
                self.f()
            except:
                exc = sys.exc_info()
                sys.excepthook(exc[0], exc[1], exc[2])
                sys.exit(1)


class mule_brainloop(mule_brain):
    """Like mule_brain, but restarts ``f`` in an endless loop."""
    def __call__(self):
        if uwsgi.mule_id() == self.num:
            while True:
                try:
                    self.f()
                except:
                    exc = sys.exc_info()
                    sys.excepthook(exc[0], exc[1], exc[2])
                    sys.exit(1)
class mule(object):
    """Decorator: run the wrapped function once in mule ``num`` after fork."""
    def __init__(self, num):
        self.num = num
    def __call__(self, f):
        # NOTE(review): no ``return f`` -- the decorated name becomes None;
        # the function lives on only inside the postfork chain.
        postfork_chain.append(mule_brain(f, self.num))


class muleloop(mule):
    """Decorator: run the wrapped function in an endless loop in mule ``num``."""
    def __call__(self, f):
        postfork_chain.append(mule_brainloop(f, self.num))
class mulemsg_loop(object):
    """Postfork callable: endless message pump for one specific mule."""
    def __init__(self, f, num):
        self.f = f
        self.num = num
    def __call__(self):
        if uwsgi.mule_id() != self.num:
            return
        while True:
            message = uwsgi.mule_get_msg()
            if message:
                self.f(message)
class mulemsg(object):
    """Decorator: feed every message of mule ``num`` to the wrapped function."""
    def __init__(self, num):
        self.num = num
    def __call__(self, f):
        postfork_chain.append(mulemsg_loop(f, self.num))
class signal(object):
    """Decorator: register the wrapped function for uWSGI signal ``num``.

    Only the ``target`` keyword argument is honoured; it selects which
    process class handles the signal.
    """
    def __init__(self, num, **kwargs):
        self.num = num
        self.target = kwargs.get('target', '')
    def __call__(self, f):
        uwsgi.register_signal(self.num, self.target, f)
        return f
class timer(object):
    """Decorator: run the wrapped function every ``secs`` seconds.

    ``signum`` picks an explicit signal slot (default: first free one);
    ``target`` selects which process handles the signal.
    """
    def __init__(self, secs, **kwargs):
        self.num = kwargs.get('signum', get_free_signal())
        self.secs = secs
        self.target = kwargs.get('target', '')
    def __call__(self, f):
        uwsgi.register_signal(self.num, self.target, f)
        uwsgi.add_timer(self.num, self.secs)
        return f
class cron(object):
    """Decorator: schedule the wrapped function via the uWSGI cron facility.

    The five positional arguments follow uWSGI cron field semantics.
    ``signum``/``target`` keyword args mirror the @signal decorator.
    """
    def __init__(self, minute, hour, day, month, dayweek, **kwargs):
        self.num = kwargs.get('signum', get_free_signal())
        self.minute = minute
        self.hour = hour
        self.day = day
        self.month = month
        self.dayweek = dayweek
        self.target = kwargs.get('target', '')
    def __call__(self, f):
        uwsgi.register_signal(self.num, self.target, f)
        uwsgi.add_cron(self.num, self.minute, self.hour,
                       self.day, self.month, self.dayweek)
        return f
class rbtimer(object):
    """Decorator: like @timer, but backed by uWSGI's red-black-tree timers."""
    def __init__(self, secs, **kwargs):
        self.num = kwargs.get('signum', get_free_signal())
        self.secs = secs
        self.target = kwargs.get('target', '')
    def __call__(self, f):
        uwsgi.register_signal(self.num, self.target, f)
        uwsgi.add_rb_timer(self.num, self.secs)
        return f
class filemon(object):
    """Decorator: invoke the wrapped function when ``fsobj`` changes.

    ``fsobj`` is handed to uWSGI's file monitor; ``signum``/``target``
    mirror the @signal decorator.
    """
    def __init__(self, fsobj, **kwargs):
        self.num = kwargs.get('signum', get_free_signal())
        self.fsobj = fsobj
        self.target = kwargs.get('target', '')
    def __call__(self, f):
        uwsgi.register_signal(self.num, self.target, f)
        uwsgi.add_file_monitor(self.num, self.fsobj)
        return f
class erlang(object):
    """Decorator: register the wrapped function as an Erlang process handler."""
    def __init__(self, name):
        self.name = name
    def __call__(self, f):
        uwsgi.erlang_register_process(self.name, f)
        return f
class lock(object):
    """Decorator: serialise calls to the wrapped function via uwsgi.lock().

    The spooler process skips the call entirely (returns None) instead of
    competing for the lock with the workers.
    """
    def __init__(self, f):
        self.f = f
    def __call__(self, *args, **kwargs):
        # ensure the spooler will not call it
        if uwsgi.i_am_the_spooler():
            return
        uwsgi.lock()
        try:
            return self.f(*args, **kwargs)
        finally:
            # Always release, even when the wrapped function raises.
            uwsgi.unlock()
class thread(object):
    """Decorator: run each call of the wrapped function in a daemon thread.

    The call returns immediately with the original function object; the
    work happens in the background.
    """
    def __init__(self, f):
        self.f = f
    def __call__(self, *args):
        worker = Thread(target=self.f, args=args)
        worker.daemon = True
        worker.start()
        return self.f
class harakiri(object):
    """Decorator: cap each call of the wrapped function at ``seconds``.

    A per-request user harakiri timeout is armed around the call and
    cleared afterwards (only on normal return, matching the original).
    """
    def __init__(self, seconds):
        self.s = seconds
    def real_call(self, *args, **kwargs):
        uwsgi.set_user_harakiri(self.s)
        result = self.f(*args, **kwargs)
        uwsgi.set_user_harakiri(0)
        return result
    def __call__(self, f):
        self.f = f
        return self.real_call
| agpl-3.0 |
r3tard/BartusBot | lib/Crypto/SelfTest/Protocol/test_rfc1751.py | 132 | 2208 | #
# Test script for Crypto.Util.RFC1751.
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import binascii
import unittest
from Crypto.Util import RFC1751
from Crypto.Util.py3compat import *
# (hex key, RFC 1751 English encoding) pairs used by both direction tests.
test_data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
             ('CCAC2AED591056BE4F90FD441C534766',
              'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
             ('EFF81F9BFBC65350920CDD7416DE8009',
              'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL')
             ]
class RFC1751Test_k2e (unittest.TestCase):
    def runTest (self):
        "Check converting keys to English"
        for hex_key, expected_words in test_data:
            raw_key = binascii.a2b_hex(b(hex_key))
            self.assertEqual(RFC1751.key_to_english(raw_key), expected_words)
class RFC1751Test_e2k (unittest.TestCase):
    def runTest (self):
        "Check converting English strings to keys"
        for hex_key, words in test_data:
            expected_key = binascii.a2b_hex(b(hex_key))
            self.assertEqual(RFC1751.english_to_key(words), expected_key)
# class RFC1751Test
def get_tests(config=None):
    """Return the test cases of this module.

    ``config`` is accepted only for interface compatibility with the
    other Crypto self-test modules and is never read.  The former
    mutable-dict default (``config={}``) was replaced with ``None`` per
    standard Python practice; since the argument is unused, behaviour is
    unchanged for all callers.
    """
    return [RFC1751Test_k2e(), RFC1751Test_e2k()]
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
ericzundel/pants | tests/python/pants_test/backend/graph_info/tasks/test_list_owners.py | 15 | 3524 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.graph_info.tasks.list_owners import ListOwners
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class ListOwnersTest(ConsoleTaskTestBase):
  """Tests for the ListOwners console task, which maps source files
  (passed as passthru args) to the targets that own them, in plain-text
  or JSON output format.

  NOTE(review): the indentation inside the triple-quoted fixture/expected
  strings was lost upstream and has been reconstructed to be consistent
  with the dedent()/lstrip('\\n') usage -- confirm against task output.
  """

  @classmethod
  def task_type(cls):
    return ListOwners

  @property
  def alias_groups(self):
    # Only python_library targets are needed by these fixtures.
    return BuildFileAliases(targets={'python_library': PythonLibrary})

  def setUp(self):
    super(ListOwnersTest, self).setUp()

    # Local helper: write a BUILD file declaring one python_library that
    # owns the given source files.
    def add_to_build_file(path, name, *sources):
      all_sources = ["'{}'".format(source) for source in list(sources)]
      self.add_to_build_file(path, dedent("""
          python_library(name='{name}',
            sources=[{all_sources}]
          )
          """.format(name=name, all_sources=','.join(all_sources))))

    # Fixture layout: d.txt is owned by two targets, e owns several
    # files, and h.txt is reachable via targets in both 'a' and 'a/c'.
    add_to_build_file('a', 'b', 'b.txt')
    add_to_build_file('a/c', 'd', 'd.txt')
    add_to_build_file('a/c', 'd2', 'd.txt')
    add_to_build_file('a/c', 'e', 'e.txt', 'f.txt', 'g.txt')
    add_to_build_file('a', 'c', 'c/c.txt')
    add_to_build_file('a', 'h', 'c/h.txt')
    add_to_build_file('a/c', 'h', 'h.txt')

  def test_no_targets(self):
    # a/a.txt is owned by nothing: no output in text mode.
    self.assert_console_output(passthru_args=['a/a.txt'])

  def test_no_targets_output_format_json(self):
    self.assert_console_output(dedent("""
        {
            "a/a.txt": []
        }""").lstrip('\n'),
      passthru_args=['a/a.txt'],
      options={'output_format': 'json'}
    )

  def test_one_target(self):
    self.assert_console_output('a:b', passthru_args=['a/b.txt'])

  def test_one_target_output_format_json(self):
    self.assert_console_output(dedent("""
        {
            "a/b.txt": [
                "a:b"
            ]
        }""").lstrip('\n'),
      passthru_args=['a/b.txt'],
      options={'output_format': 'json'}
    )

  def test_multiple_targets(self):
    self.assert_console_output('a/c:d', 'a/c:d2', passthru_args=['a/c/d.txt'])

  def test_multiple_targets_output_format_json(self):
    self.assert_console_output(dedent("""
        {
            "a/c/d.txt": [
                "a/c:d",
                "a/c:d2"
            ]
        }""").lstrip('\n'),
      passthru_args=['a/c/d.txt'],
      options={'output_format': 'json'}
    )

  def test_target_in_parent_directory(self):
    self.assert_console_output('a:c', passthru_args=['a/c/c.txt'])

  def test_multiple_targets_one_in_parent_directory(self):
    self.assert_console_output('a:h', 'a/c:h', passthru_args=['a/c/h.txt'])

  def test_target_with_multiple_sources(self):
    self.assert_console_output('a/c:e', passthru_args=['a/c/e.txt'])

  def test_no_sources(self):
    # Text mode requires exactly one source argument.
    self.assert_console_raises(TaskError, passthru_args=[])

  def test_too_many_sources_output_format_text(self):
    self.assert_console_raises(TaskError, passthru_args=['a/a.txt', 'a/b.txt'])

  def test_multiple_sources_output_format_json(self):
    # JSON mode accepts multiple sources, including unowned ones.
    self.assert_console_output(dedent("""
        {
            "a/b.txt": [
                "a:b"
            ],
            "a/a.txt": []
        }""").lstrip('\n'),
      passthru_args=['a/a.txt', 'a/b.txt'],
      options={'output_format': 'json'}
    )
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.