# ============================================================
# repo: JeckoHeroOrg/three.js
# path: utils/exporters/blender/addons/io_three/exporter/api/object.py
# license: mit
# ============================================================
import math
import mathutils
import bpy
from bpy import data, context, types
from bpy_extras.io_utils import axis_conversion
from .. import constants, logger, utilities, exceptions
from .constants import (
MESH,
EMPTY,
ARMATURE,
LAMP,
SPOT,
SUN,
POINT,
HEMI,
CAMERA,
PERSP,
ORTHO,
RENDER,
NO_SHADOW,
ZYX
)
# Blender doesn't seem to have a good way to link a mesh back to the
# objects that are instancing it, or it is bloody obvious and I haven't
# discovered it yet. This manifest serves as a way for me to map a mesh
# node to the object nodes that are using it.
_MESH_MAP = {}
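# For illustration (object names are hypothetical), the map ends up shaped
# like: {'CubeGeometry': [bpy.data.objects['Cube'],
#                         bpy.data.objects['Cube.001']]}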
def _object(func):
"""
:param func:
"""
def inner(arg, *args, **kwargs):
"""
:param arg:
:param *args:
:param **kwargs:
"""
if isinstance(arg, types.Object):
obj = arg
else:
obj = data.objects[arg]
return func(obj, *args, **kwargs)
return inner
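# Example (a sketch; 'Cube' is an assumed object name): wrapped functions
# accept either a bpy object or its name, so these calls are equivalent:
#   name(data.objects['Cube'])
#   name('Cube')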
def clear_mesh_map():
"""Clears the mesh map, required on initialization"""
_MESH_MAP.clear()
def assemblies(valid_types, options):
"""
:param valid_types:
:param options:
"""
logger.debug('object.assemblies(%s)', valid_types)
for obj in data.objects:
# rigged assets are parented under armature nodes
if obj.parent and obj.parent.type != ARMATURE:
continue
if obj.parent and obj.parent.type == ARMATURE:
logger.info('Has armature parent %s', obj.name)
if _valid_node(obj, valid_types, options):
yield obj.name
@_object
def cast_shadow(obj):
"""
:param obj:
"""
logger.debug('object.cast_shadow(%s)', obj)
if obj.type == LAMP:
if obj.data.type in (SPOT, SUN):
ret = obj.data.shadow_method != NO_SHADOW
else:
logger.info("%s is a lamp but this lamp type does not "
"have supported shadows in ThreeJS", obj.name)
ret = None
return ret
elif obj.type == MESH:
mat = material(obj)
if mat:
return data.materials[mat].use_cast_shadows
else:
return False
@_object
def children(obj, valid_types):
"""
:param obj:
:param valid_types:
"""
logger.debug('object.children(%s, %s)', obj, valid_types)
for child in obj.children:
if child.type in valid_types:
yield child.name
@_object
def material(obj):
"""
:param obj:
"""
logger.debug('object.material(%s)', obj)
try:
return obj.material_slots[0].name
except IndexError:
pass
@_object
def mesh(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.mesh(%s, %s)', obj, options)
if obj.type != MESH:
return
for mesh_, objects in _MESH_MAP.items():
if obj in objects:
return mesh_
else:
logger.debug('Could not map object, updating manifest')
mesh_ = extract_mesh(obj, options)
if len(mesh_.tessfaces) != 0:
manifest = _MESH_MAP.setdefault(mesh_.name, [])
manifest.append(obj)
mesh_name = mesh_.name
else:
# possibly just being used as a controller
logger.info('Object %s has no faces', obj.name)
mesh_name = None
return mesh_name
@_object
def name(obj):
"""
:param obj:
"""
return obj.name
@_object
def node_type(obj):
"""
:param obj:
"""
logger.debug('object.node_type(%s)', obj)
# standard transformation nodes are inferred
if obj.type == MESH:
return constants.MESH.title()
elif obj.type == EMPTY:
return constants.OBJECT.title()
dispatch = {
LAMP: {
POINT: constants.POINT_LIGHT,
SUN: constants.DIRECTIONAL_LIGHT,
SPOT: constants.SPOT_LIGHT,
HEMI: constants.HEMISPHERE_LIGHT
},
CAMERA: {
PERSP: constants.PERSPECTIVE_CAMERA,
ORTHO: constants.ORTHOGRAPHIC_CAMERA
}
}
try:
return dispatch[obj.type][obj.data.type]
except AttributeError:
msg = 'Invalid type: %s' % obj.type
raise exceptions.UnsupportedObjectType(msg)
def nodes(valid_types, options):
"""
:param valid_types:
:param options:
"""
for obj in data.objects:
if _valid_node(obj, valid_types, options):
yield obj.name
@_object
def position(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.position(%s)', obj)
vector = matrix(obj, options).to_translation()
return (vector.x, vector.y, vector.z)
@_object
def receive_shadow(obj):
"""
:param obj:
"""
if obj.type == MESH:
mat = material(obj)
if mat:
return data.materials[mat].use_shadows
else:
return False
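# Blender scenes are Z-up while three.js is Y-up; this matrix converts
# world transforms between the two axis conventions.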
AXIS_CONVERSION = axis_conversion(to_forward='Z', to_up='Y').to_4x4()
@_object
def matrix(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.matrix(%s)', obj)
if options.get(constants.HIERARCHY, False) and obj.parent:
parent_inverted = obj.parent.matrix_world.inverted(mathutils.Matrix())
return parent_inverted * obj.matrix_world
else:
return AXIS_CONVERSION * obj.matrix_world
@_object
def rotation(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.rotation(%s)', obj)
vector = matrix(obj, options).to_euler(ZYX)
return (vector.x, vector.y, vector.z)
@_object
def scale(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.scale(%s)', obj)
vector = matrix(obj, options).to_scale()
return (vector.x, vector.y, vector.z)
@_object
def select(obj):
"""
:param obj:
"""
obj.select = True
@_object
def unselect(obj):
"""
:param obj:
"""
obj.select = False
@_object
def visible(obj):
"""
:param obj:
"""
logger.debug('object.visible(%s)', obj)
return obj.is_visible(context.scene)
def extract_mesh(obj, options, recalculate=False):
"""
:param obj:
:param options:
:param recalculate: (Default value = False)
"""
logger.debug('object.extract_mesh(%s, %s)', obj, options)
apply_modifiers = options.get(constants.APPLY_MODIFIERS, True)
if apply_modifiers:
bpy.ops.object.mode_set(mode='OBJECT')
mesh_node = obj.to_mesh(context.scene, apply_modifiers, RENDER)
# transfer the geometry type to the extracted mesh
mesh_node.THREE_geometry_type = obj.data.THREE_geometry_type
# now determine whether or not to export using the geometry type
# set globally from the exporter's options or to use the local
# override on the mesh node itself
opt_buffer = options.get(constants.GEOMETRY_TYPE)
opt_buffer = opt_buffer == constants.BUFFER_GEOMETRY
prop_buffer = mesh_node.THREE_geometry_type == constants.BUFFER_GEOMETRY
# if doing buffer geometry it is imperative to triangulate the mesh
if opt_buffer or prop_buffer:
original_mesh = obj.data
obj.data = mesh_node
logger.debug('swapped %s for %s',
original_mesh.name,
mesh_node.name)
bpy.ops.object.mode_set(mode='OBJECT')
obj.select = True
bpy.context.scene.objects.active = obj
logger.info('Applying triangulation to %s', obj.data.name)
bpy.ops.object.modifier_add(type='TRIANGULATE')
bpy.ops.object.modifier_apply(apply_as='DATA',
modifier='Triangulate')
obj.data = original_mesh
obj.select = False
# recalculate the normals to face outwards; this is usually
# best after applying modifiers, especially for something
# like the mirror modifier
if recalculate:
logger.info('Recalculating normals')
original_mesh = obj.data
obj.data = mesh_node
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent()
bpy.ops.object.editmode_toggle()
obj.data = original_mesh
if not options.get(constants.SCENE):
xrot = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
mesh_node.transform(xrot * obj.matrix_world)
# now generate a unique name
index = 0
while True:
if index == 0:
mesh_name = '%sGeometry' % obj.data.name
else:
mesh_name = '%sGeometry.%d' % (obj.data.name, index)
try:
data.meshes[mesh_name]
index += 1
except KeyError:
break
mesh_node.name = mesh_name
mesh_node.update(calc_tessface=True)
mesh_node.calc_normals()
mesh_node.calc_tessface()
scale_ = options.get(constants.SCALE, 1)
mesh_node.transform(mathutils.Matrix.Scale(scale_, 4))
return mesh_node
def objects_using_mesh(mesh_node):
"""
:param mesh_node:
:return: list of objects using the mesh
"""
logger.debug('object.objects_using_mesh(%s)', mesh_node)
for mesh_name, objects in _MESH_MAP.items():
if mesh_name == mesh_node.name:
return objects
else:
logger.warning('Could not find mesh mapping')
def prep_meshes(options):
"""Prep the mesh nodes. Preperation includes identifying:
- nodes that are on visible layers
- nodes that have export disabled
- nodes that have modifiers that need to be applied
:param options:
"""
logger.debug('object.prep_meshes(%s)', options)
mapping = {}
visible_layers = _visible_scene_layers()
for obj in data.objects:
if obj.type != MESH:
continue
# this is ideal for skipping controller or proxy nodes
# that may apply to a Blender scene but not a three.js scene
if not _on_visible_layer(obj, visible_layers):
logger.info('%s is not on a visible layer', obj.name)
continue
# if someone really insists on a visible node not being exportable
if not obj.THREE_export:
logger.info('%s export is disabled', obj.name)
continue
# need to apply modifiers before moving on, and before
# handling instancing. it is possible for two or more objects
# to instance the same mesh but not all use the same modifiers.
# this logic identifies an object with modifiers and extracts
# the mesh, making the mesh unique to that particular object
if len(obj.modifiers):
logger.info('%s has modifiers', obj.name)
mesh_node = extract_mesh(obj, options, recalculate=True)
_MESH_MAP[mesh_node.name] = [obj]
continue
logger.info('adding mesh %s.%s to prep',
obj.name, obj.data.name)
manifest = mapping.setdefault(obj.data.name, [])
manifest.append(obj)
# now associate the extracted mesh node with all the objects
# that are instancing it
for objects in mapping.values():
mesh_node = extract_mesh(objects[0], options)
_MESH_MAP[mesh_node.name] = objects
def extracted_meshes():
"""
:return: names of extracted mesh nodes
"""
logger.debug('object.extracted_meshes()')
return [key for key in _MESH_MAP.keys()]
def _on_visible_layer(obj, visible_layers):
"""
:param obj:
:param visible_layers:
"""
is_visible = False
for index, layer in enumerate(obj.layers):
if layer and index in visible_layers:
is_visible = True
break
if not is_visible:
logger.info('%s is on a hidden layer', obj.name)
return is_visible
def _visible_scene_layers():
"""
:return: list of visible layer indices
"""
visible_layers = []
for index, layer in enumerate(context.scene.layers):
if layer:
visible_layers.append(index)
return visible_layers
def _valid_node(obj, valid_types, options):
"""
:param obj:
:param valid_types:
:param options:
"""
if obj.type not in valid_types:
return False
# skip objects that are not on visible layers
visible_layers = _visible_scene_layers()
if not _on_visible_layer(obj, visible_layers):
return False
try:
export = obj.THREE_export
except AttributeError:
export = True
if not export:
return False
mesh_node = mesh(obj, options)
is_mesh = obj.type == MESH
# skip objects for which a mesh could not be resolved
if is_mesh and not mesh_node:
return False
# secondary test; if a mesh node was resolved but no
# faces are detected then bow out
if is_mesh:
mesh_node = data.meshes[mesh_node]
if len(mesh_node.tessfaces) == 0:
return False
# if we get this far assume that the mesh is valid
return True

# ============================================================
# repo: pjz/Zappa
# path: test_settings.py
# license: mit
# ============================================================
APP_MODULE = 'tests.test_app'
APP_FUNCTION = 'hello_world'
DJANGO_SETTINGS = None
DEBUG = 'True'
LOG_LEVEL = 'DEBUG'
SCRIPT_NAME = 'hello_world'
DOMAIN = None
API_STAGE = 'ttt888'
PROJECT_NAME = 'ttt888'
REMOTE_ENV='s3://lmbda/test_env.json'
## test_env.json
#{
# "hello": "world"
#}
#
AWS_EVENT_MAPPING = {
'arn:aws:s3:1': 'test_settings.aws_s3_event',
'arn:aws:sns:1': 'test_settings.aws_sns_event',
'arn:aws:dynamodb:1': 'test_settings.aws_dynamodb_event',
'arn:aws:kinesis:1': 'test_settings.aws_kinesis_event',
'arn:aws:sqs:1': 'test_settings.aws_sqs_event'
}
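# These dummy ARNs are test fixtures, not real resources; the mapping
# associates event-source ARNs with the handler functions defined below,
# so that Zappa's handler can pick the right function for the ARN that
# triggered an invocation.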
ENVIRONMENT_VARIABLES={'testenv': 'envtest'}
AUTHORIZER_FUNCTION='test_settings.authorizer_event'
def prebuild_me():
print("This is a prebuild script!")
def callback(self):
print("this is a callback")
def aws_s3_event(event, content):
return "AWS S3 EVENT"
def aws_sns_event(event, content):
return "AWS SNS EVENT"
def aws_async_sns_event(arg1, arg2, arg3):
return "AWS ASYNC SNS EVENT"
def aws_dynamodb_event(event, content):
return "AWS DYNAMODB EVENT"
def aws_kinesis_event(event, content):
return "AWS KINESIS EVENT"
def aws_sqs_event(event, content):
return "AWS SQS EVENT"
def authorizer_event(event, content):
return "AUTHORIZER_EVENT"
def command():
print("command")

# ============================================================
# repo: dawagner/parameter-framework
# path: test/functional-tests-legacy/ACTCampaignEngine.py
# license: bsd-3-clause
# ============================================================
#!/usr/bin/python2
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Create a test suite for all tests about SET/GET commands.
Uses PfwSetTsetSuite to create a single instance of the HAL
for all the SET/GET commands.
These commands are tested using the methods of classes such as
"BooleanTestCase".
"""
import sys
import os
import unittest
import shutil
from Util import PfwUnitTestLib
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def testsRunner(testDirectory):
tests = unittest.defaultTestLoader.discover(testDirectory, pattern='t*.py')
runner = unittest.TextTestRunner(verbosity=2)
return runner.run(tests).wasSuccessful()
def main():
pfw_root = os.environ["PFW_ROOT"]
pfw_result = os.environ["PFW_RESULT"]
xml_path = "xml/configuration/ParameterFrameworkConfiguration.xml"
os.environ["PFW_TEST_TOOLS"] = os.path.dirname(os.path.abspath(__file__))
os.environ["PFW_TEST_CONFIGURATION"] = os.path.join(pfw_root, xml_path)
try:
# This directory must not exist. An exception will be raised if it does.
os.makedirs(pfw_result)
isAlive = os.path.join(pfw_result,"isAlive")
with open(isAlive, 'w') as fout:
fout.write('true')
needResync = os.path.join(pfw_result,"needResync")
with open(needResync, 'w') as fout:
fout.write('false')
success = testsRunner('PfwTestCase')
finally:
shutil.rmtree(pfw_result)
sys.exit(0 if success else 1)
if __name__ == "__main__":
main()

# ============================================================
# repo: chokribr/invenio
# path: invenio/ext/sqlalchemy/engines/mysql.py
# license: gpl-2.0
# ============================================================
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Mysql dialect."""
import base64
# SQLAlchemy
import sqlalchemy
import sqlalchemy.types as types
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import CreateIndex, PrimaryKeyConstraint
@compiles(CreateIndex, 'mysql')
def visit_create_index(element, compiler, **kw):
"""Return create index statement with defined length for text field.
example:
CREATE TABLE tableA
...
description TEXT(40)
...
INDEX ix_tableA_description ON (description(40))
"""
index = element.element
preparer = compiler.preparer
table = preparer.format_table(index.table)
name = preparer.quote(index.name, index.name.quote)
text = "ALTER TABLE %s ADD " % (table, )
if index.unique:
text += "UNIQUE "
text += "INDEX %s" % (name, )
lst = index.kwargs.get('mysql_length', None)
columns = []
for i, c in enumerate(index.columns):
cname = c.name
suffix = ''
if isinstance(lst, (list, tuple)) and len(lst) > i \
and lst[i] is not None:
suffix = '(%d)' % lst[i]
elif c.type != types.NULLTYPE \
and str(c.type).startswith('TEXT') \
and (c.type.length is not None):
suffix = '(%d)' % c.type.length
columns.append(cname + suffix)
text += '(' + ', '.join(columns) + ')'
if 'mysql_using' in index.kwargs:
using = index.kwargs['mysql_using']
if using is not None:
text += " USING %s" % (preparer.quote(using, index.quote))
return text
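# A sketch of the statement this compiles to for a 40-character text
# column (table and column names are hypothetical):
#   ALTER TABLE tableA ADD INDEX ix_tablea_description (description(40))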
@compiles(PrimaryKeyConstraint, 'mysql')
def visit_primary_key_constraint(*element):
"""
Return a create primary key constraint.
The constraint is created with a defined length
for text fields.
"""
constraint, compiler = element
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
compiler.preparer.format_constraint(constraint)
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(c.name +
(c.type != types.NULLTYPE
and (str(c.type).startswith('TEXT')
and (c.type.length is not None))
and '(%d)' % c.type.length
or '')
for c in constraint)
text += compiler.define_constraint_deferrability(constraint)
return text
@compiles(types.Text, 'sqlite')
@compiles(sqlalchemy.dialects.mysql.TEXT, 'sqlite')
def compile_text(element, compiler, **kw):
"""Redefine Text filed type for SQLite and MySQL."""
return 'TEXT'
@compiles(types.Binary, 'sqlite')
def compile_binary(element, compiler, **kw):
"""Redefine Binary filed type for SQLite."""
return 'BLOB'
@compiles(types.LargeBinary, 'sqlite')
def compile_largebinary(element, compiler, **kw):
"""Redefine LargeBinary filed type for SQLite."""
return 'LONGBLOB'
@compiles(types.Text, 'mysql')
@compiles(sqlalchemy.dialects.mysql.TEXT, 'mysql')
def compile_text(element, compiler, **kw):
"""Redefine Text filed type for MySQL."""
return 'TEXT'
@compiles(types.Binary, 'mysql')
def compile_binary(element, compiler, **kw):
"""Redefine Binary filed type for MySQL."""
return 'BLOB'
@compiles(types.LargeBinary, 'mysql')
def compile_largebinary(element, compiler, **kw):
"""Redefine LargeBinary filed type for MySQL."""
return 'LONGBLOB'
from sqlalchemy.types import TypeDecorator
class iBinary(TypeDecorator):
"""Printable binary typea."""
impl = types.Binary
def __init__(self, *arg, **kw):
"""Init iBinary type."""
self.__class__.impl = self.impl
TypeDecorator.__init__(self, *arg, **kw)
def process_bind_param(self, value, dialect):
"""Decode string before saving to database."""
return (value is not None) and base64.decodestring(value) or None
def process_result_value(self, value, dialect):
"""Encode binary data to string."""
return (value is not None) and base64.encodestring(value) or None
class iLargeBinary(TypeDecorator):
"""Printable large binary type."""
impl = types.LargeBinary
def __init__(self, *arg, **kw):
"""Init iLargeBinary type."""
self.__class__.impl = self.impl
TypeDecorator.__init__(self, *arg, **kw)
def process_bind_param(self, value, dialect):
"""Decode string before saving to database."""
return (value is not None) and base64.decodestring(value) or None
def process_result_value(self, value, dialect):
"""Encode binary data to string."""
return (value is not None) and base64.encodestring(value) or None
class iMediumBinary(TypeDecorator):
"""Printable large binary type."""
impl = sqlalchemy.dialects.mysql.MEDIUMBLOB
def __init__(self, *arg, **kw):
"""Init iMediumBinary type."""
self.__class__.impl = self.impl
TypeDecorator.__init__(self, *arg, **kw)
def process_bind_param(self, value, dialect):
"""Decode string before saving to database."""
return (value is not None) and base64.decodestring(value) or None
def process_result_value(self, value, dialect):
"""Encode binary data to string."""
return (value is not None) and base64.encodestring(value) or None

# ============================================================
# repo: MariusCC/packstack
# path: packstack/modules/ospluginutils.py
# license: apache-2.0
# ============================================================
import logging
import os
import re
from packstack.installer import basedefs
from packstack.installer.setup_controller import Controller
from packstack.installer.exceptions import PackStackError
controller = Controller()
PUPPET_DIR = os.path.join(basedefs.DIR_PROJECT_DIR, "puppet")
PUPPET_TEMPLATE_DIR = os.path.join(PUPPET_DIR, "templates")
class NovaConfig(object):
"""
Helper class to create puppet manifest entries for nova_config
"""
def __init__(self):
self.options = {}
def addOption(self, n, v):
self.options[n] = v
def getManifestEntry(self):
entry = ""
if not self.options:
return entry
entry += "nova_config{\n"
for k, v in self.options.items():
entry += ' "%s": value => "%s";\n' % (k, v)
entry += "}"
return entry
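# For example (hypothetical option), addOption('verbose', 'true') makes
# getManifestEntry() return:
#   nova_config{
#       "verbose": value => "true";
#   }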
class ManifestFiles(object):
def __init__(self):
self.filelist = []
self.data = {}
# consecutive manifest files that have the same marker can be
# installed in parallel, if on different servers
def addFile(self, filename, marker, data=''):
self.data[filename] = self.data.get(filename, '') + '\n' + data
for f, p in self.filelist:
if f == filename:
return
self.filelist.append((filename, marker))
def getFiles(self):
return [f for f in self.filelist]
def writeManifests(self):
"""
Write out the manifest data to disk; this should only be called once,
right before the puppet manifests are copied to the various servers
"""
os.mkdir(basedefs.PUPPET_MANIFEST_DIR, 0700)
for fname, data in self.data.items():
path = os.path.join(basedefs.PUPPET_MANIFEST_DIR, fname)
fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0600)
with os.fdopen(fd, 'w') as fp:
fp.write(data)
manifestfiles = ManifestFiles()
def getManifestTemplate(template_name):
with open(os.path.join(PUPPET_TEMPLATE_DIR, template_name)) as fp:
return fp.read() % controller.CONF
def appendManifestFile(manifest_name, data, marker=''):
manifestfiles.addFile(manifest_name, marker, data)
def gethostlist(CONF):
hosts = []
for key, value in CONF.items():
if key.endswith("_HOST"):
value = value.split('/')[0]
if value and value not in hosts:
hosts.append(value)
if key.endswith("_HOSTS"):
for host in value.split(","):
host = host.strip()
host = host.split('/')[0]
if host and host not in hosts:
hosts.append(host)
return hosts
_error_exceptions = [
# puppet preloads a provider using the mysql command before it is installed
re.compile('Command mysql is missing'),
# puppet preloads a database_grant provider which fails if /root/.my.cnf
# is missing; this is ok because it will be retried later if needed
re.compile('Could not prefetch database_grant provider.*?\\.my\\.cnf'),
# swift puppet module tries to install swift-plugin-s3; there is no such
# package on RHEL, fixed in the upstream puppet module
re.compile('yum.*?install swift-plugin-s3'),
]
def isErrorException(line):
for ee in _error_exceptions:
if ee.search(line):
return True
return False
_re_color = re.compile('\x1b.*?\d\dm')
_re_errorline = re.compile('err: | Syntax error at|^Duplicate definition:|'
'^No matching value for selector param|'
'^Parameter name failed:|Error: |^Invalid tag |'
'^Invalid parameter |^Duplicate declaration: '
'^Could not find resource |^Could not parse for ')
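# A line such as 'err: Could not apply catalog' (hypothetical puppet
# output) matches the pattern above and, unless whitelisted in
# _error_exceptions, aborts the run with a PackStackError.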
def validate_puppet_logfile(logfile):
"""
Check a puppet log file for errors and raise an error if we find any
"""
fp = open(logfile)
data = fp.read()
fp.close()
manifestfile = os.path.splitext(logfile)[0]
for line in data.split('\n'):
line = line.strip()
if _re_errorline.search(line) is None:
continue
message = _re_color.sub('', line) # remove colors
if isErrorException(line):
logging.info("Ignoring expected error during puppet run %s : %s" %
(manifestfile, message))
continue
message = "Error during puppet run : " + message
logging.error("Error during remote puppet apply of " + manifestfile)
logging.error(data)
raise PackStackError(message)

# ============================================================
# repo: bak1an/django
# path: tests/expressions/test_queryset_values.py
# license: bsd-3-clause
# ============================================================
from django.db.models.aggregates import Sum
from django.db.models.expressions import F
from django.test import TestCase
from .models import Company, Employee
class ValuesExpressionsTests(TestCase):
@classmethod
def setUpTestData(cls):
Company.objects.create(
name='Example Inc.', num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname='Joe', lastname='Smith', salary=10)
)
Company.objects.create(
name='Foobar Ltd.', num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname='Frank', lastname='Meyer', salary=20)
)
Company.objects.create(
name='Test GmbH', num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30)
)
def test_values_expression(self):
self.assertSequenceEqual(
Company.objects.values(salary=F('ceo__salary')),
[{'salary': 10}, {'salary': 20}, {'salary': 30}],
)
def test_values_expression_group_by(self):
# values() applies annotate() first, so values selected are grouped by
# id, not firstname.
Employee.objects.create(firstname='Joe', lastname='Jones', salary=2)
joes = Employee.objects.filter(firstname='Joe')
self.assertSequenceEqual(
joes.values('firstname', sum_salary=Sum('salary')).order_by('sum_salary'),
[{'firstname': 'Joe', 'sum_salary': 2}, {'firstname': 'Joe', 'sum_salary': 10}],
)
self.assertSequenceEqual(
joes.values('firstname').annotate(sum_salary=Sum('salary')),
[{'firstname': 'Joe', 'sum_salary': 12}]
)
def test_chained_values_with_expression(self):
Employee.objects.create(firstname='Joe', lastname='Jones', salary=2)
joes = Employee.objects.filter(firstname='Joe').values('firstname')
self.assertSequenceEqual(
joes.values('firstname', sum_salary=Sum('salary')),
[{'firstname': 'Joe', 'sum_salary': 12}]
)
self.assertSequenceEqual(
joes.values(sum_salary=Sum('salary')),
[{'sum_salary': 12}]
)
def test_values_list_expression(self):
companies = Company.objects.values_list('name', F('ceo__salary'))
self.assertSequenceEqual(companies, [('Example Inc.', 10), ('Foobar Ltd.', 20), ('Test GmbH', 30)])
def test_values_list_expression_flat(self):
companies = Company.objects.values_list(F('ceo__salary'), flat=True)
self.assertSequenceEqual(companies, (10, 20, 30))

# ============================================================
# repo: daira/tahoe-lafs-debian
# path: src/allmydata/unknown.py
# license: gpl-2.0
# ============================================================
from zope.interface import implements
from twisted.internet import defer
from allmydata.interfaces import IFilesystemNode, MustNotBeUnknownRWError, \
MustBeDeepImmutableError
from allmydata import uri
from allmydata.uri import ALLEGED_READONLY_PREFIX, ALLEGED_IMMUTABLE_PREFIX
# See ticket #833 for design rationale of UnknownNodes.
def strip_prefix_for_ro(ro_uri, deep_immutable):
"""Strip prefixes when storing an URI in a ro_uri slot."""
# It is possible for an alleged-immutable URI to be put into a
# mutable directory. In that case the ALLEGED_IMMUTABLE_PREFIX
# should not be stripped. In other cases, the prefix can safely
# be stripped because it is implied by the context.
if ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX):
if not deep_immutable:
return ro_uri
return ro_uri[len(ALLEGED_IMMUTABLE_PREFIX):]
elif ro_uri.startswith(ALLEGED_READONLY_PREFIX):
return ro_uri[len(ALLEGED_READONLY_PREFIX):]
else:
return ro_uri
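# For example (a hypothetical readcap string): storing 'ro.URI:DIR2-RO:abc'
# yields 'URI:DIR2-RO:abc', since the read-only context is implied by the
# slot; an 'imm.' prefix is kept in a mutable directory and stripped only
# when deep_immutable makes it redundant.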
class UnknownNode:
implements(IFilesystemNode)
def __init__(self, given_rw_uri, given_ro_uri, deep_immutable=False,
name=u"<unknown name>"):
assert given_rw_uri is None or isinstance(given_rw_uri, str)
assert given_ro_uri is None or isinstance(given_ro_uri, str)
given_rw_uri = given_rw_uri or None
given_ro_uri = given_ro_uri or None
# We don't raise errors when creating an UnknownNode; we instead create an
# opaque node (with rw_uri and ro_uri both None) that records the error.
# This avoids breaking operations that never store the opaque node.
# Note that this means that if a stored dirnode has only a rw_uri, it
# might be dropped. Any future "write-only" cap formats should have a dummy
# unusable readcap to stop that from happening.
self.error = None
self.rw_uri = self.ro_uri = None
if given_rw_uri:
if deep_immutable:
if given_rw_uri.startswith(ALLEGED_IMMUTABLE_PREFIX) and not given_ro_uri:
# We needed an immutable cap, and were given one. It was given in the
# rw_uri slot, but that's fine; we'll move it to ro_uri below.
pass
elif not given_ro_uri:
self.error = MustNotBeUnknownRWError("cannot attach unknown rw cap as immutable child",
name, True)
return # node will be opaque
else:
# We could report either error, but this probably makes more sense.
self.error = MustBeDeepImmutableError("cannot attach unknown rw cap as immutable child",
name)
return # node will be opaque
if not given_ro_uri:
# We were given a single cap argument, or a rw_uri with no ro_uri.
if not (given_rw_uri.startswith(ALLEGED_READONLY_PREFIX)
or given_rw_uri.startswith(ALLEGED_IMMUTABLE_PREFIX)):
# If the single cap is unprefixed, then we cannot tell whether it is a
# writecap, and we don't know how to diminish it to a readcap if it is one.
# If it didn't *already* have at least an ALLEGED_READONLY_PREFIX, then
# prefixing it would be a bad idea because we have been given no reason
# to believe that it is a readcap, so we might be letting a client
# inadvertently grant excess write authority.
self.error = MustNotBeUnknownRWError("cannot attach unknown rw cap as child",
name, False)
return # node will be opaque
# OTOH, if the single cap already had a prefix (which is of the required
# strength otherwise an error would have been thrown above), then treat it
# as though it had been given in the ro_uri slot. This has a similar effect
# to the use for known caps of 'bigcap = writecap or readcap' in
# nodemaker.py: create_from_cap. It enables copying of unknown readcaps to
# work in as many cases as we can securely allow.
given_ro_uri = given_rw_uri
given_rw_uri = None
elif given_ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX):
# Strange corner case: we were given a cap in both slots, with the ro_uri
# alleged to be immutable. A real immutable object wouldn't have a writecap.
self.error = MustBeDeepImmutableError("cannot accept a child entry that specifies "
"both rw_uri, and ro_uri with an imm. prefix",
name)
return # node will be opaque
# If the ro_uri definitely fails the constraint, it should be treated as opaque and
# the error recorded.
if given_ro_uri:
read_cap = uri.from_string(given_ro_uri, deep_immutable=deep_immutable, name=name)
if isinstance(read_cap, uri.UnknownURI):
self.error = read_cap.get_error()
if self.error:
assert self.rw_uri is None and self.ro_uri is None
return
if deep_immutable:
assert self.rw_uri is None
# strengthen the constraint on ro_uri to ALLEGED_IMMUTABLE_PREFIX
if given_ro_uri:
if given_ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX):
self.ro_uri = given_ro_uri
elif given_ro_uri.startswith(ALLEGED_READONLY_PREFIX):
self.ro_uri = ALLEGED_IMMUTABLE_PREFIX + given_ro_uri[len(ALLEGED_READONLY_PREFIX):]
else:
self.ro_uri = ALLEGED_IMMUTABLE_PREFIX + given_ro_uri
else:
# not immutable, so a writecap is allowed
self.rw_uri = given_rw_uri
# strengthen the constraint on ro_uri to ALLEGED_READONLY_PREFIX
if given_ro_uri:
if (given_ro_uri.startswith(ALLEGED_READONLY_PREFIX) or
given_ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX)):
self.ro_uri = given_ro_uri
else:
self.ro_uri = ALLEGED_READONLY_PREFIX + given_ro_uri
def get_cap(self):
return uri.UnknownURI(self.rw_uri or self.ro_uri)
def get_readcap(self):
return uri.UnknownURI(self.ro_uri)
def is_readonly(self):
raise AssertionError("an UnknownNode might be either read-only or "
"read/write, so we shouldn't be calling is_readonly")
def is_mutable(self):
raise AssertionError("an UnknownNode might be either mutable or immutable, "
"so we shouldn't be calling is_mutable")
def is_unknown(self):
return True
def is_allowed_in_immutable_directory(self):
# An UnknownNode consisting only of a ro_uri is allowed in an
# immutable directory, even though we do not know that it is
# immutable (or even read-only), provided that no error was detected.
return not self.error and not self.rw_uri
def is_alleged_immutable(self):
return not self.error and not self.rw_uri and (not self.ro_uri or self.ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX))
def raise_error(self):
if self.error is not None:
raise self.error
def get_uri(self):
return self.rw_uri or self.ro_uri
def get_write_uri(self):
return self.rw_uri
def get_readonly_uri(self):
return self.ro_uri
def get_storage_index(self):
return None
def get_verify_cap(self):
return None
def get_repair_cap(self):
return None
def get_size(self):
return None
def get_current_size(self):
return defer.succeed(None)
def check(self, monitor, verify, add_lease):
return defer.succeed(None)
def check_and_repair(self, monitor, verify, add_lease):
return defer.succeed(None)

# ============================================================
# repo: freedomhui/cinder
# path: cinder/tests/test_netapp_nfs.py
# license: apache-2.0
# ============================================================
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module (netapp_nfs)"""
from cinder import context
from cinder import test
from cinder import exception
from cinder.volume import netapp_nfs
from cinder.volume import netapp
from cinder.volume import nfs
from mox import IsA
from mox import IgnoreArg
from mox import MockObject
import mox
import suds
import types
class FakeVolume(object):
def __init__(self, size=0):
self.size = size
self.id = hash(self)
self.name = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponce(object):
def __init__(self, status):
"""
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetappNfsDriverTestCase(test.TestCase):
"""Test case for NetApp specific NFS clone driver"""
def setUp(self):
self._driver = netapp_nfs.NetAppNFSDriver()
self._mox = mox.Mox()
def tearDown(self):
self._mox.UnsetStubs()
def test_check_for_setup_error(self):
mox = self._mox
drv = self._driver
required_flags = [
'netapp_wsdl_url',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port'
]
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(netapp.FLAGS, flag, 'val')
mox.StubOutWithMock(nfs.NfsDriver, 'check_for_setup_error')
nfs.NfsDriver.check_for_setup_error()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(netapp.FLAGS, flag)
def test_do_setup(self):
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, 'check_for_setup_error')
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, '_get_client')
drv.check_for_setup_error()
netapp_nfs.NetAppNFSDriver._get_client()
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted"""
mox = self._mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot"""
drv = self._driver
mox = self._mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(2)
self.assertRaises(exception.CinderException,
drv.create_volume_from_snapshot,
volume,
snapshot)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEquals(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self._mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self._mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
drv._client = MockObject(suds.client.Client)
drv._client.factory = MockObject(suds.client.Factory)
drv._client.service = MockObject(suds.client.ServiceSelector)
# ApiProxy() method is generated by ServiceSelector at runtime from the
# XML, so mocking is impossible.
setattr(drv._client.service,
'ApiProxy',
types.MethodType(lambda *args, **kwargs: FakeResponce(status),
suds.client.ServiceSelector))
mox.StubOutWithMock(drv, '_get_host_id')
mox.StubOutWithMock(drv, '_get_full_export_path')
drv._get_host_id(IgnoreArg()).AndReturn('10')
drv._get_full_export_path(IgnoreArg(), IgnoreArg()).AndReturn('/nfs')
return mox
def test_successfull_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('passed')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
drv._clone_volume(volume_name, clone_name, volume_id)
mox.VerifyAll()
def test_failed_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('failed')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
self.assertRaises(exception.CinderException,
drv._clone_volume,
volume_name, clone_name, volume_id)
mox.VerifyAll()

# ============================================================
# repo: bryceguo/robotframework-selenium2library
# path: src/Selenium2Library/locators/tableelementfinder.py
# license: apache-2.0
# ============================================================
from selenium.common.exceptions import NoSuchElementException
from Selenium2Library import utils
from elementfinder import ElementFinder
class TableElementFinder(object):
def __init__(self, element_finder=None):
if not element_finder:
element_finder = ElementFinder()
self._element_finder = element_finder
self._locator_suffixes = {
('css', 'default'): [''],
('css', 'content'): [''],
('css', 'header'): [' th'],
('css', 'footer'): [' tfoot td'],
('css', 'row'): [' tr:nth-child(%s)'],
('css', 'col'): [' tr td:nth-child(%s)', ' tr th:nth-child(%s)'],
('xpath', 'default'): [''],
('xpath', 'content'): ['//*'],
('xpath', 'header'): ['//th'],
('xpath', 'footer'): ['//tfoot//td'],
('xpath', 'row'): ['//tr[%s]//*'],
('xpath', 'col'): ['//tr//*[self::td or self::th][%s]']
}
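# For example (assuming a table with id 'simpleTable'), find_by_col with
# col=2 expands the locator into both suffixed forms before searching:
#   'css=table#simpleTable tr td:nth-child(2)'
#   'css=table#simpleTable tr th:nth-child(2)'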
def find(self, browser, table_locator):
locators = self._parse_table_locator(table_locator, 'default')
return self._search_in_locators(browser, locators, None)
def find_by_content(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'content')
return self._search_in_locators(browser, locators, content)
def find_by_header(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'header')
return self._search_in_locators(browser, locators, content)
def find_by_footer(self, browser, table_locator, content):
locators = self._parse_table_locator(table_locator, 'footer')
return self._search_in_locators(browser, locators, content)
def find_by_row(self, browser, table_locator, col, content):
locators = self._parse_table_locator(table_locator, 'row')
locators = [locator % str(col) for locator in locators]
return self._search_in_locators(browser, locators, content)
def find_by_col(self, browser, table_locator, col, content):
locators = self._parse_table_locator(table_locator, 'col')
locators = [locator % str(col) for locator in locators]
return self._search_in_locators(browser, locators, content)
def _parse_table_locator(self, table_locator, location_method):
if table_locator.startswith('xpath='):
table_locator_type = 'xpath'
else:
if not table_locator.startswith('css='):
table_locator = "css=table#%s" % table_locator
table_locator_type = 'css'
locator_suffixes = self._locator_suffixes[(table_locator_type, location_method)]
return map(
lambda locator_suffix: table_locator + locator_suffix,
locator_suffixes)
def _search_in_locators(self, browser, locators, content):
for locator in locators:
elements = self._element_finder.find(browser, locator)
for element in elements:
if content is None: return element
element_text = element.text
if element_text and content in element_text:
return element
return None

# ============================================================
# repo: ujenmr/ansible
# path: lib/ansible/modules/system/alternatives.py
# license: gpl-3.0
# ============================================================
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
# Copyright: (c) 2015, David Wittman <dwittman@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool.
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
version_added: "1.6"
author:
- David Wittman (@DavidWittman)
- Gabe Mulley (@mulby)
options:
name:
description:
- The generic name of the link.
type: str
required: true
path:
description:
- The path to the real executable that the link should point to.
type: path
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
- This option is always required on RHEL-based distributions. On Debian-based distributions this option is
required when the alternative I(name) is unknown to the system.
type: path
priority:
description:
- The priority of the alternative.
type: int
default: 50
version_added: "2.2"
requirements: [ update-alternatives ]
'''
EXAMPLES = r'''
- name: Correct java version selected
alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: Alternatives link created
alternatives:
name: hadoop-conf
link: /etc/hadoop/conf
path: /etc/hadoop/conf.ansible
- name: Make java 32 bit an alternative with low priority
alternatives:
name: java
path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
priority: -10
'''
import os
import re
import subprocess
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
path=dict(type='path', required=True),
link=dict(type='path'),
priority=dict(type='int', default=50),
),
supports_check_mode=True,
)
params = module.params
name = params['name']
path = params['path']
link = params['link']
priority = params['priority']
UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)
current_path = None
all_alternatives = []
# Run `update-alternatives --display <name>` to find existing alternatives
(rc, display_output, _) = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--display', name]
)
if rc == 0:
# Alternatives already exist for this link group
# Parse the output to determine the current path of the symlink and
# available alternatives
current_path_regex = re.compile(r'^\s*link currently points to (.*)$',
re.MULTILINE)
alternative_regex = re.compile(r'^(\/.*)\s-\spriority', re.MULTILINE)
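# Abridged, hypothetical `update-alternatives --display java` output that
# the two regexes above are written against:
#   java - auto mode
#     link currently points to /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
#   /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java - priority 1071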
match = current_path_regex.search(display_output)
if match:
current_path = match.group(1)
all_alternatives = alternative_regex.findall(display_output)
if not link:
# Read the current symlink target from `update-alternatives --query`
# in case we need to install the new alternative before setting it.
#
# This is only compatible on Debian-based systems, as the other
# alternatives don't have --query available
rc, query_output, _ = module.run_command(
['env', 'LC_ALL=C', UPDATE_ALTERNATIVES, '--query', name]
)
if rc == 0:
for line in query_output.splitlines():
if line.startswith('Link:'):
link = line.split()[1]
break
if current_path != path:
if module.check_mode:
module.exit_json(changed=True, current_path=current_path)
try:
# install the requested path if necessary
if path not in all_alternatives:
if not os.path.exists(path):
module.fail_json(msg="Specified path %s does not exist" % path)
if not link:
module.fail_json(msg="Needed to install the alternative, but unable to do so as we are missing the link")
module.run_command(
[UPDATE_ALTERNATIVES, '--install', link, name, path, str(priority)],
check_rc=True
)
# select the requested path
module.run_command(
[UPDATE_ALTERNATIVES, '--set', name, path],
check_rc=True
)
module.exit_json(changed=True)
except subprocess.CalledProcessError as cpe:
module.fail_json(msg=str(dir(cpe)))
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()

# ============================================================
# repo: BayesianLogic/blog
# path: tools/blog_py_lexer/blog/lexer.py
# license: bsd-3-clause
# ============================================================
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import *
class BlogLexer(RegexLexer):
name = 'BLOG'
aliases = ['blog']
filenames = ['*.blog', '*.dblog']
operators = ['\\-\\>', ':', '\\+', '\\-', '\\*', '/', '\\[', ']',
'\\{', '}', '!', '\\<', '\\>', '\\<=', '\\>=', '==', '!=',
'&', '\\|', '=\\>', '#', '\\^', '%', '@']
wordops = ['isEmptyString', 'succ', 'pred',
'prev', 'inv', 'det', 'min', 'max',
'round', 'transpose', 'sin', 'cos', 'tan',
'atan2', 'sum', 'vstack', 'eye', 'zeros',
'ones', 'toInt', 'toReal', 'diag', 'repmat',
'hstack', 'vstack', 'pi', 'trace']
deliminators = [',', ';', '\\(', '\\)', '=', '~']
keywords = ['extern','import','fixed','distinct','random','origin',
'param','type', 'forall', 'exists', 'obs', 'query',
'if', 'then', 'else', 'for', 'case', 'in']
types = ['Integer','Real','Boolean','NaturalNum','List','Map',
'Timestep','RealMatrix','IntegerMatrix']
distribs = ['TabularCPD', 'Distribution','Gaussian',
'UniformChoice', 'MultivarGaussian', 'Poisson',
'Bernoulli', 'BooleanDistrib', 'Binomial', 'Beta', 'BoundedGeometric',
'Categorical', 'Dirichlet', 'EqualsCPD', 'Gamma', 'Geometric', 'Iota',
'LinearGaussian', 'MixtureDistrib', 'Multinomial',
'NegativeBinamial', 'RoundedLogNormal', 'TabularInterp',
'UniformVector', 'UnivarGaussian',
'Exponential', 'UniformInt', 'UniformReal']
idname_reg = '[a-zA-Z_]\\w*'
def gen_regex(ops):
return "|".join(ops)
tokens = {
'root' : [
(r'//.*?\n', Comment.Single),
(r'(?s)/\*.*?\*/', Comment.Multiline),
('('+idname_reg+')(\\()', bygroups(Name.Function, Punctuation)),
('('+gen_regex(types)+')\\b', Keyword.Type),
('('+gen_regex(distribs)+')\\b', Name.Class),
('('+gen_regex(keywords)+')\\b', Keyword),
(gen_regex(operators), Operator),
('(' + gen_regex(wordops) +')\\b', Operator.Word),
('(true|false|null)\\b', Keyword.Constant),
('('+idname_reg+')\\b', Name),
(r'"(\\\\|\\"|[^"])*"', String),
(gen_regex(deliminators), Punctuation),
(r'\d*\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\s+', Text),
]
}
def run_tests():
tests = [
"type Person;",
"distinct Person Alice, Bob, P[100];",
"random Real x1_x2x3 ~ Gaussian(0, 1);\nrandom Real y ~ Gaussian(x, 1);",
"random type0 funcname(type1 x) =expression;\nrandom type0 funcname(type1 x) dependency-expression;",
"random NaturalNum x ~ Poisson(a);",
"param Real a: 0 < a & a < 10 ;"
"random Real funcname(type1 x);",
"1.0 + 2.0 * 3.0 - 4.0",
"Twice( 10.0 ) * 5.5",
"fixed NaturalNum[] c = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];",
"fixed NaturalNum[][] table = [1, 2, 3; 4, 5, 6];",
"fixed List<NaturalNum> a = List(1, 2, 3, 4, 5, 6);",
"fixed Map<Boolean, Real> map1 = {true -> 0.3, false -> 0.7};",
"Categorical<Boolean> cpd1 =Categorical({true -> 0.3, false -> 0.7});",
"List",
"/*abc */",
"""
/* Evidence for the Hidden Markov Model.
*/
"""
]
lexer = BlogLexer()
for test in tests:
print(test)
for token in (lexer.get_tokens(test)):
print(token)
if __name__ == '__main__':
run_tests()

# ============================================================
# repo: ananthonline/grpc
# path: src/python/grpcio/tests/unit/framework/face/testing/coverage.py
# license: bsd-3-clause
# ============================================================
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Governs coverage for the tests of the Face layer of RPC Framework."""
import abc
import six
# These classes are only valid when inherited by unittest.TestCases.
# pylint: disable=invalid-name
class BlockingCoverage(six.with_metaclass(abc.ABCMeta)):
"""Specification of test coverage for blocking behaviors."""
@abc.abstractmethod
def testSuccessfulUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testSuccessfulUnaryRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testSuccessfulStreamRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testSuccessfulStreamRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testSequentialInvocations(self):
raise NotImplementedError()
@abc.abstractmethod
def testExpiredUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testExpiredUnaryRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testExpiredStreamRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testExpiredStreamRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testFailedUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testFailedUnaryRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testFailedStreamRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testFailedStreamRequestStreamResponse(self):
raise NotImplementedError()
class FullCoverage(six.with_metaclass(abc.ABCMeta, BlockingCoverage)):
"""Specification of test coverage for non-blocking behaviors."""
@abc.abstractmethod
def testParallelInvocations(self):
raise NotImplementedError()
@abc.abstractmethod
def testWaitingForSomeButNotAllParallelInvocations(self):
raise NotImplementedError()
@abc.abstractmethod
def testCancelledUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testCancelledUnaryRequestStreamResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testCancelledStreamRequestUnaryResponse(self):
raise NotImplementedError()
@abc.abstractmethod
def testCancelledStreamRequestStreamResponse(self):
raise NotImplementedError()

# ============================================================
# repo: pombredanne/MOG
# path: nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py
# ============================================================
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from webob import exc
from nova.api.openstack.compute.plugins.v3 import hypervisors
from nova import db
from nova.db.sqlalchemy import api as db_api
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
TEST_HYPERS = [
dict(id=1,
service_id=1,
service=dict(id=1,
host="compute1",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
availability_zone="nova"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100),
dict(id=2,
service_id=2,
service=dict(id=2,
host="compute2",
binary="nova-compute",
topic="compute_topic",
report_count=5,
disabled=False,
availability_zone="nova"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)]
TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
dict(name="inst2", uuid="uuid2", host="compute2"),
dict(name="inst3", uuid="uuid3", host="compute1"),
dict(name="inst4", uuid="uuid4", host="compute2")]
@db_api.require_admin_context
def fake_compute_node_get_all(context):
return TEST_HYPERS
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
return TEST_HYPERS
def fake_compute_node_get(context, compute_id):
for hyper in TEST_HYPERS:
if hyper['id'] == compute_id:
return hyper
raise exception.ComputeHostNotFound(host=compute_id)
def fake_compute_node_statistics(context):
result = dict(
count=0,
vcpus=0,
memory_mb=0,
local_gb=0,
vcpus_used=0,
memory_mb_used=0,
local_gb_used=0,
free_ram_mb=0,
free_disk_gb=0,
current_workload=0,
running_vms=0,
disk_available_least=0,
)
for hyper in TEST_HYPERS:
for key in result:
if key == 'count':
result[key] += 1
else:
result[key] += hyper[key]
return result
def fake_instance_get_all_by_host(context, host):
results = []
for inst in TEST_SERVERS:
if inst['host'] == host:
results.append(inst)
return results
class HypervisorsTest(test.NoDBTestCase):
def setUp(self):
super(HypervisorsTest, self).setUp()
self.controller = hypervisors.HypervisorsController()
self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
self.stubs.Set(db, 'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor)
self.stubs.Set(db, 'compute_node_get',
fake_compute_node_get)
self.stubs.Set(db, 'compute_node_statistics',
fake_compute_node_statistics)
self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
def test_view_hypervisor_nodetail_noservers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], False)
self.assertEqual(result, dict(id=1, hypervisor_hostname="hyper1"))
def test_view_hypervisor_detail_noservers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], True)
self.assertEqual(result, dict(
id=1,
hypervisor_hostname="hyper1",
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100,
service=dict(id=1, host='compute1')))
def test_view_hypervisor_servers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], False,
TEST_SERVERS)
self.assertEqual(result, dict(
id=1,
hypervisor_hostname="hyper1",
servers=[
dict(name="inst1", id="uuid1"),
dict(name="inst2", id="uuid2"),
dict(name="inst3", id="uuid3"),
dict(name="inst4", id="uuid4")]))
def test_index(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors',
use_admin_context=True)
result = self.controller.index(req)
self.assertEqual(result, dict(hypervisors=[
dict(id=1, hypervisor_hostname="hyper1"),
dict(id=2, hypervisor_hostname="hyper2")]))
def test_index_non_admin(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, req)
def test_detail(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/detail',
use_admin_context=True)
result = self.controller.detail(req)
self.assertEqual(result, dict(hypervisors=[
dict(id=1,
service=dict(id=1, host="compute1"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100),
dict(id=2,
service=dict(id=2, host="compute2"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)]))
def test_detail_non_admin(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/detail')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.detail, req)
def test_show_noid(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/3',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
def test_show_non_integer_id(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/abc',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
def test_show_withid(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1',
use_admin_context=True)
result = self.controller.show(req, '1')
self.assertEqual(result, dict(hypervisor=dict(
id=1,
service=dict(id=1, host="compute1"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)))
def test_show_non_admin(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req, '1')
def test_uptime_noid(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/3',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
def test_uptime_notimplemented(self):
def fake_get_host_uptime(context, hyp):
raise exc.HTTPNotImplemented()
self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1',
use_admin_context=True)
self.assertRaises(exc.HTTPNotImplemented,
self.controller.uptime, req, '1')
def test_uptime_implemented(self):
def fake_get_host_uptime(context, hyp):
return "fake uptime"
self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1',
use_admin_context=True)
result = self.controller.uptime(req, '1')
self.assertEqual(result, dict(hypervisor=dict(
id=1,
hypervisor_hostname="hyper1",
uptime="fake uptime")))
def test_uptime_non_integer_id(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/abc/uptime',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
def test_uptime_non_admin(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1/uptime')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.uptime, req, '1')
def test_search(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/search?query=hyper',
use_admin_context=True)
result = self.controller.search(req)
self.assertEqual(result, dict(hypervisors=[
dict(id=1, hypervisor_hostname="hyper1"),
dict(id=2, hypervisor_hostname="hyper2")]))
def test_search_non_exist(self):
def fake_compute_node_search_by_hypervisor_return_empty(context,
hypervisor_re):
return []
self.stubs.Set(db, 'compute_node_search_by_hypervisor',
fake_compute_node_search_by_hypervisor_return_empty)
req = fakes.HTTPRequestV3.blank('/os-hypervisors/search?query=a',
use_admin_context=True)
result = self.controller.search(req)
self.assertEqual(result, dict(hypervisors=[]))
def test_search_without_query(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/search',
use_admin_context=True)
self.assertRaises(exc.HTTPBadRequest, self.controller.search, req)
def test_servers(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1/servers',
use_admin_context=True)
result = self.controller.servers(req, '1')
self.assertEqual(result, dict(hypervisor=
dict(id=1,
hypervisor_hostname="hyper1",
servers=[
dict(name="inst1", id="uuid1"),
dict(name="inst3", id="uuid3")])))
def test_servers_non_id(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/3/servers',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.servers, req, '3')
def test_servers_non_admin(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1/servers')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.servers, req, '1')
def test_servers_return_empty(self):
def fake_instance_get_all_by_host_return_empty(context, hypervisor_re):
return []
self.stubs.Set(db, 'instance_get_all_by_host',
fake_instance_get_all_by_host_return_empty)
req = fakes.HTTPRequestV3.blank('/os-hypervisors/1/servers',
use_admin_context=True)
result = self.controller.servers(req, '1')
self.assertEqual(result, dict(hypervisor=
dict(id=1,
hypervisor_hostname="hyper1",
servers=[])))
def test_servers_with_non_integer_hypervisor_id(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/abc/servers',
use_admin_context=True)
self.assertRaises(exc.HTTPNotFound,
self.controller.servers, req, 'abc')
def test_statistics(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/statistics',
use_admin_context=True)
result = self.controller.statistics(req)
self.assertEqual(result, dict(hypervisor_statistics=dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200)))
def test_statistics_non_admin(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors/statistics')
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.statistics, req)
class HypervisorsSerializersTest(test.NoDBTestCase):
def compare_to_exemplar(self, exemplar, hyper):
# Check attributes
for key, value in exemplar.items():
if key in ('service', 'servers'):
# These turn into child elements and get tested
# separately below...
continue
self.assertEqual(str(value), hyper.get(key))
# Check child elements
required_children = set([child for child in ('service', 'servers')
if child in exemplar])
for child in hyper:
self.assertTrue(child.tag in required_children)
required_children.remove(child.tag)
# Check the node...
if child.tag == 'service':
for key, value in exemplar['service'].items():
self.assertEqual(str(value), child.get(key))
elif child.tag == 'servers':
self.assertEqual(len(child), len(exemplar['servers']))
for idx, grandchild in enumerate(child):
self.assertEqual('server', grandchild.tag)
for key, value in exemplar['servers'][idx].items():
self.assertEqual(str(value), grandchild.get(key))
# Are they all accounted for?
self.assertEqual(len(required_children), 0)
def test_index_serializer(self):
serializer = hypervisors.HypervisorIndexTemplate()
exemplar = dict(hypervisors=[
dict(hypervisor_hostname="hyper1",
id=1),
dict(hypervisor_hostname="hyper2",
id=2)])
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisors', tree.tag)
self.assertEqual(len(exemplar['hypervisors']), len(tree))
for idx, hyper in enumerate(tree):
self.assertEqual('hypervisor', hyper.tag)
self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
def test_detail_serializer(self):
serializer = hypervisors.HypervisorDetailTemplate()
exemplar = dict(hypervisors=[
dict(hypervisor_hostname="hyper1",
id=1,
vcpus=4,
memory_mb=10 * 1024,
local_gb=500,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=250,
hypervisor_type='xen',
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=250,
current_workload=2,
running_vms=2,
cpu_info="json data",
disk_available_least=100,
service=dict(id=1, host="compute1")),
dict(hypervisor_hostname="hyper2",
id=2,
vcpus=4,
memory_mb=10 * 1024,
local_gb=500,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=250,
hypervisor_type='xen',
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=250,
current_workload=2,
running_vms=2,
cpu_info="json data",
disk_available_least=100,
service=dict(id=2, host="compute2"))])
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisors', tree.tag)
self.assertEqual(len(exemplar['hypervisors']), len(tree))
for idx, hyper in enumerate(tree):
self.assertEqual('hypervisor', hyper.tag)
self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
def test_show_serializer(self):
serializer = hypervisors.HypervisorTemplate()
exemplar = dict(hypervisor=dict(
hypervisor_hostname="hyper1",
id=1,
vcpus=4,
memory_mb=10 * 1024,
local_gb=500,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=250,
hypervisor_type='xen',
hypervisor_version=3,
free_ram_mb=5 * 1024,
free_disk_gb=250,
current_workload=2,
running_vms=2,
cpu_info="json data",
disk_available_least=100,
service=dict(id=1, host="compute1")))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisor', tree.tag)
self.compare_to_exemplar(exemplar['hypervisor'], tree)
def test_uptime_serializer(self):
serializer = hypervisors.HypervisorUptimeTemplate()
exemplar = dict(hypervisor=dict(
hypervisor_hostname="hyper1",
id=1,
uptime='fake uptime'))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisor', tree.tag)
self.compare_to_exemplar(exemplar['hypervisor'], tree)
def test_servers_serializer(self):
serializer = hypervisors.HypervisorServersTemplate()
exemplar = dict(hypervisor=
dict(hypervisor_hostname="hyper1",
id=1,
servers=[
dict(name="inst1",
id="uuid1"),
dict(name="inst2",
id="uuid2")]))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisor', tree.tag)
self.compare_to_exemplar(exemplar['hypervisor'], tree)
def test_statistics_serializer(self):
serializer = hypervisors.HypervisorStatisticsTemplate()
exemplar = dict(hypervisor_statistics=dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('hypervisor_statistics', tree.tag)
self.compare_to_exemplar(exemplar['hypervisor_statistics'], tree)
| apache-2.0 |
Thoshh/wapad | lib/python2.7/site-packages/django/contrib/gis/maps/google/zoom.py | 527 | 6676 | from __future__ import unicode_literals
from math import atan, exp, log, pi, sin
from django.contrib.gis.geos import GEOSGeometry, LinearRing, Point, Polygon
from django.contrib.gis.maps.google.gmap import GoogleMapException
from django.utils.six.moves import range
# Constants used for degree to radian conversion, and vice-versa.
DTOR = pi / 180.
RTOD = 180. / pi
class GoogleZoom(object):
"""
GoogleZoom is a utility for performing operations related to the zoom
levels on Google Maps.
This class is inspired by the OpenStreetMap Mapnik tile generation routine
`generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
"Google Maps Hacks" by Rich Gibson and Schuyler Erle.
`generate_tiles.py` may be found at:
http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py
"Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
"""
def __init__(self, num_zoom=19, tilesize=256):
"Initializes the Google Zoom object."
# Google's tilesize is 256x256, square tiles are assumed.
self._tilesize = tilesize
# The number of zoom levels
self._nzoom = num_zoom
# Initializing arrays to hold the parameters for each one of the
# zoom levels.
self._degpp = [] # Degrees per pixel
self._radpp = [] # Radians per pixel
self._npix = [] # 1/2 the number of pixels for a tile at the given zoom level
# Incrementing through the zoom levels and populating the parameter arrays.
z = tilesize # The number of pixels per zoom level.
for i in range(num_zoom):
            # Getting the degrees and radians per pixel, and 1/2 the number
            # of pixels for every zoom level.
self._degpp.append(z / 360.) # degrees per pixel
self._radpp.append(z / (2 * pi)) # radians per pixel
self._npix.append(z / 2) # number of pixels to center of tile
# Multiplying `z` by 2 for the next iteration.
z *= 2
def __len__(self):
"Returns the number of zoom levels."
return self._nzoom
def get_lon_lat(self, lonlat):
"Unpacks longitude, latitude from GEOS Points and 2-tuples."
if isinstance(lonlat, Point):
lon, lat = lonlat.coords
else:
lon, lat = lonlat
return lon, lat
def lonlat_to_pixel(self, lonlat, zoom):
"Converts a longitude, latitude coordinate pair for the given zoom level."
# Setting up, unpacking the longitude, latitude values and getting the
# number of pixels for the given zoom level.
lon, lat = self.get_lon_lat(lonlat)
npix = self._npix[zoom]
# Calculating the pixel x coordinate by multiplying the longitude value
# with the number of degrees/pixel at the given zoom level.
px_x = round(npix + (lon * self._degpp[zoom]))
# Creating the factor, and ensuring that 1 or -1 is not passed in as the
# base to the logarithm. Here's why:
# if fac = -1, we'll get log(0) which is undefined;
# if fac = 1, our logarithm base will be divided by 0, also undefined.
fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
# Calculating the pixel y coordinate.
px_y = round(npix + (0.5 * log((1 + fac) / (1 - fac)) * (-1.0 * self._radpp[zoom])))
# Returning the pixel x, y to the caller of the function.
return (px_x, px_y)
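    # Worked example (illustrative, not part of the original module): with
    # the default 256-pixel tiles, zoom level 1 has z = 512, so npix = 256.
    # For lon = 0, lat = 0 we get fac = 0 and log(1/1) = 0, so both pixel
    # coordinates equal npix -- i.e. (256, 256), the center of the map.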
def pixel_to_lonlat(self, px, zoom):
"Converts a pixel to a longitude, latitude pair at the given zoom level."
if len(px) != 2:
raise TypeError('Pixel should be a sequence of two elements.')
# Getting the number of pixels for the given zoom level.
npix = self._npix[zoom]
# Calculating the longitude value, using the degrees per pixel.
lon = (px[0] - npix) / self._degpp[zoom]
# Calculating the latitude value.
lat = RTOD * (2 * atan(exp((px[1] - npix) / (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
# Returning the longitude, latitude coordinate pair.
return (lon, lat)
def tile(self, lonlat, zoom):
"""
Returns a Polygon corresponding to the region represented by a fictional
Google Tile for the given longitude/latitude pair and zoom level. This
tile is used to determine the size of a tile at the given point.
"""
# The given lonlat is the center of the tile.
delta = self._tilesize / 2
        # Getting the pixel coordinates corresponding to the
        # longitude/latitude.
px = self.lonlat_to_pixel(lonlat, zoom)
# Getting the lower-left and upper-right lat/lon coordinates
# for the bounding box of the tile.
ll = self.pixel_to_lonlat((px[0] - delta, px[1] - delta), zoom)
ur = self.pixel_to_lonlat((px[0] + delta, px[1] + delta), zoom)
# Constructing the Polygon, representing the tile and returning.
return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)
def get_zoom(self, geom):
"Returns the optimal Zoom level for the given geometry."
# Checking the input type.
if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
# Getting the envelope for the geometry, and its associated width, height
# and centroid.
env = geom.envelope
env_w, env_h = self.get_width_height(env.extent)
center = env.centroid
for z in range(self._nzoom):
# Getting the tile at the zoom level.
tile_w, tile_h = self.get_width_height(self.tile(center, z).extent)
            # Once the envelope spans more than one tile, the previous zoom
            # level is approximately the right one.
if (env_w > tile_w) or (env_h > tile_h):
if z == 0:
raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
return z - 1
# Otherwise, we've zoomed in to the max.
return self._nzoom - 1
def get_width_height(self, extent):
"""
Returns the width and height for the given extent.
"""
# Getting the lower-left, upper-left, and upper-right
# coordinates from the extent.
ll = Point(extent[:2])
ul = Point(extent[0], extent[3])
ur = Point(extent[2:])
# Calculating the width and height.
height = ll.distance(ul)
width = ul.distance(ur)
return width, height
| mit |
technologiescollege/Blockly-rduino-communication | scripts/Lib/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py | 9 | 1102 | from __future__ import division
from datetime import datetime
from pip._vendor.cachecontrol.cache import BaseCache
def total_seconds(td):
"""Python 2.6 compatability"""
if hasattr(td, 'total_seconds'):
return int(td.total_seconds())
ms = td.microseconds
secs = (td.seconds + td.days * 24 * 3600)
return int((ms + secs * 10**6) / 10**6)
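# Minimal usage sketch (illustration only, not part of the vendored module):
# both branches truncate towards an int, so one minute and half a second
# comes out as 60.
#
# from datetime import timedelta
# assert total_seconds(timedelta(minutes=1, microseconds=500000)) == 60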
class RedisCache(BaseCache):
def __init__(self, conn):
self.conn = conn
def get(self, key):
return self.conn.get(key)
def set(self, key, value, expires=None):
if not expires:
self.conn.set(key, value)
else:
expires = expires - datetime.utcnow()
self.conn.setex(key, total_seconds(expires), value)
def delete(self, key):
self.conn.delete(key)
def clear(self):
"""Helper for clearing all the keys in a database. Use with
caution!"""
for key in self.conn.keys():
self.conn.delete(key)
def close(self):
"""Redis uses connection pooling, no need to close the connection."""
pass
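# Hypothetical usage sketch (assumes the third-party `redis` package and a
# reachable server; illustration only, not part of the vendored module):
#
# import redis
# import requests
# from pip._vendor.cachecontrol import CacheControl
#
# sess = CacheControl(requests.Session(),
#                     cache=RedisCache(redis.Redis(host='localhost')))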
| gpl-3.0 |
davidzchen/tensorflow | tensorflow/python/kernel_tests/sparse_reshape_op_test.py | 6 | 17718 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseReshapeTest(test.TestCase):
def _SparseTensorPlaceholder(self):
return sparse_tensor.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtypes.float64),
array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor.SparseTensorValue(ind, val, shape)
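  # For reference (comment added; not in the original test): the fixture
  # above is the dense 5x6 matrix with a stored zero at (0, 0), values
  # 10, 13, 14 in row 1 (columns 0, 3, 4), and 32, 33 in row 3 (columns 2, 3).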
def _SparseTensorValue_2x3x4(self):
ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 1],
[1, 1, 3], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensorValue(ind, val, shape)
def testStaticShapeInfoPreserved(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_5x6())
self.assertAllEqual((5, 6), sp_input.get_shape())
sp_output = sparse_ops.sparse_reshape(sp_input, shape=(1, 5, 2, 3))
self.assertAllEqual((1, 5, 2, 3), sp_output.get_shape())
def testStaticShapeInfoPreservedWithInferredDims(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_2x3x4())
self.assertAllEqual((2, 3, 4), sp_input.get_shape())
sp_output = sparse_ops.sparse_reshape(sp_input, shape=(2, -1))
self.assertAllEqual((2, 3 * 4), sp_output.get_shape())
@test_util.run_deprecated_v1
def testRaisesIfMoreThanOneInferredDim(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_2x3x4())
with self.assertRaisesRegex(ValueError, "At most one dimension can"):
sparse_ops.sparse_reshape(sp_input, shape=(-1, 2, -1))
@test_util.run_deprecated_v1
def testRaisesIfInferredShapeNotPossible(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_2x3x4())
with self.assertRaisesRegex(ValueError, "Cannot reshape"):
sparse_ops.sparse_reshape(sp_input, shape=(-1, 7))
@test_util.run_deprecated_v1
def testPropagatesFullyKnownDenseShapeWhenShapePartiallyKnown(self):
sp_input = sparse_tensor.SparseTensor.from_value(
self._SparseTensorValue_2x3x4())
self.assertAllEqual((2, 3, 4), sp_input.shape)
sp_output = sparse_ops.sparse_reshape(
sp_input, shape=array_ops.concat(
(constant_op.constant([2], dtype=dtypes.int64),
array_ops.placeholder(dtype=dtypes.int64, shape=[1])),
axis=0))
self.assertAllEqual((2, 3 * 4), sp_output.shape)
def testSameShape(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [5, 6])
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedSameShape(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [5, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testWorksWellWithTfShape(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
shape = array_ops.shape(sp_input) # tf.shape generates int32 output
sp_output = sparse_ops.sparse_reshape(sp_input, shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedSameShapeWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [-1, 6])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, input_val.indices)
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
@test_util.run_deprecated_v1
def testFeedNewShapeSameRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [3, 10])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
[2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
@test_util.run_deprecated_v1
def testFeedNewShapeSameRankWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [3, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0], [0, 6], [0, 9], [1, 0], [2, 0],
[2, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [3, 10])
def testUpRank(self):
with self.session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(input_val, [2, 3, 5])
output_val = self.evaluate(sp_output)
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@test_util.run_deprecated_v1
def testFeedUpRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@test_util.run_deprecated_v1
def testFeedUpRankWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, -1, 5])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 0, 0], [0, 1, 1], [0, 1, 4], [0, 2, 0],
[1, 1, 0], [1, 1, 1]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [2, 3, 5])
@test_util.run_deprecated_v1
def testFeedDownRank(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
sp_output = sparse_ops.sparse_reshape(sp_input, [6, 4])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
[4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
@test_util.run_deprecated_v1
def testFeedDownRankWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_2x3x4()
sp_output = sparse_ops.sparse_reshape(sp_input, [6, -1])
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices,
np.array([[0, 1], [1, 0], [1, 2], [3, 3], [4, 1],
[4, 3], [5, 2]]))
self.assertAllEqual(output_val.values, input_val.values)
self.assertAllEqual(output_val.dense_shape, [6, 4])
@test_util.run_deprecated_v1
def testFeedMultipleInferredDims(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1, -1])
with self.assertRaisesOpError("only one output dimension may be -1"):
sess.run(sp_output, {sp_input: input_val})
@test_util.run_deprecated_v1
def testProvideStaticallyMismatchedSizes(self):
input_val = self._SparseTensorValue_5x6()
sp_input = sparse_tensor.SparseTensor.from_value(input_val)
with self.assertRaisesRegex(ValueError, "Cannot reshape"):
sparse_ops.sparse_reshape(sp_input, [4, 7])
@test_util.run_deprecated_v1
def testFeedMismatchedSizes(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, 7])
with self.assertRaisesOpError(
"Input to reshape is a tensor with 30 dense values"):
sess.run(sp_output, {sp_input: input_val})
@test_util.run_deprecated_v1
def testFeedMismatchedSizesWithInferredDim(self):
with self.session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input_val = self._SparseTensorValue_5x6()
sp_output = sparse_ops.sparse_reshape(sp_input, [4, -1])
with self.assertRaisesOpError("requested shape requires a multiple"):
sess.run(sp_output, {sp_input: input_val})
@test_util.run_deprecated_v1
def testFeedPartialShapes(self):
with self.session(use_gpu=False):
# Incorporate new rank into shape information if known
sp_input = self._SparseTensorPlaceholder()
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [None, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
# Incorporate known shape information about input indices in output
# indices
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
sp_output = sparse_ops.sparse_reshape(sp_input, [2, 3, 5])
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, 3])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [3])
# Even if new_shape has no shape information, we know the ranks of
# output indices and shape
sp_input = self._SparseTensorPlaceholder()
sp_input.indices.set_shape([5, None])
new_shape = array_ops.placeholder(dtypes.int64)
sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
self.assertListEqual(sp_output.indices.get_shape().as_list(), [5, None])
self.assertListEqual(sp_output.dense_shape.get_shape().as_list(), [None])
@test_util.run_deprecated_v1
def testFeedDenseReshapeSemantics(self):
with self.session(use_gpu=False) as sess:
# Compute a random rank-5 initial shape and new shape, randomly sparsify
# it, and check that the output of SparseReshape has the same semantics
# as a dense reshape.
factors = np.array([2] * 4 + [3] * 4 + [5] * 4) # 810k total elements
orig_rank = np.random.randint(2, 7)
orig_map = np.random.randint(orig_rank, size=factors.shape)
orig_shape = [np.prod(factors[orig_map == d]) for d in range(orig_rank)]
new_rank = np.random.randint(2, 7)
new_map = np.random.randint(new_rank, size=factors.shape)
new_shape = [np.prod(factors[new_map == d]) for d in range(new_rank)]
orig_dense = np.random.uniform(size=orig_shape)
orig_indices = np.transpose(np.nonzero(orig_dense < 0.5))
orig_values = orig_dense[orig_dense < 0.5]
new_dense = np.reshape(orig_dense, new_shape)
new_indices = np.transpose(np.nonzero(new_dense < 0.5))
new_values = new_dense[new_dense < 0.5]
sp_input = self._SparseTensorPlaceholder()
input_val = sparse_tensor.SparseTensorValue(orig_indices, orig_values,
orig_shape)
sp_output = sparse_ops.sparse_reshape(sp_input, new_shape)
output_val = sess.run(sp_output, {sp_input: input_val})
self.assertAllEqual(output_val.indices, new_indices)
self.assertAllEqual(output_val.values, new_values)
self.assertAllEqual(output_val.dense_shape, new_shape)
class EmptySparseTensorReshapeTest(test.TestCase, parameterized.TestCase):
"""Tests for reshaping 0-sized SparseTensors, compared w/ dense tensors."""
def _MakeAndReshapeTensor(self, tensor_class, original_shape, target_shape):
if tensor_class == "sparse":
ind = np.zeros([0, len(original_shape)]).astype(np.int64)
val = np.array([]).astype(np.float64)
shape = np.array(original_shape).astype(np.int64)
sp_input = sparse_tensor.SparseTensorValue(ind, val, shape)
sp_output = self.evaluate(
sparse_ops.sparse_reshape(sp_input, target_shape))
return sp_output.dense_shape
else:
dense_input = array_ops.zeros(original_shape)
dense_output = self.evaluate(array_ops.reshape(dense_input, target_shape))
return dense_output.shape
@parameterized.named_parameters([
("Dense", "dense"),
("Sparse", "sparse"),
])
def testImpliedReshapeEmpty1DTensor(self, tensor_class):
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [0], [-1, 1]), [0, 1])
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [0], [-1, 1, 2]), [0, 1, 2])
@parameterized.named_parameters([
("Dense", "dense"),
("Sparse", "sparse"),
])
def testImpliedReshapeEmpty2DTensor(self, tensor_class):
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [1, 0], [-1, 1]), [0, 1])
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [1, 0], [-1, 2, 3]), [0, 2, 3])
@parameterized.named_parameters([
("Dense", "dense"),
("Sparse", "sparse"),
])
def testImpliedReshapeEmpty3DTensor(self, tensor_class):
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [1, 0, 0], [-1, 2, 3]),
[0, 2, 3])
@parameterized.named_parameters([
("Dense", "dense"),
("Sparse", "sparse"),
])
def testImpliedReshapeEmpty4DTensor(self, tensor_class):
self.assertAllEqual(
self._MakeAndReshapeTensor(tensor_class, [2, 4, 0, 6], [-1, 4, 6, 2]),
[0, 4, 6, 2])
def testImpliedDimTogetherWithZeroDimCausesError(self):
# NOTE: When implied dimensions and zero dimensions coexist in the target
# shape, the behavior currently differs between sparse and regular tensors.
with self.assertRaises(errors.InvalidArgumentError):
self._MakeAndReshapeTensor("sparse", [0], [-1, 0])
with self.assertRaises(errors.InvalidArgumentError):
self._MakeAndReshapeTensor("sparse", [1, 0], [-1, 0])
with self.assertRaises(errors.InvalidArgumentError):
self._MakeAndReshapeTensor("sparse", [1, 2, 0], [2, -1, 0])
with self.assertRaises(errors.InvalidArgumentError):
self._MakeAndReshapeTensor("sparse", [1, 2, 3, 0], [2, 0, -1, 3])
if __name__ == "__main__":
test.main()
| apache-2.0 |
terencehonles/mailman | src/mailman/handlers/replybot.py | 3 | 5031 | # Copyright (C) 1998-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Handler for automatic responses."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'Replybot',
]
import logging
from zope.component import getUtility
from zope.interface import implementer
from mailman.core.i18n import _
from mailman.email.message import UserNotification
from mailman.interfaces.autorespond import (
ALWAYS_REPLY, IAutoResponseSet, Response, ResponseAction)
from mailman.interfaces.handler import IHandler
from mailman.interfaces.usermanager import IUserManager
from mailman.utilities.datetime import today
from mailman.utilities.string import expand, wrap
log = logging.getLogger('mailman.error')
@implementer(IHandler)
class Replybot:
"""Send automatic responses."""
name = 'replybot'
description = _('Send automatic responses.')
def process(self, mlist, msg, msgdata):
"""See `IHandler`."""
# There are several cases where the replybot is short-circuited:
# * the original message has an "X-Ack: No" header
# * the message has a Precedence header with values bulk, junk, or
# list, and there's no explicit "X-Ack: yes" header
# * the message metadata has a true 'noack' key
ack = msg.get('x-ack', '').lower()
if ack == 'no' or msgdata.get('noack'):
return
precedence = msg.get('precedence', '').lower()
if ack != 'yes' and precedence in ('bulk', 'junk', 'list'):
return
# Check to see if the list is even configured to autorespond to this
# email message. Note: the incoming message processors should set the
# destination key in the message data.
if msgdata.get('to_owner'):
if mlist.autorespond_owner is ResponseAction.none:
return
response_type = Response.owner
response_text = mlist.autoresponse_owner_text
elif msgdata.get('to_request'):
if mlist.autorespond_requests is ResponseAction.none:
return
response_type = Response.command
response_text = mlist.autoresponse_request_text
elif msgdata.get('to_list'):
if mlist.autorespond_postings is ResponseAction.none:
return
response_type = Response.postings
response_text = mlist.autoresponse_postings_text
else:
# There are no automatic responses for any other destination.
return
# Now see if we're in the grace period for this sender. grace_period
# = 0 means always automatically respond, as does an "X-Ack: yes"
# header (useful for debugging).
response_set = IAutoResponseSet(mlist)
user_manager = getUtility(IUserManager)
address = user_manager.get_address(msg.sender)
if address is None:
address = user_manager.create_address(msg.sender)
grace_period = mlist.autoresponse_grace_period
if grace_period > ALWAYS_REPLY and ack != 'yes':
last = response_set.last_response(address, response_type)
if last is not None and last.date_sent + grace_period > today():
return
# Okay, we know we're going to respond to this sender, craft the
# message, send it, and update the database.
display_name = mlist.display_name
subject = _(
'Auto-response for your message to the "$display_name" '
'mailing list')
# Do string interpolation into the autoresponse text
d = dict(list_name = mlist.list_name,
display_name = display_name,
listurl = mlist.script_url('listinfo'),
requestemail = mlist.request_address,
owneremail = mlist.owner_address,
)
        # Interpolate and wrap the response text.
text = wrap(expand(response_text, d))
outmsg = UserNotification(msg.sender, mlist.bounces_address,
subject, text, mlist.preferred_language)
outmsg['X-Mailer'] = _('The Mailman Replybot')
# prevent recursions and mail loops!
outmsg['X-Ack'] = 'No'
outmsg.send(mlist)
response_set.response_sent(address, response_type)
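# Illustrative note (comment added, not part of the handler): an incoming
# message suppresses the autoresponse either with an explicit "X-Ack: No"
# header, or with "Precedence: bulk" (or junk/list) absent a counteracting
# "X-Ack: yes".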
| gpl-3.0 |
taliax/easybuild-easyblocks | easybuild/easyblocks/i/imkl.py | 5 | 17376 | # #
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
EasyBuild support for installing the Intel Math Kernel Library (MKL), implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import itertools
import os
import shutil
import tempfile
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.intelbase import IntelBase, ACTIVATION_NAME_2012, LICENSE_FILE_NAME_2012
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import rmtree2
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_imkl(IntelBase):
"""
Class that can be used to install mkl
- tested with 10.2.1.017
-- will fail for all older versions (due to newer silent installer)
"""
@staticmethod
def extra_options():
"""Add easyconfig parameters custom to imkl (e.g. interfaces)."""
extra_vars = {
'interfaces': [True, "Indicates whether interfaces should be built", CUSTOM],
}
return IntelBase.extra_options(extra_vars)
def __init__(self, *args, **kwargs):
super(EB_imkl, self).__init__(*args, **kwargs)
# make sure $MKLROOT isn't set, it's known to cause problems with the installation
self.cfg.update('unwanted_env_vars', ['MKLROOT'])
def install_step(self):
"""
Actual installation
- create silent cfg file
- execute command
"""
silent_cfg_names_map = None
silent_cfg_extras = None
if LooseVersion(self.version) < LooseVersion('11.1'):
# since imkl v11.1, silent.cfg has been slightly changed to be 'more standard'
silent_cfg_names_map = {
'activation_name': ACTIVATION_NAME_2012,
'license_file_name': LICENSE_FILE_NAME_2012,
}
if LooseVersion(self.version) >= LooseVersion('11.1'):
silent_cfg_extras = {
'COMPONENTS': 'ALL',
}
super(EB_imkl, self).install_step(silent_cfg_names_map=silent_cfg_names_map, silent_cfg_extras=silent_cfg_extras)
def make_module_req_guess(self):
"""
A dictionary of possible directories to look for
"""
if LooseVersion(self.version) >= LooseVersion('10.3'):
if self.cfg['m32']:
raise EasyBuildError("32-bit not supported yet for IMKL v%s (>= 10.3)", self.version)
else:
retdict = {
'PATH': ['bin', 'mkl/bin', 'mkl/bin/intel64', 'composerxe-2011/bin'],
'LD_LIBRARY_PATH': ['lib/intel64', 'mkl/lib/intel64'],
'LIBRARY_PATH': ['lib/intel64', 'mkl/lib/intel64'],
'MANPATH': ['man', 'man/en_US'],
'CPATH': ['mkl/include', 'mkl/include/fftw'],
'FPATH': ['mkl/include', 'mkl/include/fftw'],
}
if LooseVersion(self.version) >= LooseVersion('11.0'):
if LooseVersion(self.version) >= LooseVersion('11.1'):
                        retdict['MIC_LD_LIBRARY_PATH'] = ['lib/mic', 'mkl/lib/mic']
else:
                        retdict['MIC_LD_LIBRARY_PATH'] = ['compiler/lib/mic', 'mkl/lib/mic']
                return retdict
else:
if self.cfg['m32']:
return {
'PATH': ['bin', 'bin/ia32', 'tbb/bin/ia32'],
'LD_LIBRARY_PATH': ['lib', 'lib/32'],
'LIBRARY_PATH': ['lib', 'lib/32'],
'MANPATH': ['man', 'share/man', 'man/en_US'],
'CPATH': ['include'],
'FPATH': ['include']
}
else:
return {
'PATH': ['bin', 'bin/intel64', 'tbb/bin/em64t'],
'LD_LIBRARY_PATH': ['lib', 'lib/em64t'],
'LIBRARY_PATH': ['lib', 'lib/em64t'],
'MANPATH': ['man', 'share/man', 'man/en_US'],
'CPATH': ['include'],
'FPATH': ['include'],
}
def make_module_extra(self):
"""Overwritten from Application to add extra txt"""
txt = super(EB_imkl, self).make_module_extra()
txt += self.module_generator.set_environment('MKLROOT', os.path.join(self.installdir, 'mkl'))
return txt
def post_install_step(self):
"""
Install group libraries and interfaces (if desired).
"""
super(EB_imkl, self).post_install_step()
# reload the dependencies
self.load_dependency_modules()
if self.cfg['m32']:
extra = {
'libmkl.so': 'GROUP (-lmkl_intel -lmkl_intel_thread -lmkl_core)',
'libmkl_em64t.a': 'GROUP (libmkl_intel.a libmkl_intel_thread.a libmkl_core.a)',
'libmkl_solver.a': 'GROUP (libmkl_solver.a)',
'libmkl_scalapack.a': 'GROUP (libmkl_scalapack_core.a)',
'libmkl_lapack.a': 'GROUP (libmkl_intel.a libmkl_intel_thread.a libmkl_core.a)',
'libmkl_cdft.a': 'GROUP (libmkl_cdft_core.a)'
}
else:
extra = {
'libmkl.so': 'GROUP (-lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core)',
'libmkl_em64t.a': 'GROUP (libmkl_intel_lp64.a libmkl_intel_thread.a libmkl_core.a)',
'libmkl_solver.a': 'GROUP (libmkl_solver_lp64.a)',
'libmkl_scalapack.a': 'GROUP (libmkl_scalapack_lp64.a)',
'libmkl_lapack.a': 'GROUP (libmkl_intel_lp64.a libmkl_intel_thread.a libmkl_core.a)',
'libmkl_cdft.a': 'GROUP (libmkl_cdft_core.a)'
}
if LooseVersion(self.version) >= LooseVersion('10.3'):
libsubdir = os.path.join('mkl', 'lib', 'intel64')
else:
if self.cfg['m32']:
libsubdir = os.path.join('lib', '32')
else:
libsubdir = os.path.join('lib', 'em64t')
for fil, txt in extra.items():
dest = os.path.join(self.installdir, libsubdir, fil)
if not os.path.exists(dest):
try:
f = open(dest, 'w')
f.write(txt)
f.close()
self.log.info("File %s written" % dest)
except IOError, err:
raise EasyBuildError("Can't write file %s: %s", dest, err)
# build the mkl interfaces, if desired
if self.cfg['interfaces']:
if LooseVersion(self.version) >= LooseVersion('10.3'):
intsubdir = os.path.join('mkl', 'interfaces')
inttarget = 'libintel64'
else:
intsubdir = 'interfaces'
if self.cfg['m32']:
inttarget = 'lib32'
else:
inttarget = 'libem64t'
cmd = "make -f makefile %s" % inttarget
# blas95 and lapack95 need more work, ignore for now
# blas95 and lapack also need include/.mod to be processed
fftw2libs = ['fftw2xc', 'fftw2xf']
fftw3libs = ['fftw3xc', 'fftw3xf']
cdftlibs = ['fftw2x_cdft']
if LooseVersion(self.version) >= LooseVersion('10.3'):
cdftlibs.append('fftw3x_cdft')
interfacedir = os.path.join(self.installdir, intsubdir)
try:
os.chdir(interfacedir)
self.log.info("Changed to interfaces directory %s" % interfacedir)
except OSError, err:
raise EasyBuildError("Can't change to interfaces directory %s", interfacedir)
compopt = None
# determine whether we're using a non-Intel GCC-based toolchain
# can't use toolchain.comp_family, because of dummy toolchain used when installing imkl
if get_software_root('icc') is None:
if get_software_root('GCC'):
compopt = 'compiler=gnu'
else:
raise EasyBuildError("Not using either Intel compilers nor GCC, "
"don't know how to build wrapper libs")
else:
compopt = 'compiler=intel'
for lib in fftw2libs + fftw3libs + cdftlibs:
buildopts = [compopt]
if lib in fftw3libs:
buildopts.append('install_to=$INSTALL_DIR')
elif lib in cdftlibs:
mpi_spec = None
# check whether MPI_FAMILY constant is defined, so mpi_family() can be used
if hasattr(self.toolchain, 'MPI_FAMILY') and self.toolchain.MPI_FAMILY is not None:
mpi_spec_by_fam = {
toolchain.MPICH: 'mpich2', # MPICH is MPICH v3.x, which is MPICH2 compatible
toolchain.MPICH2: 'mpich2',
toolchain.MVAPICH2: 'mpich2',
toolchain.OPENMPI: 'openmpi',
}
mpi_fam = self.toolchain.mpi_family()
mpi_spec = mpi_spec_by_fam.get(mpi_fam)
self.log.debug("Determined MPI specification based on MPI toolchain component: %s" % mpi_spec)
else:
# can't use toolchain.mpi_family, because of dummy toolchain
if get_software_root('MPICH2') or get_software_root('MVAPICH2'):
mpi_spec = 'mpich2'
elif get_software_root('OpenMPI'):
mpi_spec = 'openmpi'
self.log.debug("Determined MPI specification based on loaded MPI module: %s" % mpi_spec)
if mpi_spec is not None:
buildopts.append('mpi=%s' % mpi_spec)
precflags = ['']
if lib.startswith('fftw2x') and not self.cfg['m32']:
# build both single and double precision variants
precflags = ['PRECISION=MKL_DOUBLE', 'PRECISION=MKL_SINGLE']
intflags = ['']
if lib in cdftlibs and not self.cfg['m32']:
# build both 32-bit and 64-bit interfaces
intflags = ['interface=lp64', 'interface=ilp64']
allopts = [list(opts) for opts in itertools.product(intflags, precflags)]
for flags, extraopts in itertools.product(['', '-fPIC'], allopts):
tup = (lib, flags, buildopts, extraopts)
self.log.debug("Building lib %s with: flags %s, buildopts %s, extraopts %s" % tup)
tmpbuild = tempfile.mkdtemp(dir=self.builddir)
self.log.debug("Created temporary directory %s" % tmpbuild)
# always set INSTALL_DIR, SPEC_OPT, COPTS and CFLAGS
# fftw2x(c|f): use $INSTALL_DIR, $CFLAGS and $COPTS
# fftw3x(c|f): use $CFLAGS
# fftw*cdft: use $INSTALL_DIR and $SPEC_OPT
env.setvar('INSTALL_DIR', tmpbuild)
env.setvar('SPEC_OPT', flags)
env.setvar('COPTS', flags)
env.setvar('CFLAGS', flags)
try:
intdir = os.path.join(interfacedir, lib)
os.chdir(intdir)
self.log.info("Changed to interface %s directory %s" % (lib, intdir))
except OSError, err:
raise EasyBuildError("Can't change to interface %s directory %s: %s", lib, intdir, err)
fullcmd = "%s %s" % (cmd, ' '.join(buildopts + extraopts))
res = run_cmd(fullcmd, log_all=True, simple=True)
if not res:
raise EasyBuildError("Building %s (flags: %s, fullcmd: %s) failed", lib, flags, fullcmd)
for fn in os.listdir(tmpbuild):
src = os.path.join(tmpbuild, fn)
if flags == '-fPIC':
# add _pic to filename
ff = fn.split('.')
fn = '.'.join(ff[:-1]) + '_pic.' + ff[-1]
dest = os.path.join(self.installdir, libsubdir, fn)
try:
if os.path.isfile(src):
shutil.move(src, dest)
self.log.info("Moved %s to %s" % (src, dest))
except OSError, err:
raise EasyBuildError("Failed to move %s to %s: %s", src, dest, err)
rmtree2(tmpbuild)
def sanity_check_step(self):
"""Custom sanity check paths for Intel MKL."""
mklfiles = None
mkldirs = None
ver = LooseVersion(self.version)
libs = ["libmkl_core.so", "libmkl_gnu_thread.so", "libmkl_intel_thread.so", "libmkl_sequential.so"]
extralibs = ["libmkl_blacs_intelmpi_%(suff)s.so", "libmkl_scalapack_%(suff)s.so"]
if self.cfg['interfaces']:
compsuff = '_intel'
if get_software_root('icc') is None:
if get_software_root('GCC'):
compsuff = '_gnu'
else:
raise EasyBuildError("Not using Intel compilers or GCC, don't know compiler suffix for FFTW libraries.")
precs = ['_double', '_single']
if ver < LooseVersion('11'):
# no precision suffix in libfftw2 libs before imkl v11
precs = ['']
fftw_vers = ['2x%s%s' % (x, prec) for x in ['c', 'f'] for prec in precs] + ['3xc', '3xf']
pics = ['', '_pic']
libs = ['libfftw%s%s%s.a' % (fftwver, compsuff, pic) for fftwver in fftw_vers for pic in pics]
fftw_cdft_vers = ['2x_cdft_DOUBLE']
if not self.cfg['m32']:
fftw_cdft_vers.append('2x_cdft_SINGLE')
if ver >= LooseVersion('10.3'):
fftw_cdft_vers.append('3x_cdft')
if ver >= LooseVersion('11.0.2'):
bits = ['_lp64']
if not self.cfg['m32']:
bits.append('_ilp64')
else:
# no bits suffix in cdft libs before imkl v11.0.2
bits = ['']
libs += ['libfftw%s%s%s.a' % x for x in itertools.product(fftw_cdft_vers, bits, pics)]
if ver >= LooseVersion('10.3'):
if self.cfg['m32']:
raise EasyBuildError("Sanity check for 32-bit not implemented yet for IMKL v%s (>= 10.3)", self.version)
else:
mkldirs = ["bin", "mkl/bin", "mkl/bin/intel64", "mkl/lib/intel64", "mkl/include"]
libs += [lib % {'suff': suff} for lib in extralibs for suff in ['lp64', 'ilp64']]
mklfiles = ["mkl/lib/intel64/libmkl.so", "mkl/include/mkl.h"] + \
["mkl/lib/intel64/%s" % lib for lib in libs]
if ver >= LooseVersion('10.3.4') and ver < LooseVersion('11.1'):
mkldirs += ["compiler/lib/intel64"]
else:
mkldirs += ["lib/intel64"]
else:
if self.cfg['m32']:
mklfiles = ["lib/32/libmkl.so", "include/mkl.h"] + \
["lib/32/%s" % lib for lib in libs]
mkldirs = ["lib/32", "include/32", "interfaces"]
else:
libs += [lib % {'suff': suff} for lib in extralibs for suff in ['lp64', 'ilp64']]
mklfiles = ["lib/em64t/libmkl.so", "include/mkl.h"] + \
["lib/em64t/%s" % lib for lib in libs]
mkldirs = ["lib/em64t", "include/em64t", "interfaces"]
custom_paths = {
'files': mklfiles,
'dirs': mkldirs,
}
super(EB_imkl, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 |
salguarnieri/intellij-community | python/lib/Lib/site-packages/django/core/mail/backends/console.py | 308 | 1295 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
self._lock.acquire()
try:
# The try-except is nested to allow for
# Python 2.4 support (Refs #12147)
try:
stream_created = self.open()
for message in email_messages:
self.stream.write('%s\n' % message.message().as_string())
self.stream.write('-'*79)
self.stream.write('\n')
self.stream.flush() # flush after each message
if stream_created:
self.close()
except:
if not self.fail_silently:
raise
finally:
self._lock.release()
return len(email_messages)
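# Illustrative settings snippet (an assumption, not part of this module):
# Django selects this backend when the settings file contains
#
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'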
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/260_test_call.py | 90 | 3124 | import unittest
from test import test_support
# The test cases here cover several paths through the function calling
# code. They depend on the METH_XXX flag that is used to define a C
# function, which can't be verified from Python. If the METH_XXX decl
# for a C function changes, these tests may not cover the right paths.
class CFunctionCalls(unittest.TestCase):
def test_varargs0(self):
self.assertRaises(TypeError, {}.has_key)
def test_varargs1(self):
{}.has_key(0)
def test_varargs2(self):
self.assertRaises(TypeError, {}.has_key, 0, 1)
def test_varargs0_ext(self):
try:
{}.has_key(*())
except TypeError:
pass
def test_varargs1_ext(self):
{}.has_key(*(0,))
def test_varargs2_ext(self):
try:
{}.has_key(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_varargs0_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2)
def test_varargs1_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2)
def test_varargs2_kw(self):
self.assertRaises(TypeError, {}.has_key, x=2, y=2)
def test_oldargs0_0(self):
{}.keys()
def test_oldargs0_1(self):
self.assertRaises(TypeError, {}.keys, 0)
def test_oldargs0_2(self):
self.assertRaises(TypeError, {}.keys, 0, 1)
def test_oldargs0_0_ext(self):
{}.keys(*())
def test_oldargs0_1_ext(self):
try:
{}.keys(*(0,))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_2_ext(self):
try:
{}.keys(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_0_kw(self):
try:
{}.keys(x=2)
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs0_1_kw(self):
self.assertRaises(TypeError, {}.keys, x=2)
def test_oldargs0_2_kw(self):
self.assertRaises(TypeError, {}.keys, x=2, y=2)
def test_oldargs1_0(self):
self.assertRaises(TypeError, [].count)
def test_oldargs1_1(self):
[].count(1)
def test_oldargs1_2(self):
self.assertRaises(TypeError, [].count, 1, 2)
def test_oldargs1_0_ext(self):
try:
[].count(*())
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_1_ext(self):
[].count(*(1,))
def test_oldargs1_2_ext(self):
try:
[].count(*(1, 2))
except TypeError:
pass
else:
raise RuntimeError
def test_oldargs1_0_kw(self):
self.assertRaises(TypeError, [].count, x=2)
def test_oldargs1_1_kw(self):
self.assertRaises(TypeError, [].count, {}, x=2)
def test_oldargs1_2_kw(self):
self.assertRaises(TypeError, [].count, x=2, y=2)
def test_main():
test_support.run_unittest(CFunctionCalls)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
haematologic/cellcounter | cellcounter/accounts/views.py | 1 | 7496 | from braces.views import LoginRequiredMixin
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.http import urlsafe_base64_decode
from django.utils.safestring import mark_safe
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, UpdateView, DetailView, DeleteView
from ratelimit.exceptions import Ratelimited
from ratelimit.mixins import RatelimitMixin
from ratelimit.utils import is_ratelimited
from .forms import EmailUserCreationForm, PasswordResetForm
class RateLimitedFormView(FormView):
ratelimit_key = 'ip'
ratelimit_block = True
ratelimit_rate = '1/h'
ratelimit_group = None
def dispatch(self, *args, **kwargs):
ratelimited = is_ratelimited(request=self.request,
group=self.ratelimit_group,
key=self.ratelimit_key,
rate=self.ratelimit_rate,
increment=False)
if ratelimited and self.ratelimit_block:
raise Ratelimited()
return super(RateLimitedFormView, self).dispatch(*args, **kwargs)
class RegistrationView(RateLimitedFormView):
template_name = 'accounts/register.html'
form_class = EmailUserCreationForm
ratelimit_group = 'registration'
def form_valid(self, form):
user = form.save()
messages.success(self.request,
mark_safe(
"Successfully registered, you are now logged in! <a href='%s'>View your profile</a>" %
reverse('user-detail', kwargs={'pk': user.id})))
user = authenticate(username=form.cleaned_data['username'],
password=form.cleaned_data['password1'])
login(self.request, user)
is_ratelimited(request=self.request, group=self.ratelimit_group, key=self.ratelimit_key,
rate=self.ratelimit_rate, increment=True)
return super(RegistrationView, self).form_valid(form)
def get_success_url(self):
return reverse('new_count')
class PasswordChangeView(LoginRequiredMixin, FormView):
template_name = 'accounts/password_change.html'
form_class = PasswordChangeForm
def get_form_kwargs(self):
kwargs = super(PasswordChangeView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
messages.success(self.request, "Password changed successfully")
return HttpResponseRedirect(reverse('new_count'))
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
context_object_name = 'user_detail'
template_name = 'accounts/user_detail.html'
def get_object(self, queryset=None):
if self.request.user.id == int(self.kwargs['pk']):
return super(UserDetailView, self).get_object()
else:
raise PermissionDenied
def get_context_data(self, **kwargs):
context = super(UserDetailView, self).get_context_data(**kwargs)
context['keyboards'] = self.object.keyboard_set.all().order_by('-is_primary')
return context
class UserDeleteView(LoginRequiredMixin, DeleteView):
model = User
context_object_name = 'user_object'
template_name = 'accounts/user_check_delete.html'
def get_object(self, queryset=None):
if self.request.user.id == int(self.kwargs['pk']):
return super(UserDeleteView, self).get_object()
else:
raise PermissionDenied
def get_success_url(self):
messages.success(self.request, "User account deleted")
return reverse('new_count')
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ['first_name', 'last_name', 'email', ]
template_name = 'accounts/user_update.html'
def get_object(self, queryset=None):
if self.request.user.id == int(self.kwargs['pk']):
return super(UserUpdateView, self).get_object()
else:
raise PermissionDenied
def get_success_url(self):
messages.success(self.request, "User details updated")
return reverse('user-detail', kwargs={'pk': self.kwargs['pk']})
class PasswordResetView(RatelimitMixin, FormView):
template_name = 'accounts/reset_form.html'
form_class = PasswordResetForm
ratelimit_rate = '5/h'
ratelimit_group = 'pwdreset'
ratelimit_key = 'ip'
ratelimit_block = True
def form_valid(self, form):
form.save(request=self.request)
messages.success(self.request, 'Reset email sent')
return super(PasswordResetView, self).form_valid(form)
def form_invalid(self, form):
"""Don't expose form errors to the user"""
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('new_count')
class PasswordResetConfirmView(FormView):
template_name = 'accounts/reset_confirm.html'
form_class = SetPasswordForm
@method_decorator(sensitive_post_parameters())
def dispatch(self, request, *args, **kwargs):
return super(PasswordResetConfirmView, self).dispatch(request, *args, **kwargs)
@staticmethod
def valid_user(uidb64):
try:
uid = urlsafe_base64_decode(uidb64)
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
return None
return user
@staticmethod
def valid_token(user, token):
if user is not None:
return default_token_generator.check_token(user, token)
else:
return False
def _valid_inputs(self, uidb64, token):
self.user_object = self.valid_user(uidb64)
return self.valid_token(self.user_object, token)
def get(self, request, *args, **kwargs):
if self._valid_inputs(self.kwargs['uidb64'], self.kwargs['token']):
form = self.get_form(self.get_form_class())
return self.render_to_response(self.get_context_data(form=form, validlink=True))
else:
return self.render_to_response(self.get_context_data(validlink=False))
def post(self, request, *args, **kwargs):
if self._valid_inputs(self.kwargs['uidb64'], self.kwargs['token']):
return super(PasswordResetConfirmView, self).post(request, *args, **kwargs)
else:
return self.render_to_response(self.get_context_data(validlink=False))
def get_form_kwargs(self):
kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
kwargs['user'] = self.user_object
return kwargs
def form_valid(self, form):
form.save()
messages.success(self.request, 'Password reset successfully')
return HttpResponseRedirect(reverse('new_count'))
def rate_limited(request, exception):
messages.error(request, 'You have been rate limited')
return HttpResponseRedirect(reverse('new_count'))
| mit |
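A minimal sketch of reusing the RateLimitedFormView base class above for a new rate-limited endpoint; ContactForm, the template name, and the 'contact' group are hypothetical and not part of the original app:

from django import forms

class ContactForm(forms.Form):
    email = forms.EmailField()
    message = forms.CharField(widget=forms.Textarea)

class ContactView(RateLimitedFormView):
    template_name = 'contact.html'  # hypothetical template
    form_class = ContactForm
    ratelimit_group = 'contact'     # separate bucket from 'registration'
    ratelimit_rate = '10/h'         # ten posts per hour per IP

    def form_valid(self, form):
        # increment the counter only on successful submissions,
        # mirroring RegistrationView above
        is_ratelimited(request=self.request, group=self.ratelimit_group,
                       key=self.ratelimit_key, rate=self.ratelimit_rate,
                       increment=True)
        return super(ContactView, self).form_valid(form)

    def get_success_url(self):
        return reverse('new_count')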
alrusdi/lettuce | tests/integration/lib/Django-1.2.5/django/core/management/commands/reset.py | 81 | 2273 | from optparse import make_option
from django.conf import settings
from django.core.management.base import AppCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_reset
from django.db import connections, transaction, DEFAULT_DB_ALIAS
class Command(AppCommand):
option_list = AppCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to reset. '
'Defaults to the "default" database.'),
)
help = "Executes ``sqlreset`` for the given app(s) in the current database."
args = '[appname ...]'
output_transaction = True
def handle_app(self, app, **options):
using = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[using]
app_name = app.__name__.split('.')[-2]
self.style = no_style()
sql_list = sql_reset(app, self.style, connection)
if options.get('interactive'):
confirm = raw_input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY any data for
the "%s" application in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (app_name, connection.settings_dict['NAME']))
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed()
raise CommandError("""Error: %s couldn't be reset. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlreset %s'. That's the SQL this command wasn't able to run.
The full error: %s""" % (app_name, app_name, e))
transaction.commit_unless_managed()
else:
print "Reset cancelled."
| gpl-3.0 |
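A minimal sketch of driving the reset command above programmatically instead of via django-admin.py; 'myapp' is a placeholder app label:

from django.core.management import call_command

# equivalent to: django-admin.py reset myapp --noinput
call_command('reset', 'myapp', interactive=False, database='default')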
rohitwaghchaure/digitales_erpnext | erpnext/accounts/report/budget_variance_report/budget_variance_report.py | 1 | 4804 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt
from frappe.utils import formatdate
import time
from erpnext.accounts.utils import get_fiscal_year
from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
cam_map = get_costcenter_account_month_map(filters)
data = []
for cost_center, cost_center_items in cam_map.items():
for account, monthwise_data in cost_center_items.items():
row = [cost_center, account]
totals = [0, 0, 0]
for relevant_months in period_month_ranges:
period_data = [0, 0, 0]
for month in relevant_months:
month_data = monthwise_data.get(month, {})
for i, fieldname in enumerate(["target", "actual", "variance"]):
value = flt(month_data.get(fieldname))
period_data[i] += value
totals[i] += value
period_data[2] = period_data[0] - period_data[1]
row += period_data
totals[2] = totals[0] - totals[1]
row += totals
data.append(row)
return columns, sorted(data, key=lambda x: (x[0], x[1]))
def get_columns(filters):
for fieldname in ["fiscal_year", "period", "company"]:
if not filters.get(fieldname):
label = (" ".join(fieldname.split("_"))).title()
msgprint(_("Please specify") + ": " + label,
raise_exception=True)
columns = [_("Cost Center") + ":Link/Cost Center:120", _("Account") + ":Link/Account:120"]
group_months = False if filters["period"] == "Monthly" else True
for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
for label in [_("Target") + " (%s)", _("Actual") + " (%s)", _("Variance") + " (%s)"]:
if group_months:
				label = label % (formatdate(from_date, format_string="MMM") + " - " + formatdate(to_date, format_string="MMM"))
else:
label = label % formatdate(from_date, format_string="MMM")
columns.append(label+":Float:120")
return columns + [_("Total Target") + ":Float:120", _("Total Actual") + ":Float:120",
_("Total Variance") + ":Float:120"]
#Get cost center & target details
def get_costcenter_target_details(filters):
return frappe.db.sql("""select cc.name, cc.distribution_id,
cc.parent_cost_center, bd.account, bd.budget_allocated
from `tabCost Center` cc, `tabBudget Detail` bd
where bd.parent=cc.name and bd.fiscal_year=%s and
cc.company=%s order by cc.name""" % ('%s', '%s'),
(filters.get("fiscal_year"), filters.get("company")), as_dict=1)
#Get target distribution details of accounts of cost center
def get_target_distribution_details(filters):
target_details = {}
for d in frappe.db.sql("""select bd.name, bdd.month, bdd.percentage_allocation
from `tabBudget Distribution Detail` bdd, `tabBudget Distribution` bd
where bdd.parent=bd.name and bd.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))
return target_details
#Get actual details from gl entry
def get_actual_details(filters):
ac_details = frappe.db.sql("""select gl.account, gl.debit, gl.credit,
gl.cost_center, MONTHNAME(gl.posting_date) as month_name
from `tabGL Entry` gl, `tabBudget Detail` bd
where gl.fiscal_year=%s and company=%s
and bd.account=gl.account and bd.parent=gl.cost_center""" % ('%s', '%s'),
(filters.get("fiscal_year"), filters.get("company")), as_dict=1)
cc_actual_details = {}
for d in ac_details:
cc_actual_details.setdefault(d.cost_center, {}).setdefault(d.account, []).append(d)
return cc_actual_details
def get_costcenter_account_month_map(filters):
import datetime
costcenter_target_details = get_costcenter_target_details(filters)
tdd = get_target_distribution_details(filters)
actual_details = get_actual_details(filters)
cam_map = {}
for ccd in costcenter_target_details:
for month_id in range(1, 13):
month = datetime.date(2013, month_id, 1).strftime('%B')
cam_map.setdefault(ccd.name, {}).setdefault(ccd.account, {})\
.setdefault(month, frappe._dict({
"target": 0.0, "actual": 0.0
}))
tav_dict = cam_map[ccd.name][ccd.account][month]
month_percentage = tdd.get(ccd.distribution_id, {}).get(month, 0) \
if ccd.distribution_id else 100.0/12
tav_dict.target = flt(ccd.budget_allocated) * month_percentage / 100
for ad in actual_details.get(ccd.name, {}).get(ccd.account, []):
if ad.month_name == month:
tav_dict.actual += flt(ad.debit) - flt(ad.credit)
return cam_map
| agpl-3.0 |
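A minimal sketch of calling the report entry point above directly; the filter values are placeholders and assume a configured Frappe/ERPNext site with fixtures in place:

filters = {
    "fiscal_year": "2013-2014",   # placeholder fiscal year name
    "period": "Quarterly",        # grouping handled by get_period_month_ranges
    "company": "My Company",      # placeholder company name
}
columns, data = execute(filters)
# each row: [cost_center, account, target/actual/variance per period,
#            then the three totals]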
blueburningcoder/pybrain | pybrain/structure/modules/statedependentlayer.py | 25 | 3256 | __author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de'
from scipy import random, asarray, zeros, dot
from pybrain.structure.modules.neuronlayer import NeuronLayer
from pybrain.tools.functions import expln, explnPrime
from pybrain.structure.parametercontainer import ParameterContainer
class StateDependentLayer(NeuronLayer, ParameterContainer):
def __init__(self, dim, module, name=None, onesigma=True):
NeuronLayer.__init__(self, dim, name)
self.exploration = zeros(dim, float)
self.state = None
self.onesigma = onesigma
if self.onesigma:
# one single parameter: sigma
ParameterContainer.__init__(self, 1)
else:
# sigmas for all parameters in the exploration module
ParameterContainer.__init__(self, module.paramdim)
# a module for the exploration
assert module.outdim == dim, (
"Passed module does not have right dimension")
self.module = module
self.autoalpha = False
self.enabled = True
def setState(self, state):
self.state = asarray(state)
self.exploration[:] = self.module.activate(self.state)
self.module.reset()
def drawRandomWeights(self):
self.module._setParameters(
random.normal(0, expln(self.params), self.module.paramdim))
def _forwardImplementation(self, inbuf, outbuf):
        assert self.exploration is not None
if not self.enabled:
outbuf[:] = inbuf
else:
outbuf[:] = inbuf + self.exploration
self.exploration = zeros(self.dim, float)
def _backwardImplementation(self, outerr, inerr, outbuf, inbuf):
if self.onesigma:
# algorithm for one global sigma for all mu's
expln_params = expln(self.params)
sumxsquared = dot(self.state, self.state)
self._derivs += (
sum((outbuf - inbuf) ** 2 - expln_params ** 2 * sumxsquared)
/ expln_params * explnPrime(self.params)
)
inerr[:] = (outbuf - inbuf)
if not self.autoalpha and sumxsquared != 0:
inerr /= expln_params ** 2 * sumxsquared
self._derivs /= expln_params ** 2 * sumxsquared
else:
            # Algorithm for separate sigma for each mu
expln_params = expln(self.params
).reshape(len(outbuf), len(self.state))
explnPrime_params = explnPrime(self.params
).reshape(len(outbuf), len(self.state))
idx = 0
for j in range(len(outbuf)):
sigma_subst2 = dot(self.state ** 2, expln_params[j, :]**2)
for i in range(len(self.state)):
self._derivs[idx] = ((outbuf[j] - inbuf[j]) ** 2 - sigma_subst2) / sigma_subst2 * \
self.state[i] ** 2 * expln_params[j, i] * explnPrime_params[j, i]
if self.autoalpha and sigma_subst2 != 0:
self._derivs[idx] /= sigma_subst2
idx += 1
inerr[j] = (outbuf[j] - inbuf[j])
if not self.autoalpha and sigma_subst2 != 0:
inerr[j] /= sigma_subst2
| bsd-3-clause |
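A minimal sketch of attaching an exploration module to the layer above; it assumes a pybrain feed-forward network (via buildNetwork) mapping a 4-dimensional state to 3-dimensional exploration noise:

from pybrain.tools.shortcuts import buildNetwork

explorer = buildNetwork(4, 3, bias=False)  # state -> exploration noise
layer = StateDependentLayer(3, explorer)   # onesigma=True: a single sigma parameter
layer.drawRandomWeights()                  # resample the exploration weights
layer.setState([0.1, 0.2, 0.3, 0.4])       # caches noise for the next forward pass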
dennisguse/pjsip | tests/pjsua/scripts-recvfrom/231_reg_bad_fail_stale_false_nonce_changed.py | 42 | 1562 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
# In this test we simulate broken server, where:
# - it wants to signal that NONCE has changed
# - but it sets stale=false
# For this case pjsip will retry authentication until
# PJSIP_MAX_STALE_COUNT is exceeded.
#
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
"--realm=python --user=username --password=password"
req1 = sip.RecvfromTransaction("Initial request", 401,
include=["REGISTER sip"],
exclude=["Authorization"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""]
)
req2 = sip.RecvfromTransaction("First retry", 401,
include=["REGISTER sip", "Authorization", "nonce=\"1\""],
exclude=["Authorization:[\\s\\S]+Authorization:"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"]
)
req3 = sip.RecvfromTransaction("Second retry retry", 401,
include=["REGISTER sip", "Authorization", "nonce=\"2\""],
exclude=["Authorization:[\\s\\S]+Authorization:"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"3\", stale=true"]
)
req4 = sip.RecvfromTransaction("Third retry", 401,
include=["REGISTER sip", "Authorization", "nonce=\"3\""],
exclude=["Authorization:[\\s\\S]+Authorization:"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"4\", stale=true"],
expect="PJSIP_EAUTHSTALECOUNT"
)
recvfrom_cfg = sip.RecvfromCfg("Failed registration retry (server rejects with stale=true) ",
pjsua, [req1, req2, req3, req4])
| gpl-2.0 |
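A rough Python sketch of the client behaviour the four transactions above exercise: the client re-authenticates each time the challenge carries a new nonce, and gives up with PJSIP_EAUTHSTALECOUNT once the stale-retry limit (assumed to be three here, matching the script) is exceeded:

def register(challenge_nonces, max_stale_retries=3):
    # challenge_nonces: the nonces carried by successive 401 responses
    retries = 0
    for nonce in challenge_nonces:
        if retries >= max_stale_retries:
            return "PJSIP_EAUTHSTALECOUNT"
        retries += 1
        # re-send REGISTER with an Authorization header using `nonce`...
    return "registered"

print(register("1234"))  # four challenges -> PJSIP_EAUTHSTALECOUNT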
lexman/tuttle | tests/test_log_follower.py | 1 | 8283 | # -*- coding: utf-8 -*-
from tests.functional_tests import isolate, run_tuttle_file
from cStringIO import StringIO
from tuttle.log_follower import LogTracer, LogsFollower
import logging
import sys
class CaptureOutputs(object):
"""
    Captures stdout and stderr
"""
def __init__(self):
self._oldout, self._olderr = sys.stdout, sys.stderr
def __enter__(self):
self._out = StringIO()
sys.stdout,sys.stderr = self._out, self._out
return self
def __exit__(self, *args):
sys.stdout, sys.stderr = self._oldout, self._olderr
self.output = self._out.getvalue()
class TestLogFollower():
@isolate([])
def test_log_single_file(self):
"""LogTracer should log the content of a file"""
with CaptureOutputs() as co:
logger = LogsFollower.get_logger()
lt = LogTracer(logger, logging.INFO, "test.log")
with open("test.log", "w") as f:
f.write("line 1\n")
f.write("line 2\n")
f.write("line 3\n")
lt.trace()
output = co.output
assert output.find("line 1") >= 0, output
assert output.find("line 2") >= 0, output
assert output.find("line 3") >= 0, output
@isolate([])
def test_log_should_not_double_carriage_return(self):
""" """
with CaptureOutputs() as co:
logger = LogsFollower.get_logger()
lt = LogTracer(logger, logging.INFO, "test.log")
with open("test.log", "w") as f:
f.write("line 1\n")
f.write("line 2\n")
lt.trace()
output = co.output
assert output.find("\n\n") == -1, output
@isolate([])
    def test_log_last_char_without_trailing_cr(self):
""" The last char of the file must be logged even if the
file does not finish with CR """
with CaptureOutputs() as co:
logger = LogsFollower.get_logger()
lt = LogTracer(logger, logging.INFO, "test.log")
with open("test.log", "w") as f:
f.write("line 1")
lt.trace()
output = co.output
assert output.find("line 1") >= 0, output
@isolate([])
def test_log_huge_file(self):
"""LogTracer should log the content of a big file in stdout"""
with CaptureOutputs() as co:
logger = LogsFollower.get_logger()
lt = LogTracer(logger, "namespace", "test.log")
with open("test.log", "w") as f:
for i in xrange(5000):
f.write("line {}\n".format(i))
while lt.trace():
pass
output = co.output
assert output.find("line 1") >= 0, output
assert output.find("line 2") >= 0, output
assert output.find("line 3") >= 0, output
assert output.find("line 4999") >= 0, output
@isolate([])
def test_log_multiple_files(self):
"""LogTracer should log the content of several files in stdout"""
with CaptureOutputs() as co:
lf = LogsFollower()
lf.follow_process("w1.stdout", "w1.stderr", "process1")
lf.follow_process("w2.stdout", "w2.stderr", "process2")
lf.follow_process("w3.stdout", "w3.stderr", "process3")
with open("w1.stdout", "w") as fo1, \
open("w1.stderr", "w") as fe1, \
open("w2.stdout", "w") as fo2, \
open("w2.stderr", "w") as fe2, \
open("w3.stdout", "w") as fo3, \
open("w3.stderr", "w") as fe3 :
for i in xrange(5000):
fo1.write("w1.stdout - line {}\n".format(i))
fe1.write("w1.stderr - line {}\n".format(i))
fo2.write("w2.stdout - line {}\n".format(i))
fe2.write("w2.stderr - line {}\n".format(i))
fo3.write("w3.stdout - line {}\n".format(i))
fe3.write("w3.stderr - line {}\n".format(i))
while lf.trace_logs():
pass
output = co.output
assert output.find("w1.stderr - line 1") >= 0, output
assert output.find("w1.stdout - line 1") >= 0, output
assert output.find("w2.stderr - line 1") >= 0, output
assert output.find("w2.stdout - line 1") >= 0, output
assert output.find("w3.stdout - line 1") >= 0, output
assert output.find("w3.stderr - line 1") >= 0, output
assert output.find("w1.stderr - line 4999") >= 0, output
assert output.find("w1.stdout - line 4999") >= 0, output
assert output.find("w2.stderr - line 4999") >= 0, output
assert output.find("w2.stdout - line 4999") >= 0, output
assert output.find("w3.stdout - line 4999") >= 0, output
assert output.find("w3.stderr - line 4999") >= 0, output
@isolate([])
def test_log_format(self):
"""logs should display log level and message"""
with CaptureOutputs() as co:
logger = LogsFollower.get_logger()
logger.info("MESSAGE")
assert co.output.find("MESSAGE") == 0, co.output
@isolate([])
def test_log_format_stdout_stderr(self):
"""logs should display log level and message"""
with CaptureOutputs() as co:
lf = LogsFollower()
lf.follow_process("stdout", "stderr", "process_id")
with open("stdout", "w") as fout, \
open("stderr", "w") as ferr:
fout.write("file stdout")
ferr.write("file stderr")
while lf.trace_logs():
pass
assert co.output.find("[process_id::stdout] file stdout") >= 0, co.output
assert co.output.find("[process_id::stderr] file stderr") >= 0, co.output
@isolate([])
def test_log_in_background(self):
"""Should log in background ans stop when foreground processing
is over"""
import time
with CaptureOutputs() as co:
lf = LogsFollower()
lf.follow_process("stdout", "stderr", "process_id")
lf.trace_in_background()
with open("stdout", "w") as fout, \
open("stderr", "w") as ferr:
fout.write("file stdout")
ferr.write("file stderr")
lf.terminate()
assert co.output.find("[process_id::stdout] file stdout") >= 0, co.output
assert co.output.find("[process_id::stderr] file stderr") >= 0, co.output
@isolate([])
def test_log_a_lot_in_background(self):
"""Should log in background ans stop when foreground processing
is over even with a lot a data"""
with CaptureOutputs() as co:
lf = LogsFollower()
lf.follow_process("stdout", "stderr", "process_id")
lf.trace_in_background()
with open("stdout", "w") as fout, \
open("stderr", "w") as ferr:
fout.write("file stdout")
ferr.write("file stderr")
for i in xrange(5000):
fout.write("stdout - line {}\n".format(i))
ferr.write("stderr - line {}\n".format(i))
lf.terminate()
assert co.output.find("[process_id::stdout] stdout - line 1") >= 0, co.output
assert co.output.find("[process_id::stderr] stderr - line 1") >= 0, co.output
assert co.output.find("[process_id::stdout] stdout - line 4999") >= 0, co.output
assert co.output.find("[process_id::stderr] stderr - line 4999") >= 0, co.output
@isolate([])
def test_thread_protection(self):
"""When a section of code using the LogsFollower is complete, the thread should stop"""
lf = LogsFollower()
lf.follow_process("stdout", "stderr", "process_id")
with lf.trace_in_background():
assert lf._thread.is_alive(), "Backgroung thread isn't running..."
with open("stdout", "w") as fout, \
open("stderr", "w") as ferr:
fout.write("file stdout")
ferr.write("file stderr")
assert not lf._thread.is_alive(), "Backgroung hasn't stopped !"
| mit |
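Outside the tests, the intended use of LogsFollower is sketched below: follow a process's log files in a background thread while the work runs (the paths and run_worker are placeholders):

lf = LogsFollower()
lf.follow_process("worker.stdout", "worker.stderr", "worker")
with lf.trace_in_background():  # starts the tracing thread
    run_worker()                # hypothetical; writes to the two files above
# on exit the thread is stopped, as test_thread_protection verifies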
JulyKikuAkita/PythonPrac | cs15211/LongestWordInDictionaryThroughDeleting.py | 1 | 4177 | __source__ = 'https://leetcode.com/problems/longest-word-in-dictionary-through-deleting/'
# Time: O(nlogn + n*x). Sorting takes O(nlogn); each isSubsequence check takes O(x), where x is the string length
# Space: O(logn). Sorting takes O(logn) space in the average case.
#
# Description: 524. Longest Word in Dictionary through Deleting
#
# Given a string and a string dictionary, find the longest string in the dictionary
# that can be formed by deleting some characters of the given string.
# If there are more than one possible results, return the longest word with the smallest lexicographical order.
# If there is no possible result, return the empty string.
#
# Example 1:
# Input:
# s = "abpcplea", d = ["ale","apple","monkey","plea"]
#
# Output:
# "apple"
# Example 2:
# Input:
# s = "abpcplea", d = ["a","b","c"]
#
# Output:
# "a"
#
# Note:
# All the strings in the input will only contain lower-case letters.
# The size of the dictionary won't exceed 1,000.
# The length of all the strings in the input won't exceed 1,000.
# Hide Company Tags Google
# Hide Tags Two Pointers Sort
#
import unittest
# Let's check whether each word is a subsequence of S individually by "best" order
# (largest size, then lexicographically smallest.) Then if we find a match,
# we know the word being considered must be the best possible answer,
# since better answers were already considered beforehand.
#
# Let's figure out how to check if a needle (word) is a subsequence of a haystack (S).
# This is a classic problem with the following solution: walk through S,
# keeping track of the position (i) of the needle that indicates that word[i:]
# still remains to be matched to S at this point in time. Whenever word[i] matches the current character in S,
# we only have to match word[i+1:], so we increment i. At the end of this process, i == len(word)
# if and only if we've matched every character in word to some character in S in order of our walk.
#
# 584ms 19.60%
class Solution(object):
def findLongestWord(self, s, d):
"""
:type s: str
:type d: List[str]
:rtype: str
"""
d.sort(key = lambda x: (-len(x), x))
for word in d:
i = 0
for c in s:
if i < len(word) and word[i] == c:
i += 1
if i == len(word):
return word
return ""
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/longest-word-in-dictionary-through-deleting/solution/
1.
An alternate, more efficient solution which avoids sorting the dictionary:
# 33ms 65.40%
class Solution {
public String findLongestWord(String s, List<String> d) {
String res = "";
for (String key : d) {
int i = 0;
for ( char c : s.toCharArray()) {
if ( i < key.length() && c == key.charAt(i)) i++;
}
if (i == key.length() && key.length() >= res.length()) {
if (key.length() > res.length() || key.compareTo(res) < 0) { //asec
res = key;
}
}
}
return res;
}
}
2.
The idea is to sort the dictionary d first by length DESC, then lexicographically ASC,
and test whether each word is a subsequence of s. The first match is the answer.
# 106ms 4.57%
class Solution {
public String findLongestWord(String s, List<String> d) {
if (s.length() == 0 || d.size() == 0) return "";
//sort dict:
Collections.sort(d, (a, b) -> {
return s2.length() != s1.length() ?
s2.length() - s1.length() : //desc
s1.compareTo(s2); //asec
});
for (String key : d) {
if (s.length() < key.length()) continue;
if (isSubSeq(key, s)) return key;
}
return "";
}
    public boolean isSubSeq(String needle, String hay) {
int i = 0;
        for (char c : hay.toCharArray()) {
if (i < needle.length() && c == needle.charAt(i)) {
i++;
}
}
return i == needle.length();
}
}
'''
| apache-2.0 |
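A quick check of the Python solution above against the problem's first example:

print(Solution().findLongestWord("abpcplea", ["ale", "apple", "monkey", "plea"]))
# -> "apple"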
rakeshmi/tempest | tempest/api/compute/admin/test_aggregates_negative.py | 8 | 9415 | # Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest import test
class AggregatesAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Aggregates API that require admin privileges
"""
@classmethod
def setup_clients(cls):
super(AggregatesAdminNegativeTestJSON, cls).setup_clients()
cls.client = cls.os_adm.aggregates_client
cls.user_client = cls.aggregates_client
@classmethod
def resource_setup(cls):
super(AggregatesAdminNegativeTestJSON, cls).resource_setup()
cls.aggregate_name_prefix = 'test_aggregate'
cls.az_name_prefix = 'test_az'
hosts_all = cls.os_adm.hosts_client.list_hosts()['hosts']
hosts = map(lambda x: x['host_name'],
filter(lambda y: y['service'] == 'compute', hosts_all))
cls.host = hosts[0]
@test.attr(type=['negative'])
@test.idempotent_id('86a1cb14-da37-4a70-b056-903fd56dfe29')
def test_aggregate_create_as_user(self):
# Regular user is not allowed to create an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
self.assertRaises(lib_exc.Forbidden,
self.user_client.create_aggregate,
name=aggregate_name)
@test.attr(type=['negative'])
@test.idempotent_id('3b8a1929-3793-4e92-bcb4-dfa572ee6c1d')
def test_aggregate_create_aggregate_name_length_less_than_1(self):
# the length of aggregate name should >= 1 and <=255
self.assertRaises(lib_exc.BadRequest,
self.client.create_aggregate,
name='')
@test.attr(type=['negative'])
@test.idempotent_id('4c194563-543b-4e70-a719-557bbe947fac')
def test_aggregate_create_aggregate_name_length_exceeds_255(self):
# the length of aggregate name should >= 1 and <=255
aggregate_name = 'a' * 256
self.assertRaises(lib_exc.BadRequest,
self.client.create_aggregate,
name=aggregate_name)
@test.attr(type=['negative'])
@test.idempotent_id('9c23a291-b0b1-487b-b464-132e061151b3')
def test_aggregate_create_with_existent_aggregate_name(self):
# creating an aggregate with existent aggregate name is forbidden
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate,
aggregate['aggregate']['id'])
self.assertRaises(lib_exc.Conflict,
self.client.create_aggregate,
name=aggregate_name)
@test.attr(type=['negative'])
@test.idempotent_id('cd6de795-c15d-45f1-8d9e-813c6bb72a3d')
def test_aggregate_delete_as_user(self):
# Regular user is not allowed to delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertRaises(lib_exc.Forbidden,
self.user_client.delete_aggregate,
aggregate['id'])
@test.attr(type=['negative'])
@test.idempotent_id('b7d475a6-5dcd-4ff4-b70a-cd9de66a6672')
def test_aggregate_list_as_user(self):
# Regular user is not allowed to list aggregates.
self.assertRaises(lib_exc.Forbidden,
self.user_client.list_aggregates)
@test.attr(type=['negative'])
@test.idempotent_id('557cad12-34c9-4ff4-95f0-22f0dfbaf7dc')
def test_aggregate_get_details_as_user(self):
# Regular user is not allowed to get aggregate details.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertRaises(lib_exc.Forbidden,
self.user_client.show_aggregate,
aggregate['id'])
@test.attr(type=['negative'])
@test.idempotent_id('c74f4bf1-4708-4ff2-95a0-f49eaca951bd')
def test_aggregate_delete_with_invalid_id(self):
# Delete an aggregate with invalid id should raise exceptions.
self.assertRaises(lib_exc.NotFound,
self.client.delete_aggregate, -1)
@test.attr(type=['negative'])
@test.idempotent_id('3c916244-2c46-49a4-9b55-b20bb0ae512c')
def test_aggregate_get_details_with_invalid_id(self):
# Get aggregate details with invalid id should raise exceptions.
self.assertRaises(lib_exc.NotFound,
self.client.show_aggregate, -1)
@test.attr(type=['negative'])
@test.idempotent_id('0ef07828-12b4-45ba-87cc-41425faf5711')
def test_aggregate_add_non_exist_host(self):
# Adding a non-exist host to an aggregate should raise exceptions.
hosts_all = self.os_adm.hosts_client.list_hosts()['hosts']
hosts = map(lambda x: x['host_name'], hosts_all)
while True:
non_exist_host = data_utils.rand_name('nonexist_host')
if non_exist_host not in hosts:
break
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertRaises(lib_exc.NotFound, self.client.add_host,
aggregate['id'], host=non_exist_host)
@test.attr(type=['negative'])
@test.idempotent_id('7324c334-bd13-4c93-8521-5877322c3d51')
def test_aggregate_add_host_as_user(self):
# Regular user is not allowed to add a host to an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertRaises(lib_exc.Forbidden,
self.user_client.add_host,
aggregate['id'], host=self.host)
@test.attr(type=['negative'])
@test.idempotent_id('19dd44e1-c435-4ee1-a402-88c4f90b5950')
def test_aggregate_add_existent_host(self):
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], host=self.host)
self.addCleanup(self.client.remove_host, aggregate['id'],
host=self.host)
self.assertRaises(lib_exc.Conflict, self.client.add_host,
aggregate['id'], host=self.host)
@test.attr(type=['negative'])
@test.idempotent_id('7a53af20-137a-4e44-a4ae-e19260e626d9')
def test_aggregate_remove_host_as_user(self):
# Regular user is not allowed to remove a host from an aggregate.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], host=self.host)
self.addCleanup(self.client.remove_host, aggregate['id'],
host=self.host)
self.assertRaises(lib_exc.Forbidden,
self.user_client.remove_host,
aggregate['id'], host=self.host)
@test.attr(type=['negative'])
@test.idempotent_id('95d6a6fa-8da9-4426-84d0-eec0329f2e4d')
def test_aggregate_remove_nonexistent_host(self):
non_exist_host = data_utils.rand_name('nonexist_host')
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = (self.client.create_aggregate(name=aggregate_name)
['aggregate'])
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertRaises(lib_exc.NotFound, self.client.remove_host,
aggregate['id'], host=non_exist_host)
| apache-2.0 |
tbadgu/Barcamp-Bangalore-Android-App | gcm_flask/werkzeug/exceptions.py | 84 | 16350 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException, e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
    can get a response object by calling ``get_response()`` on an HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
from werkzeug._internal import HTTP_STATUS_CODES, _get_environ
class HTTPException(Exception):
"""
    Baseclass for all HTTP exceptions. This exception can be called as a WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
"""
code = None
description = None
def __init__(self, description=None):
Exception.__init__(self, '%d %s' % (self.code, self.name))
if description is not None:
self.description = description
@classmethod
def wrap(cls, exception, name=None):
"""This method returns a new subclass of the exception provided that
also is a subclass of `BadRequest`.
"""
class newcls(cls, exception):
def __init__(self, arg=None, description=None):
cls.__init__(self, description)
exception.__init__(self, arg)
newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
newcls.__name__ = name or cls.__name__ + exception.__name__
return newcls
@property
def name(self):
"""The status name."""
return HTTP_STATUS_CODES[self.code]
def get_description(self, environ):
"""Get the description."""
environ = _get_environ(environ)
return self.description
def get_body(self, environ):
"""Get the HTML body."""
return (
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>%(code)s %(name)s</title>\n'
'<h1>%(name)s</h1>\n'
'%(description)s\n'
) % {
'code': self.code,
'name': escape(self.name),
'description': self.get_description(environ)
}
def get_headers(self, environ):
"""Get a list of headers."""
return [('Content-Type', 'text/html')]
def get_response(self, environ):
"""Get a response object.
:param environ: the environ for the request.
:return: a :class:`BaseResponse` object or a subclass thereof.
"""
# lazily imported for various reasons. For one, we can use the exceptions
# with custom responses (testing exception instances against types) and
# so we don't ever have to import the wrappers, but also because there
# are circular dependencies when bootstrapping the module.
environ = _get_environ(environ)
from werkzeug.wrappers import BaseResponse
headers = self.get_headers(environ)
return BaseResponse(self.get_body(environ), self.code, headers)
def __call__(self, environ, start_response):
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = self.get_response(environ)
return response(environ, start_response)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
if 'description' in self.__dict__:
txt = self.description
else:
txt = self.name
return '%d: %s' % (self.code, txt)
def __repr__(self):
return '<%s \'%s\'>' % (self.__class__.__name__, self)
class _ProxyException(HTTPException):
"""An HTTP exception that expands renders a WSGI application on error."""
def __init__(self, response):
Exception.__init__(self, 'proxy exception for %r' % response)
self.response = response
def get_response(self, environ):
return self.response
class BadRequest(HTTPException):
"""*400* `Bad Request`
Raise if the browser sends something to the application the application
or server cannot handle.
"""
code = 400
description = (
'<p>The browser (or proxy) sent a request that this server could '
'not understand.</p>'
)
class ClientDisconnected(BadRequest):
"""Internal exception that is raised if Werkzeug detects a disconnected
client. Since the client is already gone at that point attempting to
send the error message to the client might not work and might ultimately
result in another exception in the server. Mainly this is here so that
it is silenced by default as far as Werkzeug is concerned.
Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent, this might or might not be raised if a client
is gone.
.. versionadded:: 0.8
"""
class Unauthorized(HTTPException):
"""*401* `Unauthorized`
Raise if the user is not authorized. Also used if you want to use HTTP
basic auth.
"""
code = 401
description = (
'<p>The server could not verify that you are authorized to access '
'the URL requested. You either supplied the wrong credentials (e.g. '
'a bad password), or your browser doesn\'t understand how to supply '
'the credentials required.</p><p>In case you are allowed to request '
'the document, please check your user-id and password and try '
'again.</p>'
)
class Forbidden(HTTPException):
"""*403* `Forbidden`
Raise if the user doesn't have the permission for the requested resource
but was authenticated.
"""
code = 403
description = (
'<p>You don\'t have the permission to access the requested resource. '
'It is either read-protected or not readable by the server.</p>'
)
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
description = (
'<p>The requested URL was not found on the server.</p>'
'<p>If you entered the URL manually please check your spelling and '
'try again.</p>'
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
Raise if the server used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking the response would be invalid if you don't provide valid
methods in the header which you can do with that list.
"""
code = 405
def __init__(self, valid_methods=None, description=None):
"""Takes an optional list of valid http methods
starting with werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods
def get_headers(self, environ):
headers = HTTPException.get_headers(self, environ)
if self.valid_methods:
headers.append(('Allow', ', '.join(self.valid_methods)))
return headers
def get_description(self, environ):
m = escape(environ.get('REQUEST_METHOD', 'GET'))
return '<p>The method %s is not allowed for the requested URL.</p>' % m
class NotAcceptable(HTTPException):
"""*406* `Not Acceptable`
Raise if the server can't return any content conforming to the
`Accept` headers of the client.
"""
code = 406
description = (
'<p>The resource identified by the request is only capable of '
'generating response entities which have content characteristics '
'not acceptable according to the accept headers sent in the '
'request.</p>'
)
class RequestTimeout(HTTPException):
"""*408* `Request Timeout`
Raise to signalize a timeout.
"""
code = 408
description = (
'<p>The server closed the network connection because the browser '
'didn\'t finish the request within the specified time.</p>'
)
class Conflict(HTTPException):
"""*409* `Conflict`
Raise to signal that a request cannot be completed because it conflicts
with the current state on the server.
.. versionadded:: 0.7
"""
code = 409
description = (
'<p>A conflict happened while processing the request. The resource '
'might have been modified while the request was being processed.'
)
class Gone(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without new location.
"""
code = 410
description = (
'<p>The requested URL is no longer available on this server and '
'there is no forwarding address.</p><p>If you followed a link '
'from a foreign page, please contact the author of this page.'
)
class LengthRequired(HTTPException):
"""*411* `Length Required`
Raise if the browser submitted data but no ``Content-Length`` header which
is required for the kind of processing the server does.
"""
code = 411
description = (
'<p>A request with this method requires a valid <code>Content-'
'Length</code> header.</p>'
)
class PreconditionFailed(HTTPException):
"""*412* `Precondition Failed`
Status code used in combination with ``If-Match``, ``If-None-Match``, or
``If-Unmodified-Since``.
"""
code = 412
description = (
'<p>The precondition on the request for the URL failed positive '
'evaluation.</p>'
)
class RequestEntityTooLarge(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = (
'<p>The data value transmitted exceeds the capacity limit.</p>'
)
class RequestURITooLarge(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for too long URLs.
"""
code = 414
description = (
'<p>The length of the requested URL exceeds the capacity limit '
'for this server. The request cannot be processed.</p>'
)
class UnsupportedMediaType(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
'<p>The server does not support the media type transmitted in '
'the request.</p>'
)
class RequestedRangeNotSatisfiable(HTTPException):
"""*416* `Requested Range Not Satisfiable`
The client asked for a part of the file that lies beyond the end
of the file.
.. versionadded:: 0.7
"""
code = 416
description = (
'<p>The server cannot provide the requested range.'
)
class ExpectationFailed(HTTPException):
"""*417* `Expectation Failed`
The server cannot meet the requirements of the Expect request-header.
.. versionadded:: 0.7
"""
code = 417
description = (
'<p>The server could not meet the requirements of the Expect header'
)
class ImATeapot(HTTPException):
"""*418* `I'm a teapot`
The server should return this if it is a teapot and someone attempted
to brew coffee with it.
.. versionadded:: 0.7
"""
code = 418
description = (
'<p>This server is a teapot, not a coffee machine'
)
class InternalServerError(HTTPException):
"""*500* `Internal Server Error`
Raise if an internal server error occurred. This is a good fallback if an
unknown error occurred in the dispatcher.
"""
code = 500
description = (
'<p>The server encountered an internal error and was unable to '
'complete your request. Either the server is overloaded or there '
'is an error in the application.</p>'
)
class NotImplemented(HTTPException):
"""*501* `Not Implemented`
Raise if the application does not support the action requested by the
browser.
"""
code = 501
description = (
'<p>The server does not support the action requested by the '
'browser.</p>'
)
class BadGateway(HTTPException):
"""*502* `Bad Gateway`
If you do proxying in your application you should return this status code
if you received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
"""
code = 502
description = (
'<p>The proxy server received an invalid response from an upstream '
'server.</p>'
)
class ServiceUnavailable(HTTPException):
"""*503* `Service Unavailable`
Status code you should return if a service is temporarily unavailable.
"""
code = 503
description = (
'<p>The server is temporarily unable to service your request due to '
'maintenance downtime or capacity problems. Please try again '
'later.</p>'
)
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
for name, obj in globals().iteritems():
try:
if getattr(obj, 'code', None) is not None:
default_exceptions[obj.code] = obj
__all__.append(obj.__name__)
except TypeError: # pragma: no cover
continue
_find_exceptions()
del _find_exceptions
#: raised by the request functions if they were unable to decode the
#: incoming data properly.
HTTPUnicodeError = BadRequest.wrap(UnicodeError, 'HTTPUnicodeError')
class Aborter(object):
"""
When passed a dict of code -> exception items it can be used as
callable that raises exceptions. If the first argument to the
callable is an integer it will be looked up in the mapping, if it's
a WSGI application it will be raised in a proxy exception.
The rest of the arguments are forwarded to the exception constructor.
"""
def __init__(self, mapping=None, extra=None):
if mapping is None:
mapping = default_exceptions
self.mapping = dict(mapping)
if extra is not None:
self.mapping.update(extra)
def __call__(self, code, *args, **kwargs):
if not args and not kwargs and not isinstance(code, (int, long)):
raise _ProxyException(code)
if code not in self.mapping:
raise LookupError('no exception for %r' % code)
raise self.mapping[code](*args, **kwargs)
abort = Aborter()
#: an exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
| apache-2.0 |
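Two extension points described in the module above, as a minimal sketch: subclassing HTTPException for a custom error (402 is not among the predefined classes), and raising by status code or by instance through abort(); view() and paid() are hypothetical:

class PaymentRequired(HTTPException):
    code = 402
    description = '<p>Payment is required to access this resource.</p>'

def view(request):
    if not request.args.get('id'):
        abort(404)                # raises NotFound via the default mapping
    if not paid(request):         # hypothetical check
        abort(PaymentRequired())  # instances are proxied, not looked up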
uclouvain/osis_louvain | assessments/forms/score_file.py | 1 | 1880 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from django.utils.translation import ugettext_lazy as _
class ScoreFileForm(forms.Form):
file = forms.FileField(error_messages={'required': _('no_file_submitted')})
def clean_file(self):
file = self.cleaned_data['file']
content_type = file.content_type.split('/')[1]
valid_content_type = 'vnd.openxmlformats-officedocument.spreadsheetml.sheet' in content_type
if ".xlsx" not in file.name or not valid_content_type:
self.add_error('file', forms.ValidationError(_('file_must_be_xlsx'), code='invalid'))
return file
| agpl-3.0 |
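A minimal sketch of validating an upload with the ScoreFileForm above; the in-memory file stands in for a real request.FILES entry, and process_scores is a hypothetical handler:

from django.core.files.uploadedfile import SimpleUploadedFile

XLSX = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
upload = SimpleUploadedFile('scores.xlsx', b'placeholder', content_type=XLSX)
form = ScoreFileForm(files={'file': upload})
if form.is_valid():
    process_scores(form.cleaned_data['file'])  # hypothetical handler
else:
    print(form.errors['file'])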
martinbuc/missionplanner | Lib/site-packages/scipy/fftpack/benchmarks/bench_basic.py | 63 | 7559 | """ Test functions for fftpack.basic module
"""
import sys
from numpy.testing import *
from scipy.fftpack import ifft, fft, fftn, irfft, rfft
from numpy import arange, asarray, zeros, dot, exp, pi, double, cdouble
import numpy.fft
from numpy.random import rand
def random(size):
return rand(*size)
def direct_dft(x):
x = asarray(x)
n = len(x)
y = zeros(n,dtype=cdouble)
w = -arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w),x)
return y
def direct_idft(x):
x = asarray(x)
n = len(x)
y = zeros(n,dtype=cdouble)
w = arange(n)*(2j*pi/n)
for i in range(n):
y[i] = dot(exp(i*w),x)/n
return y
class TestFft(TestCase):
def bench_random(self):
from numpy.fft import fft as numpy_fft
print
print ' Fast Fourier Transform'
print '================================================='
print ' | real input | complex input '
print '-------------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '-------------------------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
for x in [random([size]).astype(double),
random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
]:
if size > 500: y = fft(x)
else: y = direct_dft(x)
assert_array_almost_equal(fft(x),y)
print '|%8.2f' % measure('fft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_fft(x),y)
print '|%8.2f' % measure('numpy_fft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestIfft(TestCase):
def bench_random(self):
from numpy.fft import ifft as numpy_ifft
print
print ' Inverse Fast Fourier Transform'
print '==============================================='
print ' | real input | complex input '
print '-----------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '-----------------------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
for x in [random([size]).astype(double),
random([size]).astype(cdouble)+random([size]).astype(cdouble)*1j
]:
if size > 500: y = ifft(x)
else: y = direct_idft(x)
assert_array_almost_equal(ifft(x),y)
print '|%8.2f' % measure('ifft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_ifft(x),y)
print '|%8.2f' % measure('numpy_ifft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestRfft(TestCase):
def bench_random(self):
from numpy.fft import rfft as numpy_rfft
print
print 'Fast Fourier Transform (real data)'
print '=================================='
print ' size | scipy | numpy '
print '----------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
x = random([size]).astype(double)
print '|%8.2f' % measure('rfft(x)',repeat),
sys.stdout.flush()
print '|%8.2f' % measure('numpy_rfft(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestIrfft(TestCase):
def bench_random(self):
from numpy.fft import irfft as numpy_irfft
print
print 'Inverse Fast Fourier Transform (real data)'
print '=================================='
print ' size | scipy | numpy '
print '----------------------------------'
for size,repeat in [(100,7000),(1000,2000),
(256,10000),
(512,10000),
(1024,1000),
(2048,1000),
(2048*2,500),
(2048*4,500),
]:
print '%5s' % size,
sys.stdout.flush()
x = random([size]).astype(double)
x1 = zeros(size/2+1,dtype=cdouble)
x1[0] = x[0]
for i in range(1,size/2):
x1[i] = x[2*i-1] + 1j * x[2*i]
if not size%2:
x1[-1] = x[-1]
y = irfft(x)
print '|%8.2f' % measure('irfft(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_irfft(x1,size),y)
print '|%8.2f' % measure('numpy_irfft(x1,size)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
class TestFftn(TestCase):
def bench_random(self):
from numpy.fft import fftn as numpy_fftn
print
print ' Multi-dimensional Fast Fourier Transform'
print '==================================================='
print ' | real input | complex input '
print '---------------------------------------------------'
print ' size | scipy | numpy | scipy | numpy '
print '---------------------------------------------------'
for size,repeat in [((100,100),100),((1000,100),7),
((256,256),10),
((512,512),3),
]:
print '%9s' % ('%sx%s'%size),
sys.stdout.flush()
for x in [random(size).astype(double),
random(size).astype(cdouble)+random(size).astype(cdouble)*1j
]:
y = fftn(x)
#if size > 500: y = fftn(x)
#else: y = direct_dft(x)
assert_array_almost_equal(fftn(x),y)
print '|%8.2f' % measure('fftn(x)',repeat),
sys.stdout.flush()
assert_array_almost_equal(numpy_fftn(x),y)
print '|%8.2f' % measure('numpy_fftn(x)',repeat),
sys.stdout.flush()
print ' (secs for %s calls)' % (repeat)
sys.stdout.flush()
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
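As a quick sanity check, the direct O(n^2) DFT defined above can be compared with the library transform on a short vector (modern numpy syntax, independent of the benchmark harness):

import numpy as np

x = np.random.rand(8)
np.testing.assert_array_almost_equal(direct_dft(x), np.fft.fft(x))
np.testing.assert_array_almost_equal(direct_idft(x), np.fft.ifft(x))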
flwh/KK_mt6589_iq451 | prebuilts/python/linux-x86/2.7.5/lib/python2.7/idlelib/dynOptionMenuWidget.py | 160 | 1302 | """
OptionMenu widget modified to allow dynamic menu reconfiguration
and setting of highlightthickness
"""
from Tkinter import OptionMenu
from Tkinter import _setit
import copy
class DynOptionMenu(OptionMenu):
"""
unlike OptionMenu, our kwargs can include highlightthickness
"""
def __init__(self, master, variable, value, *values, **kwargs):
#get a copy of kwargs before OptionMenu.__init__ munges them
kwargsCopy=copy.copy(kwargs)
if 'highlightthickness' in kwargs.keys():
del(kwargs['highlightthickness'])
OptionMenu.__init__(self, master, variable, value, *values, **kwargs)
self.config(highlightthickness=kwargsCopy.get('highlightthickness'))
#self.menu=self['menu']
self.variable=variable
self.command=kwargs.get('command')
def SetMenu(self,valueList,value=None):
"""
clear and reload the menu with a new set of options.
valueList - list of new options
value - initial value to set the optionmenu's menubutton to
"""
self['menu'].delete(0,'end')
for item in valueList:
self['menu'].add_command(label=item,
command=_setit(self.variable,item,self.command))
if value:
self.variable.set(value)
| gpl-2.0 |
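A minimal sketch of driving the widget above; written against Python 2 Tkinter to match the module:

from Tkinter import Tk, StringVar

root = Tk()
var = StringVar(root)
menu = DynOptionMenu(root, var, 'alpha', 'beta', highlightthickness=2)
menu.pack()
menu.SetMenu(['one', 'two', 'three'], value='two')  # swap in a new option set
root.mainloop()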
watonyweng/horizon | horizon/loaders.py | 36 | 2340 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Wrapper for loading templates from "templates" directories in panel modules.
"""
import os
import django
from django.conf import settings
from django.template.base import TemplateDoesNotExist # noqa
# Compare the version tuple; comparing strings misorders '1.10' vs '1.8'.
if django.VERSION >= (1, 8):
from django.template.engine import Engine
from django.template.loaders.base import Loader as tLoaderCls
else:
from django.template.loader import BaseLoader as tLoaderCls # noqa
from django.utils._os import safe_join # noqa
# Set up a cache of the panel directories to search.
panel_template_dirs = {}
class TemplateLoader(tLoaderCls):
is_usable = True
def get_template_sources(self, template_name):
bits = template_name.split('/', 2)
if len(bits) == 3:
dash_name, panel_name, remainder = bits
key = os.path.join(dash_name, panel_name)
if key in panel_template_dirs:
template_dir = panel_template_dirs[key]
try:
yield safe_join(template_dir, panel_name, remainder)
except UnicodeDecodeError:
# The template dir name wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of template_dir.
pass
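# e.g. (hypothetical names) "mydash/mypanel/index.html" splits into
# ('mydash', 'mypanel', 'index.html'); if 'mydash/mypanel' is a key in
# panel_template_dirs, this yields <template_dir>/mypanel/index.html.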
def load_template_source(self, template_name, template_dirs=None):
for path in self.get_template_sources(template_name):
try:
with open(path) as template_file:
return (template_file.read().decode(settings.FILE_CHARSET), path)
except IOError:
pass
raise TemplateDoesNotExist(template_name)
if django.VERSION >= (1, 8):
e = Engine()
_loader = TemplateLoader(e)
else:
_loader = TemplateLoader()
| apache-2.0 |
hsharsha/perfrunner | perfrunner/tests/functional.py | 1 | 1852 | import unittest
from perfrunner.__main__ import get_options
from perfrunner.helpers.memcached import MemcachedHelper
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.helpers.rest import RestHelper
from perfrunner.settings import ClusterSpec, TestConfig
from perfrunner.tests import TargetIterator
class FunctionalTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
options, _args = get_options()
override = \
_args and (arg.split('.') for arg in ' '.join(_args).split(','))
self.cluster_spec = ClusterSpec()
self.cluster_spec.parse(options.cluster_spec_fname)
self.test_config = TestConfig()
self.test_config.parse(options.test_config_fname, override)
self.target_iterator = TargetIterator(self.cluster_spec,
self.test_config)
self.memcached = MemcachedHelper(self.test_config)
self.remote = RemoteHelper(self.cluster_spec, self.test_config)
self.rest = RestHelper(self.cluster_spec)
super(FunctionalTest, self).__init__(*args, **kwargs)
class MemcachedTests(FunctionalTest):
def test_num_threads(self):
expected_threads = self.test_config.cluster.num_cpus
if expected_threads is None:
cores = self.remote.detect_number_cores()
expected_threads = int(0.75 * cores)
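# e.g. a 24-core node is expected to run int(0.75 * 24) == 18 threads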
for target in self.target_iterator:
host = target.node.split(':')[0]
port = self.rest.get_memcached_port(target.node)
stats = self.memcached.get_stats(host, port, target.bucket,
stats='')
num_threads = int(stats['threads'])
self.assertEqual(num_threads, expected_threads)
if __name__ == '__main__':
unittest.main(argv=['functional.py'])
| apache-2.0 |
mfasq1Monash/FIT3140 | interpreter.py | 1 | 6491 | '''
Author: Michael Asquith, Aaron Gruneklee
Created: 2014.12.08
Last Modified: 2014.12.23
Interpreter for a simple functional programming language.
Access with interpret(command)
Based on Peter Norvig's Lispy interpreter, http://norvig.com/lispy.html
'''
import math, operator as op
from robotio import RobotIO
Symbol = str
class VariableAlreadyPresentException(Exception):
pass
class FunctionAlreadyDefinedException(Exception):
pass
class VariableAlreadySetException(Exception):
pass
class VariableNotFoundException(Exception):
pass
class InterpretedList(list):
pass
class Procedure(object):
"""A user-defined method for the interpreter"""
def __init__(self, parms, stats, env, inter):
self.parameters = parms
self.statements = stats
self.environment = env
self.interpreter = inter
def __call__(self, *args):
localVariables = Environment(self.parameters, args, self.environment)
return self.interpreter.evaluate(self.statements, localVariables)
class Environment(dict):
"""A set of variables for the interpreter or a method within it."""
def __init__(self, parms=(), expressions=(), outer=None):
"""When evaluating, procedures will pass in their parameters"""
self.update(zip(parms, expressions))
self.outer = outer
def find(self, variable):
"""Returns the lowest level Environment which has variable"""
if variable in self:
return self
try:
return self.outer.find(variable)
except AttributeError:
raise VariableNotFoundException
def add_new(self, variable, value):
"""Adds a new definition to the environment. If the variable is already present, raises a KeyAlreadyPresentError"""
if variable in self:
raise(VariableAlreadyPresentException)
self[variable] = value
class Interpreter:
"""After initialising an interpreter, run expressions by calling interpret.
"""
def __init__(self, newRobotIO):
"""Creates an interpreter with standard math operations and variables.
Can send input/output to newRobotIO
"""
self.global_environment = self.standard_environment()
self.robotio = newRobotIO
def interpret(self, code):
"""Parses and executes code a string in the form of:
(method_name argument1 argument2)
Arguments which are expressions must be placed in brackets.
Arguments which are not expressions must not be placed in brackets.
"""
return self.evaluate(self.parse(code))
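# A minimal usage sketch (robot_io is an assumed RobotIO-compatible stub):
#   interp = Interpreter(robot_io)
#   interp.interpret('(set x 4)')
#   interp.interpret('(+ (* 2 3) x)')  # -> 10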
def parse(self, code):
"Read an expression from a string."
return self.read_from_tokens(self.tokenize(code))
def tokenize(self, s):
"Convert a string into a list of tokens."
return s.replace('(',' ( ').replace(')',' ) ').split()
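# e.g. tokenize('(+ 1 (* 2 3))')
# -> ['(', '+', '1', '(', '*', '2', '3', ')', ')']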
def read_from_tokens(self, tokens):
"Read an expression from a sequence of tokens."
if len(tokens) == 0:
raise SyntaxError('unexpected EOF while reading')
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(self.read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
elif ')' == token:
raise SyntaxError('unexpected )')
else:
return self.atom(token)
def atom(self, token):
"Numbers become numbers, booleans become booleans, everything else become symbols."
try:
return int(token)
except ValueError:
if token.lower() == 'true':
return True
elif token.lower() == 'false':
return False
else:
return Symbol(token)
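# e.g. atom('42') -> 42, atom('True') -> True, atom('foo') -> 'foo'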
def standard_environment(self):
"Creates the base variable environment"
env = Environment()
env.update(vars(math))
env.update({
'+':op.add, '-':op.sub, '*':op.mul, '/':op.div,
'>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'define':None, 'if':None, 'set':None, 'comment':None,
'%': lambda x,y: abs(x % y),
'and': lambda x,y: x and y,
'or': lambda x,y: x or y,
'not': lambda x: not x,
'move': lambda x: self.robotio.move(x),
'turn': lambda x: self.robotio.turn(x),
'detect-wall': lambda x: self.robotio.detect_wall(x),
'detect-goal': lambda x: self.robotio.detect_goal(x),
'[]': InterpretedList(),
'build': lambda x,y: InterpretedList([x] + y),
'head': lambda x: x[0],
'tail': lambda x: InterpretedList(x[1:])
})
return env
def evaluate(self, x, env=None):
if env is None:
env = self.global_environment
# If x is a list, must be evaluating a method.
# Read x[0] without popping it, so that a Procedure's stored
# statement list is not consumed (and broken) by repeated calls.
if isinstance(x, list):
if isinstance(x, InterpretedList):
return x
method = x[0]
# Defines a function. [Name, parameters, body]
if method == 'define':
try:
self.global_environment.add_new(x[1], Procedure(x[2], x[3], env, self))
except VariableAlreadyPresentException:
raise FunctionAlreadyDefinedException
# If statement. [Test, consequences, alternative]
elif method == 'if':
if self.evaluate(x[1], env):
return self.evaluate(x[2], env)
return self.evaluate(x[3], env)
# Sets a variable
elif method == 'set':
try:
env.add_new(x[1], self.evaluate(x[2], env))
except VariableAlreadyPresentException:
raise VariableAlreadySetException
return
elif method == 'comment':
return
# Executes all other functions
else:
method = self.evaluate(method, self.global_environment)
args = [self.evaluate(variable, env) for variable in x[1:]]
return method(*args)
elif isinstance(x, Symbol):
return self.evaluate(env.find(x)[x])
else:
return x
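# Continuing the sketch above: after
# interp.interpret('(define double (x) (* x 2))'),
# interp.interpret('(double 21)') evaluates to 42.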
| mit |
pyfisch/servo | tests/wpt/web-platform-tests/webdriver/tests/find_element/user_prompts.py | 24 | 3954 | # META: timeout=long
import pytest
from tests.support.asserts import (
assert_error,
assert_same_element,
assert_success,
assert_dialog_handled,
)
from tests.support.inline import inline
def find_element(session, using, value):
return session.transport.send(
"POST", "session/{session_id}/element".format(**vars(session)),
{"using": using, "value": value})
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<p>bar</p>")
element = session.find.css("p", all=False)
create_dialog(dialog_type, text=dialog_type)
response = find_element(session, "css selector", "p")
value = assert_success(response)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert_same_element(session, value, element)
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<p>bar</p>")
create_dialog(dialog_type, text=dialog_type)
response = find_element(session, "css selector", "p")
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<p>bar</p>")
create_dialog(dialog_type, text=dialog_type)
response = find_element(session, "css selector", "p")
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| mpl-2.0 |
nico01f/nifo.github.io | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_phpbuiltins.py | 95 | 122088 | # -*- coding: utf-8 -*-
"""
pygments.lexers._phpbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file loads the function names and their modules from the
php webpage and generates itself.
Do not alter the MODULES dict by hand!
WARNING: the generation transfers quite a lot of data over your
internet connection. Don't run it at home; use
a server ;-)
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'.NET': ['dotnet_load'],
'APC': ['apc_add',
'apc_bin_dump',
'apc_bin_dumpfile',
'apc_bin_load',
'apc_bin_loadfile',
'apc_cache_info',
'apc_cas',
'apc_clear_cache',
'apc_compile_file',
'apc_dec',
'apc_define_constants',
'apc_delete_file',
'apc_delete',
'apc_exists',
'apc_fetch',
'apc_inc',
'apc_load_constants',
'apc_sma_info',
'apc_store'],
'APD': ['apd_breakpoint',
'apd_callstack',
'apd_clunk',
'apd_continue',
'apd_croak',
'apd_dump_function_table',
'apd_dump_persistent_resources',
'apd_dump_regular_resources',
'apd_echo',
'apd_get_active_symbols',
'apd_set_pprof_trace',
'apd_set_session_trace_socket',
'apd_set_session_trace',
'apd_set_session',
'override_function',
'rename_function'],
'Aliases and deprecated Mysqli': ['mysqli_bind_param',
'mysqli_bind_result',
'mysqli_client_encoding',
'mysqli_connect',
'mysqli_disable_reads_from_master',
'mysqli_disable_rpl_parse',
'mysqli_enable_reads_from_master',
'mysqli_enable_rpl_parse',
'mysqli_escape_string',
'mysqli_execute',
'mysqli_fetch',
'mysqli_get_metadata',
'mysqli_master_query',
'mysqli_param_count',
'mysqli_report',
'mysqli_rpl_parse_enabled',
'mysqli_rpl_probe',
'mysqli_rpl_query_type',
'mysqli_send_long_data',
'mysqli_send_query',
'mysqli_set_opt',
'mysqli_slave_query'],
'Apache': ['apache_child_terminate',
'apache_get_modules',
'apache_get_version',
'apache_getenv',
'apache_lookup_uri',
'apache_note',
'apache_request_headers',
'apache_reset_timeout',
'apache_response_headers',
'apache_setenv',
'getallheaders',
'virtual'],
'Array': ['array_change_key_case',
'array_chunk',
'array_combine',
'array_count_values',
'array_diff_assoc',
'array_diff_key',
'array_diff_uassoc',
'array_diff_ukey',
'array_diff',
'array_fill_keys',
'array_fill',
'array_filter',
'array_flip',
'array_intersect_assoc',
'array_intersect_key',
'array_intersect_uassoc',
'array_intersect_ukey',
'array_intersect',
'array_key_exists',
'array_keys',
'array_map',
'array_merge_recursive',
'array_merge',
'array_multisort',
'array_pad',
'array_pop',
'array_product',
'array_push',
'array_rand',
'array_reduce',
'array_replace_recursive',
'array_replace',
'array_reverse',
'array_search',
'array_shift',
'array_slice',
'array_splice',
'array_sum',
'array_udiff_assoc',
'array_udiff_uassoc',
'array_udiff',
'array_uintersect_assoc',
'array_uintersect_uassoc',
'array_uintersect',
'array_unique',
'array_unshift',
'array_values',
'array_walk_recursive',
'array_walk',
'array',
'arsort',
'asort',
'compact',
'count',
'current',
'each',
'end',
'extract',
'in_array',
'key',
'krsort',
'ksort',
'list',
'natcasesort',
'natsort',
'next',
'pos',
'prev',
'range',
'reset',
'rsort',
'shuffle',
'sizeof',
'sort',
'uasort',
'uksort',
'usort'],
'BBCode': ['bbcode_add_element',
'bbcode_add_smiley',
'bbcode_create',
'bbcode_destroy',
'bbcode_parse',
'bbcode_set_arg_parser',
'bbcode_set_flags'],
'BC Math': ['bcadd',
'bccomp',
'bcdiv',
'bcmod',
'bcmul',
'bcpow',
'bcpowmod',
'bcscale',
'bcsqrt',
'bcsub'],
'Bzip2': ['bzclose',
'bzcompress',
'bzdecompress',
'bzerrno',
'bzerror',
'bzerrstr',
'bzflush',
'bzopen',
'bzread',
'bzwrite'],
'COM': ['com_addref',
'com_create_guid',
'com_event_sink',
'com_get_active_object',
'com_get',
'com_invoke',
'com_isenum',
'com_load_typelib',
'com_load',
'com_message_pump',
'com_print_typeinfo',
'com_propget',
'com_propput',
'com_propset',
'com_release',
'com_set',
'variant_abs',
'variant_add',
'variant_and',
'variant_cast',
'variant_cat',
'variant_cmp',
'variant_date_from_timestamp',
'variant_date_to_timestamp',
'variant_div',
'variant_eqv',
'variant_fix',
'variant_get_type',
'variant_idiv',
'variant_imp',
'variant_int',
'variant_mod',
'variant_mul',
'variant_neg',
'variant_not',
'variant_or',
'variant_pow',
'variant_round',
'variant_set_type',
'variant_set',
'variant_sub',
'variant_xor'],
'CUBRID': ['cubrid_affected_rows',
'cubrid_bind',
'cubrid_close_prepare',
'cubrid_close_request',
'cubrid_col_get',
'cubrid_col_size',
'cubrid_column_names',
'cubrid_column_types',
'cubrid_commit',
'cubrid_connect_with_url',
'cubrid_connect',
'cubrid_current_oid',
'cubrid_disconnect',
'cubrid_drop',
'cubrid_error_code_facility',
'cubrid_error_code',
'cubrid_error_msg',
'cubrid_execute',
'cubrid_fetch',
'cubrid_free_result',
'cubrid_get_charset',
'cubrid_get_class_name',
'cubrid_get_client_info',
'cubrid_get_db_parameter',
'cubrid_get_server_info',
'cubrid_get',
'cubrid_insert_id',
'cubrid_is_instance',
'cubrid_lob_close',
'cubrid_lob_export',
'cubrid_lob_get',
'cubrid_lob_send',
'cubrid_lob_size',
'cubrid_lock_read',
'cubrid_lock_write',
'cubrid_move_cursor',
'cubrid_num_cols',
'cubrid_num_rows',
'cubrid_prepare',
'cubrid_put',
'cubrid_rollback',
'cubrid_schema',
'cubrid_seq_drop',
'cubrid_seq_insert',
'cubrid_seq_put',
'cubrid_set_add',
'cubrid_set_drop',
'cubrid_version'],
'Cairo': ['cairo_create',
'cairo_font_face_get_type',
'cairo_font_face_status',
'cairo_font_options_create',
'cairo_font_options_equal',
'cairo_font_options_get_antialias',
'cairo_font_options_get_hint_metrics',
'cairo_font_options_get_hint_style',
'cairo_font_options_get_subpixel_order',
'cairo_font_options_hash',
'cairo_font_options_merge',
'cairo_font_options_set_antialias',
'cairo_font_options_set_hint_metrics',
'cairo_font_options_set_hint_style',
'cairo_font_options_set_subpixel_order',
'cairo_font_options_status',
'cairo_format_stride_for_width',
'cairo_image_surface_create_for_data',
'cairo_image_surface_create_from_png',
'cairo_image_surface_create',
'cairo_image_surface_get_data',
'cairo_image_surface_get_format',
'cairo_image_surface_get_height',
'cairo_image_surface_get_stride',
'cairo_image_surface_get_width',
'cairo_matrix_create_scale',
'cairo_matrix_create_translate',
'cairo_matrix_invert',
'cairo_matrix_multiply',
'cairo_matrix_rotate',
'cairo_matrix_transform_distance',
'cairo_matrix_transform_point',
'cairo_matrix_translate',
'cairo_pattern_add_color_stop_rgb',
'cairo_pattern_add_color_stop_rgba',
'cairo_pattern_create_for_surface',
'cairo_pattern_create_linear',
'cairo_pattern_create_radial',
'cairo_pattern_create_rgb',
'cairo_pattern_create_rgba',
'cairo_pattern_get_color_stop_count',
'cairo_pattern_get_color_stop_rgba',
'cairo_pattern_get_extend',
'cairo_pattern_get_filter',
'cairo_pattern_get_linear_points',
'cairo_pattern_get_matrix',
'cairo_pattern_get_radial_circles',
'cairo_pattern_get_rgba',
'cairo_pattern_get_surface',
'cairo_pattern_get_type',
'cairo_pattern_set_extend',
'cairo_pattern_set_filter',
'cairo_pattern_set_matrix',
'cairo_pattern_status',
'cairo_pdf_surface_create',
'cairo_pdf_surface_set_size',
'cairo_ps_get_levels',
'cairo_ps_level_to_string',
'cairo_ps_surface_create',
'cairo_ps_surface_dsc_begin_page_setup',
'cairo_ps_surface_dsc_begin_setup',
'cairo_ps_surface_dsc_comment',
'cairo_ps_surface_get_eps',
'cairo_ps_surface_restrict_to_level',
'cairo_ps_surface_set_eps',
'cairo_ps_surface_set_size',
'cairo_scaled_font_create',
'cairo_scaled_font_extents',
'cairo_scaled_font_get_ctm',
'cairo_scaled_font_get_font_face',
'cairo_scaled_font_get_font_matrix',
'cairo_scaled_font_get_font_options',
'cairo_scaled_font_get_scale_matrix',
'cairo_scaled_font_get_type',
'cairo_scaled_font_glyph_extents',
'cairo_scaled_font_status',
'cairo_scaled_font_text_extents',
'cairo_surface_copy_page',
'cairo_surface_create_similar',
'cairo_surface_finish',
'cairo_surface_flush',
'cairo_surface_get_content',
'cairo_surface_get_device_offset',
'cairo_surface_get_font_options',
'cairo_surface_get_type',
'cairo_surface_mark_dirty_rectangle',
'cairo_surface_mark_dirty',
'cairo_surface_set_device_offset',
'cairo_surface_set_fallback_resolution',
'cairo_surface_show_page',
'cairo_surface_status',
'cairo_surface_write_to_png',
'cairo_svg_surface_create',
'cairo_svg_surface_restrict_to_version',
'cairo_svg_version_to_string'],
'Calendar': ['cal_days_in_month',
'cal_from_jd',
'cal_info',
'cal_to_jd',
'easter_date',
'easter_days',
'FrenchToJD',
'GregorianToJD',
'JDDayOfWeek',
'JDMonthName',
'JDToFrench',
'JDToGregorian',
'jdtojewish',
'JDToJulian',
'jdtounix',
'JewishToJD',
'JulianToJD',
'unixtojd'],
'Classes/Object': ['call_user_method_array',
'call_user_method',
'class_alias',
'class_exists',
'get_called_class',
'get_class_methods',
'get_class_vars',
'get_class',
'get_declared_classes',
'get_declared_interfaces',
'get_object_vars',
'get_parent_class',
'interface_exists',
'is_a',
'is_subclass_of',
'method_exists',
'property_exists'],
'Classkit': ['classkit_import',
'classkit_method_add',
'classkit_method_copy',
'classkit_method_redefine',
'classkit_method_remove',
'classkit_method_rename'],
'Crack': ['crack_check',
'crack_closedict',
'crack_getlastmessage',
'crack_opendict'],
'Ctype': ['ctype_alnum',
'ctype_alpha',
'ctype_cntrl',
'ctype_digit',
'ctype_graph',
'ctype_lower',
'ctype_print',
'ctype_punct'],
'Cyrus': ['cyrus_authenticate',
'cyrus_bind',
'cyrus_close',
'cyrus_connect',
'cyrus_query',
'cyrus_unbind'],
'DB++': ['dbplus_add',
'dbplus_aql',
'dbplus_chdir',
'dbplus_close',
'dbplus_curr',
'dbplus_errcode',
'dbplus_errno',
'dbplus_find',
'dbplus_first',
'dbplus_flush',
'dbplus_freealllocks',
'dbplus_freelock',
'dbplus_freerlocks',
'dbplus_getlock',
'dbplus_getunique',
'dbplus_info',
'dbplus_last',
'dbplus_lockrel',
'dbplus_next',
'dbplus_open',
'dbplus_prev',
'dbplus_rchperm',
'dbplus_rcreate',
'dbplus_rcrtexact',
'dbplus_rcrtlike',
'dbplus_resolve',
'dbplus_restorepos',
'dbplus_rkeys',
'dbplus_ropen',
'dbplus_rquery',
'dbplus_rrename',
'dbplus_rsecindex',
'dbplus_runlink',
'dbplus_rzap',
'dbplus_savepos',
'dbplus_setindex',
'dbplus_setindexbynumber',
'dbplus_sql',
'dbplus_tcl',
'dbplus_tremove',
'dbplus_undo',
'dbplus_undoprepare',
'dbplus_unlockrel',
'dbplus_unselect',
'dbplus_update',
'dbplus_xlockrel',
'dbplus_xunlockrel'],
'DBA': ['dba_close',
'dba_delete',
'dba_exists',
'dba_fetch',
'dba_firstkey',
'dba_handlers',
'dba_insert',
'dba_key_split',
'dba_list',
'dba_nextkey',
'dba_open',
'dba_optimize',
'dba_popen',
'dba_replace',
'dba_sync'],
'DOM': ['dom_import_simplexml'],
'DOM XML (PHP 4)': ['domxml_new_doc',
'domxml_open_file',
'domxml_open_mem',
'domxml_version',
'domxml_xmltree',
'domxml_xslt_stylesheet_doc',
'domxml_xslt_stylesheet_file',
'domxml_xslt_stylesheet',
'domxml_xslt_version',
'xpath_eval_expression',
'xpath_eval',
'xpath_new_context',
'xpath_register_ns_auto',
'xpath_register_ns',
'xptr_eval',
'xptr_new_context'],
'Date/Time': ['checkdate',
'date_add',
'date_create_from_format',
'date_create',
'date_date_set',
'date_default_timezone_get',
'date_default_timezone_set',
'date_diff',
'date_format',
'date_get_last_errors',
'date_interval_create_from_date_string',
'date_interval_format',
'date_isodate_set',
'date_modify',
'date_offset_get',
'date_parse_from_format',
'date_parse',
'date_sub',
'date_sun_info',
'date_sunrise',
'date_sunset',
'date_time_set',
'date_timestamp_get',
'date_timestamp_set',
'date_timezone_get',
'date_timezone_set',
'date',
'getdate',
'gettimeofday',
'gmdate',
'gmmktime',
'gmstrftime',
'idate',
'localtime',
'microtime',
'mktime',
'strftime',
'strptime',
'strtotime',
'time',
'timezone_abbreviations_list',
'timezone_identifiers_list',
'timezone_location_get',
'timezone_name_from_abbr',
'timezone_name_get',
'timezone_offset_get',
'timezone_open',
'timezone_transitions_get',
'timezone_version_get'],
'Direct IO': ['dio_close', 'dio_fcntl', 'dio_open'],
'Directory': ['chdir',
'chroot',
'closedir',
'getcwd',
'opendir',
'readdir',
'rewinddir',
'scandir'],
'Enchant': ['enchant_broker_describe',
'enchant_broker_dict_exists',
'enchant_broker_free_dict',
'enchant_broker_free',
'enchant_broker_get_error',
'enchant_broker_init',
'enchant_broker_list_dicts',
'enchant_broker_request_dict',
'enchant_broker_request_pwl_dict',
'enchant_broker_set_ordering',
'enchant_dict_add_to_personal',
'enchant_dict_add_to_session',
'enchant_dict_check',
'enchant_dict_describe',
'enchant_dict_get_error',
'enchant_dict_is_in_session',
'enchant_dict_quick_check',
'enchant_dict_store_replacement',
'enchant_dict_suggest'],
'Error Handling': ['debug_backtrace',
'debug_print_backtrace',
'error_get_last',
'error_log',
'error_reporting',
'restore_error_handler',
'restore_exception_handler',
'set_error_handler',
'set_exception_handler',
'trigger_error',
'user_error'],
'Exif': ['exif_imagetype',
'exif_read_data',
'exif_tagname',
'exif_thumbnail',
'read_exif_data'],
'Expect': ['expect_expectl'],
'FAM': ['fam_cancel_monitor',
'fam_close',
'fam_monitor_collection',
'fam_monitor_directory',
'fam_monitor_file',
'fam_next_event',
'fam_open',
'fam_pending',
'fam_resume_monitor',
'fam_suspend_monitor'],
'FDF': ['fdf_add_doc_javascript',
'fdf_add_template',
'fdf_close',
'fdf_create',
'fdf_enum_values',
'fdf_errno',
'fdf_error',
'fdf_get_ap',
'fdf_get_attachment',
'fdf_get_encoding',
'fdf_get_file',
'fdf_get_flags',
'fdf_get_opt',
'fdf_get_status',
'fdf_get_value',
'fdf_get_version',
'fdf_header',
'fdf_next_field_name',
'fdf_open_string',
'fdf_open',
'fdf_remove_item',
'fdf_save_string',
'fdf_save',
'fdf_set_ap',
'fdf_set_encoding',
'fdf_set_file',
'fdf_set_flags',
'fdf_set_javascript_action',
'fdf_set_on_import_javascript',
'fdf_set_opt',
'fdf_set_status',
'fdf_set_submit_form_action',
'fdf_set_target_frame',
'fdf_set_value',
'fdf_set_version'],
'FTP': ['ftp_alloc',
'ftp_cdup',
'ftp_chdir',
'ftp_chmod',
'ftp_close',
'ftp_connect',
'ftp_delete',
'ftp_exec',
'ftp_fget',
'ftp_fput',
'ftp_get_option',
'ftp_get',
'ftp_login',
'ftp_mdtm',
'ftp_mkdir',
'ftp_nb_continue',
'ftp_nb_fget',
'ftp_nb_fput',
'ftp_nb_get',
'ftp_nb_put',
'ftp_nlist',
'ftp_pasv',
'ftp_put',
'ftp_pwd',
'ftp_quit',
'ftp_raw',
'ftp_rawlist',
'ftp_rename',
'ftp_rmdir',
'ftp_set_option',
'ftp_site',
'ftp_size',
'ftp_ssl_connect',
'ftp_systype'],
'Fileinfo': ['finfo_buffer',
'finfo_close',
'finfo_file',
'finfo_open',
'finfo_set_flags',
'mime_content_type'],
'Filesystem': ['basename',
'chgrp',
'chmod',
'chown',
'clearstatcache',
'copy',
'dirname',
'disk_free_space',
'disk_total_space',
'diskfreespace',
'fclose',
'feof',
'fflush',
'fgetc',
'fgetcsv',
'fgets',
'fgetss',
'file_exists',
'file_get_contents',
'file_put_contents',
'file',
'fileatime',
'filectime',
'filegroup',
'fileinode',
'filemtime',
'fileowner',
'fileperms',
'filesize',
'filetype',
'flock',
'fnmatch',
'fopen',
'fpassthru',
'fputcsv',
'fputs',
'fread',
'fscanf',
'fseek',
'fstat',
'ftell',
'ftruncate',
'fwrite',
'glob',
'is_dir',
'is_executable',
'is_file',
'is_link',
'is_readable',
'is_uploaded_file',
'is_writable',
'is_writeable',
'lchgrp',
'lchown',
'link',
'linkinfo',
'lstat',
'mkdir',
'move_uploaded_file',
'parse_ini_file',
'parse_ini_string',
'pathinfo',
'pclose',
'popen',
'readfile',
'readlink',
'realpath_cache_get',
'realpath_cache_size',
'realpath',
'rename',
'rewind',
'rmdir',
'set_file_buffer',
'stat',
'symlink',
'tempnam',
'tmpfile',
'touch',
'umask',
'unlink'],
'Filter': ['filter_has_var',
'filter_id',
'filter_input_array',
'filter_input',
'filter_list',
'filter_var_array',
'filter_var'],
'Firebird/InterBase': ['ibase_add_user',
'ibase_affected_rows',
'ibase_backup',
'ibase_blob_add',
'ibase_blob_cancel',
'ibase_blob_close',
'ibase_blob_create',
'ibase_blob_echo',
'ibase_blob_get',
'ibase_blob_import',
'ibase_blob_info',
'ibase_blob_open',
'ibase_close',
'ibase_commit_ret',
'ibase_commit',
'ibase_connect',
'ibase_db_info',
'ibase_delete_user',
'ibase_drop_db',
'ibase_errcode',
'ibase_errmsg',
'ibase_execute',
'ibase_fetch_assoc',
'ibase_fetch_object',
'ibase_fetch_row',
'ibase_field_info',
'ibase_free_event_handler',
'ibase_free_query',
'ibase_free_result',
'ibase_gen_id',
'ibase_maintain_db',
'ibase_modify_user',
'ibase_name_result',
'ibase_num_fields',
'ibase_num_params',
'ibase_param_info',
'ibase_pconnect',
'ibase_prepare',
'ibase_query',
'ibase_restore',
'ibase_rollback_ret',
'ibase_rollback',
'ibase_server_info',
'ibase_service_attach',
'ibase_service_detach',
'ibase_set_event_handler',
'ibase_timefmt',
'ibase_trans',
'ibase_wait_event'],
'FriBiDi': ['fribidi_log2vis'],
'FrontBase': ['fbsql_affected_rows',
'fbsql_autocommit',
'fbsql_blob_size',
'fbsql_change_user',
'fbsql_clob_size',
'fbsql_close',
'fbsql_commit',
'fbsql_connect',
'fbsql_create_blob',
'fbsql_create_clob',
'fbsql_create_db',
'fbsql_data_seek',
'fbsql_database_password',
'fbsql_database',
'fbsql_db_query',
'fbsql_db_status',
'fbsql_drop_db',
'fbsql_errno',
'fbsql_error',
'fbsql_fetch_array',
'fbsql_fetch_assoc',
'fbsql_fetch_field',
'fbsql_fetch_lengths',
'fbsql_fetch_object',
'fbsql_fetch_row',
'fbsql_field_flags',
'fbsql_field_len',
'fbsql_field_name',
'fbsql_field_seek',
'fbsql_field_table',
'fbsql_field_type',
'fbsql_free_result',
'fbsql_get_autostart_info',
'fbsql_hostname',
'fbsql_insert_id',
'fbsql_list_dbs',
'fbsql_list_fields',
'fbsql_list_tables',
'fbsql_next_result',
'fbsql_num_fields',
'fbsql_num_rows',
'fbsql_password',
'fbsql_pconnect',
'fbsql_query',
'fbsql_read_blob',
'fbsql_read_clob',
'fbsql_result',
'fbsql_rollback',
'fbsql_rows_fetched',
'fbsql_select_db',
'fbsql_set_characterset',
'fbsql_set_lob_mode',
'fbsql_set_password',
'fbsql_set_transaction',
'fbsql_start_db',
'fbsql_stop_db',
'fbsql_table_name',
'fbsql_tablename',
'fbsql_username',
'fbsql_warnings'],
'Function handling': ['call_user_func_array',
'call_user_func',
'create_function',
'forward_static_call_array',
'forward_static_call',
'func_get_arg',
'func_get_args',
'func_num_args',
'function_exists',
'get_defined_functions',
'register_shutdown_function',
'register_tick_function',
'unregister_tick_function'],
'GD and Image': ['gd_info',
'getimagesize',
'image_type_to_extension',
'image_type_to_mime_type'],
'GMP': ['gmp_abs',
'gmp_add',
'gmp_and',
'gmp_clrbit',
'gmp_cmp',
'gmp_com',
'gmp_div_q',
'gmp_div_qr',
'gmp_div_r',
'gmp_div',
'gmp_divexact',
'gmp_fact',
'gmp_gcd',
'gmp_gcdext',
'gmp_hamdist',
'gmp_init',
'gmp_intval',
'gmp_invert',
'gmp_jacobi',
'gmp_legendre',
'gmp_mod',
'gmp_mul',
'gmp_neg',
'gmp_nextprime',
'gmp_or',
'gmp_perfect_square',
'gmp_popcount',
'gmp_pow',
'gmp_powm',
'gmp_prob_prime',
'gmp_random',
'gmp_scan0',
'gmp_scan1',
'gmp_setbit',
'gmp_sign',
'gmp_sqrt',
'gmp_sqrtrem',
'gmp_strval',
'gmp_sub',
'gmp_testbit',
'gmp_xor'],
'GeoIP': ['geoip_continent_code_by_name',
'geoip_country_code_by_name',
'geoip_country_code3_by_name',
'geoip_country_name_by_name',
'geoip_database_info',
'geoip_db_avail',
'geoip_db_filename',
'geoip_db_get_all_info',
'geoip_id_by_name',
'geoip_isp_by_name',
'geoip_org_by_name',
'geoip_record_by_name',
'geoip_region_by_name',
'geoip_region_name_by_code',
'geoip_time_zone_by_country_and_region'],
'Gettext': ['bind_textdomain_codeset',
'bindtextdomain',
'dcgettext',
'dcngettext',
'dgettext',
'dngettext',
'gettext',
'ngettext',
'textdomain'],
'GnuPG': ['gnupg_adddecryptkey',
'gnupg_addencryptkey',
'gnupg_addsignkey',
'gnupg_cleardecryptkeys',
'gnupg_clearencryptkeys',
'gnupg_clearsignkeys',
'gnupg_decrypt',
'gnupg_decryptverify',
'gnupg_encrypt',
'gnupg_encryptsign',
'gnupg_export',
'gnupg_geterror',
'gnupg_getprotocol',
'gnupg_import',
'gnupg_init',
'gnupg_keyinfo',
'gnupg_setarmor',
'gnupg_seterrormode',
'gnupg_setsignmode',
'gnupg_sign',
'gnupg_verify'],
'Gopher': ['gopher_parsedir'],
'Grapheme': ['grapheme_extract',
'grapheme_stripos',
'grapheme_stristr',
'grapheme_strlen',
'grapheme_strpos',
'grapheme_strripos',
'grapheme_strrpos',
'grapheme_strstr',
'grapheme_substr'],
'Gupnp': ['gupnp_context_get_host_ip',
'gupnp_context_get_port',
'gupnp_context_get_subscription_timeout',
'gupnp_context_host_path',
'gupnp_context_new',
'gupnp_context_set_subscription_timeout',
'gupnp_context_timeout_add',
'gupnp_context_unhost_path',
'gupnp_control_point_browse_start',
'gupnp_control_point_browse_stop',
'gupnp_control_point_callback_set',
'gupnp_control_point_new',
'gupnp_device_action_callback_set',
'gupnp_device_info_get_service',
'gupnp_device_info_get',
'gupnp_root_device_get_available',
'gupnp_root_device_get_relative_location',
'gupnp_root_device_new',
'gupnp_root_device_set_available',
'gupnp_root_device_start',
'gupnp_root_device_stop',
'gupnp_service_action_get',
'gupnp_service_action_return_error',
'gupnp_service_action_return',
'gupnp_service_action_set',
'gupnp_service_freeze_notify',
'gupnp_service_info_get_introspection',
'gupnp_service_info_get',
'gupnp_service_introspection_get_state_variable',
'gupnp_service_notify',
'gupnp_service_proxy_action_get',
'gupnp_service_proxy_action_set',
'gupnp_service_proxy_add_notify',
'gupnp_service_proxy_callback_set',
'gupnp_service_proxy_get_subscribed',
'gupnp_service_proxy_remove_notify',
'gupnp_service_proxy_set_subscribed',
'gupnp_service_thaw_notify'],
'HTTP': ['http_cache_etag',
'http_cache_last_modified',
'http_chunked_decode',
'http_deflate',
'http_inflate',
'http_build_cookie',
'http_date',
'http_get_request_body_stream',
'http_get_request_body',
'http_get_request_headers',
'http_match_etag',
'http_match_modified',
'http_match_request_header',
'http_support',
'http_negotiate_charset',
'http_negotiate_content_type',
'http_negotiate_language',
'ob_deflatehandler',
'ob_etaghandler',
'ob_inflatehandler',
'http_parse_cookie',
'http_parse_headers',
'http_parse_message',
'http_parse_params',
'http_persistent_handles_clean',
'http_persistent_handles_count',
'http_persistent_handles_ident',
'http_get',
'http_head',
'http_post_data',
'http_post_fields',
'http_put_data',
'http_put_file',
'http_put_stream',
'http_request_body_encode',
'http_request_method_exists',
'http_request_method_name',
'http_request_method_register',
'http_request_method_unregister',
'http_request',
'http_redirect',
'http_send_content_disposition',
'http_send_content_type',
'http_send_data',
'http_send_file',
'http_send_last_modified',
'http_send_status',
'http_send_stream',
'http_throttle',
'http_build_str',
'http_build_url'],
'Hash': ['hash_algos',
'hash_copy',
'hash_file',
'hash_final',
'hash_hmac_file',
'hash_hmac',
'hash_init',
'hash_update_file',
'hash_update_stream',
'hash_update',
'hash'],
'Hyperwave': ['hw_Array2Objrec',
'hw_changeobject',
'hw_Children',
'hw_ChildrenObj',
'hw_Close',
'hw_Connect',
'hw_connection_info',
'hw_cp',
'hw_Deleteobject',
'hw_DocByAnchor',
'hw_DocByAnchorObj',
'hw_Document_Attributes',
'hw_Document_BodyTag',
'hw_Document_Content',
'hw_Document_SetContent',
'hw_Document_Size',
'hw_dummy',
'hw_EditText',
'hw_Error',
'hw_ErrorMsg',
'hw_Free_Document',
'hw_GetAnchors',
'hw_GetAnchorsObj',
'hw_GetAndLock',
'hw_GetChildColl',
'hw_GetChildCollObj',
'hw_GetChildDocColl',
'hw_GetChildDocCollObj',
'hw_GetObject',
'hw_GetObjectByQuery',
'hw_GetObjectByQueryColl',
'hw_GetObjectByQueryCollObj',
'hw_GetObjectByQueryObj',
'hw_GetParents',
'hw_GetParentsObj',
'hw_getrellink',
'hw_GetRemote',
'hw_getremotechildren',
'hw_GetSrcByDestObj',
'hw_GetText',
'hw_getusername',
'hw_Identify',
'hw_InCollections',
'hw_Info',
'hw_InsColl',
'hw_InsDoc',
'hw_insertanchors',
'hw_InsertDocument',
'hw_InsertObject',
'hw_mapid',
'hw_Modifyobject',
'hw_mv',
'hw_New_Document',
'hw_objrec2array',
'hw_Output_Document',
'hw_pConnect',
'hw_PipeDocument',
'hw_Root',
'hw_setlinkroot',
'hw_stat',
'hw_Unlock',
'hw_Who'],
'Hyperwave API': ['hw_api_attribute',
'hwapi_hgcsp',
'hw_api_content',
'hw_api_object'],
'IBM DB2': ['db2_autocommit',
'db2_bind_param',
'db2_client_info',
'db2_close',
'db2_column_privileges',
'db2_columns',
'db2_commit',
'db2_conn_error',
'db2_conn_errormsg',
'db2_connect',
'db2_cursor_type',
'db2_escape_string',
'db2_exec',
'db2_execute',
'db2_fetch_array',
'db2_fetch_assoc',
'db2_fetch_both',
'db2_fetch_object',
'db2_fetch_row',
'db2_field_display_size',
'db2_field_name',
'db2_field_num',
'db2_field_precision',
'db2_field_scale',
'db2_field_type',
'db2_field_width',
'db2_foreign_keys',
'db2_free_result',
'db2_free_stmt',
'db2_get_option',
'db2_last_insert_id'],
'ID3': ['id3_get_frame_long_name',
'id3_get_frame_short_name',
'id3_get_genre_id',
'id3_get_genre_list',
'id3_get_genre_name',
'id3_get_tag',
'id3_get_version',
'id3_remove_tag',
'id3_set_tag'],
'IDN': ['idn_to_ascii', 'idn_to_unicode', 'idn_to_utf8'],
'IIS': ['iis_add_server',
'iis_get_dir_security',
'iis_get_script_map',
'iis_get_server_by_comment',
'iis_get_server_by_path',
'iis_get_server_rights',
'iis_get_service_state',
'iis_remove_server',
'iis_set_app_settings',
'iis_set_dir_security',
'iis_set_script_map',
'iis_set_server_rights',
'iis_start_server',
'iis_start_service',
'iis_stop_server',
'iis_stop_service'],
'IMAP': ['imap_8bit',
'imap_alerts',
'imap_append',
'imap_base64',
'imap_binary',
'imap_body',
'imap_bodystruct',
'imap_check',
'imap_clearflag_full',
'imap_close',
'imap_createmailbox',
'imap_delete',
'imap_deletemailbox',
'imap_errors',
'imap_expunge',
'imap_fetch_overview',
'imap_fetchbody',
'imap_fetchheader',
'imap_fetchmime',
'imap_fetchstructure',
'imap_gc',
'imap_get_quota',
'imap_get_quotaroot',
'imap_getacl',
'imap_getmailboxes',
'imap_getsubscribed',
'imap_header',
'imap_headerinfo',
'imap_headers',
'imap_last_error',
'imap_list',
'imap_listmailbox',
'imap_listscan',
'imap_listsubscribed',
'imap_lsub',
'imap_mail_compose',
'imap_mail_copy',
'imap_mail_move',
'imap_mail',
'imap_mailboxmsginfo',
'imap_mime_header_decode',
'imap_msgno',
'imap_num_msg',
'imap_num_recent',
'imap_open',
'imap_ping',
'imap_qprint',
'imap_renamemailbox',
'imap_reopen',
'imap_rfc822_parse_adrlist',
'imap_rfc822_parse_headers',
'imap_rfc822_write_address',
'imap_savebody',
'imap_scanmailbox',
'imap_search',
'imap_set_quota',
'imap_setacl',
'imap_setflag_full',
'imap_sort',
'imap_status',
'imap_subscribe',
'imap_thread',
'imap_timeout',
'imap_uid',
'imap_undelete',
'imap_unsubscribe',
'imap_utf7_decode',
'imap_utf7_encode',
'imap_utf8'],
'Informix': ['ifx_affected_rows',
'ifx_blobinfile_mode',
'ifx_byteasvarchar',
'ifx_close',
'ifx_connect',
'ifx_copy_blob',
'ifx_create_blob',
'ifx_create_char',
'ifx_do',
'ifx_error',
'ifx_errormsg',
'ifx_fetch_row',
'ifx_fieldproperties',
'ifx_fieldtypes',
'ifx_free_blob',
'ifx_free_char',
'ifx_free_result',
'ifx_get_blob',
'ifx_get_char',
'ifx_getsqlca',
'ifx_htmltbl_result',
'ifx_nullformat',
'ifx_num_fields',
'ifx_num_rows',
'ifx_pconnect',
'ifx_prepare',
'ifx_query',
'ifx_textasvarchar',
'ifx_update_blob',
'ifx_update_char',
'ifxus_close_slob',
'ifxus_create_slob',
'ifxus_free_slob',
'ifxus_open_slob',
'ifxus_read_slob',
'ifxus_seek_slob',
'ifxus_tell_slob',
'ifxus_write_slob'],
'Ingres': ['ingres_autocommit_state',
'ingres_autocommit',
'ingres_charset',
'ingres_close',
'ingres_commit',
'ingres_connect',
'ingres_cursor',
'ingres_errno',
'ingres_error',
'ingres_errsqlstate',
'ingres_escape_string',
'ingres_execute',
'ingres_fetch_array',
'ingres_fetch_assoc',
'ingres_fetch_object',
'ingres_fetch_proc_return',
'ingres_fetch_row',
'ingres_field_length',
'ingres_field_name',
'ingres_field_nullable',
'ingres_field_precision',
'ingres_field_scale',
'ingres_field_type',
'ingres_free_result',
'ingres_next_error',
'ingres_num_fields',
'ingres_num_rows',
'ingres_pconnect',
'ingres_prepare',
'ingres_query',
'ingres_result_seek',
'ingres_rollback',
'ingres_set_environment',
'ingres_unbuffered_query'],
'Inotify': ['inotify_add_watch',
'inotify_init',
'inotify_queue_len',
'inotify_read',
'inotify_rm_watch'],
'JSON': ['json_decode', 'json_encode', 'json_last_error'],
'Java': ['java_last_exception_clear', 'java_last_exception_get'],
'Judy': ['judy_type', 'judy_version'],
'KADM5': ['kadm5_chpass_principal',
'kadm5_create_principal',
'kadm5_delete_principal',
'kadm5_destroy',
'kadm5_flush',
'kadm5_get_policies',
'kadm5_get_principal',
'kadm5_get_principals',
'kadm5_init_with_password',
'kadm5_modify_principal'],
'LDAP': ['ldap_8859_to_t61',
'ldap_add',
'ldap_bind',
'ldap_close',
'ldap_compare',
'ldap_connect',
'ldap_count_entries',
'ldap_delete',
'ldap_dn2ufn',
'ldap_err2str',
'ldap_errno',
'ldap_error',
'ldap_explode_dn',
'ldap_first_attribute',
'ldap_first_entry',
'ldap_first_reference',
'ldap_free_result',
'ldap_get_attributes',
'ldap_get_dn',
'ldap_get_entries',
'ldap_get_option',
'ldap_get_values_len',
'ldap_get_values',
'ldap_list',
'ldap_mod_add',
'ldap_mod_del',
'ldap_mod_replace',
'ldap_modify',
'ldap_next_attribute',
'ldap_next_entry',
'ldap_next_reference',
'ldap_parse_reference',
'ldap_parse_result',
'ldap_read',
'ldap_rename',
'ldap_sasl_bind',
'ldap_search',
'ldap_set_option',
'ldap_set_rebind_proc',
'ldap_sort',
'ldap_start_tls',
'ldap_t61_to_8859',
'ldap_unbind'],
'LZF': ['lzf_compress', 'lzf_decompress', 'lzf_optimized_for'],
'Libevent': ['event_add',
'event_base_free',
'event_base_loop',
'event_base_loopbreak',
'event_base_loopexit',
'event_base_new',
'event_base_priority_init',
'event_base_set',
'event_buffer_base_set',
'event_buffer_disable',
'event_buffer_enable',
'event_buffer_fd_set',
'event_buffer_free',
'event_buffer_new',
'event_buffer_priority_set',
'event_buffer_read',
'event_buffer_set_callback',
'event_buffer_timeout_set',
'event_buffer_watermark_set',
'event_buffer_write',
'event_del',
'event_free',
'event_new',
'event_set'],
'Lotus Notes': ['notes_body',
'notes_copy_db',
'notes_create_db',
'notes_create_note',
'notes_drop_db',
'notes_find_note',
'notes_header_info',
'notes_list_msgs',
'notes_mark_read',
'notes_mark_unread',
'notes_nav_create',
'notes_search',
'notes_unread',
'notes_version'],
'MCVE': ['m_checkstatus',
'm_completeauthorizations',
'm_connect',
'm_connectionerror',
'm_deletetrans',
'm_destroyconn',
'm_destroyengine',
'm_getcell',
'm_getcellbynum',
'm_getcommadelimited',
'm_getheader',
'm_initconn',
'm_initengine',
'm_iscommadelimited',
'm_maxconntimeout',
'm_monitor',
'm_numcolumns',
'm_numrows',
'm_parsecommadelimited',
'm_responsekeys'],
'Mail': ['ezmlm_hash', 'mail'],
'Mailparse': ['mailparse_determine_best_xfer_encoding',
'mailparse_msg_create',
'mailparse_msg_extract_part_file',
'mailparse_msg_extract_part',
'mailparse_msg_extract_whole_part_file',
'mailparse_msg_free',
'mailparse_msg_get_part_data',
'mailparse_msg_get_part',
'mailparse_msg_get_structure',
'mailparse_msg_parse_file',
'mailparse_msg_parse',
'mailparse_rfc822_parse_addresses',
'mailparse_stream_encode',
'mailparse_uudecode_all'],
'Math': ['abs',
'acos',
'acosh',
'asin',
'asinh',
'atan2',
'atan',
'atanh',
'base_convert',
'bindec',
'ceil',
'cos',
'cosh',
'decbin',
'dechex',
'decoct',
'deg2rad',
'exp',
'expm1'],
'MaxDB': ['maxdb_affected_rows',
'maxdb_autocommit',
'maxdb_bind_param',
'maxdb_bind_result',
'maxdb_change_user',
'maxdb_character_set_name',
'maxdb_client_encoding',
'maxdb_close_long_data',
'maxdb_close',
'maxdb_commit',
'maxdb_connect_errno',
'maxdb_connect_error',
'maxdb_connect',
'maxdb_data_seek',
'maxdb_debug',
'maxdb_disable_reads_from_master',
'maxdb_disable_rpl_parse',
'maxdb_dump_debug_info',
'maxdb_embedded_connect',
'maxdb_enable_reads_from_master',
'maxdb_enable_rpl_parse',
'maxdb_errno',
'maxdb_error',
'maxdb_escape_string',
'maxdb_execute',
'maxdb_fetch_array',
'maxdb_fetch_assoc',
'maxdb_fetch_field_direct',
'maxdb_fetch_field',
'maxdb_fetch_fields',
'maxdb_fetch_lengths',
'maxdb_fetch_object',
'maxdb_fetch_row',
'maxdb_fetch',
'maxdb_field_count',
'maxdb_field_seek',
'maxdb_field_tell',
'maxdb_free_result',
'maxdb_get_client_info',
'maxdb_get_client_version',
'maxdb_get_host_info',
'maxdb_get_metadata',
'maxdb_get_proto_info',
'maxdb_get_server_info',
'maxdb_get_server_version',
'maxdb_info',
'maxdb_init',
'maxdb_insert_id',
'maxdb_kill',
'maxdb_master_query',
'maxdb_more_results',
'maxdb_multi_query',
'maxdb_next_result',
'maxdb_num_fields',
'maxdb_num_rows',
'maxdb_options',
'maxdb_param_count',
'maxdb_ping',
'maxdb_prepare',
'maxdb_query',
'maxdb_real_connect',
'maxdb_real_escape_string',
'maxdb_real_query',
'maxdb_report',
'maxdb_rollback',
'maxdb_rpl_parse_enabled',
'maxdb_rpl_probe',
'maxdb_rpl_query_type',
'maxdb_select_db',
'maxdb_send_long_data',
'maxdb_send_query',
'maxdb_server_end',
'maxdb_server_init',
'maxdb_set_opt',
'maxdb_sqlstate',
'maxdb_ssl_set',
'maxdb_stat',
'maxdb_stmt_affected_rows'],
'Mcrypt': ['mcrypt_cbc',
'mcrypt_cfb',
'mcrypt_create_iv',
'mcrypt_decrypt',
'mcrypt_ecb',
'mcrypt_enc_get_algorithms_name',
'mcrypt_enc_get_block_size',
'mcrypt_enc_get_iv_size',
'mcrypt_enc_get_key_size',
'mcrypt_enc_get_modes_name',
'mcrypt_enc_get_supported_key_sizes',
'mcrypt_enc_is_block_algorithm_mode',
'mcrypt_enc_is_block_algorithm',
'mcrypt_enc_is_block_mode',
'mcrypt_enc_self_test',
'mcrypt_encrypt',
'mcrypt_generic_deinit',
'mcrypt_generic_end',
'mcrypt_generic_init',
'mcrypt_generic',
'mcrypt_get_block_size',
'mcrypt_get_cipher_name',
'mcrypt_get_iv_size',
'mcrypt_get_key_size',
'mcrypt_list_algorithms',
'mcrypt_list_modes',
'mcrypt_module_close',
'mcrypt_module_get_algo_block_size',
'mcrypt_module_get_algo_key_size',
'mcrypt_module_get_supported_key_sizes',
'mcrypt_module_is_block_algorithm_mode',
'mcrypt_module_is_block_algorithm',
'mcrypt_module_is_block_mode',
'mcrypt_module_open',
'mcrypt_module_self_test',
'mcrypt_ofb',
'mdecrypt_generic'],
'Memcache': ['memcache_debug'],
'Mhash': ['mhash_count',
'mhash_get_block_size',
'mhash_get_hash_name',
'mhash_keygen_s2k',
'mhash'],
'Ming': ['ming_keypress',
'ming_setcubicthreshold',
'ming_setscale',
'ming_setswfcompression',
'ming_useconstants',
'ming_useswfversion'],
'Misc.': ['connection_aborted',
'connection_status',
'connection_timeout',
'constant',
'define',
'defined',
'die',
'eval',
'exit',
'get_browser',
'__halt_compiler',
'highlight_file',
'highlight_string',
'ignore_user_abort',
'pack',
'php_check_syntax',
'php_strip_whitespace',
'show_source',
'sleep',
'sys_getloadavg',
'time_nanosleep',
'time_sleep_until',
'uniqid',
'unpack',
'usleep'],
'Mongo': ['bson_decode', 'bson_encode'],
'Msession': ['msession_connect',
'msession_count',
'msession_create',
'msession_destroy',
'msession_disconnect',
'msession_find',
'msession_get_array',
'msession_get_data',
'msession_get',
'msession_inc',
'msession_list',
'msession_listvar',
'msession_lock',
'msession_plugin',
'msession_randstr',
'msession_set_array',
'msession_set_data',
'msession_set',
'msession_timeout',
'msession_uniq',
'msession_unlock'],
'Mssql': ['mssql_bind',
'mssql_close',
'mssql_connect',
'mssql_data_seek',
'mssql_execute',
'mssql_fetch_array',
'mssql_fetch_assoc',
'mssql_fetch_batch',
'mssql_fetch_field',
'mssql_fetch_object',
'mssql_fetch_row',
'mssql_field_length',
'mssql_field_name',
'mssql_field_seek',
'mssql_field_type',
'mssql_free_result',
'mssql_free_statement',
'mssql_get_last_message',
'mssql_guid_string',
'mssql_init',
'mssql_min_error_severity',
'mssql_min_message_severity',
'mssql_next_result',
'mssql_num_fields',
'mssql_num_rows',
'mssql_pconnect',
'mssql_query',
'mssql_result',
'mssql_rows_affected',
'mssql_select_db'],
'Multibyte String': ['mb_check_encoding',
'mb_convert_case',
'mb_convert_encoding',
'mb_convert_kana',
'mb_convert_variables',
'mb_decode_mimeheader',
'mb_decode_numericentity',
'mb_detect_encoding',
'mb_detect_order',
'mb_encode_mimeheader',
'mb_encode_numericentity',
'mb_encoding_aliases',
'mb_ereg_match',
'mb_ereg_replace',
'mb_ereg_search_getpos',
'mb_ereg_search_getregs',
'mb_ereg_search_init',
'mb_ereg_search_pos',
'mb_ereg_search_regs',
'mb_ereg_search_setpos',
'mb_ereg_search',
'mb_ereg',
'mb_eregi_replace',
'mb_eregi',
'mb_get_info',
'mb_http_input',
'mb_http_output',
'mb_internal_encoding',
'mb_language',
'mb_list_encodings',
'mb_output_handler',
'mb_parse_str',
'mb_preferred_mime_name',
'mb_regex_encoding',
'mb_regex_set_options',
'mb_send_mail',
'mb_split',
'mb_strcut',
'mb_strimwidth',
'mb_stripos',
'mb_stristr',
'mb_strlen',
'mb_strpos',
'mb_strrchr',
'mb_strrichr',
'mb_strripos',
'mb_strrpos',
'mb_strstr',
'mb_strtolower',
'mb_strtoupper',
'mb_strwidth',
'mb_substitute_character',
'mb_substr_count',
'mb_substr'],
'MySQL': ['mysql_affected_rows',
'mysql_client_encoding',
'mysql_close',
'mysql_connect',
'mysql_create_db',
'mysql_data_seek',
'mysql_db_name',
'mysql_db_query',
'mysql_drop_db',
'mysql_errno',
'mysql_error',
'mysql_escape_string',
'mysql_fetch_array',
'mysql_fetch_assoc',
'mysql_fetch_field',
'mysql_fetch_lengths',
'mysql_fetch_object',
'mysql_fetch_row',
'mysql_field_flags',
'mysql_field_len',
'mysql_field_name',
'mysql_field_seek',
'mysql_field_table',
'mysql_field_type',
'mysql_free_result',
'mysql_get_client_info',
'mysql_get_host_info',
'mysql_get_proto_info',
'mysql_get_server_info',
'mysql_info',
'mysql_insert_id',
'mysql_list_dbs',
'mysql_list_fields',
'mysql_list_processes',
'mysql_list_tables',
'mysql_num_fields',
'mysql_num_rows',
'mysql_pconnect',
'mysql_ping',
'mysql_query',
'mysql_real_escape_string',
'mysql_result',
'mysql_select_db',
'mysql_set_charset',
'mysql_stat',
'mysql_tablename',
'mysql_thread_id',
'mysql_unbuffered_query'],
'NSAPI': ['nsapi_request_headers', 'nsapi_response_headers', 'nsapi_virtual'],
'Ncurses': ['ncurses_addch',
'ncurses_addchnstr',
'ncurses_addchstr',
'ncurses_addnstr',
'ncurses_addstr',
'ncurses_assume_default_colors',
'ncurses_attroff',
'ncurses_attron',
'ncurses_attrset',
'ncurses_baudrate',
'ncurses_beep',
'ncurses_bkgd',
'ncurses_bkgdset',
'ncurses_border',
'ncurses_bottom_panel',
'ncurses_can_change_color',
'ncurses_cbreak',
'ncurses_clear',
'ncurses_clrtobot',
'ncurses_clrtoeol',
'ncurses_color_content',
'ncurses_color_set',
'ncurses_curs_set',
'ncurses_def_prog_mode',
'ncurses_def_shell_mode',
'ncurses_define_key',
'ncurses_del_panel',
'ncurses_delay_output',
'ncurses_delch',
'ncurses_deleteln',
'ncurses_delwin',
'ncurses_doupdate',
'ncurses_echo',
'ncurses_echochar',
'ncurses_end',
'ncurses_erase',
'ncurses_erasechar',
'ncurses_filter',
'ncurses_flash',
'ncurses_flushinp',
'ncurses_getch',
'ncurses_getmaxyx',
'ncurses_getmouse',
'ncurses_getyx',
'ncurses_halfdelay',
'ncurses_has_colors',
'ncurses_has_ic',
'ncurses_has_il',
'ncurses_has_key',
'ncurses_hide_panel',
'ncurses_hline',
'ncurses_inch',
'ncurses_init_color',
'ncurses_init_pair',
'ncurses_init',
'ncurses_insch',
'ncurses_insdelln',
'ncurses_insertln',
'ncurses_insstr',
'ncurses_instr',
'ncurses_isendwin',
'ncurses_keyok',
'ncurses_keypad',
'ncurses_killchar',
'ncurses_longname',
'ncurses_meta',
'ncurses_mouse_trafo',
'ncurses_mouseinterval',
'ncurses_mousemask',
'ncurses_move_panel',
'ncurses_move',
'ncurses_mvaddch',
'ncurses_mvaddchnstr',
'ncurses_mvaddchstr',
'ncurses_mvaddnstr',
'ncurses_mvaddstr',
'ncurses_mvcur',
'ncurses_mvdelch',
'ncurses_mvgetch',
'ncurses_mvhline',
'ncurses_mvinch',
'ncurses_mvvline',
'ncurses_mvwaddstr',
'ncurses_napms',
'ncurses_new_panel',
'ncurses_newpad',
'ncurses_newwin',
'ncurses_nl',
'ncurses_nocbreak',
'ncurses_noecho',
'ncurses_nonl',
'ncurses_noqiflush',
'ncurses_noraw',
'ncurses_pair_content',
'ncurses_panel_above',
'ncurses_panel_below',
'ncurses_panel_window',
'ncurses_pnoutrefresh',
'ncurses_prefresh',
'ncurses_putp',
'ncurses_qiflush',
'ncurses_raw',
'ncurses_refresh',
'ncurses_replace_panel',
'ncurses_reset_prog_mode',
'ncurses_reset_shell_mode',
'ncurses_resetty',
'ncurses_savetty',
'ncurses_scr_dump',
'ncurses_scr_init',
'ncurses_scr_restore',
'ncurses_scr_set',
'ncurses_scrl',
'ncurses_show_panel',
'ncurses_slk_attr',
'ncurses_slk_attroff',
'ncurses_slk_attron',
'ncurses_slk_attrset',
'ncurses_slk_clear',
'ncurses_slk_color',
'ncurses_slk_init',
'ncurses_slk_noutrefresh',
'ncurses_slk_refresh',
'ncurses_slk_restore',
'ncurses_slk_set',
'ncurses_slk_touch',
'ncurses_standend',
'ncurses_standout',
'ncurses_start_color',
'ncurses_termattrs',
'ncurses_termname',
'ncurses_timeout',
'ncurses_top_panel',
'ncurses_typeahead',
'ncurses_ungetch',
'ncurses_ungetmouse',
'ncurses_update_panels',
'ncurses_use_default_colors',
'ncurses_use_env',
'ncurses_use_extended_names',
'ncurses_vidattr',
'ncurses_vline',
'ncurses_waddch',
'ncurses_waddstr',
'ncurses_wattroff',
'ncurses_wattron',
'ncurses_wattrset',
'ncurses_wborder',
'ncurses_wclear',
'ncurses_wcolor_set',
'ncurses_werase',
'ncurses_wgetch',
'ncurses_whline',
'ncurses_wmouse_trafo',
'ncurses_wmove',
'ncurses_wnoutrefresh',
'ncurses_wrefresh',
'ncurses_wstandend',
'ncurses_wstandout',
'ncurses_wvline'],
'Network': ['checkdnsrr',
'closelog',
'define_syslog_variables',
'dns_check_record',
'dns_get_mx',
'dns_get_record',
'fsockopen',
'gethostbyaddr',
'gethostbyname',
'gethostbynamel'],
'Newt': ['newt_bell',
'newt_button_bar',
'newt_button',
'newt_centered_window',
'newt_checkbox_get_value',
'newt_checkbox_set_flags',
'newt_checkbox_set_value',
'newt_checkbox_tree_add_item',
'newt_checkbox_tree_find_item',
'newt_checkbox_tree_get_current',
'newt_checkbox_tree_get_entry_value',
'newt_checkbox_tree_get_multi_selection',
'newt_checkbox_tree_get_selection',
'newt_checkbox_tree_multi',
'newt_checkbox_tree_set_current',
'newt_checkbox_tree_set_entry_value',
'newt_checkbox_tree_set_entry',
'newt_checkbox_tree_set_width',
'newt_checkbox_tree',
'newt_checkbox',
'newt_clear_key_buffer'],
'OAuth': ['oauth_get_sbs', 'oauth_urlencode'],
'OCI8': ['oci_bind_array_by_name',
'oci_bind_by_name',
'oci_cancel',
'oci_close',
'oci_commit',
'oci_connect',
'oci_define_by_name',
'oci_error',
'oci_execute',
'oci_fetch_all',
'oci_fetch_array',
'oci_fetch_assoc',
'oci_fetch_object',
'oci_fetch_row',
'oci_fetch',
'oci_field_is_null',
'oci_field_name',
'oci_field_precision',
'oci_field_scale',
'oci_field_size',
'oci_field_type_raw',
'oci_field_type',
'oci_free_statement',
'oci_internal_debug',
'oci_lob_copy',
'oci_lob_is_equal',
'oci_new_collection',
'oci_new_connect',
'oci_new_cursor',
'oci_new_descriptor',
'oci_num_fields',
'oci_num_rows',
'oci_parse',
'oci_password_change',
'oci_pconnect',
'oci_result',
'oci_rollback',
'oci_server_version',
'oci_set_action',
'oci_set_client_identifier',
'oci_set_client_info',
'oci_set_edition',
'oci_set_module_name',
'oci_set_prefetch',
'oci_statement_type'],
'ODBC': ['odbc_autocommit',
'odbc_binmode',
'odbc_close_all',
'odbc_close',
'odbc_columnprivileges',
'odbc_columns',
'odbc_commit',
'odbc_connect',
'odbc_cursor',
'odbc_data_source',
'odbc_do',
'odbc_error',
'odbc_errormsg',
'odbc_exec',
'odbc_execute',
'odbc_fetch_array',
'odbc_fetch_into',
'odbc_fetch_object',
'odbc_fetch_row',
'odbc_field_len',
'odbc_field_name',
'odbc_field_num',
'odbc_field_precision',
'odbc_field_scale',
'odbc_field_type',
'odbc_foreignkeys',
'odbc_free_result',
'odbc_gettypeinfo',
'odbc_longreadlen',
'odbc_next_result',
'odbc_num_fields',
'odbc_num_rows',
'odbc_pconnect',
'odbc_prepare',
'odbc_primarykeys',
'odbc_procedurecolumns',
'odbc_procedures',
'odbc_result_all',
'odbc_result',
'odbc_rollback',
'odbc_setoption',
'odbc_specialcolumns',
'odbc_statistics',
'odbc_tableprivileges',
'odbc_tables'],
'Object Aggregation': ['aggregate_info',
'aggregate_methods_by_list',
'aggregate_methods_by_regexp'],
'Object overloading': ['overload'],
'OpenAL': ['openal_buffer_create',
'openal_buffer_data',
'openal_buffer_destroy',
'openal_buffer_get',
'openal_buffer_loadwav',
'openal_context_create',
'openal_context_current',
'openal_context_destroy',
'openal_context_process',
'openal_context_suspend',
'openal_device_close',
'openal_device_open',
'openal_listener_get',
'openal_listener_set',
'openal_source_create',
'openal_source_destroy',
'openal_source_get',
'openal_source_pause',
'openal_source_play',
'openal_source_rewind',
'openal_source_set',
'openal_source_stop',
'openal_stream'],
'OpenSSL': ['openssl_csr_export_to_file',
'openssl_csr_export',
'openssl_csr_get_public_key',
'openssl_csr_get_subject',
'openssl_csr_new',
'openssl_csr_sign',
'openssl_decrypt',
'openssl_dh_compute_key',
'openssl_digest',
'openssl_encrypt',
'openssl_error_string',
'openssl_free_key',
'openssl_get_cipher_methods',
'openssl_get_md_methods',
'openssl_get_privatekey',
'openssl_get_publickey',
'openssl_open',
'openssl_pkcs12_export_to_file',
'openssl_pkcs12_export',
'openssl_pkcs12_read',
'openssl_pkcs7_decrypt',
'openssl_pkcs7_encrypt',
'openssl_pkcs7_sign',
'openssl_pkcs7_verify',
'openssl_pkey_export_to_file',
'openssl_pkey_export',
'openssl_pkey_free',
'openssl_pkey_get_details',
'openssl_pkey_get_private',
'openssl_pkey_get_public',
'openssl_pkey_new',
'openssl_private_decrypt',
'openssl_private_encrypt',
'openssl_public_decrypt',
'openssl_public_encrypt',
'openssl_random_pseudo_bytes',
'openssl_seal',
'openssl_sign',
'openssl_verify',
'openssl_x509_check_private_key',
'openssl_x509_checkpurpose',
'openssl_x509_export_to_file',
'openssl_x509_export',
'openssl_x509_free',
'openssl_x509_parse',
'openssl_x509_read'],
'Output Control': ['flush',
'ob_clean',
'ob_end_clean',
'ob_end_flush',
'ob_flush',
'ob_get_clean',
'ob_get_contents',
'ob_get_flush',
'ob_get_length',
'ob_get_level',
'ob_get_status',
'ob_gzhandler',
'ob_implicit_flush',
'ob_list_handlers',
'ob_start',
'output_add_rewrite_var',
'output_reset_rewrite_vars'],
'Ovrimos SQL': ['ovrimos_close',
'ovrimos_commit',
'ovrimos_connect',
'ovrimos_cursor',
'ovrimos_exec',
'ovrimos_execute',
'ovrimos_fetch_into',
'ovrimos_fetch_row',
'ovrimos_field_len',
'ovrimos_field_name',
'ovrimos_field_num',
'ovrimos_field_type',
'ovrimos_free_result',
'ovrimos_longreadlen',
'ovrimos_num_fields',
'ovrimos_num_rows',
'ovrimos_prepare',
'ovrimos_result_all',
'ovrimos_result',
'ovrimos_rollback'],
'PCNTL': ['pcntl_alarm',
'pcntl_exec',
'pcntl_fork',
'pcntl_getpriority',
'pcntl_setpriority',
'pcntl_signal_dispatch',
'pcntl_signal',
'pcntl_sigprocmask',
'pcntl_sigtimedwait',
'pcntl_sigwaitinfo',
'pcntl_wait',
'pcntl_waitpid',
'pcntl_wexitstatus',
'pcntl_wifexited',
'pcntl_wifsignaled',
'pcntl_wifstopped',
'pcntl_wstopsig',
'pcntl_wtermsig'],
'PCRE': ['preg_filter',
'preg_grep',
'preg_last_error',
'preg_match_all',
'preg_match',
'preg_quote',
'preg_replace_callback',
'preg_replace',
'preg_split'],
'PDF': ['PDF_activate_item',
'PDF_add_annotation',
'PDF_add_bookmark',
'PDF_add_launchlink',
'PDF_add_locallink',
'PDF_add_nameddest',
'PDF_add_note',
'PDF_add_outline',
'PDF_add_pdflink',
'PDF_add_table_cell',
'PDF_add_textflow',
'PDF_add_thumbnail',
'PDF_add_weblink',
'PDF_arc',
'PDF_arcn',
'PDF_attach_file',
'PDF_begin_document',
'PDF_begin_font',
'PDF_begin_glyph',
'PDF_begin_item',
'PDF_begin_layer',
'PDF_begin_page_ext',
'PDF_begin_page',
'PDF_begin_pattern',
'PDF_begin_template_ext',
'PDF_begin_template',
'PDF_circle',
'PDF_clip',
'PDF_close_image',
'PDF_close_pdi_page',
'PDF_close_pdi',
'PDF_close',
'PDF_closepath_fill_stroke',
'PDF_closepath_stroke',
'PDF_closepath',
'PDF_concat',
'PDF_continue_text',
'PDF_create_3dview',
'PDF_create_action',
'PDF_create_annotation',
'PDF_create_bookmark',
'PDF_create_field',
'PDF_create_fieldgroup',
'PDF_create_gstate',
'PDF_create_pvf',
'PDF_create_textflow',
'PDF_curveto',
'PDF_define_layer',
'PDF_delete_pvf',
'PDF_delete_table',
'PDF_delete_textflow',
'PDF_delete',
'PDF_encoding_set_char',
'PDF_end_document',
'PDF_end_font',
'PDF_end_glyph',
'PDF_end_item',
'PDF_end_layer',
'PDF_end_page_ext',
'PDF_end_page',
'PDF_end_pattern',
'PDF_end_template',
'PDF_endpath',
'PDF_fill_imageblock',
'PDF_fill_pdfblock',
'PDF_fill_stroke',
'PDF_fill_textblock',
'PDF_fill',
'PDF_findfont',
'PDF_fit_image',
'PDF_fit_pdi_page',
'PDF_fit_table',
'PDF_fit_textflow',
'PDF_fit_textline',
'PDF_get_apiname',
'PDF_get_buffer',
'PDF_get_errmsg',
'PDF_get_errnum',
'PDF_get_font',
'PDF_get_fontname',
'PDF_get_fontsize',
'PDF_get_image_height',
'PDF_get_image_width',
'PDF_get_majorversion',
'PDF_get_minorversion',
'PDF_get_parameter',
'PDF_get_pdi_parameter',
'PDF_get_pdi_value',
'PDF_get_value',
'PDF_info_font',
'PDF_info_matchbox',
'PDF_info_table',
'PDF_info_textflow',
'PDF_info_textline',
'PDF_initgraphics',
'PDF_lineto',
'PDF_load_3ddata',
'PDF_load_font',
'PDF_load_iccprofile',
'PDF_load_image',
'PDF_makespotcolor',
'PDF_moveto',
'PDF_new',
'PDF_open_ccitt',
'PDF_open_file',
'PDF_open_gif',
'PDF_open_image_file',
'PDF_open_image',
'PDF_open_jpeg',
'PDF_open_memory_image',
'PDF_open_pdi_document',
'PDF_open_pdi_page',
'PDF_open_pdi',
'PDF_open_tiff',
'PDF_pcos_get_number',
'PDF_pcos_get_stream',
'PDF_pcos_get_string',
'PDF_place_image',
'PDF_place_pdi_page',
'PDF_process_pdi',
'PDF_rect',
'PDF_restore',
'PDF_resume_page',
'PDF_rotate',
'PDF_save',
'PDF_scale',
'PDF_set_border_color',
'PDF_set_border_dash',
'PDF_set_border_style',
'PDF_set_char_spacing',
'PDF_set_duration',
'PDF_set_gstate',
'PDF_set_horiz_scaling',
'PDF_set_info_author',
'PDF_set_info_creator',
'PDF_set_info_keywords',
'PDF_set_info_subject',
'PDF_set_info_title',
'PDF_set_info',
'PDF_set_layer_dependency',
'PDF_set_leading',
'PDF_set_parameter',
'PDF_set_text_matrix',
'PDF_set_text_pos',
'PDF_set_text_rendering',
'PDF_set_text_rise',
'PDF_set_value',
'PDF_set_word_spacing',
'PDF_setcolor',
'PDF_setdash',
'PDF_setdashpattern',
'PDF_setflat',
'PDF_setfont',
'PDF_setgray_fill',
'PDF_setgray_stroke',
'PDF_setgray',
'PDF_setlinecap',
'PDF_setlinejoin',
'PDF_setlinewidth',
'PDF_setmatrix',
'PDF_setmiterlimit',
'PDF_setpolydash',
'PDF_setrgbcolor_fill',
'PDF_setrgbcolor_stroke',
'PDF_setrgbcolor',
'PDF_shading_pattern',
'PDF_shading',
'PDF_shfill',
'PDF_show_boxed',
'PDF_show_xy',
'PDF_show',
'PDF_skew',
'PDF_stringwidth',
'PDF_stroke',
'PDF_suspend_page',
'PDF_translate',
'PDF_utf16_to_utf8',
'PDF_utf32_to_utf16',
'PDF_utf8_to_utf16'],
'PHP Options/Info': ['assert_options',
'assert',
'dl',
'extension_loaded',
'gc_collect_cycles',
'gc_disable',
'gc_enable',
'gc_enabled',
'get_cfg_var',
'get_current_user',
'get_defined_constants',
'get_extension_funcs',
'get_include_path',
'get_included_files',
'get_loaded_extensions',
'get_magic_quotes_gpc',
'get_magic_quotes_runtime',
'get_required_files',
'getenv',
'getlastmod',
'getmygid',
'getmyinode',
'getmypid',
'getmyuid',
'getopt',
'getrusage',
'ini_alter',
'ini_get_all',
'ini_get',
'ini_restore',
'ini_set',
'magic_quotes_runtime',
'memory_get_peak_usage',
'memory_get_usage',
'php_ini_loaded_file',
'php_ini_scanned_files',
'php_logo_guid',
'php_sapi_name',
'php_uname',
'phpcredits',
'phpinfo',
'phpversion',
'putenv',
'restore_include_path',
'set_include_path',
'set_magic_quotes_runtime',
'set_time_limit',
'sys_get_temp_dir',
'version_compare',
'zend_logo_guid',
'zend_thread_id',
'zend_version'],
'POSIX': ['posix_access',
'posix_ctermid',
'posix_errno',
'posix_get_last_error',
'posix_getcwd',
'posix_getegid',
'posix_geteuid',
'posix_getgid',
'posix_getgrgid',
'posix_getgrnam',
'posix_getgroups',
'posix_getlogin',
'posix_getpgid',
'posix_getpgrp',
'posix_getpid',
'posix_getppid',
'posix_getpwnam',
'posix_getpwuid',
'posix_getrlimit',
'posix_getsid',
'posix_getuid',
'posix_initgroups',
'posix_isatty',
'posix_kill',
'posix_mkfifo',
'posix_mknod',
'posix_setegid',
'posix_seteuid',
'posix_setgid',
'posix_setpgid',
'posix_setsid',
'posix_setuid',
'posix_strerror',
'posix_times',
'posix_ttyname',
'posix_uname'],
'POSIX Regex': ['ereg_replace',
'ereg',
'eregi_replace',
'eregi',
'split',
'spliti',
'sql_regcase'],
'PS': ['ps_add_bookmark',
'ps_add_launchlink',
'ps_add_locallink',
'ps_add_note',
'ps_add_pdflink',
'ps_add_weblink',
'ps_arc',
'ps_arcn',
'ps_begin_page',
'ps_begin_pattern',
'ps_begin_template',
'ps_circle',
'ps_clip',
'ps_close_image',
'ps_close',
'ps_closepath_stroke',
'ps_closepath',
'ps_continue_text',
'ps_curveto',
'ps_delete',
'ps_end_page',
'ps_end_pattern',
'ps_end_template',
'ps_fill_stroke',
'ps_fill',
'ps_findfont',
'ps_get_buffer',
'ps_get_parameter',
'ps_get_value',
'ps_hyphenate',
'ps_include_file',
'ps_lineto',
'ps_makespotcolor',
'ps_moveto',
'ps_new',
'ps_open_file',
'ps_open_image_file',
'ps_open_image',
'ps_open_memory_image',
'ps_place_image',
'ps_rect',
'ps_restore',
'ps_rotate',
'ps_save',
'ps_scale',
'ps_set_border_color',
'ps_set_border_dash',
'ps_set_border_style',
'ps_set_info',
'ps_set_parameter',
'ps_set_text_pos',
'ps_set_value',
'ps_setcolor',
'ps_setdash',
'ps_setflat',
'ps_setfont',
'ps_setgray',
'ps_setlinecap',
'ps_setlinejoin',
'ps_setlinewidth',
'ps_setmiterlimit',
'ps_setoverprintmode',
'ps_setpolydash',
'ps_shading_pattern',
'ps_shading',
'ps_shfill',
'ps_show_boxed',
'ps_show_xy2',
'ps_show_xy',
'ps_show2',
'ps_show',
'ps_string_geometry',
'ps_stringwidth',
'ps_stroke',
'ps_symbol_name',
'ps_symbol_width',
'ps_symbol',
'ps_translate'],
'Paradox': ['px_close',
'px_create_fp',
'px_date2string',
'px_delete_record',
'px_delete',
'px_get_field',
'px_get_info',
'px_get_parameter',
'px_get_record',
'px_get_schema',
'px_get_value',
'px_insert_record',
'px_new',
'px_numfields',
'px_numrecords',
'px_open_fp',
'px_put_record',
'px_retrieve_record',
'px_set_blob_file',
'px_set_parameter',
'px_set_tablename',
'px_set_targetencoding',
'px_set_value',
'px_timestamp2string',
'px_update_record'],
'Parsekit': ['parsekit_compile_file',
'parsekit_compile_string',
'parsekit_func_arginfo'],
'PostgreSQL': ['pg_affected_rows',
'pg_cancel_query',
'pg_client_encoding',
'pg_close',
'pg_connect',
'pg_connection_busy',
'pg_connection_reset',
'pg_connection_status',
'pg_convert',
'pg_copy_from',
'pg_copy_to',
'pg_dbname',
'pg_delete',
'pg_end_copy',
'pg_escape_bytea',
'pg_escape_string',
'pg_execute',
'pg_fetch_all_columns',
'pg_fetch_all',
'pg_fetch_array',
'pg_fetch_assoc',
'pg_fetch_object',
'pg_fetch_result',
'pg_fetch_row',
'pg_field_is_null',
'pg_field_name',
'pg_field_num',
'pg_field_prtlen',
'pg_field_size',
'pg_field_table',
'pg_field_type_oid',
'pg_field_type',
'pg_free_result',
'pg_get_notify',
'pg_get_pid',
'pg_get_result',
'pg_host',
'pg_insert',
'pg_last_error',
'pg_last_notice',
'pg_last_oid',
'pg_lo_close',
'pg_lo_create',
'pg_lo_export',
'pg_lo_import',
'pg_lo_open',
'pg_lo_read_all',
'pg_lo_read',
'pg_lo_seek',
'pg_lo_tell',
'pg_lo_unlink',
'pg_lo_write',
'pg_meta_data',
'pg_num_fields',
'pg_num_rows',
'pg_options',
'pg_parameter_status',
'pg_pconnect',
'pg_ping',
'pg_port',
'pg_prepare'],
'Printer': ['printer_abort',
'printer_close',
'printer_create_brush',
'printer_create_dc',
'printer_create_font',
'printer_create_pen',
'printer_delete_brush',
'printer_delete_dc',
'printer_delete_font',
'printer_delete_pen',
'printer_draw_bmp',
'printer_draw_chord',
'printer_draw_elipse',
'printer_draw_line',
'printer_draw_pie',
'printer_draw_rectangle',
'printer_draw_roundrect',
'printer_draw_text',
'printer_end_doc',
'printer_end_page',
'printer_get_option',
'printer_list',
'printer_logical_fontheight',
'printer_open',
'printer_select_brush',
'printer_select_font',
'printer_select_pen',
'printer_set_option',
'printer_start_doc',
'printer_start_page',
'printer_write'],
'Program execution': ['escapeshellarg',
'escapeshellcmd',
'exec',
'passthru',
'proc_close',
'proc_get_status',
'proc_nice',
'proc_open',
'proc_terminate',
'shell_exec',
'system'],
'Pspell': ['pspell_add_to_personal',
'pspell_add_to_session',
'pspell_check',
'pspell_clear_session',
'pspell_config_create',
'pspell_config_data_dir',
'pspell_config_dict_dir',
'pspell_config_ignore',
'pspell_config_mode',
'pspell_config_personal',
'pspell_config_repl',
'pspell_config_runtogether',
'pspell_config_save_repl'],
'RPM Reader': ['rpm_close',
'rpm_get_tag',
'rpm_is_valid',
'rpm_open',
'rpm_version'],
'RRD': ['rrd_create',
'rrd_error',
'rrd_fetch',
'rrd_first',
'rrd_graph',
'rrd_info',
'rrd_last',
'rrd_lastupdate',
'rrd_restore',
'rrd_tune',
'rrd_update',
'rrd_xport'],
'Radius': ['radius_acct_open',
'radius_add_server',
'radius_auth_open',
'radius_close',
'radius_config',
'radius_create_request',
'radius_cvt_addr',
'radius_cvt_int',
'radius_cvt_string',
'radius_demangle_mppe_key',
'radius_demangle',
'radius_get_attr',
'radius_get_vendor_attr',
'radius_put_addr',
'radius_put_attr',
'radius_put_int',
'radius_put_string',
'radius_put_vendor_addr',
'radius_put_vendor_attr',
'radius_put_vendor_int',
'radius_put_vendor_string',
'radius_request_authenticator',
'radius_send_request',
'radius_server_secret',
'radius_strerror'],
'Rar': ['rar_wrapper_cache_stats'],
'Readline': ['readline_add_history',
'readline_callback_handler_install',
'readline_callback_handler_remove',
'readline_callback_read_char',
'readline_clear_history',
'readline_completion_function',
'readline_info',
'readline_list_history',
'readline_on_new_line',
'readline_read_history',
'readline_redisplay',
'readline_write_history',
'readline'],
'Recode': ['recode_file', 'recode_string', 'recode'],
'SNMP': ['snmp_get_quick_print',
'snmp_get_valueretrieval',
'snmp_read_mib',
'snmp_set_enum_print',
'snmp_set_oid_numeric_print',
'snmp_set_oid_output_format',
'snmp_set_quick_print',
'snmp_set_valueretrieval',
'snmp2_get',
'snmp2_getnext',
'snmp2_real_walk',
'snmp2_set',
'snmp2_walk',
'snmp3_get',
'snmp3_getnext',
'snmp3_real_walk',
'snmp3_set',
'snmp3_walk',
'snmpget',
'snmpgetnext',
'snmprealwalk',
'snmpset',
'snmpwalk',
'snmpwalkoid'],
'SOAP': ['is_soap_fault', 'use_soap_error_handler'],
'SPL': ['class_implements',
'class_parents',
'iterator_apply',
'iterator_count',
'iterator_to_array',
'spl_autoload_call',
'spl_autoload_extensions',
'spl_autoload_functions',
'spl_autoload_register',
'spl_autoload_unregister',
'spl_autoload',
'spl_classes',
'spl_object_hash'],
'SPPLUS': ['calcul_hmac', 'calculhmac', 'nthmac', 'signeurlpaiement'],
'SQLite': ['sqlite_array_query', 'sqlite_busy_timeout', 'sqlite_changes'],
'SSH2': ['ssh2_auth_hostbased_file',
'ssh2_auth_none',
'ssh2_auth_password',
'ssh2_auth_pubkey_file',
'ssh2_connect',
'ssh2_exec',
'ssh2_fetch_stream',
'ssh2_fingerprint',
'ssh2_methods_negotiated',
'ssh2_publickey_add',
'ssh2_publickey_init',
'ssh2_publickey_list',
'ssh2_publickey_remove',
'ssh2_scp_recv',
'ssh2_scp_send',
'ssh2_sftp_lstat',
'ssh2_sftp_mkdir',
'ssh2_sftp_readlink',
'ssh2_sftp_realpath',
'ssh2_sftp_rename',
'ssh2_sftp_rmdir',
'ssh2_sftp_stat',
'ssh2_sftp_symlink',
'ssh2_sftp_unlink',
'ssh2_sftp',
'ssh2_shell',
'ssh2_tunnel'],
'SVN': ['svn_add',
'svn_auth_get_parameter',
'svn_auth_set_parameter',
'svn_blame',
'svn_cat',
'svn_checkout',
'svn_cleanup',
'svn_client_version',
'svn_commit',
'svn_delete',
'svn_diff',
'svn_export',
'svn_fs_abort_txn',
'svn_fs_apply_text',
'svn_fs_begin_txn2',
'svn_fs_change_node_prop',
'svn_fs_check_path',
'svn_fs_contents_changed',
'svn_fs_copy',
'svn_fs_delete',
'svn_fs_dir_entries',
'svn_fs_file_contents',
'svn_fs_file_length',
'svn_fs_is_dir',
'svn_fs_is_file',
'svn_fs_make_dir',
'svn_fs_make_file',
'svn_fs_node_created_rev',
'svn_fs_node_prop',
'svn_fs_props_changed',
'svn_fs_revision_prop',
'svn_fs_revision_root',
'svn_fs_txn_root',
'svn_fs_youngest_rev',
'svn_import',
'svn_log',
'svn_ls',
'svn_mkdir',
'svn_repos_create',
'svn_repos_fs_begin_txn_for_commit',
'svn_repos_fs_commit_txn',
'svn_repos_fs',
'svn_repos_hotcopy',
'svn_repos_open',
'svn_repos_recover',
'svn_revert',
'svn_status',
'svn_update'],
'SWF': ['swf_actiongeturl',
'swf_actiongotoframe',
'swf_actiongotolabel',
'swf_actionnextframe',
'swf_actionplay',
'swf_actionprevframe',
'swf_actionsettarget',
'swf_actionstop',
'swf_actiontogglequality',
'swf_actionwaitforframe',
'swf_addbuttonrecord',
'swf_addcolor',
'swf_closefile',
'swf_definebitmap',
'swf_definefont',
'swf_defineline',
'swf_definepoly',
'swf_definerect',
'swf_definetext',
'swf_endbutton',
'swf_enddoaction',
'swf_endshape',
'swf_endsymbol',
'swf_fontsize',
'swf_fontslant',
'swf_fonttracking',
'swf_getbitmapinfo',
'swf_getfontinfo',
'swf_getframe',
'swf_labelframe',
'swf_lookat',
'swf_modifyobject',
'swf_mulcolor',
'swf_nextid',
'swf_oncondition',
'swf_openfile',
'swf_ortho2',
'swf_ortho',
'swf_perspective',
'swf_placeobject',
'swf_polarview',
'swf_popmatrix',
'swf_posround',
'swf_pushmatrix',
'swf_removeobject',
'swf_rotate',
'swf_scale',
'swf_setfont',
'swf_setframe',
'swf_shapearc',
'swf_shapecurveto3',
'swf_shapecurveto',
'swf_shapefillbitmapclip',
'swf_shapefillbitmaptile',
'swf_shapefilloff',
'swf_shapefillsolid',
'swf_shapelinesolid',
'swf_shapelineto',
'swf_shapemoveto',
'swf_showframe',
'swf_startbutton',
'swf_startdoaction',
'swf_startshape',
'swf_startsymbol',
'swf_textwidth',
'swf_translate',
'swf_viewport'],
'Semaphore': ['ftok',
'msg_get_queue',
'msg_queue_exists',
'msg_receive',
'msg_remove_queue',
'msg_send',
'msg_set_queue',
'msg_stat_queue',
'sem_acquire',
'sem_get',
'sem_release',
'sem_remove',
'shm_attach',
'shm_detach',
'shm_get_var',
'shm_has_var',
'shm_put_var',
'shm_remove_var',
'shm_remove'],
'Session': ['session_cache_expire',
'session_cache_limiter',
'session_commit',
'session_decode',
'session_destroy',
'session_encode',
'session_get_cookie_params',
'session_id',
'session_is_registered',
'session_module_name',
'session_name',
'session_regenerate_id',
'session_register',
'session_save_path',
'session_set_cookie_params',
'session_set_save_handler',
'session_start',
'session_unregister',
'session_unset',
'session_write_close'],
'Session PgSQL': ['session_pgsql_add_error',
'session_pgsql_get_error',
'session_pgsql_get_field',
'session_pgsql_reset',
'session_pgsql_set_field',
'session_pgsql_status'],
'Shared Memory': ['shmop_close',
'shmop_delete',
'shmop_open',
'shmop_read',
'shmop_size',
'shmop_write'],
'SimpleXML': ['simplexml_import_dom',
'simplexml_load_file',
'simplexml_load_string'],
'Socket': ['socket_accept',
'socket_bind',
'socket_clear_error',
'socket_close',
'socket_connect',
'socket_create_listen',
'socket_create_pair',
'socket_create',
'socket_get_option',
'socket_getpeername',
'socket_getsockname',
'socket_last_error',
'socket_listen',
'socket_read',
'socket_recv',
'socket_recvfrom',
'socket_select',
'socket_send',
'socket_sendto',
'socket_set_block',
'socket_set_nonblock',
'socket_set_option',
'socket_shutdown',
'socket_strerror',
'socket_write'],
'Solr': ['solr_get_version'],
'Statistic': ['stats_absolute_deviation',
'stats_cdf_beta',
'stats_cdf_binomial',
'stats_cdf_cauchy',
'stats_cdf_chisquare',
'stats_cdf_exponential',
'stats_cdf_f',
'stats_cdf_gamma',
'stats_cdf_laplace',
'stats_cdf_logistic',
'stats_cdf_negative_binomial',
'stats_cdf_noncentral_chisquare',
'stats_cdf_noncentral_f',
'stats_cdf_poisson',
'stats_cdf_t',
'stats_cdf_uniform',
'stats_cdf_weibull',
'stats_covariance',
'stats_den_uniform',
'stats_dens_beta',
'stats_dens_cauchy',
'stats_dens_chisquare',
'stats_dens_exponential',
'stats_dens_f',
'stats_dens_gamma',
'stats_dens_laplace',
'stats_dens_logistic',
'stats_dens_negative_binomial',
'stats_dens_normal',
'stats_dens_pmf_binomial',
'stats_dens_pmf_hypergeometric',
'stats_dens_pmf_poisson',
'stats_dens_t',
'stats_dens_weibull',
'stats_harmonic_mean',
'stats_kurtosis',
'stats_rand_gen_beta',
'stats_rand_gen_chisquare',
'stats_rand_gen_exponential',
'stats_rand_gen_f',
'stats_rand_gen_funiform',
'stats_rand_gen_gamma',
'stats_rand_gen_ibinomial_negative',
'stats_rand_gen_ibinomial',
'stats_rand_gen_int',
'stats_rand_gen_ipoisson',
'stats_rand_gen_iuniform',
'stats_rand_gen_noncenral_chisquare',
'stats_rand_gen_noncentral_f',
'stats_rand_gen_noncentral_t',
'stats_rand_gen_normal',
'stats_rand_gen_t',
'stats_rand_get_seeds',
'stats_rand_phrase_to_seeds',
'stats_rand_ranf',
'stats_rand_setall',
'stats_skew',
'stats_standard_deviation',
'stats_stat_binomial_coef',
'stats_stat_correlation',
'stats_stat_gennch',
'stats_stat_independent_t',
'stats_stat_innerproduct',
'stats_stat_noncentral_t',
'stats_stat_paired_t',
'stats_stat_percentile',
'stats_stat_powersum',
'stats_variance'],
'Stomp': ['stomp_connect_error', 'stomp_version'],
'Stream': ['set_socket_blocking',
'stream_bucket_append',
'stream_bucket_make_writeable',
'stream_bucket_new',
'stream_bucket_prepend',
'stream_context_create',
'stream_context_get_default',
'stream_context_get_options',
'stream_context_get_params',
'stream_context_set_default',
'stream_context_set_option',
'stream_context_set_params',
'stream_copy_to_stream',
'stream_encoding',
'stream_filter_append',
'stream_filter_prepend',
'stream_filter_register',
'stream_filter_remove',
'stream_get_contents',
'stream_get_filters',
'stream_get_line',
'stream_get_meta_data',
'stream_get_transports',
'stream_get_wrappers',
'stream_is_local',
'stream_notification_callback',
'stream_register_wrapper',
'stream_resolve_include_path',
'stream_select'],
'String': ['addcslashes',
'addslashes',
'bin2hex',
'chop',
'chr',
'chunk_split',
'convert_cyr_string',
'convert_uudecode',
'convert_uuencode',
'count_chars',
'crc32',
'crypt',
'echo',
'explode',
'fprintf',
'get_html_translation_table',
'hebrev',
'hebrevc',
'html_entity_decode',
'htmlentities',
'htmlspecialchars_decode',
'htmlspecialchars',
'implode',
'join',
'lcfirst',
'levenshtein',
'localeconv',
'ltrim',
'md5_file',
'md5',
'metaphone',
'money_format',
'nl_langinfo',
'nl2br',
'number_format',
'ord',
'parse_str',
'print',
'printf',
'quoted_printable_decode',
'quoted_printable_encode',
'quotemeta',
'rtrim',
'setlocale',
'sha1_file',
'sha1',
'similar_text',
'soundex',
'sprintf',
'sscanf',
'str_getcsv',
'str_ireplace',
'str_pad',
'str_repeat',
'str_replace',
'str_rot13',
'str_shuffle',
'str_split',
'str_word_count',
'strcasecmp',
'strchr',
'strcmp',
'strcoll',
'strcspn',
'strip_tags',
'stripcslashes',
'stripos',
'stripslashes',
'stristr',
'strlen',
'strnatcasecmp',
'strnatcmp',
'strncasecmp',
'strncmp',
'strpbrk',
'strpos',
'strrchr',
'strrev',
'strripos',
'strrpos',
'strspn'],
'Sybase': ['sybase_affected_rows',
'sybase_close',
'sybase_connect',
'sybase_data_seek',
'sybase_deadlock_retry_count',
'sybase_fetch_array',
'sybase_fetch_assoc',
'sybase_fetch_field',
'sybase_fetch_object',
'sybase_fetch_row',
'sybase_field_seek',
'sybase_free_result',
'sybase_get_last_message',
'sybase_min_client_severity',
'sybase_min_error_severity',
'sybase_min_message_severity',
'sybase_min_server_severity',
'sybase_num_fields',
'sybase_num_rows',
'sybase_pconnect',
'sybase_query',
'sybase_result',
'sybase_select_db',
'sybase_set_message_handler',
'sybase_unbuffered_query'],
'TCP': ['tcpwrap_check'],
'Tidy': ['ob_tidyhandler',
'tidy_access_count',
'tidy_config_count',
'tidy_error_count',
'tidy_get_error_buffer',
'tidy_get_output',
'tidy_load_config',
'tidy_reset_config',
'tidy_save_config',
'tidy_set_encoding',
'tidy_setopt',
'tidy_warning_count'],
'Tokenizer': ['token_get_all', 'token_name'],
'URL': ['base64_decode',
'base64_encode',
'get_headers',
'get_meta_tags',
'http_build_query',
'parse_url',
'rawurldecode',
'rawurlencode',
'urldecode',
'urlencode'],
'Variable handling': ['debug_zval_dump',
'doubleval',
'empty',
'floatval',
'get_defined_vars',
'get_resource_type',
'gettype',
'import_request_variables',
'intval',
'is_array',
'is_bool',
'is_callable',
'is_double',
'is_float',
'is_int',
'is_integer',
'is_long',
'is_null',
'is_numeric',
'is_object',
'is_real',
'is_resource',
'is_scalar',
'is_string',
'isset',
'print_r',
'serialize',
'settype',
'strval',
'unserialize',
'unset',
'var_dump',
'var_export'],
'W32api': ['w32api_deftype',
'w32api_init_dtype',
'w32api_invoke_function',
'w32api_register_function',
'w32api_set_call_method'],
'WDDX': ['wddx_add_vars',
'wddx_deserialize',
'wddx_packet_end',
'wddx_packet_start',
'wddx_serialize_value',
'wddx_serialize_vars',
'wddx_unserialize'],
'WinCache': ['wincache_fcache_fileinfo',
'wincache_fcache_meminfo',
'wincache_lock',
'wincache_ocache_fileinfo',
'wincache_ocache_meminfo',
'wincache_refresh_if_changed',
'wincache_rplist_fileinfo',
'wincache_rplist_meminfo',
'wincache_scache_info',
'wincache_scache_meminfo',
'wincache_ucache_add',
'wincache_ucache_cas',
'wincache_ucache_clear',
'wincache_ucache_dec',
'wincache_ucache_delete',
'wincache_ucache_exists',
'wincache_ucache_get',
'wincache_ucache_inc',
'wincache_ucache_info',
'wincache_ucache_meminfo',
'wincache_ucache_set',
'wincache_unlock'],
'XML Parser': ['utf8_decode'],
'XML-RPC': ['xmlrpc_decode_request',
'xmlrpc_decode',
'xmlrpc_encode_request',
'xmlrpc_encode',
'xmlrpc_get_type',
'xmlrpc_is_fault',
'xmlrpc_parse_method_descriptions',
'xmlrpc_server_add_introspection_data',
'xmlrpc_server_call_method',
'xmlrpc_server_create',
'xmlrpc_server_destroy',
'xmlrpc_server_register_introspection_callback',
'xmlrpc_server_register_method',
'xmlrpc_set_type'],
'XSLT (PHP4)': ['xslt_backend_info',
'xslt_backend_name',
'xslt_backend_version',
'xslt_create',
'xslt_errno',
'xslt_error',
'xslt_free',
'xslt_getopt',
'xslt_process',
'xslt_set_base',
'xslt_set_encoding',
'xslt_set_error_handler',
'xslt_set_log',
'xslt_set_object',
'xslt_set_sax_handler',
'xslt_set_sax_handlers',
'xslt_set_scheme_handler',
'xslt_set_scheme_handlers',
'xslt_setopt'],
'YAZ': ['yaz_addinfo',
'yaz_ccl_conf',
'yaz_ccl_parse',
'yaz_close',
'yaz_connect',
'yaz_database',
'yaz_element',
'yaz_errno',
'yaz_error',
'yaz_es_result',
'yaz_es',
'yaz_get_option',
'yaz_hits',
'yaz_itemorder',
'yaz_present',
'yaz_range',
'yaz_record',
'yaz_scan_result',
'yaz_scan',
'yaz_schema',
'yaz_search',
'yaz_set_option',
'yaz_sort',
'yaz_syntax',
'yaz_wait'],
'YP/NIS': ['yp_all',
'yp_cat',
'yp_err_string',
'yp_errno',
'yp_first',
'yp_get_default_domain',
'yp_master',
'yp_match',
'yp_next',
'yp_order'],
'Yaml': ['yaml_emit_file',
'yaml_emit',
'yaml_parse_file',
'yaml_parse_url',
'yaml_parse'],
'Zip': ['zip_close',
'zip_entry_close',
'zip_entry_compressedsize',
'zip_entry_compressionmethod',
'zip_entry_filesize',
'zip_entry_name',
'zip_entry_open',
'zip_entry_read',
'zip_open',
'zip_read'],
'Zlib': ['gzclose',
'gzcompress',
'gzdecode',
'gzdeflate',
'gzencode',
'gzeof',
'gzfile',
'gzgetc',
'gzgets',
'gzgetss',
'gzinflate',
'gzopen',
'gzpassthru',
'gzputs',
'gzread',
'gzrewind',
'gzseek',
'gztell',
'gzuncompress',
'gzwrite',
'readgzfile',
'zlib_get_coding_type'],
'bcompiler': ['bcompiler_load_exe',
'bcompiler_load',
'bcompiler_parse_class',
'bcompiler_read',
'bcompiler_write_class',
'bcompiler_write_constant',
'bcompiler_write_exe_footer',
'bcompiler_write_file',
'bcompiler_write_footer',
'bcompiler_write_function',
'bcompiler_write_functions_from_file',
'bcompiler_write_header',
'bcompiler_write_included_filename'],
'cURL': ['curl_close',
'curl_copy_handle',
'curl_errno',
'curl_error',
'curl_exec',
'curl_getinfo',
'curl_init',
'curl_multi_add_handle',
'curl_multi_close',
'curl_multi_exec',
'curl_multi_getcontent',
'curl_multi_info_read',
'curl_multi_init',
'curl_multi_remove_handle',
'curl_multi_select',
'curl_setopt_array',
'curl_setopt',
'curl_version'],
'chdb': ['chdb_create'],
'dBase': ['dbase_add_record',
'dbase_close',
'dbase_create',
'dbase_delete_record',
'dbase_get_header_info',
'dbase_get_record_with_names',
'dbase_get_record',
'dbase_numfields',
'dbase_numrecords',
'dbase_open',
'dbase_pack',
'dbase_replace_record'],
'dbx': ['dbx_close',
'dbx_compare',
'dbx_connect',
'dbx_error',
'dbx_escape_string',
'dbx_fetch_row'],
'filePro': ['filepro_fieldcount',
'filepro_fieldname',
'filepro_fieldtype',
'filepro_fieldwidth',
'filepro_retrieve',
'filepro_rowcount',
'filepro'],
'iconv': ['iconv_get_encoding',
'iconv_mime_decode_headers',
'iconv_mime_decode',
'iconv_mime_encode',
'iconv_set_encoding',
'iconv_strlen',
'iconv_strpos',
'iconv_strrpos',
'iconv_substr',
'iconv',
'ob_iconv_handler'],
'inclued': ['inclued_get_data'],
'intl': ['intl_error_name',
'intl_get_error_code',
'intl_get_error_message',
'intl_is_failure'],
'libxml': ['libxml_clear_errors',
'libxml_disable_entity_loader',
'libxml_get_errors',
'libxml_get_last_error',
'libxml_set_streams_context',
'libxml_use_internal_errors'],
'mSQL': ['msql_affected_rows',
'msql_close',
'msql_connect',
'msql_create_db',
'msql_createdb',
'msql_data_seek',
'msql_db_query',
'msql_dbname',
'msql_drop_db',
'msql_error',
'msql_fetch_array',
'msql_fetch_field',
'msql_fetch_object',
'msql_fetch_row',
'msql_field_flags',
'msql_field_len',
'msql_field_name',
'msql_field_seek',
'msql_field_table',
'msql_field_type',
'msql_fieldflags',
'msql_fieldlen',
'msql_fieldname',
'msql_fieldtable',
'msql_fieldtype',
'msql_free_result',
'msql_list_dbs',
'msql_list_fields',
'msql_list_tables',
'msql_num_fields',
'msql_num_rows',
'msql_numfields',
'msql_numrows',
'msql_pconnect',
'msql_query',
'msql_regcase',
'msql_result',
'msql_select_db',
'msql_tablename',
'msql'],
'mnoGoSearch': ['udm_add_search_limit',
'udm_alloc_agent_array',
'udm_alloc_agent',
'udm_api_version',
'udm_cat_list',
'udm_cat_path',
'udm_check_charset',
'udm_check_stored',
'udm_clear_search_limits',
'udm_close_stored',
'udm_crc32',
'udm_errno',
'udm_error',
'udm_find',
'udm_free_agent',
'udm_free_ispell_data',
'udm_free_res',
'udm_get_doc_count',
'udm_get_res_field',
'udm_get_res_param',
'udm_hash32',
'udm_load_ispell_data',
'udm_open_stored',
'udm_set_agent_param'],
'mqseries': ['mqseries_back',
'mqseries_begin',
'mqseries_close',
'mqseries_cmit',
'mqseries_conn',
'mqseries_connx',
'mqseries_disc',
'mqseries_get',
'mqseries_inq',
'mqseries_open',
'mqseries_put1',
'mqseries_put',
'mqseries_set',
'mqseries_strerror'],
'mysqlnd_qc': ['mysqlnd_qc_change_handler',
'mysqlnd_qc_clear_cache',
'mysqlnd_qc_get_cache_info',
'mysqlnd_qc_get_core_stats',
'mysqlnd_qc_get_handler',
'mysqlnd_qc_get_query_trace_log',
'mysqlnd_qc_set_user_handlers'],
'qtdom': ['qdom_error', 'qdom_tree'],
'runkit': ['runkit_class_adopt',
'runkit_class_emancipate',
'runkit_constant_add',
'runkit_constant_redefine',
'runkit_constant_remove',
'runkit_function_add',
'runkit_function_copy',
'runkit_function_redefine',
'runkit_function_remove',
'runkit_function_rename',
'runkit_import',
'runkit_lint_file',
'runkit_lint',
'runkit_method_add',
'runkit_method_copy',
'runkit_method_redefine',
'runkit_method_remove',
'runkit_method_rename',
'runkit_return_value_used',
'runkit_sandbox_output_handler',
'runkit_superglobals'],
'ssdeep': ['ssdeep_fuzzy_compare',
'ssdeep_fuzzy_hash_filename',
'ssdeep_fuzzy_hash'],
'vpopmail': ['vpopmail_add_alias_domain_ex',
'vpopmail_add_alias_domain',
'vpopmail_add_domain_ex',
'vpopmail_add_domain',
'vpopmail_add_user',
'vpopmail_alias_add',
'vpopmail_alias_del_domain',
'vpopmail_alias_del',
'vpopmail_alias_get_all',
'vpopmail_alias_get',
'vpopmail_auth_user',
'vpopmail_del_domain_ex',
'vpopmail_del_domain',
'vpopmail_del_user',
'vpopmail_error',
'vpopmail_passwd',
'vpopmail_set_user_quota'],
'win32ps': ['win32_ps_list_procs', 'win32_ps_stat_mem', 'win32_ps_stat_proc'],
'win32service': ['win32_continue_service',
'win32_create_service',
'win32_delete_service',
'win32_get_last_control_message',
'win32_pause_service',
'win32_query_service_status',
'win32_set_service_status',
'win32_start_service_ctrl_dispatcher',
'win32_start_service',
'win32_stop_service'],
'xattr': ['xattr_get',
'xattr_list',
'xattr_remove',
'xattr_set',
'xattr_supported'],
'xdiff': ['xdiff_file_bdiff_size',
'xdiff_file_bdiff',
'xdiff_file_bpatch',
'xdiff_file_diff_binary',
'xdiff_file_diff',
'xdiff_file_merge3',
'xdiff_file_patch_binary',
'xdiff_file_patch',
'xdiff_file_rabdiff',
'xdiff_string_bdiff_size',
'xdiff_string_bdiff',
'xdiff_string_bpatch',
'xdiff_string_diff_binary',
'xdiff_string_diff',
'xdiff_string_merge3',
'xdiff_string_patch_binary',
'xdiff_string_patch',
'xdiff_string_rabdiff']}
if __name__ == '__main__':
    import glob
    import os
    import pprint
    import re
    import shutil
    import tarfile
    import urllib.request, urllib.parse, urllib.error

    PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
    PHP_MANUAL_DIR = './php-chunked-xhtml/'
    PHP_REFERENCE_GLOB = 'ref.*'
    PHP_FUNCTION_RE = '<a href="function\..*?\.html">(.*?)</a>'
    PHP_MODULE_RE = '<title>(.*?) Functions</title>'

    def get_php_functions():
        function_re = re.compile(PHP_FUNCTION_RE)
        module_re = re.compile(PHP_MODULE_RE)
        modules = {}
        for file in get_php_references():
            module = ''
            for line in open(file):
                if not module:
                    search = module_re.search(line)
                    if search:
                        module = search.group(1)
                        modules[module] = []
                elif '<h2>Table of Contents</h2>' in line:
                    for match in function_re.finditer(line):
                        fn = match.group(1)
                        if '->' not in fn and '::' not in fn:
                            modules[module].append(fn)
                    # These are dummy manual pages, not actual functions
                    if module == 'PHP Options/Info':
                        modules[module].remove('main')
                    elif module == 'Filesystem':
                        modules[module].remove('delete')
                    if not modules[module]:
                        del modules[module]
                    break
        return modules

    def get_php_references():
        download = urllib.request.urlretrieve(PHP_MANUAL_URL)
        tar = tarfile.open(download[0])
        tar.extractall()
        tar.close()
        for file in glob.glob("%s%s" % (PHP_MANUAL_DIR, PHP_REFERENCE_GLOB)):
            yield file
        os.remove(download[0])

    def regenerate(filename, modules):
        f = open(filename)
        try:
            content = f.read()
        finally:
            f.close()
        header = content[:content.find('MODULES = {')]
        footer = content[content.find("if __name__ == '__main__':"):]
        f = open(filename, 'w')
        f.write(header)
        f.write('MODULES = %s\n\n' % pprint.pformat(modules))
        f.write(footer)
        f.close()

    def run():
        print('>> Downloading Function Index')
        modules = get_php_functions()
        total = sum(len(v) for v in modules.values())
        print('%d functions found' % total)
        regenerate(__file__, modules)
        shutil.rmtree(PHP_MANUAL_DIR)

    run()
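
# --- Illustrative sketch (added for clarity; not part of the generated file) ---
# A minimal, self-contained demo of the scraping rule used above: the module
# name comes from the page <title>, and function names come from the anchors
# on the "Table of Contents" line. The sample HTML below is invented.
def _demo_manual_parse():
    import re
    sample_title = '<title>Zip Functions</title>'
    sample_toc = ('<h2>Table of Contents</h2>'
                  '<a href="function.zip-open.html">zip_open</a>'
                  '<a href="function.zip-read.html">zip_read</a>')
    module = re.search('<title>(.*?) Functions</title>', sample_title).group(1)
    functions = re.findall('<a href="function\..*?\.html">(.*?)</a>', sample_toc)
    assert module == 'Zip'
    assert functions == ['zip_open', 'zip_read']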
| mit |
scottrice/Ice | ice/tasks/engine.py | 1 | 1118 | # encoding: utf-8
import os

from pysteam import paths as steam_paths
from pysteam import shortcuts
from pysteam import steam as steam_module

from ice import backups
from ice import configuration
from ice import consoles
from ice import emulators
from ice import paths
from ice import settings
from ice.logs import logger
from ice.persistence.config_file_backing_store import ConfigFileBackingStore


class TaskEngine(object):

  def __init__(self, steam):
    self.steam = steam

    logger.debug("Initializing Ice")
    # We want to ignore the anonymous context, because there's no reason to
    # sync ROMs for it since you can't log in as that user.
    is_user_context = lambda context: context.user_id != 'anonymous'
    self.users = filter(is_user_context, steam_module.local_user_contexts(self.steam))

  def run(self, tasks, app_settings, dry_run=False):
    if self.steam is None:
      logger.error("Cannot run Ice because Steam doesn't appear to be installed")
      return

    logger.info("=========== Starting Ice ===========")
    for task in tasks:
      task(app_settings, self.users, dry_run=dry_run)
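
# Hedged usage sketch (added; the get_steam() locator is an assumption about
# the pysteam API, not something this file documents):
#
#   steam = steam_module.get_steam()
#   engine = TaskEngine(steam)
#   engine.run(tasks, app_settings, dry_run=True)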
| mit |
Peddle/hue | desktop/core/ext-py/Django-1.6.10/django/utils/image.py | 98 | 4983 | # -*- coding: utf-8 -*-
"""
To provide a shim layer over Pillow/PIL situation until the PIL support is
removed.
Combinations To Account For
===========================
* Pillow:
* never has ``_imaging`` under any Python
* has the ``Image.alpha_composite``, which may aid in detection
* PIL
* CPython 2.x may have _imaging (& work)
* CPython 2.x may *NOT* have _imaging (broken & needs a error message)
* CPython 3.x doesn't work
* PyPy will *NOT* have _imaging (but works?)
* On some platforms (Homebrew and RHEL6 reported) _imaging isn't available,
the needed import is from PIL import _imaging (refs #21355)
Restated, that looks like:
* If we're on Python 2.x, it could be either Pillow or PIL:
* If ``import _imaging`` results in ``ImportError``, either they have a
working Pillow installation or a broken PIL installation, so we need to
detect further:
* To detect, we first ``import Image``.
* If ``Image`` has a ``alpha_composite`` attribute present, only Pillow
has this, so we assume it's working.
* If ``Image`` DOES NOT have a ``alpha_composite``attribute, it must be
PIL & is a broken (likely C compiler-less) install, which we need to
warn the user about.
* If ``import _imaging`` works, it must be PIL & is a working install.
* Python 3.x
* If ``import Image`` works, it must be Pillow, since PIL isn't Python 3.x
compatible.
* PyPy
* If ``import _imaging`` results in ``ImportError``, it could be either
Pillow or PIL, both of which work without it on PyPy, so we're fine.
Approach
========
* Attempt to import ``Image``
* ``ImportError`` - nothing is installed, toss an exception
* Either Pillow or the PIL is installed, so continue detecting
* Attempt to ``hasattr(Image, 'alpha_composite')``
* If it works, it's Pillow & working
* If it fails, we've got a PIL install, continue detecting
* The only option here is that we're on Python 2.x or PyPy, of which
we only care about if we're on CPython.
* If we're on CPython, attempt to ``from PIL import _imaging`` and
``import _imaging``
* ``ImportError`` - Bad install, toss an exception
"""
from __future__ import unicode_literals

import warnings

from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _


Image = None
_imaging = None
ImageFile = None


def _detect_image_library():
    global Image
    global _imaging
    global ImageFile

    # Skip re-attempting to import if we've already run detection.
    if Image is not None:
        return Image, _imaging, ImageFile

    # Assume it's not there.
    PIL_imaging = False

    try:
        # Try from the Pillow (or one variant of PIL) install location first.
        from PIL import Image as PILImage
    except ImportError as err:
        try:
            # If that failed, try the alternate import syntax for PIL.
            import Image as PILImage
        except ImportError as err:
            # Neither worked, so it's likely not installed.
            raise ImproperlyConfigured(
                _("Neither Pillow nor PIL could be imported: %s") % err
            )

    # ``Image.alpha_composite`` was added to Pillow in SHA: e414c6 & is not
    # available in any version of the PIL.
    if hasattr(PILImage, 'alpha_composite'):
        PIL_imaging = False
    else:
        # We're dealing with the PIL. Determine if we're on CPython & if
        # ``_imaging`` is available.
        import platform

        # This is the Alex Approved™ way.
        # See http://mail.python.org/pipermail//pypy-dev/2011-November/008739.html
        if platform.python_implementation().lower() == 'cpython':
            # We're on CPython (likely 2.x). Since a C compiler is needed to
            # produce a fully-working PIL & will create a ``_imaging`` module,
            # we'll attempt to import it to verify their kit works.
            try:
                from PIL import _imaging as PIL_imaging
            except ImportError:
                try:
                    import _imaging as PIL_imaging
                except ImportError as err:
                    raise ImproperlyConfigured(
                        _("The '_imaging' module for the PIL could not be "
                          "imported: %s") % err
                    )

    # Try to import ImageFile as well.
    try:
        from PIL import ImageFile as PILImageFile
    except ImportError:
        import ImageFile as PILImageFile

    # Finally, warn about deprecation...
    if PIL_imaging is not False:
        warnings.warn(
            "Support for the PIL will be removed in Django 1.8. Please " +
            "uninstall it & install Pillow instead.",
            PendingDeprecationWarning
        )

    return PILImage, PIL_imaging, PILImageFile

Image, _imaging, ImageFile = _detect_image_library()
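
# --- Illustrative sketch (added; not Django code) ---
# The docstring's core detection rule, reduced to one line: Pillow grew
# Image.alpha_composite, while the classic PIL never did.
def _looks_like_pillow(image_module):
    return hasattr(image_module, 'alpha_composite')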
| apache-2.0 |
glidernet/python-ogn-client | tests/client/test_AprsClient.py | 1 | 6185 | import unittest
import unittest.mock as mock

from ogn.parser import parse

from ogn.client.client import create_aprs_login, AprsClient
from ogn.client.settings import APRS_APP_NAME, APRS_APP_VER, APRS_KEEPALIVE_TIME


class AprsClientTest(unittest.TestCase):
    def test_create_aprs_login(self):
        basic_login = create_aprs_login('klaus', -1, 'myApp', '0.1')
        self.assertEqual('user klaus pass -1 vers myApp 0.1\n', basic_login)

        login_with_filter = create_aprs_login('klaus', -1, 'myApp', '0.1', 'r/48.0/11.0/100')
        self.assertEqual('user klaus pass -1 vers myApp 0.1 filter r/48.0/11.0/100\n', login_with_filter)

    def test_initialisation(self):
        client = AprsClient(aprs_user='testuser', aprs_filter='')
        self.assertEqual(client.aprs_user, 'testuser')
        self.assertEqual(client.aprs_filter, '')

    @mock.patch('ogn.client.client.socket')
    def test_connect_full_feed(self, mock_socket):
        client = AprsClient(aprs_user='testuser', aprs_filter='')
        client.connect()
        client.sock.send.assert_called_once_with('user testuser pass -1 vers {} {}\n'.format(
            APRS_APP_NAME, APRS_APP_VER).encode('ascii'))
        client.sock.makefile.assert_called_once_with('rb')

    @mock.patch('ogn.client.client.socket')
    def test_connect_client_defined_filter(self, mock_socket):
        client = AprsClient(aprs_user='testuser', aprs_filter='r/50.4976/9.9495/100')
        client.connect()
        client.sock.send.assert_called_once_with('user testuser pass -1 vers {} {} filter r/50.4976/9.9495/100\n'.format(
            APRS_APP_NAME, APRS_APP_VER).encode('ascii'))
        client.sock.makefile.assert_called_once_with('rb')

    @mock.patch('ogn.client.client.socket')
    def test_disconnect(self, mock_socket):
        client = AprsClient(aprs_user='testuser', aprs_filter='')
        client.connect()
        client.disconnect()
        client.sock.shutdown.assert_called_once_with(0)
        client.sock.close.assert_called_once_with()
        self.assertTrue(client._kill)

    @mock.patch('ogn.client.client.socket')
    def test_run(self, mock_socket):
        import socket
        mock_socket.error = socket.error

        client = AprsClient(aprs_user='testuser', aprs_filter='')
        client.connect()
        client.sock_file.readline = mock.MagicMock()
        client.sock_file.readline.side_effect = [b'Normal text blabla',
                                                 b'my weird character \xc2\xa5',
                                                 UnicodeDecodeError('funnycodec', b'\x00\x00', 1, 2, 'This is just a fake reason!'),
                                                 b'... show must go on',
                                                 BrokenPipeError(),
                                                 b'... and on',
                                                 ConnectionResetError(),
                                                 b'... and on',
                                                 socket.error(),
                                                 b'... and on',
                                                 b'',
                                                 b'... and on',
                                                 KeyboardInterrupt()]
        try:
            client.run(callback=lambda msg: print("got: {}".format(msg)), autoreconnect=True)
        except KeyboardInterrupt:
            pass
        finally:
            client.disconnect()

    @mock.patch('ogn.client.client.time')
    @mock.patch('ogn.client.client.socket')
    def test_run_keepalive(self, mock_socket, mock_time):
        import socket
        mock_socket.error = socket.error

        client = AprsClient(aprs_user='testuser', aprs_filter='')
        client.connect()
        client.sock_file.readline = mock.MagicMock()
        client.sock_file.readline.side_effect = [b'Normal text blabla',
                                                 KeyboardInterrupt()]
        mock_time.side_effect = [0, 0, APRS_KEEPALIVE_TIME + 1, APRS_KEEPALIVE_TIME + 1]

        timed_callback = mock.MagicMock()
        try:
            client.run(callback=lambda msg: print("got: {}".format(msg)), timed_callback=timed_callback)
        except KeyboardInterrupt:
            pass
        finally:
            client.disconnect()
        timed_callback.assert_called_with(client)

    def test_reset_kill_reconnect(self):
        client = AprsClient(aprs_user='testuser', aprs_filter='')
        client.connect()

        # .run() should be allowed to execute after .connect()
        mock_callback = mock.MagicMock(
            side_effect=lambda raw_msg: client.disconnect())
        self.assertFalse(client._kill)
        client.run(callback=mock_callback, autoreconnect=True)

        # After .disconnect(), client._kill should be True
        self.assertTrue(client._kill)
        self.assertEqual(mock_callback.call_count, 1)

        # After we reconnect, .run() should be able to run again
        mock_callback.reset_mock()
        client.connect()
        client.run(callback=mock_callback, autoreconnect=True)
        self.assertEqual(mock_callback.call_count, 1)

    def test_50_live_messages(self):
        print("Enter")
        self.remaining_messages = 50

        def process_message(raw_message):
            if raw_message[0] == '#':
                return
            try:
                message = parse(raw_message)
                print("{}: {}".format(message['aprs_type'], raw_message))
            except NotImplementedError as e:
                print("{}: {}".format(e, raw_message))
                return
            if self.remaining_messages > 0:
                self.remaining_messages -= 1
            else:
                raise KeyboardInterrupt

        client = AprsClient(aprs_user='testuser', aprs_filter='')
        client.connect()
        try:
            client.run(callback=process_message, autoreconnect=True)
        except KeyboardInterrupt:
            pass
        finally:
            client.disconnect()
        self.assertTrue(True)
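

# --- Illustrative sketch (added; mirrors the login-line format asserted in
# test_create_aprs_login above, with the optional trailing filter) ---
def _demo_login_line(user, passcode, app, ver, aprs_filter=None):
    line = 'user {} pass {} vers {} {}'.format(user, passcode, app, ver)
    if aprs_filter:
        line += ' filter {}'.format(aprs_filter)
    return line + '\n'

assert _demo_login_line('klaus', -1, 'myApp', '0.1') == 'user klaus pass -1 vers myApp 0.1\n'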
| agpl-3.0 |
mujiansu/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Tools/pynche/ChipViewer.py | 94 | 4998 | """Chip viewer and widget.
In the lower left corner of the main Pynche window, you will see two
ChipWidgets, one for the selected color and one for the nearest color. The
selected color is the actual RGB value expressed as an X11 #COLOR name. The
nearest color is the named color from the X11 database that is closest to the
selected color in 3D space. There may be other colors equally close, but the
nearest one is the first one found.
Clicking on the nearest color chip selects that named color.
The ChipViewer class includes the entire lower left quadrant; i.e. both the
selected and nearest ChipWidgets.
"""
from Tkinter import *
import ColorDB


class ChipWidget:
    _WIDTH = 150
    _HEIGHT = 80

    def __init__(self,
                 master = None,
                 width = _WIDTH,
                 height = _HEIGHT,
                 text = 'Color',
                 initialcolor = 'blue',
                 presscmd = None,
                 releasecmd = None):
        # create the text label
        self.__label = Label(master, text=text)
        self.__label.grid(row=0, column=0)
        # create the color chip, implemented as a frame
        self.__chip = Frame(master, relief=RAISED, borderwidth=2,
                            width=width,
                            height=height,
                            background=initialcolor)
        self.__chip.grid(row=1, column=0)
        # create the color name
        self.__namevar = StringVar()
        self.__namevar.set(initialcolor)
        self.__name = Entry(master, textvariable=self.__namevar,
                            relief=FLAT, justify=CENTER, state=DISABLED,
                            font=self.__label['font'])
        self.__name.grid(row=2, column=0)
        # create the message area
        self.__msgvar = StringVar()
        self.__name = Entry(master, textvariable=self.__msgvar,
                            relief=FLAT, justify=CENTER, state=DISABLED,
                            font=self.__label['font'])
        self.__name.grid(row=3, column=0)
        # set bindings
        if presscmd:
            self.__chip.bind('<ButtonPress-1>', presscmd)
        if releasecmd:
            self.__chip.bind('<ButtonRelease-1>', releasecmd)

    def set_color(self, color):
        self.__chip.config(background=color)

    def get_color(self):
        return self.__chip['background']

    def set_name(self, colorname):
        self.__namevar.set(colorname)

    def set_message(self, message):
        self.__msgvar.set(message)

    def press(self):
        self.__chip.configure(relief=SUNKEN)

    def release(self):
        self.__chip.configure(relief=RAISED)


class ChipViewer:
    def __init__(self, switchboard, master=None):
        self.__sb = switchboard
        self.__frame = Frame(master, relief=RAISED, borderwidth=1)
        self.__frame.grid(row=3, column=0, ipadx=5, sticky='NSEW')
        # create the chip that will display the currently selected color
        # exactly
        self.__sframe = Frame(self.__frame)
        self.__sframe.grid(row=0, column=0)
        self.__selected = ChipWidget(self.__sframe, text='Selected')
        # create the chip that will display the nearest real X11 color
        # database color name
        self.__nframe = Frame(self.__frame)
        self.__nframe.grid(row=0, column=1)
        self.__nearest = ChipWidget(self.__nframe, text='Nearest',
                                    presscmd = self.__buttonpress,
                                    releasecmd = self.__buttonrelease)

    def update_yourself(self, red, green, blue):
        # Selected always shows the #rrggbb name of the color, nearest always
        # shows the name of the nearest color in the database.  BAW: should
        # an exact match be indicated in some way?
        #
        # Always use the #rrggbb style to actually set the color, since we may
        # not be using X color names (e.g. "web-safe" names)
        colordb = self.__sb.colordb()
        rgbtuple = (red, green, blue)
        rrggbb = ColorDB.triplet_to_rrggbb(rgbtuple)
        # find the nearest
        nearest = colordb.nearest(red, green, blue)
        nearest_tuple = colordb.find_byname(nearest)
        nearest_rrggbb = ColorDB.triplet_to_rrggbb(nearest_tuple)
        self.__selected.set_color(rrggbb)
        self.__nearest.set_color(nearest_rrggbb)
        # set the name and messages areas
        self.__selected.set_name(rrggbb)
        if rrggbb == nearest_rrggbb:
            self.__selected.set_message(nearest)
        else:
            self.__selected.set_message('')
        self.__nearest.set_name(nearest_rrggbb)
        self.__nearest.set_message(nearest)

    def __buttonpress(self, event=None):
        self.__nearest.press()

    def __buttonrelease(self, event=None):
        self.__nearest.release()
        rrggbb = self.__nearest.get_color()
        red, green, blue = ColorDB.rrggbb_to_triplet(rrggbb)
        self.__sb.update_views(red, green, blue)
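

# --- Illustrative sketch (added; ColorDB owns the real lookup) ---
# The module docstring's "closest ... in 3D space" is plain squared Euclidean
# distance over (r, g, b); min() returns the first of any ties, matching
# "the nearest one is the first one found".
def _nearest_demo(target, named_colors):
    # named_colors: iterable of (name, (r, g, b)) pairs
    return min(named_colors,
               key=lambda pair: sum((a - b) ** 2
                                    for a, b in zip(pair[1], target)))[0]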
| apache-2.0 |
TheWitchers/Team | TestingArea/TESTZONE_methods.py | 1 | 1111 | __author__ = 'dvir'
import tkFileDialog
import sqlite3

conn = sqlite3.connect(tkFileDialog.askopenfilename())
c = conn.cursor()


# using example db
def ex_show_purch(price):
    l = []
    for row in c.execute("SELECT symbol FROM stocks WHERE price > " + str(price) + ""):
        print row
        l.append(row)
    print l
    return l

ex_show_purch(raw_input("Enter Price: "))


# for project db
def show_purch(name):
    l = []
    for row in c.execute("SELECT * FROM Purchaseses WHERE nickname = '" + name + "'"):
        print row
        l.append(row)
    print l
    return l


def correct_user(id, pas):
    # a cursor has no len(); fetch the rows before counting them
    rows = c.execute("SELECT * FROM Users WHERE username = '" + id + "' AND password = '" + pas + "'").fetchall()
    if len(rows) > 0:
        print "user exists"
    else:
        print "user does not exist"


def has_inf(username, col, tbl, info):
    # the original body referenced an undefined name `id`; take the username
    # explicitly instead (the `tbl` argument is currently unused)
    rows = c.execute(
        "SELECT '" + col + "' FROM Users WHERE username = '" + username + "' AND '" + col + "' = '" + info + "'").fetchall()
    if len(rows) > 0:
        print col + " already exists"
    else:
        print col + " is OK"
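

# Hedged sketch (added; table/column names are taken from the queries above):
# the same lookup with a parameterized query, which sidesteps the SQL
# injection risk of the string-concatenated versions.
def show_purch_safe(name):
    c.execute("SELECT * FROM Purchaseses WHERE nickname = ?", (name,))
    return c.fetchall()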
| gpl-2.0 |
plaes/numpy | numpy/distutils/core.py | 3 | 8524 |
import sys
from distutils.core import *

if 'setuptools' in sys.modules:
    have_setuptools = True
    from setuptools import setup as old_setup
    # easy_install imports math, it may be picked up from cwd
    from setuptools.command import easy_install
    try:
        # very old versions of setuptools don't have this
        from setuptools.command import bdist_egg
    except ImportError:
        have_setuptools = False
else:
    from distutils.core import setup as old_setup
    have_setuptools = False

import warnings
import distutils.core
import distutils.dist

from numpy.distutils.extension import Extension
from numpy.distutils.numpy_distribution import NumpyDistribution
from numpy.distutils.command import config, config_compiler, \
    build, build_py, build_ext, build_clib, build_src, build_scripts, \
    sdist, install_data, install_headers, install, bdist_rpm, scons, \
    install_clib
from numpy.distutils.misc_util import get_data_files, is_sequence, is_string

numpy_cmdclass = {'build': build.build,
                  'build_src': build_src.build_src,
                  'build_scripts': build_scripts.build_scripts,
                  'config_cc': config_compiler.config_cc,
                  'config_fc': config_compiler.config_fc,
                  'config': config.config,
                  'build_ext': build_ext.build_ext,
                  'build_py': build_py.build_py,
                  'build_clib': build_clib.build_clib,
                  'sdist': sdist.sdist,
                  'scons': scons.scons,
                  'install_data': install_data.install_data,
                  'install_headers': install_headers.install_headers,
                  'install_clib': install_clib.install_clib,
                  'install': install.install,
                  'bdist_rpm': bdist_rpm.bdist_rpm,
                  }

if have_setuptools:
    # Use our own versions of develop and egg_info to ensure that build_src is
    # handled appropriately.
    from numpy.distutils.command import develop, egg_info
    numpy_cmdclass['bdist_egg'] = bdist_egg.bdist_egg
    numpy_cmdclass['develop'] = develop.develop
    numpy_cmdclass['easy_install'] = easy_install.easy_install
    numpy_cmdclass['egg_info'] = egg_info.egg_info

def _dict_append(d, **kws):
    for k,v in kws.items():
        if k not in d:
            d[k] = v
            continue
        dv = d[k]
        if isinstance(dv, tuple):
            d[k] = dv + tuple(v)
        elif isinstance(dv, list):
            d[k] = dv + list(v)
        elif isinstance(dv, dict):
            _dict_append(dv, **v)
        elif is_string(dv):
            d[k] = dv + v
        else:
            raise TypeError, repr(type(dv))
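
# Illustrative example (added; values are assumed) of the merge rules above:
#   d = {'libraries': ['m'], 'define_macros': {'A': 1}, 'name': 'np'}
#   _dict_append(d, libraries=['z'], define_macros={'B': 2}, name='.core')
# leaves d == {'libraries': ['m', 'z'],
#              'define_macros': {'A': 1, 'B': 2},
#              'name': 'np.core'}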
def _command_line_ok(_cache=[]):
    """ Return True if command line does not contain any
    help or display requests.
    """
    if _cache:
        return _cache[0]
    ok = True
    display_opts = ['--'+n for n in Distribution.display_option_names]
    for o in Distribution.display_options:
        if o[1]:
            display_opts.append('-'+o[1])
    for arg in sys.argv:
        if arg.startswith('--help') or arg=='-h' or arg in display_opts:
            ok = False
            break
    _cache.append(ok)
    return ok

def get_distribution(always=False):
    dist = distutils.core._setup_distribution
    # XXX Hack to get numpy installable with easy_install.
    # The problem is easy_install runs it's own setup(), which
    # sets up distutils.core._setup_distribution. However,
    # when our setup() runs, that gets overwritten and lost.
    # We can't use isinstance, as the DistributionWithoutHelpCommands
    # class is local to a function in setuptools.command.easy_install
    if dist is not None and \
            'DistributionWithoutHelpCommands' in repr(dist):
        #raise NotImplementedError("setuptools not supported yet for numpy.scons branch")
        dist = None
    if always and dist is None:
        dist = NumpyDistribution()
    return dist

def _exit_interactive_session(_cache=[]):
    if _cache:
        return # been here
    _cache.append(1)
    print '-'*72
    raw_input('Press ENTER to close the interactive session..')
    print '='*72

def setup(**attr):

    if len(sys.argv)<=1 and not attr.get('script_args',[]):
        from interactive import interactive_sys_argv
        import atexit
        atexit.register(_exit_interactive_session)
        sys.argv[:] = interactive_sys_argv(sys.argv)
        if len(sys.argv)>1:
            return setup(**attr)

    cmdclass = numpy_cmdclass.copy()

    new_attr = attr.copy()
    if 'cmdclass' in new_attr:
        cmdclass.update(new_attr['cmdclass'])
    new_attr['cmdclass'] = cmdclass

    if 'configuration' in new_attr:
        # To avoid calling configuration if there are any errors
        # or help request in command in the line.
        configuration = new_attr.pop('configuration')

        old_dist = distutils.core._setup_distribution
        old_stop = distutils.core._setup_stop_after
        distutils.core._setup_distribution = None
        distutils.core._setup_stop_after = "commandline"
        try:
            dist = setup(**new_attr)
        finally:
            distutils.core._setup_distribution = old_dist
            distutils.core._setup_stop_after = old_stop
        if dist.help or not _command_line_ok():
            # probably displayed help, skip running any commands
            return dist

        # create setup dictionary and append to new_attr
        config = configuration()
        if hasattr(config,'todict'):
            config = config.todict()
        _dict_append(new_attr, **config)

    # Move extension source libraries to libraries
    libraries = []
    for ext in new_attr.get('ext_modules',[]):
        new_libraries = []
        for item in ext.libraries:
            if is_sequence(item):
                lib_name, build_info = item
                _check_append_ext_library(libraries, item)
                new_libraries.append(lib_name)
            elif is_string(item):
                new_libraries.append(item)
            else:
                raise TypeError("invalid description of extension module "
                                "library %r" % (item,))
        ext.libraries = new_libraries
    if libraries:
        if 'libraries' not in new_attr:
            new_attr['libraries'] = []
        for item in libraries:
            _check_append_library(new_attr['libraries'], item)

    # sources in ext_modules or libraries may contain header files
    if ('ext_modules' in new_attr or 'libraries' in new_attr) \
       and 'headers' not in new_attr:
        new_attr['headers'] = []

    # Use our custom NumpyDistribution class instead of distutils' one
    new_attr['distclass'] = NumpyDistribution

    return old_setup(**new_attr)

def _check_append_library(libraries, item):
    for libitem in libraries:
        if is_sequence(libitem):
            if is_sequence(item):
                if item[0]==libitem[0]:
                    if item[1] is libitem[1]:
                        return
                    warnings.warn("[0] libraries list contains %r with"
                                  " different build_info" % (item[0],))
                    break
            else:
                if item==libitem[0]:
                    warnings.warn("[1] libraries list contains %r with"
                                  " no build_info" % (item[0],))
                    break
        else:
            if is_sequence(item):
                if item[0]==libitem:
                    warnings.warn("[2] libraries list contains %r with"
                                  " no build_info" % (item[0],))
                    break
            else:
                if item==libitem:
                    return
    libraries.append(item)
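
# Behavior note (added, illustrative): appending ('fft', info) when a plain
# 'fft' string is already present emits warning [2] and still appends, while
# re-appending an identical (name, build_info) pair returns silently.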
def _check_append_ext_library(libraries, (lib_name,build_info)):
    for item in libraries:
        if is_sequence(item):
            if item[0]==lib_name:
                if item[1] is build_info:
                    return
                warnings.warn("[3] libraries list contains %r with"
                              " different build_info" % (lib_name,))
                break
        elif item==lib_name:
            warnings.warn("[4] libraries list contains %r with"
                          " no build_info" % (lib_name,))
            break
    libraries.append((lib_name,build_info))
| bsd-3-clause |
rudhir-upretee/SUMO_Src | tools/output/analyze_teleports.py | 2 | 2664 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file    analyze_teleports.py
@author  Jakob Erdmann
@date    2012-11-20
@version $Id: analyze_teleports.py 13106 2012-12-02 13:44:57Z behrisch $

Extract statistics from the warning outputs of a simulation run for plotting.

SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os,sys
import re
from collections import defaultdict


def parse_log(logfile, edges=True, aggregate=3600):
    print "Parsing %s" % logfile
    reFrom = re.compile("lane='([^']*)'")
    reTime = re.compile("time=(\d*)\.")
    # counts per lane
    waitingCounts = defaultdict(lambda:0)
    collisionCounts = defaultdict(lambda:0)
    # counts per step
    waitingStepCounts = defaultdict(lambda:0)
    collisionStepCounts = defaultdict(lambda:0)
    for line in open(logfile):
        try:
            if "Warning: Teleporting vehicle" in line:
                edge = reFrom.search(line).group(1)
                time = reTime.search(line).group(1)
                if edges:
                    edge = edge[:-2]
                if "collision" in line:
                    collisionCounts[edge] += 1
                    collisionStepCounts[int(time) / aggregate] += 1
                else:
                    waitingCounts[edge] += 1
                    waitingStepCounts[int(time) / aggregate] += 1
        except:
            print sys.exc_info()
            sys.exit("error when parsing line '%s'" % line)
    return (waitingCounts, collisionCounts,
            waitingStepCounts, collisionStepCounts)


def print_counts(countDict, label):
    counts = [(v,k) for k,v in countDict.items()]
    counts.sort()
    print counts
    print label, 'total:', sum(countDict.values())


def main(logfile):
    waitingCounts, collisionCounts, waitingStepCounts, collisionStepCounts = parse_log(logfile)
    print_counts(waitingCounts, 'waiting')
    print_counts(collisionCounts, 'collisions')
    # generate plot
    min_step = min(min(waitingStepCounts.keys()),
                   min(collisionStepCounts.keys()))
    max_step = max(max(waitingStepCounts.keys()),
                   max(collisionStepCounts.keys()))
    plotfile = logfile + '.plot'
    with open(plotfile, 'w') as f:
        f.write("# plot '%s' using 1:2 with lines title 'waiting', '%s' using 1:3 with lines title 'collisions'\n" % (
            plotfile, plotfile))
        for step in range(min_step, max_step + 1):
            print >>f, ' '.join(map(str,[step, waitingStepCounts[step], collisionStepCounts[step]]))
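
# Hedged usage sketch (added; file names are examples only): after a run such
# as
#   python analyze_teleports.py sumo.log
# the first line of sumo.log.plot carries a ready-made gnuplot command, e.g.
#   gnuplot -persist -e "plot 'sumo.log.plot' using 1:2 with lines title 'waiting', 'sumo.log.plot' using 1:3 with lines title 'collisions'"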
if __name__ == "__main__":
    main(*sys.argv[1:])
| gpl-3.0 |
kagayakidan/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))  # size=, not loc=: we want noise, not an offset
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]  # // keeps the indices integral
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
cyberthrone/pupy | pupy/modules/socks5proxy.py | 19 | 7572 | # -*- coding: UTF8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER (contact@n1nj4.eu)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
#RFC @https://www.ietf.org/rfc/rfc1928.txt
from pupylib.PupyModule import *
import StringIO
import pupylib.utils
import SocketServer
import threading
import socket
import logging
import struct
import traceback
import time
__class_name__="Socks5Proxy"
CODE_SUCCEEDED='\x00'
CODE_GENERAL_SRV_FAILURE='\x01'
CODE_CONN_NOT_ALLOWED='\x02'
CODE_NET_NOT_REACHABLE='\x03'
CODE_HOST_UNREACHABLE='\x04'
CODE_CONN_REFUSED='\x05'
CODE_TTL_EXPIRED='\x06'
CODE_COMMAND_NOT_SUPPORTED='\x07'
CODE_ADDRESS_TYPE_NOT_SUPPORTED='\x08'
CODE_UNASSIGNED='\x09'
class SocketPiper(threading.Thread):
def __init__(self, read_sock, write_sock):
threading.Thread.__init__(self)
self.daemon=True
self.read_sock=read_sock
self.write_sock=write_sock
def run(self):
try:
self.read_sock.setblocking(0)
while True:
data=""
try:
data+=self.read_sock.recv(1000000)
if not data:
break
except Exception as e:
                    if e[0]==9: # errno 9 (EBADF): the socket has been closed
break
if not data:
time.sleep(0.05)
continue
self.write_sock.sendall(data)
except Exception as e:
logging.debug("error in socket piper: %s"%str(traceback.format_exc()))
finally:
try:
self.write_sock.shutdown(socket.SHUT_RDWR)
self.write_sock.close()
except Exception:
pass
try:
self.read_sock.shutdown(socket.SHUT_RDWR)
self.read_sock.close()
except Exception:
pass
logging.debug("piper finished")
class Socks5RequestHandler(SocketServer.BaseRequestHandler):
def _socks_response(self, code, terminate=False):
ip="".join([chr(int(i)) for i in self.server.server_address[0].split(".")])
port=struct.pack("!H",self.server.server_address[1])
self.request.sendall("\x05"+code+"\x00"+"\x01"+ip+port)
if terminate:
self.request.shutdown(socket.SHUT_RDWR)
self.request.close()
def handle(self):
self.request.settimeout(5)
VER=self.request.recv(1)
NMETHODS=self.request.recv(1)
METHODS=self.request.recv(int(struct.unpack("!B",NMETHODS)[0]))
"""
o X'00' NO AUTHENTICATION REQUIRED
o X'01' GSSAPI
o X'02' USERNAME/PASSWORD
o X'03' to X'7F' IANA ASSIGNED
o X'80' to X'FE' RESERVED FOR PRIVATE METHODS
o X'FF' NO ACCEPTABLE METHODS
"""
        #for now, only the "no authentication" method (\x00) is supported:
self.request.sendall("\x05\x00")
VER=self.request.recv(1)
if VER!="\x05":
logging.debug("receiving unsuported socks version: %s"%VER.encode('hex'))
self._socks_response(CODE_GENERAL_SRV_FAILURE, terminate=True)
return
CMD=self.request.recv(1)
if CMD!="\x01": # we only support CONNECT for now
logging.debug("receiving unsuported socks CMD: %s"%CMD.encode('hex'))
self._socks_response(CODE_COMMAND_NOT_SUPPORTED, terminate=True)
return
RSV=self.request.recv(1)
DST_ADDR=None
DST_PORT=None
ATYP=self.request.recv(1)
if ATYP=="\x01":
DST_ADDR=".".join([str(ord(x)) for x in self.request.recv(4)])
DST_PORT=struct.unpack("!H",self.request.recv(2))[0]
elif ATYP=="\x03":
DOMAIN_LEN=int(struct.unpack("!B",self.request.recv(1))[0])
DST_ADDR=self.request.recv(DOMAIN_LEN)
DST_PORT=struct.unpack("!H",self.request.recv(2))[0]
else: #TODO: ipv6
logging.debug("atyp not supported: %s"%ATYP.encode('hex'))
self._socks_response(CODE_ADDRESS_TYPE_NOT_SUPPORTED, terminate=True)
return
        #now we have all we need, we can open the socket proxied through rpyc :)
logging.debug("connecting to %s:%s through the rpyc client"%(DST_ADDR,DST_PORT))
rsocket_mod=self.server.rpyc_client.conn.modules.socket
rsocket=rsocket_mod.socket(rsocket_mod.AF_INET,rsocket_mod.SOCK_STREAM)
rsocket.settimeout(5)
try:
rsocket.connect((DST_ADDR, DST_PORT))
except Exception as e:
logging.debug("error: %s"%e)
if e[0]==10060:
logging.debug("unreachable !")
self._socks_response(CODE_HOST_UNREACHABLE, terminate=True)
else:
self._socks_response(CODE_NET_NOT_REACHABLE, terminate=True)
return
self._socks_response(CODE_SUCCEEDED)
logging.debug("connection succeeded !")
#self.request.settimeout(30)
#rsocket.settimeout(30)
sp1=SocketPiper(self.request, rsocket)
sp2=SocketPiper(rsocket, self.request)
sp1.start()
sp2.start()
sp1.join()
sp2.join()
logging.debug("conn to %s:%s closed"%(DST_ADDR,DST_PORT))
class Socks5Server(SocketServer.TCPServer):
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True, rpyc_client=None):
self.rpyc_client=rpyc_client
SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
class ThreadedSocks5Server(SocketServer.ThreadingMixIn, Socks5Server):
pass
class Socks5Proxy(PupyModule):
""" start a socks5 proxy going through a client """
max_clients=1
unique_instance=True
daemon=True
server=None
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog='socks5proxy', description=self.__doc__)
self.arg_parser.add_argument('-p', '--port', default='1080')
self.arg_parser.add_argument('action', choices=['start','stop'])
def stop_daemon(self):
self.success("shuting down socks server ...")
if self.server:
self.server.shutdown()
del self.server
self.success("socks server shut down")
else:
self.error("server is None")
def run(self, args):
if args.action=="start":
if self.server is None:
self.success("starting server ...")
self.server = ThreadedSocks5Server(("127.0.0.1", int(args.port)), Socks5RequestHandler, rpyc_client=self.client)
t=threading.Thread(target=self.server.serve_forever)
t.daemon=True
t.start()
self.success("socks5 server started on 127.0.0.1:%s"%args.port)
else:
self.error("socks5 server is already started !")
elif args.action=="stop":
if self.server:
self.job.stop()
del self.job
self.success("socks5 server stopped !")
else:
self.error("socks5 server is already stopped")
| bsd-3-clause |
attilahorvath/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/validatereviewer.py | 119 | 2894 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import sys
from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
# FIXME: Some of this logic should probably be unified with CommitterValidator?
class ValidateReviewer(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
]
def run(self, state):
        # FIXME: For now we disable this check when a user is driving the script;
        # this check is too draconian (and too poorly tested) to foist upon users.
if not self._options.non_interactive:
return
for changelog_path in self.cached_lookup(state, "changelogs"):
changelog_entry = ChangeLog(changelog_path).latest_entry()
if changelog_entry.has_valid_reviewer():
continue
reviewer_text = changelog_entry.reviewer_text()
if reviewer_text:
_log.info("%s found in %s does not appear to be a valid reviewer according to committers.py." % (reviewer_text, changelog_path))
_log.error('%s neither lists a valid reviewer nor contains the string "Unreviewed" or "Rubber stamp" (case insensitive).' % changelog_path)
sys.exit(1)
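# For reference, a sketch of the ChangeLog forms this step accepts:
#
#     Reviewed by <a reviewer listed in committers.py>.   -> has_valid_reviewer()
#     Unreviewed build fix.                               -> "Unreviewed" escape hatch
#     Rubber stamped by <name>.                           -> "Rubber stamp" escape hatch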
| bsd-3-clause |
avinashkunuje/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py | 124 | 3195 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.multicommandtool import Command
class AddUsersToGroups(Command):
name = "add-users-to-groups"
help_text = "Add users matching subtring to specified groups"
# This probably belongs in bugzilla.py
known_groups = ['canconfirm', 'editbugs']
def execute(self, options, args, tool):
search_string = args[0]
# FIXME: We could allow users to specify groups on the command line.
list_title = 'Add users matching "%s" which groups?' % search_string
# FIXME: Need a way to specify that "none" is not allowed.
# FIXME: We could lookup what groups the current user is able to grant from bugzilla.
groups = tool.user.prompt_with_list(list_title, self.known_groups, can_choose_multiple=True)
if not groups:
print "No groups specified."
return
login_userid_pairs = tool.bugs.queries.fetch_login_userid_pairs_matching_substring(search_string)
if not login_userid_pairs:
print "No users found matching '%s'" % search_string
return
print "Found %s users matching %s:" % (len(login_userid_pairs), search_string)
for (login, user_id) in login_userid_pairs:
print "%s (%s)" % (login, user_id)
confirm_message = "Are you sure you want add %s users to groups %s? (This action cannot be undone using webkit-patch.)" % (len(login_userid_pairs), groups)
if not tool.user.confirm(confirm_message):
return
for (login, user_id) in login_userid_pairs:
print "Adding %s to %s" % (login, groups)
tool.bugs.add_user_to_groups(user_id, groups)
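# A sketch of the expected flow (the substring is illustrative, and the
# command is presumably dispatched via webkit-patch):
#
#     webkit-patch add-users-to-groups example.com
#
# prompts for one or more of known_groups, lists the matching Bugzilla
# accounts, asks for confirmation, then calls add_user_to_groups() per user.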
| bsd-3-clause |
wendlers/edubot-snap | ext/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
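# A minimal usage sketch (the byte values are assumed to be EUC-JP encoded
# hiragana; treat them as illustrative):
#
#     prober = EUCJPProber()
#     prober.feed(b"\xa4\xb3\xa4\xf3\xa4\xcb\xa4\xc1\xa4\xcf")
#     prober.get_charset_name()   # -> "EUC-JP"
#     prober.get_confidence()     # -> a float in [0, 1]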
| mit |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/isapi/install.py | 17 | 23014 | """Installation utilities for Python ISAPI filters and extensions."""
# this code adapted from "Tomcat JK2 ISAPI redirector", part of Apache
# Created July 2004, Mark Hammond.
import sys, os, imp, shutil, stat
from win32com.client import GetObject, Dispatch
from win32com.client.gencache import EnsureModule, EnsureDispatch
import pythoncom
import winerror
import traceback
_APP_INPROC = 0
_APP_OUTPROC = 1
_APP_POOLED = 2
_IIS_OBJECT = "IIS://LocalHost/W3SVC"
_IIS_SERVER = "IIsWebServer"
_IIS_WEBDIR = "IIsWebDirectory"
_IIS_WEBVIRTUALDIR = "IIsWebVirtualDir"
_IIS_FILTERS = "IIsFilters"
_IIS_FILTER = "IIsFilter"
_DEFAULT_SERVER_NAME = "Default Web Site"
_DEFAULT_HEADERS = "X-Powered-By: Python"
_DEFAULT_PROTECTION = _APP_POOLED
# Default is for 'execute' only access - ie, only the extension
# can be used. This can be overridden via your install script.
_DEFAULT_ACCESS_EXECUTE = True
_DEFAULT_ACCESS_READ = False
_DEFAULT_ACCESS_WRITE = False
_DEFAULT_ACCESS_SCRIPT = False
_DEFAULT_CONTENT_INDEXED = False
_DEFAULT_ENABLE_DIR_BROWSING = False
_DEFAULT_ENABLE_DEFAULT_DOC = False
is_debug_build = False
for imp_ext, _, _ in imp.get_suffixes():
if imp_ext == "_d.pyd":
is_debug_build = True
break
this_dir = os.path.abspath(os.path.dirname(__file__))
class FilterParameters:
Name = None
Description = None
Path = None
Server = None
# Params that control if/how AddExtensionFile is called.
AddExtensionFile = True
AddExtensionFile_Enabled = True
AddExtensionFile_GroupID = None # defaults to Name
AddExtensionFile_CanDelete = True
AddExtensionFile_Description = None # defaults to Description.
def __init__(self, **kw):
self.__dict__.update(kw)
class VirtualDirParameters:
Name = None # Must be provided.
Description = None # defaults to Name
AppProtection = _DEFAULT_PROTECTION
Headers = _DEFAULT_HEADERS
Path = None # defaults to WWW root.
AccessExecute = _DEFAULT_ACCESS_EXECUTE
AccessRead = _DEFAULT_ACCESS_READ
AccessWrite = _DEFAULT_ACCESS_WRITE
AccessScript = _DEFAULT_ACCESS_SCRIPT
ContentIndexed = _DEFAULT_CONTENT_INDEXED
EnableDirBrowsing = _DEFAULT_ENABLE_DIR_BROWSING
EnableDefaultDoc = _DEFAULT_ENABLE_DEFAULT_DOC
DefaultDoc = None # Only set in IIS if not None
ScriptMaps = []
ScriptMapUpdate = "end" # can be 'start', 'end', 'replace'
Server = None
def __init__(self, **kw):
self.__dict__.update(kw)
class ScriptMapParams:
Extension = None
Module = None
Flags = 5
Verbs = ""
# Params that control if/how AddExtensionFile is called.
AddExtensionFile = True
AddExtensionFile_Enabled = True
AddExtensionFile_GroupID = None # defaults to Name
AddExtensionFile_CanDelete = True
AddExtensionFile_Description = None # defaults to Description.
def __init__(self, **kw):
self.__dict__.update(kw)
class ISAPIParameters:
ServerName = _DEFAULT_SERVER_NAME
# Description = None
Filters = []
VirtualDirs = []
def __init__(self, **kw):
self.__dict__.update(kw)
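# A minimal sketch (hypothetical names) of how the parameter classes above
# are typically combined; ScriptMapParams.Module and VirtualDirParameters.Path
# are left unset so that _PatchParamsModule() below can fill in the loader DLL:
def _example_params():
    sm = ScriptMapParams(Extension="*", Flags=0)
    vd = VirtualDirParameters(Name="MyPyApp",
                              Description="Example Python ISAPI app",
                              ScriptMaps=[sm],
                              ScriptMapUpdate="replace")
    return ISAPIParameters(VirtualDirs=[vd])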
verbose = 1 # The level - 0 is quiet.
def log(level, what):
if verbose >= level:
print what
# Convert an ADSI COM exception to the Win32 error code embedded in it.
def _GetWin32ErrorCode(com_exc):
hr, msg, exc, narg = com_exc
# If we have more details in the 'exc' struct, use it.
if exc:
hr = exc[-1]
if winerror.HRESULT_FACILITY(hr) != winerror.FACILITY_WIN32:
raise
return winerror.SCODE_CODE(hr)
class InstallationError(Exception): pass
class ItemNotFound(InstallationError): pass
class ConfigurationError(InstallationError): pass
def FindPath(options, server, name):
if name.lower().startswith("iis://"):
return name
else:
if name and name[0] != "/":
name = "/"+name
return FindWebServer(options, server)+"/ROOT"+name
def FindWebServer(options, server_desc):
# command-line options get first go, and are assumed in 'mbcs' encoding
# (well, assumed MBCS by the time they got to sys.argv...)
if options.server:
server_desc = options.server
# but someone may have explicitly already set unicode...
if type(server_desc) != unicode:
server_desc = server_desc.decode("mbcs")
# If the config passed by the caller doesn't specify one, use the default
if not server_desc:
server = _IIS_OBJECT+"/1"
else:
# Assume the user has passed either the instance_id or "server
# description" - loop over all objects until we find it.
ob = GetObject(_IIS_OBJECT)
look = server_desc.lower().strip()
for sub in ob:
# ID is generally a number, but no need to assume that.
this_id = getattr(sub, "Name", "").lower().strip()
this_comment = getattr(sub, "ServerComment", "").lower().strip()
if this_id == look or this_comment == look:
server = sub.AdsPath
break
else:
raise ItemNotFound, \
"No web sites match the description '%s'" % (server_desc,)
# Check it is good.
try:
GetObject(server)
except pythoncom.com_error, details:
hr, msg, exc, arg_err = details
if exc and exc[2]:
msg = exc[2]
raise ItemNotFound, \
"WebServer %s: %s" % (server, msg)
return server
def CreateDirectory(params, options):
_CallHook(params, "PreInstall", options)
if not params.Name:
raise ConfigurationError, "No Name param"
slash = params.Name.rfind("/")
if slash >= 0:
parent = params.Name[:slash]
name = params.Name[slash+1:]
else:
parent = ""
name = params.Name
webDir = GetObject(FindPath(options, params.Server, parent))
if parent:
# Note that the directory won't be visible in the IIS UI
# unless the directory exists on the filesystem.
keyType = _IIS_WEBDIR
else:
keyType = _IIS_WEBVIRTUALDIR
# We used to go to lengths to keep an existing virtual directory
# in place. However, in some cases the existing directories got
# into a bad state, and an update failed to get them working.
# So we nuke it first. If this is a problem, we could consider adding
# a --keep-existing option.
try:
# Also seen the Class change to a generic IISObject - so nuke
# *any* existing object, regardless of Class
existing = GetObject(FindPath(options, params.Server, params.Name))
webDir.Delete(existing.Class, existing.Name)
log(2, "Deleted old directory '%s'" % (params.Name,))
except pythoncom.com_error:
pass
newDir = webDir.Create(keyType, name)
log(2, "Creating new directory '%s'..." % (params.Name,))
friendly = params.Description or params.Name
newDir.AppFriendlyName = friendly
try:
path = params.Path or webDir.Path
newDir.Path = path
except AttributeError:
pass
newDir.AppCreate2(params.AppProtection)
newDir.HttpCustomHeaders = params.Headers
log(2, "Setting directory options...")
newDir.AccessExecute = params.AccessExecute
newDir.AccessRead = params.AccessRead
newDir.AccessWrite = params.AccessWrite
newDir.AccessScript = params.AccessScript
newDir.ContentIndexed = params.ContentIndexed
newDir.EnableDirBrowsing = params.EnableDirBrowsing
newDir.EnableDefaultDoc = params.EnableDefaultDoc
if params.DefaultDoc is not None:
newDir.DefaultDoc = params.DefaultDoc
newDir.SetInfo()
smp_items = []
for smp in params.ScriptMaps:
item = "%s,%s,%s" % (smp.Extension, smp.Module, smp.Flags)
# IIS gets upset if there is a trailing verb comma, but no verbs
if smp.Verbs:
item += "," + smp.Verbs
smp_items.append(item)
if params.ScriptMapUpdate == "replace":
newDir.ScriptMaps = smp_items
elif params.ScriptMapUpdate == "end":
for item in smp_items:
if item not in newDir.ScriptMaps:
newDir.ScriptMaps = newDir.ScriptMaps + (item,)
elif params.ScriptMapUpdate == "start":
for item in smp_items:
if item not in newDir.ScriptMaps:
newDir.ScriptMaps = (item,) + newDir.ScriptMaps
else:
raise ConfigurationError, \
"Unknown ScriptMapUpdate option '%s'" % (params.ScriptMapUpdate,)
newDir.SetInfo()
_CallHook(params, "PostInstall", options, newDir)
log(1, "Configured Virtual Directory: %s" % (params.Name,))
return newDir
def CreateISAPIFilter(filterParams, options):
server = FindWebServer(options, filterParams.Server)
_CallHook(filterParams, "PreInstall", options)
try:
filters = GetObject(server+"/Filters")
except pythoncom.com_error, (hr, msg, exc, arg):
# Brand new sites don't have the '/Filters' collection - create it.
# Any errors other than 'not found' we shouldn't ignore.
if winerror.HRESULT_FACILITY(hr) != winerror.FACILITY_WIN32 or \
winerror.HRESULT_CODE(hr) != winerror.ERROR_PATH_NOT_FOUND:
raise
server_ob = GetObject(server)
filters = server_ob.Create(_IIS_FILTERS, "Filters")
filters.FilterLoadOrder = ""
filters.SetInfo()
# As for VirtualDir, delete an existing one.
try:
filters.Delete(_IIS_FILTER, filterParams.Name)
log(2, "Deleted old filter '%s'" % (filterParams.Name,))
except pythoncom.com_error:
pass
newFilter = filters.Create(_IIS_FILTER, filterParams.Name)
log(2, "Created new ISAPI filter...")
assert os.path.isfile(filterParams.Path)
newFilter.FilterPath = filterParams.Path
newFilter.FilterDescription = filterParams.Description
newFilter.SetInfo()
load_order = [b.strip() for b in filters.FilterLoadOrder.split(",") if b]
if filterParams.Name not in load_order:
load_order.append(filterParams.Name)
filters.FilterLoadOrder = ",".join(load_order)
filters.SetInfo()
_CallHook(filterParams, "PostInstall", options, newFilter)
log (1, "Configured Filter: %s" % (filterParams.Name,))
return newFilter
def DeleteISAPIFilter(filterParams, options):
_CallHook(filterParams, "PreRemove", options)
server = FindWebServer(options, filterParams.Server)
ob_path = server+"/Filters"
try:
filters = GetObject(ob_path)
except pythoncom.com_error, details:
# failure to open the filters just means a totally clean IIS install
# (IIS5 at least has no 'Filters' key when freshly installed).
log(2, "ISAPI filter path '%s' did not exist." % (ob_path,))
return
try:
filters.Delete(_IIS_FILTER, filterParams.Name)
log(2, "Deleted ISAPI filter '%s'" % (filterParams.Name,))
except pythoncom.com_error, details:
rc = _GetWin32ErrorCode(details)
if rc != winerror.ERROR_PATH_NOT_FOUND:
raise
log(2, "ISAPI filter '%s' did not exist." % (filterParams.Name,))
# Remove from the load order
load_order = [b.strip() for b in filters.FilterLoadOrder.split(",") if b]
if filterParams.Name in load_order:
load_order.remove(filterParams.Name)
filters.FilterLoadOrder = ",".join(load_order)
filters.SetInfo()
_CallHook(filterParams, "PostRemove", options)
log (1, "Deleted Filter: %s" % (filterParams.Name,))
def _AddExtensionFile(module, def_groupid, def_desc, params, options):
group_id = params.AddExtensionFile_GroupID or def_groupid
desc = params.AddExtensionFile_Description or def_desc
try:
ob = GetObject(_IIS_OBJECT)
ob.AddExtensionFile(module,
params.AddExtensionFile_Enabled,
group_id,
params.AddExtensionFile_CanDelete,
desc)
log(2, "Added extension file '%s' (%s)" % (module, desc))
except (pythoncom.com_error, AttributeError), details:
# IIS5 always fails. Probably should upgrade this to
# complain more loudly if IIS6 fails.
log(2, "Failed to add extension file '%s': %s" % (module, details))
def AddExtensionFiles(params, options):
"""Register the modules used by the filters/extensions as a trusted
'extension module' - required by the default IIS6 security settings."""
# Add each module only once.
added = {}
for vd in params.VirtualDirs:
for smp in vd.ScriptMaps:
if not added.has_key(smp.Module) and smp.AddExtensionFile:
_AddExtensionFile(smp.Module, vd.Name, vd.Description, smp,
options)
added[smp.Module] = True
for fd in params.Filters:
if not added.has_key(fd.Path) and fd.AddExtensionFile:
_AddExtensionFile(fd.Path, fd.Name, fd.Description, fd, options)
added[fd.Path] = True
def _DeleteExtensionFileRecord(module, options):
try:
ob = GetObject(_IIS_OBJECT)
ob.DeleteExtensionFileRecord(module)
log(2, "Deleted extension file record for '%s'" % module)
except (pythoncom.com_error, AttributeError), details:
log(2, "Failed to remove extension file '%s': %s" % (module, details))
def DeleteExtensionFileRecords(params, options):
deleted = {} # only remove each .dll once.
for vd in params.VirtualDirs:
for smp in vd.ScriptMaps:
if not deleted.has_key(smp.Module) and smp.AddExtensionFile:
_DeleteExtensionFileRecord(smp.Module, options)
deleted[smp.Module] = True
for filter_def in params.Filters:
if not deleted.has_key(filter_def.Path) and filter_def.AddExtensionFile:
_DeleteExtensionFileRecord(filter_def.Path, options)
deleted[filter_def.Path] = True
def CheckLoaderModule(dll_name):
suffix = ""
if is_debug_build: suffix = "_d"
template = os.path.join(this_dir,
"PyISAPI_loader" + suffix + ".dll")
if not os.path.isfile(template):
raise ConfigurationError, \
"Template loader '%s' does not exist" % (template,)
# We can't do a simple "is newer" check, as the DLL is specific to the
# Python version. So we check the date-time and size are identical,
# and skip the copy in that case.
src_stat = os.stat(template)
try:
dest_stat = os.stat(dll_name)
except os.error:
same = 0
else:
same = src_stat[stat.ST_SIZE]==dest_stat[stat.ST_SIZE] and \
src_stat[stat.ST_MTIME]==dest_stat[stat.ST_MTIME]
if not same:
log(2, "Updating %s->%s" % (template, dll_name))
shutil.copyfile(template, dll_name)
shutil.copystat(template, dll_name)
else:
log(2, "%s is up to date." % (dll_name,))
def _CallHook(ob, hook_name, options, *extra_args):
func = getattr(ob, hook_name, None)
if func is not None:
args = (ob,options) + extra_args
func(*args)
def Install(params, options):
_CallHook(params, "PreInstall", options)
for vd in params.VirtualDirs:
CreateDirectory(vd, options)
for filter_def in params.Filters:
CreateISAPIFilter(filter_def, options)
AddExtensionFiles(params, options)
_CallHook(params, "PostInstall", options)
def Uninstall(params, options):
_CallHook(params, "PreRemove", options)
DeleteExtensionFileRecords(params, options)
for vd in params.VirtualDirs:
_CallHook(vd, "PreRemove", options)
try:
directory = GetObject(FindPath(options, vd.Server, vd.Name))
except pythoncom.com_error, details:
rc = _GetWin32ErrorCode(details)
if rc != winerror.ERROR_PATH_NOT_FOUND:
raise
log(2, "VirtualDirectory '%s' did not exist" % vd.Name)
directory = None
if directory is not None:
# Be robust should IIS get upset about unloading.
try:
directory.AppUnLoad()
except:
exc_val = sys.exc_info()[1]
log(2, "AppUnLoad() for %s failed: %s" % (vd.Name, exc_val))
# Continue trying to delete it.
try:
parent = GetObject(directory.Parent)
parent.Delete(directory.Class, directory.Name)
log (1, "Deleted Virtual Directory: %s" % (vd.Name,))
except:
exc_val = sys.exc_info()[1]
log(1, "Failed to remove directory %s: %s" % (vd.Name, exc_val))
_CallHook(vd, "PostRemove", options)
for filter_def in params.Filters:
DeleteISAPIFilter(filter_def, options)
_CallHook(params, "PostRemove", options)
# Patch up any missing module names in the params, replacing them with
# the DLL name that hosts this extension/filter.
def _PatchParamsModule(params, dll_name, file_must_exist = True):
if file_must_exist:
if not os.path.isfile(dll_name):
raise ConfigurationError, "%s does not exist" % (dll_name,)
# Patch up all references to the DLL.
for f in params.Filters:
if f.Path is None: f.Path = dll_name
for d in params.VirtualDirs:
for sm in d.ScriptMaps:
if sm.Module is None: sm.Module = dll_name
def GetLoaderModuleName(mod_name, check_module = None):
# find the name of the DLL hosting us.
# By default, this is "_{module_base_name}.dll"
if hasattr(sys, "frozen"):
# What to do? The .dll knows its name, but this is likely to be
# executed via a .exe, which does not know.
base, ext = os.path.splitext(mod_name)
path, base = os.path.split(base)
# handle the common case of 'foo.exe'/'foow.exe'
if base.endswith('w'):
base = base[:-1]
# For py2exe, we have '_foo.dll' as the standard pyisapi loader - but
# 'foo.dll' is what we use (it just delegates).
# So no leading '_' on the installed name.
dll_name = os.path.abspath(os.path.join(path, base + ".dll"))
else:
base, ext = os.path.splitext(mod_name)
path, base = os.path.split(base)
dll_name = os.path.abspath(os.path.join(path, "_" + base + ".dll"))
# Check we actually have it.
if check_module is None: check_module = not hasattr(sys, "frozen")
if check_module:
CheckLoaderModule(dll_name)
return dll_name
def InstallModule(conf_module_name, params, options):
if not hasattr(sys, "frozen"):
conf_module_name = os.path.abspath(conf_module_name)
if not os.path.isfile(conf_module_name):
raise ConfigurationError, "%s does not exist" % (conf_module_name,)
loader_dll = GetLoaderModuleName(conf_module_name)
_PatchParamsModule(params, loader_dll)
Install(params, options)
def UninstallModule(conf_module_name, params, options):
loader_dll = GetLoaderModuleName(conf_module_name, False)
_PatchParamsModule(params, loader_dll, False)
Uninstall(params, options)
standard_arguments = {
"install" : "Install the extension",
"remove" : "Remove the extension"
}
# We support 2 ways of extending our command-line/install support.
# * Many of the installation items allow you to specify "PreInstall",
# "PostInstall", "PreRemove" and "PostRemove" hooks
# All hooks are called with the 'params' object being operated on, and
# the 'optparser' options for this session (ie, the command-line options)
# PostInstall for VirtualDirectories and Filters both have an additional
# param - the ADSI object just created.
# * You can pass your own option parser for us to use, and/or define a map
# with your own custom arg handlers. It is a map of 'arg'->function.
# The function is called with (options, log_fn, arg). The function's
#   docstring is used in the usage output.  (A sketch of such a handler
#   follows HandleCommandLine below.)
def HandleCommandLine(params, argv=None, conf_module_name = None,
default_arg = "install",
opt_parser = None, custom_arg_handlers = {}):
"""Perform installation or removal of an ISAPI filter or extension.
This module handles standard command-line options and configuration
information, and installs, removes or updates the configuration of an
ISAPI filter or extension.
You must pass your configuration information in params - all other
arguments are optional, and allow you to configure the installation
process.
"""
global verbose
from optparse import OptionParser
argv = argv or sys.argv
conf_module_name = conf_module_name or sys.argv[0]
if opt_parser is None:
# Build our own parser.
parser = OptionParser(usage='')
else:
# The caller is providing their own filter, presumably with their
# own options all setup.
parser = opt_parser
# build a usage string if we don't have one.
if not parser.get_usage():
all_args = standard_arguments.copy()
for arg, handler in custom_arg_handlers.items():
all_args[arg] = handler.__doc__
arg_names = "|".join(all_args.keys())
usage_string = "%prog [options] [" + arg_names + "]\n"
usage_string += "commands:\n"
for arg, desc in all_args.items():
usage_string += " %-10s: %s" % (arg, desc) + "\n"
parser.set_usage(usage_string[:-1])
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
parser.add_option("-v", "--verbosity", action="count",
dest="verbose", default=1,
help="increase the verbosity of status messages")
parser.add_option("", "--server", action="store",
help="Specifies the IIS server to install/uninstall on." \
" Default is '%s/1'" % (_IIS_OBJECT,))
(options, args) = parser.parse_args(argv[1:])
verbose = options.verbose
if not args:
args = [default_arg]
try:
for arg in args:
if arg == "install":
InstallModule(conf_module_name, params, options)
log(1, "Installation complete.")
elif arg in ["remove", "uninstall"]:
UninstallModule(conf_module_name, params, options)
log(1, "Uninstallation complete.")
else:
handler = custom_arg_handlers.get(arg, None)
if handler is None:
parser.error("Invalid arg '%s'" % (arg,))
handler(options, log, arg)
except (ItemNotFound, InstallationError), details:
if options.verbose > 1:
traceback.print_exc()
print "%s: %s" % (details.__class__.__name__, details)
| apache-2.0 |
wlonk/warehouse | tests/functional/pages/base.py | 3 | 2225 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import urllib.parse
from bok_choy.page_object import PageObject as _PageObject, unguarded
from bok_choy.promise import EmptyPromise
class PageObject(_PageObject):
has_client_side_includes = True
def __init__(self, *args, base_url, **kwargs):
self.base_url = base_url
super().__init__(*args, **kwargs)
@property
def url(self):
return urllib.parse.urljoin(self.base_url, self.path)
@property
@abc.abstractmethod
def path(self):
"""
Return the path of the page. This may be dynamic,
determined by configuration options passed to the
page object's constructor.
"""
return None
@unguarded
def wait_for_page(self, timeout=30):
b = self.browser
# This is mostly copied from the original PageObject.wait_for_page(),
# we duplicate it here because we want to check this before executing
# our own checks.
EmptyPromise(
lambda: b.execute_script("return document.readyState=='complete'"),
"The document and all sub-resources have finished loading.",
timeout=timeout,
).fulfill()
if self.has_client_side_includes:
# Ensure that our HTML includes has successfully fired.
EmptyPromise(
lambda: b.execute_script(
"return window._WarehouseHTMLIncluded"),
"The document has finished executing client side includes.",
timeout=timeout,
).fulfill()
# Run the rest of the items that we want to wait on the page for.
return super().wait_for_page(timeout=timeout)
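# A minimal sketch (hypothetical page) of a concrete page object built on the
# base class above; bok_choy additionally requires is_browser_on_page().  It
# would be constructed as _ExampleIndexPage(browser, base_url="http://localhost"):
class _ExampleIndexPage(PageObject):
    @property
    def path(self):
        return "/"

    def is_browser_on_page(self):
        return "warehouse" in self.browser.title.lower()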
| apache-2.0 |
Galax-e/laravel-enterprise-starter-kit | vendor/mockery/mockery/docs/conf.py | 468 | 8442 | # -*- coding: utf-8 -*-
#
# Mockery Docs documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 3 14:04:26 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mockery Docs'
copyright = u'2014, Pádraic Brady, Dave Marshall, Wouter, Graham Campbell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MockeryDocsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index2', 'MockeryDocs.tex', u'Mockery Docs Documentation',
u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index2', 'mockerydocs', u'Mockery Docs Documentation',
[u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index2', 'MockeryDocs', u'Mockery Docs Documentation',
u'Pádraic Brady, Dave Marshall, Wouter, Graham Campbell', 'MockeryDocs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
#on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    print(sphinx_rtd_theme.get_html_theme_path())
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/pylab_examples/centered_ticklabels.py | 6 | 1355 | # sometimes it is nice to have ticklabels centered. mpl currently
# associates a label with a tick, and the label can be aligned
# 'center', 'left', or 'right' using the horizontal alignment property:
#
#
# for label in ax.xaxis.get_xticklabels():
# label.set_horizontalalignment('right')
#
#
# but this doesn't help center the label between ticks. One solution
# is to "face it". Use the minor ticks to place a tick centered
# between the major ticks. Here is an example that labels the months,
# centered between the ticks
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
# load some financial data; apple's stock price
fh = cbook.get_sample_data('aapl.npy.gz')
r = np.load(fh); fh.close()
r = r[-250:] # get the last 250 days
fig, ax = plt.subplots()
ax.plot(r.date, r.adj_close)
ax.xaxis.set_major_locator(dates.MonthLocator())
ax.xaxis.set_minor_locator(dates.MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))
for tick in ax.xaxis.get_minor_ticks():
tick.tick1line.set_markersize(0)
tick.tick2line.set_markersize(0)
tick.label1.set_horizontalalignment('center')
imid = len(r)/2
ax.set_xlabel(str(r.date[imid].year))
plt.show()
| mit |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/sympy/series/tests/test_gruntz.py | 51 | 15626 | from sympy import Symbol, exp, log, oo, Rational, I, sin, gamma, loggamma, S, \
atan, acot, pi, cancel, E, erf, sqrt, zeta, cos, digamma, Integer, Ei, EulerGamma
from sympy.functions.elementary.hyperbolic import cosh, coth, sinh, tanh
from sympy.series.gruntz import compare, mrv, rewrite, mrv_leadterm, gruntz, \
sign
from sympy.utilities.pytest import XFAIL, skip, slow
"""
This test suite is testing the limit algorithm using the bottom up approach.
See the documentation in limits2.py. The algorithm itself is highly recursive
by nature, so "compare" is logically the lowest part of the algorithm, yet in
some sense it's the most complex part, because it needs to calculate a limit
to return the result.
Nevertheless, the rest of the algorithm depends on compare working correctly.
"""
x = Symbol('x', real=True)
m = Symbol('m', real=True)
runslow = False
def _sskip():
if not runslow:
skip("slow")
@slow
def test_gruntz_evaluation():
# Gruntz' thesis pp. 122 to 123
# 8.1
assert gruntz(exp(x)*(exp(1/x - exp(-x)) - exp(1/x)), x, oo) == -1
# 8.2
assert gruntz(exp(x)*(exp(1/x + exp(-x) + exp(-x**2))
- exp(1/x - exp(-exp(x)))), x, oo) == 1
# 8.3
assert gruntz(exp(exp(x - exp(-x))/(1 - 1/x)) - exp(exp(x)), x, oo) == oo
# 8.5
assert gruntz(exp(exp(exp(x + exp(-x)))) / exp(exp(exp(x))), x, oo) == oo
# 8.6
assert gruntz(exp(exp(exp(x))) / exp(exp(exp(x - exp(-exp(x))))),
x, oo) == oo
# 8.7
assert gruntz(exp(exp(exp(x))) / exp(exp(exp(x - exp(-exp(exp(x)))))),
x, oo) == 1
# 8.8
assert gruntz(exp(exp(x)) / exp(exp(x - exp(-exp(exp(x))))), x, oo) == 1
# 8.9
assert gruntz(log(x)**2 * exp(sqrt(log(x))*(log(log(x)))**2
* exp(sqrt(log(log(x))) * (log(log(log(x))))**3)) / sqrt(x),
x, oo) == 0
# 8.10
assert gruntz((x*log(x)*(log(x*exp(x) - x**2))**2)
/ (log(log(x**2 + 2*exp(exp(3*x**3*log(x)))))), x, oo) == S(1)/3
# 8.11
assert gruntz((exp(x*exp(-x)/(exp(-x) + exp(-2*x**2/(x + 1)))) - exp(x))/x,
x, oo) == -exp(2)
# 8.12
assert gruntz((3**x + 5**x)**(1/x), x, oo) == 5
# 8.13
assert gruntz(x/log(x**(log(x**(log(2)/log(x))))), x, oo) == oo
# 8.14
assert gruntz(exp(exp(2*log(x**5 + x)*log(log(x))))
/ exp(exp(10*log(x)*log(log(x)))), x, oo) == oo
# 8.15
assert gruntz(exp(exp(S(5)/2*x**(-S(5)/7) + S(21)/8*x**(S(6)/11)
+ 2*x**(-8) + S(54)/17*x**(S(49)/45) ))**8
/ log(log(-log(S(4)/3*x**(-S(5)/14))))**(S(7)/6), x, oo) == oo
# 8.16
assert gruntz((exp(4*x*exp(-x)/(1/exp(x) + 1/exp(2*x**2/(x + 1)))) - exp(x))
/ exp(x)**4, x, oo) == 1
# 8.17
assert gruntz(exp(x*exp(-x)/(exp(-x) + exp(-2*x**2/(x + 1))))/exp(x), x, oo) \
== 1
# 8.19
assert gruntz(log(x)*(log(log(x) + log(log(x))) - log(log(x)))
/ (log(log(x) + log(log(log(x))))), x, oo) == 1
# 8.20
assert gruntz(exp((log(log(x + exp(log(x)*log(log(x))))))
/ (log(log(log(exp(x) + x + log(x)))))), x, oo) == E
# Another
assert gruntz(exp(exp(exp(x + exp(-x)))) / exp(exp(x)), x, oo) == oo
def test_gruntz_evaluation_slow():
_sskip()
# 8.4
assert gruntz(exp(exp(exp(x)/(1 - 1/x)))
- exp(exp(exp(x)/(1 - 1/x - log(x)**(-log(x))))), x, oo) == -oo
# 8.18
assert gruntz((exp(exp(-x/(1 + exp(-x))))*exp(-x/(1 + exp(-x/(1 + exp(-x)))))
*exp(exp(-x + exp(-x/(1 + exp(-x))))))
/ (exp(-x/(1 + exp(-x))))**2 - exp(x) + x, x, oo) == 2
@slow
def test_gruntz_eval_special():
# Gruntz, p. 126
assert gruntz(exp(x)*(sin(1/x + exp(-x)) - sin(1/x + exp(-x**2))), x, oo) == 1
assert gruntz((erf(x - exp(-exp(x))) - erf(x)) * exp(exp(x)) * exp(x**2),
x, oo) == -2/sqrt(pi)
assert gruntz(exp(exp(x)) * (exp(sin(1/x + exp(-exp(x)))) - exp(sin(1/x))),
x, oo) == 1
assert gruntz(exp(x)*(gamma(x + exp(-x)) - gamma(x)), x, oo) == oo
assert gruntz(exp(exp(digamma(digamma(x))))/x, x, oo) == exp(-S(1)/2)
assert gruntz(exp(exp(digamma(log(x))))/x, x, oo) == exp(-S(1)/2)
assert gruntz(digamma(digamma(digamma(x))), x, oo) == oo
assert gruntz(loggamma(loggamma(x)), x, oo) == oo
assert gruntz(((gamma(x + 1/gamma(x)) - gamma(x))/log(x) - cos(1/x))
* x*log(x), x, oo) == -S(1)/2
assert gruntz(x * (gamma(x - 1/gamma(x)) - gamma(x) + log(x)), x, oo) \
== S(1)/2
assert gruntz((gamma(x + 1/gamma(x)) - gamma(x)) / log(x), x, oo) == 1
def test_gruntz_eval_special_slow():
_sskip()
assert gruntz(gamma(x + 1)/sqrt(2*pi)
- exp(-x)*(x**(x + S(1)/2) + x**(x - S(1)/2)/12), x, oo) == oo
assert gruntz(exp(exp(exp(digamma(digamma(digamma(x))))))/x, x, oo) == 0
@XFAIL
def test_gruntz_eval_special_slow_sometimes_fail():
_sskip()
# XXX This sometimes fails!!!
assert gruntz(exp(gamma(x - exp(-x))*exp(1/x)) - exp(gamma(x)), x, oo) == oo
@XFAIL
def test_gruntz_eval_special_fail():
# TODO exponential integral Ei
assert gruntz(
(Ei(x - exp(-exp(x))) - Ei(x)) *exp(-x)*exp(exp(x))*x, x, oo) == -1
# TODO zeta function series
assert gruntz(
exp((log(2) + 1)*x) * (zeta(x + exp(-x)) - zeta(x)), x, oo) == -log(2)
# TODO 8.35 - 8.37 (bessel, max-min)
def test_gruntz_hyperbolic():
assert gruntz(cosh(x), x, oo) == oo
assert gruntz(cosh(x), x, -oo) == oo
assert gruntz(sinh(x), x, oo) == oo
assert gruntz(sinh(x), x, -oo) == -oo
assert gruntz(2*cosh(x)*exp(x), x, oo) == oo
assert gruntz(2*cosh(x)*exp(x), x, -oo) == 1
assert gruntz(2*sinh(x)*exp(x), x, oo) == oo
assert gruntz(2*sinh(x)*exp(x), x, -oo) == -1
assert gruntz(tanh(x), x, oo) == 1
assert gruntz(tanh(x), x, -oo) == -1
assert gruntz(coth(x), x, oo) == 1
assert gruntz(coth(x), x, -oo) == -1
def test_compare1():
assert compare(2, x, x) == "<"
assert compare(x, exp(x), x) == "<"
assert compare(exp(x), exp(x**2), x) == "<"
assert compare(exp(x**2), exp(exp(x)), x) == "<"
assert compare(1, exp(exp(x)), x) == "<"
assert compare(x, 2, x) == ">"
assert compare(exp(x), x, x) == ">"
assert compare(exp(x**2), exp(x), x) == ">"
assert compare(exp(exp(x)), exp(x**2), x) == ">"
assert compare(exp(exp(x)), 1, x) == ">"
assert compare(2, 3, x) == "="
assert compare(3, -5, x) == "="
assert compare(2, -5, x) == "="
assert compare(x, x**2, x) == "="
assert compare(x**2, x**3, x) == "="
assert compare(x**3, 1/x, x) == "="
assert compare(1/x, x**m, x) == "="
assert compare(x**m, -x, x) == "="
assert compare(exp(x), exp(-x), x) == "="
assert compare(exp(-x), exp(2*x), x) == "="
assert compare(exp(2*x), exp(x)**2, x) == "="
assert compare(exp(x)**2, exp(x + exp(-x)), x) == "="
assert compare(exp(x), exp(x + exp(-x)), x) == "="
assert compare(exp(x**2), 1/exp(x**2), x) == "="
def test_compare2():
assert compare(exp(x), x**5, x) == ">"
assert compare(exp(x**2), exp(x)**2, x) == ">"
assert compare(exp(x), exp(x + exp(-x)), x) == "="
assert compare(exp(x + exp(-x)), exp(x), x) == "="
assert compare(exp(x + exp(-x)), exp(-x), x) == "="
assert compare(exp(-x), x, x) == ">"
assert compare(x, exp(-x), x) == "<"
assert compare(exp(x + 1/x), x, x) == ">"
assert compare(exp(-exp(x)), exp(x), x) == ">"
assert compare(exp(exp(-exp(x)) + x), exp(-exp(x)), x) == "<"
def test_compare3():
assert compare(exp(exp(x)), exp(x + exp(-exp(x))), x) == ">"
def test_sign1():
assert sign(Rational(0), x) == 0
assert sign(Rational(3), x) == 1
assert sign(Rational(-5), x) == -1
assert sign(log(x), x) == 1
assert sign(exp(-x), x) == 1
assert sign(exp(x), x) == 1
assert sign(-exp(x), x) == -1
assert sign(3 - 1/x, x) == 1
assert sign(-3 - 1/x, x) == -1
assert sign(sin(1/x), x) == 1
assert sign((x**Integer(2)), x) == 1
def test_sign2():
assert sign(x, x) == 1
assert sign(-x, x) == -1
y = Symbol("y", positive=True)
assert sign(y, x) == 1
assert sign(-y, x) == -1
assert sign(y*x, x) == 1
assert sign(-y*x, x) == -1
def mmrv(a, b):
return set(mrv(a, b)[0].keys())
def test_mrv1():
assert mmrv(x, x) == set([x])
assert mmrv(x + 1/x, x) == set([x])
assert mmrv(x**2, x) == set([x])
assert mmrv(log(x), x) == set([x])
assert mmrv(exp(x), x) == set([exp(x)])
assert mmrv(exp(-x), x) == set([exp(-x)])
assert mmrv(exp(x**2), x) == set([exp(x**2)])
assert mmrv(-exp(1/x), x) == set([x])
assert mmrv(exp(x + 1/x), x) == set([exp(x + 1/x)])
def test_mrv2a():
assert mmrv(exp(x + exp(-exp(x))), x) == set([exp(-exp(x))])
assert mmrv(exp(x + exp(-x)), x) == set([exp(x + exp(-x)), exp(-x)])
assert mmrv(exp(1/x + exp(-x)), x) == set([exp(-x)])
#sometimes infinite recursion due to log(exp(x**2)) not simplifying
def test_mrv2b():
assert mmrv(exp(x + exp(-x**2)), x) == set([exp(-x**2)])
#sometimes infinite recursion due to log(exp(x**2)) not simplifying
def test_mrv2c():
assert mmrv(
exp(-x + 1/x**2) - exp(x + 1/x), x) == set([exp(x + 1/x), exp(1/x**2 - x)])
#sometimes infinite recursion due to log(exp(x**2)) not simplifying
def test_mrv3():
assert mmrv(exp(x**2) + x*exp(x) + log(x)**x/x, x) == set([exp(x**2)])
assert mmrv(
exp(x)*(exp(1/x + exp(-x)) - exp(1/x)), x) == set([exp(x), exp(-x)])
assert mmrv(log(
x**2 + 2*exp(exp(3*x**3*log(x)))), x) == set([exp(exp(3*x**3*log(x)))])
assert mmrv(log(x - log(x))/log(x), x) == set([x])
assert mmrv(
(exp(1/x - exp(-x)) - exp(1/x))*exp(x), x) == set([exp(x), exp(-x)])
assert mmrv(
1/exp(-x + exp(-x)) - exp(x), x) == set([exp(x), exp(-x), exp(x - exp(-x))])
assert mmrv(log(log(x*exp(x*exp(x)) + 1)), x) == set([exp(x*exp(x))])
assert mmrv(exp(exp(log(log(x) + 1/x))), x) == set([x])
def test_mrv4():
ln = log
assert mmrv((ln(ln(x) + ln(ln(x))) - ln(ln(x)))/ln(ln(x) + ln(ln(ln(x))))*ln(x),
x) == set([x])
assert mmrv(log(log(x*exp(x*exp(x)) + 1)) - exp(exp(log(log(x) + 1/x))), x) == \
set([exp(x*exp(x))])
def mrewrite(a, b, c):
return rewrite(a[1], a[0], b, c)
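# rewrite() re-expresses an expression in terms of a single fresh symbol
# standing for its most rapidly varying part, returning the rewritten
# expression together with logw, where the symbol equals exp(logw). E.g.
# for e = exp(x) the result (1/m, -x) below says exp(x) == 1/m with
# m = exp(-x).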
def test_rewrite1():
e = exp(x)
assert mrewrite(mrv(e, x), x, m) == (1/m, -x)
e = exp(x**2)
assert mrewrite(mrv(e, x), x, m) == (1/m, -x**2)
e = exp(x + 1/x)
assert mrewrite(mrv(e, x), x, m) == (1/m, -x - 1/x)
e = 1/exp(-x + exp(-x)) - exp(x)
assert mrewrite(mrv(e, x), x, m) == (1/(m*exp(m)) - 1/m, -x)
def test_rewrite2():
e = exp(x)*log(log(exp(x)))
assert mmrv(e, x) == set([exp(x)])
assert mrewrite(mrv(e, x), x, m) == (1/m*log(x), -x)
#sometimes infinite recursion due to log(exp(x**2)) not simplifying
def test_rewrite3():
e = exp(-x + 1/x**2) - exp(x + 1/x)
#both of these are correct and should be equivalent:
assert mrewrite(mrv(e, x), x, m) in [(-1/m + m*exp(
1/x + 1/x**2), -x - 1/x), (m - 1/m*exp(1/x + x**(-2)), x**(-2) - x)]
def test_mrv_leadterm1():
assert mrv_leadterm(-exp(1/x), x) == (-1, 0)
assert mrv_leadterm(1/exp(-x + exp(-x)) - exp(x), x) == (-1, 0)
assert mrv_leadterm(
(exp(1/x - exp(-x)) - exp(1/x))*exp(x), x) == (-exp(1/x), 0)
def test_mrv_leadterm2():
#Gruntz: p51, 3.25
assert mrv_leadterm((log(exp(x) + x) - x)/log(exp(x) + log(x))*exp(x), x) == \
(1, 0)
def test_mrv_leadterm3():
#Gruntz: p56, 3.27
assert mmrv(exp(-x + exp(-x)*exp(-x*log(x))), x) == set([exp(-x - x*log(x))])
assert mrv_leadterm(exp(-x + exp(-x)*exp(-x*log(x))), x) == (exp(-x), 0)
def test_limit1():
assert gruntz(x, x, oo) == oo
assert gruntz(x, x, -oo) == -oo
assert gruntz(-x, x, oo) == -oo
assert gruntz(x**2, x, -oo) == oo
assert gruntz(-x**2, x, oo) == -oo
assert gruntz(x*log(x), x, 0, dir="+") == 0
assert gruntz(1/x, x, oo) == 0
assert gruntz(exp(x), x, oo) == oo
assert gruntz(-exp(x), x, oo) == -oo
assert gruntz(exp(x)/x, x, oo) == oo
assert gruntz(1/x - exp(-x), x, oo) == 0
assert gruntz(x + 1/x, x, oo) == oo
def test_limit2():
assert gruntz(x**x, x, 0, dir="+") == 1
assert gruntz((exp(x) - 1)/x, x, 0) == 1
assert gruntz(1 + 1/x, x, oo) == 1
assert gruntz(-exp(1/x), x, oo) == -1
assert gruntz(x + exp(-x), x, oo) == oo
assert gruntz(x + exp(-x**2), x, oo) == oo
assert gruntz(x + exp(-exp(x)), x, oo) == oo
assert gruntz(13 + 1/x - exp(-x), x, oo) == 13
def test_limit3():
a = Symbol('a')
assert gruntz(x - log(1 + exp(x)), x, oo) == 0
assert gruntz(x - log(a + exp(x)), x, oo) == 0
assert gruntz(exp(x)/(1 + exp(x)), x, oo) == 1
assert gruntz(exp(x)/(a + exp(x)), x, oo) == 1
def test_limit4():
#issue 3463
assert gruntz((3**x + 5**x)**(1/x), x, oo) == 5
#issue 3463
assert gruntz((3**(1/x) + 5**(1/x))**x, x, 0) == 5
@XFAIL
def test_MrvTestCase_page47_ex3_21():
h = exp(-x/(1 + exp(-x)))
expr = exp(h)*exp(-x/(1 + h))*exp(exp(-x + h))/h**2 - exp(x) + x
expected = set([1/h, exp(x), exp(x - h), exp(x/(1 + h))])
# XXX Incorrect result
assert mrv(expr, x).difference(expected) == set()
def test_I():
from sympy.functions import sign as sgn
y = Symbol("y")
assert gruntz(I*x, x, oo) == I*oo
assert gruntz(y*I*x, x, oo) == y*I*oo
assert gruntz(y*3*I*x, x, oo) == y*I*oo
assert gruntz(y*3*sin(I)*x, x, oo).simplify() == sgn(y)*I*oo
def test_issue_4814():
assert gruntz((x + 1)**(1/log(x + 1)), x, oo) == E
def test_intractable():
assert gruntz(1/gamma(x), x, oo) == 0
assert gruntz(1/loggamma(x), x, oo) == 0
assert gruntz(gamma(x)/loggamma(x), x, oo) == oo
assert gruntz(exp(gamma(x))/gamma(x), x, oo) == oo
assert gruntz(gamma(x), x, 3) == 2
assert gruntz(gamma(S(1)/7 + 1/x), x, oo) == gamma(S(1)/7)
assert gruntz(log(x**x)/log(gamma(x)), x, oo) == 1
assert gruntz(log(gamma(gamma(x)))/exp(x), x, oo) == oo
def test_aseries_trig():
assert cancel(gruntz(1/log(atan(x)), x, oo)
- 1/(log(pi) + log(S(1)/2))) == 0
assert gruntz(1/acot(x), x, -oo) == -oo
def test_exp_log_series():
assert gruntz(x/log(log(x*exp(x))), x, oo) == oo
def test_issue_3644():
assert gruntz(((x**7 + x + 1)/(2**x + x**2))**(-1/x), x, oo) == 2
def test_issue_6843():
n = Symbol('n', integer=True, positive=True)
r = (n + 1)*x**(n + 1)/(x**(n + 1) - 1) - x/(x - 1)
assert gruntz(r, x, 1).simplify() == n/2
def test_issue_4190():
assert gruntz(x - gamma(1/x), x, oo) == S.EulerGamma
@XFAIL
def test_issue_5172():
n = Symbol('n')
r = Symbol('r', positive=True)
c = Symbol('c')
p = Symbol('p', positive=True)
m = Symbol('m', negative=True)
expr = ((2*n*(n - r + 1)/(n + r*(n - r + 1)))**c + \
(r - 1)*(n*(n - r + 2)/(n + r*(n - r + 1)))**c - n)/(n**c - n)
expr = expr.subs(c, c + 1)
assert gruntz(expr.subs(c, m), n, oo) == 1
# fail:
assert gruntz(expr.subs(c, p), n, oo).simplify() == \
(2**(p + 1) + r - 1)/(r + 1)**(p + 1)
def test_issue_4109():
assert gruntz(1/gamma(x), x, 0) == 0
assert gruntz(x*gamma(x), x, 0) == 1
def test_issue_6682():
assert gruntz(exp(2*Ei(-x))/x**2, x, 0) == exp(2*EulerGamma)
def test_issue_7096():
from sympy.functions import sign
assert gruntz(x**-pi, x, 0, dir='-') == oo*sign((-1)**(-pi))
| mit |
MagicStack/asyncpg | asyncpg/transaction.py | 1 | 8297 | # Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import enum
from . import connresource
from . import exceptions as apg_errors
class TransactionState(enum.Enum):
NEW = 0
STARTED = 1
COMMITTED = 2
ROLLEDBACK = 3
FAILED = 4
ISOLATION_LEVELS = {'read_committed', 'serializable', 'repeatable_read'}
ISOLATION_LEVELS_BY_VALUE = {
'read committed': 'read_committed',
'serializable': 'serializable',
'repeatable read': 'repeatable_read',
}
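# A minimal usage sketch (illustrative only; assumes an already-established
# Connection instance ``con``):
#
#     async with con.transaction(isolation='serializable', readonly=True):
#         row = await con.fetchrow('SELECT 1')
#
# Entering the block runs Transaction.start() below (which issues BEGIN);
# leaving it commits, or rolls back if an exception escaped the block.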
class Transaction(connresource.ConnectionResource):
"""Represents a transaction or savepoint block.
Transactions are created by calling the
:meth:`Connection.transaction() <connection.Connection.transaction>`
function.
"""
__slots__ = ('_connection', '_isolation', '_readonly', '_deferrable',
'_state', '_nested', '_id', '_managed')
def __init__(self, connection, isolation, readonly, deferrable):
super().__init__(connection)
if isolation and isolation not in ISOLATION_LEVELS:
raise ValueError(
'isolation is expected to be either of {}, '
'got {!r}'.format(ISOLATION_LEVELS, isolation))
self._isolation = isolation
self._readonly = readonly
self._deferrable = deferrable
self._state = TransactionState.NEW
self._nested = False
self._id = None
self._managed = False
async def __aenter__(self):
if self._managed:
raise apg_errors.InterfaceError(
'cannot enter context: already in an `async with` block')
self._managed = True
await self.start()
async def __aexit__(self, extype, ex, tb):
try:
self._check_conn_validity('__aexit__')
except apg_errors.InterfaceError:
if extype is GeneratorExit:
# When a PoolAcquireContext is being exited, and there
# is an open transaction in an async generator that has
# not been iterated fully, there is a possibility that
# Pool.release() would race with this __aexit__(), since
# both would be in concurrent tasks. In such case we
# yield to Pool.release() to do the ROLLBACK for us.
# See https://github.com/MagicStack/asyncpg/issues/232
# for an example.
return
else:
raise
try:
if extype is not None:
await self.__rollback()
else:
await self.__commit()
finally:
self._managed = False
@connresource.guarded
async def start(self):
"""Enter the transaction or savepoint block."""
self.__check_state_base('start')
if self._state is TransactionState.STARTED:
raise apg_errors.InterfaceError(
'cannot start; the transaction is already started')
con = self._connection
if con._top_xact is None:
if con._protocol.is_in_transaction():
raise apg_errors.InterfaceError(
'cannot use Connection.transaction() in '
'a manually started transaction')
con._top_xact = self
else:
# Nested transaction block
if self._isolation:
top_xact_isolation = con._top_xact._isolation
if top_xact_isolation is None:
top_xact_isolation = ISOLATION_LEVELS_BY_VALUE[
await self._connection.fetchval(
'SHOW transaction_isolation;')]
if self._isolation != top_xact_isolation:
raise apg_errors.InterfaceError(
'nested transaction has a different isolation level: '
'current {!r} != outer {!r}'.format(
self._isolation, top_xact_isolation))
self._nested = True
if self._nested:
self._id = con._get_unique_id('savepoint')
query = 'SAVEPOINT {};'.format(self._id)
else:
query = 'BEGIN'
if self._isolation == 'read_committed':
query += ' ISOLATION LEVEL READ COMMITTED'
elif self._isolation == 'repeatable_read':
query += ' ISOLATION LEVEL REPEATABLE READ'
elif self._isolation == 'serializable':
query += ' ISOLATION LEVEL SERIALIZABLE'
if self._readonly:
query += ' READ ONLY'
if self._deferrable:
query += ' DEFERRABLE'
query += ';'
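            # At this point ``query`` is a complete statement, e.g.
            # 'BEGIN ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE;'
            # with all three options set, or simply 'BEGIN;' by default.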
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.STARTED
def __check_state_base(self, opname):
if self._state is TransactionState.COMMITTED:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is already committed'.format(
opname))
if self._state is TransactionState.ROLLEDBACK:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is already rolled back'.format(
opname))
if self._state is TransactionState.FAILED:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is in error state'.format(
opname))
def __check_state(self, opname):
if self._state is not TransactionState.STARTED:
if self._state is TransactionState.NEW:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is not yet started'.format(
opname))
self.__check_state_base(opname)
async def __commit(self):
self.__check_state('commit')
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = 'RELEASE SAVEPOINT {};'.format(self._id)
else:
query = 'COMMIT;'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.COMMITTED
async def __rollback(self):
self.__check_state('rollback')
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = 'ROLLBACK TO {};'.format(self._id)
else:
query = 'ROLLBACK;'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.ROLLEDBACK
@connresource.guarded
async def commit(self):
"""Exit the transaction or savepoint block and commit changes."""
if self._managed:
raise apg_errors.InterfaceError(
'cannot manually commit from within an `async with` block')
await self.__commit()
@connresource.guarded
async def rollback(self):
"""Exit the transaction or savepoint block and rollback changes."""
if self._managed:
raise apg_errors.InterfaceError(
'cannot manually rollback from within an `async with` block')
await self.__rollback()
def __repr__(self):
attrs = []
attrs.append('state:{}'.format(self._state.name.lower()))
if self._isolation is not None:
attrs.append(self._isolation)
if self._readonly:
attrs.append('readonly')
if self._deferrable:
attrs.append('deferrable')
if self.__class__.__module__.startswith('asyncpg.'):
mod = 'asyncpg'
else:
mod = self.__class__.__module__
return '<{}.{} {} {:#x}>'.format(
mod, self.__class__.__name__, ' '.join(attrs), id(self))
| apache-2.0 |
y-zeng/grpc | src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py | 15 | 19823 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests application-provided metadata, status code, and details."""
import threading
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit import test_common
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
_SERIALIZED_REQUEST = b'\x46\x47\x48'
_SERIALIZED_RESPONSE = b'\x49\x50\x51'
_REQUEST_SERIALIZER = lambda unused_request: _SERIALIZED_REQUEST
_REQUEST_DESERIALIZER = lambda unused_serialized_request: object()
_RESPONSE_SERIALIZER = lambda unused_response: _SERIALIZED_RESPONSE
_RESPONSE_DESERIALIZER = lambda unused_serialized_resopnse: object()
_SERVICE = 'test.TestService'
_UNARY_UNARY = 'UnaryUnary'
_UNARY_STREAM = 'UnaryStream'
_STREAM_UNARY = 'StreamUnary'
_STREAM_STREAM = 'StreamStream'
_CLIENT_METADATA = (
    ('client-md-key', 'client-md-value'),
('client-md-key-bin', b'\x00\x01')
)
_SERVER_INITIAL_METADATA = (
('server-initial-md-key', 'server-initial-md-value'),
('server-initial-md-key-bin', b'\x00\x02')
)
_SERVER_TRAILING_METADATA = (
('server-trailing-md-key', 'server-trailing-md-value'),
('server-trailing-md-key-bin', b'\x00\x03')
)
_NON_OK_CODE = grpc.StatusCode.NOT_FOUND
_DETAILS = 'Test details!'
class _Servicer(object):
def __init__(self):
self._lock = threading.Lock()
self._code = None
self._details = None
self._exception = False
self._return_none = False
self._received_client_metadata = None
def unary_unary(self, request, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
if self._exception:
raise test_control.Defect()
else:
return None if self._return_none else object()
def unary_stream(self, request, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
for _ in range(test_constants.STREAM_LENGTH // 2):
yield _SERIALIZED_RESPONSE
if self._exception:
raise test_control.Defect()
def stream_unary(self, request_iterator, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
# TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
# request iterator.
for ignored_request in request_iterator:
pass
if self._exception:
raise test_control.Defect()
else:
return None if self._return_none else _SERIALIZED_RESPONSE
def stream_stream(self, request_iterator, context):
with self._lock:
self._received_client_metadata = context.invocation_metadata()
context.send_initial_metadata(_SERVER_INITIAL_METADATA)
context.set_trailing_metadata(_SERVER_TRAILING_METADATA)
if self._code is not None:
context.set_code(self._code)
if self._details is not None:
context.set_details(self._details)
# TODO(https://github.com/grpc/grpc/issues/6891): just ignore the
# request iterator.
for ignored_request in request_iterator:
pass
for _ in range(test_constants.STREAM_LENGTH // 3):
yield object()
if self._exception:
raise test_control.Defect()
def set_code(self, code):
with self._lock:
self._code = code
def set_details(self, details):
with self._lock:
self._details = details
def set_exception(self):
with self._lock:
self._exception = True
def set_return_none(self):
with self._lock:
self._return_none = True
def received_client_metadata(self):
with self._lock:
return self._received_client_metadata
def _generic_handler(servicer):
method_handlers = {
_UNARY_UNARY: grpc.unary_unary_rpc_method_handler(
servicer.unary_unary, request_deserializer=_REQUEST_DESERIALIZER,
response_serializer=_RESPONSE_SERIALIZER),
_UNARY_STREAM: grpc.unary_stream_rpc_method_handler(
servicer.unary_stream),
_STREAM_UNARY: grpc.stream_unary_rpc_method_handler(
servicer.stream_unary),
_STREAM_STREAM: grpc.stream_stream_rpc_method_handler(
servicer.stream_stream, request_deserializer=_REQUEST_DESERIALIZER,
response_serializer=_RESPONSE_SERIALIZER),
}
return grpc.method_handlers_generic_handler(_SERVICE, method_handlers)
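# A minimal wiring sketch mirroring setUp() below (illustrative; the pool
# size of 1 is arbitrary):
#
#     servicer = _Servicer()
#     server = grpc.server(logging_pool.pool(1),
#                          handlers=(_generic_handler(servicer),))
#     port = server.add_insecure_port('[::]:0')
#     server.start()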
class MetadataCodeDetailsTest(unittest.TestCase):
def setUp(self):
self._servicer = _Servicer()
self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(
self._server_pool, handlers=(_generic_handler(self._servicer),))
port = self._server.add_insecure_port('[::]:0')
self._server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
self._unary_unary = channel.unary_unary(
'/'.join(('', _SERVICE, _UNARY_UNARY,)),
request_serializer=_REQUEST_SERIALIZER,
response_deserializer=_RESPONSE_DESERIALIZER,)
self._unary_stream = channel.unary_stream(
'/'.join(('', _SERVICE, _UNARY_STREAM,)),)
self._stream_unary = channel.stream_unary(
'/'.join(('', _SERVICE, _STREAM_UNARY,)),)
self._stream_stream = channel.stream_stream(
'/'.join(('', _SERVICE, _STREAM_STREAM,)),
request_serializer=_REQUEST_SERIALIZER,
response_deserializer=_RESPONSE_DESERIALIZER,)
def testSuccessfulUnaryUnary(self):
self._servicer.set_details(_DETAILS)
unused_response, call = self._unary_unary.with_call(
object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, call.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, call.trailing_metadata()))
self.assertIs(grpc.StatusCode.OK, call.code())
self.assertEqual(_DETAILS, call.details())
def testSuccessfulUnaryStream(self):
self._servicer.set_details(_DETAILS)
call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
received_initial_metadata = call.initial_metadata()
for _ in call:
pass
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, call.trailing_metadata()))
self.assertIs(grpc.StatusCode.OK, call.code())
self.assertEqual(_DETAILS, call.details())
def testSuccessfulStreamUnary(self):
self._servicer.set_details(_DETAILS)
unused_response, call = self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, call.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, call.trailing_metadata()))
self.assertIs(grpc.StatusCode.OK, call.code())
self.assertEqual(_DETAILS, call.details())
def testSuccessfulStreamStream(self):
self._servicer.set_details(_DETAILS)
call = self._stream_stream(
iter([object()] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
received_initial_metadata = call.initial_metadata()
for _ in call:
pass
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, call.trailing_metadata()))
self.assertIs(grpc.StatusCode.OK, call.code())
self.assertEqual(_DETAILS, call.details())
def testCustomCodeUnaryUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
with self.assertRaises(grpc.RpcError) as exception_context:
self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeUnaryStream(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
received_initial_metadata = call.initial_metadata()
with self.assertRaises(grpc.RpcError):
for _ in call:
pass
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, call.trailing_metadata()))
self.assertIs(_NON_OK_CODE, call.code())
self.assertEqual(_DETAILS, call.details())
def testCustomCodeStreamUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
with self.assertRaises(grpc.RpcError) as exception_context:
self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeStreamStream(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
call = self._stream_stream(
iter([object()] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
received_initial_metadata = call.initial_metadata()
with self.assertRaises(grpc.RpcError) as exception_context:
for _ in call:
pass
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeExceptionUnaryUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_exception()
with self.assertRaises(grpc.RpcError) as exception_context:
self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeExceptionUnaryStream(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_exception()
call = self._unary_stream(_SERIALIZED_REQUEST, metadata=_CLIENT_METADATA)
received_initial_metadata = call.initial_metadata()
with self.assertRaises(grpc.RpcError):
for _ in call:
pass
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, call.trailing_metadata()))
self.assertIs(_NON_OK_CODE, call.code())
self.assertEqual(_DETAILS, call.details())
def testCustomCodeExceptionStreamUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_exception()
with self.assertRaises(grpc.RpcError) as exception_context:
self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeExceptionStreamStream(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_exception()
call = self._stream_stream(
iter([object()] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
received_initial_metadata = call.initial_metadata()
with self.assertRaises(grpc.RpcError):
for _ in call:
pass
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA, received_initial_metadata))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA, call.trailing_metadata()))
self.assertIs(_NON_OK_CODE, call.code())
self.assertEqual(_DETAILS, call.details())
def testCustomCodeReturnNoneUnaryUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_return_none()
with self.assertRaises(grpc.RpcError) as exception_context:
self._unary_unary.with_call(object(), metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
def testCustomCodeReturnNoneStreamUnary(self):
self._servicer.set_code(_NON_OK_CODE)
self._servicer.set_details(_DETAILS)
self._servicer.set_return_none()
with self.assertRaises(grpc.RpcError) as exception_context:
self._stream_unary.with_call(
iter([_SERIALIZED_REQUEST] * test_constants.STREAM_LENGTH),
metadata=_CLIENT_METADATA)
self.assertTrue(
test_common.metadata_transmitted(
_CLIENT_METADATA, self._servicer.received_client_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_INITIAL_METADATA,
exception_context.exception.initial_metadata()))
self.assertTrue(
test_common.metadata_transmitted(
_SERVER_TRAILING_METADATA,
exception_context.exception.trailing_metadata()))
self.assertIs(_NON_OK_CODE, exception_context.exception.code())
self.assertEqual(_DETAILS, exception_context.exception.details())
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause |
zangree/ryu | ryu/services/protocols/bgp/operator/commands/show/count.py | 52 | 1840 | import logging
from ryu.services.protocols.bgp.operator.command import Command
from ryu.services.protocols.bgp.operator.command import CommandsResponse
from ryu.services.protocols.bgp.operator.command import STATUS_ERROR
from ryu.services.protocols.bgp.operator.command import STATUS_OK
from ryu.services.protocols.bgp.operator.commands.responses import \
WrongParamResp
LOG = logging.getLogger('bgpspeaker.operator.commands.show.count')
class Count(Command):
help_msg = 'show counters'
param_help_msg = '<vpn-name> <route-family>{ipv4, ipv6}'
command = 'count'
cli_resp_line_template = 'BGP route count for VPN {0} is {1}\n'
def __init__(self, *args, **kwargs):
super(Count, self).__init__(*args, **kwargs)
self.subcommands = {
'all': self.All
}
def action(self, params):
if len(params) < 1:
return CommandsResponse(STATUS_ERROR, 'Not enough params')
else:
vrf_name = params[0]
if len(params) == 2:
vrf_rf = params[1]
else:
vrf_rf = 'ipv4'
from ryu.services.protocols.bgp.operator.internal_api import \
WrongParamError
try:
return CommandsResponse(
STATUS_OK,
self.api.count_single_vrf_routes(vrf_name, vrf_rf)
)
except WrongParamError as e:
return WrongParamResp(e)
class All(Command):
help_msg = 'shows number of routes for all VRFs'
command = 'all'
cli_resp_line_template = 'BGP route count for VPN {0} is {1}\n'
def action(self, params):
if len(params) > 0:
return WrongParamResp()
return CommandsResponse(STATUS_OK, self.api.count_all_vrf_routes())
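# Illustrative CLI invocations for this command within the operator 'show'
# tree (the VRF name 'vrf1' is hypothetical): ``count vrf1 ipv4`` counts
# routes for a single VRF, while ``count all`` dispatches to the nested
# All command and count_all_vrf_routes().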
| apache-2.0 |
rnicoll/bitcoin | test/functional/feature_settings.py | 30 | 3812 | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import json
from pathlib import Path
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class SettingsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.wallet_names = []
def run_test(self):
node, = self.nodes
settings = Path(node.datadir, self.chain, "settings.json")
conf = Path(node.datadir, "bitcoin.conf")
# Assert empty settings file was created
self.stop_node(0)
with settings.open() as fp:
assert_equal(json.load(fp), {})
# Assert settings are parsed and logged
with settings.open("w") as fp:
json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]}, fp)
with node.assert_debug_log(expected_msgs=[
'Ignoring unknown rw_settings value bool',
'Ignoring unknown rw_settings value list',
'Ignoring unknown rw_settings value null',
'Ignoring unknown rw_settings value num',
'Ignoring unknown rw_settings value string',
'Setting file arg: string = "string"',
'Setting file arg: num = 5',
'Setting file arg: bool = true',
'Setting file arg: null = null',
'Setting file arg: list = [6,7]',
]):
self.start_node(0)
self.stop_node(0)
# Assert settings are unchanged after shutdown
with settings.open() as fp:
assert_equal(json.load(fp), {"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]})
# Test invalid json
with settings.open("w") as fp:
fp.write("invalid json")
node.assert_start_raises_init_error(expected_msg='Unable to parse settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid json object
with settings.open("w") as fp:
fp.write('"string"')
node.assert_start_raises_init_error(expected_msg='Found non-object value "string" in settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid settings file containing duplicate keys
with settings.open("w") as fp:
fp.write('{"key": 1, "key": 2}')
node.assert_start_raises_init_error(expected_msg='Found duplicate key key in settings file', match=ErrorMatch.PARTIAL_REGEX)
# Test invalid settings file is ignored with command line -nosettings
with node.assert_debug_log(expected_msgs=['Command-line arg: settings=false']):
self.start_node(0, extra_args=["-nosettings"])
self.stop_node(0)
# Test invalid settings file is ignored with config file -nosettings
with conf.open('a') as conf:
conf.write('nosettings=1\n')
with node.assert_debug_log(expected_msgs=['Config file arg: [regtest] settings=false']):
self.start_node(0)
self.stop_node(0)
# Test alternate settings path
altsettings = Path(node.datadir, "altsettings.json")
with altsettings.open("w") as fp:
fp.write('{"key": "value"}')
with node.assert_debug_log(expected_msgs=['Setting file arg: key = "value"']):
self.start_node(0, extra_args=["-settings={}".format(altsettings)])
self.stop_node(0)
if __name__ == '__main__':
SettingsTest().main()
| mit |
AntonPalich/sublime-evernote | lib/pygments/formatters/rtf.py | 9 | 4535 | # -*- coding: utf-8 -*-
"""
pygments.formatters.rtf
~~~~~~~~~~~~~~~~~~~~~~~
A formatter that generates RTF files.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
__all__ = ['RtfFormatter']
class RtfFormatter(Formatter):
"""
Format tokens as RTF markup. This formatter automatically outputs full RTF
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft® Word® documents.
.. versionadded:: 0.6
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`fontface`
        The font family used, for example ``Bitstream Vera Sans``. Defaults to
some generic font which is supposed to have fixed width.
"""
name = 'RTF'
aliases = ['rtf']
filenames = ['*.rtf']
unicodeoutput = False
def __init__(self, **options):
"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
to further specify the default which is ``'\fmodern'``. The RTF
specification claims that ``\fmodern`` are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
def _escape(self, text):
return text.replace('\\', '\\\\') \
.replace('{', '\\{') \
.replace('}', '\\}')
def _escape_text(self, text):
        # empty strings, should give a small performance improvement
if not text:
return ''
# escape text
text = self._escape(text)
if self.encoding in ('utf-8', 'utf-16', 'utf-32'):
encoding = 'iso-8859-15'
else:
encoding = self.encoding or 'iso-8859-15'
buf = []
for c in text:
if ord(c) > 128:
ansic = c.encode(encoding, 'ignore') or '?'
if ord(ansic) > 128:
ansic = '\\\'%x' % ord(ansic)
else:
ansic = c
buf.append(r'\ud{\u%d%s}' % (ord(c), ansic))
else:
buf.append(str(c))
return ''.join(buf).replace('\n', '\\par\n')
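    # Worked example for _escape_text() above (assuming the iso-8859-15
    # fallback is used): u'\xe9' (e-acute) becomes \ud{\u233\'e9}, i.e. the
    # Unicode escape \u233 paired with the ANSI substitute \'e9.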
def format_unencoded(self, tokensource, outfile):
# rtf 1.8 header
outfile.write(r'{\rtf1\ansi\deff0'
r'{\fonttbl{\f0\fmodern\fprq1\fcharset0%s;}}'
r'{\colortbl;' % (self.fontface and
' ' + self._escape(self.fontface) or
''))
# convert colors and save them in a mapping to access them later.
color_mapping = {}
offset = 1
for _, style in self.style:
for color in style['color'], style['bgcolor'], style['border']:
if color and color not in color_mapping:
color_mapping[color] = offset
outfile.write(r'\red%d\green%d\blue%d;' % (
int(color[0:2], 16),
int(color[2:4], 16),
int(color[4:6], 16)
))
offset += 1
outfile.write(r'}\f0')
# highlight stream
for ttype, value in tokensource:
while not self.style.styles_token(ttype) and ttype.parent:
ttype = ttype.parent
style = self.style.style_for_token(ttype)
buf = []
if style['bgcolor']:
buf.append(r'\cb%d' % color_mapping[style['bgcolor']])
if style['color']:
buf.append(r'\cf%d' % color_mapping[style['color']])
if style['bold']:
buf.append(r'\b')
if style['italic']:
buf.append(r'\i')
if style['underline']:
buf.append(r'\ul')
if style['border']:
buf.append(r'\chbrdr\chcfpat%d' %
color_mapping[style['border']])
start = ''.join(buf)
if start:
outfile.write('{%s ' % start)
outfile.write(self._escape_text(value))
if start:
outfile.write('}')
outfile.write('}')
| mit |
turtledb/0install | zeroinstall/injector/qdom.py | 1 | 3485 | """A quick DOM implementation.
Python's xml.dom is very slow. The xml.sax module is also slow (as it imports urllib2).
This is our light-weight version.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from xml.parsers import expat
import zeroinstall
from zeroinstall.injector import versions
_parsed_version = versions.parse_version(zeroinstall.version)
class Element(object):
"""An XML element.
@ivar uri: the element's namespace
@type uri: str
@ivar name: the element's localName
@type name: str
@ivar attrs: the element's attributes (key is in the form [namespace " "] localName)
@type attrs: {str: str}
@ivar childNodes: children
@type childNodes: [L{Element}]
@ivar content: the text content
@type content: str"""
__slots__ = ['uri', 'name', 'attrs', 'childNodes', 'content']
def __init__(self, uri, name, attrs):
"""@type uri: str
@type name: str
@type attrs: {str: str}"""
self.uri = uri
self.name = name
self.attrs = attrs.copy()
self.content = None
self.childNodes = []
def __str__(self):
"""@rtype: str"""
attrs = [n + '=' + self.attrs[n] for n in self.attrs]
start = '<{%s}%s %s' % (self.uri, self.name, ' '.join(attrs))
if self.childNodes:
return start + '>' + '\n'.join(map(str, self.childNodes)) + ('</%s>' % (self.name))
elif self.content:
return start + '>' + self.content + ('</%s>' % (self.name))
else:
return start + '/>'
def getAttribute(self, name):
"""@type name: str
@rtype: str"""
return self.attrs.get(name, None)
class QSAXhandler(object):
"""SAXHandler that builds a tree of L{Element}s"""
def __init__(self, filter_for_version = False):
"""@param filter_for_version: skip elements if their if-0install-version attribute doesn't match L{zeroinstall.version} (since 1.13).
        @type filter_for_version: bool"""
self.stack = []
if filter_for_version:
self.filter_range = lambda expr: versions.parse_version_expression(expr)(_parsed_version)
else:
self.filter_range = lambda x: True
def startElementNS(self, fullname, attrs):
"""@type fullname: str
@type attrs: {str: str}"""
split = fullname.split(' ', 1)
if len(split) == 2:
self.stack.append(Element(split[0], split[1], attrs))
else:
self.stack.append(Element(None, fullname, attrs))
self.contents = ''
def characters(self, data):
"""@type data: str"""
self.contents += data
def endElementNS(self, name):
"""@type name: str"""
contents = self.contents.strip()
self.stack[-1].content = contents
self.contents = ''
new = self.stack.pop()
if self.stack:
target_versions = new.attrs.get('if-0install-version')
if target_versions and not self.filter_range(target_versions):
return
self.stack[-1].childNodes.append(new)
else:
self.doc = new
def parse(source, filter_for_version = False):
"""Parse an XML stream into a tree of L{Element}s.
@param source: data to parse
@type source: file
@param filter_for_version: skip elements if their if-0install-version attribute doesn't match L{zeroinstall.version} (since 1.13).
@type filter_for_version: bool
@return: the root
@rtype: L{Element}"""
handler = QSAXhandler(filter_for_version)
parser = expat.ParserCreate(namespace_separator = ' ')
parser.StartElementHandler = handler.startElementNS
parser.EndElementHandler = handler.endElementNS
parser.CharacterDataHandler = handler.characters
parser.ParseFile(source)
return handler.doc
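# A minimal usage sketch (the feed path is hypothetical):
#
#     with open('feed.xml', 'rb') as stream:
#         root = parse(stream)
#         print(root.uri, root.name, root.getAttribute('id'))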
| lgpl-2.1 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/views/decorators/vary.py | 329 | 1197 | from functools import wraps
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
"""
A view decorator that adds the specified headers to the Vary header of the
response. Usage:
@vary_on_headers('Cookie', 'Accept-language')
def index(request):
...
Note that the header names are not case-sensitive.
"""
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, headers)
return response
return inner_func
return decorator
def vary_on_cookie(func):
"""
A view decorator that adds "Cookie" to the Vary header of a response. This
indicates that a page's contents depends on cookies. Usage:
@vary_on_cookie
def index(request):
...
"""
@wraps(func, assigned=available_attrs(func))
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, ('Cookie',))
return response
return inner_func
| bsd-3-clause |
rkharwar/ubuntu-saucy-powerpc | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
Lukiqq/GT-I9100-Galaxian-ICS-Kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
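# Worked example: for value 0x05 (== 0x01 | 0x04) trace_flag_str() emits
# "IRQS_OFF" and "NEED_RESCHED" joined by " | " (the order follows dict
# iteration, which is unspecified here).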
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
thinkopensolutions/hr | hr_language/models/hr_language.py | 23 | 1774 | # -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import tools
from openerp import models, fields
class hr_language(models.Model):
_name = 'hr.language'
name = fields.Selection(
tools.scan_languages(),
string=u"Language", required=True)
description = fields.Char(
string=u"Description", size=64, required=True)
employee_id = fields.Many2one(
'hr.employee', string=u"Employee", required=True)
can_read = fields.Boolean(u"Read", default=True, oldname='read')
can_write = fields.Boolean(u"Write", default=True, oldname='write')
can_speak = fields.Boolean(u"Speak", default=True, oldname='speak')
class hr_employee(models.Model):
_inherit = 'hr.employee'
language_ids = fields.One2many(
'hr.language', 'employee_id', u"Languages")
| agpl-3.0 |
andyh616/mne-python | mne/forward/_compute_forward.py | 8 | 34968 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larsoner@uw.edu>
# Mark Wronkiewicz <wronk@uw.edu>
#
# License: BSD (3-clause)
#
# Many of the idealized equations behind these calculations can be found in:
# 1) Realistic conductivity geometry model of the human head for interpretation
# of neuromagnetic data. Hamalainen and Sarvas, 1989. Specific to MNE
# 2) EEG and MEG: forward solutions for inverse methods. Mosher, Leahy, and
# Lewis, 1999. Generalized discussion of forward solutions.
import numpy as np
from copy import deepcopy
from ..surface import (fast_cross_3d, _find_nearest_tri_pt, _get_tri_supp_geom,
_triangle_coords)
from ..io.constants import FIFF
from ..transforms import apply_trans
from ..utils import logger, verbose
from ..parallel import parallel_func
from ..io.compensator import get_current_comp, make_compensator
from ..io.pick import pick_types
# #############################################################################
# COIL SPECIFICATION AND FIELD COMPUTATION MATRIX
def _dup_coil_set(coils, coord_frame, t):
"""Make a duplicate."""
if t is not None and coord_frame != t['from']:
raise RuntimeError('transformation frame does not match the coil set')
coils = deepcopy(coils)
if t is not None:
coord_frame = t['to']
for coil in coils:
coil['r0'] = apply_trans(t['trans'], coil['r0'])
coil['ex'] = apply_trans(t['trans'], coil['ex'], False)
coil['ey'] = apply_trans(t['trans'], coil['ey'], False)
coil['ez'] = apply_trans(t['trans'], coil['ez'], False)
coil['rmag'] = apply_trans(t['trans'], coil['rmag'])
coil['cosmag'] = apply_trans(t['trans'], coil['cosmag'], False)
coil['coord_frame'] = t['to']
return coils, coord_frame
def _check_coil_frame(coils, coord_frame, bem):
"""Check to make sure the coils are in the correct coordinate frame."""
if coord_frame != FIFF.FIFFV_COORD_MRI:
if coord_frame == FIFF.FIFFV_COORD_HEAD:
# Make a transformed duplicate
            coils, coord_frame = _dup_coil_set(coils, coord_frame,
bem['head_mri_t'])
else:
raise RuntimeError('Bad coil coordinate frame %s' % coord_frame)
return coils, coord_frame
def _lin_field_coeff(surf, mult, rmags, cosmags, ws, n_int, n_jobs):
"""Parallel wrapper for _do_lin_field_coeff to compute linear coefficients.
Parameters
----------
surf : dict
Dict containing information for one surface of the BEM
mult : float
        Multiplier for the particular BEM surface (the isolated-skull
        approach discussed in Mosher et al., 1999 and Hamalainen and
        Sarvas, 1989, Section III)
    rmags : ndarray, shape (n_integration_pts, 3)
        3D positions of MEG coil integration points (from coil['rmag'])
    cosmags : ndarray, shape (n_integration_pts, 3)
        Direction of the MEG coil integration points (from coil['cosmag'])
ws : ndarray, shape (n_sensor_pts,)
Weights for MEG coil integration points
n_int : ndarray, shape (n_MEG_sensors,)
Number of integration points for each MEG sensor
n_jobs : int
Number of jobs to run in parallel
Returns
-------
    coeff : ndarray, shape (n_MEG_sensors, n_BEM_vertices_on_surface)
        Linear field coefficients giving the contribution of each BEM
        vertex on this surface to each sensor's measurement
"""
parallel, p_fun, _ = parallel_func(_do_lin_field_coeff, n_jobs)
nas = np.array_split
coeffs = parallel(p_fun(surf['rr'], t, tn, ta, rmags, cosmags, ws, n_int)
for t, tn, ta in zip(nas(surf['tris'], n_jobs),
nas(surf['tri_nn'], n_jobs),
nas(surf['tri_area'], n_jobs)))
return mult * np.sum(coeffs, axis=0)
def _do_lin_field_coeff(bem_rr, tris, tn, ta, rmags, cosmags, ws, n_int):
"""Compute field coefficients (parallel-friendly).
See section IV of Mosher et al., 1999 (specifically equation 35).
Parameters
----------
bem_rr : ndarray, shape (n_BEM_vertices, 3)
Positions on one BEM surface in 3-space. 2562 BEM vertices for BEM with
5120 triangles (ico-4)
tris : ndarray, shape (n_BEM_vertices, 3)
Vertex indices for each triangle (referring to bem_rr)
tn : ndarray, shape (n_BEM_vertices, 3)
Triangle unit normal vectors
ta : ndarray, shape (n_BEM_vertices,)
Triangle areas
    rmags : ndarray, shape (n_sensor_pts, 3)
        3D positions of MEG coil integration points (from coil['rmag'])
    cosmags : ndarray, shape (n_sensor_pts, 3)
        Direction of the MEG coil integration points (from coil['cosmag'])
ws : ndarray, shape (n_sensor_pts,)
Weights for MEG coil integration points
n_int : ndarray, shape (n_MEG_sensors,)
Number of integration points for each MEG sensor
Returns
-------
coeff : ndarray, shape (n_MEG_sensors, n_BEM_vertices)
        Linear coefficients giving the effect of each BEM vertex on each sensor
"""
coeff = np.zeros((len(n_int), len(bem_rr)))
bins = np.repeat(np.arange(len(n_int)), n_int)
for tri, tri_nn, tri_area in zip(tris, tn, ta):
# Accumulate the coefficients for each triangle node and add to the
# corresponding coefficient matrix
tri_rr = bem_rr[tri]
# The following is equivalent to:
# for j, coil in enumerate(coils['coils']):
# x = func(coil['rmag'], coil['cosmag'],
# tri_rr, tri_nn, tri_area)
# res = np.sum(coil['w'][np.newaxis, :] * x, axis=1)
# coeff[j][tri + off] += mult * res
# Simple version (bem_lin_field_coeffs_simple)
zz = []
for trr in tri_rr:
diff = rmags - trr
dl = np.sum(diff * diff, axis=1)
c = fast_cross_3d(diff, tri_nn[np.newaxis, :])
x = tri_area * np.sum(c * cosmags, axis=1) / \
(3.0 * dl * np.sqrt(dl))
zz += [np.bincount(bins, weights=x * ws, minlength=len(n_int))]
coeff[:, tri] += np.array(zz).T
return coeff
def _concatenate_coils(coils):
"""Helper to concatenate MEG coil parameters."""
rmags = np.concatenate([coil['rmag'] for coil in coils])
cosmags = np.concatenate([coil['cosmag'] for coil in coils])
ws = np.concatenate([coil['w'] for coil in coils])
n_int = np.array([len(coil['rmag']) for coil in coils])
return rmags, cosmags, ws, n_int
def _bem_specify_coils(bem, coils, coord_frame, mults, n_jobs):
"""Set up for computing the solution at a set of MEG coils.
Parameters
----------
bem : dict
BEM information
coils : list of dict, len(n_MEG_sensors)
MEG sensor information dicts
coord_frame : int
Class constant identifying coordinate frame
mults : ndarray, shape (1, n_BEM_vertices)
Multiplier for every vertex in BEM
n_jobs : int
Number of jobs to run in parallel
Returns
-------
sol: ndarray, shape (n_MEG_sensors, n_BEM_vertices)
MEG solution
"""
# Make sure MEG coils are in MRI coordinate frame to match BEM coords
coils, coord_frame = _check_coil_frame(coils, coord_frame, bem)
# leaving this in in case we want to easily add in the future
# if method != 'simple': # in ['ferguson', 'urankar']:
# raise NotImplementedError
# Compute the weighting factors to obtain the magnetic field in the linear
# potential approximation
# Process each of the surfaces
rmags, cosmags, ws, n_int = _concatenate_coils(coils)
lens = np.cumsum(np.r_[0, [len(s['rr']) for s in bem['surfs']]])
coeff = np.empty((len(n_int), lens[-1])) # shape(n_coils, n_BEM_verts)
# Compute coeffs for each surface, one at a time
for o1, o2, surf, mult in zip(lens[:-1], lens[1:],
bem['surfs'], bem['field_mult']):
coeff[:, o1:o2] = _lin_field_coeff(surf, mult, rmags, cosmags, ws,
n_int, n_jobs)
# put through the bem
sol = np.dot(coeff, bem['solution'])
sol *= mults
return sol
def _bem_specify_els(bem, els, mults):
"""Set up for computing the solution at a set of EEG electrodes.
Parameters
----------
bem : dict
BEM information
els : list of dict, len(n_EEG_sensors)
List of EEG sensor information dicts
mults: ndarray, shape (1, n_BEM_vertices)
Multiplier for every vertex in BEM
Returns
-------
sol : ndarray, shape (n_EEG_sensors, n_BEM_vertices)
EEG solution
"""
sol = np.zeros((len(els), bem['solution'].shape[1]))
scalp = bem['surfs'][0]
# Get supplementary geometry information for tris and rr
scalp['geom'] = _get_tri_supp_geom(scalp['tris'], scalp['rr'])
    inds = np.arange(len(scalp['tris']))  # indices of every scalp BEM triangle
# Iterate over all electrodes
# In principle this could be parallelized, but pickling overhead is huge
# (makes it slower than non-parallel)
for k, el in enumerate(els):
# Get electrode and reference position in head coords
el_r = apply_trans(bem['head_mri_t']['trans'], el['rmag'])
# Iterate over all integration points
for elw, r in zip(el['w'], el_r):
# Get index of closest tri on scalp BEM to electrode position
best = _find_nearest_tri_pt(inds, r, scalp['geom'], True)[2]
# Calculate a linear interpolation between the vertex values
tri = scalp['tris'][best] # Get 3 vertex indices of closest tri
# Get coords of pt projected onto closest triangle
x, y, z = _triangle_coords(r, scalp['geom'], best)
w = elw * np.array([(1.0 - x - y), x, y])
amt = np.dot(w, bem['solution'][tri])
sol[k] += amt
sol *= mults
return sol
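# Added note: (1 - x - y, x, y) are the barycentric coordinates of the point
# projected onto the closest scalp triangle, so each electrode's value is a
# convex combination of the BEM solution at that triangle's three vertices.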
# #############################################################################
# COMPENSATION
def _make_ctf_comp_coils(info, coils):
"""Get the correct compensator for CTF coils."""
# adapted from mne_make_ctf_comp() from mne_ctf_comp.c
logger.info('Setting up compensation data...')
comp_num = get_current_comp(info)
if comp_num is None or comp_num == 0:
logger.info(' No compensation set. Nothing more to do.')
return None
# Need to meaningfully populate comp['set'] dict a.k.a. compset
n_comp_ch = sum([c['kind'] == FIFF.FIFFV_MEG_CH for c in info['chs']])
logger.info(' %d out of %d channels have the compensation set.'
% (n_comp_ch, len(coils)))
# Find the desired compensation data matrix
compensator = make_compensator(info, 0, comp_num, True)
logger.info(' Desired compensation data (%s) found.' % comp_num)
logger.info(' All compensation channels found.')
logger.info(' Preselector created.')
logger.info(' Compensation data matrix created.')
logger.info(' Postselector created.')
return compensator
# #############################################################################
# BEM COMPUTATION
_MAG_FACTOR = 1e-7 # μ_0 / (4π)
# def _bem_inf_pot(rd, Q, rp):
# """The infinite medium potential in one direction. See Eq. (8) in
# Mosher, 1999"""
# NOTE: the μ_0 / (4π) factor has been moved to _prep_field_computation
# diff = rp - rd # (Observation point position) - (Source position)
# diff2 = np.sum(diff * diff, axis=1) # Squared magnitude of diff
# # (Dipole moment) dot (diff) / (magnitude ^ 3)
# return np.sum(Q * diff, axis=1) / (diff2 * np.sqrt(diff2))
def _bem_inf_pots(mri_rr, bem_rr, mri_Q=None):
"""Compute the infinite medium potential in all 3 directions.
Parameters
----------
mri_rr : ndarray, shape (n_dipole_vertices, 3)
Chunk of 3D dipole positions in MRI coordinates
bem_rr: ndarray, shape (n_BEM_vertices, 3)
3D vertex positions for one BEM surface
mri_Q : ndarray, shape (3, 3)
3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
Returns
-------
ndarray, shape (n_dipole_vertices, 3, n_BEM_vertices)
"""
# NOTE: the μ_0 / (4π) factor has been moved to _prep_field_computation
# Get position difference vector between BEM vertex and dipole
diff = bem_rr.T[np.newaxis, :, :] - mri_rr[:, :, np.newaxis]
diff_norm = np.sum(diff * diff, axis=1)
diff_norm *= np.sqrt(diff_norm) # Position difference magnitude cubed
diff_norm[diff_norm == 0] = 1 # avoid nans
if mri_Q is None: # save time when mri_Q=np.eye(3) (e.g., MEG sensors)
return diff / diff_norm[:, np.newaxis, :]
else: # get components in each direction (e.g., EEG sensors)
return np.einsum('ijk,mj->imk', diff, mri_Q) / diff_norm[:,
np.newaxis, :]
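# Shape sketch (added, illustrative): with n dipoles and m BEM vertices,
# diff is (n, 3, m) and diff_norm is (n, m); the einsum 'ijk,mj->imk'
# projects the difference vectors onto the rows of mri_Q. For example:
#
#     mri_rr = np.zeros((4, 3))
#     bem_rr = np.ones((7, 3))
#     _bem_inf_pots(mri_rr, bem_rr).shape             # (4, 3, 7)
#     _bem_inf_pots(mri_rr, bem_rr, np.eye(3)).shape  # (4, 3, 7)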
# This function has been refactored to process all points simultaneously
# def _bem_inf_field(rd, Q, rp, d):
# """Infinite-medium magnetic field. See (7) in Mosher, 1999"""
# # Get vector from source to sensor integration point
# diff = rp - rd
# diff2 = np.sum(diff * diff, axis=1) # Get magnitude of diff
#
# # Compute cross product between diff and dipole to get magnetic field at
# # integration point
# x = fast_cross_3d(Q[np.newaxis, :], diff)
#
# # Take magnetic field dotted by integration point normal to get magnetic
# # field threading the current loop. Divide by R^3 (equivalently, R^2 * R)
# return np.sum(x * d, axis=1) / (diff2 * np.sqrt(diff2))
def _bem_inf_fields(rr, rmag, cosmag):
"""Compute infinite-medium magnetic field at one MEG sensor from all
dipoles in all 3 basis directions.
Parameters
----------
rr : ndarray, shape (n_source_points, 3)
3D dipole source positions
rmag : ndarray, shape (n_sensor points, 3)
3D positions of 1 MEG coil's integration points (from coil['rmag'])
cosmag : ndarray, shape (n_sensor_points, 3)
Direction of 1 MEG coil's integration points (from coil['cosmag'])
Returns
-------
ndarray, shape (n_dipoles, 3, n_integration_pts)
Magnetic field from all dipoles at each MEG sensor integration point
"""
# rr, rmag refactored according to Equation (19) in Mosher, 1999
# Knowing that we're doing all directions, refactor above function:
diff = rmag.T[np.newaxis, :, :] - rr[:, :, np.newaxis]
diff_norm = np.sum(diff * diff, axis=1)
diff_norm *= np.sqrt(diff_norm) # Get magnitude of distance cubed
diff_norm[diff_norm == 0] = 1 # avoid nans
# This is the result of cross-prod calcs with basis vectors,
# as if we had taken (Q=np.eye(3)), then multiplied by cosmags
# factor, and then summed across directions
x = np.array([diff[:, 1] * cosmag[:, 2] - diff[:, 2] * cosmag[:, 1],
diff[:, 2] * cosmag[:, 0] - diff[:, 0] * cosmag[:, 2],
diff[:, 0] * cosmag[:, 1] - diff[:, 1] * cosmag[:, 0]])
return np.rollaxis(x / diff_norm, 1)
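# Added note: x stacks the three basis-direction cross products into shape
# (3, n_dipoles, n_pts); rollaxis moves the dipole axis to the front so the
# result matches the documented (n_dipoles, 3, n_integration_pts) layout.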
def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs,
coil_type):
"""Calculate the magnetic field or electric potential forward solution.
The code is very similar between EEG and MEG potentials, so combine them.
This does the work of "fwd_comp_field" (which wraps to "fwd_bem_field")
and "fwd_bem_pot_els" in MNE-C.
Parameters
----------
rr : ndarray, shape (n_dipoles, 3)
3D dipole source positions
mri_rr : ndarray, shape (n_dipoles, 3)
3D source positions in MRI coordinates
mri_Q : ndarray, shape (3, 3)
3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
coils : list of dict, len(sensors)
List of sensors where each element contains sensor specific information
solution : ndarray, shape (n_sensors, n_BEM_rr)
Comes from _bem_specify_coils
bem_rr : ndarray, shape (n_BEM_vertices, 3)
3D vertex positions for all surfaces in the BEM
n_jobs : int
Number of jobs to run in parallel
coil_type : str
'meg' or 'eeg'
Returns
-------
B : ndarray, shape (n_dipoles * 3, n_sensors)
Forward solution for a set of sensors
"""
# Both MEG and EEG use the infinite-medium potentials
# This could be just vectorized, but eats too much memory, so instead we
# reduce memory by chunking within _do_inf_pots and parallelize, too:
parallel, p_fun, _ = parallel_func(_do_inf_pots, n_jobs)
nas = np.array_split
B = np.sum(parallel(p_fun(mri_rr, sr.copy(), mri_Q, sol.copy())
for sr, sol in zip(nas(bem_rr, n_jobs),
nas(solution.T, n_jobs))), axis=0)
# The copy()s above should make it so the whole objects don't need to be
# pickled...
# Only MEG coils are sensitive to the primary current distribution.
if coil_type == 'meg':
# Primary current contribution (can be calc. in coil/dipole coords)
parallel, p_fun, _ = parallel_func(_do_prim_curr, n_jobs)
pcc = np.concatenate(parallel(p_fun(rr, c)
for c in nas(coils, n_jobs)), axis=1)
B += pcc
B *= _MAG_FACTOR
return B
def _do_prim_curr(rr, coils):
"""Calculate primary currents in a set of MEG coils.
See Mosher et al., 1999 Section II for discussion of primary vs. volume
currents.
Parameters
----------
rr : ndarray, shape (n_dipoles, 3)
3D dipole source positions in head coordinates
coils : list of dict
List of MEG coils where each element contains coil specific information
Returns
-------
pc : ndarray, shape (3 * n_dipoles, n_MEG_sensors)
Primary current for set of MEG coils due to all sources
"""
pc = np.empty((len(rr) * 3, len(coils)))
for ci, c in enumerate(coils):
# For all integration points, multiply by weights, sum across pts
# and then flatten
pc[:, ci] = np.sum(c['w'] * _bem_inf_fields(rr, c['rmag'],
c['cosmag']), 2).ravel()
return pc
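# Added note: c['w'] (n_pts,) broadcasts over the (n_dipoles, 3, n_pts) field
# array, the sum over axis 2 integrates across the coil's points, and ravel()
# interleaves the result as [d0x, d0y, d0z, d1x, ...] to give 3 * n_dipoles
# rows, matching the layout used by _bem_pot_or_field.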
def _do_inf_pots(mri_rr, bem_rr, mri_Q, sol):
"""Calculate infinite potentials for MEG or EEG sensors using chunks.
Parameters
----------
mri_rr : ndarray, shape (n_dipoles, 3)
3D dipole source positions in MRI coordinates
bem_rr : ndarray, shape (n_BEM_vertices, 3)
3D vertex positions for all surfaces in the BEM
mri_Q : ndarray, shape (3, 3)
3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
sol : ndarray, shape (n_BEM_vertices_subset, n_sensors)
Transposed chunk of the solution from _bem_specify_coils or _bem_specify_els
Returns
-------
B : ndarray, shape (n_dipoles * 3, n_sensors)
Forward solution for sensors due to volume currents
"""
# Doing work of 'fwd_bem_pot_calc' in MNE-C
# The following code is equivalent to this, but saves memory
# v0s = _bem_inf_pots(rr, bem_rr, Q) # n_rr x 3 x n_bem_rr
# v0s.shape = (len(rr) * 3, v0s.shape[2])
# B = np.dot(v0s, sol)
# We chunk the source mri_rr's in order to save memory
bounds = np.r_[np.arange(0, len(mri_rr), 1000), len(mri_rr)]
B = np.empty((len(mri_rr) * 3, sol.shape[1]))
for bi in range(len(bounds) - 1):
# v0 in Hamalainen et al., 1989 == v_inf in Mosher, et al., 1999
v0s = _bem_inf_pots(mri_rr[bounds[bi]:bounds[bi + 1]], bem_rr, mri_Q)
v0s.shape = (v0s.shape[0] * 3, v0s.shape[2])
B[3 * bounds[bi]:3 * bounds[bi + 1]] = np.dot(v0s, sol)
return B
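# Rough memory arithmetic (added, illustrative): with 10000 dipoles and a
# 5124-vertex BEM, an unchunked v0s would need 10000 * 3 * 5124 * 8 bytes,
# about 1.2 GB, while 1000-dipole chunks cap the temporary at roughly 123 MB
# per iteration at the cost of a short Python loop.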
# #############################################################################
# SPHERE COMPUTATION
def _sphere_pot_or_field(rr, mri_rr, mri_Q, coils, sphere, bem_rr,
n_jobs, coil_type):
"""Do potential or field for spherical model."""
fun = _eeg_spherepot_coil if coil_type == 'eeg' else _sphere_field
parallel, p_fun, _ = parallel_func(fun, n_jobs)
B = np.concatenate(parallel(p_fun(r, coils, sphere)
for r in np.array_split(rr, n_jobs)))
return B
def _sphere_field(rrs, coils, sphere):
"""Compute field for spherical model using Jukka Sarvas' field computation.
Jukka Sarvas, "Basic mathematical and electromagnetic concepts of the
biomagnetic inverse problem", Phys. Med. Biol. 1987, Vol. 32, 1, 11-22.
The formulas have been manipulated for efficient computation
by Matti Hamalainen, February 1990
"""
rmags, cosmags, ws, n_int = _concatenate_coils(coils)
bins = np.repeat(np.arange(len(n_int)), n_int)
# Shift to the sphere model coordinates
rrs = rrs - sphere['r0']
B = np.zeros((3 * len(rrs), len(coils)))
for ri, rr in enumerate(rrs):
# Check for a dipole at the origin
if np.sqrt(np.dot(rr, rr)) <= 1e-10:
continue
this_poss = rmags - sphere['r0']
# Vector from dipole to the field point
a_vec = this_poss - rr
a = np.sqrt(np.sum(a_vec * a_vec, axis=1))
r = np.sqrt(np.sum(this_poss * this_poss, axis=1))
rr0 = np.sum(this_poss * rr, axis=1)
ar = (r * r) - rr0
ar0 = ar / a
F = a * (r * a + ar)
gr = (a * a) / r + ar0 + 2.0 * (a + r)
g0 = a + 2 * r + ar0
# Compute the dot products needed
re = np.sum(this_poss * cosmags, axis=1)
r0e = np.sum(rr * cosmags, axis=1)
g = (g0 * r0e - gr * re) / (F * F)
good = (a > 0) | (r > 0) | ((a * r) + 1 > 1e-5)
v1 = fast_cross_3d(rr[np.newaxis, :], cosmags)
v2 = fast_cross_3d(rr[np.newaxis, :], this_poss)
xx = ((good * ws)[:, np.newaxis] *
(v1 / F[:, np.newaxis] + v2 * g[:, np.newaxis]))
zz = np.array([np.bincount(bins, weights=x,
minlength=len(n_int)) for x in xx.T])
B[3 * ri:3 * ri + 3, :] = zz
B *= _MAG_FACTOR
return B
def _eeg_spherepot_coil(rrs, coils, sphere):
"""Calculate the EEG in the sphere model."""
rmags, cosmags, ws, n_int = _concatenate_coils(coils)
bins = np.repeat(np.arange(len(n_int)), n_int)
# Shift to the sphere model coordinates
rrs = rrs - sphere['r0']
B = np.zeros((3 * len(rrs), len(coils)))
for ri, rr in enumerate(rrs):
# Only process dipoles inside the innermost sphere
if np.sqrt(np.dot(rr, rr)) >= sphere['layers'][0]['rad']:
continue
# fwd_eeg_spherepot_vec
vval_one = np.zeros((len(rmags), 3))
# Make a weighted sum over the equivalence parameters
for eq in range(sphere['nfit']):
# Scale the dipole position
rd = sphere['mu'][eq] * rr
rd2 = np.sum(rd * rd)
rd2_inv = 1.0 / rd2
# Go over all electrodes
this_pos = rmags - sphere['r0']
# Scale location onto the surface of the sphere (not used)
# if sphere['scale_pos']:
# pos_len = (sphere['layers'][-1]['rad'] /
# np.sqrt(np.sum(this_pos * this_pos, axis=1)))
# this_pos *= pos_len
# Vector from dipole to the field point
a_vec = this_pos - rd
# Compute the dot products needed
a = np.sqrt(np.sum(a_vec * a_vec, axis=1))
a3 = 2.0 / (a * a * a)
r2 = np.sum(this_pos * this_pos, axis=1)
r = np.sqrt(r2)
rrd = np.sum(this_pos * rd, axis=1)
ra = r2 - rrd
rda = rrd - rd2
# The main ingredients
F = a * (r * a + ra)
c1 = a3 * rda + 1.0 / a - 1.0 / r
c2 = a3 + (a + r) / (r * F)
# Mix them together and scale by lambda/(rd*rd)
m1 = (c1 - c2 * rrd)
m2 = c2 * rd2
vval_one += (sphere['lambda'][eq] * rd2_inv *
(m1[:, np.newaxis] * rd +
m2[:, np.newaxis] * this_pos))
# compute total result
xx = vval_one * ws[:, np.newaxis]
zz = np.array([np.bincount(bins, weights=x,
minlength=len(n_int)) for x in xx.T])
B[3 * ri:3 * ri + 3, :] = zz
# finish by scaling by 1 / (4 * pi)
B *= 0.25 / np.pi
return B
# #############################################################################
# MAGNETIC DIPOLE (e.g. CHPI)
def _magnetic_dipole_field_vec(rrs, coils):
"""Compute an MEG forward solution for a set of magnetic dipoles."""
fwd = np.empty((3 * len(rrs), len(coils)))
# The code below is a more efficient version (~30x) of this:
# for ri, rr in enumerate(rrs):
# for k in range(len(coils)):
# this_coil = coils[k]
# # Go through all points
# diff = this_coil['rmag'] - rr
# dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis]
# dist = np.sqrt(dist2)
# if (dist < 1e-5).any():
# raise RuntimeError('Coil too close')
# dist5 = dist2 * dist2 * dist
# sum_ = (3 * diff * np.sum(diff * this_coil['cosmag'],
# axis=1)[:, np.newaxis] -
# dist2 * this_coil['cosmag']) / dist5
# fwd[3*ri:3*ri+3, k] = 1e-7 * np.dot(this_coil['w'], sum_)
rmags, cosmags, ws, n_int = _concatenate_coils(coils)
bins = np.repeat(np.arange(len(n_int)), n_int)
for ri, rr in enumerate(rrs):
diff = rmags - rr
dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis]
dist = np.sqrt(dist2)
if (dist < 1e-5).any():
raise RuntimeError('Coil too close (dist = %g m)' % dist.min())
sum_ = ws[:, np.newaxis] * (3 * diff * np.sum(diff * cosmags,
axis=1)[:, np.newaxis] -
dist2 * cosmags) / (dist2 * dist2 * dist)
for ii in range(3):
fwd[3 * ri + ii] = np.bincount(bins, weights=sum_[:, ii],
minlength=len(n_int))
fwd *= 1e-7
return fwd
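# Added note: the vectorized expression above is the standard magnetic dipole
# field, B(r) = (mu_0 / 4 pi) * (3 * (m . r_hat) * r_hat - m) / |r|**3,
# evaluated for unit dipoles along each basis direction, projected onto each
# integration-point normal (cosmag) and weighted by ws; 1e-7 is mu_0 / (4 pi).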
# #############################################################################
# MAIN TRIAGING FUNCTION
@verbose
def _prep_field_computation(rr, bem, fwd_data, n_jobs, verbose=None):
"""Precompute and store some things that are used for both MEG and EEG.
Calculation includes multiplication factors, coordinate transforms,
compensations, and forward solutions. All are stored in modified fwd_data.
Parameters
----------
rr : ndarray, shape (n_dipoles, 3)
3D dipole source positions in head coordinates
bem : dict
Boundary Element Model information
fwd_data : dict
Dict containing sensor information. Gets updated here with BEM and
sensor information for later forward calculations
n_jobs : int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose)
"""
bem_rr = mults = mri_Q = head_mri_t = None
if not bem['is_sphere']:
if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
raise RuntimeError('only linear collocation supported')
# Store (and apply soon) μ_0/(4π) factor before source computations
mults = np.repeat(bem['source_mult'] / (4.0 * np.pi),
[len(s['rr']) for s in bem['surfs']])[np.newaxis, :]
# Get positions of BEM points for every surface
bem_rr = np.concatenate([s['rr'] for s in bem['surfs']])
# The dipole location and orientation must be transformed
head_mri_t = bem['head_mri_t']
mri_Q = apply_trans(bem['head_mri_t']['trans'], np.eye(3), False)
# Compute solution and compensation for different sensor types ('meg', 'eeg')
if len(set(fwd_data['coil_types'])) != len(fwd_data['coil_types']):
raise RuntimeError('Non-unique sensor types found')
compensators, solutions, csolutions = [], [], []
for coil_type, coils, ccoils, info in zip(fwd_data['coil_types'],
fwd_data['coils_list'],
fwd_data['ccoils_list'],
fwd_data['infos']):
compensator = solution = csolution = None
if len(coils) > 0: # Only proceed if sensors exist
if coil_type == 'meg':
# Compose a compensation data set if necessary
compensator = _make_ctf_comp_coils(info, coils)
if not bem['is_sphere']:
if coil_type == 'meg':
# MEG field computation matrices for BEM
start = 'Composing the field computation matrix'
logger.info('\n' + start + '...')
cf = FIFF.FIFFV_COORD_HEAD
# multiply solution by "mults" here for simplicity
solution = _bem_specify_coils(bem, coils, cf, mults,
n_jobs)
if compensator is not None:
logger.info(start + ' (compensation coils)...')
csolution = _bem_specify_coils(bem, ccoils, cf,
mults, n_jobs)
else:
# Compute solution for EEG sensor
solution = _bem_specify_els(bem, coils, mults)
else:
solution = bem
if coil_type == 'eeg':
logger.info('Using the equivalent source approach in the '
'homogeneous sphere for EEG')
compensators.append(compensator)
solutions.append(solution)
csolutions.append(csolution)
# Get appropriate forward physics function depending on sphere or BEM model
fun = _sphere_pot_or_field if bem['is_sphere'] else _bem_pot_or_field
# Update fwd_data with
# bem_rr (3D BEM vertex positions)
# mri_Q (3x3 Head->MRI coord transformation applied to identity matrix)
# head_mri_t (head->MRI coord transform dict)
# fun (_bem_pot_or_field if not 'sphere'; otherwise _sph_pot_or_field)
# solutions (len 2 list; [ndarray, shape (n_MEG_sens, n_BEM_vertices),
# ndarray, shape (n_EEG_sens, n_BEM_vertices)])
# csolutions (compensation for solution)
fwd_data.update(dict(bem_rr=bem_rr, mri_Q=mri_Q, head_mri_t=head_mri_t,
compensators=compensators, solutions=solutions,
csolutions=csolutions, fun=fun))
@verbose
def _compute_forwards_meeg(rr, fd, n_jobs, verbose=None):
"""Compute MEG and EEG forward solutions for all sensor types.
Parameters
----------
rr : ndarray, shape (n_dipoles, 3)
3D dipole positions in head coordinates
fd : dict
Dict containing forward data after update in _prep_field_computation
n_jobs : int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose)
Returns
-------
Bs : list
Each element contains ndarray, shape (3 * n_dipoles, n_sensors) where
n_sensors depends on which channel types are requested (MEG and/or EEG)
"""
n_jobs = max(min(n_jobs, len(rr)), 1)
Bs = list()
# The dipole location and orientation must be transformed to mri coords
mri_rr = None
if fd['head_mri_t'] is not None:
mri_rr = apply_trans(fd['head_mri_t']['trans'], rr)
mri_Q, bem_rr, fun = fd['mri_Q'], fd['bem_rr'], fd['fun']
for ci in range(len(fd['coils_list'])):
coils, ccoils = fd['coils_list'][ci], fd['ccoils_list'][ci]
if len(coils) == 0: # nothing to do
Bs.append(np.zeros((3 * len(rr), 0)))
continue
coil_type, compensator = fd['coil_types'][ci], fd['compensators'][ci]
solution, csolution = fd['solutions'][ci], fd['csolutions'][ci]
info = fd['infos'][ci]
# Do the actual forward calculation for a list of MEG/EEG sensors
logger.info('Computing %s at %d source location%s '
'(free orientations)...'
% (coil_type.upper(), len(rr),
'' if len(rr) == 1 else 's'))
# Calculate forward solution using spherical or BEM model
B = fun(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs,
coil_type)
# Compensate if needed (only done for MEG systems w/compensation)
if compensator is not None:
# Compute the field in the compensation sensors
work = fun(rr, mri_rr, mri_Q, ccoils, csolution, bem_rr,
n_jobs, coil_type)
# Combine solutions so we can do the compensation
both = np.zeros((work.shape[0], B.shape[1] + work.shape[1]))
picks = pick_types(info, meg=True, ref_meg=False)
both[:, picks] = B
picks = pick_types(info, meg=False, ref_meg=True)
both[:, picks] = work
B = np.dot(both, compensator.T)
Bs.append(B)
return Bs
@verbose
def _compute_forwards(rr, bem, coils_list, ccoils_list, infos, coil_types,
n_jobs, verbose=None):
"""Compute the MEG and EEG forward solutions.
This effectively combines compute_forward_meg and compute_forward_eeg
from MNE-C.
Parameters
----------
rr : ndarray, shape (n_sources, 3)
3D dipole source positions in head coordinates
bem : dict
Boundary Element Model information for all surfaces
coils_list : list
List of MEG and/or EEG sensor information dicts
ccoils_list : list
Optional list of MEG compensation information
infos : list, len(2)
infos[0] is MEG info, infos[1] is EEG info
coil_types : list of str
Sensor types. May contain 'meg' and/or 'eeg'
n_jobs : int
Number of jobs to run in parallel
Returns
-------
Bs : list of ndarray
Each element contains ndarray, shape (3 * n_dipoles, n_sensors) where
n_sensors depends on which channel types are requested (MEG and/or EEG)
"""
# Split calculation into two steps to save (potentially) a lot of time
# when e.g. dipole fitting
fwd_data = dict(coils_list=coils_list, ccoils_list=ccoils_list,
infos=infos, coil_types=coil_types)
_prep_field_computation(rr, bem, fwd_data, n_jobs)
Bs = _compute_forwards_meeg(rr, fwd_data, n_jobs)
return Bs
| bsd-3-clause |
agiliopadua/lammps | tools/i-pi/ipi/inputs/simulation.py | 41 | 8482 | """Deals with creating the simulation class.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
InputSimulation: Deals with creating the Simulation object from a file, and
writing the checkpoints.
"""
__all__ = ['InputSimulation']
import numpy as np
import os.path, sys
import ipi.engine.simulation
from ipi.utils.depend import *
from ipi.utils.inputvalue import *
from ipi.utils.units import *
from ipi.utils.prng import *
from ipi.utils.io import *
from ipi.utils.io.io_xml import *
from ipi.utils.messages import verbosity
from ipi.inputs.forces import InputForces
from ipi.inputs.prng import InputRandom
from ipi.inputs.initializer import InputInitializer
from ipi.inputs.beads import InputBeads
from ipi.inputs.cell import InputCell
from ipi.inputs.ensembles import InputEnsemble
from ipi.inputs.outputs import InputOutputs
from ipi.inputs.normalmodes import InputNormalModes
from ipi.engine.normalmodes import NormalModes
from ipi.engine.atoms import Atoms
from ipi.engine.beads import Beads
from ipi.engine.cell import Cell
from ipi.engine.initializer import Initializer
class InputSimulation(Input):
"""Simulation input class.
Handles generating the appropriate forcefield class from the xml input file,
and generating the xml checkpoint tags and data from an instance of the
object.
Attributes:
verbosity: A string saying how much should be output to standard output.
Fields:
force: A restart force instance. Used as a model for all the replicas.
ensemble: A restart ensemble instance.
beads: A restart beads instance.
normal_modes: Setup of normal mode integrator.
cell: A restart cell instance.
output: A list of the required outputs.
prng: A random number generator object.
step: An integer giving the current simulation step. Defaults to 0.
total_steps: The total number of steps. Defaults to 1000
total_time: The wall clock time limit. Defaults to 0 (no limit).
initialize: An array of strings giving all the quantities that should
be initialized.
"""
fields = { "forces" : (InputForces, { "help" : InputForces.default_help }),
"ensemble": (InputEnsemble, { "help" : InputEnsemble.default_help } ),
"prng" : (InputRandom, { "help" : InputRandom.default_help,
"default" : input_default(factory=Random)} ),
"initialize" : (InputInitializer, { "help" : InputInitializer.default_help,
"default" : input_default(factory=Initializer) } ),
"beads" : (InputBeads, { "help" : InputBeads.default_help,
"default" : input_default(factory=Beads, kwargs={'natoms': 0, 'nbeads': 0}) } ),
"normal_modes" : (InputNormalModes, { "help" : InputNormalModes.default_help,
"default" : input_default(factory=NormalModes, kwargs={'mode': "rpmd"}) } ),
"cell" : (InputCell, { "help" : InputCell.default_help,
"default" : input_default(factory=Cell) }),
"output" : (InputOutputs, { "help" : InputOutputs.default_help,
"default": input_default(factory=InputOutputs.make_default) }),
"step" : ( InputValue, { "dtype" : int,
"default" : 0,
"help" : "The current simulation time step." }),
"total_steps": ( InputValue, { "dtype" : int,
"default" : 1000,
"help" : "The total number of steps that will be done. If 'step' is equal to or greater than 'total_steps', then the simulation will finish." }),
"total_time" : ( InputValue, { "dtype" : float,
"default" : 0,
"help" : "The maximum wall clock time (in seconds)." }),
}
attribs = { "verbosity" : (InputAttribute, { "dtype" : str,
"default" : "low",
"options" : [ "quiet", "low", "medium", "high", "debug" ],
"help" : "The level of output on stdout."
})
}
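# Illustrative input fragment (added; the field names follow the declarations
# above, the values are hypothetical):
#
#   <simulation verbosity='high'>
#     <total_steps> 1000 </total_steps>
#     <total_time> 3600 </total_time>
#   </simulation>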
default_help = "This is the top level class that deals with the running of the simulation, including holding the simulation specific properties such as the time step and outputting the data."
default_label = "SIMULATION"
def store(self, simul):
"""Takes a simulation instance and stores a minimal representation of it.
Args:
simul: A simulation object.
"""
super(InputSimulation,self).store()
self.forces.store(simul.flist)
self.ensemble.store(simul.ensemble)
self.beads.store(simul.beads)
self.normal_modes.store(simul.nm)
self.cell.store(simul.cell)
self.prng.store(simul.prng)
self.step.store(simul.step)
self.total_steps.store(simul.tsteps)
self.total_time.store(simul.ttime)
self.output.store(simul.outputs)
# The verbosity level is picked up from the messages module; it is
# effectively a global, but that is the simplest way to pass the output
# level around.
if verbosity.debug:
self.verbosity.store("debug")
elif verbosity.high:
self.verbosity.store("high")
elif verbosity.medium:
self.verbosity.store("medium")
elif verbosity.low:
self.verbosity.store("low")
elif verbosity.quiet:
self.verbosity.store("quiet")
else:
raise ValueError("Invalid verbosity level")
def fetch(self):
"""Creates a simulation object.
Returns:
A simulation object of the appropriate type and with the appropriate
properties and other objects given the attributes of the
InputSimulation object.
Raises:
TypeError: Raised if one of the file types in the stride keyword
is incorrect.
"""
super(InputSimulation,self).fetch()
# small hack: initialize here the verbosity level -- we really assume to have
# just one simulation object
verbosity.level=self.verbosity.fetch()
# this creates a simulation object which gathers all the little bits
#TODO use named arguments since this list is a bit too long...
rsim = ipi.engine.simulation.Simulation(self.beads.fetch(), self.cell.fetch(),
self.forces.fetch(), self.ensemble.fetch(), self.prng.fetch(),
self.output.fetch(), self.normal_modes.fetch(),
self.initialize.fetch(), self.step.fetch(),
tsteps=self.total_steps.fetch(),
ttime=self.total_time.fetch())
# this does all of the piping between the components of the simulation
rsim.bind()
return rsim
def check(self):
"""Function that deals with optional arguments.
Deals with the difference between classical and PI dynamics. If there is
no beads argument, the bead positions are generated from the atoms, with
the necklace being fixed at the atom position. Similarly, if no nbeads
argument is specified a classical simulation is done.
Raises:
TypeError: Raised if no beads or atoms attribute is defined.
"""
super(InputSimulation,self).check()
if self.total_steps.fetch() <= self.step.fetch():
raise ValueError("Current step equals or exceeds total steps; no dynamics will be done.")
| gpl-2.0 |
fkorotkov/pants | tests/python/pants_test/backend/jvm/tasks/test_classmap_integration.py | 9 | 2536 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ClassmapTaskIntegrationTest(PantsRunIntegrationTest):
# A test target with both a transitive internal dependency and an external dependency
TEST_JVM_TARGET = 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:seven'
INTERNAL_MAPPING = ('org.pantsbuild.testproject.testjvms.TestSeven '
'testprojects/tests/java/org/pantsbuild/testproject/testjvms:seven')
INTERNAL_TRANSITIVE_MAPPING = ('org.pantsbuild.testproject.testjvms.TestBase '
'testprojects/tests/java/org/pantsbuild/testproject/testjvms:base')
EXTERNAL_MAPPING = ('org.junit.ClassRule 3rdparty:junit')
UNICODE_TEST_TARGET = 'testprojects/src/java/org/pantsbuild/testproject/unicode/cucumber'
UNICODE_MAPPING = 'cucumber.api.java.zh_cn.假如 3rdparty:cucumber-java'
def test_classmap_none(self):
pants_run = self.do_command('classmap', success=True)
self.assertEqual(len(pants_run.stdout_data.strip().split()), 0)
def test_classmap(self):
pants_run = self.do_command('classmap', self.TEST_JVM_TARGET, success=True)
self.assertIn(self.INTERNAL_MAPPING, pants_run.stdout_data)
self.assertIn(self.INTERNAL_TRANSITIVE_MAPPING, pants_run.stdout_data)
self.assertIn(self.EXTERNAL_MAPPING, pants_run.stdout_data)
def test_classmap_internal_only(self):
pants_run = self.do_command('classmap', '--internal-only', self.TEST_JVM_TARGET, success=True)
self.assertIn(self.INTERNAL_MAPPING, pants_run.stdout_data)
self.assertIn(self.INTERNAL_TRANSITIVE_MAPPING, pants_run.stdout_data)
self.assertNotIn(self.EXTERNAL_MAPPING, pants_run.stdout_data)
def test_classmap_intransitive(self):
pants_run = self.do_command('classmap', '--no-transitive', self.TEST_JVM_TARGET, success=True)
self.assertIn(self.INTERNAL_MAPPING, pants_run.stdout_data)
self.assertNotIn(self.INTERNAL_TRANSITIVE_MAPPING, pants_run.stdout_data)
self.assertNotIn(self.EXTERNAL_MAPPING, pants_run.stdout_data)
def test_classmap_unicode(self):
pants_run = self.do_command('classmap', self.UNICODE_TEST_TARGET, success=True)
self.assertIn(self.UNICODE_MAPPING, pants_run.stdout_data)
| apache-2.0 |
darkleons/lama | addons/mrp_repair/wizard/__init__.py | 445 | 1096 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cancel_repair
import make_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wileeam/airflow | tests/sensors/test_external_task_sensor.py | 4 | 18108 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import time, timedelta
import pytest
from airflow import DAG, exceptions, settings
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import DagBag, TaskInstance
from airflow.operators.bash import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.sensors.external_task_sensor import ExternalTaskMarker, ExternalTaskSensor
from airflow.sensors.time_sensor import TimeSensor
from airflow.utils.state import State
from airflow.utils.timezone import datetime
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'unit_test_dag'
TEST_TASK_ID = 'time_sensor_check'
DEV_NULL = '/dev/null'
class TestExternalTaskSensor(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL,
include_examples=True
)
self.args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
def test_time_sensor(self):
op = TimeSensor(
task_id=TEST_TASK_ID,
target_time=time(0),
dag=self.dag
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
dag=self.dag
)
op.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True
)
def test_external_dag_sensor(self):
other_dag = DAG(
'other_dag',
default_args=self.args,
end_date=DEFAULT_DATE,
schedule_interval='@once')
other_dag.create_dagrun(
run_id='test',
start_date=DEFAULT_DATE,
execution_date=DEFAULT_DATE,
state=State.SUCCESS)
op = ExternalTaskSensor(
task_id='test_external_dag_sensor_check',
external_dag_id='other_dag',
external_task_id=None,
dag=self.dag
)
op.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True
)
def test_templated_sensor(self):
with self.dag:
sensor = ExternalTaskSensor(
task_id='templated_task',
external_dag_id='dag_{{ ds }}',
external_task_id='task_{{ ds }}'
)
instance = TaskInstance(sensor, DEFAULT_DATE)
instance.render_templates()
self.assertEqual(sensor.external_dag_id,
"dag_{}".format(DEFAULT_DATE.date()))
self.assertEqual(sensor.external_task_id,
"task_{}".format(DEFAULT_DATE.date()))
def test_external_task_sensor_fn_multiple_execution_dates(self):
bash_command_code = """
{% set s=execution_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
dag_external_id = TEST_DAG_ID + '_external'
dag_external = DAG(
dag_external_id,
default_args=self.args,
schedule_interval=timedelta(seconds=1))
task_external_with_failure = BashOperator(
task_id="task_external_with_failure",
bash_command=bash_command_code,
retries=0,
dag=dag_external)
task_external_without_failure = DummyOperator(
task_id="task_external_without_failure",
retries=0,
dag=dag_external)
task_external_without_failure.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + timedelta(seconds=1),
ignore_ti_state=True)
session = settings.Session()
TI = TaskInstance
try:
task_external_with_failure.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + timedelta(seconds=1),
ignore_ti_state=True)
# The task_external_with_failure task is expected to fail
# once per minute (the run on the first second of
# each minute).
except Exception as e: # pylint: disable=broad-except
failed_tis = session.query(TI).filter(
TI.dag_id == dag_external_id,
TI.state == State.FAILED,
TI.execution_date == DEFAULT_DATE + timedelta(seconds=1)).all()
if len(failed_tis) == 1 and \
failed_tis[0].task_id == 'task_external_with_failure':
pass
else:
raise e
dag_id = TEST_DAG_ID
dag = DAG(
dag_id,
default_args=self.args,
schedule_interval=timedelta(minutes=1))
task_without_failure = ExternalTaskSensor(
task_id='task_without_failure',
external_dag_id=dag_external_id,
external_task_id='task_external_without_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i)
for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
dag=dag)
task_with_failure = ExternalTaskSensor(
task_id='task_with_failure',
external_dag_id=dag_external_id,
external_task_id='task_external_with_failure',
execution_date_fn=lambda dt: [dt + timedelta(seconds=i)
for i in range(2)],
allowed_states=['success'],
retries=0,
timeout=1,
poke_interval=1,
dag=dag)
task_without_failure.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True)
with self.assertRaises(AirflowSensorTimeout):
task_with_failure.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True)
def test_external_task_sensor_delta(self):
self.test_time_sensor()
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_delta=timedelta(0),
allowed_states=['success'],
dag=self.dag
)
op.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True
)
def test_external_task_sensor_fn(self):
self.test_time_sensor()
# check that the execution_fn works
op1 = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=lambda dt: dt + timedelta(0),
allowed_states=['success'],
dag=self.dag
)
op1.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True
)
# double check that the execution is being called by failing the test
op2 = ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=lambda dt: dt + timedelta(days=1),
allowed_states=['success'],
timeout=1,
poke_interval=1,
dag=self.dag
)
with self.assertRaises(exceptions.AirflowSensorTimeout):
op2.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True
)
def test_external_task_sensor_error_delta_and_fn(self):
self.test_time_sensor()
# Test that providing execution_delta and a function raises an error
with self.assertRaises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check_delta',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_delta=timedelta(0),
execution_date_fn=lambda dt: dt,
allowed_states=['success'],
dag=self.dag
)
def test_catch_invalid_allowed_states(self):
with self.assertRaises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=['invalid_state'],
dag=self.dag
)
with self.assertRaises(ValueError):
ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id=TEST_DAG_ID,
external_task_id=None,
allowed_states=['invalid_state'],
dag=self.dag
)
def test_external_task_sensor_waits_for_task_check_existence(self):
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id="example_bash_operator",
external_task_id="non-existing-task",
check_existence=True,
dag=self.dag
)
with self.assertRaises(AirflowException):
op.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True
)
def test_external_task_sensor_waits_for_dag_check_existence(self):
op = ExternalTaskSensor(
task_id='test_external_task_sensor_check',
external_dag_id="non-existing-dag",
external_task_id=None,
check_existence=True,
dag=self.dag
)
with self.assertRaises(AirflowException):
op.run(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_ti_state=True
)
@pytest.fixture
def dag_bag_ext():
"""
Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies
set up using ExternalTaskMarker and ExternalTaskSensor.
dag_0: task_a_0 >> task_b_0
|
|
dag_1: ---> task_a_1 >> task_b_1
|
|
dag_2: ---> task_a_2 >> task_b_2
|
|
dag_3: ---> task_a_3 >> task_b_3
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_0 = DummyOperator(task_id="task_a_0", dag=dag_0)
task_b_0 = ExternalTaskMarker(task_id="task_b_0",
external_dag_id="dag_1",
external_task_id="task_a_1",
recursion_depth=3,
dag=dag_0)
task_a_0 >> task_b_0
dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_1 = ExternalTaskSensor(task_id="task_a_1",
external_dag_id=dag_0.dag_id,
external_task_id=task_b_0.task_id,
dag=dag_1)
task_b_1 = ExternalTaskMarker(task_id="task_b_1",
external_dag_id="dag_2",
external_task_id="task_a_2",
recursion_depth=2,
dag=dag_1)
task_a_1 >> task_b_1
dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_2 = ExternalTaskSensor(task_id="task_a_2",
external_dag_id=dag_1.dag_id,
external_task_id=task_b_1.task_id,
dag=dag_2)
task_b_2 = ExternalTaskMarker(task_id="task_b_2",
external_dag_id="dag_3",
external_task_id="task_a_3",
recursion_depth=1,
dag=dag_2)
task_a_2 >> task_b_2
dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_3 = ExternalTaskSensor(task_id="task_a_3",
external_dag_id=dag_2.dag_id,
external_task_id=task_b_2.task_id,
dag=dag_3)
task_b_3 = DummyOperator(task_id="task_b_3", dag=dag_3)
task_a_3 >> task_b_3
for dag in [dag_0, dag_1, dag_2, dag_3]:
dag_bag.bag_dag(dag, None, dag)
return dag_bag
def run_tasks(dag_bag):
"""
Run all tasks in the DAGs in the given dag_bag. Return the TaskInstance objects as a dict
keyed by task_id.
"""
tis = {}
for dag in dag_bag.dags.values():
for task in dag.tasks:
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
tis[task.task_id] = ti
ti.run()
assert_ti_state_equal(ti, State.SUCCESS)
return tis
def assert_ti_state_equal(task_instance, state):
"""
Assert state of task_instances equals the given state.
"""
task_instance.refresh_from_db()
assert task_instance.state == state
def clear_tasks(dag_bag, dag, task):
"""
Clear the task and its downstream tasks recursively for the dag in the given dagbag.
"""
subdag = dag.sub_dag(task_regex="^{}$".format(task.task_id), include_downstream=True)
subdag.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, dag_bag=dag_bag)
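# Added note: the anchored regex ^task_id$ selects exactly one task, and
# include_downstream=True pulls in its dependents; any ExternalTaskMarker in
# that set then propagates the clear across DAG boundaries, up to its
# recursion_depth.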
# pylint: disable=redefined-outer-name
def test_external_task_marker_transitive(dag_bag_ext):
"""
Test clearing tasks across DAGs.
"""
tis = run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
clear_tasks(dag_bag_ext, dag_0, task_a_0)
ti_a_0 = tis["task_a_0"]
ti_b_3 = tis["task_b_3"]
assert_ti_state_equal(ti_a_0, State.NONE)
assert_ti_state_equal(ti_b_3, State.NONE)
def test_external_task_marker_exception(dag_bag_ext):
"""
Clearing across multiple DAGs should raise AirflowException if more levels are being cleared
than allowed by the recursion_depth of the first ExternalTaskMarker being cleared.
"""
run_tasks(dag_bag_ext)
dag_0 = dag_bag_ext.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
task_b_0 = dag_0.get_task("task_b_0")
task_b_0.recursion_depth = 2
with pytest.raises(AirflowException, match="Maximum recursion depth 2"):
clear_tasks(dag_bag_ext, dag_0, task_a_0)
@pytest.fixture
def dag_bag_cyclic():
"""
Create a DagBag with DAGs having cyclic dependencies set up by ExternalTaskMarker and
ExternalTaskSensor.
dag_0: task_a_0 >> task_b_0
^ |
| |
dag_1: | ---> task_a_1 >> task_b_1
| |
---------------------------------
"""
dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)
dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_0 = DummyOperator(task_id="task_a_0", dag=dag_0)
task_b_0 = ExternalTaskMarker(task_id="task_b_0",
external_dag_id="dag_1",
external_task_id="task_a_1",
recursion_depth=3,
dag=dag_0)
task_a_0 >> task_b_0
dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None)
task_a_1 = ExternalTaskSensor(task_id="task_a_1",
external_dag_id=dag_0.dag_id,
external_task_id=task_b_0.task_id,
dag=dag_1)
task_b_1 = ExternalTaskMarker(task_id="task_b_1",
external_dag_id="dag_0",
external_task_id="task_a_0",
recursion_depth=2,
dag=dag_1)
task_a_1 >> task_b_1
for dag in [dag_0, dag_1]:
dag_bag.bag_dag(dag, None, dag)
return dag_bag
def test_external_task_marker_cyclic(dag_bag_cyclic):
"""
Tests clearing across multiple DAGs that have cyclic dependencies. AirflowException should be
raised.
"""
run_tasks(dag_bag_cyclic)
dag_0 = dag_bag_cyclic.get_dag("dag_0")
task_a_0 = dag_0.get_task("task_a_0")
with pytest.raises(AirflowException, match="Maximum recursion depth 3"):
clear_tasks(dag_bag_cyclic, dag_0, task_a_0)
| apache-2.0 |
Mic92/ansible | contrib/inventory/nagios_ndo.py | 213 | 3842 | #!/usr/bin/env python
# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Nagios NDO external inventory script.
========================================
Returns hosts and hostgroups from Nagios NDO.
Configuration is read from `nagios_ndo.ini`.
"""
import os
import argparse
try:
import configparser
except ImportError:
import ConfigParser
configparser = ConfigParser
import json
try:
from sqlalchemy import text
from sqlalchemy.engine import create_engine
except ImportError:
print("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
exit(1)
class NagiosNDOInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
if config.has_option('ndo', 'database_uri'):
self.ndo_database_uri = config.get('ndo', 'database_uri')
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def get_hosts(self):
engine = create_engine(self.ndo_database_uri)
connection = engine.connect()
select_hosts = text("SELECT display_name \
FROM nagios_hosts")
select_hostgroups = text("SELECT alias \
FROM nagios_hostgroups")
select_hostgroup_hosts = text("SELECT h.display_name \
FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
WHERE hgm.hostgroup_id = hg.hostgroup_id \
AND hgm.host_object_id = h.host_object_id \
AND hg.alias =:hostgroup_alias")
hosts = connection.execute(select_hosts)
self.result['all']['hosts'] = [host['display_name'] for host in hosts]
for hostgroup in connection.execute(select_hostgroups):
hostgroup_alias = hostgroup['alias']
self.result[hostgroup_alias] = {}
hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
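# Illustrative nagios_ndo.ini (added; the URI below is a placeholder):
#
#   [ndo]
#   database_uri = mysql://user:password@localhost/nagios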
def __init__(self):
self.defaultgroup = 'group_all'
self.ndo_database_uri = None
self.options = None
self.read_settings()
self.read_cli()
self.result = {}
self.result['all'] = {}
self.result['all']['hosts'] = []
self.result['_meta'] = {}
self.result['_meta']['hostvars'] = {}
if self.ndo_database_uri:
self.get_hosts()
if self.options.host:
print(json.dumps({}))
elif self.options.list:
print(json.dumps(self.result))
else:
print("usage: --list or --host HOSTNAME")
exit(1)
else:
print("Error: Database configuration is missing. See nagios_ndo.ini.")
exit(1)
NagiosNDOInventory()
| gpl-3.0 |
bandit145/ans-between | src/dictops.py | 1 | 1886 | #TODO: allow missing params and args lists to pass tests
from src import logging
class dict_mgm:
#creates ansible command to run
def make_play(data,db_data,location):
if dict_mgm.data_check(data, db_data) == 'OK':
command = 'ansible-playbook {location}'.format(location=location)
# append optional parameters, arguments, and password if present
logging.debug(data.keys())
command+=data['name']+' '
if 'params' in data.keys():
command+= dict_mgm.sort_params(data['params'])
if 'args' in data.keys():
command+= dict_mgm.sort_args(data['args'])
if 'password' in data.keys():
password = data['password']
else:
password = None
logging.debug(command)
logging.debug(password)
return command, password
else:
return 'Error', None
#check integrity of submitted data compared to its schema model
def data_check(data,db_data):
logging.debug(data)
logging.debug(db_data)
if len(data) != len(db_data):
logging.debug('triggered 1')
return 'Error'
if data.keys() != db_data.keys():
logging.debug('triggered 2')
return 'Error'
if len(data.values()) != len(db_data.values()):
logging.debug('triggered 3')
return 'Error'
#for playbooks that have no params/args
try:
if len(data['params']) != len(db_data['params']):
logging.debug('triggered 4')
return 'Error'
except KeyError:
pass
try:
if len(data['args']) != len(db_data['args']):
logging.debug('triggered 5')
return 'Error'
except KeyError:
pass
logging.debug('OK')
return 'OK'
def sort_params(params):#deals with param dics
command = ''
for item in params:
keys= list(item.keys())
values= list(item.values())
logging.debug(keys)
logging.debug(values)
command+=keys[0]+' '+values[0]+' '
return command
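# Illustrative (added): sort_params([{'-i': 'hosts'}, {'-e': 'env=prod'}])
# returns '-i hosts -e env=prod ' (note the trailing space for chaining).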
def sort_args(args): #deals with args list
command = ''
for arg in args:
command+= arg+' '
return command | mit |
eufarn7sp/egads | egads/thirdparty/nappy/nc_interface/na_to_nc.py | 2 | 3191 | # Copyright (C) 2004 CCLRC & NERC( Natural Environment Research Council ).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later. http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
na_to_nc.py
===========
Contains the NAToNC class for converting a NASA Ames file to a NetCDF file.
"""
# Imports from python standard library
import logging
# Imports from external packages
try:
import cdms2 as cdms
except:
try:
import cdms
except:
raise Exception("Could not import third-party software. Nappy requires the CDMS and Numeric packages to be installed to convert to CDMS and NetCDF.")
# Import from nappy package
import nappy.nc_interface.na_to_cdms
from nappy.na_error import na_error
logging.basicConfig()
log = logging.getLogger(__name__)
class NAToNC(nappy.nc_interface.na_to_cdms.NADictToCdmsObjects):
"""
Converts a NASA Ames file to a NetCDF file.
"""
def __init__(self, na_file, variables=None, aux_variables=None,
global_attributes=[("Conventions","CF-1.0")],
time_units=None, time_warning=True,
rename_variables={}):
"""
Sets up instance variables. Note that the argument 'na_file' has a relaxed definition
and can be either a NASA Ames file object or the name of a NASA Ames file.
Typical usage is:
>>> import nappy.nc_interface.na_to_nc as na_to_nc
>>> c = na_to_nc.NAToNC("old_file.na")
>>> c.convert()
>>> c.writeNCFile("new_file.nc")
"""
# First open na_file if it is a file rather than an na_file object
na_file_obj = na_file
log.debug("na_file argument: %s (%s)", na_file_obj, type(na_file_obj))
if isinstance(na_file_obj, str):
na_file_obj = nappy.openNAFile(na_file_obj)
nappy.nc_interface.na_to_cdms.NADictToCdmsObjects.__init__(self, na_file_obj, variables=variables,
aux_variables=aux_variables,
global_attributes=global_attributes,
time_units=time_units, time_warning=time_warning,
rename_variables=rename_variables)
def writeNCFile(self, file_name, mode="w"):
"""
Writes the NASA Ames content that has been converted into CDMS objects to a
NetCDF file of name 'file_name'. Note that mode can be set to append so you
can add the data to an existing file.
"""
if not self.converted:
self.convert()
# Create CDMS output file object
fout = cdms.open(file_name, mode=mode)
# Write main variables
for var in self.cdms_variables:
fout.write(var)
# Write aux variables
for avar in self.cdms_aux_variables:
fout.write(avar)
# Write global attributes
for (att, value) in self.global_attributes:
setattr(fout, att, value)
fout.close()
log.info("NetCDF file '%s' written successfully." % file_name)
return True
| gpl-3.0 |
xwolf12/django | django/conf/locale/hr/formats.py | 504 | 2106 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. E Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M:%S.%f', # '25.10.2006. 14:30:59.000200'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M:%S.%f', # '25.10.06. 14:30:59.000200'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M:%S.%f', # '25. 10. 2006. 14:30:59.000200'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M:%S.%f', # '25. 10. 06. 14:30:59.000200'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.21/_downloads/ae7d4d6bcae82f99a78c3f8a0c94f7b0/plot_mne_inverse_envelope_correlation.py | 3 | 4522 | """
.. _ex-envelope-correlation:
=============================================
Compute envelope correlations in source space
=============================================
Compute envelope correlations of orthogonalized activity [1]_ [2]_ in source
space using resting state CTF data.
"""
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Sheraz Khan <sheraz@khansheraz.com>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.connectivity import envelope_correlation
from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
data_path = mne.datasets.brainstorm.bst_resting.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'bst_resting'
trans = op.join(data_path, 'MEG', 'bst_resting', 'bst_resting-trans.fif')
src = op.join(subjects_dir, subject, 'bem', subject + '-oct-6-src.fif')
bem = op.join(subjects_dir, subject, 'bem', subject + '-5120-bem-sol.fif')
raw_fname = op.join(data_path, 'MEG', 'bst_resting',
'subj002_spontaneous_20111102_01_AUX.ds')
##############################################################################
# Here we do some things in the name of speed, such as crop (which will
# hurt SNR) and downsample. Then we compute SSP projectors and apply them.
raw = mne.io.read_raw_ctf(raw_fname, verbose='error')
raw.crop(0, 60).pick_types(meg=True, eeg=False).load_data().resample(80)
raw.apply_gradient_compensation(3)
projs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2)
projs_eog, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='MLT31-4407')
raw.info['projs'] += projs_ecg
raw.info['projs'] += projs_eog
raw.apply_proj()
cov = mne.compute_raw_covariance(raw) # compute before band-pass of interest
##############################################################################
# Now we band-pass filter our data and create epochs.
raw.filter(14, 30)
events = mne.make_fixed_length_events(raw, duration=5.)
epochs = mne.Epochs(raw, events=events, tmin=0, tmax=5.,
baseline=None, reject=dict(mag=8e-13), preload=True)
del raw
##############################################################################
# Compute the forward and inverse
# -------------------------------
src = mne.read_source_spaces(src)
fwd = mne.make_forward_solution(epochs.info, trans, src, bem)
inv = make_inverse_operator(epochs.info, fwd, cov)
del fwd, src
##############################################################################
# Compute label time series and do envelope correlation
# -----------------------------------------------------
labels = mne.read_labels_from_annot(subject, 'aparc_sub',
subjects_dir=subjects_dir)
epochs.apply_hilbert() # faster to apply in sensor space
stcs = apply_inverse_epochs(epochs, inv, lambda2=1. / 9., pick_ori='normal',
return_generator=True)
label_ts = mne.extract_label_time_course(
stcs, labels, inv['src'], return_generator=True)
corr = envelope_correlation(label_ts, verbose=True)
# let's plot this matrix
fig, ax = plt.subplots(figsize=(4, 4))
ax.imshow(corr, cmap='viridis', clim=np.percentile(corr, [5, 95]))
fig.tight_layout()
##############################################################################
# Compute the degree and plot it
# ------------------------------
# sphinx_gallery_thumbnail_number = 2
threshold_prop = 0.15 # percentage of strongest edges to keep in the graph
degree = mne.connectivity.degree(corr, threshold_prop=threshold_prop)
stc = mne.labels_to_stc(labels, degree)
stc = stc.in_label(mne.Label(inv['src'][0]['vertno'], hemi='lh') +
mne.Label(inv['src'][1]['vertno'], hemi='rh'))
brain = stc.plot(
clim=dict(kind='percent', lims=[75, 85, 95]), colormap='gnuplot',
subjects_dir=subjects_dir, views='dorsal', hemi='both',
smoothing_steps=25, time_label='Beta band')
##############################################################################
# References
# ----------
# .. [1] Hipp JF, Hawellek DJ, Corbetta M, Siegel M, Engel AK (2012)
# Large-scale cortical correlation structure of spontaneous
# oscillatory activity. Nature Neuroscience 15:884–890
# .. [2] Khan S et al. (2018). Maturation trajectories of cortical
# resting-state networks depend on the mediating frequency band.
# Neuroimage 174:57–68
| bsd-3-clause |
broferek/ansible | lib/ansible/plugins/action/net_system.py | 648 | 1057 | # (c) 2017, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action.net_base import ActionModule as _ActionModule
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
return result
| gpl-3.0 |
titom1986/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/kickstarter.py | 9 | 2230 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class KickStarterIE(InfoExtractor):
_VALID_URL = r'https?://www\.kickstarter\.com/projects/(?P<id>[^/]*)/.*'
_TESTS = [{
'url': 'https://www.kickstarter.com/projects/1404461844/intersection-the-story-of-josh-grant?ref=home_location',
'md5': 'c81addca81327ffa66c642b5d8b08cab',
'info_dict': {
'id': '1404461844',
'ext': 'mp4',
'title': 'Intersection: The Story of Josh Grant by Kyle Cowling',
'description': 'A unique motocross documentary that examines the '
'life and mind of one of sports most elite athletes: Josh Grant.',
},
}, {
'note': 'Embedded video (not using the native kickstarter video service)',
'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
'playlist': [
{
'info_dict': {
'id': '78704821',
'ext': 'mp4',
'uploader_id': 'pebble',
'uploader': 'Pebble Technology',
'title': 'Pebble iOS Notifications',
}
}
],
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>\s*(.*?)(?:\s*— Kickstarter)?\s*</title>',
webpage, 'title')
video_url = self._search_regex(
r'data-video-url="(.*?)"',
webpage, 'video URL', default=None)
if video_url is None: # No native kickstarter, look for embedded videos
return {
'_type': 'url_transparent',
'ie_key': 'Generic',
'url': url,
'title': title,
}
return {
'id': video_id,
'url': video_url,
'title': title,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
}
| gpl-3.0 |
indx/indx-core | lib/indx/webserver/handlers/app.py | 2 | 5788 | # Copyright (C) 2011-2013 University of Southampton
# Copyright (C) 2011-2013 Daniel Alexander Smith
# Copyright (C) 2011-2013 Max Van Kleek
# Copyright (C) 2011-2013 Nigel R. Shadbolt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging, os, json
from functools import partial
from operator import is_not
from twisted.web.resource import Resource
from twisted.web.static import File
from twisted.web.resource import NoResource
import apps
from indx.webserver.handlers.service import ServiceHandler
# map apps/modulename/api/x -> handler
# map apps/modulename/x -> static
class NoHTMLHandler(Resource):
def render(self, request):
return "Nothing to see here."
class AppsMetaHandler(Resource):
def __init__(self,webserver):
Resource.__init__(self)
self.indx_reactor = webserver.indx_reactor
self.isLeaf = False
self.index = File('html/index.html')
self._register_apps(webserver)
# def getChild(self, path, request):
# # i don't think this actually happens.
# logging.debug('get child ' + path ) # type(path) + " " + repr(request))
# return self.apps.get(path) or NoResource()
def getNewModuleNames(self):
## returns any subdirs of /apps that have a manifest.json
return [d for d in os.listdir('apps') if os.path.exists(os.path.join('apps', d,'manifest.json'))]
def _register_apps(self, server):
## legacy apps (that have __init__.py)
for appname, vals in apps.MODULES.iteritems():
module,html = vals['module'],vals['html']
logging.debug("Legacy App Registering {0} --- module: {1}; html: {2}".format(appname, module, html))
if not html:
file_handler = NoHTMLHandler()
else:
logging.debug('HTML handler {0}'.format(html))
file_handler = File(html)
if getattr(module, 'APP', None):
app = module.APP(server)
logging.debug('registering api child {0}'.format(repr(app)))
file_handler.putChild('api', app)
else:
logging.debug('static content only {0}'.format(html))
pass
# file_handler.putChild(appname,File(html))
logging.debug("legacy putchild {0}, handler: {1} ".format(appname,file_handler))
self.putChild(appname,file_handler) ## this puts things at the base -- rather than putting the app handler
## end of support for legacy apps
## now for new apps!
legacy_apps = apps.MODULES.keys()
new_apps = set(self.getNewModuleNames()) - set(legacy_apps)
basedir = apps.BASEDIR
for appbase in new_apps:
logging.debug("Instantiating handler for New Style App : {0}".format(appbase))
# first add html directory
if os.path.exists(os.path.join(basedir, appbase, 'html')):
# logging.debug("Adding html static dir {0}".format(os.path.join(basedir, appbase, 'html')))
file_handler = File(os.path.join(basedir, appbase, 'html'))
else:
file_handler = NoHTMLHandler()
# # try to see if it's a service
handler = ServiceHandler(server, appbase)
if handler.is_service() :
# logging.debug(" This is a service, so registering an api child >>>>>>>>>>>>>>>>>>>>>>>> ");
## putting child under api
file_handler.putChild('api', handler)
# because register=False in serviceHandler.py
for mapping in handler.get_mappings():
self.indx_reactor.add_mapping(mapping)
if handler.on_boot() :
handler.start()
else:
logging.debug("{0} Not a service, so not registering an app".format(appbase))
pass
file_handler.putChild('.manifest', File(os.path.join(basedir, appbase, 'manifest.json')))
self.putChild(appbase,file_handler) ## this puts things at the base -- rather than putting the app handler
pass
def get_manifest(self, modulename):
try:
manifest_path = os.path.join(apps.BASEDIR, modulename, 'manifest.json')
manifest_data = open(manifest_path,'r')
manifest = json.load(manifest_data)
manifest_data.close()
return manifest
except Exception as e:
logging.error("Unable to load module {0}, skipping.".format(modulename))
return None
def get_modules(self):
## apps and services are modules
all = set(apps.MODULES.keys()).union(self.getNewModuleNames())
def kv(k):
kv = self.get_manifest(k)
if kv is None:
return None
kv["@id"] = k
return kv
return filter(partial(is_not, None), [kv(k) for k in all])
def options(self, request):
self.return_ok(request)
def render(self, request):
## serve the index page; returning self.render(request) here would recurse forever
logging.debug('render request for apps ')
return self.index.render(request)
| agpl-3.0 |
nbetcher/latte-2.6.35-crc | scripts/rt-tester/rt-tester.py | 1094 | 5362 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"lockbkl" : "9",
"unlockbkl" : "10",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
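# For example, a test line "C: lock: 0: 0" looks up "lock" above, yielding
# opcode "3", and the command branch below writes the string "3:0" to
# /sys/devices/system/rttest/rttest0/command.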
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
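# e.g. a raw status value of 324 with arg=1 selects the tens digit (2),
# which is then compared against the expected state in top[2]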
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
alwayskidd/LRB | bindings/python/rad_util.py | 212 | 26013 | # Copyright (c) 2007 RADLogic
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Provide various handy Python functions.
Running this script directly will execute the doctests.
Functions:
int2bin(i, n) -- Convert integer to binary string.
bin2int(bin_string) -- Convert binary string to integer.
reverse(input_string) -- Reverse a string.
transpose(matrix) -- Transpose a list of lists.
polygon_area(points_list) -- Calculate the area of an arbitrary polygon.
timestamp() -- Return string containing current time stamp.
pt2str(point) -- Return prettier string version of point tuple.
gcf(a, b) -- Return the greatest common factor of two numbers.
lcm(a, b) -- Return the least common multiple of two numbers.
permutations(input_list) -- Generate all permutations of a list of items.
reduce_fraction(fraction) -- Reduce fraction (num, denom) to simplest form.
quantile(l, p) -- Return p quantile of list l. E.g. p=0.25 for q1.
trim(l) -- Discard values in list more than 1.5*IQR outside IQR.
nice_units(value) -- Return value converted to human readable units.
uniquify(seq) -- Return sequence with duplicate items in sequence seq removed.
reverse_dict(d) -- Return the dictionary with the items as keys and vice-versa.
lsb(x, n) -- Return the n least significant bits of x.
gray_encode(i) -- Gray encode the given integer.
random_vec(bits, max_value=None) -- Return a random binary vector.
binary_range(bits) -- Return list of all possible binary numbers width=bits.
float_range([start], stop, [step]) -- Return range of floats.
find_common_fixes(s1, s2) -- Find common (prefix, suffix) of two strings.
is_rotated(seq1, seq2) -- Return true if the list is a rotation of other list.
getmodule(obj) -- Return the module that contains the object definition of obj.
(use inspect.getmodule instead, though)
get_args(argv) -- Store command-line args in a dictionary.
This module requires Python >= 2.2
"""
__author__ = 'Tim Wegener <twegener@radlogic.com.au>'
__date__ = '$Date: 2007/03/27 03:15:06 $'
__version__ = '$Revision: 0.45 $'
__credits__ = """
David Chandler, for polygon area algorithm.
(http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf)
"""
import re
import sys
import time
import random
try:
True, False
except NameError:
True, False = (1==1, 0==1)
def int2bin(i, n):
"""Convert decimal integer i to n-bit binary number (string).
>>> int2bin(0, 8)
'00000000'
>>> int2bin(123, 8)
'01111011'
>>> int2bin(123L, 8)
'01111011'
>>> int2bin(15, 2)
Traceback (most recent call last):
ValueError: Value too large for given number of bits.
"""
hex2bin = {'0': '0000', '1': '0001', '2': '0010', '3': '0011',
'4': '0100', '5': '0101', '6': '0110', '7': '0111',
'8': '1000', '9': '1001', 'a': '1010', 'b': '1011',
'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111'}
# Convert to hex then map each hex digit to binary equivalent.
result = ''.join([hex2bin[x] for x in hex(i).lower().replace('l','')[2:]])
# Shrink result to appropriate length.
# Raise an error if the value is changed by the truncation.
if '1' in result[:-n]:
raise ValueError("Value too large for given number of bits.")
result = result[-n:]
# Zero-pad if length longer than mapped result.
result = '0'*(n-len(result)) + result
return result
def bin2int(bin_string):
"""Convert binary number string to decimal integer.
Note: Python > v2 has int(bin_string, 2)
>>> bin2int('1111')
15
>>> bin2int('0101')
5
"""
## result = 0
## bin_list = list(bin_string)
## if len(filter(lambda x: x in ('1','0'), bin_list)) < len(bin_list):
## raise Exception ("bin2int: Error - not a binary number: %s"
## % bin_string)
## bit_list = map(int, bin_list)
## bit_list.reverse() # Make most significant bit have highest index.
## for bit_place in range(len(bit_list)):
## result = result + ((2**bit_place) * bit_list[bit_place])
## return result
return int(bin_string, 2)
def reverse(input_string):
"""Reverse a string. Useful for strings of binary numbers.
>>> reverse('abc')
'cba'
"""
str_list = list(input_string)
str_list.reverse()
return ''.join(str_list)
def transpose(matrix):
"""Transpose a list of lists.
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']])
[['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f', 'i']]
>>> transpose([['a', 'b', 'c'], ['d', 'e', 'f']])
[['a', 'd'], ['b', 'e'], ['c', 'f']]
>>> transpose([['a', 'b'], ['d', 'e'], ['g', 'h']])
[['a', 'd', 'g'], ['b', 'e', 'h']]
"""
result = zip(*matrix)
# Convert list of tuples to list of lists.
# map is faster than a list comprehension since it is being used with
# a built-in function as an argument.
result = map(list, result)
return result
def polygon_area(points_list, precision=100):
"""Calculate area of an arbitrary polygon using an algorithm from the web.
Return the area of the polygon as a positive float.
Arguments:
points_list -- list of point tuples [(x0, y0), (x1, y1), (x2, y2), ...]
(Unclosed polygons will be closed automatically.)
precision -- Internal arithmetic precision (integer arithmetic).
>>> polygon_area([(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 0), (0, 0)])
3.0
Credits:
Area of a General Polygon by David Chandler
http://www.davidchandler.com/AreaOfAGeneralPolygon.pdf
"""
# Scale up co-ordinates and convert them to integers.
for i in range(len(points_list)):
points_list[i] = (int(points_list[i][0] * precision),
int(points_list[i][1] * precision))
# Close polygon if not closed.
if points_list[-1] != points_list[0]:
points_list.append(points_list[0])
# Calculate area.
area = 0
for i in range(len(points_list)-1):
(x_i, y_i) = points_list[i]
(x_i_plus_1, y_i_plus_1) = points_list[i+1]
area = area + (x_i_plus_1 * y_i) - (y_i_plus_1 * x_i)
area = abs(area / 2)
# Unscale area.
area = float(area)/(precision**2)
return area
def timestamp():
"""Return string containing current time stamp.
Note: In Python 2 onwards can use time.asctime() with no arguments.
"""
return time.asctime()
def pt2str(point):
"""Return prettier string version of point tuple.
>>> pt2str((1.8, 1.9))
'(1.8, 1.9)'
"""
return "(%s, %s)" % (str(point[0]), str(point[1]))
def gcf(a, b, epsilon=1e-16):
"""Return the greatest common factor of a and b, using Euclidean algorithm.
Arguments:
a, b -- two numbers
If both numbers are integers return an integer result,
otherwise return a float result.
epsilon -- floats less than this magnitude are considered to be zero
(default: 1e-16)
Examples:
>>> gcf(12, 34)
2
>>> gcf(13.5, 4)
0.5
>>> gcf(-2, 4)
2
>>> gcf(5, 0)
5
By (a convenient) definition:
>>> gcf(0, 0)
0
"""
result = max(a, b)
remainder = min(a, b)
while remainder and abs(remainder) > epsilon:
new_remainder = result % remainder
result = remainder
remainder = new_remainder
return abs(result)
def lcm(a, b, precision=None):
"""Return the least common multiple of a and b, using the gcf function.
Arguments:
a, b -- two numbers. If both are integers return an integer result,
otherwise return a float result.
precision -- scaling factor if a and/or b are floats.
>>> lcm(21, 6)
42
>>> lcm(2.5, 3.5)
17.5
>>> str(lcm(1.5e-8, 2.5e-8, precision=1e9))
'7.5e-08'
By (an arbitrary) definition:
>>> lcm(0, 0)
0
"""
# Note: Dummy precision argument is for backwards compatibility.
# Do the division first.
# (See http://en.wikipedia.org/wiki/Least_common_multiple )
denom = gcf(a, b)
if denom == 0:
result = 0
else:
result = a * (b / denom)
return result
def permutations(input_list):
"""Return a list containing all permutations of the input list.
Note: This is a recursive function.
>>> perms = permutations(['a', 'b', 'c'])
>>> perms.sort()
>>> for perm in perms:
... print perm
['a', 'b', 'c']
['a', 'c', 'b']
['b', 'a', 'c']
['b', 'c', 'a']
['c', 'a', 'b']
['c', 'b', 'a']
"""
out_lists = []
if len(input_list) > 1:
# Extract first item in list.
item = input_list[0]
# Find all permutations of remainder of list. (Recursive call.)
sub_lists = permutations(input_list[1:])
# For every permutation of the sub list...
for sub_list in sub_lists:
# Insert the extracted first item at every position of the list.
for i in range(len(input_list)):
new_list = sub_list[:]
new_list.insert(i, item)
out_lists.append(new_list)
else:
# Termination condition: only one item in input list.
out_lists = [input_list]
return out_lists
def reduce_fraction(fraction):
"""Reduce fraction tuple to simplest form. fraction=(num, denom)
>>> reduce_fraction((14, 7))
(2, 1)
>>> reduce_fraction((-2, 4))
(-1, 2)
>>> reduce_fraction((0, 4))
(0, 1)
>>> reduce_fraction((4, 0))
(1, 0)
"""
(numerator, denominator) = fraction
common_factor = abs(gcf(numerator, denominator))
result = (numerator/common_factor, denominator/common_factor)
return result
def quantile(l, p):
"""Return p quantile of list l. E.g. p=0.25 for q1.
See:
http://rweb.stat.umn.edu/R/library/base/html/quantile.html
"""
l_sort = l[:]
l_sort.sort()
n = len(l)
r = 1 + ((n - 1) * p)
i = int(r)
f = r - i
if i < n:
result = (1-f)*l_sort[i-1] + f*l_sort[i]
else:
result = l_sort[i-1]
return result
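# For example, quantile([1, 2, 3, 4], 0.25) computes r = 1 + 3*0.25 = 1.75 and
# interpolates 0.25*1 + 0.75*2 == 1.75, matching R's default (type 7) quantile.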
def trim(l):
"""Discard values in list more than 1.5*IQR outside IQR.
(IQR is inter-quartile-range)
This function uses rad_util.quantile
1.5*IQR -- mild outlier
3*IQR -- extreme outlier
See:
http://wind.cc.whecn.edu/~pwildman/statnew/section_7_-_exploratory_data_analysis.htm
"""
l_sort = l[:]
l_sort.sort()
# Calculate medianscore (based on stats.py lmedianscore by Gary Strangman)
if len(l_sort) % 2 == 0:
# If even number of scores, average middle 2.
index = int(len(l_sort) / 2) # Integer division correct
median = float(l_sort[index] + l_sort[index-1]) / 2
else:
# int division gives the mid index when counting from 0
index = int(len(l_sort) / 2)
median = l_sort[index]
# Calculate IQR.
q1 = quantile(l_sort, 0.25)
q3 = quantile(l_sort, 0.75)
iqr = q3 - q1
iqr_extra = iqr * 1.5
def in_interval(x, i=iqr_extra, q1=q1, q3=q3):
return (x >= q1-i and x <= q3+i)
l_trimmed = [x for x in l_sort if in_interval(x)]
return l_trimmed
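# For example, trim([1, 2, 3, 4, 100]) has q1=2 and q3=4, so the keep-interval
# is [-1, 7] and the outlier 100 is discarded, returning [1, 2, 3, 4].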
def nice_units(value, dp=0, sigfigs=None, suffix='', space=' ',
use_extra_prefixes=False, use_full_name=False, mode='si'):
"""Return value converted to human readable units eg milli, micro, etc.
Arguments:
value -- number in base units
dp -- number of decimal places to display (rounded)
sigfigs -- number of significant figures to display (rounded)
This overrides dp if set.
suffix -- optional unit suffix to append to unit multiplier
space -- separator between value and unit multiplier (default: ' ')
use_extra_prefixes -- use hecto, deka, deci and centi as well if set.
(default: False)
use_full_name -- use full name for multiplier symbol,
e.g. milli instead of m
(default: False)
mode -- 'si' for SI prefixes, 'bin' for binary multipliers (1024, etc.)
(Default: 'si')
SI prefixes from:
http://physics.nist.gov/cuu/Units/prefixes.html
(Greek mu changed to u.)
Binary prefixes based on:
http://physics.nist.gov/cuu/Units/binary.html
>>> nice_units(2e-11)
'20 p'
>>> nice_units(2e-11, space='')
'20p'
"""
si_prefixes = {1e24: ('Y', 'yotta'),
1e21: ('Z', 'zetta'),
1e18: ('E', 'exa'),
1e15: ('P', 'peta'),
1e12: ('T', 'tera'),
1e9: ('G', 'giga'),
1e6: ('M', 'mega'),
1e3: ('k', 'kilo'),
1e-3: ('m', 'milli'),
1e-6: ('u', 'micro'),
1e-9: ('n', 'nano'),
1e-12: ('p', 'pico'),
1e-15: ('f', 'femto'),
1e-18: ('a', 'atto'),
1e-21: ('z', 'zepto'),
1e-24: ('y', 'yocto')
}
if use_extra_prefixes:
si_prefixes.update({1e2: ('h', 'hecto'),
1e1: ('da', 'deka'),
1e-1: ('d', 'deci'),
1e-2: ('c', 'centi')
})
bin_prefixes = {2**10: ('K', 'kilo'),
2**20: ('M', 'mega'),
2**30: ('G', 'giga'),
2**40: ('T', 'tera'),
2**50: ('P', 'peta'),
2**60: ('E', 'exa')
}
if mode == 'bin':
prefixes = bin_prefixes
else:
prefixes = si_prefixes
prefixes[1] = ('', '') # Unity.
# Determine appropriate multiplier.
multipliers = prefixes.keys()
multipliers.sort()
mult = None
for i in range(len(multipliers) - 1):
lower_mult = multipliers[i]
upper_mult = multipliers[i+1]
if lower_mult <= value < upper_mult:
mult_i = i
break
if mult is None:
if value < multipliers[0]:
mult_i = 0
elif value >= multipliers[-1]:
mult_i = len(multipliers) - 1
mult = multipliers[mult_i]
# Convert value for this multiplier.
new_value = value / mult
# Deal with special case due to rounding.
if sigfigs is None:
if mult_i < (len(multipliers) - 1) and \
round(new_value, dp) == \
round((multipliers[mult_i+1] / mult), dp):
mult = multipliers[mult_i + 1]
new_value = value / mult
# Concatenate multiplier symbol.
if use_full_name:
label_type = 1
else:
label_type = 0
# Round and truncate to appropriate precision.
if sigfigs is None:
str_value = eval('"%.'+str(dp)+'f" % new_value', locals(), {})
else:
str_value = eval('"%.'+str(sigfigs)+'g" % new_value', locals(), {})
return str_value + space + prefixes[mult][label_type] + suffix
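# The binary mode works the same way with powers of 1024, e.g.
# nice_units(2048, suffix='B', mode='bin') returns '2 KB'.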
def uniquify(seq, preserve_order=False):
"""Return sequence with duplicate items in sequence seq removed.
The code is based on usenet post by Tim Peters.
This code is O(N) if the sequence items are hashable, O(N**2) if not.
Peter Bengtsson has a blog post with an empirical comparison of other
approaches:
http://www.peterbe.com/plog/uniqifiers-benchmark
If order is not important and the sequence items are hashable then
list(set(seq)) is readable and efficient.
If order is important and the sequence items are hashable generator
expressions can be used (in py >= 2.4) (useful for large sequences):
seen = set()
do_something(x for x in seq if x not in seen or seen.add(x))
Arguments:
seq -- sequence
preserve_order -- if not set the order will be arbitrary
Using this option will incur a speed penalty.
(default: False)
Example showing order preservation:
>>> uniquify(['a', 'aa', 'b', 'b', 'ccc', 'ccc', 'd'], preserve_order=True)
['a', 'aa', 'b', 'ccc', 'd']
Example using a sequence of un-hashable items:
>>> uniquify([['z'], ['x'], ['y'], ['z']], preserve_order=True)
[['z'], ['x'], ['y']]
The sorted output or the non-order-preserving approach should equal
that of the sorted order-preserving approach output:
>>> unordered = uniquify([3, 3, 1, 2], preserve_order=False)
>>> unordered.sort()
>>> ordered = uniquify([3, 3, 1, 2], preserve_order=True)
>>> ordered.sort()
>>> ordered
[1, 2, 3]
>>> int(ordered == unordered)
1
"""
try:
# Attempt fast algorithm.
d = {}
if preserve_order:
# This is based on Dave Kirby's method (f8) noted in the post:
# http://www.peterbe.com/plog/uniqifiers-benchmark
return [x for x in seq if (x not in d) and not d.__setitem__(x, 0)]
else:
for x in seq:
d[x] = 0
return d.keys()
except TypeError:
# Have an unhashable object, so use slow algorithm.
result = []
app = result.append
for x in seq:
if x not in result:
app(x)
return result
# Alias to noun form for backward compatibility.
unique = uniquify
def reverse_dict(d):
"""Reverse a dictionary so the items become the keys and vice-versa.
Note: The results will be arbitrary if the items are not unique.
>>> d = reverse_dict({'a': 1, 'b': 2})
>>> d_items = d.items()
>>> d_items.sort()
>>> d_items
[(1, 'a'), (2, 'b')]
"""
result = {}
for key, value in d.items():
result[value] = key
return result
def lsb(x, n):
"""Return the n least significant bits of x.
>>> lsb(13, 3)
5
"""
return x & ((2 ** n) - 1)
def gray_encode(i):
"""Gray encode the given integer."""
return i ^ (i >> 1)
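# Successive Gray codes differ in exactly one bit, e.g. gray_encode(3) == 2
# (0b010) and gray_encode(4) == 6 (0b110).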
def random_vec(bits, max_value=None):
"""Generate a random binary vector of length bits and given max value."""
vector = ""
for _ in range(int(bits / 10) + 1):
i = int((2**10) * random.random())
vector += int2bin(i, 10)
if max_value and (max_value < 2 ** bits - 1):
# Multiply before dividing: with integer division, dividing first would
# truncate the scale factor to zero for almost any vector.
vector = int2bin(int(vector, 2) * max_value / (2 ** bits - 1), bits)
return vector[0:bits]
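# e.g. random_vec(12) returns a 12-character string of '0'/'1' digits
# (the exact bits are random).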
def binary_range(bits):
"""Return a list of all possible binary numbers in order with width=bits.
It would be nice to extend it to match the
functionality of python's range() built-in function.
"""
l = []
v = ['0'] * bits
toggle = [1] + [0] * bits
while toggle[bits] != 1:
v_copy = v[:]
v_copy.reverse()
l.append(''.join(v_copy))
toggle = [1] + [0]*bits
i = 0
while i < bits and toggle[i] == 1:
if toggle[i]:
if v[i] == '0':
v[i] = '1'
toggle[i+1] = 0
else:
v[i] = '0'
toggle[i+1] = 1
i += 1
return l
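# For example, binary_range(2) returns ['00', '01', '10', '11'].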
def float_range(start, stop=None, step=None):
"""Return a list containing an arithmetic progression of floats.
Return a list of floats between 0.0 (or start) and stop with an
increment of step.
This is similar in functionality to python's range() built-in function
but can accept float increments.
As with range(), stop is omitted from the list.
"""
if stop is None:
stop = float(start)
start = 0.0
if step is None:
step = 1.0
cur = float(start)
l = []
while cur < stop:
l.append(cur)
cur += step
return l
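# For example, float_range(0, 1, 0.25) returns [0.0, 0.25, 0.5, 0.75];
# as with range(), the stop value itself is excluded.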
def find_common_fixes(s1, s2):
"""Find common (prefix, suffix) of two strings.
>>> find_common_fixes('abc', 'def')
('', '')
>>> find_common_fixes('abcelephantdef', 'abccowdef')
('abc', 'def')
>>> find_common_fixes('abcelephantdef', 'abccow')
('abc', '')
>>> find_common_fixes('elephantdef', 'abccowdef')
('', 'def')
"""
prefix = []
suffix = []
i = 0
common_len = min(len(s1), len(s2))
while i < common_len:
if s1[i] != s2[i]:
break
prefix.append(s1[i])
i += 1
i = 1
while i < (common_len + 1):
if s1[-i] != s2[-i]:
break
suffix.append(s1[-i])
i += 1
suffix.reverse()
prefix = ''.join(prefix)
suffix = ''.join(suffix)
return (prefix, suffix)
def is_rotated(seq1, seq2):
"""Return true if the first sequence is a rotation of the second sequence.
>>> seq1 = ['A', 'B', 'C', 'D']
>>> seq2 = ['C', 'D', 'A', 'B']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['C', 'D', 'B', 'A']
>>> int(is_rotated(seq1, seq2))
0
>>> seq1 = ['A', 'B', 'C', 'A']
>>> seq2 = ['A', 'A', 'B', 'C']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'B', 'C', 'A']
>>> int(is_rotated(seq1, seq2))
1
>>> seq2 = ['A', 'A', 'C', 'B']
>>> int(is_rotated(seq1, seq2))
0
"""
# Do a sanity check.
if len(seq1) != len(seq2):
return False
# Look for occurrences of second sequence head item in first sequence.
start_indexes = []
head_item = seq2[0]
for index1 in range(len(seq1)):
if seq1[index1] == head_item:
start_indexes.append(index1)
# Check that wrapped sequence matches.
double_seq1 = seq1 + seq1
for index1 in start_indexes:
if double_seq1[index1:index1+len(seq1)] == seq2:
return True
return False
def getmodule(obj):
"""Return the module that contains the object definition of obj.
Note: Use inspect.getmodule instead.
Arguments:
obj -- python obj, generally a class or a function
Examples:
A function:
>>> module = getmodule(random.choice)
>>> module.__name__
'random'
>>> module is random
1
A class:
>>> module = getmodule(random.Random)
>>> module.__name__
'random'
>>> module is random
1
A class inheriting from a class in another module:
(note: The inheriting class must define at least one function.)
>>> class MyRandom(random.Random):
... def play(self):
... pass
>>> module = getmodule(MyRandom)
>>> if __name__ == '__main__':
... name = 'rad_util'
... else:
... name = module.__name__
>>> name
'rad_util'
>>> module is sys.modules[__name__]
1
Discussion:
This approach is slightly hackish, and won't work in various situations.
However, this was the approach recommended by GvR, so it's as good as
you'll get.
See GvR's post in this thread:
http://groups.google.com.au/group/comp.lang.python/browse_thread/thread/966a7bdee07e3b34/c3cab3f41ea84236?lnk=st&q=python+determine+class+module&rnum=4&hl=en#c3cab3f41ea84236
"""
if hasattr(obj, 'func_globals'):
func = obj
else:
# Handle classes.
func = None
for item in obj.__dict__.values():
if hasattr(item, 'func_globals'):
func = item
break
if func is None:
raise ValueError("No functions attached to object: %r" % obj)
module_name = func.func_globals['__name__']
# Get module.
module = sys.modules[module_name]
return module
def round_grid(value, grid, mode=0):
"""Round off the given value to the given grid size.
Arguments:
value -- value to be rounded
grid -- result must be a multiple of this
mode -- 0 nearest, 1 up, -1 down
Examples:
>>> round_grid(7.5, 5)
10
>>> round_grid(7.5, 5, mode=-1)
5
>>> round_grid(7.3, 5, mode=1)
10
>>> round_grid(7.3, 5.0, mode=1)
10.0
"""
off_grid = value % grid
if mode == 0:
add_one = int(off_grid >= (grid / 2.0))
elif mode == 1 and off_grid:
add_one = 1
else:
# mode -1, or a value already on the grid: round down (no increment)
add_one = 0
result = ((int(value / grid) + add_one) * grid)
return result
def get_args(argv):
"""Store command-line args in a dictionary.
-, -- prefixes are removed
Items not prefixed with - or -- are stored as a list, indexed by 'args'
For options that take a value use --option=value
Consider using optparse or getopt (in Python standard library) instead.
"""
d = {}
args = []
for arg in argv:
if arg.startswith('-'):
parts = re.sub(r'^-+', '', arg).split('=')
if len(parts) == 2:
d[parts[0]] = parts[1]
else:
d[parts[0]] = None
else:
args.append(arg)
d['args'] = args
return d
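# For example, get_args(['--mode=fast', '-v', 'input.txt']) returns
# {'mode': 'fast', 'v': None, 'args': ['input.txt']}.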
if __name__ == '__main__':
import doctest
doctest.testmod(sys.modules['__main__'])
| gpl-2.0 |
JNeiger/robocup-software | soccer/gameplay/tactics/one_touch_pass.py | 2 | 4562 | import composite_behavior
import behavior
import skills.move
import tactics.coordinated_pass
import robocup
import constants
import main
import skills.angle_receive
import evaluation.touchpass_positioning
import evaluation.passing
import evaluation.chipping
import enum
## A tactic that causes a robot to pass to another one,
# who scores on the goal as fast as possible.
#
# This class is supplemented by touchpass_positioning and angle_receive
class OneTouchPass(composite_behavior.CompositeBehavior):
tpass = evaluation.touchpass_positioning
receivePointChangeThreshold = 0.15 # probability fraction (i.e. 15%)
class State(enum.Enum):
setup = 1
passing = 2
def __init__(self, skillkicker=None):
super().__init__(continuous=False)
if skillkicker == None:
skillkicker = skills.pivot_kick.PivotKick()
self.tpass_iterations = 0
self.force_reevaluation = False
rp = self.calc_receive_point()
for state in OneTouchPass.State:
self.add_state(state, behavior.Behavior.State.running)
self.add_transition(behavior.Behavior.State.start,
OneTouchPass.State.passing, lambda: True,
'immediately')
self.add_transition(
OneTouchPass.State.passing, behavior.Behavior.State.completed,
lambda: self.pass_bhvr.state == behavior.Behavior.State.completed,
'Touchpass completed.')
self.add_transition(OneTouchPass.State.passing,
behavior.Behavior.State.failed, lambda: self.
pass_bhvr.state == behavior.Behavior.State.failed,
'Touchpass failed!')
self.angle_receive = skills.angle_receive.AngleReceive()
self.pass_bhvr = tactics.coordinated_pass.CoordinatedPass(
None,
self.angle_receive, (skillkicker, lambda x: True),
receiver_required=False,
kicker_required=False,
prekick_timeout=20,
use_chipper=True)
def evaluate_chip(self, receive_point):
bp = main.ball().pos
ex_robots = self.subbehavior_with_name('pass').get_robots()
kick_p = evaluation.passing.eval_pass(
bp, receive_point, excluded_robots=ex_robots)
if kick_p < .5:
ex_robots.extend(evaluation.chipping.chippable_robots())
chip_p = evaluation.passing.eval_pass(
bp, receive_point, excluded_robots=ex_robots)
if chip_p > kick_p:
self.subbehavior_with_name('pass').use_chipper = True
def calc_receive_point(self):
ex_robots = list(main.system_state().our_robots)
ex_robots.extend(evaluation.chipping.chippable_robots())
receive_pt, _, _ = OneTouchPass.tpass.eval_best_receive_point(
main.ball().pos, None, ex_robots)
return receive_pt
def reset_receive_point(self):
pass_bhvr = self.subbehavior_with_name('pass')
ex_robots = pass_bhvr.get_robots()
ex_robots.extend(evaluation.chipping.chippable_robots())
receive_pt, target_point, probability = OneTouchPass.tpass.eval_best_receive_point(
main.ball().pos, None, ex_robots)
# only change if the increase is beyond the threshold.
if self.force_reevaluation or pass_bhvr.receive_point is None or pass_bhvr.target_point is None \
or probability > OneTouchPass.tpass.eval_single_point(main.ball().pos,
pass_bhvr.receive_point, ignore_robots=ex_robots) \
+ OneTouchPass.receivePointChangeThreshold:
pass_bhvr.receive_point = receive_pt
self.pass_bhvr.skillreceiver.target_point = target_point
self.force_reevaluation = False
def on_enter_passing(self):
self.angle_receive = skills.angle_receive.AngleReceive()
self.add_subbehavior(self.pass_bhvr, 'pass', priority=5)
if self.pass_bhvr.receive_point is None:
self.reset_receive_point()
def execute_passing(self):
if (self.pass_bhvr.state != tactics.coordinated_pass.CoordinatedPass.State.receiving
and self.tpass_iterations > 50) or main.ball().pos.y < self.pass_bhvr.receive_point.y:
self.force_reevaluation = True
self.reset_receive_point()
self.tpass_iterations = 0
def on_exit_passing(self):
self.remove_subbehavior('pass')
| apache-2.0 |
credativ/pulp | server/test/unit/server/db/migrations/test_0017_distributor_list_published.py | 3 | 1618 | from copy import deepcopy
from datetime import datetime
from unittest import TestCase
from mock import patch, Mock, call
from pulp.server.db.migrate.models import MigrationModule
LAST_PUBLISH = 'last_publish'
MIGRATION = 'pulp.server.db.migrations.0017_distributor_last_published'
class TestMigration(TestCase):
"""
Test the migration.
"""
@patch('.'.join((MIGRATION, 'parse_iso8601_datetime')))
@patch('.'.join((MIGRATION, 'RepoDistributor')))
def test_migrate(self, distributor, parse_iso8601_datetime):
collection = Mock()
found = [
{LAST_PUBLISH: '2015-04-28T18:19:01Z'},
{LAST_PUBLISH: datetime.now()},
{LAST_PUBLISH: '2015-04-28T18:20:01Z'},
{LAST_PUBLISH: datetime.now()},
]
parsed = [1, 2]
collection.find.return_value = deepcopy(found)
distributor.get_collection.return_value = collection
parse_iso8601_datetime.side_effect = parsed
# test
module = MigrationModule(MIGRATION)._module
module.migrate()
# validation
distributor.get_collection.assert_called_once_with()
collection.find.assert_called_once_with()
self.assertEqual(
parse_iso8601_datetime.call_args_list,
[
call(found[0][LAST_PUBLISH]),
call(found[2][LAST_PUBLISH]),
])
self.assertEqual(
collection.save.call_args_list,
[
call({LAST_PUBLISH: parsed[0]}, safe=True),
call({LAST_PUBLISH: parsed[1]}, safe=True)
])
| gpl-2.0 |
GenericStudent/home-assistant | tests/components/openalpr_cloud/test_image_processing.py | 13 | 6882 | """The tests for the openalpr cloud platform."""
import asyncio
from homeassistant.components import camera, image_processing as ip
from homeassistant.components.openalpr_cloud.image_processing import OPENALPR_API_URL
from homeassistant.core import callback
from homeassistant.setup import setup_component
from tests.async_mock import PropertyMock, patch
from tests.common import assert_setup_component, get_test_home_assistant, load_fixture
from tests.components.image_processing import common
class TestOpenAlprCloudSetup:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_platform(self):
"""Set up platform with one entity."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
assert self.hass.states.get("image_processing.openalpr_demo_camera")
def test_setup_platform_name(self):
"""Set up platform with one entity and set name."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera", "name": "test local"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(1, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
assert self.hass.states.get("image_processing.test_local")
def test_setup_platform_without_api_key(self):
"""Set up platform with one entity without api_key."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"region": "eu",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(0, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
def test_setup_platform_without_region(self):
"""Set up platform with one entity without region."""
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera"},
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with assert_setup_component(0, ip.DOMAIN):
setup_component(self.hass, ip.DOMAIN, config)
class TestOpenAlprCloud:
"""Test class for image processing."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
config = {
ip.DOMAIN: {
"platform": "openalpr_cloud",
"source": {"entity_id": "camera.demo_camera", "name": "test local"},
"region": "eu",
"api_key": "sk_abcxyz123456",
},
"camera": {"platform": "demo"},
}
with patch(
"homeassistant.components.openalpr_cloud.image_processing."
"OpenAlprCloudEntity.should_poll",
new_callable=PropertyMock(return_value=False),
):
setup_component(self.hass, ip.DOMAIN, config)
self.hass.block_till_done()
self.alpr_events = []
@callback
def mock_alpr_event(event):
"""Mock event."""
self.alpr_events.append(event)
self.hass.bus.listen("image_processing.found_plate", mock_alpr_event)
self.params = {
"secret_key": "sk_abcxyz123456",
"tasks": "plate",
"return_image": 0,
"country": "eu",
}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_openalpr_process_image(self, aioclient_mock):
"""Set up and scan a picture and test plates from event."""
aioclient_mock.post(
OPENALPR_API_URL,
params=self.params,
text=load_fixture("alpr_cloud.json"),
status=200,
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
state = self.hass.states.get("image_processing.test_local")
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 5
assert state.attributes.get("vehicles") == 1
assert state.state == "H786P0J"
event_data = [
event.data
for event in self.alpr_events
if event.data.get("plate") == "H786P0J"
]
assert len(event_data) == 1
assert event_data[0]["plate"] == "H786P0J"
assert event_data[0]["confidence"] == float(90.436699)
assert event_data[0]["entity_id"] == "image_processing.test_local"
def test_openalpr_process_image_api_error(self, aioclient_mock):
"""Set up and scan a picture and test api error."""
aioclient_mock.post(
OPENALPR_API_URL,
params=self.params,
text="{'error': 'error message'}",
status=400,
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 0
def test_openalpr_process_image_api_timeout(self, aioclient_mock):
"""Set up and scan a picture and test api error."""
aioclient_mock.post(
OPENALPR_API_URL, params=self.params, exc=asyncio.TimeoutError()
)
with patch(
"homeassistant.components.camera.async_get_image",
return_value=camera.Image("image/jpeg", b"image"),
):
common.scan(self.hass, entity_id="image_processing.test_local")
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert len(self.alpr_events) == 0
| apache-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.4/django/contrib/staticfiles/handlers.py | 85 | 2316 | import urllib
from urlparse import urlparse
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
def __init__(self, application, base_dir=None):
self.application = application
if base_dir:
self.base_dir = base_dir
else:
self.base_dir = self.get_base_dir()
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
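# e.g. with STATIC_URL = '/static/', the path '/static/js/app.js' is handled
# but '/media/img.png' is not; a fully-qualified STATIC_URL (one with a host)
# disables local serving because base_url[1] (the netloc) is non-empty.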
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return urllib.url2pathname(relative_url)
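# e.g. with STATIC_URL = '/static/', file_path('/static/css/base.css')
# returns 'css/base.css' (assuming the default posix url2pathname mapping).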
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(environ['PATH_INFO']):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
| apache-2.0 |
eino-makitalo/odoo | addons/account/report/account_aged_partner_balance.py | 16 | 21514 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class aged_trial_report(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context):
super(aged_trial_report, self).__init__(cr, uid, name, context=context)
self.total_account = []
self.localcontext.update({
'time': time,
'get_lines_with_out_partner': self._get_lines_with_out_partner,
'get_lines': self._get_lines,
'get_total': self._get_total,
'get_direction': self._get_direction,
'get_for_period': self._get_for_period,
'get_company': self._get_company,
'get_currency': self._get_currency,
'get_partners':self._get_partners,
'get_account': self._get_account,
'get_fiscalyear': self._get_fiscalyear,
'get_target_move': self._get_target_move,
})
def set_context(self, objects, data, ids, report_type=None):
obj_move = self.pool.get('account.move.line')
ctx = data['form'].get('used_context', {})
ctx.update({'fiscalyear': False, 'all_fiscalyear': True})
self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx)
self.direction_selection = data['form'].get('direction_selection', 'past')
self.target_move = data['form'].get('target_move', 'all')
self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
if (data['form']['result_selection'] == 'customer' ):
self.ACCOUNT_TYPE = ['receivable']
elif (data['form']['result_selection'] == 'supplier'):
self.ACCOUNT_TYPE = ['payable']
else:
self.ACCOUNT_TYPE = ['payable','receivable']
return super(aged_trial_report, self).set_context(objects, data, ids, report_type=report_type)
def _get_lines(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
self.cr.execute('SELECT DISTINCT res_partner.id AS id,\
res_partner.name AS name \
FROM res_partner,account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) \
AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND account_account.active\
AND ((reconcile_id IS NULL)\
OR (reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND (l.partner_id=res_partner.id)\
AND (l.date <= %s)\
AND ' + self.query + ' \
ORDER BY res_partner.name', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
partners = self.cr.dictfetchall()
## reset the totals to 0
for i in range(7):
self.total_account.append(0)
# Build the list of partner ids (passed as a tuple into the SQL queries below)
partner_ids = [x['id'] for x in partners]
if not partner_ids:
return []
# This dictionary will store the debit-credit for all partners, using partner_id as key.
totals = {}
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND ' + self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals[i[0]] = i[1]
# This dictionary will store the future or past of all partners
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id) \
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids),self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND (l.partner_id IN %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active\
AND (l.date <= %s)\
GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
future_past[i[0]] = i[1]
# Use one query per period and store results in history (a list variable)
# Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids),self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (form[str(i)]['start'], form[str(i)]['stop'])
elif form[str(i)]['start']:
dates_query += ' >= %s)'
args_list += (form[str(i)]['start'],)
else:
dates_query += ' <= %s)'
args_list += (form[str(i)]['stop'],)
args_list += (self.date_from,)
self.cr.execute('''SELECT l.partner_id, SUM(l.debit-l.credit), l.reconcile_partial_id
FROM account_move_line AS l, account_account, account_move am
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)
AND (am.state IN %s)
AND (account_account.type IN %s)
AND (l.partner_id IN %s)
AND ((l.reconcile_id IS NULL)
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))
AND ''' + self.query + '''
AND account_account.active
AND ''' + dates_query + '''
AND (l.date <= %s)
GROUP BY l.partner_id, l.reconcile_partial_id''', args_list)
partners_partial = self.cr.fetchall()
partners_amount = dict((i[0],0) for i in partners_partial)
for partner_info in partners_partial:
if partner_info[2]:
# in case of partial reconciliation, we want to keep the remaining amount in the oldest period
self.cr.execute('''SELECT MIN(COALESCE(date_maturity,date)) FROM account_move_line WHERE reconcile_partial_id = %s''', (partner_info[2],))
date = self.cr.fetchall()
partial = False
if 'BETWEEN' in dates_query:
partial = date and args_list[-3] <= date[0][0] <= args_list[-2]
elif '>=' in dates_query:
partial = date and date[0][0] >= form[str(i)]['start']
else:
partial = date and date[0][0] <= form[str(i)]['stop']
if partial:
# partial reconciliation
limit_date = 'COALESCE(l.date_maturity,l.date) %s %%s' % ('<=' if self.direction_selection == 'past' else '>=',)
self.cr.execute('''SELECT SUM(l.debit-l.credit)
FROM account_move_line AS l, account_move AS am
WHERE l.move_id = am.id AND am.state in %s
AND l.reconcile_partial_id = %s
AND ''' + limit_date, (tuple(move_state), partner_info[2], self.date_from))
unreconciled_amount = self.cr.fetchall()
partners_amount[partner_info[0]] += unreconciled_amount[0][0]
else:
partners_amount[partner_info[0]] += partner_info[1]
history.append(partners_amount)
for partner in partners:
values = {}
## If the chosen direction is the future
if self.direction_selection == 'future':
# The per-partner query is replaced by one query that fetches the 'before' value for all partners
before = False
if future_past.has_key(partner['id']):
before = [ future_past[partner['id']] ]
self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
values['direction'] = before and before[0] or 0.0
elif self.direction_selection == 'past': # elif used so that new direction_selections can be added in the future
# The per-partner query is replaced by one query that fetches the 'after' value for all partners at once
after = False
if partner['id'] in future_past: # Making sure this partner actually was found by the query
after = [ future_past[partner['id']] ]
self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
values['direction'] = after and after[0] or 0.0
for i in range(5):
during = False
if partner['id'] in history[i]:
during = [ history[i][partner['id']] ]
# Add to the running total
self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
total = False
if partner['id'] in totals:
total = [ totals[partner['id']] ]
values['total'] = total and total[0] or 0.0
## Add for total
self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
values['name'] = partner['name']
res.append(values)
total = 0.0
totals = {}
for r in res:
total += float(r['total'] or 0.0)
for i in list(range(5)) + ['direction']:
totals.setdefault(str(i), 0.0)
totals[str(i)] += float(r[str(i)] or 0.0)
return res
def _get_lines_with_out_partner(self, form):
res = []
move_state = ['draft','posted']
if self.target_move == 'posted':
move_state = ['posted']
## reset the totals to 0
for i in range(7):
self.total_account.append(0)
totals = {}
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND ((l.reconcile_id IS NULL) \
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND ' + self.query + '\
AND (l.date <= %s)\
AND account_account.active ',(tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
t = self.cr.fetchall()
for i in t:
totals['Unknown Partner'] = i[0]
future_past = {}
if self.direction_selection == 'future':
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am\
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity, l.date) < %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
t = self.cr.fetchall()
for i in t:
future_past['Unknown Partner'] = i[0]
elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
self.cr.execute('SELECT SUM(l.debit-l.credit) \
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (l.partner_id IS NULL)\
AND (account_account.type IN %s)\
AND (COALESCE(l.date_maturity,l.date) > %s)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
t = self.cr.fetchall()
for i in t:
future_past['Unknown Partner'] = i[0]
history = []
for i in range(5):
args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from,)
dates_query = '(COALESCE(l.date_maturity,l.date)'
if form[str(i)]['start'] and form[str(i)]['stop']:
dates_query += ' BETWEEN %s AND %s)'
args_list += (form[str(i)]['start'], form[str(i)]['stop'])
elif form[str(i)]['start']:
dates_query += ' >= %s)'
args_list += (form[str(i)]['start'],)
else:
dates_query += ' <= %s)'
args_list += (form[str(i)]['stop'],)
args_list += (self.date_from,)
self.cr.execute('SELECT SUM(l.debit-l.credit)\
FROM account_move_line AS l, account_account, account_move am \
WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
AND (am.state IN %s)\
AND (account_account.type IN %s)\
AND (l.partner_id IS NULL)\
AND ((l.reconcile_id IS NULL)\
OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
AND '+ self.query + '\
AND account_account.active\
AND ' + dates_query + '\
AND (l.date <= %s)\
GROUP BY l.partner_id', args_list)
t = self.cr.fetchall()
d = {}
for i in t:
d['Unknown Partner'] = i[0]
history.append(d)
values = {}
if self.direction_selection == 'future':
before = False
if 'Unknown Partner' in future_past:
before = [ future_past['Unknown Partner'] ]
self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
values['direction'] = before and before[0] or 0.0
elif self.direction_selection == 'past':
after = False
if 'Unknown Partner' in future_past:
after = [ future_past['Unknown Partner'] ]
self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
values['direction'] = after and after[0] or 0.0
for i in range(5):
during = False
if 'Unknown Partner' in history[i]:
during = [ history[i]['Unknown Partner'] ]
self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
values[str(i)] = during and during[0] or 0.0
total = False
if 'Unknown Partner' in totals:
total = [ totals['Unknown Partner'] ]
values['total'] = total and total[0] or 0.0
## Add for total
self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
values['name'] = 'Unknown Partner'
if values['total']:
res.append(values)
total = 0.0
totals = {}
for r in res:
total += float(r['total'] or 0.0)
for i in list(range(5)) + ['direction']:
totals.setdefault(str(i), 0.0)
totals[str(i)] += float(r[str(i)] or 0.0)
return res
def _get_total(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_direction(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_for_period(self,pos):
period = self.total_account[int(pos)]
return period or 0.0
def _get_partners(self,data):
# TODO: deprecated, to remove in trunk
if data['form']['result_selection'] == 'customer':
return self._translate('Receivable Accounts')
elif data['form']['result_selection'] == 'supplier':
return self._translate('Payable Accounts')
elif data['form']['result_selection'] == 'customer_supplier':
return self._translate('Receivable and Payable Accounts')
return ''
class report_agedpartnerbalance(osv.AbstractModel):
_name = 'report.account.report_agedpartnerbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_agedpartnerbalance'
_wrapped_report_class = aged_trial_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
phense/check_duplicate_files | check_duplicate_files.py | 1 | 21660 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""check_duplicate_files.py
Finds all duplicate files in given directories using a hash-algorithm.
The filesystem is first scanned for possible duplicates: all files with a
unique filesize are dismissed (except for images when the perceptual hash
algorithm is selected). Only the remaining candidates are hashed. Thanks to
this pre-filtering, the module is extremely fast on large file-sets since
only a handful of files actually need to be hashed.
Standard use: python3 check_duplicate_files -i /some/folder ./out.txt
"""
# FEATURE(zyrkon): ignore/include win/linux/mac hidden files
# FEATURE(zyrkon): implement multiprocessing for hashing
# FEATURE(zyrkon): find broken symbolic links
# FEATURE(zyrkon): find empty files and directories
# FEATURE(zyrkon): --size 20M-1G to find files between 20mb and 1gb (example)
# FEATURE(zyrkon): maybe a GUI
__author__ = 'Peter Hense (peter.hense@gmail.com)'
__copyright__ = 'Copyright (c) 2015, Peter Hense'
__license__ = 'Apache License Version 2.0'
__credits__ = '' # ['List', 'of', 'programmers']
__status__ = 'Development' # Prototype / Development / Production
__version__ = '0.8'
import os
import sys
if sys.version_info < (3, 0):
sys.stdout.write("Sorry, requires Python 3.x, not Python 2.x\n")
sys.exit(1)
import codecs
import datetime
import hashlib
import json
import operator
import signal
from argparse import ArgumentParser
from argparse import ArgumentTypeError
from collections import defaultdict
from tqdm import tqdm
from stat import S_ISREG
try:
from PIL import Image # Pillow (modern PIL fork)
except ImportError:
IMG_LIB_ERROR = True
else:
IMG_LIB_ERROR = False
FILEREADERROR = 255
def generate_hashes(filelist, image_list, hashtype, pHash):
""" Main-Module for handling all File-Hashing and saving the hash-results
Args:
filelist: List of file-paths to REGULAR FILES to run a normal hash-algorithm on
image_list: List of file-paths of images to run a perceptual hash-algorithm on
hashtype: hash-algorithm to use for normal files (default=md5)
pHash: boolean switch to activate perceptual image-hashing
Returns:
d_list_hash: dictionary with lists of files sorted by hash-value (key)
errorlist: list of files that could not be accessed / read
"""
d_list_hash = defaultdict(list)
errorlist = []
for file_path in tqdm(filelist, 'hashing', None, True):
hash = _hash(file_path, hashtype)
if hash != FILEREADERROR:
d_list_hash[hash].append(file_path)
else:
errorlist.append(file_path)
if pHash: # perceptual image hashing
d_list_hash_img = defaultdict(list)
for file_path in tqdm(image_list, 'hashing images:', None, True):
hash = _perceptive_hash(file_path)
if hash != FILEREADERROR:
d_list_hash_img[hash].append(file_path)
else:
errorlist.append(file_path)
# calculate the hamming-distance between all image-hashes to find
# near-duplicates (a hamming distance of two perceptual hashes < 4 means
# the images are basically the same)
index_list = [key for key in d_list_hash_img]
deleted_index_keys = []
for hash1 in tqdm(index_list, 'calculating', None, True):
if hash1 in deleted_index_keys:
continue
for hash2 in index_list:
if hash1 == hash2:
continue # same entry in list
if hash2 in deleted_index_keys:
continue
if _hamming_distance(hash1, hash2) < 4:
d_list_hash_img[hash1] += d_list_hash_img[hash2]
del d_list_hash_img[hash2]
deleted_index_keys.append(hash2)
# Filter out all unique entries from our resultset
_delete_unique_entries(d_list_hash)
if pHash:
_delete_unique_entries(d_list_hash_img)
d_list_hash.update(d_list_hash_img)
return d_list_hash, errorlist
def _perceptive_hash(file_path, hash_size=8):
"""Calculates a hash-value from an image
Conversion uses a resized, grayscaled pixel-array of the image, converting
the pixel-array to a number-array (differences between neighboring pixels)
and finally converting these values to a hex-string of length hash_size
Args:
file_path: Path to an Image File
hash_size: Size of the generated hash string
Returns:
hash_string: generated hash string
"""
# if memory consumption is too high for many images, it is possible to use
# with open (file_path, 'rb') as f:
# image = Image.open(f)
# ...
# del image
try:
image = Image.open(file_path)
except Exception:
return FILEREADERROR
# Grayscale and shrink the image in one step
image = image.convert('L').resize((hash_size + 1, hash_size), Image.ANTIALIAS)
pixels = list(image.getdata())
# Compare adjacent pixels
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert binary array to hexadecimal string
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2**(index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
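# Illustrative use (hypothetical paths): two visually identical images should
# produce hashes whose Hamming distance is below 4, mirroring the threshold
# used in generate_hashes() above:
#   h1 = _perceptive_hash('/tmp/photo.jpg')
#   h2 = _perceptive_hash('/tmp/photo_copy.png')
#   duplicates = (h1 != FILEREADERROR and h2 != FILEREADERROR
#                 and _hamming_distance(h1, h2) < 4)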
def _hash(file_path, hashtype):
"""Uses a specified standard hash-algorithm to hash a regular file
Args:
file_path: file_path to a regular file that can be hashed
hashtype: version of hash-algorithm, default = md5
Returns:
hash: hash-string of the hashed file
Raises:
Returns global const FILEREADERROR on IOError
"""
try:
with open(file_path, 'rb') as f:
contents = f.read()
except (IOError, OSError):
return FILEREADERROR
hasher = getattr(hashlib, hashtype.lower(), hashlib.md5)
return hasher(contents).hexdigest()
def _hamming_distance(string1, string2):
""" Calculates the Hamming Distance of two strings, fast version
Args:
string1, string2: two strings of the same length
Returns:
Integer describing the Hamming Distance of the input strings
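Example (illustrative doctest):
>>> _hamming_distance('0000', '0001')
1
>>> _hamming_distance('abcd', 'dcba')
4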
"""
assert len(string1) == len(string2)
ne = operator.ne # faster than '!=' and 'str.__ne__'
return sum(map(ne, string1, string2))
def scan_directories(directories, pHash):
""" creates file-lists from given directories
Recursively walks the given directories and their subdirectories, checking
all including files and their file-sizes. These are saved inside a dictionary
and pre-filtered by filesize. Optional separate handling of image-files.
Args:
directories: List of directories to crawl
pHash: boolean switch to active separate handling of image-files
Returns:
prefiltered_files: List of files with their file-paths
images: List of image-files if pHash is set, else an empty list
errorlist: List of files that could not be accessed
"""
extensions = ('.jpg', '.jpeg', '.png', '.bmp')
d_list_filesize = defaultdict(list)
images = []
errorlist = []
count = 0
print('Scanning directories...')
# code could be a lot smaller with `if pHash` inside the innermost loop
# it would also lead to a LOT of unnecessary checking
if not pHash: # use normal hash on all files
for root_dir in directories:
for path, subdirList, fileList in os.walk(root_dir):
for fname in fileList:
qualified_filename = os.path.join(path, fname)
try: # os.stat may raise if permission is denied
st = os.stat(qualified_filename)
if S_ISREG(st.st_mode):
d_list_filesize[st.st_size].append(qualified_filename)
count += 1
except:
errorlist.append(qualified_filename)
count += 1
else: # split list of normal- and image-files
for root_dir in directories:
for path, subdirList, fileList in os.walk(root_dir):
for fname in fileList:
qualified_filename = os.path.join(path, fname)
if fname.endswith(extensions):
images.append(qualified_filename)
count += 1
else:
try:
st = os.stat(qualified_filename)
if S_ISREG(st.st_mode):
d_list_filesize[st.st_size].append(qualified_filename)
count += 1
except:
errorlist.append(qualified_filename)
count += 1
# Statistic
print('\nFiles found: %s' % count)
# pre-filter all files with unique filesize
# this is where we need the dictionary
_delete_unique_entries(d_list_filesize)
# put all filtered files in a list for easier handling
prefiltered_files = [path for paths in d_list_filesize.values() for path in paths]
# Statistic
print('Possible candidates: %s\n' % (len(prefiltered_files) + len(images)))
return prefiltered_files, images, errorlist
def _delete_unique_entries(dictionary):
""" removes all Lists from a dictionary that contain a single element
Args:
dictionary a dictionary of type defaultdict(set) or defaultdict(list)
"""
mark_for_delete = []
for key in dictionary:
if len(dictionary[key]) == 1:
mark_for_delete.append(key)
for i in mark_for_delete:
del dictionary[i]
return
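# Illustrative behaviour: given {100: ['only_file'], 200: ['dup_a', 'dup_b']},
# _delete_unique_entries() drops the size-100 entry and keeps the size-200
# one, since only lists with more than one path can contain duplicates.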
def write_output_text(d_list_hash, errorlist, outfile):
""" Writes result of this module in textform to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
errorlist: list of files that could not be accessed
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('\nThe Following File-Duplicates were found:')
f.write('\n==========================================\n')
for key in d_list_hash:
f.write('Hash: %s\n' %key)
for file_path in d_list_hash[key]:
try:
f.write('%s \n' % os.path.normcase(file_path))
except:
write_errorlist.append(file_path)
f.write('-------------------\n')
if len(errorlist) > 0:
f.write('\nThe Following Files could not be accessed:')
f.write('\n==========================================\n')
for error in errorlist:
try:
f.write('%s\n' % os.path.normcase(error))
except:
write_errorlist.append(error)
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if len(write_errorlist) > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % os.path.normcase(write_error))
print('(Please check your filesystem encoding)\n')
return
def write_output_bash(d_list_hash, outfile, create_link):
""" Writes result of this module as a bash script to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
create_link: boolean switch to select, if a deleted file should be
replaced by a hardlink
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('#!/bin/bash\n\n')
f.write('# This script is machine generated and might do harm to your\n')
f.write('# running system.\n')
f.write('# Please check this script carefully before running\n')
if create_link:
f.write('printf "replacing duplicates with hardlinks..."\n')
else:
f.write('printf "deleting duplicates..."\n')
for key in d_list_hash:
try:
original = os.path.normcase(d_list_hash[key][0])
f.write('# ------------------\n')
f.write('# Original: %s\n' % original)
for copy in d_list_hash[key][1:]:
f.write('rm "%s"\n' % copy)
if create_link:
f.write('ln "%s" "%s"\n' % (original, os.path.normcase(copy)))
except:
write_errorlist.extend(d_list_hash[key])
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if len(write_errorlist) > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % write_error)
print('(Please check your filesystem encoding)\n')
return
def write_output_win(d_list_hash, outfile, create_link):
""" Writes result of this module as a batch script to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
create_link: boolean switch to select, if a deleted file should be
replaced by a hardlink
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('@ECHO OFF\n\n')
f.write('REM This script is machine generated and might do harm to your\n')
f.write('REM running system.\n')
f.write('REM Please check this script carefully before running\n')
if create_link:
f.write('ECHO "replacing duplicates with hardlinks..."\n')
else:
f.write('ECHO "deleting duplicates..."\n')
for key in d_list_hash:
try:
original = os.path.normcase(d_list_hash[key][0])
f.write('REM ------------------\n')
f.write('REM Original: %s\n' % original)
for copy in d_list_hash[key][1:]:
f.write('DEL "%s"\n' % copy)
if create_link:
f.write('mklink /H "%s" "%s"\n' % (os.path.normcase(copy), original))
except:
write_errorlist.extend(d_list_hash[key])
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if len(write_errorlist) > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % write_error)
print('(Please check your filesystem encoding)\n')
return
def write_output_json(d_list_hash, outfile):
""" Writes result of this module as JSON to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
"""
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
json.dump(d_list_hash, f, ensure_ascii=False, indent=4)
except:
print('\n- Error - Could not write JSON Data to file')
return
def _query_yes_no(question, default="yes"):
"""User Console Interaction for Y/N Questions.
Args:
question: String containing a Question that needs User input
default: select the default answer of the question
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def _signal_handler(signal, frame):
sys.exit('Aborting...')
def _readable_dir(prospective_dir):
""" Checks if a given string is a valid path on the file-system
Args:
prospective_dir: file-path as String
Returns:
prospective_dir if checks are passed
Raises:
ArgumentTypeError if checks fail
"""
if not os.path.isdir(prospective_dir):
raise ArgumentTypeError('readable_dir:{0} is not a valid path'.format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
return prospective_dir
else:
raise ArgumentTypeError('readable_dir:{0} is not a readable dir'.format(prospective_dir))
def main():
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)
start_time = datetime.datetime.now()
parser = ArgumentParser(description = 'Check Duplicate Files')
parser.add_argument('-i', action = 'append', dest = 'dir',
type = _readable_dir,
help = 'add directory to list for duplicate search'
)
parser.add_argument('--hash', action = 'store', dest = 'hashtype',
default = 'md5',
help = 'select hash-type (md5 (default), sha1, sha224, sha256, sha384, sha512)'
)
parser.add_argument('-p', '--perceptual-hashing', action = 'store_true',
dest = 'pHash', default = False,
help = 'enables perceptual hashing of images'
)
parser.add_argument('-o', '--output-format', action = 'store', dest = 'outformat',
default = 'text',
help = 'select output format (text, json, bash_rm, bash_link, win_del, win_link)'
)
parser.add_argument('outfile', #nargs='?',
help = 'output file for found duplicates'
)
parser.add_argument('--version', action='version',
version='%(prog)s {version}'.format(version=__version__))
args = parser.parse_args()
# disable perceptual hashing (normal hashes on all files) when the PIL lib
# could not be loaded or when it was not enabled on the command line
pHash = ((not IMG_LIB_ERROR) and args.pHash)
if not pHash:
print('(Perceptual Image Scan disabled)')
# Scan all directories and find duplicates by filesize
prefiltered_filelist, images, read_errors = scan_directories(args.dir, pHash)
# Ask the user if he wants to continue, now that he knows how
# many files need to be hashed. Exclude the query-time from
# execution time
time_query = datetime.datetime.now()
if not _query_yes_no('Do you want to continue?', 'yes'):
sys.exit(0)
timedelta_query = datetime.datetime.now() - time_query # timedelta
# generate the hashes and calculate the execution time
# append possible new read-errors to the general error-list
d_list_hash = defaultdict(list)
d_list_hash, read_errors2 = generate_hashes(prefiltered_filelist, images, args.hashtype, pHash)
read_errors += read_errors2
execution_time = datetime.datetime.now() - start_time # timedelta
execution_time -= timedelta_query # timedelta
# write output
output = ['text', 'json', 'bash_rm', 'bash_link', 'win_del', 'win_link']
if args.outformat in output:
if args.outformat == 'text':
write_output_text(d_list_hash, read_errors, args.outfile)
elif args.outformat == 'json':
write_output_json(d_list_hash, args.outfile)
elif args.outformat == 'bash_rm':
write_output_bash(d_list_hash, args.outfile, False)
elif args.outformat == 'bash_link':
write_output_bash(d_list_hash, args.outfile, True)
elif args.outformat == 'win_del':
write_output_win(d_list_hash, args.outfile, False)
elif args.outformat == 'win_link':
write_output_win(d_list_hash, args.outfile, True)
else:
write_output_text(d_list_hash, read_errors, args.outfile)
print('\nExecution Time: %s.%s seconds' % (execution_time.seconds,
execution_time.microseconds))
# done
sys.exit(0)
if __name__ == '__main__':
main() | apache-2.0 |
rds0751/colinkers | env/Lib/site-packages/wheel/util.py | 345 | 4890 | """Utility functions."""
import sys
import os
import base64
import json
import hashlib
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8',
'to_json', 'from_json', 'matches_requirement']
def urlsafe_b64encode(data):
"""urlsafe_b64encode without padding"""
return base64.urlsafe_b64encode(data).rstrip(binary('='))
def urlsafe_b64decode(data):
"""urlsafe_b64decode without padding"""
pad = b'=' * (4 - (len(data) & 3))
return base64.urlsafe_b64decode(data + pad)
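# Illustrative round-trip (padding is stripped on encode and restored on decode):
#   urlsafe_b64encode(b'wheel') == b'd2hlZWw'
#   urlsafe_b64decode(b'd2hlZWw') == b'wheel'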
def to_json(o):
'''Convert given data to JSON.'''
return json.dumps(o, sort_keys=True)
def from_json(j):
'''Decode a JSON payload.'''
return json.loads(j)
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = { 'newline': '' }
bin = ''
return open(name, mode + bin, **nl)
try:
unicode
def utf8(data):
'''Utf-8 encode data.'''
if isinstance(data, unicode):
return data.encode('utf-8')
return data
except NameError:
def utf8(data):
'''Utf-8 encode data.'''
if isinstance(data, str):
return data.encode('utf-8')
return data
try:
# For encoding ascii back and forth between bytestrings, as is repeatedly
# necessary in JSON-based crypto under Python 3
unicode
def native(s):
return s
def binary(s):
if isinstance(s, unicode):
return s.encode('ascii')
return s
except NameError:
def native(s):
if isinstance(s, bytes):
return s.decode('ascii')
return s
def binary(s):
if isinstance(s, str):
return s.encode('ascii')
return s
class HashingFile(object):
def __init__(self, fd, hashtype='sha256'):
self.fd = fd
self.hashtype = hashtype
self.hash = hashlib.new(hashtype)
self.length = 0
def write(self, data):
self.hash.update(data)
self.length += len(data)
self.fd.write(data)
def close(self):
self.fd.close()
def digest(self):
if self.hashtype == 'md5':
return self.hash.hexdigest()
digest = self.hash.digest()
return self.hashtype + '=' + native(urlsafe_b64encode(digest))
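# Illustrative use (hypothetical file name): wrap a writable file object to
# compute a RECORD-style digest while streaming data to disk:
#   with open('payload.bin', 'wb') as fd:
#       hf = HashingFile(fd)
#       hf.write(b'some bytes')
#       print(hf.digest())   # -> 'sha256=<urlsafe-b64, unpadded>'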
class OrderedDefaultDict(OrderedDict):
def __init__(self, *args, **kwargs):
if not args:
self.default_factory = None
else:
if not (args[0] is None or callable(args[0])):
raise TypeError('first argument must be callable or None')
self.default_factory = args[0]
args = args[1:]
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = default = self.default_factory()
return default
if sys.platform == 'win32':
import ctypes.wintypes
# CSIDL_APPDATA for reference - not used here for compatibility with
# dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order
csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28,
CSIDL_COMMON_APPDATA=35)
def get_path(name):
SHGFP_TYPE_CURRENT = 0
buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf)
return buf.value
def save_config_path(*resource):
appdata = get_path("CSIDL_LOCAL_APPDATA")
path = os.path.join(appdata, *resource)
if not os.path.isdir(path):
os.makedirs(path)
return path
def load_config_paths(*resource):
ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"]
for id in ids:
base = get_path(id)
path = os.path.join(base, *resource)
if os.path.exists(path):
yield path
else:
def save_config_path(*resource):
import xdg.BaseDirectory
return xdg.BaseDirectory.save_config_path(*resource)
def load_config_paths(*resource):
import xdg.BaseDirectory
return xdg.BaseDirectory.load_config_paths(*resource)
def matches_requirement(req, wheels):
"""List of wheels matching a requirement.
:param req: The requirement to satisfy
:param wheels: List of wheels to search.
"""
try:
from pkg_resources import Distribution, Requirement
except ImportError:
raise RuntimeError("Cannot use requirements without pkg_resources")
req = Requirement.parse(req)
selected = []
for wf in wheels:
f = wf.parsed_filename
dist = Distribution(project_name=f.group("name"), version=f.group("ver"))
if dist in req:
selected.append(wf)
return selected
| agpl-3.0 |
BizzCloud/PosBox | addons/website_customer/__openerp__.py | 52 | 1511 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Customer References',
'category': 'Website',
'summary': 'Publish Your Customer References',
'version': '1.0',
'description': """
OpenERP Customer References
===========================
""",
'author': 'OpenERP SA',
'depends': [
'crm_partner_assign',
'website_partner',
'website_google_map',
],
'demo': [
'website_customer_demo.xml',
],
'data': [
'views/website_customer.xml',
],
'qweb': [],
'installable': True,
}
| agpl-3.0 |
yg257/Pangea | lib/boto-2.34.0/tests/unit/vpc/test_vpngateway.py | 114 | 8709 | # -*- coding: UTF-8 -*-
from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, VpnGateway, Attachment
class TestDescribeVpnGateways(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeVpnGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpnGatewaySet>
<item>
<vpnGatewayId>vgw-8db04f81</vpnGatewayId>
<state>available</state>
<type>ipsec.1</type>
<availabilityZone>us-east-1a</availabilityZone>
<attachments>
<item>
<vpcId>vpc-1a2b3c4d</vpcId>
<state>attached</state>
</item>
</attachments>
<tagSet/>
</item>
</vpnGatewaySet>
</DescribeVpnGatewaysResponse>
"""
def test_get_all_vpn_gateways(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_vpn_gateways(
'vgw-8db04f81', filters=OrderedDict([('state', ['pending', 'available']),
('availability-zone', 'us-east-1a')]))
self.assert_request_parameters({
'Action': 'DescribeVpnGateways',
'VpnGatewayId.1': 'vgw-8db04f81',
'Filter.1.Name': 'state',
'Filter.1.Value.1': 'pending',
'Filter.1.Value.2': 'available',
'Filter.2.Name': 'availability-zone',
'Filter.2.Value.1': 'us-east-1a'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(len(api_response), 1)
self.assertIsInstance(api_response[0], VpnGateway)
self.assertEqual(api_response[0].id, 'vgw-8db04f81')
class TestCreateVpnGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpnGateway>
<vpnGatewayId>vgw-8db04f81</vpnGatewayId>
<state>pending</state>
<type>ipsec.1</type>
<availabilityZone>us-east-1a</availabilityZone>
<attachments/>
<tagSet/>
</vpnGateway>
</CreateVpnGatewayResponse>
"""
def test_create_vpn_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_vpn_gateway('ipsec.1', 'us-east-1a')
self.assert_request_parameters({
'Action': 'CreateVpnGateway',
'AvailabilityZone': 'us-east-1a',
'Type': 'ipsec.1'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, VpnGateway)
self.assertEqual(api_response.id, 'vgw-8db04f81')
class TestDeleteVpnGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpnGatewayResponse>
"""
def test_delete_vpn_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_vpn_gateway('vgw-8db04f81')
self.assert_request_parameters({
'Action': 'DeleteVpnGateway',
'VpnGatewayId': 'vgw-8db04f81'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(api_response, True)
class TestAttachVpnGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<AttachVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<attachment>
<vpcId>vpc-1a2b3c4d</vpcId>
<state>attaching</state>
</attachment>
</AttachVpnGatewayResponse>
"""
def test_attach_vpn_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.attach_vpn_gateway('vgw-8db04f81', 'vpc-1a2b3c4d')
self.assert_request_parameters({
'Action': 'AttachVpnGateway',
'VpnGatewayId': 'vgw-8db04f81',
'VpcId': 'vpc-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, Attachment)
self.assertEqual(api_response.vpc_id, 'vpc-1a2b3c4d')
self.assertEqual(api_response.state, 'attaching')
class TestDetachVpnGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DetachVpnGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DetachVpnGatewayResponse>
"""
def test_detach_vpn_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.detach_vpn_gateway('vgw-8db04f81', 'vpc-1a2b3c4d')
self.assert_request_parameters({
'Action': 'DetachVpnGateway',
'VpnGatewayId': 'vgw-8db04f81',
'VpcId': 'vpc-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(api_response, True)
class TestDisableVgwRoutePropagation(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DisableVgwRoutePropagationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>4f35a1b2-c2c3-4093-b51f-abb9d7311990</requestId>
<return>true</return>
</DisableVgwRoutePropagationResponse>
"""
def test_disable_vgw_route_propagation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.disable_vgw_route_propagation(
'rtb-c98a35a0', 'vgw-d8e09e8a')
self.assert_request_parameters({
'Action': 'DisableVgwRoutePropagation',
'GatewayId': 'vgw-d8e09e8a',
'RouteTableId': 'rtb-c98a35a0'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(api_response, True)
class TestEnableVgwRoutePropagation(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DisableVgwRoutePropagationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>4f35a1b2-c2c3-4093-b51f-abb9d7311990</requestId>
<return>true</return>
</DisableVgwRoutePropagationResponse>
"""
def test_enable_vgw_route_propagation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.enable_vgw_route_propagation(
'rtb-c98a35a0', 'vgw-d8e09e8a')
self.assert_request_parameters({
'Action': 'EnableVgwRoutePropagation',
'GatewayId': 'vgw-d8e09e8a',
'RouteTableId': 'rtb-c98a35a0'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEqual(api_response, True)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
petrpulc/git-cmp | checkers/references.py | 1 | 1113 | """
Reference level checker (existence of given references or all refs/heads and refs/tags).
"""
from common import Common
from utils import check_diff
def __filter(reference_list):
return set(reference for reference in
reference_list if reference.split('/')[1] in ('heads', 'tags'))
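# Illustrative behaviour:
#   __filter(['refs/heads/master', 'refs/tags/v1.0', 'refs/remotes/origin/x'])
#   -> {'refs/heads/master', 'refs/tags/v1.0'}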
def check():
"""
Run the checker on references.
"""
print("=== References")
if Common.args.references is None:
o_refs = __filter(Common.original.listall_references())
n_refs = __filter(Common.new.listall_references())
check_diff(o_refs, n_refs, "References", 2)
else:
o_refs = set()
for reference in Common.args.references:
if reference not in Common.original.listall_references():
print(" {} does not exist, please report".format(reference))
exit(1)
if reference not in Common.new.listall_references():
print(" {} expected, but not found".format(reference))
exit(1)
o_refs.add(reference)
print(" OK")
Common.references = o_refs
| mit |
kubeflow/kfp-tekton | backend/api/python_http_client/kfp_server_api/__init__.py | 1 | 3915 | # coding: utf-8
# flake8: noqa
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.5.0"
# import apis into sdk package
from kfp_server_api.api.experiment_service_api import ExperimentServiceApi
from kfp_server_api.api.healthz_service_api import HealthzServiceApi
from kfp_server_api.api.job_service_api import JobServiceApi
from kfp_server_api.api.pipeline_service_api import PipelineServiceApi
from kfp_server_api.api.pipeline_upload_service_api import PipelineUploadServiceApi
from kfp_server_api.api.run_service_api import RunServiceApi
# import ApiClient
from kfp_server_api.api_client import ApiClient
from kfp_server_api.configuration import Configuration
from kfp_server_api.exceptions import OpenApiException
from kfp_server_api.exceptions import ApiTypeError
from kfp_server_api.exceptions import ApiValueError
from kfp_server_api.exceptions import ApiKeyError
from kfp_server_api.exceptions import ApiException
# import models into sdk package
from kfp_server_api.models.api_cron_schedule import ApiCronSchedule
from kfp_server_api.models.api_experiment import ApiExperiment
from kfp_server_api.models.api_experiment_storage_state import ApiExperimentStorageState
from kfp_server_api.models.api_get_healthz_response import ApiGetHealthzResponse
from kfp_server_api.models.api_get_template_response import ApiGetTemplateResponse
from kfp_server_api.models.api_job import ApiJob
from kfp_server_api.models.api_list_experiments_response import ApiListExperimentsResponse
from kfp_server_api.models.api_list_jobs_response import ApiListJobsResponse
from kfp_server_api.models.api_list_pipeline_versions_response import ApiListPipelineVersionsResponse
from kfp_server_api.models.api_list_pipelines_response import ApiListPipelinesResponse
from kfp_server_api.models.api_list_runs_response import ApiListRunsResponse
from kfp_server_api.models.api_parameter import ApiParameter
from kfp_server_api.models.api_periodic_schedule import ApiPeriodicSchedule
from kfp_server_api.models.api_pipeline import ApiPipeline
from kfp_server_api.models.api_pipeline_runtime import ApiPipelineRuntime
from kfp_server_api.models.api_pipeline_spec import ApiPipelineSpec
from kfp_server_api.models.api_pipeline_version import ApiPipelineVersion
from kfp_server_api.models.api_read_artifact_response import ApiReadArtifactResponse
from kfp_server_api.models.api_relationship import ApiRelationship
from kfp_server_api.models.api_report_run_metrics_request import ApiReportRunMetricsRequest
from kfp_server_api.models.api_report_run_metrics_response import ApiReportRunMetricsResponse
from kfp_server_api.models.api_resource_key import ApiResourceKey
from kfp_server_api.models.api_resource_reference import ApiResourceReference
from kfp_server_api.models.api_resource_type import ApiResourceType
from kfp_server_api.models.api_run import ApiRun
from kfp_server_api.models.api_run_detail import ApiRunDetail
from kfp_server_api.models.api_run_metric import ApiRunMetric
from kfp_server_api.models.api_run_storage_state import ApiRunStorageState
from kfp_server_api.models.api_status import ApiStatus
from kfp_server_api.models.api_trigger import ApiTrigger
from kfp_server_api.models.api_url import ApiUrl
from kfp_server_api.models.job_mode import JobMode
from kfp_server_api.models.protobuf_any import ProtobufAny
from kfp_server_api.models.report_run_metrics_response_report_run_metric_result import ReportRunMetricsResponseReportRunMetricResult
from kfp_server_api.models.report_run_metrics_response_report_run_metric_result_status import ReportRunMetricsResponseReportRunMetricResultStatus
from kfp_server_api.models.run_metric_format import RunMetricFormat
| apache-2.0 |
dsquareindia/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 63 | 3231 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
puzan/ansible | lib/ansible/modules/network/panos/panos_check.py | 25 | 4104 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_check
short_description: check if PAN-OS device is ready for configuration
description:
- Check if a PAN-OS device is ready to be configured (no pending jobs).
- The check could be done once or multiple times until the device is ready.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
timeout:
description:
- timeout of API calls
required: false
default: "0"
interval:
description:
- time waited between checks
required: false
default: "0"
'''
EXAMPLES = '''
# single check on 192.168.1.1 with credentials admin/admin
- name: check if ready
panos_check:
ip_address: "192.168.1.1"
password: "admin"
# check for 10 times, every 30 seconds, if device 192.168.1.1
# is ready, using credentials admin/admin
- name: wait for reboot
panos_check:
ip_address: "192.168.1.1"
password: "admin"
register: result
until: not result|failed
retries: 10
delay: 30
'''
RETURN='''
# Default return values
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
from ansible.module_utils.basic import AnsibleModule
import time
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def check_jobs(jobs, module):
job_check = False
for j in jobs:
status = j.find('.//status')
if status is None:
return False
if status.text != 'FIN':
return False
job_check = True
return job_check
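# Illustrative sketch (assumes xml.etree.ElementTree; real elements come from
# pan-python's parsed response):
#   import xml.etree.ElementTree as ET
#   check_jobs([ET.fromstring('<job><status>FIN</status></job>')], None)  # True
#   check_jobs([ET.fromstring('<job><status>ACT</status></job>')], None)  # False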
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
timeout=dict(default=0, type='int'),
interval=dict(default=0, type='int')
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
timeout = module.params['timeout']
interval = module.params['interval']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password,
timeout=60
)
checkpnt = time.time() + timeout
while True:
try:
xapi.op(cmd="show jobs all", cmd_xml=True)
except Exception:
pass
else:
jobs = xapi.element_root.findall('.//job')
if check_jobs(jobs, module):
module.exit_json(changed=True, msg="okey dokey")
if time.time() > checkpnt:
break
time.sleep(interval)
module.fail_json(msg="Timeout")
if __name__ == '__main__':
main()
| gpl-3.0 |
Allow2CEO/browser-ios | brave/node_modules/ad-block/vendor/depot_tools/testing_support/git_test_utils.py | 26 | 15715 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import atexit
import collections
import copy
import datetime
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
def git_hash_data(data, typ='blob'):
"""Calculate the git-style SHA1 for some data.
Only supports 'blob' type data at the moment.
"""
assert typ == 'blob', 'Only support blobs for now'
return hashlib.sha1('blob %s\0%s' % (len(data), data)).hexdigest()
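# Illustrative check (matches `printf 'hello\n' | git hash-object --stdin`):
#   git_hash_data('hello\n') == 'ce013625030ba8dba906f756967f9e9ca394464a'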
class OrderedSet(collections.MutableSet):
# from http://code.activestate.com/recipes/576694/
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.data = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __contains__(self, key):
return key in self.data
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __ne__(self, other):
if isinstance(other, OrderedSet):
return len(self) != len(other) or list(self) != list(other)
return set(self) != set(other)
def __len__(self):
return len(self.data)
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def add(self, key):
if key not in self.data:
end = self.end
curr = end[1]
curr[2] = end[1] = self.data[key] = [key, curr, end]
def difference_update(self, *others):
for other in others:
for i in other:
self.discard(i)
def discard(self, key):
if key in self.data:
key, prev, nxt = self.data.pop(key)
prev[2] = nxt
nxt[1] = prev
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
class GitRepoSchema(object):
"""A declarative git testing repo.
Pass a schema to __init__ in the form of:
A B C D
B E D
This is the repo
A - B - C - D
\ E /
Whitespace doesn't matter. Each line is a declaration of which commits come
before which other commits.
Every commit gets a tag 'tag_%(commit)s'
Every unique terminal commit gets a branch 'branch_%(commit)s'
The last commit in the first line is the branch 'master'
Root commits get a ref 'root_%(commit)s'
Timestamps are in topo order, earlier commits (as indicated by their presence
in the schema) get earlier timestamps. Stamps start at the Unix Epoch, and
increment by 1 day each.
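Example (illustrative):
>>> s = GitRepoSchema('A B C')
>>> [c.name for c in s.walk()]
['A', 'B', 'C']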
"""
COMMIT = collections.namedtuple('COMMIT', 'name parents is_branch is_root')
def __init__(self, repo_schema='',
content_fn=lambda v: {v: {'data': v}}):
"""Builds a new GitRepoSchema.
Args:
repo_schema (str) - Initial schema for this repo. See class docstring for
info on the schema format.
content_fn ((commit_name) -> commit_data) - A function which will be
lazily called to obtain data for each commit. The results of this
function are cached (i.e. it will never be called twice for the same
commit_name). See the docstring on the GitRepo class for the format of
the data returned by this function.
"""
self.master = None
self.par_map = {}
self.data_cache = {}
self.content_fn = content_fn
self.add_commits(repo_schema)
def walk(self):
"""(Generator) Walks the repo schema from roots to tips.
Generates GitRepoSchema.COMMIT objects for each commit.
Throws an AssertionError if it detects a cycle.
"""
is_root = True
par_map = copy.deepcopy(self.par_map)
while par_map:
empty_keys = set(k for k, v in par_map.iteritems() if not v)
assert empty_keys, 'Cycle detected! %s' % par_map
for k in sorted(empty_keys):
yield self.COMMIT(k, self.par_map[k],
not any(k in v for v in self.par_map.itervalues()),
is_root)
del par_map[k]
for v in par_map.itervalues():
v.difference_update(empty_keys)
is_root = False
def add_partial(self, commit, parent=None):
if commit not in self.par_map:
self.par_map[commit] = OrderedSet()
if parent is not None:
self.par_map[commit].add(parent)
def add_commits(self, schema):
"""Adds more commits from a schema into the existing Schema.
Args:
schema (str) - See class docstring for info on schema format.
Throws an AssertionError if it detects a cycle.
"""
for commits in (l.split() for l in schema.splitlines() if l.strip()):
parent = None
for commit in commits:
self.add_partial(commit, parent)
parent = commit
if parent and not self.master:
self.master = parent
for _ in self.walk(): # This will throw if there are any cycles.
pass
def reify(self):
"""Returns a real GitRepo for this GitRepoSchema"""
return GitRepo(self)
def data_for(self, commit):
"""Obtains the data for |commit|.
See the docstring on the GitRepo class for the format of the returned data.
Caches the result on this GitRepoSchema instance.
"""
if commit not in self.data_cache:
self.data_cache[commit] = self.content_fn(commit)
return self.data_cache[commit]
def simple_graph(self):
"""Returns a dictionary of {commit_subject: {parent commit_subjects}}
This allows you to get a very simple connection graph over the whole repo
for comparison purposes. Only commit subjects (not ids, not content/data)
are considered
"""
ret = {}
for commit in self.walk():
ret.setdefault(commit.name, set()).update(commit.parents)
return ret
class GitRepo(object):
"""Creates a real git repo for a GitRepoSchema.
Obtains schema and content information from the GitRepoSchema.
The format for the commit data supplied by GitRepoSchema.data_for is:
{
SPECIAL_KEY: special_value,
...
"path/to/some/file": { 'data': "some data content for this file",
'mode': 0755 },
...
}
The SPECIAL_KEYs are the following attributes of the GitRepo class:
* AUTHOR_NAME
* AUTHOR_EMAIL
* AUTHOR_DATE - must be a datetime.datetime instance
* COMMITTER_NAME
* COMMITTER_EMAIL
* COMMITTER_DATE - must be a datetime.datetime instance
For file content, if 'data' is None, then this commit will `git rm` that file.
"""
BASE_TEMP_DIR = tempfile.mkdtemp(suffix='base', prefix='git_repo')
atexit.register(shutil.rmtree, BASE_TEMP_DIR)
# Singleton objects to specify specific data in a commit dictionary.
AUTHOR_NAME = object()
AUTHOR_EMAIL = object()
AUTHOR_DATE = object()
COMMITTER_NAME = object()
COMMITTER_EMAIL = object()
COMMITTER_DATE = object()
DEFAULT_AUTHOR_NAME = 'Author McAuthorly'
DEFAULT_AUTHOR_EMAIL = 'author@example.com'
DEFAULT_COMMITTER_NAME = 'Charles Committish'
DEFAULT_COMMITTER_EMAIL = 'commitish@example.com'
COMMAND_OUTPUT = collections.namedtuple('COMMAND_OUTPUT', 'retcode stdout')
def __init__(self, schema):
"""Makes new GitRepo.
Automatically creates a temp folder under GitRepo.BASE_TEMP_DIR. It's
recommended that you clean this repo up by calling nuke() on it, but if not,
GitRepo will automatically clean up all allocated repos at the exit of the
program (assuming a normal exit like with sys.exit)
Args:
schema - An instance of GitRepoSchema
"""
self.repo_path = tempfile.mkdtemp(dir=self.BASE_TEMP_DIR)
self.commit_map = {}
self._date = datetime.datetime(1970, 1, 1)
self.to_schema_refs = ['--branches']
self.git('init')
self.git('config', 'user.name', 'testcase')
self.git('config', 'user.email', 'testcase@example.com')
for commit in schema.walk():
self._add_schema_commit(commit, schema.data_for(commit.name))
self.last_commit = self[commit.name]
if schema.master:
self.git('update-ref', 'refs/heads/master', self[schema.master])
def __getitem__(self, commit_name):
"""Gets the hash of a commit by its schema name.
>>> r = GitRepo(GitRepoSchema('A B C'))
>>> r['B']
'7381febe1da03b09da47f009963ab7998a974935'
"""
return self.commit_map[commit_name]
def _add_schema_commit(self, commit, commit_data):
commit_data = commit_data or {}
if commit.parents:
parents = list(commit.parents)
self.git('checkout', '--detach', '-q', self[parents[0]])
if len(parents) > 1:
self.git('merge', '--no-commit', '-q', *[self[x] for x in parents[1:]])
else:
self.git('checkout', '--orphan', 'root_%s' % commit.name)
self.git('rm', '-rf', '.')
env = self.get_git_commit_env(commit_data)
for fname, file_data in commit_data.iteritems():
deleted = False
if 'data' in file_data:
data = file_data.get('data')
if data is None:
deleted = True
self.git('rm', fname)
else:
path = os.path.join(self.repo_path, fname)
pardir = os.path.dirname(path)
if not os.path.exists(pardir):
os.makedirs(pardir)
with open(path, 'wb') as f:
f.write(data)
mode = file_data.get('mode')
if mode and not deleted:
os.chmod(path, mode)
self.git('add', fname)
rslt = self.git('commit', '--allow-empty', '-m', commit.name, env=env)
assert rslt.retcode == 0, 'Failed to commit %s' % str(commit)
self.commit_map[commit.name] = self.git('rev-parse', 'HEAD').stdout.strip()
self.git('tag', 'tag_%s' % commit.name, self[commit.name])
if commit.is_branch:
self.git('branch', '-f', 'branch_%s' % commit.name, self[commit.name])
def get_git_commit_env(self, commit_data=None):
commit_data = commit_data or {}
env = {}
for prefix in ('AUTHOR', 'COMMITTER'):
for suffix in ('NAME', 'EMAIL', 'DATE'):
singleton = '%s_%s' % (prefix, suffix)
key = getattr(self, singleton)
if key in commit_data:
val = commit_data[key]
else:
if suffix == 'DATE':
val = self._date
self._date += datetime.timedelta(days=1)
else:
val = getattr(self, 'DEFAULT_%s' % singleton)
env['GIT_%s' % singleton] = str(val)
return env
def git(self, *args, **kwargs):
"""Runs a git command specified by |args| in this repo."""
assert self.repo_path is not None
try:
with open(os.devnull, 'wb') as devnull:
output = subprocess.check_output(
('git',) + args, cwd=self.repo_path, stderr=devnull, **kwargs)
return self.COMMAND_OUTPUT(0, output)
except subprocess.CalledProcessError as e:
return self.COMMAND_OUTPUT(e.returncode, e.output)
def git_commit(self, message):
return self.git('commit', '-am', message, env=self.get_git_commit_env())
def nuke(self):
"""Obliterates the git repo on disk.
Causes this GitRepo to be unusable.
"""
shutil.rmtree(self.repo_path)
self.repo_path = None
def run(self, fn, *args, **kwargs):
"""Run a python function with the given args and kwargs with the cwd set to
the git repo."""
assert self.repo_path is not None
curdir = os.getcwd()
try:
os.chdir(self.repo_path)
return fn(*args, **kwargs)
finally:
os.chdir(curdir)
def capture_stdio(self, fn, *args, **kwargs):
"""Run a python function with the given args and kwargs with the cwd set to
the git repo.
Returns the (stdout, stderr) of whatever ran, instead of what |fn|
returned.
"""
stdout = sys.stdout
stderr = sys.stderr
try:
# "multiple statements on a line" pylint: disable=C0321
with tempfile.TemporaryFile() as out, tempfile.TemporaryFile() as err:
sys.stdout = out
sys.stderr = err
try:
self.run(fn, *args, **kwargs)
except SystemExit:
pass
out.seek(0)
err.seek(0)
return out.read(), err.read()
finally:
sys.stdout = stdout
sys.stderr = stderr
def open(self, path, mode='rb'):
return open(os.path.join(self.repo_path, path), mode)
def to_schema(self):
lines = self.git('rev-list', '--parents', '--reverse', '--topo-order',
'--format=%s', *self.to_schema_refs).stdout.splitlines()
hash_to_msg = {}
ret = GitRepoSchema()
current = None
parents = []
for line in lines:
if line.startswith('commit'):
assert current is None
tokens = line.split()
current, parents = tokens[1], tokens[2:]
assert all(p in hash_to_msg for p in parents)
else:
assert current is not None
hash_to_msg[current] = line
ret.add_partial(line)
for parent in parents:
ret.add_partial(line, hash_to_msg[parent])
current = None
parents = []
assert current is None
return ret
class GitRepoSchemaTestBase(unittest.TestCase):
"""A TestCase with a built-in GitRepoSchema.
Expects a class variable REPO_SCHEMA to be a GitRepoSchema string in the form
described by that class.
You may also set class variables in the form COMMIT_%(commit_name)s, which
provide the content for the given commit_name commits.
You probably will end up using either GitRepoReadOnlyTestBase or
GitRepoReadWriteTestBase for real tests.
"""
REPO_SCHEMA = None
@classmethod
def getRepoContent(cls, commit):
return getattr(cls, 'COMMIT_%s' % commit, None)
@classmethod
def setUpClass(cls):
super(GitRepoSchemaTestBase, cls).setUpClass()
assert cls.REPO_SCHEMA is not None
cls.r_schema = GitRepoSchema(cls.REPO_SCHEMA, cls.getRepoContent)
class GitRepoReadOnlyTestBase(GitRepoSchemaTestBase):
"""Injects a GitRepo object given the schema and content from
GitRepoSchemaTestBase into TestCase classes which subclass this.
This GitRepo will appear as self.repo, and will be deleted and recreated once
for the duration of all the tests in the subclass.
"""
REPO_SCHEMA = None
@classmethod
def setUpClass(cls):
super(GitRepoReadOnlyTestBase, cls).setUpClass()
assert cls.REPO_SCHEMA is not None
cls.repo = cls.r_schema.reify()
def setUp(self):
self.repo.git('checkout', '-f', self.repo.last_commit)
@classmethod
def tearDownClass(cls):
cls.repo.nuke()
super(GitRepoReadOnlyTestBase, cls).tearDownClass()
class GitRepoReadWriteTestBase(GitRepoSchemaTestBase):
"""Injects a GitRepo object given the schema and content from
GitRepoSchemaTestBase into TestCase classes which subclass this.
This GitRepo will appear as self.repo, and will be deleted and recreated for
each test function in the subclass.
"""
REPO_SCHEMA = None
def setUp(self):
super(GitRepoReadWriteTestBase, self).setUp()
self.repo = self.r_schema.reify()
def tearDown(self):
self.repo.nuke()
super(GitRepoReadWriteTestBase, self).tearDown()
def assertSchema(self, schema_string):
self.assertEqual(GitRepoSchema(schema_string).simple_graph(),
self.repo.to_schema().simple_graph())
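# A minimal usage sketch, assuming this module is run directly; the schema
# string 'A B C' mirrors the doctest in GitRepo.__getitem__ above.
if __name__ == '__main__':
  repo = GitRepo(GitRepoSchema('A B C'))
  try:
    print repo['C']  # hash of the commit created for schema entry C
    print repo.to_schema().simple_graph()  # e.g. 'C' maps to set(['B'])
  finally:
    repo.nuke()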
| mpl-2.0 |
vegetableman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py | 119 | 12170 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for run_perf_tests."""
import StringIO
import json
import re
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.port.test import TestPort
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
class MainTest(unittest.TestCase):
def create_runner(self, args=[]):
options, parsed_args = PerfTestsRunner._parse_args(args)
test_port = TestPort(host=MockHost(), options=options)
runner = PerfTestsRunner(args=args, port=test_port)
runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
return runner, test_port
def _add_file(self, runner, dirname, filename, content=True):
dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
runner._host.filesystem.maybe_make_directory(dirname)
runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content
def test_collect_tests(self):
runner, port = self.create_runner()
self._add_file(runner, 'inspector', 'a_file.html', 'a content')
tests = runner._collect_tests()
self.assertEqual(len(tests), 1)
def _collect_tests_and_sort_test_name(self, runner):
return sorted([test.test_name() for test in runner._collect_tests()])
def test_collect_tests_with_multiple_files(self):
runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])
def add_file(filename):
port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'
add_file('test1.html')
add_file('test2.html')
add_file('test3.html')
port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
def test_collect_tests_with_skipped_list(self):
runner, port = self.create_runner()
self._add_file(runner, 'inspector', 'test1.html')
self._add_file(runner, 'inspector', 'unsupported_test1.html')
self._add_file(runner, 'inspector', 'test2.html')
self._add_file(runner, 'inspector/resources', 'resource_file.html')
self._add_file(runner, 'unsupported', 'unsupported_test2.html')
port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
def test_collect_tests_with_skipped_list_and_files(self):
runner, port = self.create_runner(args=['Suite/Test1.html', 'Suite/SkippedTest1.html', 'SkippedSuite/Test1.html'])
self._add_file(runner, 'SkippedSuite', 'Test1.html')
self._add_file(runner, 'SkippedSuite', 'Test2.html')
self._add_file(runner, 'Suite', 'Test1.html')
self._add_file(runner, 'Suite', 'Test2.html')
self._add_file(runner, 'Suite', 'SkippedTest1.html')
self._add_file(runner, 'Suite', 'SkippedTest2.html')
port.skipped_perf_tests = lambda: ['Suite/SkippedTest1.html', 'Suite/SkippedTest1.html', 'SkippedSuite']
self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner),
['SkippedSuite/Test1.html', 'Suite/SkippedTest1.html', 'Suite/Test1.html'])
def test_collect_tests_with_ignored_skipped_list(self):
runner, port = self.create_runner(args=['--force'])
self._add_file(runner, 'inspector', 'test1.html')
self._add_file(runner, 'inspector', 'unsupported_test1.html')
self._add_file(runner, 'inspector', 'test2.html')
self._add_file(runner, 'inspector/resources', 'resource_file.html')
self._add_file(runner, 'unsupported', 'unsupported_test2.html')
port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
def test_collect_tests_should_ignore_replay_tests_by_default(self):
runner, port = self.create_runner()
self._add_file(runner, 'Replay', 'www.webkit.org.replay')
self.assertItemsEqual(runner._collect_tests(), [])
def test_collect_tests_with_replay_tests(self):
runner, port = self.create_runner(args=['--replay'])
self._add_file(runner, 'Replay', 'www.webkit.org.replay')
tests = runner._collect_tests()
self.assertEqual(len(tests), 1)
self.assertEqual(tests[0].__class__.__name__, 'ReplayPerfTest')
def test_default_args(self):
runner, port = self.create_runner()
options, args = PerfTestsRunner._parse_args([])
self.assertTrue(options.build)
self.assertEqual(options.time_out_ms, 600 * 1000)
self.assertTrue(options.generate_results)
self.assertTrue(options.show_results)
self.assertFalse(options.replay)
self.assertTrue(options.use_skipped_list)
self.assertEqual(options.repeat, 1)
self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT)
def test_parse_args(self):
runner, port = self.create_runner()
options, args = PerfTestsRunner._parse_args([
'--build-directory=folder42',
'--platform=platform42',
'--builder-name', 'webkit-mac-1',
'--build-number=56',
'--time-out-ms=42',
'--no-show-results',
'--reset-results',
'--output-json-path=a/output.json',
'--slave-config-json-path=a/source.json',
'--test-results-server=somehost',
'--additional-drt-flag=--enable-threaded-parser',
'--additional-drt-flag=--awesomesauce',
'--repeat=5',
'--test-runner-count=5',
'--debug'])
self.assertTrue(options.build)
self.assertEqual(options.build_directory, 'folder42')
self.assertEqual(options.platform, 'platform42')
self.assertEqual(options.builder_name, 'webkit-mac-1')
self.assertEqual(options.build_number, '56')
self.assertEqual(options.time_out_ms, '42')
self.assertEqual(options.configuration, 'Debug')
self.assertFalse(options.show_results)
self.assertTrue(options.reset_results)
self.assertEqual(options.output_json_path, 'a/output.json')
self.assertEqual(options.slave_config_json_path, 'a/source.json')
self.assertEqual(options.test_results_server, 'somehost')
self.assertEqual(options.additional_drt_flag, ['--enable-threaded-parser', '--awesomesauce'])
self.assertEqual(options.repeat, 5)
self.assertEqual(options.test_runner_count, 5)
def test_upload_json(self):
runner, port = self.create_runner()
port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'
class MockFileUploader:
called = []
upload_single_text_file_throws = False
upload_single_text_file_return_value = None
@classmethod
def reset(cls):
cls.called = []
cls.upload_single_text_file_throws = False
cls.upload_single_text_file_return_value = None
def __init__(mock, url, timeout):
self.assertEqual(url, 'https://some.host/some/path')
self.assertTrue(isinstance(timeout, int) and timeout)
mock.called.append('FileUploader')
def upload_single_text_file(mock, filesystem, content_type, filename):
self.assertEqual(filesystem, port.host.filesystem)
self.assertEqual(content_type, 'application/json')
self.assertEqual(filename, 'some.json')
mock.called.append('upload_single_text_file')
if mock.upload_single_text_file_throws:
raise Exception
return mock.upload_single_text_file_return_value
MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('OK')
self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
MockFileUploader.reset()
MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('Some error')
output = OutputCapture()
output.capture_output()
self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
_, _, logs = output.restore_output()
self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n')
# An exception thrown by upload_single_text_file shouldn't blow up _upload_json
MockFileUploader.reset()
MockFileUploader.upload_single_text_file_throws = True
self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
MockFileUploader.reset()
MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
MockFileUploader.reset()
MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "SomethingHasFailed", "failureStored": false}')
output = OutputCapture()
output.capture_output()
self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
_, _, logs = output.restore_output()
serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)
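# A minimal sketch for running this module directly; WebKit normally drives
# these tests through its own harness, so treat this entry point as an
# assumption rather than the project's documented workflow.
if __name__ == '__main__':
    unittest.main()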
| bsd-3-clause |
noba3/KoTos | addons/script.module.youtube.dl/lib/youtube_dl/extractor/ubu.py | 126 | 1719 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
qualities,
)
class UbuIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?ubu\.com/film/(?P<id>[\da-z_-]+)\.html'
_TEST = {
'url': 'http://ubu.com/film/her_noise.html',
'md5': '138d5652618bf0f03878978db9bef1ee',
'info_dict': {
'id': 'her_noise',
'ext': 'm4v',
'title': 'Her Noise - The Making Of (2007)',
'duration': 3600,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>.+?Film & Video: ([^<]+)</title>', webpage, 'title')
duration = int_or_none(self._html_search_regex(
r'Duration: (\d+) minutes', webpage, 'duration', fatal=False),
invscale=60)
formats = []
FORMAT_REGEXES = [
('sq', r"'flashvars'\s*,\s*'file=([^']+)'"),
('hq', r'href="(http://ubumexico\.centro\.org\.mx/video/[^"]+)"'),
]
preference = qualities([fid for fid, _ in FORMAT_REGEXES])
for format_id, format_regex in FORMAT_REGEXES:
m = re.search(format_regex, webpage)
if m:
formats.append({
'url': m.group(1),
'format_id': format_id,
'preference': preference(format_id),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'duration': duration,
'formats': formats,
}
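# A hedged usage sketch: extractors like this one are normally exercised
# through the YoutubeDL front end rather than instantiated directly. The URL
# matches the _TEST entry above; download=False keeps this metadata-only.
#
#   from youtube_dl import YoutubeDL
#   info = YoutubeDL().extract_info('http://ubu.com/film/her_noise.html',
#                                   download=False)
#   print(info['title'], info['duration'])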
| gpl-2.0 |
Ravenm/2143-OOP-NASH | python3env/Lib/site-packages/pip/_vendor/requests/models.py | 187 | 29277 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if not data and json is not None:
content_type = 'application/json'
body = complexjson.dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
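# A hedged sketch of the prepare flow this class implements, mirroring the
# Usage docstring above (Session lives in requests.sessions, not this
# module; no network traffic happens until send):
#
#   req = Request('GET', 'http://httpbin.org/get', params={'key': 'value'})
#   prepped = req.prepare()          # runs prepare_method/url/headers/...
#   prepped.headers['X-Custom'] = 'demo'
#   resp = Session().send(prepped)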
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
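# A hedged streaming sketch (assumes a response created with stream=True,
# as the docstrings above require for memory-efficient iteration):
#
#   r = requests.get('http://httpbin.org/stream/20', stream=True)
#   for line in r.iter_lines(chunk_size=512):
#       if line:                 # skip keep-alive blank lines
#           print(line)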
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s for url: %s' % (self.status_code, self.reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s for url: %s' % (self.status_code, self.reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
return self.raw.close()
return self.raw.release_conn()
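# A hedged error-handling sketch tying together ok, raise_for_status and
# close (resp stands for any Response produced by a send):
#
#   try:
#       resp.raise_for_status()  # no-op for non-error status codes
#   except HTTPError as exc:
#       print(exc)               # e.g. '404 Client Error: ... for url: ...'
#   finally:
#       resp.close()             # release the connection back to the pool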
| cc0-1.0 |
mikegagnon/sidenote | prefix-links.py | 1 | 1725 | #!/usr/bin/env python
#
# This is free and unencumbered software released into the public domain.
#
# Sometimes you want to include one sidenote document into another.
# One way you could do that is copy the .md files from one project into another.
# However, this creates a risk of link-tag collisions. I.e. one project
# defines ~foo and the other project also defines ~foo.
#
# prefix-links.py solves this problem. It takes a .md file as input, then
# prefixes each link tag with a random string. Therefore ~foo becomes
# ~4C5FGAL2foo
#
# Then you can safely include .md files from multiple projects into another
# project
#
from sidenote import *
import argparse
import random
import re
import string
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
key = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
def obscure(filename):
with open(filename) as f:
lines = f.readlines()
for line in lines:
newline = ""
# tokenize the line into links and non-links
for part in LINK_PARSER.split(line):
if LINK_PARSER.match(part):
newpart = part.replace("(##", "(##" + key)
newline += newpart
else:
newline += part
if TILDE_ANCHOR_PARSER.match(newline):
newline = newline.replace("~", "~" + key)
print newline,
if __name__=="__main__":
parser = argparse.ArgumentParser(description='"Obscure" links in a Sidenote document')
parser.add_argument('file', type=str,
help='the markdown file to obscure')
args = parser.parse_args()
obscure(args.file)
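# Example invocation (the rewritten markdown goes to stdout):
#
#   python prefix-links.py notes.md > notes-prefixed.md
#
# Each run draws a fresh random 8-character key, so rerunning on the same
# file produces a different set of prefixed link tags.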
| unlicense |
maxrosan/NS-3-support-for-OBS | bindings/python/apidefs/gcc-LP64/ns3_module_virtual_net_device.py | 6 | 14172 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
root_module = module.get_root()
## virtual-net-device.h: ns3::VirtualNetDevice [class]
module.add_class('VirtualNetDevice', parent=root_module['ns3::NetDevice'])
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace dsdv
nested_module = module.add_cpp_namespace('dsdv')
register_types_ns3_dsdv(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
def register_types_ns3_dsdv(module):
root_module = module.get_root()
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3VirtualNetDevice_methods(root_module, root_module['ns3::VirtualNetDevice'])
return
def register_Ns3VirtualNetDevice_methods(root_module, cls):
## virtual-net-device.h: ns3::VirtualNetDevice::VirtualNetDevice(ns3::VirtualNetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::VirtualNetDevice const &', 'arg0')])
## virtual-net-device.h: ns3::VirtualNetDevice::VirtualNetDevice() [constructor]
cls.add_constructor([])
## virtual-net-device.h: void ns3::VirtualNetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## virtual-net-device.h: ns3::Address ns3::VirtualNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: ns3::Address ns3::VirtualNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: ns3::Ptr<ns3::Channel> ns3::VirtualNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: uint32_t ns3::VirtualNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: uint16_t ns3::VirtualNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: ns3::Address ns3::VirtualNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## virtual-net-device.h: ns3::Address ns3::VirtualNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## virtual-net-device.h: ns3::Ptr<ns3::Node> ns3::VirtualNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: static ns3::TypeId ns3::VirtualNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## virtual-net-device.h: bool ns3::VirtualNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: bool ns3::VirtualNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: bool ns3::VirtualNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: bool ns3::VirtualNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: bool ns3::VirtualNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: bool ns3::VirtualNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: bool ns3::VirtualNetDevice::Receive(ns3::Ptr<ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('Receive',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')])
## virtual-net-device.h: bool ns3::VirtualNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## virtual-net-device.h: bool ns3::VirtualNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## virtual-net-device.h: void ns3::VirtualNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## virtual-net-device.h: void ns3::VirtualNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## virtual-net-device.h: void ns3::VirtualNetDevice::SetIsPointToPoint(bool isPointToPoint) [member function]
cls.add_method('SetIsPointToPoint',
'void',
[param('bool', 'isPointToPoint')])
## virtual-net-device.h: bool ns3::VirtualNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## virtual-net-device.h: void ns3::VirtualNetDevice::SetNeedsArp(bool needsArp) [member function]
cls.add_method('SetNeedsArp',
'void',
[param('bool', 'needsArp')])
## virtual-net-device.h: void ns3::VirtualNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## virtual-net-device.h: void ns3::VirtualNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## virtual-net-device.h: void ns3::VirtualNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## virtual-net-device.h: void ns3::VirtualNetDevice::SetSendCallback(ns3::Callback<bool, ns3::Ptr<ns3::Packet>, ns3::Address const&, ns3::Address const&, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> transmitCb) [member function]
cls.add_method('SetSendCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::Address const &, ns3::Address const &, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'transmitCb')])
## virtual-net-device.h: void ns3::VirtualNetDevice::SetSupportsSendFrom(bool supportsSendFrom) [member function]
cls.add_method('SetSupportsSendFrom',
'void',
[param('bool', 'supportsSendFrom')])
## virtual-net-device.h: bool ns3::VirtualNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h: void ns3::VirtualNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
register_functions_ns3_dsdv(module.get_submodule('dsdv'), root_module)
register_functions_ns3_flame(module.get_submodule('flame'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
return
def register_functions_ns3_Config(module, root_module):
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_addressUtils(module, root_module):
return
def register_functions_ns3_aodv(module, root_module):
return
def register_functions_ns3_dot11s(module, root_module):
return
def register_functions_ns3_dsdv(module, root_module):
return
def register_functions_ns3_flame(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
return
def register_functions_ns3_olsr(module, root_module):
return
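# A hedged sketch of how these register_* hooks are typically driven by a
# pybindgen build script; the top-level module name and the include line are
# assumptions, not taken from this generated file:
#
#   import pybindgen
#   root = pybindgen.Module('ns3', cpp_namespace='::ns3')
#   root.add_include('"ns3/virtual-net-device.h"')
#   register_types(root)
#   register_methods(root)
#   register_functions(root)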
| gpl-2.0 |
TheTacoScott/GoAtThrottleUp | ServerRelay/cherrypy/lib/auth.py | 40 | 3142 | import cherrypy
from cherrypy.lib import httpauth
def check_auth(users, encrypt=None, realm=None):
"""If an authorization header contains credentials, return True, else False."""
request = cherrypy.serving.request
if 'authorization' in request.headers:
# make sure the provided credentials are correctly set
ah = httpauth.parseAuthorization(request.headers['authorization'])
if ah is None:
raise cherrypy.HTTPError(400, 'Bad Request')
if not encrypt:
encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5]
if hasattr(users, '__call__'):
try:
# backward compatibility
users = users() # expect it to return a dictionary
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
except TypeError:
# returns a password (encrypted or clear text)
password = users(ah["username"])
else:
if not isinstance(users, dict):
raise ValueError("Authentication users must be a dictionary")
# fetch the user password
password = users.get(ah["username"], None)
# validate the authorization by re-computing it here
# and compare it with what the user-agent provided
if httpauth.checkResponse(ah, password, method=request.method,
encrypt=encrypt, realm=realm):
request.login = ah["username"]
return True
request.login = False
return False
def basic_auth(realm, users, encrypt=None, debug=False):
"""If auth fails, raise 401 with a basic authentication header.
realm
A string containing the authentication realm.
users
A dict of the form: {username: password} or a callable returning a dict.
encrypt
callable used to encrypt the password returned from the user-agent.
if None it defaults to a md5 encryption.
"""
if check_auth(users, encrypt):
if debug:
cherrypy.log('Auth successful', 'TOOLS.BASIC_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.basicAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
def digest_auth(realm, users, debug=False):
"""If auth fails, raise 401 with a digest authentication header.
realm
A string containing the authentication realm.
users
A dict of the form: {username: password} or a callable returning a dict.
"""
if check_auth(users, realm=realm):
if debug:
cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH')
return
# inform the user-agent this path is protected
cherrypy.serving.response.headers['www-authenticate'] = httpauth.digestAuth(realm)
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
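# A hedged configuration sketch; CherryPy 3.x exposes these functions as
# tools, so a handler tree can be protected declaratively. The realm, user
# name, hash, and App class below are placeholders:
#
#   users = {'admin': '21232f297a57a5a743894a0e4a801fc3'}  # md5('admin')
#   conf = {'/': {
#       'tools.basic_auth.on': True,
#       'tools.basic_auth.realm': 'localhost',
#       'tools.basic_auth.users': users,
#   }}
#   cherrypy.quickstart(App(), '/', conf)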
| mit |
taotie12010/bigfour | lms/djangoapps/ccx/tests/test_ccx_modulestore.py | 24 | 5387 | """
Test the CCXModulestoreWrapper
"""
from collections import deque
from ccx_keys.locator import CCXLocator
import datetime
from itertools import izip_longest, chain
import pytz
from student.tests.factories import AdminFactory
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
TEST_DATA_SPLIT_MODULESTORE
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from ..models import CustomCourseForEdX
class TestCCXModulestoreWrapper(ModuleStoreTestCase):
"""tests for a modulestore wrapped by CCXModulestoreWrapper
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
def setUp(self):
"""
Set up tests
"""
super(TestCCXModulestoreWrapper, self).setUp()
self.course = course = CourseFactory.create()
# Create instructor account
coach = AdminFactory.create()
# Create a course outline
self.mooc_start = start = datetime.datetime(
2010, 5, 12, 2, 42, tzinfo=pytz.UTC)
self.mooc_due = due = datetime.datetime(
2010, 7, 7, 0, 0, tzinfo=pytz.UTC)
self.chapters = chapters = [
ItemFactory.create(start=start, parent=course) for _ in xrange(2)
]
self.sequentials = sequentials = [
ItemFactory.create(parent=c) for _ in xrange(2) for c in chapters
]
self.verticals = verticals = [
ItemFactory.create(
due=due, parent=s, graded=True, format='Homework'
) for _ in xrange(2) for s in sequentials
]
self.blocks = [
ItemFactory.create(parent=v) for _ in xrange(2) for v in verticals
]
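        # The outline built above is a full binary tree under the course:
        # 2 chapters, 4 sequentials, 8 verticals and 16 leaf blocks.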
self.ccx = ccx = CustomCourseForEdX(
course_id=course.id,
display_name='Test CCX',
coach=coach
)
ccx.save()
self.ccx_locator = CCXLocator.from_course_locator(course.id, ccx.id) # pylint: disable=no-member
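        # A CCXLocator carries the ccx id alongside the course key; its
        # serialized form looks roughly like 'ccx-v1:Org+Course+Run+ccx@1'
        # (illustrative, not taken from this test).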
def get_all_children_bf(self, block):
"""traverse the children of block in a breadth-first order"""
queue = deque([block])
while queue:
item = queue.popleft()
yield item
queue.extend(item.get_children())
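    # Illustrative: list(self.get_all_children_bf(course)) yields the course
    # itself first, then chapters, sequentials, verticals and blocks,
    # level by level.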
def get_course(self, key):
"""get a course given a key"""
with self.store.bulk_operations(key):
course = self.store.get_course(key)
return course
def test_get_course(self):
"""retrieving a course with a ccx key works"""
expected = self.get_course(self.ccx_locator.to_course_locator())
actual = self.get_course(self.ccx_locator)
self.assertEqual(
expected.location.course_key,
actual.location.course_key.to_course_locator())
self.assertEqual(expected.display_name, actual.display_name)
def test_get_children(self):
"""the children of retrieved courses should be the same with course and ccx keys
"""
course_key = self.ccx_locator.to_course_locator()
course = self.get_course(course_key)
ccx = self.get_course(self.ccx_locator)
test_fodder = izip_longest(
self.get_all_children_bf(course), self.get_all_children_bf(ccx)
)
for expected, actual in test_fodder:
if expected is None:
self.fail('course children exhausted before ccx children')
if actual is None:
self.fail('ccx children exhausted before course children')
self.assertEqual(expected.display_name, actual.display_name)
self.assertEqual(expected.location.course_key, course_key)
self.assertEqual(actual.location.course_key, self.ccx_locator)
def test_has_item(self):
"""can verify that a location exists, using ccx block usage key"""
for item in chain(self.chapters, self.sequentials, self.verticals, self.blocks):
block_key = self.ccx_locator.make_usage_key(
item.location.block_type, item.location.block_id
)
self.assertTrue(self.store.has_item(block_key))
def test_get_item(self):
"""can retrieve an item by a location key, using a ccx block usage key
        the retrieved item should be the same as the one read without ccx
info
"""
for expected in chain(self.chapters, self.sequentials, self.verticals, self.blocks):
block_key = self.ccx_locator.make_usage_key(
expected.location.block_type, expected.location.block_id
)
actual = self.store.get_item(block_key)
self.assertEqual(expected.display_name, actual.display_name)
self.assertEqual(expected.location, actual.location.to_block_locator())
def test_publication_api(self):
"""verify that we can correctly discern a published item by ccx key"""
for expected in self.blocks:
block_key = self.ccx_locator.make_usage_key(
expected.location.block_type, expected.location.block_id
)
self.assertTrue(self.store.has_published_version(expected))
self.store.unpublish(block_key, self.user.id)
self.assertFalse(self.store.has_published_version(expected))
self.store.publish(block_key, self.user.id)
self.assertTrue(self.store.has_published_version(expected))
| agpl-3.0 |