repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
henocdz/dotcom | PIL/WalImageFile.py | 40 | 5422 | # -*- coding: iso-8859-1 -*-
#
# The Python Imaging Library.
# $Id$
#
# WAL file handling
#
# History:
# 2003-04-23 fl created
#
# Copyright (c) 2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
# NOTE: This format cannot be automatically recognized, so the reader
# is not registered for use with Image.open(). To open a WEL file, use
# the WalImageFile.open() function instead.
# This reader is based on the specification available from:
# http://www.flipcode.com/tutorials/tut_q2levels.shtml
# and has been tested with a few sample files found using google.
import Image
def i32(c, o=0):
    """Decode a 32-bit little-endian unsigned integer from string *c* at offset *o*."""
    value = 0
    for index, shift in enumerate((0, 8, 16, 24)):
        value |= ord(c[o + index]) << shift
    return value
##
# Load texture from a Quake2 WAL texture file.
# <p>
# By default, a Quake2 standard palette is attached to the texture.
# To override the palette, use the <b>putpalette</b> method.
#
# @param filename WAL file name, or an opened file handle.
# @return An image instance.
def open(filename):
    """Load a Quake2 WAL texture as a "P"-mode (palettized) Image.

    filename may be a file name or any object with a read/seek interface.
    The Quake2 standard palette is attached; use putpalette to override.
    The texture name (and next-frame name, if any) from the header are
    stored in im.info.
    """
    # FIXME: modify to return a WalImageFile instance instead of
    # plain Image object ?
    if hasattr(filename, "read"):
        fp = filename
    else:
        # Python 2 builtin open(); this module's open() shadows it.
        import __builtin__
        fp = __builtin__.open(filename, "rb")
    # read header fields
    # layout: 32-byte name, then 24 bytes through the width/height/offset
    # fields read below, 32-byte next-frame name, 12 bytes of flags etc.
    header = fp.read(32+24+32+12)
    size = i32(header, 32), i32(header, 36)
    # offset of the level-0 (full resolution) mip image
    offset = i32(header, 40)
    # load pixel data
    fp.seek(offset)
    im = Image.fromstring("P", size, fp.read(size[0] * size[1]))
    im.putpalette(quake2palette)
    im.format = "WAL"
    im.format_description = "Quake2 Texture"
    # strings are null-terminated
    im.info["name"] = header[:32].split("\0", 1)[0]
    # NOTE(review): next-frame name is read at offset 56, which overlaps the
    # width/height/mip-offset fields per the header layout above — confirm
    # against the WAL spec before relying on im.info["next_name"].
    next_name = header[56:56+32].split("\0", 1)[0]
    if next_name:
        im.info["next_name"] = next_name
    return im
# 256-entry RGB palette (768 bytes); every component value visible below is
# in the 6-bit range 0x01..0x3f.
quake2palette = (
    # default palette taken from piffo 0.93 by Hans Häggström
    "\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e"
    "\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f"
    "\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c"
    "\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b"
    "\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10"
    "\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07"
    "\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f"
    "\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16"
    "\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d"
    "\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31"
    "\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28"
    "\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07"
    "\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27"
    "\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b"
    "\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01"
    "\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21"
    "\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14"
    "\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07"
    "\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14"
    "\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f"
    "\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34"
    "\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d"
    "\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14"
    "\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01"
    "\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24"
    "\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10"
    "\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01"
    "\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27"
    "\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c"
    "\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a"
    "\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26"
    "\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d"
    "\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01"
    "\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20"
    "\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17"
    "\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07"
    "\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25"
    "\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c"
    "\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01"
    "\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23"
    "\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f"
    "\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b"
    "\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37"
    "\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b"
    "\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01"
    "\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10"
    "\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b"
    "\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20"
)
if __name__ == "__main__":
    # Quick manual test (Python 2 syntax): load a sample texture, show its
    # metadata, and save it as PNG.
    im = open("../hacks/sample.wal")
    print im.info, im.mode, im.size
    im.save("../out.png")
| mit |
ozamiatin/glance | glance/tests/functional/db/__init__.py | 27 | 1077 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(markwash): These functions are used in the base tests cases to
# set up the db api implementation under test. Rather than accessing them
# directly, test modules should use the load and reset functions below.
# Populated by load() below; test modules should use load()/reset() rather
# than assigning these directly.
get_db = None
reset_db = None
def load(get_db_fn, reset_db_fn):
    """Install the db-api accessor callables used by the base test cases."""
    global get_db, reset_db
    get_db, reset_db = get_db_fn, reset_db_fn
def reset():
    """Forget any previously loaded db-api accessors."""
    global get_db, reset_db
    get_db = reset_db = None
| apache-2.0 |
egoid/baytree | lib/python2.7/site-packages/django/template/engine.py | 87 | 7582 | from django.core.exceptions import ImproperlyConfigured
from django.utils import lru_cache, six
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .base import Context, Template
from .context import _builtin_context_processors
from .exceptions import TemplateDoesNotExist
from .library import import_library
class Engine(object):
    """Holds the configuration for one Django template engine and exposes
    template loading/rendering entry points (get_template, from_string,
    render_to_string, select_template).
    """

    # Tag/filter modules always registered, in addition to user builtins.
    default_builtins = [
        'django.template.defaulttags',
        'django.template.defaultfilters',
        'django.template.loader_tags',
    ]

    def __init__(self, dirs=None, app_dirs=False, context_processors=None,
                 debug=False, loaders=None, string_if_invalid='',
                 file_charset='utf-8', libraries=None, builtins=None, autoescape=True):
        """Normalize options and eagerly import template libraries/builtins.

        app_dirs may only be used with the default loader configuration;
        combining it with an explicit ``loaders`` raises
        ImproperlyConfigured.
        """
        # Replace None defaults with fresh per-instance containers so
        # instances never share mutable state.
        if dirs is None:
            dirs = []
        if context_processors is None:
            context_processors = []
        if loaders is None:
            loaders = ['django.template.loaders.filesystem.Loader']
            if app_dirs:
                loaders += ['django.template.loaders.app_directories.Loader']
        else:
            if app_dirs:
                raise ImproperlyConfigured(
                    "app_dirs must not be set when loaders is defined.")
        if libraries is None:
            libraries = {}
        if builtins is None:
            builtins = []
        self.dirs = dirs
        self.app_dirs = app_dirs
        self.autoescape = autoescape
        self.context_processors = context_processors
        self.debug = debug
        self.loaders = loaders
        self.string_if_invalid = string_if_invalid
        self.file_charset = file_charset
        self.libraries = libraries
        self.template_libraries = self.get_template_libraries(libraries)
        self.builtins = self.default_builtins + builtins
        self.template_builtins = self.get_template_builtins(self.builtins)

    @staticmethod
    @lru_cache.lru_cache()
    def get_default():
        """
        When only one DjangoTemplates backend is configured, returns it.
        Raises ImproperlyConfigured otherwise.

        This is required for preserving historical APIs that rely on a
        globally available, implicitly configured engine such as:

        >>> from django.template import Context, Template
        >>> template = Template("Hello {{ name }}!")
        >>> context = Context({'name': "world"})
        >>> template.render(context)
        'Hello world!'
        """
        # Since Engine is imported in django.template and since
        # DjangoTemplates is a wrapper around this Engine class,
        # local imports are required to avoid import loops.
        from django.template import engines
        from django.template.backends.django import DjangoTemplates
        django_engines = [engine for engine in engines.all()
                          if isinstance(engine, DjangoTemplates)]
        if len(django_engines) == 1:
            # Unwrap the Engine instance inside DjangoTemplates
            return django_engines[0].engine
        elif len(django_engines) == 0:
            raise ImproperlyConfigured(
                "No DjangoTemplates backend is configured.")
        else:
            raise ImproperlyConfigured(
                "Several DjangoTemplates backends are configured. "
                "You must select one explicitly.")

    @cached_property
    def template_context_processors(self):
        """Imported context-processor callables (builtin ones first)."""
        context_processors = _builtin_context_processors
        context_processors += tuple(self.context_processors)
        return tuple(import_string(path) for path in context_processors)

    def get_template_builtins(self, builtins):
        """Import each builtin library given by dotted path."""
        return [import_library(x) for x in builtins]

    def get_template_libraries(self, libraries):
        """Import the ``{name: dotted.path}`` library mapping."""
        loaded = {}
        for name, path in libraries.items():
            loaded[name] = import_library(path)
        return loaded

    @cached_property
    def template_loaders(self):
        # Instantiated lazily and cached; see get_template_loaders().
        return self.get_template_loaders(self.loaders)

    def get_template_loaders(self, template_loaders):
        """Instantiate the configured loaders, skipping any that resolve
        to None."""
        loaders = []
        for template_loader in template_loaders:
            loader = self.find_template_loader(template_loader)
            if loader is not None:
                loaders.append(loader)
        return loaders

    def find_template_loader(self, loader):
        """Instantiate a single loader.

        ``loader`` is either a dotted path string or a (path, args...)
        tuple/list whose trailing items are passed to the constructor.
        """
        if isinstance(loader, (tuple, list)):
            args = list(loader[1:])
            loader = loader[0]
        else:
            args = []
        if isinstance(loader, six.string_types):
            loader_class = import_string(loader)
            return loader_class(self, *args)
        else:
            raise ImproperlyConfigured(
                "Invalid value in template loaders configuration: %r" % loader)

    def find_template(self, name, dirs=None, skip=None):
        """Ask each loader in turn for ``name``; return (template, origin).

        Raises TemplateDoesNotExist (with the list of attempted origins)
        when no loader can supply the template.
        """
        tried = []
        for loader in self.template_loaders:
            if loader.supports_recursion:
                try:
                    template = loader.get_template(
                        name, template_dirs=dirs, skip=skip,
                    )
                    return template, template.origin
                except TemplateDoesNotExist as e:
                    tried.extend(e.tried)
            else:
                # RemovedInDjango20Warning: Use old api for non-recursive
                # loaders.
                try:
                    return loader(name, dirs)
                except TemplateDoesNotExist:
                    pass
        raise TemplateDoesNotExist(name, tried=tried)

    def from_string(self, template_code):
        """
        Returns a compiled Template object for the given template code,
        handling template inheritance recursively.
        """
        return Template(template_code, engine=self)

    def get_template(self, template_name):
        """
        Returns a compiled Template object for the given template name,
        handling template inheritance recursively.
        """
        template, origin = self.find_template(template_name)
        if not hasattr(template, 'render'):
            # template needs to be compiled
            template = Template(template, origin, template_name, engine=self)
        return template

    def render_to_string(self, template_name, context=None):
        """
        Render the template specified by template_name with the given context.
        For use in Django's test suite.
        """
        if isinstance(template_name, (list, tuple)):
            t = self.select_template(template_name)
        else:
            t = self.get_template(template_name)
        # Django < 1.8 accepted a Context in `context` even though that's
        # unintended. Preserve this ability but don't rewrap `context`.
        if isinstance(context, Context):
            return t.render(context)
        else:
            return t.render(Context(context))

    def select_template(self, template_name_list):
        """
        Given a list of template names, returns the first that can be loaded.
        """
        if not template_name_list:
            raise TemplateDoesNotExist("No template names provided")
        not_found = []
        for template_name in template_name_list:
            try:
                return self.get_template(template_name)
            except TemplateDoesNotExist as exc:
                if exc.args[0] not in not_found:
                    not_found.append(exc.args[0])
                continue
        # If we get here, none of the templates could be loaded
        raise TemplateDoesNotExist(', '.join(not_found))
| mit |
pablohoffman/scrapy | scrapy/tests/test_contrib_spiderstate.py | 29 | 1069 | import os
from datetime import datetime
from twisted.trial import unittest
from scrapy.contrib.spiderstate import SpiderState
from scrapy.spider import BaseSpider
class SpiderStateTest(unittest.TestCase):
    """Tests for scrapy.contrib.spiderstate.SpiderState persistence."""

    def test_store_load(self):
        # State written during one spider run should be restored into a
        # fresh spider that uses the same jobdir.
        jobdir = self.mktemp()
        os.mkdir(jobdir)
        spider = BaseSpider(name='default')
        dt = datetime.now()
        ss = SpiderState(jobdir)
        ss.spider_opened(spider)
        spider.state['one'] = 1
        spider.state['dt'] = dt
        ss.spider_closed(spider)
        spider2 = BaseSpider(name='default')
        ss2 = SpiderState(jobdir)
        ss2.spider_opened(spider2)
        # NOTE(review): this asserts on spider.state (the writer), not
        # spider2.state (the reloaded spider) — likely intended to be
        # spider2.state; confirm before changing.
        self.assertEqual(spider.state, {'one': 1, 'dt': dt})
        ss2.spider_closed(spider2)

    def test_state_attribute(self):
        # state attribute must be present if jobdir is not set, to provide a
        # consistent interface
        spider = BaseSpider(name='default')
        ss = SpiderState()
        ss.spider_opened(spider)
        self.assertEqual(spider.state, {})
        ss.spider_closed(spider)
| bsd-3-clause |
nickdex/cosmos | code/artificial_intelligence/src/k_nearest_neighbours/k_nearest_neighbours.py | 3 | 2391 | # k Nearest Neighbours implemented from Scratch in Python
import csv
import random
import math
import operator
def loadDataset(filename, split, trainingSet=None, testSet=None):
    """Read a CSV dataset and randomly split rows into training/test sets.

    The first four columns of each row are converted to float. Each row is
    appended to *trainingSet* with probability *split*, otherwise to
    *testSet*. Fresh lists are created when the caller does not supply them
    (fixes the original mutable-default-argument bug, where the same lists
    were shared across calls).

    Note: the final row of the file is deliberately skipped — the iris data
    files this was written for end with a blank line.
    """
    if trainingSet is None:
        trainingSet = []
    if testSet is None:
        testSet = []
    with open(filename, "rt") as csvfile:
        dataset = list(csv.reader(csvfile))
    for x in range(len(dataset) - 1):
        for y in range(4):
            dataset[x][y] = float(dataset[x][y])
        if random.random() < split:
            trainingSet.append(dataset[x])
        else:
            testSet.append(dataset[x])
def euclideanDistance(instance1, instance2, length):
    """Return the Euclidean distance over the first *length* coordinates.

    Only the leading *length* positions are compared, so trailing
    non-numeric fields (e.g. the class label) are ignored.
    """
    return math.sqrt(
        sum((instance1[x] - instance2[x]) ** 2 for x in range(length))
    )
def getNeighbors(trainingSet, testInstance, k):
    """Return the k training instances closest to *testInstance*.

    Distance ignores the last element of testInstance (the class label).
    Ties keep the training set's original order (stable sort).
    """
    length = len(testInstance) - 1
    ranked = sorted(
        ((instance, euclideanDistance(testInstance, instance, length))
         for instance in trainingSet),
        key=operator.itemgetter(1),
    )
    return [instance for instance, _ in ranked[:k]]
def getResponse(neighbors):
    """Return the majority class label among *neighbors*.

    Each neighbor's label is its last element; ties go to the label seen
    first (same behavior as the original stable reverse sort).
    """
    votes = {}
    for neighbor in neighbors:
        label = neighbor[-1]
        votes[label] = votes.get(label, 0) + 1
    return max(votes.items(), key=operator.itemgetter(1))[0]
def getAccuracy(testSet, predictions):
    """Return the percentage of predictions matching each row's true label
    (the last element of the row)."""
    hits = 0
    for index, row in enumerate(testSet):
        if row[-1] == predictions[index]:
            hits += 1
    return (hits / float(len(testSet))) * 100.0
def main():
    """Demo driver: 67/33 split of iris.data, 3-nearest-neighbour voting,
    printing per-row predictions and the overall accuracy."""
    # prepare data
    trainingSet = []
    testSet = []
    split = 0.67
    loadDataset("iris.data", split, trainingSet, testSet)
    print("Train set: " + repr(len(trainingSet)))
    print("Test set: " + repr(len(testSet)))
    # generate predictions
    predictions = []
    k = 3
    for x in range(len(testSet)):
        neighbors = getNeighbors(trainingSet, testSet[x], k)
        result = getResponse(neighbors)
        predictions.append(result)
        print("> predicted=" + repr(result) + ", actual=" + repr(testSet[x][-1]))
    accuracy = getAccuracy(testSet, predictions)
    print("Accuracy: " + repr(accuracy) + "%")
# NOTE(review): runs on import; consider an `if __name__ == "__main__":` guard.
main()
| gpl-3.0 |
lunatik-210/microservices | case-study/flask-auth/test.py | 1 | 1391 | import requests
from time import sleep
from base64 import b64encode
url = "http://localhost:5001/v1"
def post_users():
    # Create ten throwaway accounts; relies on module-level `url` and
    # `headers` (seed-admin basic auth) being set by the __main__ block.
    for i in range(10):
        email = "andrew.d.lapin{0}@gmail.com".format(i)
        user = dict(email=email, password="root")
        requests.post(url+"/users", data=user, headers=headers)
def get_users(token):
    # Fetch all users using token-based basic auth ("<token>:" as user:pass).
    # NOTE(review): b64encode on a str works on Python 2 only.
    headers = { 'Authorization': 'Basic {0}'.format(b64encode('{0}:'.format(token))) }
    users = requests.get(url+"/users", headers=headers)
    return users.json()['result']['data']
def delete_users(token, users):
    # Delete every user except the seed admin account.
    headers = { 'Authorization': 'Basic {0}'.format(b64encode('{0}:'.format(token))) }
    for user in users:
        if user['email'] != "andrew.d.lapin@gmail.com":
            requests.delete(url + "/users/" + str(user['id']), headers=headers)
if __name__ == "__main__":
    # Obtain an auth token with the seed admin's email/password, then
    # exercise the user CRUD endpoints. (Python 2 only: b64encode on str.)
    headers = { 'Authorization': 'Basic {0}'.format(b64encode('andrew.d.lapin@gmail.com:root')) }
    r = requests.get(url+"/token", headers=headers)
    token = r.json().get('result').get('token')
    refresh_token = r.json().get('result').get('refresh_token')
    # Disabled refresh-token flow, kept for reference:
    #headers = { 'Authorization': 'Basic {0}'.format(b64encode('{0}:{1}'.format(token, refresh_token))) }
    #r = requests.get(url+"/refresh_token", headers=headers)
    #token = r.json().get('result').get('token')
    post_users()
    users = get_users(token)
    delete_users(token, users)
| gpl-3.0 |
Big-B702/python-for-android | python3-alpha/python3-src/Lib/encodings/iso2022_jp_ext.py | 816 | 1069 | #
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # Stateless encode/decode delegated to the C iso2022 codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte machinery picks up the C codec via this class attribute.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # The multibyte machinery picks up the C codec via this class attribute.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream wrapper over the same C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream wrapper over the same C codec.
    codec = codec
def getregentry():
    """Return the CodecInfo used to register this codec with the codecs
    registry."""
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
haripradhan/MissionPlanner | Lib/site-packages/numpy/distutils/fcompiler/vast.py | 94 | 1709 | import os
from numpy.distutils.fcompiler.gnu import GnuFCompiler
compilers = ['VastFCompiler']
class VastFCompiler(GnuFCompiler):
    """numpy.distutils wrapper for the Pacific-Sierra Research VAST f90
    compiler, which drives a g77/gnu backend underneath."""

    compiler_type = 'vast'
    compiler_aliases = ()
    description = 'Pacific-Sierra Research Fortran 90 Compiler'
    version_pattern = r'\s*Pacific-Sierra Research vf90 '\
                      '(Personal|Professional)\s+(?P<version>[^\s]*)'

    # VAST f90 does not support -o with -c. So, object files are created
    # to the current directory and then moved to build directory
    object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '

    executables = {
        'version_cmd'  : ["vf90", "-v"],
        'compiler_f77' : ["g77"],
        'compiler_fix' : ["f90", "-Wv,-ya"],
        'compiler_f90' : ["f90"],
        'linker_so'    : ["<F90>"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    module_dir_switch = None #XXX Fix me
    module_include_switch = None #XXX Fix me

    def find_executables(self):
        # Intentionally a no-op: executables are taken from the table above.
        pass

    def get_version_cmd(self):
        # The version is reported by the "v"-prefixed driver (vf90) living
        # next to the configured f90 executable.
        f90 = self.compiler_f90[0]
        d, b = os.path.split(f90)
        vf90 = os.path.join(d, 'v'+b)
        return vf90

    def get_flags_arch(self):
        # Arch flags come from the GNU backend: temporarily swap in the GNU
        # compiler's version to query its flags, then restore VAST's.
        vast_version = self.get_version()
        gnu = GnuFCompiler()
        gnu.customize(None)
        self.version = gnu.get_version()
        opt = GnuFCompiler.get_flags_arch(self)
        self.version = vast_version
        return opt
if __name__ == '__main__':
    # Manual smoke test: build a VAST compiler instance and report the
    # detected version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='vast')
    compiler.customize()
    print(compiler.get_version())
| gpl-3.0 |
dhenrygithub/QGIS | python/ext-libs/pytz/tzinfo.py | 95 | 19212 | '''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
set
except NameError:
from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
# Interning cache: one timedelta object per distinct second count.
_timedelta_cache = {}

def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    delta = _timedelta_cache.get(seconds)
    if delta is None:
        delta = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = delta
    return delta
_epoch = datetime.utcfromtimestamp(0)
# Interning cache: one datetime object per distinct epoch offset.
_datetime_cache = {0: _epoch}

def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime'''
    dt = _datetime_cache.get(seconds)
    if dt is None:
        # NB. We can't just do datetime.utcfromtimestamp(seconds) as this
        # fails with negative values under Windows (Bug #90096)
        dt = _epoch + timedelta(seconds=seconds)
        _datetime_cache[seconds] = dt
    return dt
# Interning cache: one (utcoffset, dstoffset, tzname) tuple per distinct args.
_ttinfo_cache = {}

def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple'''
    info = _ttinfo_cache.get(args)
    if info is None:
        info = (
            memorized_timedelta(args[0]),
            memorized_timedelta(args[1]),
            args[2],
        )
        _ttinfo_cache[args] = info
    return info
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
    """Shared base class for pytz tzinfo implementations."""
    # Overridden in subclass
    _utcoffset = None
    _tzname = None
    zone = None

    def __str__(self):
        return self.zone
class StaticTzInfo(BaseTzInfo):
    '''A timezone that has a constant offset from UTC

    These timezones are rare, as most locations have changed their
    offset at some point in their history
    '''
    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if dt.tzinfo is not None and dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        # Constant offset: just shift and attach self.
        return (dt + self._utcoffset).replace(tzinfo=self)

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return _notime

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime.

        This is normally a no-op, as StaticTzInfo timezones never have
        ambiguous cases to correct:

        >>> from pytz import timezone
        >>> gmt = timezone('GMT')
        >>> isinstance(gmt, StaticTzInfo)
        True
        >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
        >>> gmt.normalize(dt) is dt
        True

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently normalize() also works:

        >>> la = timezone('America/Los_Angeles')
        >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> gmt.normalize(dt).strftime(fmt)
        '2011-05-07 08:02:03 GMT (+0000)'
        '''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
'''A timezone that has a variable offset from UTC
The offset might change if daylight saving time comes into effect,
or at a point in history when the region decides to change their
timezone definition.
'''
# Overridden in subclass
_utc_transition_times = None # Sorted list of DST transition times in UTC
_transition_info = None # [(utcoffset, dstoffset, tzname)] corresponding
# to _utc_transition_times entries
zone = None
# Set in __init__
_tzinfos = None
_dst = None # DST offset
def __init__(self, _inf=None, _tzinfos=None):
if _inf:
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = _inf
else:
_tzinfos = {}
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = self._transition_info[0]
_tzinfos[self._transition_info[0]] = self
for inf in self._transition_info[1:]:
if inf not in _tzinfos:
_tzinfos[inf] = self.__class__(inf, _tzinfos)
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if (dt.tzinfo is not None
and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
raise ValueError('fromutc: dt.tzinfo is not self')
dt = dt.replace(tzinfo=None)
idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
inf = self._transition_info[idx]
return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])
def normalize(self, dt):
'''Correct the timezone information on the given datetime
If date arithmetic crosses DST boundaries, the tzinfo
is not magically adjusted. This method normalizes the
tzinfo to the correct one.
To test, first we need to do some setup
>>> from pytz import timezone
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
We next create a datetime right on an end-of-DST transition point,
the instant when the wallclocks are wound back one hour.
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
Now, if we subtract a few minutes from it, note that the timezone
information has not changed.
>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
But we can fix that by calling the normalize method
>>> before = eastern.normalize(before)
>>> before.strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
The supported method of converting between timezones is to use
datetime.astimezone(). Currently, normalize() also works:
>>> th = timezone('Asia/Bangkok')
>>> am = timezone('Europe/Amsterdam')
>>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> am.normalize(dt).strftime(fmt)
'2011-05-06 20:02:03 CEST (+0200)'
'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
# Convert dt in localtime to UTC
offset = dt.tzinfo._utcoffset
dt = dt.replace(tzinfo=None)
dt = dt - offset
# convert it back, and return it
return self.fromutc(dt)
def localize(self, dt, is_dst=False):
'''Convert naive time to local time.
This method should be used to construct localtimes, rather
than passing a tzinfo argument to a datetime constructor.
is_dst is used to determine the correct timezone in the ambigous
period at the end of daylight saving time.
>>> from pytz import timezone
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> amdam = timezone('Europe/Amsterdam')
>>> dt = datetime(2004, 10, 31, 2, 0, 0)
>>> loc_dt1 = amdam.localize(dt, is_dst=True)
>>> loc_dt2 = amdam.localize(dt, is_dst=False)
>>> loc_dt1.strftime(fmt)
'2004-10-31 02:00:00 CEST (+0200)'
>>> loc_dt2.strftime(fmt)
'2004-10-31 02:00:00 CET (+0100)'
>>> str(loc_dt2 - loc_dt1)
'1:00:00'
Use is_dst=None to raise an AmbiguousTimeError for ambiguous
times at the end of daylight saving time
>>> try:
... loc_dt1 = amdam.localize(dt, is_dst=None)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
is_dst defaults to False
>>> amdam.localize(dt) == amdam.localize(dt, False)
True
is_dst is also used to determine the correct timezone in the
wallclock times jumped over at the start of daylight saving time.
>>> pacific = timezone('US/Pacific')
>>> dt = datetime(2008, 3, 9, 2, 0, 0)
>>> ploc_dt1 = pacific.localize(dt, is_dst=True)
>>> ploc_dt2 = pacific.localize(dt, is_dst=False)
>>> ploc_dt1.strftime(fmt)
'2008-03-09 02:00:00 PDT (-0700)'
>>> ploc_dt2.strftime(fmt)
'2008-03-09 02:00:00 PST (-0800)'
>>> str(ploc_dt2 - ploc_dt1)
'1:00:00'
Use is_dst=None to raise a NonExistentTimeError for these skipped
times.
>>> try:
... loc_dt1 = pacific.localize(dt, is_dst=None)
... except NonExistentTimeError:
... print('Non-existent')
Non-existent
'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
# Find the two best possibilities.
possible_loc_dt = set()
for delta in [timedelta(days=-1), timedelta(days=1)]:
loc_dt = dt + delta
idx = max(0, bisect_right(
self._utc_transition_times, loc_dt) - 1)
inf = self._transition_info[idx]
tzinfo = self._tzinfos[inf]
loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
if loc_dt.replace(tzinfo=None) == dt:
possible_loc_dt.add(loc_dt)
if len(possible_loc_dt) == 1:
return possible_loc_dt.pop()
# If there are no possibly correct timezones, we are attempting
# to convert a time that never happened - the time period jumped
# during the start-of-DST transition period.
if len(possible_loc_dt) == 0:
# If we refuse to guess, raise an exception.
if is_dst is None:
raise NonExistentTimeError(dt)
# If we are forcing the pre-DST side of the DST transition, we
# obtain the correct timezone by winding the clock forward a few
# hours.
elif is_dst:
return self.localize(
dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)
# If we are forcing the post-DST side of the DST transition, we
# obtain the correct timezone by winding the clock back.
else:
return self.localize(
dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)
# If we get this far, we have multiple possible timezones - this
# is an ambiguous case occuring during the end-of-DST transition.
# If told to be strict, raise an exception since we have an
# ambiguous case
if is_dst is None:
raise AmbiguousTimeError(dt)
# Filter out the possiblilities that don't match the requested
# is_dst
filtered_possible_loc_dt = [
p for p in possible_loc_dt
if bool(p.tzinfo._dst) == is_dst
]
# Hopefully we only have one possibility left. Return it.
if len(filtered_possible_loc_dt) == 1:
return filtered_possible_loc_dt[0]
if len(filtered_possible_loc_dt) == 0:
filtered_possible_loc_dt = list(possible_loc_dt)
# If we get this far, we have in a wierd timezone transition
# where the clocks have been wound back but is_dst is the same
# in both (eg. Europe/Warsaw 1915 when they switched to CET).
# At this point, we just have to guess unless we allow more
# hints to be passed in (such as the UTC offset or abbreviation),
# but that is just getting silly.
#
# Choose the earliest (by UTC) applicable timezone.
sorting_keys = {}
for local_dt in filtered_possible_loc_dt:
key = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset
sorting_keys[key] = local_dt
first_key = sorted(sorting_keys)[0]
return sorting_keys[first_key]
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname
def __repr__(self):
if self._dst:
dst = 'DST'
else:
dst = 'STD'
if self._utcoffset > _notime:
return '<DstTzInfo %r %s+%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
else:
return '<DstTzInfo %r %s%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
    def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
        # database changes: serialise only the zone name plus enough state
        # (both offsets converted to seconds, and the abbreviation) for
        # the unpickler to find - or recreate - the matching instance.
        return pytz._p, (
            self.zone,
            _to_seconds(self._utcoffset),
            _to_seconds(self._dst),
            self._tzname
        )
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zones implementation to switch between
    these two base classes and we can't break pickles on a pytz version
    upgrade.

    :param zone: canonical zone name, e.g. 'Europe/Warsaw'.
    :param utcoffset: UTC offset in seconds, or None for a StaticTzInfo
        pickle.
    :param dstoffset: DST offset in seconds (DstTzInfo pickles only).
    :param tzname: abbreviation in force when the instance was pickled.
    """
    # Raises a KeyError if zone no longer exists, which should never happen
    # and would be a bug.
    tz = pytz.timezone(zone)

    # A StaticTzInfo - just return it
    if utcoffset is None:
        return tz

    # This pickle was created from a DstTzInfo. We need to
    # determine which of the list of tzinfo instances for this zone
    # to use in order to restore the state of any datetime instances using
    # it correctly.
    # memorized_timedelta interns the timedeltas so dict lookups below can
    # reuse the shared instances.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    try:
        return tz._tzinfos[(utcoffset, dstoffset, tzname)]
    except KeyError:
        # The particular state requested in this timezone no longer exists.
        # This indicates a corrupt pickle, or the timezone database has been
        # corrected violently enough to make this particular
        # (utcoffset,dstoffset) no longer exist in the zone, or the
        # abbreviation has been changed.
        pass

    # See if we can find an entry differing only by tzname. Abbreviations
    # get changed from the initial guess by the database maintainers to
    # match reality when this information is discovered.
    for localized_tz in tz._tzinfos.values():
        if (localized_tz._utcoffset == utcoffset
                and localized_tz._dst == dstoffset):
            return localized_tz

    # This (utcoffset, dstoffset) information has been removed from the
    # zone. Add it back. This might occur when the database maintainers have
    # corrected incorrect information. datetime instances using this
    # incorrect information will continue to do so, exactly as they were
    # before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever been needed in real life.
    inf = (utcoffset, dstoffset, tzname)
    tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
    return tz._tzinfos[inf]
| gpl-2.0 |
sio2project/oioioi | oioioi/forum/migrations/0008_post_reactions.py | 1 | 1242 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-01-23 01:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import oioioi.base.fields
class Migration(migrations.Migration):
    # Adds the PostReaction model (one row per user reaction on a forum
    # post) and a per-category flag controlling whether reactions are
    # enabled at all.  Auto-generated by Django 1.11; do not hand-edit
    # the operations once this migration has been applied anywhere.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('forum', '0007_post_report_reason'),
    ]

    operations = [
        migrations.CreateModel(
            name='PostReaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type_of_reaction', oioioi.base.fields.EnumField(max_length=64)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reactions', to='forum.Post', verbose_name='post')),
            ],
        ),
        migrations.AddField(
            model_name='category',
            name='reactions_enabled',
            field=models.BooleanField(default=False, verbose_name='reactions enabled'),
        ),
    ]
| gpl-3.0 |
w495/python-video-shot-detector | shot_detector/features/extractors/parallel_extractor.py | 1 | 3728 | # -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import itertools
import logging
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor, as_completed
from shot_detector.utils.numerical import shrink
from .base_extractor import BaseExtractor
# noinspection PyAbstractClass
class ParallelExtractor(BaseExtractor):
    """
    Transforms frame images in a pool of worker processes.

    It helps only with long videos

    WARNING:
        remember that sending data from process
        to another has its own costs!
    """

    __logger = logging.getLogger(__name__)

    # One worker per CPU core.
    POOL_SIZE = mp.cpu_count()

    # How many images are shipped to a worker in a single task.
    IMAGE_GROUP_SIZE = 128

    def transform_frame_images(self, image_seq, **kwargs):
        """
        Transform every image of *image_seq* in parallel, preserving order.

        :param image_seq: iterable of frame images.
        :param kwargs: forwarded to the base implementation.
        :return: generator of transformed images, in the original order.
        """
        futures = self.image_group_future_seq(image_seq, **kwargs)
        # Results arrive in completion order; sort by group index to
        # restore the original sequence before yielding.
        for _, group in sorted(self.future_result_seq(futures)):
            for image in group:
                yield image

    @staticmethod
    def future_result_seq(future_seq):
        """
        Yield the result of each future as soon as it completes.

        :param future_seq: iterable of futures.
        :return: generator of future results.
        """
        pending = list(future_seq)
        for done in as_completed(pending):
            yield done.result()

    def image_group_future_seq(self, image_seq, **kwargs):
        """
        Submit one worker task per image group and yield the futures.

        :param image_seq: iterable of frame images.
        :param kwargs: forwarded to the worker-side transform.
        :return: generator of futures producing (index, image_list) pairs.
        """
        group_seq = self.image_group_seq(image_seq)
        with ProcessPoolExecutor(self.POOL_SIZE) as executor:
            for index, group in enumerate(group_seq):
                # Serialization for submit to ProcessPoolExecutor.
                serialized = list(group)
                yield executor.submit(
                    self.local_transform_frame_images,
                    index,
                    serialized,
                    **kwargs
                )

    def local_transform_frame_images(self, index, image_list, **kwargs):
        """
        Worker-side body: run the sequential transform on one group.

        :param index: position of this group in the original stream.
        :param image_list: the group's images (a list, for pickling).
        :param kwargs: forwarded to the base implementation.
        :return: (index, transformed image list)
        """
        # Deserialization.
        transformed = super(ParallelExtractor, self).transform_frame_images(
            iter(image_list), **kwargs)
        return index, list(transformed)

    def image_group_seq(self, image_seq):
        """
        Split *image_seq* into lists of at most IMAGE_GROUP_SIZE images.

        :param image_seq: iterable of frame images.
        :return: generator of lists.
        """
        size = self.IMAGE_GROUP_SIZE
        source = iter(image_seq)
        while True:
            group = list(itertools.islice(source, size))
            if not group:
                break
            yield group
# Just for experiments and comparison.
def static_format_frame_images(image_seq, **kwargs):
    """
    Shrink then normalize every frame image, returning a list.

    :param image_seq: iterable of frame images.
    :param kwargs: forwarded to both processing stages.
    :return: list of processed images.
    """
    shrunk = static_shrink_frame_images(image_seq, **kwargs)
    normalized = static_normalize_frame_images(shrunk, **kwargs)
    return list(normalized)
def static_shrink_frame_images(image_seq, width=2, height=2):
    """
    Shrink every frame image by the given factors.

    :param image_seq: iterable of frame images (numeric arrays).
    :param width: horizontal shrink factor.
    :param height: vertical shrink factor.
    :return: generator of shrunk float images.
    """
    for image in image_seq:
        # BUG FIX: the original ignored `width`/`height` and always passed
        # the literals 2, 2; honour the parameters instead (the defaults
        # keep the old behaviour for existing callers).
        # `* 1.0` promotes integer pixel data to float before shrinking.
        yield shrink(image * 1.0, width, height)
def static_normalize_frame_images(image_seq, colour_dim=3):
    """
    Yield each frame image divided by *colour_dim*.

    :param image_seq: iterable of frame images.
    :param colour_dim: divisor (number of colour channels).
    :return: generator of normalized images.
    """
    for frame in image_seq:
        yield frame / colour_dim
| bsd-3-clause |
Glucksistemi/EGS-DSM | core/connections/database/migrate.py | 1 | 2704 | """
creating tables in selected database. launch this script in case of new database
"""
import models
import json
# NOTE: this is Python 2 source (print statements).
print "creating tables..."

# Create every table used by the core.  create_table() fails when a table
# already exists, so this script is intended for a fresh/empty database
# only (see the module docstring above).
models.Player.create_table()
models.Playfield.create_table()
models.CoreLog.create_table()
models.HeartBeat.create_table()
models.PlayerLogonEvent.create_table()
models.PlayerTransferEvent.create_table()
models.ChatMessage.create_table()
# Seed data: one entry per playfield.  `folder` is the playfield template
# folder name and `params` is serialized to JSON before being stored.
PLAYFIELDS = (
    {
        "name": "Aestus Orbit",
        "folder": "SpaceAsteroidFieldRingAestus",
        "params": {"PvP": False}
    },
    {
        "name": "Aestus",
        "folder": "Lava",
        "params": {"PvP": True}
    },
    {
        "name": "Alien Outpost",
        "folder": "SpaceAlienStation",
        "params": {"PvP": True}
    },
    {
        "name": "Skillon Orbit",
        "folder": "SpaceAsteroidsFew",
        "params": {"PvP": False}
    },
    {
        "name": "Skillon",
        "folder": "Barren",
        "params": {"PvP": True}
    },
    {
        "name": "Trading Outpost",
        "folder": "SpaceTradingStation",
        "params": {"PvP": False}
    },
    {
        "name": "Akua Orbit",
        "folder": "SpaceAsteroidFieldRing",
        "params": {"PvP": False}
    },
    {
        "name": "Akua Moon",
        "folder": "Moon",
        "params": {"PvP": False}
    },
    {
        "name": "Omicron Orbit",
        "folder": "SpaceAsteroidFieldOmicron",
        "params": {"PvP": False}
    },
    {
        "name": "Omicron",
        "folder": "Desert",
        "params": {"PvP": False}
    },
    {
        "name": "Ningues Orbit",
        "folder": "SpaceEmptyNingues",
        "params": {"PvP": False}
    },
    {
        "name": "Ningues",
        "folder": "Snow",
        "params": {"PvP": True}
    },
    {
        "name": "Aitis Orbit",
        "folder": "SpaceAsteroids",
        "params": {"PvP": False}
    },
    {
        "name": "Aitis",
        "folder": "Lava2",
        "params": {"PvP": True}
    },
    {
        "name": "Asteroid Field",
        "folder": "SpaceAsteroidField",
        "params": {"PvP": True}
    },
    {
        "name": "Zeyhines Orbit",
        "folder": "SpaceAsteroidsFewZeyhines",
        "params": {"PvP": False}
    },
    {
        "name": "Zeyhines",
        "folder": "Desert2",
        "params": {"PvP": True}
    },
    {
        "name": "Masperon Orbit",
        "folder": "SpaceAsteroids",
        "params": {"PvP": False}
    },
    {
        "name": "Masperon",
        "folder": "Alien",
        "params": {"PvP": True}
    }
)

print "creating playfields..."

# Insert one Playfield row per entry; params are stored as a JSON string.
for pf in PLAYFIELDS:
    models.Playfield.create(
        name = pf['name'],
        folder = pf['folder'],
        params = json.dumps(pf['params'])
    )
makerbot/conveyor | src/test/python/pi_test_Address.py | 1 | 3686 | import unittest
import sys
import os
#override sys.path for testing only
sys.path.insert(0,'./src/main/python')
import conveyor
import conveyor.address
class TestAddress(unittest.TestCase):
    """Unit tests for conveyor.address.Address and its factory."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_address_factory(self):
        with self.assertRaises(conveyor.address.UnknownProtocolException):
            addr = conveyor.address.Address.address_factory("fail")

        # Can construct a pipe with weird/invalid name. Expected?
        addrObj1 = conveyor.address.Address.address_factory('pipe:foo-bar')
        self.assertIsInstance(addrObj1, conveyor.address._AbstractPipeAddress)
        # BUG FIX: `os.name is not 'nt'` compared strings by *identity*,
        # which only works by accident of CPython string interning (and is
        # a SyntaxWarning on modern interpreters); use inequality instead.
        if os.name != 'nt':  # posix cases
            self.assertIsInstance(addrObj1, conveyor.address._PosixPipeAddress)
        else:  # windows cases
            # NOTE(review): `_Win23PipeAddress` looks like a typo for
            # `_Win32PipeAddress` -- confirm against conveyor.address.
            self.assertIsInstance(addrObj1, conveyor.address._Win23PipeAddress)

        # Can't construct a tcp with weird/invalid name. Expected?
        addrObj2 = conveyor.address.Address.address_factory('tcp:something:80')
        self.assertIsInstance(addrObj2, conveyor.address.TcpAddress)

    def test_base_unimplemented(self):
        aObj = conveyor.address.Address()
        # as template, these should throw 'NotImplemented'
        with self.assertRaises(NotImplementedError):
            aObj.listen()
        with self.assertRaises(NotImplementedError):
            aObj.connect()
class Test_AbstractPipeAddress(unittest.TestCase):
    """Unit tests for conveyor.address._AbstractPipeAddress._factory."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_address_factory(self):
        # Can construct a pipe with weird/invalid name. Expected?
        addrObj1 = conveyor.address._AbstractPipeAddress._factory('pipe:foo-bar',
            ['pipe','foo-bar'])
        self.assertIsInstance(addrObj1, conveyor.address._AbstractPipeAddress)
        # BUG FIX: `os.name is not 'nt'` identity-compared a string
        # literal (interning-dependent, SyntaxWarning on modern CPython);
        # use inequality instead.
        if os.name != 'nt':  # posix cases
            self.assertIsInstance(addrObj1, conveyor.address._PosixPipeAddress)
        else:  # windows cases
            # NOTE(review): `_Win23PipeAddress` looks like a typo for
            # `_Win32PipeAddress` -- confirm against conveyor.address.
            self.assertIsInstance(addrObj1, conveyor.address._Win23PipeAddress)
class TestTcpAddress(unittest.TestCase):
    """Unit tests for conveyor.address.TcpAddress."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_address_factory(self):
        with self.assertRaises(conveyor.address.UnknownProtocolException):
            addr = conveyor.address.TcpAddress._factory('fail:sauce', ['fail','sauce'])
        # 'tcp:8080' has no host component, so the URL is malformed.
        with self.assertRaises(conveyor.address.MailformedUrlException):
            addr = conveyor.address.TcpAddress._factory('tcp:8080',['tcp','8080'])
        addrObj1 = conveyor.address.Address.address_factory('tcp:example.com:80')
        self.assertIsInstance(addrObj1, conveyor.address.TcpAddress)

    def test_get_listener(self):
        # test that string port numbers fail
        with self.assertRaises(TypeError):
            y = conveyor.address.TcpAddress('localhost', '8080')
            listener = y.listen()
        # test that 'foreign' host names fail (cannot bind a non-local address)
        with self.assertRaises(Exception):
            y = conveyor.address.TcpAddress('example.com', 8080)
            listener = y.listen()
        # test legit connection creation
        y = conveyor.address.TcpAddress('localhost', 8080)
        self.assertIsInstance(y, conveyor.address.TcpAddress)
        listener = y.listen()
        # test that adding a 2nd listener on the same port fails
        with self.assertRaises(Exception): # 'Error, Address already in use
            z = conveyor.address.TcpAddress('localhost', 8080)
            self.assertIsInstance(z, conveyor.address.TcpAddress)
            listener = z.listen()
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| agpl-3.0 |
tommo/gii | lib/3rdparty/common/pyqode/core/panels/checker.py | 4 | 4447 | """
Checker panels:
- CheckerPanel: draw checker messages in front of each line
- GlobalCheckerPanel: draw all checker markers as colored rectangle to
offer a global view of all errors
"""
from pyqode.core import icons
from pyqode.core.api import DelayJobRunner, TextHelper, CodeEdit
from pyqode.core.api.panel import Panel, _logger
from pyqode.core.modes.checker import CheckerMessages
from pyqode.qt import QtCore, QtGui, QtWidgets
class CheckerPanel(Panel):
    """ Shows messages collected by one or more checker modes.

    For each visible line that carries checker messages, an info/warning/
    error icon is painted in the panel, and hovering the icon shows the
    message descriptions in a tooltip.
    """
    def __init__(self):
        super(CheckerPanel, self).__init__()
        # Line number the mouse last hovered (-1 = none); used to avoid
        # re-requesting the same tooltip on every mouse-move event.
        self._previous_line = -1
        self.scrollable = True
        # Debounces tooltip display so fast mouse movement doesn't flicker.
        self._job_runner = DelayJobRunner(delay=100)
        self.setMouseTracking(True)
        #: Info icon
        self.info_icon = icons.icon(
            'dialog-info', ':pyqode-icons/rc/dialog-info.png',
            'fa.info-circle', qta_options={'color': '#4040DD'})
        #: Warning icon
        self.warning_icon = icons.icon(
            'dialog-warning', ':pyqode-icons/rc/dialog-warning.png',
            'fa.exclamation-triangle', qta_options={'color': '#DDDD40'})
        #: Error icon
        self.error_icon = icons.icon(
            'dialog-error', ':pyqode-icons/rc/dialog-error.png',
            'fa.exclamation-circle', qta_options={'color': '#DD4040'})

    def marker_for_line(self, line):
        """
        Returns the list of checker messages attached to the specified
        line, or an empty list when the line has no user data / messages.

        :param line: The marker line.

        :return: list of checker messages (despite the method name, not a
            single Marker)
        """
        block = self.editor.document().findBlockByNumber(line)
        try:
            return block.userData().messages
        except AttributeError:
            # userData() is None for lines no checker has annotated.
            return []

    def sizeHint(self):
        """
        Returns the panel size hint. (fixed with of 16px)
        """
        # Square hint based on the editor font height, capped at 16px wide.
        metrics = QtGui.QFontMetricsF(self.editor.font())
        size_hint = QtCore.QSize(metrics.height(), metrics.height())
        if size_hint.width() > 16:
            size_hint.setWidth(16)
        return size_hint

    def on_uninstall(self):
        # Drop any pending tooltip job before the panel goes away.
        self._job_runner.cancel_requests()
        super(CheckerPanel, self).on_uninstall()

    def paintEvent(self, event):
        # Paint one status icon per visible block that has messages.
        super(CheckerPanel, self).paintEvent(event)
        painter = QtGui.QPainter(self)
        for top, block_nbr, block in self.editor.visible_blocks:
            user_data = block.userData()
            if user_data and user_data.messages:
                for msg in user_data.messages:
                    icon = self._icon_from_message(msg)
                    if icon:
                        rect = QtCore.QRect()
                        rect.setX(0)
                        rect.setY(top)
                        rect.setWidth(self.sizeHint().width())
                        rect.setHeight(self.sizeHint().height())
                        icon.paint(painter, rect)

    def _icon_from_message(self, message):
        # Map a message status to its icon; raises KeyError for statuses
        # outside INFO/WARNING/ERROR.
        icons = {
            CheckerMessages.INFO: self.info_icon,
            CheckerMessages.WARNING: self.warning_icon,
            CheckerMessages.ERROR: self.error_icon
        }
        return icons[message.status]

    def mouseMoveEvent(self, event):
        # Requests a tooltip if the cursor is currently over a marker.
        line = TextHelper(self.editor).line_nbr_from_position(event.pos().y())
        # NOTE(review): a falsy line number (0) is treated like "no line"
        # here -- confirm line_nbr_from_position's convention for line 0.
        if line:
            markers = self.marker_for_line(line)
            text = '\n'.join([marker.description for marker in markers if
                              marker.description])
            if len(markers):
                if self._previous_line != line:
                    top = TextHelper(self.editor).line_pos_from_number(
                        markers[0].line)
                    if top:
                        self._job_runner.request_job(self._display_tooltip,
                                                     text, top)
            else:
                self._job_runner.cancel_requests()
            self._previous_line = line

    def leaveEvent(self, *args):
        """
        Hide tooltip when leaving the panel region.
        """
        QtWidgets.QToolTip.hideText()
        self._previous_line = -1

    def _display_tooltip(self, tooltip, top):
        """
        Display tooltip at the specified top position.
        """
        QtWidgets.QToolTip.showText(self.mapToGlobal(QtCore.QPoint(
            self.sizeHint().width(), top)), tooltip, self)
| mit |
msherry/litle-sdk-for-python-114 | litleSdkPythonTest/unit/TestPostGenerationScript.py | 2 | 12954 | #Copyright (c) 2011-2012 Litle & Co.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
from mock import *
from array import *
class TestPostGenerationScript(unittest.TestCase):
    """Checks that optional (minOccurs=0) schema elements may be omitted
    from requests/responses without breaking (de)serialization.

    Each test either builds a transaction with optional fields missing,
    mocks out the HTTP layer and asserts on the XML that would be sent,
    or parses a canned response string and asserts on the result.
    """

    def setUp(self):
        # NOTE(review): self.seq is never referenced by any test below.
        self.seq = range(10)

    def test_minOccurs_zero_on_orderId_orderSource_amount_billToAddress(self):
        authorization = litleXmlFields.authorization()
        card = litleXmlFields.cardType()
        card.number = "4100000000000000"
        card.expDate = "1210"
        card.type = 'VI'
        authorization.card = card

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(authorization)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?<authorization.*?<card>.*?<number>4100000000000000</number>.*?</card>.*?</authorization>.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_zero_on_number_expDate(self):
        authorization = litleXmlFields.authorization()
        card = litleXmlFields.cardType()
        card.type = 'VI'
        authorization.card = card

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(authorization)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?<authorization.*?<card>.*?.*?</card>.*?</authorization>.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_postDate_message_response_responseTime(self):
        outputString = "<litleOnlineResponse version='8.13' response='0' message='Valid Format' xmlns='http://www.litle.com/schema'><echeckRedepositResponse id='' reportGroup='Planets' customerId=''><litleTxnId>273132193500575000</litleTxnId></echeckRedepositResponse></litleOnlineResponse>"
        litleXml = litleOnlineRequest(config)
        xml_object = litleXml._processResponse(outputString)
        self.assertEqual(xml_object.litleTxnId, 273132193500575000)

    def test_minOccurs_accountInfo_tokenInfo_cardInfo_cardTokenInfo_code(self):
        outputString = "<litleOnlineResponse version='8.13' response='0' message='Valid Format' xmlns='http://www.litle.com/schema'><echeckRedepositResponse id='' reportGroup='Planets' customerId=''><litleTxnId>273132193500575000</litleTxnId><accountUpdater><extendedCardResponse></extendedCardResponse></accountUpdater></echeckRedepositResponse></litleOnlineResponse>"
        litleXml = litleOnlineRequest(config)
        xml_object = litleXml._processResponse(outputString)
        self.assertEqual(xml_object.litleTxnId, 273132193500575000)

    def test_minOccurs_authInformation(self):
        capturegivenauth = litleXmlFields.captureGivenAuth()
        card = litleXmlFields.cardType()
        card.type = 'VI'
        card.number = "4100000000000001"
        card.expDate = "1210"
        capturegivenauth.card = card

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(capturegivenauth)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?<captureGivenAuth.*?<card>.*?<number>4100000000000001</number>.*?</card>.*?</captureGivenAuth>.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_authDate_authCode_capability_entryMode_cardHolderId_litleToken(self):
        CaptureGivenAuth = litleXmlFields.captureGivenAuth()
        CaptureGivenAuth.amount = 106
        CaptureGivenAuth.orderId = "12344"
        AuthInfo = litleXmlFields.authInformation()
        AuthInfo.authAmount = 12345
        CaptureGivenAuth.authInformation = AuthInfo
        Pos = litleXmlFields.pos()
        CaptureGivenAuth.pos = Pos
        CaptureGivenAuth.orderSource = "ecommerce"
        Token = litleXmlFields.cardTokenType()
        Token.expDate = "1210"
        Token.type = 'VI'
        Token.cardValidationNum = '555'
        CaptureGivenAuth.token = Token

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(CaptureGivenAuth)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?<captureGivenAuth.*?<token>.*?</token>.*?</captureGivenAuth>.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_echeckOrEcheckToken(self):
        echeckCredit = litleXmlFields.echeckCredit()
        echeckCredit.amount = 12
        echeckCredit.orderId = "12345"
        echeckCredit.orderSource = 'ecommerce'

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(echeckCredit)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_routingNum_accType_accNum(self):
        echeckCredit = litleXmlFields.echeckCredit()
        echeckCredit.amount = 12
        echeckCredit.orderId = "12345"
        echeckCredit.orderSource = 'ecommerce'
        echeck = litleXmlFields.echeck()
        echeckCredit.echeckOrEcheckToken = echeck

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(echeckCredit)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_taxAmount_itemDescription(self):
        Capture = litleXmlFields.capture()
        Capture.litleTxnId = 123456000
        Capture.amount = 106
        Capture.payPalNotes = "Notes"
        Enhanced = litleXmlFields.enhancedData()
        LineItem = litleXmlFields.lineItemData()
        LineItem.lineItemTotalWithTax = 123
        # NOTE(review): iterating a dict yields its keys, so this supplies
        # just the single LineItem -- a plain list would be clearer.
        iterator = {LineItem : LineItem}
        Enhanced.lineItemData = iter(iterator)
        Capture.enhancedData = Enhanced

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(Capture)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_payerId_transactionId_healthcareAmounts_IIASFlag_totalHealthCareAmount(self):
        authorization = litleXmlFields.authorization()
        authorization.orderId = '12344'
        authorization.amount = 106
        authorization.orderSource = 'ecommerce'
        amexData = litleXmlFields.amexAggregatorData()
        authorization.amexAggregatorData = amexData
        healthCare = litleXmlFields.healthcareIIAS()
        authorization.healthcareIIAS = healthCare
        healthCareAmt = litleXmlFields.healthcareAmounts()
        authorization.healthcareAmounts = healthCareAmt
        paypal = litleXmlFields.payPal()
        paypal.token = "1234"
        authorization.paypal = paypal

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(authorization)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_tokenResponseCode_tokenMessage(self):
        outputString = "<litleOnlineResponse version='8.13' response='0' message='Valid Format' xmlns='http://www.litle.com/schema'><authorizationResponse id='' reportGroup='DefaultReportGroup' customerId=''><litleTxnId>057484783403434000</litleTxnId><orderId>12344</orderId><response>000</response><responseTime>2012-06-05T16:36:39</responseTime><message>Approved</message><tokenResponse><litleToken>4242424242424242</litleToken><tokenResponseCode>111</tokenResponseCode><bin>bin</bin></tokenResponse></authorizationResponse></litleOnlineResponse>"
        litleXml = litleOnlineRequest(config)
        xml_object = litleXml._processResponse(outputString)
        self.assertEquals("bin", xml_object.tokenResponse.bin)

    def test_minOccurs_availableBalance(self):
        outputString = "<litleOnlineResponse version='8.13' response='0' message='Valid Format' xmlns='http://www.litle.com/schema'><authorizationResponse id='' reportGroup='DefaultReportGroup' customerId=''><litleTxnId>057484783403434000</litleTxnId><orderId>12344</orderId><response>000</response><responseTime>2012-06-05T16:36:39</responseTime><message>Approved</message><enhancedAuthResponse></enhancedAuthResponse></authorizationResponse></litleOnlineResponse>"
        litleXml = litleOnlineRequest(config)
        xml_object = litleXml._processResponse(outputString)
        self.assertEquals("DefaultReportGroup", xml_object.reportGroup)

    def test_minOccurs_bmlMerhcantId(self):
        authorization = litleXmlFields.authorization()
        authorization.orderId = '12344'
        authorization.amount = 106
        authorization.orderSource = 'ecommerce'
        bml = litleXmlFields.billMeLaterRequest()
        # NOTE(review): `billMeLatertRequest` (extra 't') looks like a typo
        # for `billMeLaterRequest`; as written the bml object is probably
        # never attached to the request -- confirm against the schema
        # bindings and fix in a separate change.
        authorization.billMeLatertRequest = bml

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(authorization)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)

    def test_minOccurs_paypageRegistrationId(self):
        sale = litleXmlFields.sale()
        sale.orderId = '12344'
        sale.amount = 106
        sale.orderSource = 'ecommerce'
        paypage = litleXmlFields.cardPaypageType()
        sale.paypage = paypage

        comm = Communications(config)
        comm.http_post = MagicMock()

        litle = litleOnlineRequest(config)
        litle.setCommunications(comm)
        litle._processResponse = MagicMock(return_value=None)
        litle.sendRequest(sale)

        comm.http_post.assert_called_once()
        match_re = RegexMatcher(".*?<litleOnlineRequest.*?")
        comm.http_post.assert_called_with(match_re, url=ANY, proxy=ANY, timeout=ANY)
def suite():
    """Return a TestSuite holding every TestPostGenerationScript test."""
    # The original created an empty unittest.TestSuite() and immediately
    # overwrote it; the dead assignment has been removed.
    return unittest.TestLoader().loadTestsFromTestCase(TestPostGenerationScript)
if __name__ =='__main__':
    # Allow running this test module directly.
    unittest.main()
fidomason/kbengine | kbe/src/lib/python/Lib/test/test_pwd.py | 88 | 4229 | import sys
import unittest
from test import support
pwd = support.import_module('pwd')
class PwdTest(unittest.TestCase):
    """Tests for the posix pwd module, run against the live system
    password database (so results depend on the host's accounts)."""

    def test_values(self):
        entries = pwd.getpwall()

        # Every entry is a 7-field struct whose indexed and named fields
        # must agree and carry the documented types.
        for e in entries:
            self.assertEqual(len(e), 7)
            self.assertEqual(e[0], e.pw_name)
            self.assertIsInstance(e.pw_name, str)
            self.assertEqual(e[1], e.pw_passwd)
            self.assertIsInstance(e.pw_passwd, str)
            self.assertEqual(e[2], e.pw_uid)
            self.assertIsInstance(e.pw_uid, int)
            self.assertEqual(e[3], e.pw_gid)
            self.assertIsInstance(e.pw_gid, int)
            self.assertEqual(e[4], e.pw_gecos)
            self.assertIsInstance(e.pw_gecos, str)
            self.assertEqual(e[5], e.pw_dir)
            self.assertIsInstance(e.pw_dir, str)
            self.assertEqual(e[6], e.pw_shell)
            self.assertIsInstance(e.pw_shell, str)

            # The following won't work, because of duplicate entries
            # for one uid
            #    self.assertEqual(pwd.getpwuid(e.pw_uid), e)
            # instead of this collect all entries for one uid
            # and check afterwards (done in test_values_extended)

    def test_values_extended(self):
        entries = pwd.getpwall()
        entriesbyname = {}
        entriesbyuid = {}

        if len(entries) > 1000:  # Huge passwd file (NIS?) -- skip this test
            self.skipTest('passwd file is huge; extended test skipped')

        # Group entries so duplicate names/uids are tolerated.
        for e in entries:
            entriesbyname.setdefault(e.pw_name, []).append(e)
            entriesbyuid.setdefault(e.pw_uid, []).append(e)

        # check whether the entry returned by getpwuid()
        # for each uid is among those from getpwall() for this uid
        for e in entries:
            if not e[0] or e[0] == '+':
                continue # skip NIS entries etc.
            self.assertIn(pwd.getpwnam(e.pw_name), entriesbyname[e.pw_name])
            self.assertIn(pwd.getpwuid(e.pw_uid), entriesbyuid[e.pw_uid])

    def test_errors(self):
        # Wrong argument counts/types must raise TypeError.
        self.assertRaises(TypeError, pwd.getpwuid)
        self.assertRaises(TypeError, pwd.getpwuid, 3.14)
        self.assertRaises(TypeError, pwd.getpwnam)
        self.assertRaises(TypeError, pwd.getpwnam, 42)
        self.assertRaises(TypeError, pwd.getpwall, 42)

        # try to get some errors
        bynames = {}
        byuids = {}
        for (n, p, u, g, gecos, d, s) in pwd.getpwall():
            bynames[n] = u
            byuids[u] = n

        # Perturb an existing name character by character until we find a
        # name that is not in the database, then expect KeyError for it.
        allnames = list(bynames.keys())
        namei = 0
        fakename = allnames[namei]
        while fakename in bynames:
            chars = list(fakename)
            for i in range(len(chars)):
                if chars[i] == 'z':
                    chars[i] = 'A'
                    break
                elif chars[i] == 'Z':
                    continue
                else:
                    chars[i] = chr(ord(chars[i]) + 1)
                    break
            else:
                namei = namei + 1
                try:
                    fakename = allnames[namei]
                except IndexError:
                    # should never happen... if so, just forget it
                    break
            fakename = ''.join(chars)

        self.assertRaises(KeyError, pwd.getpwnam, fakename)

        # In some cases, byuids isn't a complete list of all users in the
        # system, so if we try to pick a value not in byuids (via a perturbing
        # loop, say), pwd.getpwuid() might still be able to find data for that
        # uid. Using sys.maxsize may provoke the same problems, but hopefully
        # it will be a more repeatable failure.
        fakeuid = sys.maxsize
        self.assertNotIn(fakeuid, byuids)
        self.assertRaises(KeyError, pwd.getpwuid, fakeuid)

        # -1 shouldn't be a valid uid because it has a special meaning in many
        # uid-related functions
        self.assertRaises(KeyError, pwd.getpwuid, -1)
        # should be out of uid_t range
        self.assertRaises(KeyError, pwd.getpwuid, 2**128)
        self.assertRaises(KeyError, pwd.getpwuid, -2**128)
def test_main():
    # Delegate to the regrtest-compatible runner from test.support.
    support.run_unittest(PwdTest)

if __name__ == "__main__":
    test_main()
| lgpl-3.0 |
shashankbassi92/tornado | tornado/test/websocket_test.py | 99 | 14775 | from __future__ import absolute_import, division, print_function, with_statement
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler
from tornado.util import u
try:
import tornado.websocket # noqa
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
    """Base class for testing handlers that exposes the on_close event.

    This allows for deterministic cleanup of the associated socket.
    """
    def initialize(self, close_future, compression_options=None):
        # `close_future` is resolved in on_close() so tests can wait for
        # the server side of the connection to finish.
        self.close_future = close_future
        self.compression_options = compression_options

    def get_compression_options(self):
        # Hook override: report whatever the test configured.
        return self.compression_options

    def on_close(self):
        # Publish the close code and reason so tests can assert on them.
        self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
    """Echoes each message back, preserving its text/binary framing."""
    def on_message(self, message):
        is_binary = isinstance(message, bytes)
        self.write_message(message, binary=is_binary)
class ErrorInOnMessageHandler(TestWebSocketHandler):
    """Raises ZeroDivisionError from on_message so tests can verify the
    server logs an uncaught exception and closes the connection."""
    def on_message(self, message):
        1 / 0
class HeaderHandler(TestWebSocketHandler):
    """Echoes the X-Test request header back as the first message."""
    def open(self):
        # RequestHandler methods that touch the HTTP response are not
        # available in a websocket context and raise RuntimeError; make
        # sure that actually happens.
        try:
            self.set_status(503)
        except RuntimeError:
            pass
        else:
            raise Exception("did not get expected exception")
        self.write_message(self.request.headers.get('X-Test', ''))
class NonWebSocketHandler(RequestHandler):
    """Plain HTTP handler, used to exercise websocket connects against a
    non-websocket URL."""
    def get(self):
        self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
    """Closes the connection immediately with an explicit code and reason."""
    def open(self):
        self.on_close_called = False
        # 1001 = "going away"; the client side asserts both values.
        self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
    """Echo handler whose prepare() is asynchronous, exercising the
    coroutine-prepare code path of the websocket handshake."""
    @gen.coroutine
    def prepare(self):
        # Yield once so prepare() completes on a later event-loop
        # iteration instead of synchronously.
        yield gen.moment
    def on_message(self, message):
        self.write_message(message)
class WebSocketBaseTestCase(AsyncHTTPTestCase):
    """Shared plumbing for websocket tests: connect and clean shutdown."""
    @gen.coroutine
    def ws_connect(self, path, compression_options=None):
        """Open a client websocket to *path* on the test server."""
        url = 'ws://127.0.0.1:%d%s' % (self.get_http_port(), path)
        conn = yield websocket_connect(
            url, compression_options=compression_options)
        raise gen.Return(conn)
    @gen.coroutine
    def close(self, ws):
        """Close a websocket connection and wait for the server side.
        If we don't wait here, there are sometimes leak warnings in the
        tests.
        """
        ws.close()
        yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
    """End-to-end protocol tests: handshake failures, text/binary echo,
    close codes and reasons, and Origin header enforcement."""
    def get_app(self):
        self.close_future = Future()
        return Application([
            ('/echo', EchoHandler, dict(close_future=self.close_future)),
            ('/non_ws', NonWebSocketHandler),
            ('/header', HeaderHandler, dict(close_future=self.close_future)),
            ('/close_reason', CloseReasonHandler,
             dict(close_future=self.close_future)),
            ('/error_in_on_message', ErrorInOnMessageHandler,
             dict(close_future=self.close_future)),
            ('/async_prepare', AsyncPrepareHandler,
             dict(close_future=self.close_future)),
        ])
    def test_http_request(self):
        # WS server, HTTP client.
        response = self.fetch('/echo')
        self.assertEqual(response.code, 400)
    @gen_test
    def test_websocket_gen(self):
        ws = yield self.ws_connect('/echo')
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    def test_websocket_callbacks(self):
        # Same round-trip as test_websocket_gen, but via the
        # callback-style interface instead of coroutines.
        websocket_connect(
            'ws://127.0.0.1:%d/echo' % self.get_http_port(),
            io_loop=self.io_loop, callback=self.stop)
        ws = self.wait().result()
        ws.write_message('hello')
        ws.read_message(self.stop)
        response = self.wait().result()
        self.assertEqual(response, 'hello')
        self.close_future.add_done_callback(lambda f: self.stop())
        ws.close()
        self.wait()
    @gen_test
    def test_binary_message(self):
        ws = yield self.ws_connect('/echo')
        ws.write_message(b'hello \xe9', binary=True)
        response = yield ws.read_message()
        self.assertEqual(response, b'hello \xe9')
        yield self.close(ws)
    @gen_test
    def test_unicode_message(self):
        ws = yield self.ws_connect('/echo')
        ws.write_message(u('hello \u00e9'))
        response = yield ws.read_message()
        self.assertEqual(response, u('hello \u00e9'))
        yield self.close(ws)
    @gen_test
    def test_error_in_on_message(self):
        ws = yield self.ws_connect('/error_in_on_message')
        ws.write_message('hello')
        with ExpectLog(app_log, "Uncaught exception"):
            response = yield ws.read_message()
        # None means the server closed the connection after the error.
        self.assertIs(response, None)
        yield self.close(ws)
    @gen_test
    def test_websocket_http_fail(self):
        with self.assertRaises(HTTPError) as cm:
            yield self.ws_connect('/notfound')
        self.assertEqual(cm.exception.code, 404)
    @gen_test
    def test_websocket_http_success(self):
        # Connecting to a plain-HTTP endpoint must fail the WS handshake.
        with self.assertRaises(WebSocketError):
            yield self.ws_connect('/non_ws')
    @gen_test
    def test_websocket_network_fail(self):
        # Bind then immediately close a port so the connect is refused.
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(IOError):
            with ExpectLog(gen_log, ".*"):
                yield websocket_connect(
                    'ws://127.0.0.1:%d/' % port,
                    io_loop=self.io_loop,
                    connect_timeout=3600)
    @gen_test
    def test_websocket_close_buffered_data(self):
        ws = yield websocket_connect(
            'ws://127.0.0.1:%d/echo' % self.get_http_port())
        ws.write_message('hello')
        ws.write_message('world')
        # Close the underlying stream.
        ws.stream.close()
        yield self.close_future
    @gen_test
    def test_websocket_headers(self):
        # Ensure that arbitrary headers can be passed through websocket_connect.
        ws = yield websocket_connect(
            HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
                        headers={'X-Test': 'hello'}))
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_server_close_reason(self):
        ws = yield self.ws_connect('/close_reason')
        msg = yield ws.read_message()
        # A message of None means the other side closed the connection.
        self.assertIs(msg, None)
        self.assertEqual(ws.close_code, 1001)
        self.assertEqual(ws.close_reason, "goodbye")
        # The on_close callback is called no matter which side closed.
        code, reason = yield self.close_future
        # The client echoed the close code it received to the server,
        # so the server's close code (returned via close_future) is
        # the same.
        self.assertEqual(code, 1001)
    @gen_test
    def test_client_close_reason(self):
        ws = yield self.ws_connect('/echo')
        ws.close(1001, 'goodbye')
        code, reason = yield self.close_future
        self.assertEqual(code, 1001)
        self.assertEqual(reason, 'goodbye')
    @gen_test
    def test_async_prepare(self):
        # Previously, an async prepare method triggered a bug that would
        # result in a timeout on test shutdown (and a memory leak).
        ws = yield self.ws_connect('/async_prepare')
        ws.write_message('hello')
        res = yield ws.read_message()
        self.assertEqual(res, 'hello')
    @gen_test
    def test_check_origin_valid_no_path(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        headers = {'Origin': 'http://127.0.0.1:%d' % port}
        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_check_origin_valid_with_path(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
        ws = yield websocket_connect(HTTPRequest(url, headers=headers),
                                     io_loop=self.io_loop)
        ws.write_message('hello')
        response = yield ws.read_message()
        self.assertEqual(response, 'hello')
        yield self.close(ws)
    @gen_test
    def test_check_origin_invalid_partial_url(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        # An Origin with no scheme must be rejected.
        headers = {'Origin': '127.0.0.1:%d' % port}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
    @gen_test
    def test_check_origin_invalid(self):
        port = self.get_http_port()
        url = 'ws://127.0.0.1:%d/echo' % port
        # Host is 127.0.0.1, which should not be accessible from some other
        # domain
        headers = {'Origin': 'http://somewhereelse.com'}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
    @gen_test
    def test_check_origin_invalid_subdomains(self):
        port = self.get_http_port()
        url = 'ws://localhost:%d/echo' % port
        # Subdomains should be disallowed by default. If we could pass a
        # resolver to websocket_connect we could test sibling domains as well.
        headers = {'Origin': 'http://subtenant.localhost'}
        with self.assertRaises(HTTPError) as cm:
            yield websocket_connect(HTTPRequest(url, headers=headers),
                                    io_loop=self.io_loop)
        self.assertEqual(cm.exception.code, 403)
class CompressionTestMixin(object):
    """Shared machinery for the permessage-deflate negotiation tests."""
    MESSAGE = 'Hello world. Testing 123 123'
    def get_app(self):
        """Build an app with one echo endpoint wired for compression."""
        self.close_future = Future()
        server_options = self.get_server_compression_options()
        handlers = [
            ('/echo', EchoHandler, dict(
                close_future=self.close_future,
                compression_options=server_options)),
        ]
        return Application(handlers)
    def get_server_compression_options(self):
        # Overridden by subclasses to enable server-side compression.
        return None
    def get_client_compression_options(self):
        # Overridden by subclasses to enable client-side compression.
        return None
    @gen_test
    def test_message_sizes(self):
        conn = yield self.ws_connect(
            '/echo',
            compression_options=self.get_client_compression_options())
        # Send the same message three times so we can measure the
        # effect of the context_takeover options.
        for _ in range(3):
            conn.write_message(self.MESSAGE)
            echoed = yield conn.read_message()
            self.assertEqual(echoed, self.MESSAGE)
        expected_payload = len(self.MESSAGE) * 3
        self.assertEqual(conn.protocol._message_bytes_out, expected_payload)
        self.assertEqual(conn.protocol._message_bytes_in, expected_payload)
        self.verify_wire_bytes(conn.protocol._wire_bytes_in,
                               conn.protocol._wire_bytes_out)
        yield self.close(conn)
class UncompressedTestMixin(CompressionTestMixin):
    """CompressionTestMixin variant for when no compression is negotiated."""
    def verify_wire_bytes(self, bytes_in, bytes_out):
        msg_len = len(self.MESSAGE)
        # Outgoing frames carry the 4-byte mask key on top of the
        # 2-byte header; incoming frames carry only the header.
        self.assertEqual(bytes_out, 3 * (msg_len + 6))
        self.assertEqual(bytes_in, 3 * (msg_len + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    """Neither side requests compression, so none is negotiated."""
    pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    """Server enables compression but the client does not request it."""
    def get_server_compression_options(self):
        return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
    """Client requests compression but the server does not offer it."""
    def get_client_compression_options(self):
        return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
    """Both sides enable compression with default options."""
    def get_server_compression_options(self):
        return {}
    def get_client_compression_options(self):
        return {}
    def verify_wire_bytes(self, bytes_in, bytes_out):
        # Compressed traffic must be strictly smaller than the
        # uncompressed framing would have been.
        msg_len = len(self.MESSAGE)
        self.assertLess(bytes_out, 3 * (msg_len + 6))
        self.assertLess(bytes_in, 3 * (msg_len + 2))
        # Bytes out includes the 4 bytes mask key per message.
        self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
    # Subclasses must provide self.mask(mask, data).
    def test_mask(self):
        cases = [
            (b'abcd', b'', b''),
            (b'abcd', b'b', b'\x03'),
            (b'abcd', b'54321', b'TVPVP'),
            (b'ZXCV', b'98765432', b'c`t`olpd'),
            # Cases with \x00 bytes (to ensure that the C extension
            # isn't depending on null-terminated strings) and bytes
            # with the high bit set (to smoke out signedness issues).
            (b'\x00\x01\x02\x03',
             b'\xff\xfb\xfd\xfc\xfe\xfa',
             b'\xff\xfa\xff\xff\xfe\xfb'),
            (b'\xff\xfb\xfd\xfc',
             b'\x00\x01\x02\x03\x04\x05',
             b'\xff\xfa\xff\xff\xfb\xfe'),
        ]
        for mask_key, data, expected in cases:
            self.assertEqual(self.mask(mask_key, data), expected)
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    """Runs the mask cases against the pure-Python implementation."""
    def mask(self, mask, data):
        return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
    """Runs the mask cases against the compiled speedups extension."""
    def mask(self, mask, data):
        return speedups.websocket_mask(mask, data)
| apache-2.0 |
lopp2005/HiSpatialCluster | tool_densfilter.py | 2 | 5412 | # -*- coding: utf-8 -*-
"""
Density Filter Tool
Created on Thu May 11 11:03:05 2017
@author: cheny
"""
from arcpy import Parameter
import arcpy
from section_cpu import dens_filter_cpu
from multiprocessing import cpu_count
class DensFilterTool(object):
    """ArcGIS geoprocessing tool: density-filter post-processing step
    of the HiSpatialCluster workflow (wraps dens_filter_cpu)."""
    def __init__(self):
        """Classify Tool"""
        self.label = "4 Density Filtering Tool"
        self.description = "Post Processing - Density Filter"
        self.canRunInBackground = True

    def _make_param(self, display_name, name, datatype, direction,
                    filter_list=None, dependencies=None, value=None):
        """Build one required arcpy Parameter; collapses the repeated
        construction boilerplate in getParameterInfo."""
        param = Parameter(
            displayName=display_name,
            name=name,
            datatype=datatype,
            parameterType="Required",
            direction=direction)
        if filter_list is not None:
            param.filter.list = filter_list
        if dependencies is not None:
            param.parameterDependencies = dependencies
        if value is not None:
            param.value = value
        return param

    def getParameterInfo(self):
        """Define parameter definitions.

        Returns the ten tool parameters in positional order; execute()
        reads them back by index."""
        #1
        paramclsinput = self._make_param(
            "Input Classified Points", "in_cls_points", "DEFeatureClass",
            "Input", filter_list=["Point"])
        #2
        paramcntrinput = self._make_param(
            "Input Centers Points", "in_cntr_points", "DEFeatureClass",
            "Input", filter_list=["Point"])
        #3
        paramidfield = self._make_param(
            "Identifier Field", "id_field", "Field", "Input",
            filter_list=['Short', 'Long'],
            dependencies=[paramclsinput.name])
        #4
        paramcntridfield = self._make_param(
            "Center ID Field", "cntr_id_field", "Field", "Input",
            filter_list=['Short', 'Long'],
            dependencies=[paramclsinput.name], value='CNTR_ID')
        #5 - accept any numeric field type for the density values
        paramdens = self._make_param(
            "Density Field", "density_field", "Field", "Input",
            filter_list=['Short', 'Long', 'Float', 'Single', 'Double'],
            dependencies=[paramclsinput.name], value='DENSITY')
        #6
        paramclsoutput = self._make_param(
            "Output Classified Points", "out_cls_points", "DEFeatureClass",
            "Output")
        #7
        paramdistthrs = self._make_param(
            "Distance for Density Connection", "distthrs", "GPDouble",
            "Input", value=100.0)
        #8
        paramdensthrs = self._make_param(
            "Density Threshold for Density Connection", "densthrs",
            "GPDouble", "Input", value=1.2)
        #9 - only the CPU implementation is available for this step
        paramdevice = self._make_param(
            "Device for Calculation", "calc_device", "GPString", "Input",
            filter_list=['CPU'], value='CPU')
        #10
        paramcpuc = self._make_param(
            "CPU Parallel Cores", "cpu_cores", "GPLong", "Input",
            value=cpu_count())
        return [paramclsinput, paramcntrinput, paramidfield,
                paramcntridfield, paramdens, paramclsoutput,
                paramdistthrs, paramdensthrs, paramdevice,
                paramcpuc]

    def updateParameters(self, parameters):
        """Derive a default output path from the input feature class
        once the input is set and the output has not been edited."""
        if parameters[0].altered and not parameters[5].altered:
            in_fe = parameters[0].valueAsText
            # Insert '_filter' before the '.shp' extension for shapefiles,
            # otherwise append it (geodatabase feature classes).
            parameters[5].value = in_fe[:len(in_fe)-4]+'_filter'+in_fe[-4:] if in_fe[-3:]=='shp' else in_fe+'_filter'
        return

    def execute(self, parameters, messages):
        """Unpack the parameter values and run the CPU density filter."""
        cls_input = parameters[0].valueAsText
        cntr_input = parameters[1].valueAsText
        id_field = parameters[2].valueAsText
        cntr_id_field = parameters[3].valueAsText
        dens_field = parameters[4].valueAsText
        cls_output = parameters[5].valueAsText
        dist_thrs = parameters[6].value
        dens_thrs = parameters[7].value
        # parameters[8] (device) is intentionally unused: only CPU is offered.
        cpu_core = parameters[9].value
        dens_filter_cpu(cls_input, cntr_input, id_field,
                        cntr_id_field, dens_field, cls_output,
                        dist_thrs, dens_thrs, cpu_core)
        return
| apache-2.0 |
shizeeg/pyicqt | src/xdb/pgsql.py | 1 | 8944 | # Copyright 2005-2006 Daniel Henninger <jadestorm@nc.rr.com>
# Licensed for distribution under the GPL version 2, check COPYING for details
#
# Ported to PostgreSQL by sh!zeeg <shizeeque@gmail.com>
#
# PostgreSQL database storage. See db-setup.pgsql in the tools directory in
# the root of the distribution, as well as configuration options in your
# transport config file. (see config_example.xml)
#
import config
import sys
import psycopg2 as sqldb
import re
class XDB:
    """
    PostgreSQL-backed storage of per-user transport data: registrations,
    settings, x-statuses and contact lists.

    All statements use psycopg2 parameter binding (%s placeholders plus a
    parameter tuple) so values are quoted by the driver; the previous
    string-interpolated SQL was injectable by any user-controlled JID.
    """
    def __init__(self, name):
        """ Creates an XDB object and opens the database connection.
        The name argument is kept for driver-interface compatibility
        and is unused by this backend. """
        self.db = self._connect()
        if not self.db:
            print("Unable to connect to PostgreSQL database.")
            sys.exit(1)

    def _connect(self):
        """ Open a new connection using the transport configuration. """
        return sqldb.connect(
            host=config.xdbDriver_pgsql["server"],
            user=config.xdbDriver_pgsql["username"],
            password=config.xdbDriver_pgsql["password"],
            dbname=config.xdbDriver_pgsql["database"]
        )

    def db_ping(self):
        """
        Verify the connection with a trivial query and reconnect once on
        failure (psycopg2 has no MySQLdb-style ping()).
        """
        try:
            c = self.db.cursor()
            c.execute("SELECT 1;")
        except Exception:
            self.db = self._connect()
            c = self.db.cursor()
            c.execute("SELECT 1;")

    def getRegistration(self, jabberID):
        """ Retrieve registration information from the XDB.
        Returns a (username, password) tuple, or None if unregistered. """
        self.db_ping()
        c = self.db.cursor()
        # NOTE(review): UNHEX()/HEX() are MySQL functions and do not exist
        # in PostgreSQL; the encrypted-password path probably needs
        # encode()/decode() instead -- confirm against the db-setup script.
        c.execute("SELECT username,password,UNHEX(encryptedpassword) FROM register WHERE owner = %s", (jabberID,))
        ret = c.fetchone()
        if not ret:
            return None
        (username, password, encpass) = ret
        if encpass:
            return (username, encpass)
        return (username, password)

    def getRegistrationList(self):
        """ Returns a list of all of the registered jids. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("SELECT owner FROM register")
        return [row[0] for row in c.fetchall()]

    def setRegistration(self, jabberID, username, password):
        """ Sets up or creates a registration in the XDB.
        username and password are for the legacy account. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("DELETE FROM register WHERE owner = %s", (jabberID,))
        # Bug fix: this previously consulted config.xdbDriver_mysql,
        # which is the wrong driver section for the PostgreSQL backend.
        if config.xdbDriver_pgsql.get("format", "") == "encrypted":
            c.execute("INSERT INTO register(owner,username,encryptedpassword) VALUES(%s,%s,HEX(%s))", (jabberID, username, password))
        else:
            c.execute("INSERT INTO register(owner,username,password) VALUES(%s,%s,%s)", (jabberID, username, password))

    def removeRegistration(self, jabberID):
        """ Removes a registration and all associated data from the XDB. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("DELETE FROM register WHERE owner = %s", (jabberID,))
        c.execute("DELETE FROM settings WHERE owner = %s", (jabberID,))
        c.execute("DELETE FROM lists WHERE owner = %s", (jabberID,))
        c.execute("DELETE FROM list_attributes WHERE owner = %s", (jabberID,))

    def getSettingList(self, jabberID):
        """ Gets a dict of all settings for a user from the XDB. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("SELECT variable,value FROM settings WHERE owner = %s", (jabberID,))
        # Bug fix: results was initialized as a list but indexed by
        # variable name, which raised TypeError on the first row.
        results = {}
        for (variable, value) in c.fetchall():
            results[variable] = value
        return results

    def getSetting(self, jabberID, variable):
        """ Gets a user setting from the XDB, or None if unset. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("SELECT value FROM settings WHERE owner = %s AND variable = %s", (jabberID, variable))
        ret = c.fetchone()
        if ret:
            return ret[0]
        return None

    def setSetting(self, jabberID, variable, value):
        """ Sets a user setting in the XDB. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("DELETE FROM settings WHERE owner = %s AND variable = %s", (jabberID, variable))
        c.execute("INSERT INTO settings(owner,variable,value) VALUES(%s,%s,%s)", (jabberID, variable, value))

    def getCSetting(self, jabberID, variable):
        """ Gets a custom user setting from the XDB, or None if unset. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("SELECT value FROM csettings WHERE owner = %s AND variable = %s", (jabberID, variable))
        ret = c.fetchone()
        if ret:
            return ret[0]
        return None

    def setCSetting(self, jabberID, variable, value):
        """ Sets a custom user setting in the XDB. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("DELETE FROM csettings WHERE owner = %s AND variable = %s", (jabberID, variable))
        c.execute("INSERT INTO csettings(owner,variable,value) VALUES(%s,%s,%s)", (jabberID, variable, value))

    def getXstatusText(self, jabberID, number):
        """ Get the latest title and description for an x-status.
        Returns ('','') when nothing is stored. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("SELECT title, value FROM xstatuses WHERE owner = %s AND number = %s", (jabberID, number))
        ret = c.fetchone()
        if ret:
            return (ret[0], ret[1])
        return ('','')

    def setXstatusText(self, jabberID, number, title, desc):
        """ Set the latest title and description for an x-status. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("DELETE FROM xstatuses WHERE owner = %s AND number = %s", (jabberID, number))
        c.execute("INSERT INTO xstatuses(owner,number,title,value) VALUES(%s,%s,%s,%s)", (jabberID, number, title, desc))

    def getCSettingList(self, jabberID):
        """ Gets a dict of all custom settings for a user from the XDB. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("SELECT variable,value FROM csettings WHERE owner = %s", (jabberID,))
        results = {}
        for (variable, value) in c.fetchall():
            results[variable] = value
        return results

    def getListEntry(self, type, jabberID, legacyID):
        """ Retrieves the attribute dict of one legacy ID entry from a
        list in the XDB, based off the type and jabberID you provide. """
        self.db_ping()
        attributes = {}
        c = self.db.cursor()
        c.execute("SELECT attribute,value FROM list_attributes WHERE owner = %s AND type = %s AND jid = %s", (jabberID, type, legacyID))
        # Bug fix: the old loop unpacked ret[0:1] (a single element)
        # into two names, raising ValueError on any non-empty result.
        for (attribute, value) in c.fetchall():
            attributes[attribute] = value
        return attributes

    def getListTypes(self, jabberID):
        """ Returns an array containing a list of all list types
        associated with a user. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("SELECT type FROM lists WHERE owner = %s", (jabberID,))
        return [row[0] for row in c.fetchall()]

    def getList(self, type, jabberID):
        """ Retrieves an entire list for a jabberID from the XDB as
        [jid, attribute-dict] pairs, based off the type you provide. """
        self.db_ping()
        entities = []
        c = self.db.cursor()
        c.execute("SELECT jid FROM lists WHERE owner = %s AND type = %s", (jabberID, type))
        for row in c.fetchall():
            jid = row[0]
            attributes = {}
            ic = self.db.cursor()
            ic.execute("SELECT attribute,value FROM list_attributes WHERE owner = %s AND type = %s AND jid = %s", (jabberID, type, jid))
            for (attribute, value) in ic.fetchall():
                attributes[attribute] = value
            # Bug fix: the entity was built but never appended, so this
            # method always returned an empty list.
            entities.append([jid, attributes])
        return entities

    def setListEntry(self, type, jabberID, legacyID, payload=None):
        """ Updates or adds a legacy ID entry and its attributes to a
        list in the XDB, based off the type and jabberID you provide. """
        # Avoid the old shared mutable default argument.
        payload = {} if payload is None else payload
        self.db_ping()
        c = self.db.cursor()
        c.execute("DELETE FROM lists WHERE owner = %s AND type = %s AND jid = %s", (jabberID, type, legacyID))
        c.execute("DELETE FROM list_attributes WHERE owner = %s AND type = %s AND jid = %s", (jabberID, type, legacyID))
        c.execute("INSERT INTO lists(owner,type,jid) VALUES(%s,%s,%s)", (jabberID, type, legacyID))
        for p in payload.keys():
            # Driver-side binding replaces the old re.escape()-based
            # quoting, which corrupted the stored attribute values.
            c.execute("INSERT INTO list_attributes(owner,type,jid,attribute,value) VALUES(%s,%s,%s,%s,%s)", (jabberID, type, legacyID, p, payload[p]))

    def removeListEntry(self, type, jabberID, legacyID):
        """ Removes a legacy ID entry from a list in
        the XDB, based off the type and jabberID you provide. """
        self.db_ping()
        c = self.db.cursor()
        c.execute("DELETE FROM lists WHERE owner = %s AND type = %s AND jid = %s", (jabberID, type, legacyID))
        c.execute("DELETE FROM list_attributes WHERE owner = %s AND type = %s AND jid = %s", (jabberID, type, legacyID))
def housekeep():
    """ Perform cleanup type tasks upon startup. """
    # No periodic maintenance is needed for this backend.
    pass
| gpl-2.0 |
vprime/puuuu | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/constants.py | 3008 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Set to a nonzero value to enable the probers' debug output.
_debug = 0

# Overall detection results reported by the probers.
eDetecting = 0  # still examining input
eFoundIt = 1    # a charset has been identified
eNotMe = 2      # this prober has ruled itself out

# Coding state machine states.
eStart = 0
eError = 1
eItsMe = 2

# Confidence level at which detection can stop early.
SHORTCUT_THRESHOLD = 0.95
| mit |
maxziv/SEApp | server/lib/flask/helpers.py | 776 | 33793 | # -*- coding: utf-8 -*-
"""
flask.helpers
~~~~~~~~~~~~~
Implements various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import pkgutil
import posixpath
import mimetypes
from time import time
from zlib import adler32
from threading import RLock
from werkzeug.routing import BuildError
from functools import update_wrapper
try:
from werkzeug.urls import url_quote
except ImportError:
from urlparse import quote as url_quote
from werkzeug.datastructures import Headers
from werkzeug.exceptions import NotFound
# this was moved in 0.7
try:
from werkzeug.wsgi import wrap_file
except ImportError:
from werkzeug.utils import wrap_file
from jinja2 import FileSystemLoader
from .signals import message_flashed
from .globals import session, _request_ctx_stack, _app_ctx_stack, \
current_app, request
from ._compat import string_types, text_type
# sentinel object used to detect "no value supplied" (distinct from None)
_missing = object()

# what separators does this operating system provide that are not a slash?
# this is used by the send_from_directory function to ensure that nobody is
# able to access files from outside the filesystem.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
                    if sep not in (None, '/'))
def _endpoint_from_view_func(view_func):
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, 'expected view func if endpoint ' \
'is not provided.'
return view_func.__name__
def stream_with_context(generator_or_function):
    """Request contexts disappear when the response is started on the server.
    This is done for efficiency reasons and to make it less likely to encounter
    memory leaks with badly written WSGI middlewares.  The downside is that if
    you are using streamed responses, the generator cannot access request bound
    information any more.
    This function however can help you keep the context around for longer::
        from flask import stream_with_context, request, Response
        @app.route('/stream')
        def streamed_response():
            @stream_with_context
            def generate():
                yield 'Hello '
                yield request.args['name']
                yield '!'
            return Response(generate())
    Alternatively it can also be used around a specific generator::
        from flask import stream_with_context, request, Response
        @app.route('/stream')
        def streamed_response():
            def generate():
                yield 'Hello '
                yield request.args['name']
                yield '!'
            return Response(stream_with_context(generate()))
    .. versionadded:: 0.9
    """
    try:
        gen = iter(generator_or_function)
    except TypeError:
        # Not iterable: we were used as a decorator on a generator
        # *function*.  Defer the context capture until it is called.
        def decorator(*args, **kwargs):
            gen = generator_or_function()
            return stream_with_context(gen)
        return update_wrapper(decorator, generator_or_function)
    def generator():
        ctx = _request_ctx_stack.top
        if ctx is None:
            raise RuntimeError('Attempted to stream with context but '
                'there was no context in the first place to keep around.')
        with ctx:
            # Dummy sentinel. Has to be inside the context block or we're
            # not actually keeping the context around.
            yield None
            # The try/finally is here so that if someone passes a WSGI level
            # iterator in we're still running the cleanup logic. Generators
            # don't need that because they are closed on their destruction
            # automatically.
            try:
                for item in gen:
                    yield item
            finally:
                if hasattr(gen, 'close'):
                    gen.close()
    # The trick is to start the generator. Then the code execution runs until
    # the first dummy None is yielded at which point the context was already
    # pushed. This item is discarded. Then when the iteration continues the
    # real generator is executed.
    wrapped_g = generator()
    next(wrapped_g)
    return wrapped_g
def make_response(*args):
    """Coerce view-function style return values into a response object.

    With no arguments this returns a new empty response.  With a single
    argument, that value is handed to :meth:`flask.Flask.make_response`;
    with several arguments they are passed along as a tuple.  This is
    useful when a view wants to attach headers to an implicitly created
    response::

        def index():
            response = make_response(render_template('index.html', foo=42))
            response.headers['X-Parachutes'] = 'parachutes are cool'
            return response

    It accepts the very same values a view function may return, e.g. a
    body with a status code::

        response = make_response(render_template('not_found.html'), 404)

    and it is also handy for forcing the return value of a wrapped view
    into a real response object inside decorators.

    .. versionadded:: 0.6
    """
    if len(args) == 0:
        return current_app.response_class()
    rv = args[0] if len(args) == 1 else args
    return current_app.make_response(rv)
def url_for(endpoint, **values):
    """Generates a URL to the given endpoint with the method provided.

    Variable arguments that are unknown to the target endpoint are appended
    to the generated URL as query arguments. If the value of a query argument
    is `None`, the whole pair is skipped. In case blueprints are active
    you can shortcut references to the same blueprint by prefixing the
    local endpoint with a dot (``.``).

    This will reference the index function local to the current blueprint::

        url_for('.index')

    For more information, head over to the :ref:`Quickstart <url-building>`.

    To integrate applications, :class:`Flask` has a hook to intercept URL build
    errors through :attr:`Flask.build_error_handler`. The `url_for` function
    results in a :exc:`~werkzeug.routing.BuildError` when the current app does
    not have a URL for the given endpoint and values. When it does, the
    :data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if
    it is not `None`, which can return a string to use as the result of
    `url_for` (instead of `url_for`'s default to raise the
    :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
    An example::

        def external_url_handler(error, endpoint, **values):
            "Looks up an external URL when `url_for` cannot build a URL."
            # This is an example of hooking the build_error_handler.
            # Here, lookup_url is some utility function you've built
            # which looks up the endpoint in some external URL registry.
            url = lookup_url(endpoint, **values)
            if url is None:
                # External lookup did not have a URL.
                # Re-raise the BuildError, in context of original traceback.
                exc_type, exc_value, tb = sys.exc_info()
                if exc_value is error:
                    raise exc_type, exc_value, tb
                else:
                    raise error
            # url_for will use this result, instead of raising BuildError.
            return url

        app.build_error_handler = external_url_handler

    Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
    `endpoint` and `**values` are the arguments passed into `url_for`. Note
    that this is for building URLs outside the current application, and not for
    handling 404 NotFound errors.

    .. versionadded:: 0.10
       The `_scheme` parameter was added.

    .. versionadded:: 0.9
       The `_anchor` and `_method` parameters were added.

    .. versionadded:: 0.9
       Calls :meth:`Flask.handle_build_error` on
       :exc:`~werkzeug.routing.BuildError`.

    :param endpoint: the endpoint of the URL (name of the function)
    :param values: the variable arguments of the URL rule
    :param _external: if set to `True`, an absolute URL is generated. Server
      address can be changed via `SERVER_NAME` configuration variable which
      defaults to `localhost`.
    :param _scheme: a string specifying the desired URL scheme. The `_external`
      parameter must be set to `True` or a `ValueError` is raised.
    :param _anchor: if provided this is added as anchor to the URL.
    :param _method: if provided this explicitly specifies an HTTP method.
    """
    appctx = _app_ctx_stack.top
    reqctx = _request_ctx_stack.top
    # An application context is mandatory; a request context is optional and
    # only changes the defaults (relative vs. external URLs).
    if appctx is None:
        raise RuntimeError('Attempted to generate a URL without the '
                           'application context being pushed. This has to be '
                           'executed when application context is available.')
    # If request specific information is available we have some extra
    # features that support "relative" urls.
    if reqctx is not None:
        url_adapter = reqctx.url_adapter
        blueprint_name = request.blueprint
        if not reqctx.request._is_old_module:
            # Expand a blueprint-relative endpoint (leading dot) into a fully
            # qualified one; outside any blueprint the dot is simply dropped.
            if endpoint[:1] == '.':
                if blueprint_name is not None:
                    endpoint = blueprint_name + endpoint
                else:
                    endpoint = endpoint[1:]
        else:
            # TODO: get rid of this deprecated functionality in 1.0
            if '.' not in endpoint:
                if blueprint_name is not None:
                    endpoint = blueprint_name + '.' + endpoint
            elif endpoint.startswith('.'):
                endpoint = endpoint[1:]
        # Inside a request, URLs default to relative (path-only) form.
        external = values.pop('_external', False)
    # Otherwise go with the url adapter from the appctx and make
    # the urls external by default.
    else:
        url_adapter = appctx.url_adapter
        if url_adapter is None:
            raise RuntimeError('Application was not able to create a URL '
                               'adapter for request independent URL generation. '
                               'You might be able to fix this by setting '
                               'the SERVER_NAME config variable.')
        external = values.pop('_external', True)
    anchor = values.pop('_anchor', None)
    method = values.pop('_method', None)
    scheme = values.pop('_scheme', None)
    # Let the app (and any blueprint url_defaults callbacks) fill in values.
    appctx.app.inject_url_defaults(endpoint, values)
    if scheme is not None:
        if not external:
            raise ValueError('When specifying _scheme, _external must be True')
        # NOTE(review): this mutates the adapter's scheme in place for this
        # build; presumably safe because each request gets its own adapter —
        # confirm before reusing adapters across requests.
        url_adapter.url_scheme = scheme
    try:
        rv = url_adapter.build(endpoint, values, method=method,
                               force_external=external)
    except BuildError as error:
        # We need to inject the values again so that the app callback can
        # deal with that sort of stuff.
        values['_external'] = external
        values['_anchor'] = anchor
        values['_method'] = method
        return appctx.app.handle_url_build_error(error, endpoint, values)
    if anchor is not None:
        rv += '#' + url_quote(anchor)
    return rv
def get_template_attribute(template_name, attribute):
    """Loads a macro (or variable) a template exports.  This can be used to
    invoke a macro from within Python code.  If you for example have a
    template named `_cider.html` with the following contents:

    .. sourcecode:: html+jinja

       {% macro hello(name) %}Hello {{ name }}!{% endmacro %}

    You can access this from Python code like this::

        hello = get_template_attribute('_cider.html', 'hello')
        return hello('World')

    .. versionadded:: 0.2

    :param template_name: the name of the template
    :param attribute: the name of the variable of macro to access
    """
    # Load (or fetch from the environment's cache) the template, then pull
    # the exported name off its module namespace.
    template = current_app.jinja_env.get_template(template_name)
    return getattr(template.module, attribute)
def flash(message, category='message'):
    """Flashes a message to the next request.  In order to remove the
    flashed message from the session and to display it to the user,
    the template has to call :func:`get_flashed_messages`.

    .. versionchanged:: 0.3
       `category` parameter added.

    :param message: the message to be flashed.
    :param category: the category for the message.  The following values
                     are recommended: ``'message'`` for any kind of message,
                     ``'error'`` for errors, ``'info'`` for information
                     messages and ``'warning'`` for warnings.  However any
                     kind of string can be used as category.
    """
    # Read-modify-write the whole list instead of mutating it in place
    # (session.setdefault('_flashes', []).append(...)): session backends
    # that keep their data in external storage only notice reassignments,
    # not in-place mutation of contained structures.
    pending = session.get('_flashes', [])
    pending.append((category, message))
    session['_flashes'] = pending
    message_flashed.send(current_app._get_current_object(),
                         message=message, category=category)
def get_flashed_messages(with_categories=False, category_filter=()):
    """Pulls all flashed messages from the session and returns them.
    Further calls in the same request to the function will return
    the same messages.  By default just the messages are returned,
    but when `with_categories` is set to `True`, the return value will
    be a list of tuples in the form ``(category, message)`` instead.

    Filter the flashed messages to one or more categories by providing those
    categories in `category_filter`.  This allows rendering categories in
    separate html blocks.  The `with_categories` and `category_filter`
    arguments are distinct:

    * `with_categories` controls whether categories are returned with message
      text (`True` gives a tuple, where `False` gives just the message text).
    * `category_filter` filters the messages down to only those matching the
      provided categories.

    See :ref:`message-flashing-pattern` for examples.

    .. versionchanged:: 0.3
       `with_categories` parameter added.

    .. versionchanged:: 0.9
       `category_filter` parameter added.

    :param with_categories: set to `True` to also receive categories.
    :param category_filter: whitelist of categories to limit return values
    """
    # The default for category_filter is an immutable tuple (not a mutable
    # list) so a single shared default object can never be accidentally
    # mutated across calls; an empty tuple is falsy just like an empty list.
    #
    # Cache the popped messages on the request context so that repeated
    # calls within the same request see the same list.
    flashes = _request_ctx_stack.top.flashes
    if flashes is None:
        _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
            if '_flashes' in session else []
    if category_filter:
        # Each flash entry is a (category, message) tuple.
        flashes = [f for f in flashes if f[0] in category_filter]
    if not with_categories:
        return [x[1] for x in flashes]
    return flashes
def send_file(filename_or_fp, mimetype=None, as_attachment=False,
              attachment_filename=None, add_etags=True,
              cache_timeout=None, conditional=False):
    """Sends the contents of a file to the client.  This will use the
    most efficient method available and configured.  By default it will
    try to use the WSGI server's file_wrapper support.  Alternatively
    you can set the application's :attr:`~Flask.use_x_sendfile` attribute
    to ``True`` to directly emit an `X-Sendfile` header.  This however
    requires support of the underlying webserver for `X-Sendfile`.

    By default it will try to guess the mimetype for you, but you can
    also explicitly provide one.  For extra security you probably want
    to send certain files as attachment (HTML for instance).  The mimetype
    guessing requires a `filename` or an `attachment_filename` to be
    provided.

    Please never pass filenames to this function from user sources without
    checking them first.  Something like this is usually sufficient to
    avoid security problems::

        if '..' in filename or filename.startswith('/'):
            abort(404)

    .. versionadded:: 0.2

    .. versionadded:: 0.5
       The `add_etags`, `cache_timeout` and `conditional` parameters were
       added.  The default behavior is now to attach etags.

    .. versionchanged:: 0.7
       mimetype guessing and etag support for file objects was
       deprecated because it was unreliable.  Pass a filename if you are
       able to, otherwise attach an etag yourself.  This functionality
       will be removed in Flask 1.0

    .. versionchanged:: 0.9
       cache_timeout pulls its default from application config, when None.

    :param filename_or_fp: the filename of the file to send.  This is
                           relative to the :attr:`~Flask.root_path` if a
                           relative path is specified.
                           Alternatively a file object might be provided
                           in which case `X-Sendfile` might not work and
                           fall back to the traditional method.  Make sure
                           that the file pointer is positioned at the start
                           of data to send before calling :func:`send_file`.
    :param mimetype: the mimetype of the file if provided, otherwise
                     auto detection happens.
    :param as_attachment: set to `True` if you want to send this file with
                          a ``Content-Disposition: attachment`` header.
    :param attachment_filename: the filename for the attachment if it
                                differs from the file's filename.
    :param add_etags: set to `False` to disable attaching of etags.
    :param conditional: set to `True` to enable conditional responses.
    :param cache_timeout: the timeout in seconds for the headers. When `None`
                          (default), this value is set by
                          :meth:`~Flask.get_send_file_max_age` of
                          :data:`~flask.current_app`.
    """
    # Modification time; only known when we open the file from a path below.
    mtime = None
    if isinstance(filename_or_fp, string_types):
        filename = filename_or_fp
        file = None
    else:
        from warnings import warn
        file = filename_or_fp
        filename = getattr(file, 'name', None)
        # XXX: this behavior is now deprecated because it was unreliable.
        # removed in Flask 1.0
        if not attachment_filename and not mimetype \
                and isinstance(filename, string_types):
            warn(DeprecationWarning('The filename support for file objects '
                'passed to send_file is now deprecated. Pass an '
                'attach_filename if you want mimetypes to be guessed.'),
                stacklevel=2)
        if add_etags:
            warn(DeprecationWarning('In future flask releases etags will no '
                'longer be generated for file objects passed to the send_file '
                'function because this behavior was unreliable. Pass '
                'filenames instead if possible, otherwise attach an etag '
                'yourself based on another value'), stacklevel=2)
    if filename is not None:
        # Relative paths are resolved against the application root.
        if not os.path.isabs(filename):
            filename = os.path.join(current_app.root_path, filename)
    if mimetype is None and (filename or attachment_filename):
        mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
    if mimetype is None:
        # Last resort: generic binary type.
        mimetype = 'application/octet-stream'
    headers = Headers()
    if as_attachment:
        if attachment_filename is None:
            if filename is None:
                raise TypeError('filename unavailable, required for '
                                'sending as attachment')
            attachment_filename = os.path.basename(filename)
        headers.add('Content-Disposition', 'attachment',
                    filename=attachment_filename)
    if current_app.use_x_sendfile and filename:
        # Delegate the actual transfer to the webserver; no body is emitted
        # by the application itself.
        if file is not None:
            file.close()
        headers['X-Sendfile'] = filename
        headers['Content-Length'] = os.path.getsize(filename)
        data = None
    else:
        if file is None:
            file = open(filename, 'rb')
            mtime = os.path.getmtime(filename)
            headers['Content-Length'] = os.path.getsize(filename)
        data = wrap_file(request.environ, file)
    rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
                                    direct_passthrough=True)
    # if we know the file modification date, we can store it as the
    # the time of the last modification.
    if mtime is not None:
        rv.last_modified = int(mtime)
    rv.cache_control.public = True
    if cache_timeout is None:
        cache_timeout = current_app.get_send_file_max_age(filename)
    if cache_timeout is not None:
        rv.cache_control.max_age = cache_timeout
        rv.expires = int(time() + cache_timeout)
    if add_etags and filename is not None:
        rv.set_etag('flask-%s-%s-%s' % (
            os.path.getmtime(filename),
            os.path.getsize(filename),
            adler32(
                filename.encode('utf-8') if isinstance(filename, text_type)
                else filename
            ) & 0xffffffff
        ))
        if conditional:
            rv = rv.make_conditional(request)
            # make sure we don't send x-sendfile for servers that
            # ignore the 304 status code for x-sendfile.
            if rv.status_code == 304:
                rv.headers.pop('x-sendfile', None)
    return rv
def safe_join(directory, filename):
    """Safely join `directory` and `filename`.

    Example usage::

        @app.route('/wiki/<path:filename>')
        def wiki_page(filename):
            filename = safe_join(app.config['WIKI_FOLDER'], filename)
            with open(filename, 'rb') as fd:
                content = fd.read() # Read and process the file content...

    :param directory: the base directory.
    :param filename: the untrusted filename relative to that directory.
    :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
             would fall out of `directory`.
    """
    normalized = posixpath.normpath(filename)
    # Reject any platform-specific separators (e.g. backslash on Windows)
    # that posixpath.normpath does not treat as separators.
    if any(sep in normalized for sep in _os_alt_seps):
        raise NotFound()
    # After normalization, parent references can only remain at the very
    # beginning of the path, so these three checks are sufficient.
    if os.path.isabs(normalized) or \
            normalized == '..' or \
            normalized.startswith('../'):
        raise NotFound()
    return os.path.join(directory, normalized)
def send_from_directory(directory, filename, **options):
    """Send a file from a given directory with :func:`send_file`.  This
    is a secure way to quickly expose static files from an upload folder
    or something similar.

    Example usage::

        @app.route('/uploads/<path:filename>')
        def download_file(filename):
            return send_from_directory(app.config['UPLOAD_FOLDER'],
                                       filename, as_attachment=True)

    .. admonition:: Sending files and Performance

       It is strongly recommended to activate either `X-Sendfile` support in
       your webserver or (if no authentication happens) to tell the webserver
       to serve files for the given path on its own without calling into the
       web application for improved performance.

    .. versionadded:: 0.5

    :param directory: the directory where all the files are stored.
    :param filename: the filename relative to that directory to
                     download.
    :param options: optional keyword arguments that are directly
                    forwarded to :func:`send_file`.
    """
    # safe_join raises NotFound on any path-traversal attempt.
    path = safe_join(directory, filename)
    if not os.path.isfile(path):
        raise NotFound()
    options.setdefault('conditional', True)
    return send_file(path, **options)
def get_root_path(import_name):
    """Returns the path to a package or cwd if that cannot be found.  This
    returns the path of a package or the folder that contains a module.

    Not to be confused with the package path returned by :func:`find_package`.
    """
    # Cheapest case: the module is already imported and knows its file.
    loaded = sys.modules.get(import_name)
    if loaded is not None and hasattr(loaded, '__file__'):
        return os.path.dirname(os.path.abspath(loaded.__file__))

    # Next attempt: ask the import system's loader.
    loader = pkgutil.get_loader(import_name)

    # No loader, or a main module without a usable path (interactive
    # sessions): fall back to the current working directory.
    if loader is None or import_name == '__main__':
        return os.getcwd()

    if hasattr(loader, 'get_filename'):
        filepath = loader.get_filename(import_name)
    else:
        # Some loaders (e.g. old zipimporter for .egg files) lack
        # get_filename; import the module for real and read __file__ off it.
        __import__(import_name)
        filepath = sys.modules[import_name].__file__

    # filepath is import_name.py for a module, or __init__.py for a package.
    return os.path.dirname(os.path.abspath(filepath))
def find_package(import_name):
    """Finds a package and returns the prefix (or None if the package is
    not installed) as well as the folder that contains the package or
    module as a tuple.  The package path returned is the module that would
    have to be added to the pythonpath in order to make it possible to
    import the module.  The prefix is the path below which a UNIX like
    folder structure exists (lib, share etc.).
    """
    # Only the root package matters for locating the importable folder.
    root_mod_name = import_name.split('.')[0]
    loader = pkgutil.get_loader(root_mod_name)
    if loader is None or import_name == '__main__':
        # import name is not found, or interactive/main module
        package_path = os.getcwd()
    else:
        # For .egg, zipimporter does not have get_filename until Python 2.7.
        if hasattr(loader, 'get_filename'):
            filename = loader.get_filename(root_mod_name)
        elif hasattr(loader, 'archive'):
            # zipimporter's loader.archive points to the .egg or .zip
            # archive filename is dropped in call to dirname below.
            filename = loader.archive
        else:
            # At least one loader is missing both get_filename and archive:
            # Google App Engine's HardenedModulesHook
            #
            # Fall back to imports.
            __import__(import_name)
            filename = sys.modules[import_name].__file__
        package_path = os.path.abspath(os.path.dirname(filename))
        # package_path ends with __init__.py for a package
        if loader.is_package(root_mod_name):
            package_path = os.path.dirname(package_path)
    # Decide which prefix the package lives under: inside the interpreter
    # prefix itself, inside a site-packages layout, or neither.
    site_parent, site_folder = os.path.split(package_path)
    py_prefix = os.path.abspath(sys.prefix)
    if package_path.startswith(py_prefix):
        return py_prefix, package_path
    elif site_folder.lower() == 'site-packages':
        parent, folder = os.path.split(site_parent)
        # Windows like installations
        if folder.lower() == 'lib':
            base_dir = parent
        # UNIX like installations
        elif os.path.basename(parent).lower() == 'lib':
            base_dir = os.path.dirname(parent)
        else:
            base_dir = site_parent
        return base_dir, package_path
    # Not under any recognizable installation prefix.
    return None, package_path
class locked_cached_property(object):
    """A decorator that turns a function into a lazily evaluated,
    thread-safe property.  The wrapped function runs once on first
    access; the result is stored on the instance and returned directly
    on every later access.  Works like the one in Werkzeug but holds an
    :class:`~threading.RLock` while computing the value.
    """

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func
        self.lock = RLock()

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        with self.lock:
            cached = obj.__dict__.get(self.__name__, _missing)
            if cached is _missing:
                cached = self.func(obj)
                obj.__dict__[self.__name__] = cached
            return cached
class _PackageBoundObject(object):
    """Base for objects that are bound to a Python package or module and
    can therefore resolve resources such as templates and static files
    relative to that package on the filesystem.
    """

    def __init__(self, import_name, template_folder=None):
        #: The name of the package or module. Do not change this once
        #: it was set by the constructor.
        self.import_name = import_name

        #: location of the templates. `None` if templates should not be
        #: exposed.
        self.template_folder = template_folder

        #: Where is the app root located?
        self.root_path = get_root_path(self.import_name)

        # Backing storage for the static_folder/static_url_path properties.
        self._static_folder = None
        self._static_url_path = None

    def _get_static_folder(self):
        # Absolute path of the static folder, or None if not configured.
        if self._static_folder is not None:
            return os.path.join(self.root_path, self._static_folder)
    def _set_static_folder(self, value):
        self._static_folder = value
    static_folder = property(_get_static_folder, _set_static_folder)
    # Remove the raw accessor names so only the property is left in the
    # class namespace.
    del _get_static_folder, _set_static_folder

    def _get_static_url_path(self):
        # Defaults to '/<basename of static_folder>' unless explicitly set.
        if self._static_url_path is None:
            if self.static_folder is None:
                return None
            return '/' + os.path.basename(self.static_folder)
        return self._static_url_path
    def _set_static_url_path(self, value):
        self._static_url_path = value
    static_url_path = property(_get_static_url_path, _set_static_url_path)
    del _get_static_url_path, _set_static_url_path

    @property
    def has_static_folder(self):
        """This is `True` if the package bound object's container has a
        folder named ``'static'``.

        .. versionadded:: 0.5
        """
        return self.static_folder is not None

    @locked_cached_property
    def jinja_loader(self):
        """The Jinja loader for this package bound object.

        .. versionadded:: 0.5
        """
        # Returns None when the object exposes no templates.
        if self.template_folder is not None:
            return FileSystemLoader(os.path.join(self.root_path,
                                                 self.template_folder))

    def get_send_file_max_age(self, filename):
        """Provides default cache_timeout for the :func:`send_file` functions.

        By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
        the configuration of :data:`~flask.current_app`.

        Static file functions such as :func:`send_from_directory` use this
        function, and :func:`send_file` calls this function on
        :data:`~flask.current_app` when the given cache_timeout is `None`. If a
        cache_timeout is given in :func:`send_file`, that timeout is used;
        otherwise, this method is called.

        This allows subclasses to change the behavior when sending files based
        on the filename.  For example, to set the cache timeout for .js files
        to 60 seconds::

            class MyFlask(flask.Flask):
                def get_send_file_max_age(self, name):
                    if name.lower().endswith('.js'):
                        return 60
                    return flask.Flask.get_send_file_max_age(self, name)

        .. versionadded:: 0.9
        """
        return current_app.config['SEND_FILE_MAX_AGE_DEFAULT']

    def send_static_file(self, filename):
        """Function used internally to send static files from the static
        folder to the browser.

        .. versionadded:: 0.5
        """
        if not self.has_static_folder:
            raise RuntimeError('No static folder for this object')
        # Ensure get_send_file_max_age is called in all cases.
        # Here, we ensure get_send_file_max_age is called for Blueprints.
        cache_timeout = self.get_send_file_max_age(filename)
        return send_from_directory(self.static_folder, filename,
                                   cache_timeout=cache_timeout)

    def open_resource(self, resource, mode='rb'):
        """Opens a resource from the application's resource folder.  To see
        how this works, consider the following folder structure::

            /myapplication.py
            /schema.sql
            /static
                /style.css
            /templates
                /layout.html
                /index.html

        If you want to open the `schema.sql` file you would do the
        following::

            with app.open_resource('schema.sql') as f:
                contents = f.read()
                do_something_with(contents)

        :param resource: the name of the resource.  To access resources within
                         subfolders use forward slashes as separator.
        :param mode: resource file opening mode, default is 'rb'.
        """
        if mode not in ('r', 'rb'):
            raise ValueError('Resources can only be opened for reading')
        return open(os.path.join(self.root_path, resource), mode)
| apache-2.0 |
danielpalomino/gem5 | src/mem/slicc/ast/OodAST.py | 90 | 1802 | #
# Copyright (c) 2011 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from slicc.ast.ExprAST import ExprAST
class OodAST(ExprAST):
    """AST node for the SLICC "out of domain" expression, which expands
    to a null pointer in the generated code."""

    def __init__(self, slicc):
        super(OodAST, self).__init__(slicc)

    def __repr__(self):
        return "[Ood:]"

    def generate(self, code):
        # Emit "NULL" into the output buffer and report the special
        # "OOD" type string to the caller.
        code += "NULL"
        return "OOD"
| bsd-3-clause |
wibbe/ZumGo | vendor/src/github.com/ericlagergren/decimal/_testdata/tables.py | 1 | 8662 | #!/usr/bin/env python3.6
from decimal import *
import gzip
import random
import sys
import math
# Maps an operation symbol (as used in the emitted table lines) to a
# human-readable name; make_tables uses the name for the output filename
# ("<name>-tables.gz").
ops = {
    "*": "multiplication",
    "+": "addition",
    "-": "subtraction",
    "/": "division",
    "qC": "comparison",
    "quant": "quantization",
    "A": "absolute-value",
    "cfd": "convert-to-string",
    "~": "negation",
    "*-": "fused-multiply-add",
    "L": "base-b-logarithm",
    "?": "class",
    "V": "square-root",
    "%": "remainder",
    "Nu": "next-plus",
    "Nd": "next-minus",
    # Custom
    "rat": "convert-to-rat",
    "sign": "sign",
    "signbit": "signbit",
    "exp": "exponential-function",
    "log": "natural-logarithm",
    "log10": "common-logarithm",
    "pow": "power",
    "//": "integer-division",
    "norm": "reduction",
    "rtie": "round-to-integral-exact",
    "shift": "shift",
}

# Maps a rounding-mode symbol to the corresponding decimal module
# rounding constant; make_tables picks one at random per test vector.
# Commented-out constants have no symbol assigned.
modes = {
    "=0": ROUND_HALF_EVEN,
    "=^": ROUND_HALF_UP,
    "0": ROUND_DOWN,
    "<": ROUND_FLOOR,
    ">": ROUND_CEILING,
    # ROUND_HALF_DOWN,
    "^": ROUND_UP,
    # ROUND_05UP,
}
def rand_bool():
    """Return a uniformly random boolean (consumes one randint draw)."""
    return random.randint(0, 1) == 0
def make_dec(nbits=5000):
    """Build one random Decimal.

    Roughly 1-in-51 results are NaN, 1-in-51 are infinity; the rest come
    from a random integer of up to *nbits* bits.  The value is randomly
    negated, and a decimal point is randomly spliced into its text form
    before conversion.  (The sequence of random draws is fixed, so results
    are reproducible for a given seed.)
    """
    kind = random.randint(0, 50)
    if kind == 0:
        f = math.nan
    elif kind == 1:
        f = math.inf
    else:
        f = random.getrandbits(random.randint(1, nbits))
    if rand_bool():
        if kind > 1:
            f = -f
        else:
            # copysign is needed so NaN/inf pick up the sign bit.
            f = math.copysign(f, -1)
    text = str(f)
    if rand_bool():
        chars = list(text)
        if len(chars) == 1:
            chars.insert(0, '.')
        else:
            # Overwrite one random character with a decimal point.
            chars[random.randint(0, len(chars) - 1)] = '.'
        text = "".join(chars)
    return Decimal(text)
# Base reused for quantization operands (powers of ten).
DEC_TEN = Decimal(10)


def rand_dec(quant=None, nbits=None):
    """Produce one random Decimal operand.

    With ``quant`` truthy the result is a random positive or negative
    power of ten (suitable as a quantize() operand); otherwise it starts
    from :func:`make_dec` and is optionally combined with up to three more
    random values.  All arithmetic happens in a local context with every
    trap disabled so special values never raise.
    """
    if quant is None:
        quant = False
    if nbits is None:
        nbits = 5000
    with localcontext() as ctx:
        ctx.clear_traps()
        if quant:
            exponent = random.randint(0, 750)
            if rand_bool():
                d = DEC_TEN ** exponent
            else:
                d = DEC_TEN ** -exponent
        else:
            d = make_dec(nbits)
        for _ in range(random.randint(0, 3)):
            pick = random.randint(0, 4)
            if pick == 1:
                d *= make_dec(nbits)
            elif pick == 2:
                d /= make_dec(nbits)
            elif pick == 3:
                d -= make_dec(nbits)
            elif pick == 4:
                d += make_dec(nbits)
            # pick == 0 leaves d unchanged.
        return d
def conv(x):
    """Normalize a value for table output.

    None passes through; infinite Decimals become the strings 'Inf'/'-Inf';
    everything else is returned unchanged.
    """
    if x is None:
        return None
    if isinstance(x, Decimal) and x.is_infinite():
        return '-Inf' if x.is_signed() else 'Inf'
    return x
def write_line(out, prec, op, mode, r, x, y=None, u=None, flags=None):
    """Append one test-vector line to *out*.

    The line format mirrors the dectest style:
    ``d<prec><op> <mode> <operands...> -> <result> <flags>``.
    The number of operand columns follows the operation's arity
    (x alone, x y, or x y u).
    """
    if x is None:
        raise ValueError("bad args")
    x = conv(x)
    y = conv(y)
    u = conv(u)
    r = conv(r)
    if y is None:
        line = "d{}{} {} {} -> {} {}\n".format(
            prec, op, mode, x, r, flags)
    elif u is None:
        line = "d{}{} {} {} {} -> {} {}\n".format(
            prec, op, mode, x, y, r, flags)
    else:
        line = "d{}{} {} {} {} {} -> {} {}\n".format(
            prec, op, mode, x, y, u, r, flags)
    out.write(line)
def perform_op(op):
    """Evaluate *op* on freshly generated random operands under the current
    decimal context.

    Returns a tuple ``(r, x, y, u)`` where ``r`` is the result and ``x``,
    ``y`` and ``u`` are the operands used (``y``/``u`` stay None for lower
    arity operations).  Several transcendental operations shrink the global
    precision first so they finish in reasonable time.

    Raises :class:`ValueError` for an unknown *op*; any decimal signal
    trapped by the current context propagates to the caller.
    """
    # NOTE: the previous version wrapped this whole chain in
    # "try: ... except Exception as e: raise e", which only perturbed the
    # traceback; exceptions now propagate naturally.
    r = None
    x = None
    y = None  # possibly unused
    u = None  # possibly unused
    # Binary
    if op == "*":
        x = rand_dec()
        y = rand_dec()
        r = x * y
    elif op == "+":
        x = rand_dec()
        y = rand_dec()
        r = x + y
    elif op == "-":
        x = rand_dec()
        y = rand_dec()
        r = x - y
    elif op == "/":
        x = rand_dec()
        y = rand_dec()
        r = x / y
    elif op == "//":
        x = rand_dec()
        y = rand_dec()
        r = x // y
    elif op == "%":
        x = rand_dec()
        y = rand_dec()
        r = x % y
    elif op == "qC":
        x = rand_dec()
        y = rand_dec()
        r = x.compare(y)
    elif op == "quant":
        x = rand_dec()
        y = rand_dec(True)
        r = x.quantize(y)
        # Report the number of digits quantized to instead of the operand.
        y = -y.as_tuple().exponent
    elif op == "pow":
        getcontext().prec += 11
        getcontext().prec //= 10
        x = rand_dec(nbits=64)
        y = rand_dec(nbits=64)
        #u = rand_dec(nbits=64)
        # The error of Python's decimal power method is < 1 ULP + t, where
        # t <= 0.1 ULP, but usually < 0.01 ULP.
        getcontext().prec += 1
        r = getcontext().power(x, y, u)
        getcontext().prec -= 1
        r = +r
    elif op == "shift":
        x = rand_dec()
        y = Decimal(random.randint(-getcontext().prec, getcontext().prec))
        r = x.shift(y)
    # Unary
    elif op == "A":
        x = rand_dec()
        r = getcontext().abs(x)
    elif op == "cfd":
        x = rand_dec()
        r = str(x)
    elif op == "rat":
        x = rand_dec()
        while True:
            try:
                x, y = x.as_integer_ratio()
                if y == 1:
                    r = +Decimal(x)
                else:
                    r = Decimal(x) / Decimal(y)
                break
            except Exception:  # ValueError if nan, etc.
                # Deliberate best-effort retry: NaN/infinity have no integer
                # ratio, so keep drawing new operands until one works.
                x = rand_dec()
    elif op == "sign":
        x = rand_dec()
        if x < 0:
            r = -1
        elif x > 0:
            r = +1
        else:
            r = 0
    elif op == "signbit":
        x = rand_dec()
        r = x.is_signed()
    elif op == "~":
        x = rand_dec()
        r = -x
    elif op == "exp":
        if getcontext().prec >= 10:
            getcontext().prec //= 10
        x = rand_dec(nbits=100)
        r = x.exp()
    elif op == "log":
        getcontext().prec += 11
        getcontext().prec //= 10
        x = rand_dec(nbits=128)
        r = x.ln()
    elif op == "L":
        getcontext().prec += 11
        getcontext().prec //= 10
        x = rand_dec(nbits=128)
        r = x.logb()
    elif op == "log10":
        getcontext().prec += 11
        getcontext().prec //= 10
        x = rand_dec(nbits=128)
        r = x.log10()
    elif op == "?":
        x = rand_dec()
        r = x.number_class()
    elif op == "V":
        x = rand_dec()
        r = x.sqrt()
    elif op == "norm":
        x = rand_dec()
        r = x.normalize()
    elif op == "rtie":
        x = rand_dec()
        r = x.to_integral_exact()
    elif op == "Nu":
        x = rand_dec()
        r = x.next_plus()
    elif op == "Nd":
        x = rand_dec()
        r = x.next_minus()
    # Ternary
    elif op == "*-":
        x = rand_dec()
        y = rand_dec()
        u = rand_dec()
        r = x.fma(y, u)
    else:
        raise ValueError("bad op {}".format(op))
    return (r, x, y, u)
# Maps each decimal signal class to the single-letter condition code used
# in the emitted table lines.  FloatOperation carries the placeholder
# "***" because it is explicitly filtered out wherever this mapping is
# consulted (rand_traps and make_tables).
traps = {
    Clamped: "c",
    DivisionByZero: "z",
    Inexact: "x",
    InvalidOperation: "i",
    Overflow: "o",
    Rounded: "r",
    Subnormal: "s",
    Underflow: "u",
    FloatOperation: "***",
}
def rand_traps():
    """Randomly enable a subset of decimal traps.

    Returns ``(enabled, letters)`` where ``enabled`` maps each signal class
    to 0/1 and ``letters`` concatenates the condition codes of the enabled
    ones.  FloatOperation is never enabled.
    """
    enabled = {}
    letters = ""
    for signal, letter in traps.items():
        active = signal != FloatOperation and rand_bool()
        if active:
            letters += letter
        enabled[signal] = int(active)
    return (enabled, letters)
# set N higher for local testing.
# N is the per-operation test-vector count, read from the first CLI argument.
N = int(sys.argv[1])
def make_tables(items):
    """For each ``(op symbol, name)`` pair in *items*, write N randomized
    test vectors to a gzip'd "<name>-tables.gz" file.  Each vector is
    generated under a freshly randomized global context (precision and
    rounding mode) with traps and flags cleared, and the flags raised by
    the operation are recorded as condition codes on the line.
    """
    for op, name in items:
        with gzip.open("{}-tables.gz".format(name), "wt") as f:
            for i in range(1, N):
                mode = random.choice(list(modes.keys()))
                # t, ts = rand_traps()
                ctx = getcontext()
                ctx.Emax = MAX_EMAX
                ctx.Emin = MIN_EMIN
                ctx.rounding = modes[mode]
                ctx.prec = random.randint(1, 5000)
                ctx.clear_traps()
                ctx.clear_flags()
                r, x, y, u = perform_op(op)
                # Collect the condition codes for every flag the operation
                # raised (FloatOperation is excluded from output).
                conds = ""
                for key, value in ctx.flags.items():
                    if value == 1 and key != FloatOperation:
                        conds += traps[key]
                write_line(f, ctx.prec, op, mode, r, x, y, u, conds)
# By default generate tables for every operation; an optional second CLI
# argument restricts generation to that single op symbol.
items = ops.items()
if len(sys.argv) > 2:
    arg = sys.argv[2]
    items = [(arg, ops[arg])]
make_tables(items)
| mit |
tsufiev/horizon | openstack_dashboard/dashboards/project/networks/urls.py | 65 | 1987 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.networks.ports \
import urls as port_urls
from openstack_dashboard.dashboards.project.networks.ports \
import views as port_views
from openstack_dashboard.dashboards.project.networks.subnets \
import urls as subnet_urls
from openstack_dashboard.dashboards.project.networks.subnets \
import views as subnet_views
from openstack_dashboard.dashboards.project.networks import views
# Pattern template for routes scoped to a single network; %s is replaced
# with the action suffix (e.g. 'detail', 'update').
NETWORKS = r'^(?P<network_id>[^/]+)/%s$'

# Route table for the project "networks" panel.  Subnet and port
# sub-resources are delegated to their own URLconf modules via include().
urlpatterns = patterns(
    '',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^create$', views.CreateView.as_view(), name='create'),
    url(NETWORKS % 'detail', views.DetailView.as_view(), name='detail'),
    url(NETWORKS % 'update', views.UpdateView.as_view(), name='update'),
    url(NETWORKS % 'subnets/create', subnet_views.CreateView.as_view(),
        name='addsubnet'),
    url(r'^(?P<network_id>[^/]+)/subnets/(?P<subnet_id>[^/]+)/update$',
        subnet_views.UpdateView.as_view(), name='editsubnet'),
    url(r'^(?P<network_id>[^/]+)/ports/(?P<port_id>[^/]+)/update$',
        port_views.UpdateView.as_view(), name='editport'),
    url(r'^subnets/', include(subnet_urls, namespace='subnets')),
    url(r'^ports/', include(port_urls, namespace='ports')))
| apache-2.0 |
unreal666/youtube-dl | youtube_dl/extractor/teletask.py | 215 | 1739 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import unified_strdate
class TeleTaskIE(InfoExtractor):
    """Extractor for lecture recordings hosted on tele-task.de.

    A lecture page embeds one HTML5 <video> element per stream (speaker
    camera, slides, ...); each stream becomes one entry of the returned
    playlist.
    """
    _VALID_URL = r'https?://(?:www\.)?tele-task\.de/archive/video/html5/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.tele-task.de/archive/video/html5/26168/',
        'info_dict': {
            'id': '26168',
            'title': 'Duplicate Detection',
        },
        'playlist': [{
            'md5': '290ef69fb2792e481169c3958dbfbd57',
            'info_dict': {
                'id': '26168-speaker',
                'ext': 'mp4',
                'title': 'Duplicate Detection',
                'upload_date': '20141218',
            }
        }, {
            'md5': 'e1e7218c5f0e4790015a437fcf6c71b4',
            'info_dict': {
                'id': '26168-slides',
                'ext': 'mp4',
                'title': 'Duplicate Detection',
                'upload_date': '20141218',
            }
        }]
    }

    def _real_extract(self, url):
        lecture_id = self._match_id(url)
        page = self._download_webpage(url, lecture_id)

        title = self._html_search_regex(
            r'itemprop="name">([^<]+)</a>', page, 'title')
        upload_date = unified_strdate(self._html_search_regex(
            r'Date:</td><td>([^<]+)</td>', page, 'date', fatal=False))

        entries = []
        for stream_class, stream_url in re.findall(
                r'<video class="([^"]+)"[^>]*>\s*<source src="([^"]+)"', page):
            # The CSS class ("speaker", "slides", ...) disambiguates the
            # parallel streams of a single lecture.
            entries.append({
                'id': '%s-%s' % (lecture_id, stream_class),
                'url': stream_url,
                'title': title,
                'upload_date': upload_date,
            })
        return self.playlist_result(entries, lecture_id, title)
| unlicense |
PabloCastellano/nodeshot | nodeshot/core/metrics/models.py | 2 | 3990 | from datetime import datetime
from django.contrib.gis.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from nodeshot.core.base.models import BaseDate
from .utils import query, write
class Metric(BaseDate):
    """A named time-series metric, optionally tied to a model instance.

    Points are written through ``write`` and read back through ``query``
    (both imported from ``.utils``).  When the generic relation is set it
    is folded into the point tags so series can be filtered per object.
    """
    name = models.CharField(_('name'), max_length=75)
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.PositiveIntegerField(blank=True, null=True)
    related_object = generic.GenericForeignKey('content_type', 'object_id')
    # BUG FIX: use the callable ``dict`` as default; a literal ``{}`` is a
    # single shared object mutated by every instance (save() calls
    # self.tags.update(...)).
    tags = JSONField(_('tags'), blank=True, default=dict)
    query = models.CharField(_('query'), blank=True, max_length=255, help_text='default query')

    class Meta:
        unique_together = ('name', 'tags', 'content_type', 'object_id')

    def __unicode__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Mirror the generic relation into the tags and remember the
        # default SELECT statement on first save.
        if self.related_object:
            self.tags.update({
                'content_type': self.content_type.name,
                'object_id': str(self.object_id)
            })
        if not self.query:
            self.query = self.select(sql_only=True)
        super(Metric, self).save(*args, **kwargs)

    def write(self, values, timestamp=None, database=None):
        """Write one metric point to the time-series backend."""
        return write(name=self.name,
                     values=values,
                     tags=self.tags,
                     timestamp=timestamp,
                     database=database)

    def select(self, fields=None, since=None, limit=None, q=None, sql_only=False):
        """Build (and optionally run) a SELECT over this metric's series.

        ``fields``   -- iterable of column names; defaults to ``*``
        ``since``    -- datetime or raw time expression (default: last 30 days)
        ``limit``    -- maximum number of points to return
        ``q``        -- raw query override; DROP/DELETE overrides are discarded
        ``sql_only`` -- return the query string instead of executing it
        """
        # BUG FIX: ``fields`` previously used a mutable default argument ([]).
        fields = fields or []
        # Weak safety net only: destructive overrides are silently dropped.
        # This is NOT real sanitization; ``q`` must never come from
        # untrusted input.
        if q is not None and ('DROP' in q or 'DELETE' in q):
            q = None
        if not q:
            if fields:
                fields = ', '.join(fields)
            else:
                fields = '*'
            if not since:
                since = 'now() - 30d'
            if isinstance(since, datetime):
                since = "'{0}'".format(since.strftime('%Y-%m-%dT%H:%M:%SZ'))
            conditions = "time >= {0}".format(since)
            tags = ' AND '.join(["{0} = '{1}'".format(*tag) for tag in self.tags.items()])
            if tags:
                conditions = '{0} AND {1}'.format(conditions, tags)
            q = 'SELECT {fields} FROM {name} WHERE {conditions}'.format(fields=fields,
                                                                       name=self.name,
                                                                       conditions=conditions)
        if limit:
            q = '{0} LIMIT {1}'.format(q, limit)
        if sql_only:
            return q
        return query(q)
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.contrib.auth.signals import user_logged_in
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
User = get_user_model()
@receiver(user_logged_in, dispatch_uid='user_loggedin')
def user_loggedin(sender, **kwargs):
    """ collect metrics about user logins """
    # One point per login, tagged with the user so series can be broken
    # down per account; the request path is stored as a point value.
    tags = {
        'user_id': str(kwargs['user'].pk),
        'username': kwargs['user'].username,
    }
    values = {
        'value': 1,
        'path': kwargs['request'].path
    }
    write('user_logins', values=values, tags=tags)
# BUG FIX: this post_delete handler was registered with
# dispatch_uid='user_created' (swapped with its post_save sibling); the
# uid now matches the deletion semantics of the handler body.
@receiver(post_delete, sender=User, dispatch_uid='user_deleted')
def user_created(sender, **kwargs):
    """Collect metrics about users unsubscribing (User instance deleted).

    NOTE(review): the function name still says "created" for backward
    compatibility with any external reference, but the body records a
    deletion (variation -1).
    """
    write('user_variations', {'variation': -1}, tags={'action': 'deleted'})
    write('user_count', {'total': User.objects.count()})
# BUG FIX: this post_save handler was registered with
# dispatch_uid='user_deleted' (swapped with its post_delete sibling); the
# uid now matches the creation semantics of the handler body.
@receiver(post_save, sender=User, dispatch_uid='user_created')
def user_deleted(sender, **kwargs):
    """Collect metrics about new users signing up (User instance created).

    NOTE(review): the function name still says "deleted" for backward
    compatibility with any external reference, but the body records a
    creation (variation +1).
    """
    if kwargs.get('created'):
        write('user_variations', {'variation': 1}, tags={'action': 'created'})
        write('user_count', {'total': User.objects.count()})
| gpl-3.0 |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_console.py | 2 | 2473 | import os
from tempfile import NamedTemporaryFile
from mock import patch
from test_inischema import SAMPLE_CONFIGURATION
from test_inischema import SAMPLE_SCHEMA
SAMPLE_CONFIGURATION += """
[env]
somekey = somevalue
"""
class TestCommand(object):
    """Tests for nolearn.console.Command (docopt-driven CLI entry points)."""
    # NOTE(review): this runs at class-definition time and leaves the temp
    # file on disk (delete=False); write() with a str assumes Python 2
    # (Python 3 would require bytes) -- confirm before porting.
    with NamedTemporaryFile(delete=False) as config_file:
        config_file.write(SAMPLE_CONFIGURATION)
    def some_filename(self):
        # Return the name of a fresh temporary file that outlives the
        # context manager (delete=False).
        with NamedTemporaryFile(delete=False) as some_file:
            return some_file.name
    def test_simple(self):
        from ..console import Command
        called = []
        def second(value1, value2=None):
            called.append((value1, value2))
        class MyCommand(Command):
            __doc__ = """
            Usage:
              script second <config_file>
            """
            schema = SAMPLE_SCHEMA
            funcs = [second]
        argv = ['script', 'second', self.config_file.name]
        MyCommand()(argv)
        # `second` is dispatched once with values parsed from the config;
        # the [env] section must have been exported into os.environ.
        assert len(called) == 1
        assert called[0][0].startswith('a few line breaks')
        assert called[0][1] is None
        assert os.environ['SOMEKEY'] == 'somevalue'
    def test_profiler(self):
        from ..console import Command
        called = []
        def second(value1, value2=None):
            called.append((value1, value2))
        class MyCommand(Command):
            __doc__ = """
            Usage:
              script second <config_file> [--profile=<file>]
            """
            schema = SAMPLE_SCHEMA
            funcs = [second]
        profile_filename = self.some_filename()
        argv = ['script', 'second', self.config_file.name,
                '--profile', profile_filename]
        MyCommand()(argv)
        assert len(called) == 1
        # --profile must have produced a non-empty stats file.
        with open(profile_filename) as f:
            assert(len(f.read()) > 1)
    # mock decorators apply bottom-up: `print_exc` patches
    # traceback.print_exc, `post_mortem` patches pdb.post_mortem.
    @patch('nolearn.console.pdb.post_mortem')
    @patch('nolearn.console.traceback.print_exc')
    def test_pdb(self, print_exc, post_mortem):
        from ..console import Command
        called = []
        def second(value1, value2=None):
            called.append((value1, value2))
            raise ValueError()
        class MyCommand(Command):
            __doc__ = """
            Usage:
              script second <config_file> [--pdb]
            """
            schema = SAMPLE_SCHEMA
            funcs = [second]
        argv = ['script', 'second', self.config_file.name, '--pdb']
        # With --pdb the ValueError raised by `second` is caught by the
        # Command and handed to the (mocked) post-mortem debugger.
        MyCommand()(argv)
        assert len(called) == 1
| bsd-3-clause |
lukeiwanski/tensorflow | tensorflow/python/lib/io/python_io.py | 14 | 1054 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python functions for directly manipulating TFRecord-formatted files.
See the @{$python/python_io} guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.lib.io.tf_record import *
# pylint: enable=wildcard-import
| apache-2.0 |
marc-sensenich/ansible | test/units/modules/network/ftd/test_ftd_file_download.py | 52 | 4046 | # Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import pytest
from ansible.module_utils import basic
from ansible.module_utils.network.ftd.common import HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import FILE_MODEL_NAME, OperationField
from ansible.modules.network.ftd import ftd_file_download
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleFailJson, AnsibleExitJson
class TestFtdFileDownload(object):
    """Unit tests for the ftd_file_download Ansible module."""
    module = ftd_file_download
    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        # Replace AnsibleModule's exit/fail handlers so tests can catch
        # them as AnsibleExitJson/AnsibleFailJson instead of the process
        # exiting.
        return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
    @pytest.fixture
    def connection_mock(self, mocker):
        # Mock of the HTTP-API Connection instance used by the module.
        connection_class_mock = mocker.patch('ansible.modules.network.ftd.ftd_file_download.Connection')
        return connection_class_mock.return_value
    @pytest.mark.parametrize("missing_arg", ['operation', 'destination'])
    def test_module_should_fail_without_required_args(self, missing_arg):
        module_args = {'operation': 'downloadFile', 'destination': '/tmp'}
        del module_args[missing_arg]
        set_module_args(module_args)
        with pytest.raises(AnsibleFailJson) as ex:
            self.module.main()
        assert 'missing required arguments: %s' % missing_arg in str(ex)
    def test_module_should_fail_when_no_operation_spec_found(self, connection_mock):
        # Unknown operation name: the Swagger spec lookup returns None.
        connection_mock.get_operation_spec.return_value = None
        set_module_args({'operation': 'nonExistingDownloadOperation', 'destination': '/tmp'})
        with pytest.raises(AnsibleFailJson) as ex:
            self.module.main()
        result = ex.value.args[0]
        assert result['failed']
        assert result['msg'] == 'Operation with specified name is not found: nonExistingDownloadOperation'
    def test_module_should_fail_when_not_download_operation_specified(self, connection_mock):
        # Operation exists but does not return a file model -> rejected.
        connection_mock.get_operation_spec.return_value = {
            OperationField.METHOD: HTTPMethod.GET,
            OperationField.URL: '/object',
            OperationField.MODEL_NAME: 'NetworkObject'
        }
        set_module_args({'operation': 'nonDownloadOperation', 'destination': '/tmp'})
        with pytest.raises(AnsibleFailJson) as ex:
            self.module.main()
        result = ex.value.args[0]
        assert result['failed']
        assert result['msg'] == 'Invalid download operation: nonDownloadOperation. ' \
                                'The operation must make GET request and return a file.'
    def test_module_should_call_download_and_return(self, connection_mock):
        # Happy path: a valid download operation delegates to
        # Connection.download_file and reports changed=False.
        connection_mock.validate_path_params.return_value = (True, None)
        connection_mock.get_operation_spec.return_value = {
            OperationField.METHOD: HTTPMethod.GET,
            OperationField.URL: '/file/{objId}',
            OperationField.MODEL_NAME: FILE_MODEL_NAME
        }
        set_module_args({
            'operation': 'downloadFile',
            'path_params': {'objId': '12'},
            'destination': '/tmp'
        })
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        result = ex.value.args[0]
        assert not result['changed']
        connection_mock.download_file.assert_called_once_with('/file/{objId}', '/tmp', {'objId': '12'})
| gpl-3.0 |
geertj/pyskiplist | tests/test_skiplist.py | 1 | 11345 | #
# This file is part of PySkiplist. PySkiplist is Copyright (c) 2012-2015 by
# the PySkiplist authors.
#
# PySkiplist is free software available under the MIT license. See the file
# named LICENSE distributed with this file for the exact licensing terms.
from __future__ import absolute_import, print_function
import random
import unittest
import six
from support import TestCase
from pyskiplist import SkipList
from pyskiplist.skiplist import check, dump, getsize
class TestSkipList(TestCase):
    """Unit test suite for SkipList.

    Every test validates the structural invariants with ``check(sl)`` and
    compares the list contents against a plain sorted reference model
    after each mutation.
    """
    # Number of random (key, value) pairs used by the randomised tests.
    size = 100
    def _create_skiplist(self, size, keysize, valuesize):
        # Build a SkipList of `size` random pairs and return it together
        # with the sorted pair list and a key -> [values] reference map.
        sl = SkipList()
        pairs = []
        values = {}
        for i in range(size):
            pair = (random.randint(0, keysize), random.randint(0, valuesize))
            sl.insert(*pair)
            pairs.append(pair)
            if pair[0] not in values:
                values[pair[0]] = []
            values[pair[0]].append(pair[1])
        pairs = sorted(pairs, key=lambda x: x[0])
        return sl, pairs, values
    # GENERAL API ...
    def test_level(self):
        sl = SkipList()
        self.assertEqual(sl.level, 1)
        check(sl)
    def test_insert(self):
        size = self.size
        sl = SkipList()
        pairs = []
        for i in range(size):
            pair = (random.randint(0, 2*size), random.randint(0, 10*size))
            sl.insert(*pair)
            pairs = sorted(pairs + [pair], key=lambda x: x[0])
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertGreater(sl.level, 1)
    def test_replace(self):
        size = self.size
        sl = SkipList()
        values = {}
        for i in range(size):
            pair = (random.randint(0, 2*size), random.randint(0, 10*size))
            sl.replace(*pair)
            values[pair[0]] = pair[1]
            pairs = sorted(values.items(), key=lambda x: x[0])
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertGreater(sl.level, 1)
    def test_clear(self):
        size = self.size
        sl = SkipList()
        for i in range(size):
            sl.insert(random.randint(0, 2*size), random.randint(0, 10*size))
        self.assertGreater(sl.level, 1)
        self.assertEqual(len(sl), size)
        sl.clear()
        check(sl); self.assertEqual(list(sl), [])
        # clear() must also reset the level back to 1.
        self.assertEqual(sl.level, 1)
    def test_len(self):
        size = self.size
        sl = SkipList()
        pairs = []
        for i in range(size):
            pair = (random.randint(0, 2*size), random.randint(0, 10*size))
            sl.insert(*pair)
            pairs = sorted(pairs + [pair], key=lambda x: x[0])
            self.assertEqual(len(sl), i+1)
        check(sl); self.assertEqual(list(sl), pairs)
    def test_bool(self):
        sl = SkipList()
        self.assertFalse(sl)
        self.assertFalse(bool(sl))
        check(sl)
        sl.insert('foo', 'bar')
        self.assertTrue(sl)
        self.assertTrue(bool(sl))
        check(sl)
    def test_repr(self):
        sl = SkipList()
        sl.insert(1, 2)
        sl.insert(3, 4)
        self.assertEqual(repr(sl), 'SkipList(((1, 2), (3, 4)))')
        check(sl)
    def test_iter(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        self.assertEqual(list(sl), pairs)
        check(sl)
    def test_items(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, size, 10*size)
        # test .items(), .keys() and .values()
        for ix, func in ((slice(0, 2), sl.items), (0, sl.keys), (1, sl.values)):
            def ref(start, stop):
                return [pair[ix] for pair in pairs
                        if (start is None or pair[0] >= start)
                        and (stop is None or pair[0] < stop)]
            self.assertEqual(list(func()), ref(None, None))
            self.assertEqual(list(func(start=10)), ref(10, None))
            self.assertEqual(list(func(start=10.1)), ref(10.1, None))
            self.assertEqual(list(func(start=11)), ref(11, None))
            self.assertEqual(list(func(stop=90)), ref(None, 90))
            self.assertEqual(list(func(stop=90.1)), ref(None, 90.1))
            self.assertEqual(list(func(stop=91)), ref(None, 91))
            self.assertEqual(list(func(start=10, stop=90)), ref(10, 90))
            self.assertEqual(list(func(start=10.1, stop=90)), ref(10.1, 90))
            self.assertEqual(list(func(start=10, stop=90.1)), ref(10, 90.1))
            self.assertEqual(list(func(start=10.1, stop=90.1)), ref(10.1, 90.1))
        check(sl); self.assertEqual(list(sl), pairs)
    def test_popitem(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        while pairs:
            self.assertEqual(sl.popitem(), pairs[0])
            del pairs[0]
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(KeyError, sl.popitem)
        check(sl); self.assertEqual(list(sl), pairs)
    # KEY BASED API ...
    def test_search(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for key in values:
            # search() returns the FIRST value inserted for a key.
            self.assertEqual(sl.search(key), values[key][0])
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertIsNone(sl.search(random.randint(3*size, 10*size)))
        check(sl); self.assertEqual(list(sl), pairs)
        self.assertEqual(sl.search(random.randint(3*size, 10*size), -1), -1)
        check(sl); self.assertEqual(list(sl), pairs)
    def test_remove(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for key in values:
            for value in values[key]:
                sl.remove(key)
                pairs.remove((key, value))
                check(sl); self.assertEqual(list(sl), pairs)
            self.assertRaises(KeyError, sl.remove, key)
            check(sl); self.assertEqual(list(sl), pairs)
    def test_pop(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for key in values:
            for value in values[key]:
                self.assertEqual(sl.pop(key), value)
                pairs.remove((key, value))
                check(sl); self.assertEqual(list(sl), pairs)
            self.assertRaises(KeyError, sl.pop, key)
            check(sl); self.assertEqual(list(sl), pairs)
            self.assertIsNone(sl.pop(key, None))
            check(sl); self.assertEqual(list(sl), pairs)
            self.assertEqual(sl.pop(key, -1), -1)
            check(sl); self.assertEqual(list(sl), pairs)
    def test_contains(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for key in values:
            self.assertIn(key, sl)
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertNotIn(random.randint(3*size, 10*size), sl)
        check(sl); self.assertEqual(list(sl), pairs)
    def test_index(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for key in values:
            self.assertEqual(sl.index(key), pairs.index((key, values[key][0])))
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(KeyError, sl.index, random.randint(3*size, 10*size))
        check(sl); self.assertEqual(list(sl), pairs)
    def test_count(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for key in values:
            self.assertEqual(sl.count(key), len(values[key]))
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertEqual(sl.count(random.randint(3*size, 10*size)), 0)
        check(sl); self.assertEqual(list(sl), pairs)
    # BY POSITION API ...
    def test_getitem(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for i in range(size):
            self.assertEqual(sl[i], pairs[i])
            check(sl); self.assertEqual(list(sl), pairs)
            # Negative indexing must mirror list semantics.
            self.assertEqual(sl[-i-1], pairs[-i-1])
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(IndexError, sl.__getitem__, size)
        check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(IndexError, sl.__getitem__, -size-1)
        check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(TypeError, sl.__getitem__, 'foo')
        check(sl); self.assertEqual(list(sl), pairs)
    def test_getitem_slice(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for ix in (slice(None, None), slice(None, 10), slice(10, None),
                   slice(10, 90), slice(10, -10), slice(-10, None),
                   slice(-10, -1), slice(None, -10), slice(None, -1)):
            self.assertEqual(list(sl[ix]), pairs[ix])
            check(sl); self.assertEqual(list(sl), pairs)
    def test_delitem(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        while pairs:
            ix = random.randrange(-len(pairs), len(pairs))
            del sl[ix]
            del pairs[ix]
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(IndexError, sl.__delitem__, len(pairs))
        check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(IndexError, sl.__delitem__, -len(pairs)-1)
        check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(TypeError, sl.__delitem__, 'foo')
        check(sl); self.assertEqual(list(sl), pairs)
    def test_setitem(self):
        size = self.size
        sl, pairs, values = self._create_skiplist(size, 2*size, 10*size)
        for ix, pair in enumerate(pairs):
            # __setitem__ replaces the VALUE at a position, not the key.
            sl[ix] = 2*pair[1]
            pairs[ix] = (pair[0], 2*pair[1])
            check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(IndexError, sl.__setitem__, size, None)
        check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(IndexError, sl.__setitem__, -size-1, None)
        check(sl); self.assertEqual(list(sl), pairs)
        self.assertRaises(TypeError, sl.__setitem__, 'foo', None)
        check(sl); self.assertEqual(list(sl), pairs)
class TestSkipListDebug(TestCase):
    """Coverage for debugging tools."""
    def test_size(self):
        # getsize() reports a plausible, bounded footprint for a
        # one-element list.
        sl = SkipList()
        sl.insert('foo', 'bar')
        size = getsize(sl)
        self.assertIsInstance(size, int)
        self.assertGreater(size, 0)
        self.assertLess(size, 5000)
    def test_node_size(self):
        # Average per-node footprint must stay under 250 bytes.
        sl = SkipList()
        for i in range(1000):
            sl.insert(i, None)
        size = getsize(sl)
        self.assertIsInstance(size, int)
        self.assertGreater(size, 0)
        self.assertLess(size/1000, 250)
    def test_dump(self):
        # dump() writes a non-trivial human-readable representation.
        sl = SkipList()
        sl.insert('foo', 'bar')
        sl.insert('baz', 'qux')
        out = six.StringIO()
        dump(sl, out)
        s = out.getvalue()
        self.assertIsInstance(s, str)
        self.assertGreater(len(s), 20)
if __name__ == '__main__':
unittest.main()
| mit |
mach0/QGIS | src/plugins/grass/scripts/r.external.all.py | 45 | 3255 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
r.external.all.py
---------------------
Date : July 2009
Copyright : (C) 2009 by Lorenzo Masini
Email : rugginoso at develer dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Lorenzo Masini'
__date__ = 'July 2009'
__copyright__ = '(C) 2009, Lorenzo Masini'
############################################################################
#
# MODULE: qgis.r.external.all.py
# AUTHOR(S): Lorenzo Masini
#
# PURPOSE: Link all GDAL supported raster files into a directory
# to binary raster map layers.
# COPYRIGHT: (C) 2009 by Lorenzo Masini
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
#%Module
#% description: Link all GDAL supported raster files into a directory to binary raster map layers.
#% keywords: raster, import
#%End
#%option
#% key: input
#% type: string
#% gisprompt: input
#% key_desc : name
#% description: Directory containing raster files
#% required : yes
#%end
#%option
#% key: band
#% type: integer
#% description: Band to select
#% answer: 1
#% required : no
#%end
#%flag
#% key: o
#% description: Override projection (use location's projection)
#%end
#%flag
#% key: e
#% description: Extend location extents based on new dataset
#%end
#%flag
#% key: r
#% description: Recursively scan subdirectories
import os
try:
from grass.script import core as grass
except ImportError:
import grass
except:
raise Exception("Cannot find 'grass' Python module. Python is supported by GRASS from version >= 6.4")
def import_directory_of_rasters(directory, recursive):
    """Link every GDAL raster under *directory* with r.external.

    BUG FIX: the original version additionally called itself on each bare
    ``dirname`` from os.walk().  That recursed with paths relative to the
    current working directory (usually wrong) and duplicated work, since
    os.walk() already descends into subdirectories.  We now simply let the
    walk continue when *recursive* is requested, and stop after the
    top-level directory otherwise.
    """
    for dir, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            # Output/title: file name without its (assumed 3-character)
            # extension, as in the original script.
            if grass.run_command('r.external', flags=flags_string,
                                 input=os.path.join(dir, filename),
                                 band=options['band'],
                                 output=filename[:-4],
                                 title=filename[:-4]) != 0:
                grass.warning('Cannot import file' + filename)
        if not recursive:
            # Only the top-level directory was requested.
            break
def main():
    # `options` and `flags` are the parsed GRASS parameters, bound at
    # module level by the __main__ guard below.
    input = options['input']
    recursive = flags['r']
    import_directory_of_rasters(input, recursive)
if __name__ == "__main__":
options, flags = grass.parser()
flags_string = "".join([k for k in flags.keys() if flags[k] and k != 'r'])
main()
| gpl-2.0 |
Dmitry-Me/cppcheck | tools/test_matchcompiler.py | 2 | 8033 | #!/usr/bin/env python
#
# Cppcheck - A tool for static C/C++ code analysis
# Copyright (C) 2007-2016 Cppcheck team.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import matchcompiler
class MatchCompilerTest(unittest.TestCase):
    """Tests for the Token::Match-to-C++ match-function compiler."""
    def setUp(self):
        self.mc = matchcompiler.MatchCompiler(verify_mode=False)
    def test_parseMatch(self):
        # parseMatch returns [whole call, token expression, pattern args].
        self.assertEqual(self.mc.parseMatch(' Token::Match(tok, ";") ', 2), [
            'Token::Match(tok, ";")', 'tok', ' ";"'])
        self.assertEqual(self.mc.parseMatch(' Token::Match(tok,', 2), None)
        # multiline Token::Match is not supported yet
        self.assertEqual(self.mc.parseMatch(' Token::Match(Token::findsimplematch(tok,")"), ";")', 2), [
            'Token::Match(Token::findsimplematch(tok,")"), ";")',
            'Token::findsimplematch(tok,")")', ' ";"'])  # inner function call
    def test_replaceTokenMatch(self):
        # Each distinct static pattern is compiled to a numbered matchN().
        input = 'if (Token::Match(tok, "foobar")) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (match1(tok)) {')
        input = 'if (Token::Match(tok->next()->next(), "foobar %type% %num%")) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (match2(tok->next()->next())) {')
        input = 'if (Token::Match(tok, "foo\"special\"bar %num%")) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (match3(tok)) {')
        # test that non-static patterns get passed on unmatched
        input = 'if (Token::Match(tok, "struct " + varname)) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (Token::Match(tok, "struct " + varname)) {')
        # test that non-static patterns get passed on unmatched
        input = 'if (Token::Match(tok, "extern \"C\" " + varname)) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (Token::Match(tok, "extern \"C\" " + varname)) {')
    def test_replaceTokenMatchWithVarId(self):
        # %varid% patterns compile to matchN(tok, varid) calls.
        input = 'if (Token::Match(tok, "foobar %varid%", 123)) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (match1(tok, 123)) {')
        input = 'if (Token::Match(tok->next()->next(), "%varid% foobar", tok->varId())) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (match2(tok->next()->next(), tok->varId())) {')
        input = 'if (Token::Match(tok, "foo\"special\"bar %type% %varid%", my_varid_cache)) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (match3(tok, my_varid_cache)) {')
        # test caching: reuse existing matchX()
        input = 'if (Token::Match(tok, "foobar %varid%", 123)) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (match1(tok, 123)) {')
        # two in one line
        input = 'if (Token::Match(tok, "foobar2 %varid%", 123) || Token::Match(tok, "%type% %varid%", 123)) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (match4(tok, 123) || match5(tok, 123)) {')
    def test_replaceTokenSimpleMatch(self):
        input = 'if (Token::simpleMatch(tok, "foobar")) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (match1(tok)) {')
        input = 'if (Token::simpleMatch(tok->next()->next(), "foobar")) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (match1(tok->next()->next())) {')
        input = 'if (Token::simpleMatch(tok, "foo\"special\"bar")) {'
        output = self.mc._replaceTokenMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (match2(tok)) {')
    def test_replaceTokenFindSimpleMatch(self):
        # findsimplematch compiles to findmatchN(); note trailing space
        # before ')' in the generated code.
        input = 'if (Token::findsimplematch(tok, "foobar")) {'
        output = self.mc._replaceTokenFindMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (findmatch1(tok) ) {')
        input = 'if (Token::findsimplematch(tok->next()->next(), "foobar", tok->link())) {'
        output = self.mc._replaceTokenFindMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (findmatch2(tok->next()->next(), tok->link()) ) {')
        input = 'if (Token::findsimplematch(tok, "foo\"special\"bar")) {'
        output = self.mc._replaceTokenFindMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (findmatch3(tok) ) {')
    def test_replaceTokenFindMatch(self):
        input = 'if (Token::findmatch(tok, "foobar")) {'
        output = self.mc._replaceTokenFindMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (findmatch1(tok) ) {')
        # findmatch with varid
        input = 'if (Token::findmatch(tok, "foobar %varid%", tok->varId())) {'
        output = self.mc._replaceTokenFindMatch(input, 0, "foo.cpp")
        self.assertEqual(output, 'if (findmatch2(tok, tok->varId()) ) {')
        # findmatch with end token
        input = 'if (Token::findmatch(tok->next()->next(), "foobar %type%", tok->link())) {'
        output = self.mc._replaceTokenFindMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (findmatch3(tok->next()->next(), tok->link()) ) {')
        # findmatch with end token and varid
        input = 'if (Token::findmatch(tok->next()->next(), "foobar %type% %varid%", tok->link(), 123)) {'
        output = self.mc._replaceTokenFindMatch(input, 0, "foo.cpp")
        self.assertEqual(
            output, 'if (findmatch4(tok->next()->next(), tok->link(), 123) ) {')
    def test_parseStringComparison(self):
        input = 'str == "abc"'
        # offset '5' is chosen as an abritary start offset to look for
        res = self.mc._parseStringComparison(input, 5)
        self.assertEqual(2, len(res))
        self.assertEqual('str == MatchCompiler::makeConstString("abc")', input[:res[0]] +
                         "MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
        input = 'str == "a\\"b\\"c"'
        res = self.mc._parseStringComparison(input, 5)
        self.assertEqual(2, len(res))
        self.assertEqual('str == MatchCompiler::makeConstString("a\\"b\\"c")', input[:res[0]] +
                         "MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
    def test_replaceCStrings(self):
        # str() ==
        input = 'if (tok2->str() == "abc") {'
        output = self.mc._replaceCStrings(input)
        self.assertEqual('if (tok2->str() == MatchCompiler::makeConstString("abc")) {', output)
        # str() !=
        input = 'if (tok2->str() != "xyz") {'
        output = self.mc._replaceCStrings(input)
        self.assertEqual('if (tok2->str() != MatchCompiler::makeConstString("xyz")) {', output)
        # strAt()
        input = 'if (match16(parent->tokAt(-3)) && tok->strAt(1) == ")")'
        output = self.mc._replaceCStrings(input)
        self.assertEqual(
            'if (match16(parent->tokAt(-3)) && tok->strAt(1) == MatchCompiler::makeConstString(")"))',
            output)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
howthebodyworks/mlmistrels | src/encoder.py | 1 | 3657 | #!/bin/local/bin/python3
# A utility, script, or potentially one day class to convert audio waveforms in to windowed, reduced descriptors, for some machine learning algorithm to go nuts on later
# Authors: James Nichols, Darwin Vickers
# Includes a test of converting then reversing the predictor to see how things sound. Uses Librosa extensively.
import librosa
import matplotlib.pyplot as plt
#import seaborn as sns
import numpy as np
from util import compress
from reconstruct import reconstruct
def randomise_phase(D):
    """Return *D* with the complex phase of every entry replaced at random.

    Magnitudes are preserved: each element is multiplied by a unit-modulus
    rotation exp(i*theta), theta drawn uniformly from [0, 2*pi).
    """
    theta = 2.0 * np.pi * np.random.random(D.shape)
    return D * np.exp(1j * theta)
class Encode(object):
    """Base encoder defining the encode/decode interface.

    Subclasses turn raw audio into windowed descriptors and back; this
    base implementation is the identity transform in both directions.
    """

    def __init__(self, win_len=2048):
        # Analysis window length, in samples.
        self.win_len = win_len

    def encode(self, sound):
        """Identity: return the input unchanged."""
        return sound

    def decode(self, A):
        """Identity: return the descriptors unchanged."""
        return A
class SparseEncode(Encode):
    """ An encoder that uses sparse tensor representation of the spectrogram """
    def __init__(self, win_len = 2048, n_decomp = 4):
        # Imported lazily so scikit-learn is only required when this
        # encoder is actually instantiated.
        import sklearn.decomposition
        self.win_len = win_len
        self.n_decomp = n_decomp
        # Dictionary-learning transformer; fitted as a side effect of
        # librosa.decompose.decompose() in encode().
        self.T = sklearn.decomposition.MiniBatchDictionaryLearning(n_components=self.n_decomp)
    def encode(self, sound):
        # NOTE(review): stores the learned components on self, so decode()
        # is only meaningful after at least one encode() call.
        D = librosa.stft(sound, n_fft=self.win_len)
        self.comps, A = librosa.decompose.decompose(np.abs(D), transformer=self.T, sort=True)
        return A
    def decode(self, A):
        # Re-synthesise a magnitude spectrogram from the activations and
        # invert it with randomised phases.
        return librosa.istft(randomise_phase(self.comps.dot(A)))
class BinEncode(Encode):
    """Encoder that folds STFT rows into a fixed number of frequency
    bands by averaging, keeping the 0 Hz row separate.
    """

    def __init__(self, win_len=2048, n_bins=32):
        self.win_len = win_len
        # BUG FIX: ``n_bins`` was previously ignored (hard-coded to 32),
        # so the constructor argument had no effect.
        self.n_bins = n_bins
        # Number of STFT rows averaged into each band (row 0 excluded).
        self.bin_size = self.win_len // (2 * self.n_bins)

    def encode(self, sound):
        """Return an (n_bins + 1, frames) matrix of banded STFT averages."""
        D = librosa.stft(sound, n_fft=self.win_len)
        # BUG FIX: use builtin ``complex``; the ``np.complex`` alias was
        # removed in NumPy >= 1.24.
        A = np.zeros([self.n_bins + 1, D.shape[1]], dtype=complex)
        for t in range(D.shape[1]):
            # Keep the 0 Hz component on its own row.
            A[0, t] = D[0, t]
            # Mean of each band, rescaled by the band width ("bin power").
            A[1:, t] = np.array([np.mean(D[b_start:b_start + self.bin_size, t]) * self.bin_size
                                 for b_start in range(1, D.shape[0], self.bin_size)])
        return A

    def decode(self, A):
        """Expand the banded representation back to a full spectrogram and
        invert it with randomised phases.
        """
        D = np.zeros((self.win_len // 2 + 1, A.shape[1]), dtype=complex)
        for t in range(A.shape[1]):
            D[0, t] = A[0, t]
            # Cover each band with its (scaled) mean value.
            D[1:, t] = np.repeat(A[1:, t], self.bin_size)
        return librosa.istft(randomise_phase(D))
class PeaksEncode(Encode):
    """Encoder keeping only the strongest spectral peaks per frame (piptrack)."""

    hop_length = 0
    sound_length = 0

    def __init__(self, win_len=2048):
        self.win_len = win_len
        # fix: win_len / 4 produced a float under Python 3, which librosa
        # rejects as a hop_length; use integer division.
        self.hop_length = win_len // 4
        self.sr = 22050

    def encode(self, sound):
        """Extract the top pitch peaks of each frame as the feature matrix."""
        # fix: the original referenced undefined names 'lr' (the module is
        # imported as librosa) and 'audio' (the parameter is 'sound').
        H_pitch, H_pitch_mag = librosa.piptrack(
            sound, sr=self.sr, n_fft=self.win_len, hop_length=self.hop_length)
        features = compress(H_pitch, H_pitch_mag, n_peaks=16)
        return features

    def decode(self, A):
        """Resynthesise audio from peak features (see reconstruct.py)."""
        # fix: self.win_length did not exist; the attribute is self.win_len.
        reconstruct(A, n_fft=self.win_len, sr=self.sr,
                    hop_length=self.hop_length)
        return A
Balannen/LSMASOMM | atom3/Formalisms/LSMASOMM/canAccessKnArt.py | 1 | 4187 | """
__canAccessKnArt.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: bogdan
Modified: Sat Apr 14 23:09:41 2018
________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3String import *
from graph_canAccessKnArt import *
class canAccessKnArt(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.graphClass_ = graph_canAccessKnArt
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.ID=ATOM3String('accKA|', 20)
self.keyword_= self.ID
self.generatedAttributes = {'ID': ('ATOM3String', ) }
self.realOrder = ['ID']
self.directEditing = [1]
def clone(self):
cloneObject = canAccessKnArt( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
cloneObject.keyword_ = cloneObject.ID
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
self.keyword_ = self.ID
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if actionID == self.CONNECT:
res = self.ConstraintKnArt(params)
if res: return res
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def ConstraintKnArt(self, params):
from CustomCode import *
res = canAccessKnArtCheckConnections(self)
if res is "eitherRoleOrUnit":
return ("Either Role of OrgUnit can access knowledge.", self.graphObject_)
elif res is "onlyOneInput":
return ("Only one Role or OrgUnit can access one knowledge medium.", self.graphObject_)
elif res is "RoleWithOrgOnly":
return ("Role can access OrganisationalKnArt only!", self.graphObject_)
elif res is "OrgUnitWithIndivOnly":
return ("OrgUnit can access IndividualKnArt only!", self.graphObject_)
else:
return
def preAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <--- Remove this if you want to use QOCA
# Get the high level constraint helper and solver
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
# Constraint only makes sense if there exists 2 objects connected to this link
if(not (self.in_connections_ and self.out_connections_)): return
# Get the graphical objects (subclass of graphEntity/graphLink)
graphicalObjectLink = self.graphObject_
graphicalObjectSource = self.in_connections_[0].graphObject_
graphicalObjectTarget = self.out_connections_[0].graphObject_
objTuple = (graphicalObjectSource, graphicalObjectTarget, graphicalObjectLink)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.LeftExactDistance(objTuple, 20)
oc.resolve() # Resolve immediately after creating entity & constraint
| gpl-3.0 |
damdam-s/OpenUpgrade | addons/account_payment/wizard/account_payment_populate_statement.py | 274 | 4160 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class account_payment_populate_statement(osv.osv_memory):
    """Wizard that copies selected payment lines into a bank statement.

    Transient (osv_memory) model: the user picks payment lines and the
    wizard creates one statement line per selection on the active
    bank statement.
    """
    _name = "account.payment.populate.statement"
    _description = "Account Payment Populate Statement"
    _columns = {
        'lines': fields.many2many('payment.line', 'payment_line_rel_', 'payment_id', 'line_id', 'Payment Lines')
    }

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Restrict the 'lines' field to payment lines that are still
        eligible: valid, unreconciled, and either not yet on a statement
        or belonging to an order without a payment mode."""
        line_obj = self.pool.get('payment.line')

        res = super(account_payment_populate_statement, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
        line_ids = line_obj.search(cr, uid, [
            ('move_line_id.reconcile_id', '=', False),
            ('bank_statement_line_id', '=', False),
            ('move_line_id.state','=','valid')])
        # Also allow lines of orders that have no payment mode configured.
        line_ids.extend(line_obj.search(cr, uid, [
            ('move_line_id.reconcile_id', '=', False),
            ('order_id.mode', '=', False),
            ('move_line_id.state','=','valid')]))
        # Inject the computed id list as a domain on the view's field node.
        domain = '[("id", "in", '+ str(line_ids)+')]'
        doc = etree.XML(res['arch'])
        nodes = doc.xpath("//field[@name='lines']")
        for node in nodes:
            node.set('domain', domain)
        res['arch'] = etree.tostring(doc)
        return res

    def populate_statement(self, cr, uid, ids, context=None):
        """Create one bank statement line (in statement currency) for each
        selected payment line, then link the payment line back to it."""
        line_obj = self.pool.get('payment.line')
        statement_obj = self.pool.get('account.bank.statement')
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency_obj = self.pool.get('res.currency')

        if context is None:
            context = {}
        data = self.read(cr, uid, ids, context=context)[0]
        line_ids = data['lines']
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}

        # The statement being populated is the one the wizard was opened from.
        statement = statement_obj.browse(cr, uid, context['active_id'], context=context)

        for line in line_obj.browse(cr, uid, line_ids, context=context):
            ctx = context.copy()
            ctx['date'] = line.ml_maturity_date  # was value_date earlier,but this field exists no more now
            # Convert the amount from the line currency to the statement currency.
            amount = currency_obj.compute(cr, uid, line.currency.id,
                    statement.currency.id, line.amount_currency, context=ctx)

            st_line_vals = self._prepare_statement_line_vals(cr, uid, line, amount, statement, context=context)
            st_line_id = statement_line_obj.create(cr, uid, st_line_vals, context=context)
            # Remember which statement line consumed this payment line.
            line_obj.write(cr, uid, [line.id], {'bank_statement_line_id': st_line_id})
        return {'type': 'ir.actions.act_window_close'}

    def _prepare_statement_line_vals(self, cr, uid, payment_line, amount,
                                     statement, context=None):
        """Return the create() values for one statement line.

        Note the sign flip: a payment is money going out, so the statement
        line amount is negative.
        """
        return {
            'name': payment_line.order_id.reference or '?',
            'amount':-amount,
            'partner_id': payment_line.partner_id.id,
            'statement_id': statement.id,
            'ref': payment_line.communication,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
BaconPancakes/valor | lib/discord/opus.py | 17 | 8695 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import ctypes
import ctypes.util
import array
from .errors import DiscordException
import logging
import sys
import os.path
log = logging.getLogger(__name__)
c_int_ptr = ctypes.POINTER(ctypes.c_int)
c_int16_ptr = ctypes.POINTER(ctypes.c_int16)
c_float_ptr = ctypes.POINTER(ctypes.c_float)
class EncoderStruct(ctypes.Structure):
    """Opaque ctypes stand-in for the C ``OpusEncoder`` state struct."""

# Pointer type used in the exported-function signatures below.
EncoderStructPtr = ctypes.POINTER(EncoderStruct)
# A list of exported functions.
# The first argument is obviously the name.
# The second one are the types of arguments it takes.
# The third is the result type.
exported_functions = [
('opus_strerror', [ctypes.c_int], ctypes.c_char_p),
('opus_encoder_get_size', [ctypes.c_int], ctypes.c_int),
('opus_encoder_create', [ctypes.c_int, ctypes.c_int, ctypes.c_int, c_int_ptr], EncoderStructPtr),
('opus_encode', [EncoderStructPtr, c_int16_ptr, ctypes.c_int, ctypes.c_char_p, ctypes.c_int32], ctypes.c_int32),
('opus_encoder_ctl', None, ctypes.c_int32),
('opus_encoder_destroy', [EncoderStructPtr], None)
]
def libopus_loader(name):
    """Load the shared library called *name* and annotate its exports.

    Applies the argtypes/restype declared in ``exported_functions`` so
    ctypes marshals arguments and return values correctly. Propagates
    any exception raised while loading or resolving symbols.
    """
    lib = ctypes.cdll.LoadLibrary(name)

    for func_name, argtypes, restype in exported_functions:
        # fix: the original wrapped getattr in `try: ... except Exception as
        # e: raise e`, which is a no-op, and guarded the assignments with
        # `except KeyError: pass`, which could never fire (tuple indexing
        # raises IndexError, and every entry has three elements).
        func = getattr(lib, func_name)
        if argtypes:
            func.argtypes = argtypes
        func.restype = restype

    return lib
# Attempt to load libopus at import time; voice features check is_loaded()
# and callers may retry explicitly via load_opus(). Any failure here is
# deliberately swallowed so importing the module never raises.
try:
    if sys.platform == 'win32':
        # On Windows, ship and load the bundled DLL matching the
        # interpreter's bitness.
        _basedir = os.path.dirname(os.path.abspath(__file__))
        _bitness = 'x64' if sys.maxsize > 2**32 else 'x86'
        _filename = os.path.join(_basedir, 'bin', 'libopus-0.{}.dll'.format(_bitness))
        _lib = libopus_loader(_filename)
    else:
        _lib = libopus_loader(ctypes.util.find_library('opus'))
except Exception:
    # fix: the exception was previously bound to an unused name 'e'.
    _lib = None
def load_opus(name):
    """Load the libopus shared library for use with voice.

    If this function is never called, the module falls back to the library
    located via `ctypes.util.find_library`__ at import time. Without a
    loaded library, voice does not work.

    .. _find library: https://docs.python.org/3.5/library/ctypes.html#finding-shared-libraries

    __ `find library`_

    Any exception raised while loading is propagated to the caller.

    Warning
    --------
    The bitness of the library must match the bitness of the python
    interpreter: a 64-bit library requires a 64-bit interpreter. A
    mismatch usually makes the load raise.

    Note
    ----
    On Windows the ``.dll`` extension may be omitted. On Linux the full
    versioned name is required, e.g. ``libopus.so.1``, although
    `find library`_ usually locates the library without this call.

    Parameters
    ----------
    name: str
        The filename of the shared library.
    """
    global _lib
    _lib = libopus_loader(name)
def is_loaded():
    """Report whether libopus was loaded, either at import time via
    ``ctypes.util.find_library`` or explicitly via :func:`load_opus`.

    This must return ``True`` for voice to work.

    Returns
    -------
    bool
        Indicates if the opus library has been loaded.
    """
    # Reading the module global needs no 'global' declaration.
    return _lib is not None
class OpusError(DiscordException):
    """Raised for libopus related errors.

    Attributes
    ----------
    code : int
        The error code returned.
    """

    def __init__(self, code):
        self.code = code
        # Ask libopus for the human-readable message for this code.
        message = _lib.opus_strerror(code).decode('utf-8')
        log.info('"{}" has happened'.format(message))
        super().__init__(message)
class OpusNotLoaded(DiscordException):
    """Raised when voice is used while libopus has not been loaded."""
# Some constants...
# Opus return code for success.
OK = 0
# Encoder application profiles (OPUS_APPLICATION_* in opus_defines.h).
APPLICATION_AUDIO = 2049
APPLICATION_VOIP = 2048
APPLICATION_LOWDELAY = 2051
# opus_encoder_ctl() request codes (OPUS_SET_* in opus_defines.h).
CTL_SET_BITRATE = 4002
CTL_SET_BANDWIDTH = 4008
CTL_SET_FEC = 4012
CTL_SET_PLP = 4014
CTL_SET_SIGNAL = 4024

# Friendly name -> OPUS_BANDWIDTH_* value, used by Encoder.set_bandwidth.
band_ctl = {
    'narrow': 1101,
    'medium': 1102,
    'wide': 1103,
    'superwide': 1104,
    'full': 1105,
}

# Friendly name -> OPUS_SIGNAL_* value, used by Encoder.set_signal_type.
signal_ctl = {
    'auto': -1000,
    'voice': 3001,
    'music': 3002,
}
class Encoder:
    """ctypes wrapper around a libopus encoder state.

    Construction creates the native state and applies the default tuning
    (128 kbps, FEC on, 15% expected packet loss, full band, auto signal).
    Raises OpusNotLoaded if the library is unavailable.
    """

    def __init__(self, sampling, channels, application=APPLICATION_AUDIO):
        self.sampling_rate = sampling
        self.channels = channels
        self.application = application

        # Frame geometry: 20 ms frames of 16-bit PCM.
        self.frame_length = 20
        self.sample_size = 2 * self.channels # (bit_rate / 8) but bit_rate == 16
        self.samples_per_frame = int(self.sampling_rate / 1000 * self.frame_length)
        self.frame_size = self.samples_per_frame * self.sample_size

        if not is_loaded():
            raise OpusNotLoaded()

        self._state = self._create_state()
        self.set_bitrate(128)
        self.set_fec(True)
        self.set_expected_packet_loss_percent(0.15)
        self.set_bandwidth('full')
        self.set_signal_type('auto')

    def __del__(self):
        # Free the native encoder state; guard against a failed __init__.
        if hasattr(self, '_state'):
            _lib.opus_encoder_destroy(self._state)
            self._state = None

    def _create_state(self):
        """Allocate the native encoder; raises OpusError on failure."""
        ret = ctypes.c_int()
        result = _lib.opus_encoder_create(self.sampling_rate, self.channels, self.application, ctypes.byref(ret))

        if ret.value != 0:
            log.info('error has happened in state creation')
            raise OpusError(ret.value)

        return result

    def set_bitrate(self, kbps):
        """Set the target bitrate, clamped to [16, 128] kbps; returns the
        clamped value."""
        kbps = min(128, max(16, int(kbps)))

        ret = _lib.opus_encoder_ctl(self._state, CTL_SET_BITRATE, kbps * 1024)
        if ret < 0:
            log.info('error has happened in set_bitrate')
            raise OpusError(ret)

        return kbps

    def set_bandwidth(self, req):
        """Set the audio bandwidth; req must be a key of band_ctl."""
        if req not in band_ctl:
            raise KeyError('%r is not a valid bandwidth setting. Try one of: %s' % (req, ','.join(band_ctl)))

        k = band_ctl[req]
        ret = _lib.opus_encoder_ctl(self._state, CTL_SET_BANDWIDTH, k)

        if ret < 0:
            log.info('error has happened in set_bandwidth')
            raise OpusError(ret)

    def set_signal_type(self, req):
        """Hint the signal type to the encoder; req must be a key of signal_ctl."""
        if req not in signal_ctl:
            raise KeyError('%r is not a valid signal setting. Try one of: %s' % (req, ','.join(signal_ctl)))

        k = signal_ctl[req]
        ret = _lib.opus_encoder_ctl(self._state, CTL_SET_SIGNAL, k)

        if ret < 0:
            log.info('error has happened in set_signal_type')
            raise OpusError(ret)

    def set_fec(self, enabled=True):
        """Enable or disable in-band forward error correction."""
        ret = _lib.opus_encoder_ctl(self._state, CTL_SET_FEC, 1 if enabled else 0)

        if ret < 0:
            log.info('error has happened in set_fec')
            raise OpusError(ret)

    def set_expected_packet_loss_percent(self, percentage):
        """Set expected packet loss; percentage is a fraction in [0, 1]."""
        ret = _lib.opus_encoder_ctl(self._state, CTL_SET_PLP, min(100, max(0, int(percentage * 100))))

        if ret < 0:
            log.info('error has happened in set_expected_packet_loss_percent')
            raise OpusError(ret)

    def encode(self, pcm, frame_size):
        """Encode one frame of 16-bit PCM; returns the opus packet as bytes.

        frame_size is the per-channel sample count of the frame.
        """
        max_data_bytes = len(pcm)
        pcm = ctypes.cast(pcm, c_int16_ptr)
        data = (ctypes.c_char * max_data_bytes)()

        ret = _lib.opus_encode(self._state, pcm, frame_size, data, max_data_bytes)
        if ret < 0:
            log.info('error has happened in encode')
            raise OpusError(ret)

        # ret is the number of bytes actually written to the output buffer.
        return array.array('b', data[:ret]).tobytes()
| gpl-3.0 |
googleads/google-ads-python | google/ads/googleads/v7/enums/types/matching_function_operator.py | 1 | 1216 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v7.enums",
marshal="google.ads.googleads.v7",
manifest={"MatchingFunctionOperatorEnum",},
)
class MatchingFunctionOperatorEnum(proto.Message):
    r"""Container for enum describing matching function operator. """

    class MatchingFunctionOperator(proto.Enum):
        r"""Possible operators in a matching function."""
        # NOTE: proto-plus generated wrapper; numeric values mirror the
        # MatchingFunctionOperator proto in google/ads/googleads/v7.
        UNSPECIFIED = 0
        UNKNOWN = 1
        IN = 2
        IDENTITY = 3
        EQUALS = 4
        AND = 5
        CONTAINS_ANY = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
Lilykos/invenio | invenio/legacy/search_engine/utils.py | 19 | 1783 | # -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio search engine utilities."""
from invenio.legacy.dbquery import run_sql
from invenio.legacy.bibrecord import get_fieldvalues
def record_exists(recID):
    """Return 1 if record RECID exists.
    Return 0 if it doesn't exist.
    Return -1 if it exists but is marked as deleted.
    """
    from invenio.config import CFG_CERN_SITE

    # Coerce first: MySQL would happily match '123foo' as id=123 otherwise.
    try:
        recID = int(recID)
    except (ValueError, TypeError):
        return 0

    if not run_sql("SELECT id FROM bibrec WHERE id=%s", (recID,), 1):
        return 0

    # The row exists; a DELETED (or, on CERN site, DUMMY) collection tag
    # means it is only soft-deleted.
    dbcollids = get_fieldvalues(recID, "980__%")
    if "DELETED" in dbcollids or (CFG_CERN_SITE and "DUMMY" in dbcollids):
        return -1
    return 1
| gpl-2.0 |
jamescr/osm2gtfs | osm2gtfs/tests/creators/tests_cr_gam.py | 2 | 1547 | import unittest
import os
import logging
from osm2gtfs.tests.creators.creators_tests import CreatorsTestsAbstract
# Define logging level
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
class TestCreatorsCrGam(CreatorsTestsAbstract):
    """Creator tests for the cr_gam provider."""

    def _get_selector(self):
        """Selector that picks this provider's creators."""
        return "cr_gam"

    def _get_required_variables(self):
        """Reference values the shared creator tests assert against."""
        expected_files = [
            "agency.txt", "calendar.txt", "routes.txt",
            "shapes.txt", "stops.txt", "stop_times.txt", "trips.txt"
        ]
        return {
            'routes_count': 3,
            'stops_count': 33,
            'stations_count': 1,
            'stops_osm_count': 34,
            'route_id_to_check': 2,
            'gtfs_files': expected_files,
        }

    def _override_configuration(self):
        """Adjust the shared configuration for this provider's fixtures."""
        config = self.config.data
        config['stops']['name_auto'] = "no"
        # Read the timetable from the local fixture instead of the network.
        config['schedule_source'] = os.path.join(
            self.standard_variables['fixture_dir'], "timetable.json")
        # Pin the service period to the timeframe of the reference GTFS.
        config['start_date'] = "20200101"
        config['end_date'] = "20201231"
def load_tests(loader, tests, pattern):
    """Build the ordered suite of creator tests for cr_gam."""
    # pylint: disable=unused-argument
    case_names = [
        'test_refresh_routes_cache',
        'test_refresh_stops_cache',
        'test_gtfs_from_cache',
    ]
    return unittest.TestSuite(map(TestCreatorsCrGam, case_names))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
CSavvy/coursebuilder | models/content.py | 22 | 21971 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and methods for processing text content."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
from pyparsing import alphas
from pyparsing import Combine
from pyparsing import Each
from pyparsing import Group
from pyparsing import Literal
from pyparsing import nums
from pyparsing import Optional
from pyparsing import QuotedString
from pyparsing import Regex
from pyparsing import Suppress
from pyparsing import Word
from pyparsing import ZeroOrMore
from tools import verify
def sep(text):
    """Grammar for a separator token: matched but suppressed from output."""
    literal = Literal(text)
    return Suppress(literal)
def key(name):
    """Grammar for a key name: bare, single-quoted, or double-quoted."""
    bare = Literal(name)
    single_quoted = sep("'") + Literal(name) + sep("'")
    double_quoted = sep('"') + Literal(name) + sep('"')
    return bare ^ single_quoted ^ double_quoted
def list_of(term):
    """Grammar for a comma-delimited list of term.

    The list may be empty, and a trailing comma is allowed.
    """
    more_terms = ZeroOrMore(Suppress(Literal(',')) + term)
    trailing_comma = Optional(Suppress(Literal(',')))
    return Optional(term + more_terms + trailing_comma)
def chunks(l, n):
    """Partition the list l into disjoint sub-lists of length n.

    Raises:
        ValueError: if len(l) is not an exact multiple of n.
    """
    if len(l) % n != 0:
        # fix: the original raised Exception('... multiple on %s', n), which
        # never interpolated the message (the args were kept as a tuple) and
        # had a typo; ValueError is more precise and still a subclass of
        # Exception for existing callers.
        raise ValueError(
            'List length %d is not a multiple of %d' % (len(l), n))
    return [l[i:i + n] for i in range(0, len(l), n)]
def make_dict(unused_s, unused_l, toks):
    """Parse action: treat toks as alternating key/value items and build a
    dict (even positions are keys, odd positions are values)."""
    return {pair[0]: pair[1] for pair in chunks(toks, 2)}
def make_list(unused_s, unused_l, toks):
    """Parse action: convert each grouped token in toks to a plain list."""
    return [token.asList() for token in toks]
def make_bool(value):
    """Return a zero-argument parse action yielding a BOOLEAN verify.Term
    wrapping the given value."""
    def action():
        return verify.Term(verify.BOOLEAN, value)
    return action
def make_int(value):
    """Parse action: convert the first matched token to an int."""
    return int(value[0])
def make_float(value):
    """Parse action: convert the first matched token to a float."""
    return float(value[0])
class AssessmentParser13(object):
    """Grammar and parser for the assessment.

    Class attributes are pyparsing expressions built once at class
    definition time; parse actions convert matched text into Python
    values and verify.Term markers.
    """

    # NOTE(review): 'float' shadows the builtin; kept for compatibility with
    # existing references to AssessmentParser13.float.
    string = (
        QuotedString('\'', escChar='\\', multiline=True) ^
        QuotedString('"', escChar='\\', multiline=True))

    boolean = (
        Literal('true').setParseAction(make_bool(True)) ^
        Literal('false').setParseAction(make_bool(False)))

    float = Combine(
        Word(nums) + Optional(Literal('.') + Word(nums))
    ).setParseAction(make_float)

    integer = Word(nums).setParseAction(make_int)

    # A choice is a plain string, or correct("...") marking the right answer.
    choice_decl = (
        string ^
        Combine(
            sep('correct(') + string + sep(')')
        ).setParseAction(lambda x: verify.Term(verify.CORRECT, x[0]))
    )

    # A regex is either /.../i literal syntax or regex("...") call syntax.
    regex = (
        Regex('/(.*)/i') ^
        Combine(
            sep('regex(') +
            QuotedString('"', escChar='\\') +
            sep(')')
        ).setParseAction(lambda x: verify.Term(verify.REGEX, x[0]))
    )

    # One question object: Each() allows the keys in any order, all optional.
    question_decl = (
        sep('{') +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('lesson') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerString') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerRegex') + sep(':') +
                regex + Optional(sep(','))) +
            Optional(
                key('correctAnswerNumeric') + sep(':') +
                float + Optional(sep(','))) +
            Optional(
                key('choiceScores') + sep(':') +
                sep('[') +
                Group(list_of(float)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(','))) +
            Optional(
                key('weight') + sep(':') + integer + Optional(sep(','))) +
            Optional(
                key('multiLine') + sep(':') +
                boolean + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                sep('[') +
                Group(list_of(choice_decl)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(',')))
        ) +
        sep('}')).setParseAction(make_dict)

    # Top-level 'assessment = { ... };' statement.
    assessment_grammar = (
        sep('assessment') +
        sep('=') +
        sep('{') +
        Each(
            Optional(
                key('assessmentName') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('preamble') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('checkAnswers') + sep(':') +
                boolean + Optional(sep(','))) +
            Optional(
                key('questionsList') + sep(':') +
                sep('[') +
                Group(list_of(question_decl)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(',')))
        ) +
        sep('}') +
        Optional(sep(';'))).setParseAction(make_dict)

    @classmethod
    def parse_string(cls, content):
        # Returns the raw pyparsing ParseResults for content.
        return cls.assessment_grammar.parseString(content)

    @classmethod
    def parse_string_in_scope(cls, content, scope, root_name):
        """Parses assessment text following grammar."""
        if 'assessment' != root_name:
            raise Exception('Unsupported schema: %s', root_name)
        # we need to extract the results as a dictionary; so we remove the
        # outer array holding it
        ast = cls.parse_string(content).asList()
        if len(ast) == 1:
            ast = ast[0]
        # NOTE(review): concatenating .items() results requires Python 2
        # (lists); under Python 3 these are views and '+' raises.
        return dict(
            scope.items() +
            {'__builtins__': {}}.items() +
            {root_name: ast}.items())
class ActivityParser13(object):
    """Grammar and parser for the activity.

    An activity is a top-level list mixing HTML strings with question
    objects of three kinds: 'multiple choice', 'freetext', and
    'multiple choice group'.
    """

    variable = Word(alphas)
    integer = Word(nums).setParseAction(make_int)

    string = (
        QuotedString('\'', escChar='\\', multiline=True) ^
        QuotedString('"', escChar='\\', multiline=True))

    boolean = (
        Literal('true').setParseAction(make_bool(True)) ^
        Literal('false').setParseAction(make_bool(False)))

    # A regex is either /.../i literal syntax or regex("...") call syntax.
    regex = (
        Regex('/(.*)/i') ^
        Combine(
            sep('regex(') +
            QuotedString('"', escChar='\\') +
            sep(')')
        ).setParseAction(lambda x: verify.Term(verify.REGEX, x[0]))
    )

    # One choice triple: [text, is_correct, feedback].
    choice_decl = Group(
        sep('[') +
        string + sep(',') +
        boolean + sep(',') +
        string +
        sep(']')
    )

    choices_decl = Group(
        sep('[') +
        Optional(list_of(choice_decl)) +
        sep(']')
    ).setParseAction(make_list)

    # Discriminator for the 'multiple choice' question kind.
    multiple_choice_decl = (
        key('questionType') + sep(':') + key('multiple choice') +
        Optional(sep(','))
    )

    multiple_choice = (
        sep('{') +
        multiple_choice_decl +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                choices_decl + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    # Discriminator for the 'freetext' question kind.
    free_text_decl = (
        key('questionType') + sep(':') + key('freetext') +
        Optional(sep(','))
    )

    free_text = (
        sep('{') +
        free_text_decl +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('correctAnswerRegex') + sep(':') +
                regex + Optional(sep(','))) +
            Optional(
                key('correctAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('incorrectAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('showAnswerPrompt') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('showAnswerOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('outputHeight') + sep(':') +
                string + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    # One member question of a group; correctIndex may be a scalar or a list.
    question_list_decl = (
        sep('{') +
        Each(
            Optional(
                key('questionHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('choices') + sep(':') +
                sep('[') +
                Group(list_of(string)).setParseAction(make_list) +
                sep(']') +
                Optional(sep(','))) +
            Optional(
                key('correctIndex') + sep(':') +
                (integer ^ (
                    sep('[') +
                    Group(list_of(integer)).setParseAction(make_list) +
                    sep(']'))) +
                Optional(sep(','))) +
            Optional(
                key('multiSelect') + sep(':') +
                boolean + Optional(sep(','))),
        ) +
        sep('}')).setParseAction(make_dict)

    questions_list_decl = Group(
        sep('[') +
        Optional(list_of(question_list_decl)) +
        sep(']')
    ).setParseAction(make_list)

    # Discriminator for the 'multiple choice group' question kind.
    multiple_choice_group_decl = (
        key('questionType') + sep(':') + key('multiple choice group') +
        Optional(sep(','))
    )

    multiple_choice_group = (
        sep('{') +
        multiple_choice_group_decl +
        Each(
            Optional(
                key('questionGroupHTML') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('allCorrectMinCount') + sep(':') +
                integer + Optional(sep(','))) +
            Optional(
                key('allCorrectOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('someIncorrectOutput') + sep(':') +
                string + Optional(sep(','))) +
            Optional(
                key('questionsList') + sep(':') +
                questions_list_decl + Optional(sep(',')))
        ) +
        sep('}')
    ).setParseAction(make_dict)

    # Top-level 'activity = [ ... ];' statement.
    activity_grammar = (
        sep('activity') +
        sep('=') +
        sep('[') +
        Optional(list_of(
            string ^ multiple_choice ^ free_text ^ multiple_choice_group)) +
        sep(']') +
        Optional(sep(';')))

    @classmethod
    def parse_string(cls, content):
        # Returns the raw pyparsing ParseResults for content.
        return cls.activity_grammar.parseString(content)

    @classmethod
    def parse_string_in_scope(cls, content, scope, root_name):
        """Parses activity text following grammar."""
        if 'activity' != root_name:
            raise Exception('Unsupported schema: %s', root_name)
        # NOTE(review): concatenating .items() results requires Python 2
        # (lists); under Python 3 these are views and '+' raises.
        return dict(
            scope.items() +
            {'__builtins__': {}}.items() +
            {root_name: cls.parse_string(content).asList()}.items())
# Registry of all supported top-level parsers, keyed by schema root name.
SUPPORTED_PARSERS = {
    'activity': ActivityParser13, 'assessment': AssessmentParser13}
def verify_activity(activity_text):
    """Parse activity_text and run the semantic verifier over the result."""
    scope = verify.Activity().scope
    activity = ActivityParser13.parse_string_in_scope(
        activity_text, scope, 'activity')
    assert activity
    verify.Verifier().verify_activity_instance(activity, 'test')
def verify_assessment(assessment_text):
    """Parse assessment_text and run the semantic verifier over the result."""
    scope = verify.Assessment().scope
    assessment = AssessmentParser13.parse_string_in_scope(
        assessment_text, scope, 'assessment')
    assert assessment
    verify.Verifier().verify_assessment_instance(assessment, 'test')
def parse_string_in_scope(content, scope, root_name):
    """Parse content with the parser registered for root_name.

    Raises:
        Exception: if root_name has no registered parser.
    """
    parser = SUPPORTED_PARSERS.get(root_name)
    if not parser:
        # fix: the message was never interpolated (the args were kept as a
        # tuple on the Exception instance).
        raise Exception('Unsupported schema: %s' % root_name)
    return parser.parse_string_in_scope(content, scope, root_name)
def test_activity_multiple_choice_group():
    """Parse and verify an activity with 'multiple choice group' questions,
    covering scalar and list correctIndex, multiSelect, and group outputs."""
    activity_text = (
        """activity = [

        '<p>This is text.</p>',

        {
          questionType: 'multiple choice group',
          questionGroupHTML: '<p>This is text.</p>',
          allCorrectMinCount: 55,
          allCorrectOutput: '<p>This is text.</p>',
          someIncorrectOutput: '<p>This is text.</p>',
          questionsList: [
            {questionHTML: '<p>This is text.</p>'},
            {correctIndex: [1, 2, 3]},
            {questionHTML: '<p>This is text.</p>',
             correctIndex: 0, multiSelect: false,
             choices: ['foo', 'bar'],},
          ]
        },

        {
          "questionType": 'multiple choice group',
          questionGroupHTML:
              '<p>This section will test you on colors and numbers.</p>',
          questionsList: [
              {questionHTML: 'Pick all <i>odd</i> numbers:',
               choices: ['1', '2', '3', '4', '5'], correctIndex: [0, 2, 4]},
              {questionHTML: 'Pick one <i>even</i> number:',
               choices: ['1', '2', '3', '4', '5'], correctIndex: [1, 3],
               multiSelect: false},
              {questionHTML: 'What color is the sky?',
               choices: ['#00FF00', '#00FF00', '#0000FF'], correctIndex: 2}
          ],
          allCorrectMinCount: 2,
          allCorrectOutput: 'Great job! You know the material well.',
          someIncorrectOutput: 'You must answer at least two questions correctly.'
        }

        ];
        """)

    verify_activity(activity_text)
def test_activity_multiple_choice():
    """Parse and verify an activity with a plain 'multiple choice' question."""
    activity_text = (
        """activity = [

        '<p>This is text.</p>',

        {
          questionType: 'multiple choice',
          questionHTML: '<p>This is text.</p>',
          choices: [
              ['<p>This is text.</p>', false, '<p>This is text.</p>'],
              ['<p>This is text.</p>', true, '<p>This is text.</p>'],
          ]
        }

        ];
        """)

    verify_activity(activity_text)
def test_activity_free_text():
    """Test parsing of 'freetext' activity definitions."""
    # The first question uses every supported field (quoted key style);
    # the second omits the optional showAnswerPrompt and reorders fields.
    activity_text = (
        """activity = [
    '<p>This is text.</p>',
    {
      'questionType': 'freetext',
      questionHTML: '<p>This is text.</p>',
      showAnswerPrompt: '<p>This is text.</p>',
      showAnswerOutput: '<p>This is text.</p>',
      correctAnswerRegex: regex("/4|four/i"),
      correctAnswerOutput: '<p>This is text.</p>',
      incorrectAnswerOutput: '<p>This is text.</p>',
    },
    {
      questionType: 'freetext',
      questionHTML: '<p>What color is the snow?</p>',
      correctAnswerRegex: regex("/white/i"),
      correctAnswerOutput: 'Correct!',
      incorrectAnswerOutput: 'Try again.',
      showAnswerOutput: 'Our search expert says: white!' },
];
""")
    verify_activity(activity_text)
def test_assessment():
    """Test parsing of an assessment definition with all question kinds."""
    # The fixture covers multiple-choice (with correct()), weighted/scored
    # choices, string/numeric answers, and a regex() answer.
    # pylint: disable-msg=anomalous-backslash-in-string
    assessment_text = (
        """assessment = {
  assessmentName: '12345',
  preamble: '<p>This is text.</p>',
  checkAnswers: false,
  questionsList: [
    {questionHTML: '<p>This is text.</p>',
      choices:
        ["A and B", "D and B", correct("A and C"), "C and D", "I don't know"]
    },
    {questionHTML: '<p>This is text.</p>',
      choiceScores: [0, 0.5, 1.0],
      weight: 3,
      choices: [correct("True"), "False", "I don't know"]
    },
    {questionHTML: '<p>This is text.</p>',
      correctAnswerString: 'sunrise',
      correctAnswerNumeric: 7.9
    },
    {questionHTML: '<p>This is text.</p>',
      correctAnswerNumeric: 7,
      correctAnswerRegex: regex("/354\s*[+]\s*651/")
    }
  ],
};
""")
    # pylint: enable-msg=anomalous-backslash-in-string
    verify_assessment(assessment_text)
def test_activity_ast():
    """Test a mix of various activities using legacy and new parser."""
    activity_text = (
        """activity = [
    '<p>This is just some <i>HTML</i> text!</p>',
    { questionType: 'multiple choice',
      questionHTML: '<p>What letter am I thinking about now?</p>',
      choices: [
        ['A', false, '"A" is wrong, try again.'],
        ['B', true, '"B" is correct!'],
        ['C', false, '"C" is wrong, try again.'],
        ['D', false, '"D" is wrong, try again.']
      ]
    },
    { questionType: 'freetext',
      questionHTML: '<p>What color is the snow?</p>',
      correctAnswerRegex: regex("/white/i"),
      correctAnswerOutput: 'Correct!',
      incorrectAnswerOutput: 'Try again.',
      showAnswerOutput: 'Our search expert says: white!' },
    { questionType: 'multiple choice group',
      questionGroupHTML:
          '<p>This section will test you on colors and numbers.</p>',
      allCorrectMinCount: 2,
      questionsList: [
          {questionHTML: 'Pick all <i>odd</i> numbers:',
           choices: ['1', '2', '3', '4', '5'], correctIndex: [0, 2, 4]},
          {questionHTML: 'Pick one <i>even</i> number:',
           choices: ['1', '2', '3', '4', '5'], correctIndex: [1, 3],
           multiSelect: false},
          {questionHTML: 'What color is the sky?',
           choices: ['#00FF00', '#00FF00', '#0000FF'], correctIndex: 2}
      ],
      allCorrectOutput: 'Great job! You know the material well.',
      someIncorrectOutput: 'You must answer at least two questions correctly.'
    }
];
""")
    verify_activity(activity_text)
    # Parse the same text with the new grammar-based parser and with the
    # legacy eval()-based evaluator, then require identical ASTs.
    scope = verify.Activity().scope
    current_ast = ActivityParser13.parse_string_in_scope(
        activity_text, scope, 'activity')
    expected_ast = verify.legacy_eval_python_expression_for_test(
        activity_text, scope, 'activity')
    same = (
        len(current_ast.get('activity')) == 4 and
        current_ast.get('activity') == expected_ast.get('activity') and
        current_ast == expected_ast)
    if not same:
        # Dump both trees before failing, to make mismatches debuggable.
        import pprint # # pylint: disable-msg=g-import-not-at-top
        pprint.pprint(current_ast.get('activity'))
        pprint.pprint(expected_ast.get('activity'))
    assert same
def test_assessment_ast():
    """Test a mix of various activities using legacy and new parser."""
    # pylint: disable-msg=anomalous-backslash-in-string
    assessment_text = (
        """assessment = {
  preamble: '<p>This is text.</p>',
  questionsList: [
    {'questionHTML': '<p>This is text.</p>',
      choices:
        ["A and B", "D and B", correct("A and C"), "C and D", "I don't know"]
    },
    {"questionHTML": '<p>This is text.</p>',
      choices: [correct("True"), "False", "I don't know"],
      choiceScores: [0, 0.5, 1.0],
      weight: 3
    },
    {questionHTML: '<p>This is text.</p>',
      correctAnswerString: 'sunrise'
    },
    {questionHTML: '<p>This is text.</p>',
      correctAnswerRegex: regex("/354\s*[+]\s*651/")
    }
  ],
  assessmentName: 'Pre',
  checkAnswers: false
}
""")
    # pylint: enable-msg=anomalous-backslash-in-string
    verify_assessment(assessment_text)
    # Compare the grammar-based parser's AST with the legacy eval()-based
    # evaluator's result; both top-level size and deep equality must agree.
    scope = verify.Assessment().scope
    current_ast = AssessmentParser13.parse_string_in_scope(
        assessment_text, scope, 'assessment')
    expected_ast = verify.legacy_eval_python_expression_for_test(
        assessment_text, scope, 'assessment')
    same = (
        len(current_ast.get('assessment')) == 4 and
        len(current_ast.get('assessment').get('questionsList')) == 4 and
        current_ast.get('assessment') == expected_ast.get('assessment') and
        current_ast == expected_ast)
    if not same:
        # Dump both trees before failing, to make mismatches debuggable.
        import pprint # # pylint: disable-msg=g-import-not-at-top
        pprint.pprint(current_ast.get('assessment'))
        pprint.pprint(expected_ast.get('assessment'))
    assert same
def test_list_of():
    """Test delimited list parsing: separators, trailing commas, rejects."""
    grammar = Optional(
        Literal('[') +
        Optional(list_of(Literal('a') ^ Literal('b'))) +
        Literal(']'))
    # (expected tokens, input text) pairs accepted by the grammar.
    accepted = [
        (['[', ']'], '[]'),
        (['[', 'a', ']'], '[a]'),
        (['[', 'b', ']'], '[b]'),
        (['[', 'a', ']'], '[a,]'),
        (['[', 'b', ']'], '[b,]'),
        (['[', 'a', 'a', 'a', 'a', ']'], '[a, a, a, a]'),
        (['[', 'a', 'a', 'a', 'a', ']'], '[a,a,a,a]'),
        (['[', 'a', 'a', 'a', 'a', ']'], '[a,a,a,a,]'),
        (['[', 'a', 'b', 'a', 'b', ']'], '[a,b,a,b]'),
        (['[', 'b', 'a', 'b', 'a', ']'], '[b,a,b,a]'),
        (['[', 'b', 'b', 'b', 'b', ']'], '[b,b,b,b]'),
    ]
    for expected_tokens, text in accepted:
        assert str(expected_tokens) == str(grammar.parseString(text))
    # Inputs the (optional) grammar matches as empty: result is falsy.
    for text in ('', '[c]', '[a,c,b]'):
        assert not grammar.parseString(text)
def run_all_unit_tests():
    """Run all unit tests."""
    # Temporarily route verify's content parsing through the new parser,
    # restoring the original hook even if a test fails.
    original = verify.parse_content
    try:
        verify.parse_content = parse_string_in_scope
        for test in (
                test_list_of,
                test_activity_multiple_choice,
                test_activity_free_text,
                test_activity_multiple_choice_group,
                test_activity_ast,
                test_assessment,
                test_assessment_ast):
            test()
        # test existing verifier using parsing instead of exec/compile
        verify.test_sample_assets()
    finally:
        verify.parse_content = original
if __name__ == '__main__':
    # Allow this module to be executed directly as a test script.
    run_all_unit_tests()
| apache-2.0 |
mariosky/evo-drawings | venv/lib/python2.7/site-packages/django/utils/2to3_fixes/fix_unicode.py | 349 | 1181 | """Fixer for __unicode__ methods.
Uses the django.utils.encoding.python_2_unicode_compatible decorator.
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from lib2to3.fixer_util import find_indentation, Name, syms, touch_import
from lib2to3.pgen2 import token
from lib2to3.pytree import Leaf, Node
class FixUnicode(fixer_base.BaseFix):
    """2to3 fixer that renames ``__unicode__`` methods to ``__str__``.

    The enclosing class is additionally wrapped in Django's
    ``python_2_unicode_compatible`` decorator, and the decorator's import
    is added to the module when missing.
    """
    BM_compatible = True
    # lib2to3 pattern: any class that defines a __unicode__(self) method.
    PATTERN = """
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def' unifunc='__unicode__'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    """
    def transform(self, node, results):
        # Rename the matched __unicode__ method to __str__, keeping its
        # whitespace prefix intact.
        unifunc = results["unifunc"]
        strfunc = Name("__str__", prefix=unifunc.prefix)
        unifunc.replace(strfunc)
        # Re-create the class node under a @python_2_unicode_compatible
        # decorator, preserving original indentation and leading prefix.
        klass = node.clone()
        klass.prefix = '\n' + find_indentation(node)
        decorator = Node(syms.decorator, [Leaf(token.AT, "@"), Name('python_2_unicode_compatible')])
        decorated = Node(syms.decorated, [decorator, klass], prefix=node.prefix)
        node.replace(decorated)
        # Ensure the decorator's import exists in the rewritten module.
        touch_import('django.utils.encoding', 'python_2_unicode_compatible', decorated)
| agpl-3.0 |
flamholz/equilibrator | gibbs/info_pages.py | 1 | 1097 | from django.shortcuts import render_to_response
from equilibrator.gibbs import constants
from django.template.context import RequestContext
def AboutPage(request):
    """Renders the static 'about' page."""
    return render_to_response('about.html', {})
def FAQPage(request):
    """Renders the static FAQ page."""
    return render_to_response('faq.html', {})
def WhatsNewPage(request):
    """Renders the "what's new in 2.0" page."""
    # NOTE: original docstring said "FAQ page" — copy-paste error fixed.
    return render_to_response('new_in_2_0.html', {})
def CitePage(request):
    """Renders the citation guide page."""
    # NOTE: original docstring said "FAQ page" — copy-paste error fixed.
    return render_to_response('cite.html', {})
def ClassicReactions(request):
    """Renders the classic reactions page."""
    return render_to_response('classic_reactions.html', {})
def DownloadPage(request):
    """Renders the download page.

    The pH values from the configured range are pre-formatted to one
    decimal place for display in the template.
    """
    # List comprehension instead of map(lambda ...): idiomatic, and still a
    # list under Python 3 (map() would return a lazy iterator there).
    ph_values = ['%.1f' % x for x in constants.PH_RANGE_VALUES]
    return render_to_response('download.html', {'ph_values': ph_values},
                              context_instance=RequestContext(request))
def Robots(request):
"""Renders robots.txt."""
return render_to_response('robots.txt', {}) | mit |
AlexLitvino/pyASSA | examples/example_documentation_project/rules_definition.py | 1 | 7677 | # -----------------------------------------------------------------------------
# Copyright 2017 Aleksey Litvinov litvinov.aleks@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# This file will contain customization for specific project.
# Here only rule_*(**kwargs) should be implemented,
# or helper functions _function_name().
# -----------------------------------------------------------------------------
from pyassa.logger import result_logger
from pyassa.utils import add_description
from custom_utils import report_parser
import re
@add_description(" - [ERROR] Summary field should not be empty string.")
def rule_error_incorrect_summary(**kwargs):
    """Flag reports whose Summary field is an empty string."""
    parsed = report_parser(kwargs["script"])
    if parsed["Summary"] == '':
        result_logger.info("[ERROR] Summary is empty.")
@add_description(" - [WARNING] Summary field should not contain more than 20 words.")
def rule_warning_summary_longer_than_20_words(**kwargs):
    """Warn when the Summary field is longer than 20 words."""
    parsed = report_parser(kwargs["script"])
    word_count = len(parsed["Summary"].split())
    if word_count > 20:
        result_logger.info(
            "[WARNING] Summary is {summary_length} words long.".format(
                summary_length=word_count))
@add_description(" - [ERROR] Feature field should have predefined value - if Type=Defect or N/A - if Type=Enhancement.")
def rule_error_incorrect_feature(**kwargs):
    """Validate the Feature field against the issue Type.

    Defects must name a feature from the configured ``features`` list;
    Enhancements must use the literal 'N/A'. Unknown types are ignored.
    """
    report = kwargs["script"]
    features = kwargs["features"]
    parsed_report = report_parser(report)
    type_field = parsed_report["Type"]
    feature_field = parsed_report["Feature"]
    ERROR_MESSAGE_INCORRECT_FEATURE_FOR_DEFECT = "[ERROR] Feature is '{actual_feature}' instead of value from list " + str(features) + " for Defect issue."
    ERROR_MESSAGE_INCORRECT_FEATURE_FOR_ENHANCEMENT = "[ERROR] Feature is not N/A for Enhancement issue."
    if type_field == "Defect":
        if feature_field not in features:
            result_logger.info(ERROR_MESSAGE_INCORRECT_FEATURE_FOR_DEFECT.format(actual_feature=feature_field))
    elif type_field == "Enhancement":
        if feature_field != "N/A":
            result_logger.info(ERROR_MESSAGE_INCORRECT_FEATURE_FOR_ENHANCEMENT)
    else:
        pass # TODO: error handling should be here, unknown type
@add_description(" - [ERROR] Type field should be Defect or Enhancement.")
def rule_error_incorrect_type(**kwargs):
    """Flag reports whose Type is neither Defect nor Enhancement."""
    parsed = report_parser(kwargs["script"])
    issue_type = parsed["Type"]
    if issue_type not in ("Defect", "Enhancement"):
        result_logger.info(
            "[ERROR] Type is '{actual_type}' instead of Defect or "
            "Enhancement.".format(actual_type=issue_type))
@add_description(" - [ERROR] Build number field should be in format Number.Number.Number - if Type=Defect or "
                 "N/A - if Type=Enhancement.")
def rule_error_incorrect_build_number(**kwargs):
    """Validate the Build Number field against the issue Type.

    Defects need a dotted numeric version (N.N.N); Enhancements must use
    the literal 'N/A'. Unknown types are ignored.
    """
    report = kwargs["script"]
    parsed_report = report_parser(report)
    type_field = parsed_report["Type"]
    build_number_field = parsed_report["Build Number"]
    ERROR_MESSAGE_INCORRECT_BUILD_NUMBER_FOR_DEFECT = "[ERROR] Build Number is {actual_build_number} instead of " \
                                                      "to be in format Number.Number.Number for Defect issue."
    ERROR_MESSAGE_INCORRECT_BUILD_NUMBER_FOR_ENHANCEMENT = "[ERROR] Build Number is {actual_build_number} instead of " \
                                                           "N/A for Enhancement issue."
    if type_field == "Defect":
        # re.match anchors at the start only; trailing garbage after N.N.N
        # is accepted by this pattern.
        pattern = r"\d+\.\d+\.\d+"
        if not re.match(pattern, build_number_field):
            result_logger.info(ERROR_MESSAGE_INCORRECT_BUILD_NUMBER_FOR_DEFECT.format(actual_build_number=build_number_field))
    elif type_field == "Enhancement":
        if build_number_field != "N/A":
            result_logger.info(ERROR_MESSAGE_INCORRECT_BUILD_NUMBER_FOR_ENHANCEMENT.format(actual_build_number=build_number_field))
    else:
        pass # TODO: error handling should be here, unknown type
@add_description(" - [ERROR] Priority field should be one of the predefined values.")
def rule_error_incorrect_priority(**kwargs):
    """Flag reports whose Priority is not in the configured list."""
    allowed = kwargs["priorities"]
    parsed = report_parser(kwargs["script"])
    priority = parsed["Priority"]
    if priority not in allowed:
        message = ("[ERROR] Priority is '{actual_priority}' instead of "
                   "value from list: " + str(allowed) + ".")
        result_logger.info(message.format(actual_priority=priority))
@add_description(" - [ERROR] Reported By field should not be empty string.")
def rule_error_empty_reported_by(**kwargs):
    """Flag reports whose Reported By field is an empty string."""
    parsed = report_parser(kwargs["script"])
    if parsed["Reported By"] == '':
        result_logger.info("[ERROR] Reported By is empty.")
@add_description(" - [ERROR] Date field should be in format MM-DD-YYYY.")
def rule_error_incorrect_date_format(**kwargs):
    """Flag reports whose Reported On date is not valid MM-DD-YYYY."""
    report = kwargs["script"]
    parsed_report = report_parser(report)
    date_field = parsed_report["Reported On"]
    ERROR_MESSAGE_INCORRECT_DATE_FORMAT = "[ERROR] Date field '{date_field}' is not in format MM-DD-YYYY."
    # TODO: date validation is very primitive, suggest updating.
    # It checks format MM-DD-YYYY, and 1 <= MM <= 12, 1 <= DD <= 31 - for any month
    # (e.g. 02-31-2017 would pass).
    pattern = r"[01]\d-[0123]\d-\d{4}"
    is_date_valid = False
    if re.match(pattern, date_field):
        mm, dd, yyyy = date_field.split('-')
        if 1 <= int(mm) <= 12 and 1 <= int(dd) <= 31:
            is_date_valid = True
    if not is_date_valid:
        result_logger.info(ERROR_MESSAGE_INCORRECT_DATE_FORMAT.format(date_field=date_field))
@add_description(" - [ERROR] Environment field shouldn't be empty if Type=Defect.")
def rule_error_environment_empty_for_defect_issue(**kwargs):
    """Flag Defect reports whose Environment field is empty."""
    parsed = report_parser(kwargs["script"])
    # Only Defect issues require an Environment; other types are ignored.
    if parsed["Type"] == "Defect" and parsed["Environment"] == '':
        result_logger.info("[ERROR] Environment is empty when Type=Defect.")
@add_description(" - [ERROR] Description field shouldn't be empty string.")
def rule_error_empty_description(**kwargs):
    """Flag reports whose Description field is an empty string."""
    parsed = report_parser(kwargs["script"])
    if parsed["Description"] == '':
        result_logger.info("[ERROR] Description is empty.")
| apache-2.0 |
lucienfostier/gaffer | python/GafferArnoldUI/__init__.py | 7 | 2549 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
__import__( "GafferSceneUI" )
from ._GafferArnoldUI import *
from . import ArnoldShaderUI
from . import ArnoldRenderUI
from . import ShaderMenu
from . import ArnoldOptionsUI
from . import ArnoldAttributesUI
from . import ArnoldLightUI
from . import ArnoldVDBUI
from . import InteractiveArnoldRenderUI
from . import ArnoldDisplacementUI
from . import ArnoldMeshLightUI
from . import ArnoldShaderBallUI
from . import ArnoldAOVShaderUI
from . import ArnoldAtmosphereUI
from . import ArnoldBackgroundUI
from . import ArnoldTextureBakeUI
from . import ArnoldCameraShadersUI
from . import ArnoldLightFilterUI
from . import ArnoldColorManagerUI
from . import CacheMenu
__import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferArnoldUI" )
| bsd-3-clause |
normanjaeckel/openslides-votecollector | openslides_votecollector/apps.py | 1 | 2500 | from django.apps import AppConfig
from . import __description__, __verbose_name__, __version__
class VoteCollectorAppConfig(AppConfig):
    """Django AppConfig for the OpenSlides VoteCollector plugin.

    ready() wires the plugin into OpenSlides: projector elements, config
    variables, permission/seating-plan signals, REST viewsets and URLs.
    """
    name = 'openslides_votecollector'
    verbose_name = __verbose_name__
    description = __description__
    version = __version__
    angular_site_module = True
    angular_projector_module = True
    js_files = [
        'js/openslides_votecollector/base.js',
        'js/openslides_votecollector/site.js',
        'js/openslides_votecollector/projector.js'
    ]
    def ready(self):
        # Load projector elements.
        # Do this by just importing all from these files.
        from . import projector  # noqa
        # Import all required stuff.
        # NOTE: imports are deliberately deferred to ready() because the
        # app registry is not populated at module import time.
        from openslides.core.config import config
        from openslides.core.signals import post_permission_creation
        from openslides.utils.rest_api import router
        from .config_variables import get_config_variables
        from .signals import (
            add_default_seating_plan,
            add_permissions_to_builtin_groups
        )
        from .urls import urlpatterns
        from .views import (
            AssignmentPollKeypadConnectionViewSet,
            KeypadViewSet,
            MotionPollKeypadConnectionViewSet,
            SeatViewSet,
            VotecollectorViewSet
        )
        # Define config variables
        config.update_config_variables(get_config_variables())
        # Connect signals.
        post_permission_creation.connect(
            add_permissions_to_builtin_groups,
            dispatch_uid='votecollector_add_permissions_to_builtin_groups'
        )
        post_permission_creation.connect(
            add_default_seating_plan,
            dispatch_uid='votecollector_add_default_seating_plan'
        )
        # Register viewsets.
        router.register(self.get_model('VoteCollector').get_collection_string(), VotecollectorViewSet)
        router.register(self.get_model('Seat').get_collection_string(), SeatViewSet)
        router.register(self.get_model('Keypad').get_collection_string(), KeypadViewSet)
        router.register(self.get_model('MotionPollKeypadConnection').get_collection_string(),
                        MotionPollKeypadConnectionViewSet)
        router.register(self.get_model('AssignmentPollKeypadConnection').get_collection_string(),
                        AssignmentPollKeypadConnectionViewSet)
        # Provide plugin urlpatterns to application configuration
        self.urlpatterns = urlpatterns
| mit |
khwon/iTerm2 | tests/esctest/tests/cht.py | 31 | 1268 | import esccmd
from esctypes import Point
from escutil import AssertEQ, GetCursorPosition, knownBug
class CHTTests(object):
    """Terminal conformance tests for CHT (Cursor Horizontal Tab, CSI I)."""
    @knownBug(terminal="iTerm2", reason="iTerm2 doesn't support CHT")
    def test_CHT_OneTabStopByDefault(self):
        # With default 8-column tab stops, a single CHT lands on column 9.
        esccmd.CHT()
        position = GetCursorPosition()
        AssertEQ(position.x(), 9)
    @knownBug(terminal="iTerm2", reason="iTerm2 doesn't support CHT")
    def test_CHT_ExplicitParameter(self):
        # CHT(2) advances two tab stops: column 17.
        esccmd.CHT(2)
        position = GetCursorPosition()
        AssertEQ(position.x(), 17)
    @knownBug(terminal="iTerm2", reason="iTerm2 doesn't support CHT")
    @knownBug(terminal="xterm", reason="xterm respects scrolling regions for CHT")
    def test_CHT_IgnoresScrollingRegion(self):
        # Set a scroll region.
        esccmd.DECSET(esccmd.DECLRMM)
        esccmd.DECSLRM(5, 30)
        # Move to center of region
        esccmd.CUP(Point(7, 9))
        # Ensure we can tab within the region
        esccmd.CHT(2)
        position = GetCursorPosition()
        AssertEQ(position.x(), 17)
        # Ensure that we can tab out of the region
        esccmd.CHT(2)
        position = GetCursorPosition()
        AssertEQ(position.x(), 33)
        # Try again, starting before the region.
        esccmd.CUP(Point(1, 9))
        esccmd.CHT(9)
        position = GetCursorPosition()
        AssertEQ(position.x(), 73)
| gpl-2.0 |
leeon/annotated-django | tests/invalid_models_tests/test_ordinary_fields.py | 6 | 12212 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.core.checks import Error
from django.db import connection, models
from .base import IsolatedModelsTestCase
class AutoFieldTests(IsolatedModelsTestCase):
    """System checks for AutoField (check id fields.E100)."""
    def test_valid_case(self):
        class Model(models.Model):
            id = models.AutoField(primary_key=True)
        field = Model._meta.get_field('id')
        errors = field.check()
        expected = []
        self.assertEqual(errors, expected)
    def test_primary_key(self):
        # primary_key must be True. Refs #12467.
        class Model(models.Model):
            field = models.AutoField(primary_key=False)
            # Prevent Django from autocreating `id` AutoField, which would
            # result in an error, because a model must have exactly one
            # AutoField.
            another = models.IntegerField(primary_key=True)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                'AutoFields must set primary_key=True.',
                hint=None,
                obj=field,
                id='fields.E100',
            ),
        ]
        self.assertEqual(errors, expected)
class BooleanFieldTests(IsolatedModelsTestCase):
    """System checks for BooleanField (check id fields.E110)."""
    def test_nullable_boolean_field(self):
        class Model(models.Model):
            field = models.BooleanField(null=True)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                'BooleanFields do not accept null values.',
                hint='Use a NullBooleanField instead.',
                obj=field,
                id='fields.E110',
            ),
        ]
        self.assertEqual(errors, expected)
class CharFieldTests(IsolatedModelsTestCase):
    """System checks for CharField (check ids fields.E004-E006, E120-E121)."""
    def test_valid_field(self):
        class Model(models.Model):
            field = models.CharField(
                max_length=255,
                choices=[
                    ('1', 'item1'),
                    ('2', 'item2'),
                ],
                db_index=True)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = []
        self.assertEqual(errors, expected)
    def test_missing_max_length(self):
        class Model(models.Model):
            field = models.CharField()
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "CharFields must define a 'max_length' attribute.",
                hint=None,
                obj=field,
                id='fields.E120',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_negative_max_length(self):
        class Model(models.Model):
            field = models.CharField(max_length=-1)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'max_length' must be a positive integer.",
                hint=None,
                obj=field,
                id='fields.E121',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_bad_max_length_value(self):
        # Non-integer max_length raises the same E121 as a negative one.
        class Model(models.Model):
            field = models.CharField(max_length="bad")
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'max_length' must be a positive integer.",
                hint=None,
                obj=field,
                id='fields.E121',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_non_iterable_choices(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, choices='bad')
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'choices' must be an iterable (e.g., a list or tuple).",
                hint=None,
                obj=field,
                id='fields.E004',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_choices_containing_non_pairs(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)])
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'choices' must be an iterable containing (actual value, human readable name) tuples.",
                hint=None,
                obj=field,
                id='fields.E005',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_bad_db_index_value(self):
        class Model(models.Model):
            field = models.CharField(max_length=10, db_index='bad')
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'db_index' must be None, True or False.",
                hint=None,
                obj=field,
                id='fields.E006',
            ),
        ]
        self.assertEqual(errors, expected)
    @unittest.skipUnless(connection.vendor == 'mysql',
                         "Test valid only for MySQL")
    def test_too_long_char_field_under_mysql(self):
        # Backend-specific check: unique CharFields are limited to 255
        # characters by MySQL's index key length.
        from django.db.backends.mysql.validation import DatabaseValidation
        class Model(models.Model):
            field = models.CharField(unique=True, max_length=256)
        field = Model._meta.get_field('field')
        validator = DatabaseValidation(connection=None)
        errors = validator.check_field(field)
        expected = [
            Error(
                'MySQL does not allow unique CharFields to have a max_length > 255.',
                hint=None,
                obj=field,
                id='mysql.E001',
            )
        ]
        self.assertEqual(errors, expected)
class DecimalFieldTests(IsolatedModelsTestCase):
    """System checks for DecimalField (check ids fields.E130-E134)."""
    def test_required_attributes(self):
        class Model(models.Model):
            field = models.DecimalField()
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "DecimalFields must define a 'decimal_places' attribute.",
                hint=None,
                obj=field,
                id='fields.E130',
            ),
            Error(
                "DecimalFields must define a 'max_digits' attribute.",
                hint=None,
                obj=field,
                id='fields.E132',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_negative_max_digits_and_decimal_places(self):
        class Model(models.Model):
            field = models.DecimalField(max_digits=-1, decimal_places=-1)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'decimal_places' must be a non-negative integer.",
                hint=None,
                obj=field,
                id='fields.E131',
            ),
            Error(
                "'max_digits' must be a positive integer.",
                hint=None,
                obj=field,
                id='fields.E133',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_bad_values_of_max_digits_and_decimal_places(self):
        # Non-integer values raise the same E131/E133 as negative ones.
        class Model(models.Model):
            field = models.DecimalField(max_digits="bad", decimal_places="bad")
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'decimal_places' must be a non-negative integer.",
                hint=None,
                obj=field,
                id='fields.E131',
            ),
            Error(
                "'max_digits' must be a positive integer.",
                hint=None,
                obj=field,
                id='fields.E133',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_decimal_places_greater_than_max_digits(self):
        class Model(models.Model):
            field = models.DecimalField(max_digits=9, decimal_places=10)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'max_digits' must be greater or equal to 'decimal_places'.",
                hint=None,
                obj=field,
                id='fields.E134',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_valid_field(self):
        class Model(models.Model):
            field = models.DecimalField(max_digits=10, decimal_places=10)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = []
        self.assertEqual(errors, expected)
class FileFieldTests(IsolatedModelsTestCase):
    """System checks for FileField (check ids fields.E200-E201)."""
    def test_valid_case(self):
        class Model(models.Model):
            field = models.FileField(upload_to='somewhere')
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = []
        self.assertEqual(errors, expected)
    def test_unique(self):
        class Model(models.Model):
            field = models.FileField(unique=False, upload_to='somewhere')
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'unique' is not a valid argument for a FileField.",
                hint=None,
                obj=field,
                id='fields.E200',
            )
        ]
        self.assertEqual(errors, expected)
    def test_primary_key(self):
        class Model(models.Model):
            field = models.FileField(primary_key=False, upload_to='somewhere')
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "'primary_key' is not a valid argument for a FileField.",
                hint=None,
                obj=field,
                id='fields.E201',
            )
        ]
        self.assertEqual(errors, expected)
class FilePathFieldTests(IsolatedModelsTestCase):
    """System checks for FilePathField (check id fields.E140)."""
    def test_forbidden_files_and_folders(self):
        class Model(models.Model):
            field = models.FilePathField(allow_files=False, allow_folders=False)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.",
                hint=None,
                obj=field,
                id='fields.E140',
            ),
        ]
        self.assertEqual(errors, expected)
class GenericIPAddressFieldTests(IsolatedModelsTestCase):
    """System checks for GenericIPAddressField (check id fields.E150)."""
    def test_non_nullable_blank(self):
        class Model(models.Model):
            field = models.GenericIPAddressField(null=False, blank=True)
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [
            Error(
                ('GenericIPAddressFields cannot have blank=True if null=False, '
                 'as blank values are stored as nulls.'),
                hint=None,
                obj=field,
                id='fields.E150',
            ),
        ]
        self.assertEqual(errors, expected)
class ImageFieldTests(IsolatedModelsTestCase):
    """System checks for ImageField (check id fields.E210)."""
    def test_pillow_installed(self):
        # The expected errors depend on whether Pillow is importable in the
        # current environment: E210 is raised only when it is missing.
        try:
            from PIL import Image  # NOQA
        except ImportError:
            pillow_installed = False
        else:
            pillow_installed = True
        class Model(models.Model):
            field = models.ImageField(upload_to='somewhere')
        field = Model._meta.get_field('field')
        errors = field.check()
        expected = [] if pillow_installed else [
            Error(
                'Cannot use ImageField because Pillow is not installed.',
                hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
                      'or run command "pip install pillow".'),
                obj=field,
                id='fields.E210',
            ),
        ]
        self.assertEqual(errors, expected)
| bsd-3-clause |
h4wkmoon/shinken | shinken/misc/logevent.py | 1 | 4102 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 - Savoir-Faire Linux inc.
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import re
# Matches any log line this module knows how to parse further; group(1)
# captures the event type keyword.  Raw strings are used so regex escapes
# such as \[ and \; are not misread as (invalid) Python string escapes,
# which raise DeprecationWarning on modern interpreters.
event_type_pattern = re.compile(
    r'^\[[0-9]{10}] (?:HOST|SERVICE) '
    r'(ALERT|NOTIFICATION|FLAPPING|DOWNTIME|EVENT)(?: ALERT)?:.*')

# Per-event-type parsing rules: a capturing regex plus the ordered names of
# the properties its groups map to (group i+1 -> properties[i]).
event_types = {
    'NOTIFICATION': {
        # ex: "[1402515279] SERVICE NOTIFICATION: admin;localhost;check-ssh;CRITICAL;notify-service-by-email;Connection refused"
        'pattern': r'\[([0-9]{10})\] (HOST|SERVICE) (NOTIFICATION): ([^\;]*);([^\;]*);(?:([^\;]*);)?([^\;]*);([^\;]*);(ACKNOWLEDGEMENT)?.*',
        'properties': [
            'time',
            'notification_type',    # 'SERVICE' (or could be 'HOST')
            'event_type',           # 'NOTIFICATION'
            'contact',              # 'admin'
            'hostname',             # 'localhost'
            'service_desc',         # 'check-ssh' (or could be None)
            'state',                # 'CRITICAL'
            'notification_method',  # 'notify-service-by-email'
            # NOTE: misspelling kept on purpose -- consumers look the parsed
            # property up under this exact key.
            'acknownledgement',     # None or 'ACKNOWLEDGEMENT'
        ]
    },
    'ALERT': {
        # ex: "[1329144231] SERVICE ALERT: dfw01-is02-006;cpu load maui;WARNING;HARD;4;WARNING - load average: 5.04, 4.67, 5.04"
        'pattern': r'^\[([0-9]{10})] (HOST|SERVICE) (ALERT): ([^\;]*);(?:([^\;]*);)?([^\;]*);([^\;]*);([^\;]*);([^\;]*)',
        'properties': [
            'time',
            'alert_type',    # 'SERVICE' (or could be 'HOST')
            'event_type',    # 'ALERT'
            'hostname',      # 'localhost'
            'service_desc',  # 'cpu load maui' (or could be None)
            'state',         # 'WARNING'
            'state_type',    # 'HARD'
            'attempts',      # '4'
            'output',        # 'WARNING - load average: 5.04, 4.67, 5.04'
        ]
    },
    'DOWNTIME': {
        # ex: "[1279250211] HOST DOWNTIME ALERT: maast64;STARTED; Host has entered a period of scheduled downtime"
        'pattern': r'^\[([0-9]{10})\] (HOST|SERVICE) (DOWNTIME) ALERT: ([^\;]*);(STARTED|STOPPED|CANCELLED);(.*)',
        'properties': [
            'time',
            'downtime_type',  # 'SERVICE' (or could be 'HOST')
            'event_type',     # 'DOWNTIME'
            'hostname',       # 'maast64'
            'state',          # 'STARTED'
            'output',         # 'Host has entered a period of scheduled downtime'
        ]
    }
}
class LogEvent:
    """Parse a monitoring log line into named properties.

    The parsed fields are exposed through ``self.data`` (and through the
    mapping-style dunder methods below).  If the line does not match any
    known event pattern, ``data`` stays empty.
    """

    def __init__(self, log):
        self.data = {}
        # Find the type of event.
        event_type_match = event_type_pattern.match(log)
        if event_type_match:
            # The type regex also accepts FLAPPING/EVENT lines that have no
            # entry in event_types; .get() avoids a KeyError for those and
            # simply leaves self.data empty, like any other unparsed line.
            event_type = event_types.get(event_type_match.group(1))
            properties_match = (re.match(event_type['pattern'], log)
                                if event_type else None)
            if properties_match:
                # Populate self.data with the event's properties.
                for i, prop in enumerate(event_type['properties']):
                    self.data[prop] = properties_match.group(i + 1)
                # Convert the numeric fields to int.
                self.data['time'] = int(self.data['time'])
                if 'attempts' in self.data:
                    self.data['attempts'] = int(self.data['attempts'])

    def __iter__(self):
        # dict.iteritems() is Python-2-only; items() works on 2 and 3.
        return iter(self.data.items())

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        return self.data[key]

    def __contains__(self, key):
        return key in self.data

    def __str__(self):
        return str(self.data)
| agpl-3.0 |
aleprovencio/cookiecutter-django | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_urls.py | 116 | 1716 | from django.core.urlresolvers import reverse, resolve
from test_plus.test import TestCase
class TestUserURLs(TestCase):
    """Check that the users app URL names reverse and resolve correctly."""

    def setUp(self):
        self.user = self.make_user()

    def test_list_reverse(self):
        """users:list should reverse to /users/."""
        self.assertEqual(reverse('users:list'), '/users/')

    def test_list_resolve(self):
        """/users/ should resolve to users:list."""
        match = resolve('/users/')
        self.assertEqual(match.view_name, 'users:list')

    def test_redirect_reverse(self):
        """users:redirect should reverse to /users/~redirect/."""
        self.assertEqual(reverse('users:redirect'), '/users/~redirect/')

    def test_redirect_resolve(self):
        """/users/~redirect/ should resolve to users:redirect."""
        match = resolve('/users/~redirect/')
        self.assertEqual(match.view_name, 'users:redirect')

    def test_detail_reverse(self):
        """users:detail should reverse to /users/testuser/."""
        url = reverse('users:detail', kwargs={'username': 'testuser'})
        self.assertEqual(url, '/users/testuser/')

    def test_detail_resolve(self):
        """/users/testuser/ should resolve to users:detail."""
        match = resolve('/users/testuser/')
        self.assertEqual(match.view_name, 'users:detail')

    def test_update_reverse(self):
        """users:update should reverse to /users/~update/."""
        self.assertEqual(reverse('users:update'), '/users/~update/')

    def test_update_resolve(self):
        """/users/~update/ should resolve to users:update."""
        match = resolve('/users/~update/')
        self.assertEqual(match.view_name, 'users:update')
| bsd-3-clause |
WilsonWangTHU/fast-rcnn | lib/utils/blob.py | 63 | 1625 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Blob helper functions."""
import numpy as np
import cv2
def im_list_to_blob(ims):
    """Convert a list of images into a network input blob.

    Assumes the images are already prepared (means subtracted, BGR order,
    ...).  Images may differ in size; each is copied into the top-left
    corner of a zero-padded canvas sized to the largest image.

    Args:
        ims: list of H x W x 3 images (array-like, float values).

    Returns:
        float32 ndarray of shape (num_images, 3, max_H, max_W), i.e.
        NCHW axis order.
    """
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    blob = np.zeros((len(ims), max_shape[0], max_shape[1], 3),
                    dtype=np.float32)
    # enumerate() replaces the Python-2-only xrange() index loop.
    for i, im in enumerate(ims):
        blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
    # Move channels (axis 3) to axis 1.
    # Axis order becomes: (batch elem, channel, height, width)
    return blob.transpose((0, 3, 1, 2))
def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob.

    Returns the rescaled image together with the scale factor applied.
    """
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    height, width = im.shape[0], im.shape[1]
    shorter = min(height, width)
    longer = max(height, width)
    im_scale = float(target_size) / float(shorter)
    # Cap the scale so the longer axis never exceeds max_size.
    if np.round(im_scale * longer) > max_size:
        im_scale = float(max_size) / float(longer)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
| mit |
rallylee/gem5 | src/cpu/kvm/KvmVM.py | 14 | 2351 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
class KvmVM(SimObject):
    # Configuration object for the shared KVM virtual machine.
    type = 'KvmVM'
    cxx_header = "cpu/kvm/vm.hh"

    # Memory ranges where MMIO accesses should be coalesced by KVM.
    coalescedMMIO = VectorParam.AddrRange(
        [], "memory ranges for coalesced MMIO")
| bsd-3-clause |
smerritt/swift | test/functional/test_access_control.py | 3 | 109044 | #!/usr/bin/python
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from urlparse import urlparse, urlunparse
import uuid
from random import shuffle
from keystoneclient.v3 import client
from swiftclient import get_auth, http_connection
import test.functional as tf
def setUpModule():
    """Prepare the functional-test package before any test in this module."""
    tf.setup_package()
def tearDownModule():
    """Release the functional-test package after all tests have run."""
    tf.teardown_package()
# Field order shared by every RBAC test-case tuple defined below; the
# comments that follow this constant describe each field in detail.
TEST_CASE_FORMAT = (
    'http_method',
    'header',
    'account_name',
    'container_name',
    'object_name',
    'prep_container_header',
    'reseller_prefix',
    'target_user_name',
    'auth_user_name',
    'service_user_name',
    'expected',
)
# http_method : HTTP methods such as PUT, GET, POST, HEAD and so on
# header : headers for a request
# account_name : Account name. Usually the name will be automatically
# created by keystone
# container_name : Container name. If 'UUID' is specified, a container
# name will be created automatically
# object_name : Object name. If 'UUID' is specified, a container
# name will be created automatically
# prep_container_header : headers which will be set on the container
# reseller_prefix : Reseller prefix that will be used for request url.
# Can be None or SERVICE to select the user account
# prefix or the service prefix respectively
# target_user_name : a user name which is used for getting the project id
# of the target
# auth_user_name : a user name which is used for getting a token for
# X-Auth_Token
# service_user_name : a user name which is used for getting a token for
# X-Service-Token
# expected : expected status code
#
# a combination of account_name, container_name and object_name
# represents a target.
# +------------+--------------+-----------+---------+
# |account_name|container_name|object_name| target |
# +------------+--------------+-----------+---------+
# | None | None | None | account |
# +------------+--------------+-----------+---------+
# | None | 'UUID' | None |container|
# +------------+--------------+-----------+---------+
# | None | 'UUID' | 'UUID' | object |
# +------------+--------------+-----------+---------+
#
# The following users are required to run this functional test.
# No.6, tester6, is added for this test.
# +----+-----------+-------+---------+-------------+
# |No. | Domain |Project|User name| Role |
# +----+-----------+-------+---------+-------------+
# | 1 | default | test | tester | admin |
# +----+-----------+-------+---------+-------------+
# | 2 | default | test2 | tester2 | admin |
# +----+-----------+-------+---------+-------------+
# | 3 | default | test | tester3 | _member_ |
# +----+-----------+-------+---------+-------------+
# | 4 |test-domain| test4 | tester4 | admin |
# +----+-----------+-------+---------+-------------+
# | 5 | default | test5 | tester5 | service |
# +----+-----------+-------+---------+-------------+
# | 6 | default | test | tester6 |ResellerAdmin|
# +----+-----------+-------+---------+-------------+
# A scenario of put for account, container and object with
# several roles.
# RBAC matrix for PUT on containers and objects, exercised with several
# keystone roles.  Each tuple follows TEST_CASE_FORMAT (defined above).
RBAC_PUT = [
    # PUT container in own account: ok
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester', 'tester', None, 201),
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester', 'tester', 'tester', 201),
    # PUT container in other users account: not allowed for role admin
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester2', 'tester', None, 403),
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester4', 'tester', None, 403),
    # PUT container in other users account: not allowed for role _member_
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester3', 'tester3', None, 403),
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester2', 'tester3', None, 403),
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester4', 'tester3', None, 403),
    # PUT container in other users account: allowed for role ResellerAdmin
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester6', 'tester6', None, 201),
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester2', 'tester6', None, 201),
    ('PUT', None, None, 'UUID', None, None,
     None, 'tester4', 'tester6', None, 201),
    # PUT object in own account: ok
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester', 'tester', None, 201),
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester', 'tester', 'tester', 201),
    # PUT object in other users account: not allowed for role admin
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester2', 'tester', None, 403),
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester4', 'tester', None, 403),
    # PUT object in other users account: not allowed for role _member_
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester3', 'tester3', None, 403),
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester2', 'tester3', None, 403),
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester4', 'tester3', None, 403),
    # PUT object in other users account: allowed for role ResellerAdmin
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester6', 'tester6', None, 201),
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester2', 'tester6', None, 201),
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester4', 'tester6', None, 201)
]
RBAC_PUT_WITH_SERVICE_PREFIX = [
# PUT container in own account: ok
('PUT', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester5', 201),
# PUT container in other users account: not allowed for role service
('PUT', None, None, 'UUID', None, None,
None, 'tester', 'tester3', 'tester5', 403),
('PUT', None, None, 'UUID', None, None,
None, 'tester', None, 'tester5', 401),
('PUT', None, None, 'UUID', None, None,
None, 'tester5', 'tester5', None, 403),
('PUT', None, None, 'UUID', None, None,
None, 'tester2', 'tester5', None, 403),
('PUT', None, None, 'UUID', None, None,
None, 'tester4', 'tester5', None, 403),
# PUT object in own account: ok
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester5', 201),
# PUT object in other users account: not allowed for role service
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester3', 'tester5', 403),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester', None, 'tester5', 401),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester5', 'tester5', None, 403),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester5', None, 403),
('PUT', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester5', None, 403),
# All following actions are using SERVICE prefix
# PUT container in own account: ok
('PUT', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester5', 201),
# PUT container fails if wrong user, or only one token sent
('PUT', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('PUT', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', None, 403),
('PUT', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester', 403),
('PUT', None, None, 'UUID', None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# PUT object in own account: ok
('PUT', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester5', 201),
# PUT object fails if wrong user, or only one token sent
('PUT', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('PUT', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', None, 403),
('PUT', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester', 403),
('PUT', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', None, 'tester5', 401),
]
# A scenario of delete for account, container and object with
# several roles.
RBAC_DELETE = [
# DELETE container in own account: ok
('DELETE', None, None, 'UUID', None, None,
None, 'tester', 'tester', None, 204),
('DELETE', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester', 204),
# DELETE container in other users account: not allowed for role admin
('DELETE', None, None, 'UUID', None, None,
None, 'tester2', 'tester', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester4', 'tester', None, 403),
# DELETE container in other users account: not allowed for role _member_
('DELETE', None, None, 'UUID', None, None,
None, 'tester3', 'tester3', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester2', 'tester3', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester4', 'tester3', None, 403),
# DELETE container in other users account: allowed for role ResellerAdmin
('DELETE', None, None, 'UUID', None, None,
None, 'tester6', 'tester6', None, 204),
('DELETE', None, None, 'UUID', None, None,
None, 'tester2', 'tester6', None, 204),
('DELETE', None, None, 'UUID', None, None,
None, 'tester4', 'tester6', None, 204),
# DELETE object in own account: ok
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', None, 204),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester', 204),
# DELETE object in other users account: not allowed for role admin
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester', None, 403),
# DELETE object in other users account: not allowed for role _member_
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester3', 'tester3', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester3', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester3', None, 403),
# DELETE object in other users account: allowed for role ResellerAdmin
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester6', 'tester6', None, 204),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester6', None, 204),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester6', None, 204)
]
RBAC_DELETE_WITH_SERVICE_PREFIX = [
# DELETE container in own account: ok
('DELETE', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester5', 204),
# DELETE container in other users account: not allowed for role service
('DELETE', None, None, 'UUID', None, None,
None, 'tester', 'tester3', 'tester5', 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester', None, 'tester5', 401),
('DELETE', None, None, 'UUID', None, None,
None, 'tester5', 'tester5', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester2', 'tester5', None, 403),
('DELETE', None, None, 'UUID', None, None,
None, 'tester4', 'tester5', None, 403),
# DELETE object in own account: ok
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester5', 204),
# DELETE object in other users account: not allowed for role service
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester3', 'tester5', 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester', None, 'tester5', 401),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester5', 'tester5', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester5', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester5', None, 403),
# All following actions are using SERVICE prefix
# DELETE container in own account: ok
('DELETE', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester5', 204),
# DELETE container fails if wrong user, or only one token sent
('DELETE', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('DELETE', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', None, 403),
('DELETE', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester', 403),
('DELETE', None, None, 'UUID', None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# DELETE object in own account: ok
('DELETE', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester5', 204),
# DELETE object fails if wrong user, or only one token sent
('DELETE', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('DELETE', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', None, 403),
('DELETE', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester', 403),
('DELETE', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', None, 'tester5', 401)
]
# A scenario of get for account, container and object with
# several roles.
RBAC_GET = [
# GET own account: ok
('GET', None, None, None, None, None,
None, 'tester', 'tester', None, 200),
('GET', None, None, None, None, None,
None, 'tester', 'tester', 'tester', 200),
# GET other users account: not allowed for role admin
('GET', None, None, None, None, None,
None, 'tester2', 'tester', None, 403),
('GET', None, None, None, None, None,
None, 'tester4', 'tester', None, 403),
# GET other users account: not allowed for role _member_
('GET', None, None, None, None, None,
None, 'tester3', 'tester3', None, 403),
('GET', None, None, None, None, None,
None, 'tester2', 'tester3', None, 403),
('GET', None, None, None, None, None,
None, 'tester4', 'tester3', None, 403),
# GET other users account: allowed for role ResellerAdmin
('GET', None, None, None, None, None,
None, 'tester6', 'tester6', None, 200),
('GET', None, None, None, None, None,
None, 'tester2', 'tester6', None, 200),
('GET', None, None, None, None, None,
None, 'tester4', 'tester6', None, 200),
# GET container in own account: ok
('GET', None, None, 'UUID', None, None,
None, 'tester', 'tester', None, 200),
('GET', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester', 200),
# GET container in other users account: not allowed for role admin
('GET', None, None, 'UUID', None, None,
None, 'tester2', 'tester', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester4', 'tester', None, 403),
# GET container in other users account: not allowed for role _member_
('GET', None, None, 'UUID', None, None,
None, 'tester3', 'tester3', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester2', 'tester3', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester4', 'tester3', None, 403),
# GET container in other users account: allowed for role ResellerAdmin
('GET', None, None, 'UUID', None, None,
None, 'tester6', 'tester6', None, 200),
('GET', None, None, 'UUID', None, None,
None, 'tester2', 'tester6', None, 200),
('GET', None, None, 'UUID', None, None,
None, 'tester4', 'tester6', None, 200),
# GET object in own account: ok
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', None, 200),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester', 200),
# GET object in other users account: not allowed for role admin
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester', None, 403),
# GET object in other users account: not allowed for role _member_
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester3', 'tester3', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester3', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester3', None, 403),
# GET object in other users account: allowed for role ResellerAdmin
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester6', 'tester6', None, 200),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester6', None, 200),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester6', None, 200)
]
RBAC_GET_WITH_SERVICE_PREFIX = [
# GET own account: ok
('GET', None, None, None, None, None,
None, 'tester', 'tester', 'tester5', 200),
# GET other account: not allowed for role service
('GET', None, None, None, None, None,
None, 'tester', 'tester3', 'tester5', 403),
('GET', None, None, None, None, None,
None, 'tester', None, 'tester5', 401),
('GET', None, None, None, None, None,
None, 'tester5', 'tester5', None, 403),
('GET', None, None, None, None, None,
None, 'tester2', 'tester5', None, 403),
('GET', None, None, None, None, None,
None, 'tester4', 'tester5', None, 403),
# GET container in own account: ok
('GET', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester5', 200),
# GET container in other users account: not allowed for role service
('GET', None, None, 'UUID', None, None,
None, 'tester', 'tester3', 'tester5', 403),
('GET', None, None, 'UUID', None, None,
None, 'tester', None, 'tester5', 401),
('GET', None, None, 'UUID', None, None,
None, 'tester5', 'tester5', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester2', 'tester5', None, 403),
('GET', None, None, 'UUID', None, None,
None, 'tester4', 'tester5', None, 403),
# GET object in own account: ok
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester5', 200),
# GET object fails if wrong user, or only one token sent
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester3', 'tester5', 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester', None, 'tester5', 401),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester5', 'tester5', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester5', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester5', None, 403),
# All following actions are using SERVICE prefix
# GET own account: ok
('GET', None, None, None, None, None,
'SERVICE', 'tester', 'tester', 'tester5', 200),
# GET other account: not allowed for role service
('GET', None, None, None, None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('GET', None, None, None, None, None,
'SERVICE', 'tester', 'tester', None, 403),
('GET', None, None, None, None, None,
'SERVICE', 'tester', 'tester', 'tester', 403),
('GET', None, None, None, None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# GET container in own account: ok
('GET', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester5', 200),
# GET container fails if wrong user, or only one token sent
('GET', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('GET', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', None, 403),
('GET', None, None, 'UUID', None, None,
'SERVICE', 'tester', 'tester', 'tester', 403),
('GET', None, None, 'UUID', None, None,
'SERVICE', 'tester', None, 'tester5', 401),
# GET object in own account: ok
('GET', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester5', 200),
# GET object fails if wrong user, or only one token sent
('GET', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester3', 'tester5', 403),
('GET', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', None, 403),
('GET', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', 'tester', 'tester', 403),
('GET', None, None, 'UUID', 'UUID', None,
'SERVICE', 'tester', None, 'tester5', 401)
]
# A scenario of head for account, container and object with
# several roles.
RBAC_HEAD = [
# HEAD own account: ok
('HEAD', None, None, None, None, None,
None, 'tester', 'tester', None, 204),
('HEAD', None, None, None, None, None,
None, 'tester', 'tester', 'tester', 204),
# HEAD other users account: not allowed for role admin
('HEAD', None, None, None, None, None,
None, 'tester2', 'tester', None, 403),
('HEAD', None, None, None, None, None,
None, 'tester4', 'tester', None, 403),
# HEAD other users account: not allowed for role _member_
('HEAD', None, None, None, None, None,
None, 'tester3', 'tester3', None, 403),
('HEAD', None, None, None, None, None,
None, 'tester2', 'tester3', None, 403),
('HEAD', None, None, None, None, None,
None, 'tester4', 'tester3', None, 403),
# HEAD other users account: allowed for role ResellerAdmin
('HEAD', None, None, None, None, None,
None, 'tester6', 'tester6', None, 204),
('HEAD', None, None, None, None, None,
None, 'tester2', 'tester6', None, 204),
('HEAD', None, None, None, None, None,
None, 'tester4', 'tester6', None, 204),
# HEAD container in own account: ok
('HEAD', None, None, 'UUID', None, None,
None, 'tester', 'tester', None, 204),
('HEAD', None, None, 'UUID', None, None,
None, 'tester', 'tester', 'tester', 204),
# HEAD container in other users account: not allowed for role admin
('HEAD', None, None, 'UUID', None, None,
None, 'tester2', 'tester', None, 403),
('HEAD', None, None, 'UUID', None, None,
None, 'tester4', 'tester', None, 403),
# HEAD container in other users account: not allowed for role _member_
('HEAD', None, None, 'UUID', None, None,
None, 'tester3', 'tester3', None, 403),
('HEAD', None, None, 'UUID', None, None,
None, 'tester2', 'tester3', None, 403),
('HEAD', None, None, 'UUID', None, None,
None, 'tester4', 'tester3', None, 403),
# HEAD container in other users account: allowed for role ResellerAdmin
('HEAD', None, None, 'UUID', None, None,
None, 'tester6', 'tester6', None, 204),
('HEAD', None, None, 'UUID', None, None,
None, 'tester2', 'tester6', None, 204),
('HEAD', None, None, 'UUID', None, None,
None, 'tester4', 'tester6', None, 204),
# HEAD object in own account: ok
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', None, 200),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester', 'tester', 'tester', 200),
# HEAD object in other users account: not allowed for role admin
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester', None, 403),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester', None, 403),
# HEAD object in other users account: not allowed for role _member_
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester3', 'tester3', None, 403),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester3', None, 403),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester3', None, 403),
# HEAD object in other users account: allowed for role ResellerAdmin
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester6', 'tester6', None, 200),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester2', 'tester6', None, 200),
('HEAD', None, None, 'UUID', 'UUID', None,
None, 'tester4', 'tester6', None, 200)
]
# HEAD scenarios exercising the composite-token (service) accounts.
# Each case is an 11-tuple whose last element is the expected HTTP
# status; the 'SERVICE' field selects the service-account path prefix.
# NOTE(review): the remaining field semantics are inferred from the
# surrounding tables — confirm against the test harness.
RBAC_HEAD_WITH_SERVICE_PREFIX = []
# Without the SERVICE path prefix: only the user + service token pair
# ('tester' + 'tester5') may HEAD its own account/container/object.
for _cont, _obj, _ok in (
        (None, None, 204), ('UUID', None, 204), ('UUID', 'UUID', 200)):
    for _user, _tok, _svc, _status in (
            ('tester', 'tester', 'tester5', _ok),
            ('tester', 'tester3', 'tester5', 403),
            ('tester', None, 'tester5', 401),
            ('tester5', 'tester5', None, 403),
            ('tester2', 'tester5', None, 403),
            ('tester4', 'tester5', None, 403)):
        RBAC_HEAD_WITH_SERVICE_PREFIX.append(
            ('HEAD', None, None, _cont, _obj, None,
             None, _user, _tok, _svc, _status))
# With the SERVICE path prefix: both tokens must be present and valid;
# any missing or mismatched token is rejected.
for _cont, _obj, _ok in (
        (None, None, 204), ('UUID', None, 204), ('UUID', 'UUID', 200)):
    for _user, _tok, _svc, _status in (
            ('tester', 'tester', 'tester5', _ok),
            ('tester', 'tester3', 'tester5', 403),
            ('tester', 'tester', None, 403),
            ('tester', 'tester', 'tester', 403),
            ('tester', None, 'tester5', 401)):
        RBAC_HEAD_WITH_SERVICE_PREFIX.append(
            ('HEAD', None, None, _cont, _obj, None,
             'SERVICE', _user, _tok, _svc, _status))
del _cont, _obj, _ok, _user, _tok, _svc, _status
# A scenario of post for account, container and object with
# several roles.  Each case is an 11-tuple ending in the expected HTTP
# status.  NOTE(review): field semantics inferred from the surrounding
# tables — confirm against the test harness.
RBAC_POST = []
# For each level (account, container, object — with its own success
# code): owner ok, admin/member on another account denied,
# ResellerAdmin allowed everywhere.
for _cont, _obj, _ok in (
        (None, None, 204), ('UUID', None, 204), ('UUID', 'UUID', 202)):
    for _user, _tok, _svc, _status in (
            ('tester', 'tester', None, _ok),
            ('tester', 'tester', 'tester', _ok),
            ('tester2', 'tester', None, 403),
            ('tester4', 'tester', None, 403),
            ('tester3', 'tester3', None, 403),
            ('tester2', 'tester3', None, 403),
            ('tester4', 'tester3', None, 403),
            ('tester6', 'tester6', None, _ok),
            ('tester2', 'tester6', None, _ok),
            ('tester4', 'tester6', None, _ok)):
        RBAC_POST.append(
            ('POST', None, None, _cont, _obj, None,
             None, _user, _tok, _svc, _status))
del _cont, _obj, _ok, _user, _tok, _svc, _status
# POST scenarios exercising the composite-token (service) accounts.
# Same layout as RBAC_HEAD_WITH_SERVICE_PREFIX, with 202 for object
# POST instead of 200.  NOTE(review): field semantics inferred from the
# surrounding tables — confirm against the test harness.
RBAC_POST_WITH_SERVICE_PREFIX = []
# Without the SERVICE path prefix: only the user + service token pair
# ('tester' + 'tester5') may POST to its own resources.
for _cont, _obj, _ok in (
        (None, None, 204), ('UUID', None, 204), ('UUID', 'UUID', 202)):
    for _user, _tok, _svc, _status in (
            ('tester', 'tester', 'tester5', _ok),
            ('tester', 'tester3', 'tester5', 403),
            ('tester', None, 'tester5', 401),
            ('tester5', 'tester5', None, 403),
            ('tester2', 'tester5', None, 403),
            ('tester4', 'tester5', None, 403)):
        RBAC_POST_WITH_SERVICE_PREFIX.append(
            ('POST', None, None, _cont, _obj, None,
             None, _user, _tok, _svc, _status))
# With the SERVICE path prefix: both tokens must be present and valid.
for _cont, _obj, _ok in (
        (None, None, 204), ('UUID', None, 204), ('UUID', 'UUID', 202)):
    for _user, _tok, _svc, _status in (
            ('tester', 'tester', 'tester5', _ok),
            ('tester', 'tester3', 'tester5', 403),
            ('tester', 'tester', None, 403),
            ('tester', 'tester', 'tester', 403),
            ('tester', None, 'tester5', 401)):
        RBAC_POST_WITH_SERVICE_PREFIX.append(
            ('POST', None, None, _cont, _obj, None,
             'SERVICE', _user, _tok, _svc, _status))
del _cont, _obj, _ok, _user, _tok, _svc, _status
# A scenario of options for account, container and object with
# several roles.  Each case is an 11-tuple ending in the expected HTTP
# status.  NOTE(review): field semantics inferred from the surrounding
# tables — confirm against the test harness.
RBAC_OPTIONS = []
# Plain OPTIONS (no Origin header) is always allowed, for every role
# and at every level.
for _cont, _obj in ((None, None), ('UUID', None), ('UUID', 'UUID')):
    for _user, _tok, _svc in (
            ('tester', 'tester', None),
            ('tester', 'tester', 'tester'),
            ('tester2', 'tester', None),
            ('tester4', 'tester', None),
            ('tester3', 'tester3', None),
            ('tester2', 'tester3', None),
            ('tester4', 'tester3', None),
            ('tester6', 'tester6', None),
            ('tester2', 'tester6', None),
            ('tester4', 'tester6', None)):
        RBAC_OPTIONS.append(
            ('OPTIONS', None, None, _cont, _obj, None,
             None, _user, _tok, _svc, 200))
# CORS metadata on the container alone (no Origin header in the
# request) does not change the outcome of a plain OPTIONS.
for _cont, _obj in ((None, None), ('UUID', None), ('UUID', 'UUID')):
    for _origin in ('*', 'http://invalid.com'):
        RBAC_OPTIONS.append(
            ('OPTIONS', None, None, _cont, _obj,
             {'X-Container-Meta-Access-Control-Allow-Origin': _origin},
             None, 'tester', 'tester', None, 200))
del _cont, _obj, _user, _tok, _svc, _origin
# CORS preflight requests (Origin + Access-Control-Request-Method):
# allowed against the account regardless of metadata, but a container
# or object requires matching Allow-Origin metadata on the container.
RBAC_OPTIONS += [
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, None, None, None, None, 'tester', 'tester', None, 200),
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, None, None,
     {'X-Container-Meta-Access-Control-Allow-Origin': '*'},
     None, 'tester', 'tester', None, 200),
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, None, None,
     {'X-Container-Meta-Access-Control-Allow-Origin': 'http://invalid.com'},
     None, 'tester', 'tester', None, 200),
    # Not OK for container: no Allow-Origin metadata at all
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, 'UUID', None, None, None, 'tester', 'tester', None, 401),
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, 'UUID', None,
     {'X-Container-Meta-Access-Control-Allow-Origin': '*'},
     None, 'tester', 'tester', None, 200),
    # Not OK for container: wrong origin
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, 'UUID', None,
     {'X-Container-Meta-Access-Control-Allow-Origin': 'http://invalid.com'},
     None, 'tester', 'tester', None, 401),
    # Not OK for object: missing X-Container-Meta-Access-Control-Allow-Origin
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, 'UUID', 'UUID', None, None, 'tester', 'tester', None, 401),
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, 'UUID', 'UUID',
     {'X-Container-Meta-Access-Control-Allow-Origin': '*'},
     None, 'tester', None, None, 200),
    # Not OK for object: wrong origin
    ('OPTIONS',
     {'Origin': 'http://localhost', 'Access-Control-Request-Method': 'GET'},
     None, 'UUID', 'UUID',
     {'X-Container-Meta-Access-Control-Allow-Origin': 'http://invalid.com'},
     None, 'tester', 'tester', None, 401),
]
# OPTIONS scenarios exercising the composite-token (service) accounts.
# OPTIONS is always allowed (expected status 200) regardless of which
# tokens are supplied or whether the SERVICE path prefix is used.
# NOTE(review): field semantics inferred from the surrounding tables —
# confirm against the test harness.
RBAC_OPTIONS_WITH_SERVICE_PREFIX = []
# Without the SERVICE path prefix.
for _cont, _obj in ((None, None), ('UUID', None), ('UUID', 'UUID')):
    for _user, _tok, _svc in (
            ('tester', 'tester', 'tester5'),
            ('tester', 'tester3', 'tester5'),
            ('tester', None, 'tester5'),
            ('tester5', 'tester5', None),
            ('tester2', 'tester5', None),
            ('tester4', 'tester5', None)):
        RBAC_OPTIONS_WITH_SERVICE_PREFIX.append(
            ('OPTIONS', None, None, _cont, _obj, None,
             None, _user, _tok, _svc, 200))
# With the SERVICE path prefix.
for _cont, _obj in ((None, None), ('UUID', None), ('UUID', 'UUID')):
    for _user, _tok, _svc in (
            ('tester', 'tester', 'tester5'),
            ('tester', 'tester3', 'tester5'),
            ('tester', 'tester', None),
            ('tester', 'tester', 'tester'),
            ('tester', None, 'tester5')):
        RBAC_OPTIONS_WITH_SERVICE_PREFIX.append(
            ('OPTIONS', None, None, _cont, _obj, None,
             'SERVICE', _user, _tok, _svc, 200))
del _cont, _obj, _user, _tok, _svc
# A scenario of put for container ACL.
# Object PUT attempted by 'tester3' (who has no role on the target
# account) is gated purely by the container ACL header in the sixth
# field; the final field is the expected HTTP status.
ACL_PUT = []
# X-Container-Read never grants write access: every variant is denied.
for _acl in (
        'test:tester3', 'test:%(tester3_id)s', '%(test_id)s:tester3',
        '%(test_id)s:%(tester3_id)s', 'test:tester2', 'test:*', 'test',
        '%(test_id)s', 'test2:tester3', 'test2:tester2', 'test2:*',
        'test2', '*:tester3', '*:tester2', '*:*', '*', '.r:*',
        '.r:*,.rlistings', '.r:invalid.domain.com',
        '.r:invalid.domain.com,.rlistings', '.rlistings'):
    ACL_PUT.append(
        ('PUT', None, None, 'UUID', 'UUID',
         {'X-Container-Read': _acl},
         None, 'tester3', 'tester3', None, 403))
# X-Container-Write: allowed (201) only when the grant names tester3
# (by name or id, optionally via wildcard); everything else is denied.
for _acl, _status in (
        ('test:tester3', 201), ('test:%(tester3_id)s', 201),
        ('%(test_id)s:tester3', 201), ('%(test_id)s:%(tester3_id)s', 201),
        ('test:tester2', 403), ('test:*', 201), ('test', 403),
        ('%(test_id)s', 403), ('test2:tester3', 403),
        ('test2:tester2', 403), ('test2:*', 403), ('test2', 403),
        ('*:tester3', 201), ('*:tester2', 403), ('*:*', 201), ('*', 403)):
    ACL_PUT.append(
        ('PUT', None, None, 'UUID', 'UUID',
         {'X-Container-Write': _acl},
         None, 'tester3', 'tester3', None, _status))
# No ACL headers at all: denied.
ACL_PUT.append(
    ('PUT', None, None, 'UUID', 'UUID', None,
     None, 'tester3', 'tester3', None, 403))
del _acl, _status
# A scenario of delete for container ACL.
# Mirrors ACL_PUT: object DELETE by 'tester3' is gated purely by the
# container ACL header in the sixth field; success is 204 rather than
# 201, and the final field is the expected HTTP status.
ACL_DELETE = []
# X-Container-Read never grants delete access: every variant is denied.
for _acl in (
        'test:tester3', 'test:%(tester3_id)s', '%(test_id)s:tester3',
        '%(test_id)s:%(tester3_id)s', 'test:tester2', 'test:*', 'test',
        '%(test_id)s', 'test2:tester3', 'test2:tester2', 'test2:*',
        'test2', '*:tester3', '*:tester2', '*:*', '*', '.r:*',
        '.r:*,.rlistings', '.r:invalid.domain.com',
        '.r:invalid.domain.com,.rlistings', '.rlistings'):
    ACL_DELETE.append(
        ('DELETE', None, None, 'UUID', 'UUID',
         {'X-Container-Read': _acl},
         None, 'tester3', 'tester3', None, 403))
# X-Container-Write: allowed (204) only when the grant names tester3
# (by name or id, optionally via wildcard); everything else is denied.
for _acl, _status in (
        ('test:tester3', 204), ('test:%(tester3_id)s', 204),
        ('%(test_id)s:tester3', 204), ('%(test_id)s:%(tester3_id)s', 204),
        ('test:tester2', 403), ('test:*', 204), ('test', 403),
        ('%(test_id)s', 403), ('test2:tester3', 403),
        ('test2:tester2', 403), ('test2:*', 403), ('test2', 403),
        ('*:tester3', 204), ('*:tester2', 403), ('*:*', 204), ('*', 403)):
    ACL_DELETE.append(
        ('DELETE', None, None, 'UUID', 'UUID',
         {'X-Container-Write': _acl},
         None, 'tester3', 'tester3', None, _status))
# No ACL headers at all: denied.
ACL_DELETE.append(
    ('DELETE', None, None, 'UUID', 'UUID', None,
     None, 'tester3', 'tester3', None, 403))
del _acl, _status
# A scenario of get for container ACL
ACL_GET = [
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test:tester3'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test:%(tester3_id)s'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '%(test_id)s:tester3'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '%(test_id)s:%(tester3_id)s'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test:*'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '%(test_id)s'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test2:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test2:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test2:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': 'test2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '*:tester3'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '*:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '*:*'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '.r:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '.r:*,.rlistings'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '.r:invalid.domain.com'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '.r:invalid.domain.com,.rlistings'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Read': '.rlistings'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test:%(tester3_id)s'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': '%(test_id)s:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': '%(test_id)s:%(tester3_id)s'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': '%(test_id)s'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test2:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test2:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test2:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': 'test2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': '*:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': '*:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': '*:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
{'X-Container-Write': '*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', None,
None,
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test:tester3'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test:%(tester3_id)s'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '%(test_id)s:tester3'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '%(test_id)s:%(tester3_id)s'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test:*'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '%(test_id)s'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test2:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test2:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test2:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': 'test2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '*:tester3'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '*:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '*:*'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '.r:*'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '.r:*,.rlistings'},
None, 'tester3', 'tester3', None, 200),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '.r:invalid.domain.com'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '.r:invalid.domain.com,.rlistings'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Read': '.rlistings'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test:%(tester3_id)s'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': '%(test_id)s:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': '%(test_id)s:%(tester3_id)s'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': '%(test_id)s'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test2:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test2:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test2:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': 'test2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': '*:tester3'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': '*:tester2'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': '*:*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
{'X-Container-Write': '*'},
None, 'tester3', 'tester3', None, 403),
('GET',
None,
None, 'UUID', 'UUID',
None,
None, 'tester3', 'tester3', None, 403)
]
# A scenario of head for container ACL.
# Built from compact (acl, expected-status) tables; each generated entry has
# the same 11-tuple shape as the other ACL_* scenarios in this module.
ACL_HEAD = (
    # HEAD on the container: a read ACL matching tester3 expects 204.
    [('HEAD',
      None,
      None, 'UUID', None,
      {'X-Container-Read': acl},
      None, 'tester3', 'tester3', None, expected)
     for acl, expected in [
         ('test:tester3', 204),
         ('test:%(tester3_id)s', 204),
         ('%(test_id)s:tester3', 204),
         ('%(test_id)s:%(tester3_id)s', 204),
         ('test:tester2', 403),
         ('test:*', 204),
         ('test', 403),
         ('%(test_id)s', 403),
         ('test2:tester3', 403),
         ('test2:tester2', 403),
         ('test2:*', 403),
         ('test2', 403),
         ('*:tester3', 204),
         ('*:tester2', 403),
         ('*:*', 204),
         ('*', 403),
         ('.r:*', 403),
         ('.r:*,.rlistings', 204),
         ('.r:invalid.domain.com', 403),
         ('.r:invalid.domain.com,.rlistings', 403),
         ('.rlistings', 403)]] +
    # HEAD on the container: a write ACL alone never grants it (403).
    [('HEAD',
      None,
      None, 'UUID', None,
      {'X-Container-Write': acl},
      None, 'tester3', 'tester3', None, 403)
     for acl in ['test:tester3', 'test:%(tester3_id)s', '%(test_id)s:tester3',
                 '%(test_id)s:%(tester3_id)s', 'test:tester2', 'test:*',
                 'test', '%(test_id)s', 'test2:tester3', 'test2:tester2',
                 'test2:*', 'test2', '*:tester3', '*:tester2', '*:*', '*']] +
    # HEAD on the container with no ACL set at all: denied.
    [('HEAD',
      None,
      None, 'UUID', None,
      None,
      None, 'tester3', 'tester3', None, 403)] +
    # HEAD on an object: success is 200 here, and '.r:*' alone is
    # sufficient (unlike the container case above).
    [('HEAD',
      None,
      None, 'UUID', 'UUID',
      {'X-Container-Read': acl},
      None, 'tester3', 'tester3', None, expected)
     for acl, expected in [
         ('test:tester3', 200),
         ('test:%(tester3_id)s', 200),
         ('%(test_id)s:tester3', 200),
         ('%(test_id)s:%(tester3_id)s', 200),
         ('test:tester2', 403),
         ('test:*', 200),
         ('test', 403),
         ('%(test_id)s', 403),
         ('test2:tester3', 403),
         ('test2:tester2', 403),
         ('test2:*', 403),
         ('test2', 403),
         ('*:tester3', 200),
         ('*:tester2', 403),
         ('*:*', 200),
         ('*', 403),
         ('.r:*', 200),
         ('.r:*,.rlistings', 200),
         ('.r:invalid.domain.com', 403),
         ('.r:invalid.domain.com,.rlistings', 403),
         ('.rlistings', 403)]] +
    # HEAD on an object: a write ACL alone never grants it (403).
    [('HEAD',
      None,
      None, 'UUID', 'UUID',
      {'X-Container-Write': acl},
      None, 'tester3', 'tester3', None, 403)
     for acl in ['test:tester3', 'test:%(tester3_id)s', '%(test_id)s:tester3',
                 '%(test_id)s:%(tester3_id)s', 'test:tester2', 'test:*',
                 'test', '%(test_id)s', 'test2:tester3', 'test2:tester2',
                 'test2:*', 'test2', '*:tester3', '*:tester2', '*:*', '*']] +
    # HEAD on an object with no ACL set at all: denied.
    [('HEAD',
      None,
      None, 'UUID', 'UUID',
      None,
      None, 'tester3', 'tester3', None, 403)]
)
# A scenario of post for container ACL.
# Every case targets an object ('UUID' container and object names are
# replaced with fresh uuids by _convert_data).
ACL_POST = (
    # A read ACL alone never grants POST (403), whatever its value.
    [('POST',
      None,
      None, 'UUID', 'UUID',
      {'X-Container-Read': acl},
      None, 'tester3', 'tester3', None, 403)
     for acl in ['test:tester3', 'test:%(tester3_id)s', '%(test_id)s:tester3',
                 '%(test_id)s:%(tester3_id)s', 'test:tester2', 'test:*',
                 'test', '%(test_id)s', 'test2:tester3', 'test2:tester2',
                 'test2:*', 'test2', '*:tester3', '*:tester2', '*:*', '*',
                 '.r:*', '.r:*,.rlistings', '.r:invalid.domain.com',
                 '.r:invalid.domain.com,.rlistings', '.rlistings']] +
    # A write ACL matching tester3 allows POST (202).
    [('POST',
      None,
      None, 'UUID', 'UUID',
      {'X-Container-Write': acl},
      None, 'tester3', 'tester3', None, expected)
     for acl, expected in [
         ('test:tester3', 202),
         ('test:%(tester3_id)s', 202),
         ('%(test_id)s:tester3', 202),
         ('%(test_id)s:%(tester3_id)s', 202),
         ('test:tester2', 403),
         ('test:*', 202),
         ('test', 403),
         ('%(test_id)s', 403),
         ('test2:tester3', 403),
         ('test2:tester2', 403),
         ('test2:*', 403),
         ('test2', 403),
         ('*:tester3', 202),
         ('*:tester2', 403),
         ('*:*', 202),
         ('*', 403)]] +
    # No ACL at all: denied.
    [('POST',
      None,
      None, 'UUID', 'UUID',
      None,
      None, 'tester3', 'tester3', None, 403)] +
    # With a granting write ACL, the read ACL value is irrelevant: all 202.
    [('POST',
      None,
      None, 'UUID', 'UUID',
      {'X-Container-Read': acl, 'X-Container-Write': 'test:tester3'},
      None, 'tester3', 'tester3', None, 202)
     for acl in ['test', 'test:tester3', 'test:tester2', 'test:%(tester3_id)s',
                 'test:*', 'test2', 'test2:tester3', 'test2:tester2',
                 'test2:*', '%(test_id)s', '%(test_id)s:tester3',
                 '%(test_id)s:%(tester3_id)s', '*', '*:tester3', '*:tester2',
                 '*:*', '.rlistings', '.r:*', '.r:*,.rlistings',
                 '.r:invalid.domain.com', '.r:invalid.domain.com,.rlistings']]
)
# A scenario of options for container ACL.
# Every combination — container or object target, any read/write ACL or no
# ACL at all — expects 200: OPTIONS is not subject to the ACL checks.
ACL_OPTIONS = [
    ('OPTIONS',
     None,
     None, 'UUID', obj_name,
     headers,
     None, 'tester3', 'tester3', None, 200)
    # First all container-level cases (obj_name None), then the same header
    # sequence against an object.
    for obj_name in (None, 'UUID')
    for headers in (
        [{'X-Container-Read': acl}
         for acl in ['test:tester3', 'test:%(tester3_id)s',
                     '%(test_id)s:tester3', '%(test_id)s:%(tester3_id)s',
                     'test:tester2', 'test:*', 'test', '%(test_id)s',
                     'test2:tester3', 'test2:tester2', 'test2:*', 'test2',
                     '*:tester3', '*:tester2', '*:*', '*', '.r:*',
                     '.r:*,.rlistings', '.r:invalid.domain.com',
                     '.r:invalid.domain.com,.rlistings', '.rlistings']] +
        [{'X-Container-Write': acl}
         for acl in ['test:tester3', 'test:%(tester3_id)s',
                     '%(test_id)s:tester3', '%(test_id)s:%(tester3_id)s',
                     'test:tester2', 'test:*', 'test', '%(test_id)s',
                     'test2:tester3', 'test2:tester2', 'test2:*', 'test2',
                     '*:tester3', '*:tester2', '*:*', '*']] +
        [None])
]
# Format of the simpler RBAC /info scenarios below:
# http_method : HTTP methods such as PUT, GET, POST, HEAD and so on
# auth_user_name : a user name which is used for getting a token
#                  (None means the request is sent unauthenticated)
# expected : expected status code
TEST_CASE_INFO_FORMAT = ('http_method', 'auth_user_name', 'expected')
# Every user — and the unauthenticated case (None) — expects 200.
RBAC_INFO_GET = [
    ('GET', 'tester', 200),
    ('GET', 'tester6', 200),
    ('GET', 'tester3', 200),
    ('GET', None, 200)
]
RBAC_INFO_HEAD = [
    ('HEAD', 'tester', 200),
    ('HEAD', 'tester6', 200),
    ('HEAD', 'tester3', 200),
    ('HEAD', None, 200)
]
RBAC_INFO_OPTIONS = [
    ('OPTIONS', 'tester', 200),
    ('OPTIONS', 'tester6', 200),
    ('OPTIONS', 'tester3', 200),
    ('OPTIONS', None, 200)
]
# tester5-only variants used by the service-prefix test runs.
RBAC_INFO_GET_WITH_SERVICE_PREFIX = [
    ('GET', 'tester5', 200)
]
RBAC_INFO_HEAD_WITH_SERVICE_PREFIX = [
    ('HEAD', 'tester5', 200)
]
RBAC_INFO_OPTIONS_WITH_SERVICE_PREFIX = [
    ('OPTIONS', 'tester5', 200)
]
class BaseClient(object):
    """Common credential/endpoint bookkeeping shared by the test clients."""

    def __init__(self):
        self._set_users()
        self.auth_url = tf.swift_test_auth
        self.insecure = tf.insecure
        self.auth_version = tf.swift_test_auth_version

    def _set_users(self):
        # Map each configured test user name to its credentials; the test
        # config defines six users (indexes 0-5).
        self.users = {
            tf.swift_test_user[idx]: {
                'account': tf.swift_test_tenant[idx],
                'password': tf.swift_test_key[idx],
                'domain': tf.swift_test_domain[idx],
            }
            for idx in range(6)
        }
class KeystoneClient(BaseClient):
    """Helper that resolves keystone user and project ids for the tests."""

    def get_id_info(self):
        """Return a dict mapping '<name>_id' to the keystone id.

        Both the user id (keyed by user name) and the project id (keyed by
        account/tenant name) are included for every non-empty user name.
        """
        id_info = {}
        for user_name, user_info in self.users.iteritems():
            if user_name == '':
                continue
            user_id, project_id = self._get_id(user_name)
            id_info['%s_id' % user_name] = user_id
            id_info['%s_id' % user_info['account']] = project_id
        return id_info

    def _get_id(self, user_name):
        # Authenticate as the given user and read the ids off the session.
        creds = self.users.get(user_name)
        ks_client = client.Client(
            auth_url=self.auth_url,
            version=(self.auth_version,),
            username=user_name,
            password=creds['password'],
            project_name=creds['account'],
            project_domain_name=creds['domain'],
            user_domain_name=creds['domain'])
        return ks_client.user_id, ks_client.project_id
class SwiftClient(BaseClient):
    """Minimal Swift HTTP client with a per-user auth-token cache."""

    # Class-level cache shared by all instances: user -> (url, token).
    _tokens = {}

    def _get_auth(self, user_name):
        # Authenticate against the configured auth endpoint; unknown users
        # yield (None, None) so callers can issue unauthenticated requests.
        creds = self.users.get(user_name)
        if creds is None:
            return None, None
        os_opts = {'user_domain_name': creds['domain'],
                   'project_domain_name': creds['domain']}
        auth_kwargs = dict(snet=False, tenant_name=creds['account'],
                           auth_version=self.auth_version,
                           os_options=os_opts, insecure=self.insecure)
        return get_auth(self.auth_url, user_name, creds['password'],
                        **auth_kwargs)

    def auth(self, user_name):
        """Return (storage_url, token), authenticating on first use."""
        cached = SwiftClient._tokens.get(user_name, (None, None))
        if cached[1]:
            return cached
        SwiftClient._tokens[user_name] = self._get_auth(user_name)
        return SwiftClient._tokens.get(user_name)

    def send_request(self, method, url, token=None, headers=None,
                     service_token=None):
        """Issue a single HTTP request and return the raw response."""
        # Copy the caller's headers so the input dict is never mutated.
        req_headers = {} if headers is None else dict(headers)
        req_headers['Content-Type'] = 'application/json'
        req_headers['Accept'] = 'application/json'
        if token:
            req_headers['X-Auth-Token'] = token
        if service_token:
            req_headers['X-Service-Token'] = service_token
        if self.insecure:
            parsed, conn = http_connection(url, insecure=self.insecure)
        else:
            parsed, conn = http_connection(url)
        conn.request(method, parsed.path, headers=req_headers)
        return conn.getresponse()
class BaseTestAC(unittest.TestCase):
    """Base test case that drives the table-based ACL scenarios."""

    def setUp(self):
        # tf.swift_test_user[5] holds the reseller admin used to create
        # and delete test fixtures.
        self.reseller_admin = tf.swift_test_user[5]
        self.client = SwiftClient()
        # NOTE(review): self.created_resources is appended to by the
        # helpers below but is not initialized here — presumably reset per
        # scenario; confirm in _run_scenario.
def _create_resource_url(self, storage_url, account=None,
container=None, obj=None, reseller_prefix=None):
# e.g.
# storage_url = 'http://localhost/v1/AUTH_xxx'
# storage_url_list[:-1] is ['http:', '', 'localhost', 'v1']
# storage_url_list[-1] is 'AUTH_xxx'
storage_url_list = storage_url.rstrip('/').split('/')
base_url = '/'.join(storage_url_list[:-1])
if account is None:
account = storage_url_list[-1]
if reseller_prefix == 'SERVICE':
# replace endpoint reseller prefix with service reseller prefix
i = (account.index('_') + 1) if '_' in account else 0
account = tf.swift_test_service_prefix + account[i:]
return '/'.join([part for part in (base_url, account, container, obj)
if part])
    def _put_container(self, storage_url, token, test_case):
        """Create the container for a test case and record it for cleanup."""
        resource_url = self._create_resource_url(
            storage_url,
            test_case['account_name'],
            test_case['container_name'],
            reseller_prefix=test_case['reseller_prefix'])
        # Register before the request so cleanup still runs if the PUT
        # partially succeeds or fails.
        self.created_resources.append(resource_url)
        self.client.send_request('PUT', resource_url, token,
                                 headers=test_case['prep_container_header'])
    def _put_object(self, storage_url, token, test_case):
        """Create the test object and record its URL for cleanup."""
        resource_url = self._create_resource_url(
            storage_url,
            test_case['account_name'],
            test_case['container_name'],
            test_case['object_name'],
            reseller_prefix=test_case['reseller_prefix'])
        # Register before the request so cleanup still runs if PUT fails.
        self.created_resources.append(resource_url)
        self.client.send_request('PUT', resource_url, token)
def _get_storage_url_and_token(self, storage_url_user, token_user):
storage_url, _junk = self.client.auth(storage_url_user)
_junk, token = self.client.auth(token_user)
return storage_url, token
def _prepare(self, test_case):
storage_url, reseller_token = self._get_storage_url_and_token(
test_case['target_user_name'], self.reseller_admin)
if test_case['http_method'] in ('GET', 'POST', 'DELETE', 'HEAD',
'OPTIONS'):
temp_test_case = test_case.copy()
if test_case['container_name'] is None:
# When the target is for account, dummy container will be
# created to create an account. This account is created by
# account_autocreate.
temp_test_case['container_name'] = uuid.uuid4().hex
self._put_container(storage_url, reseller_token, temp_test_case)
if test_case['object_name']:
self._put_object(storage_url, reseller_token, test_case)
elif test_case['http_method'] in ('PUT',):
if test_case['object_name']:
self._put_container(storage_url, reseller_token, test_case)
def _execute(self, test_case):
storage_url, token = self._get_storage_url_and_token(
test_case['target_user_name'], test_case['auth_user_name'])
service_user = test_case['service_user_name']
service_token = (None if service_user is None
else self.client.auth(service_user)[1])
resource_url = self._create_resource_url(
storage_url,
test_case['account_name'],
test_case['container_name'],
test_case['object_name'],
test_case['reseller_prefix'])
if test_case['http_method'] in ('PUT'):
self.created_resources.append(resource_url)
resp = self.client.send_request(test_case['http_method'],
resource_url,
token,
headers=test_case['header'],
service_token=service_token)
return resp.status
def _cleanup(self):
_junk, reseller_token = self.client.auth(self.reseller_admin)
for resource_url in reversed(self.created_resources):
resp = self.client.send_request('DELETE', resource_url,
reseller_token)
self.assertIn(resp.status, (204, 404))
def _convert_data(self, data):
test_case = dict(zip(TEST_CASE_FORMAT, data))
if test_case['container_name'] == 'UUID':
test_case['container_name'] = uuid.uuid4().hex
if test_case['object_name'] == 'UUID':
test_case['object_name'] = uuid.uuid4().hex
return test_case
def _run_scenario(self, scenario):
for data in scenario:
test_case = self._convert_data(data)
self.created_resources = []
try:
self._prepare(test_case)
result = self._execute(test_case)
self.assertEqual(test_case['expected'],
result,
'Expected %s but got %s for test case %s' %
(test_case['expected'], result, test_case))
finally:
self._cleanup()
class TestRBAC(BaseTestAC):
    """Exercise the full RBAC scenario matrix."""

    def test_rbac(self):
        """Run every verb's RBAC scenarios in a random order."""
        skips = (tf.skip, tf.skip2, tf.skip3, tf.skip_if_not_v3,
                 tf.skip_if_no_reseller_admin)
        if any(skips):
            raise unittest.SkipTest
        scenario_rbac = (RBAC_PUT + RBAC_DELETE + RBAC_GET +
                         RBAC_HEAD + RBAC_POST + RBAC_OPTIONS)
        shuffle(scenario_rbac)
        self._run_scenario(scenario_rbac)

    def test_rbac_with_service_prefix(self):
        """Run the service-token RBAC scenarios in a random order."""
        skips = (tf.skip, tf.skip2, tf.skip3, tf.skip_if_not_v3,
                 tf.skip_service_tokens, tf.skip_if_no_reseller_admin)
        if any(skips):
            raise unittest.SkipTest
        scenario_rbac = (RBAC_PUT_WITH_SERVICE_PREFIX +
                         RBAC_DELETE_WITH_SERVICE_PREFIX +
                         RBAC_GET_WITH_SERVICE_PREFIX +
                         RBAC_HEAD_WITH_SERVICE_PREFIX +
                         RBAC_POST_WITH_SERVICE_PREFIX +
                         RBAC_OPTIONS_WITH_SERVICE_PREFIX)
        shuffle(scenario_rbac)
        self._run_scenario(scenario_rbac)
class TestRBACInfo(BaseTestAC):
    """RBAC tests against the proxy /info endpoint; no target resources
    are needed, so prepare/cleanup are no-ops."""

    def _get_info_url(self):
        """Derive the /info URL from the reseller admin's storage URL."""
        storage_url, _junk = self.client.auth(self.reseller_admin)
        parsed = urlparse(storage_url)
        return urlunparse((parsed.scheme, parsed.netloc, '/info', '', '', ''))

    def _prepare(self, test_case):
        # /info requires no setup
        pass

    def _execute(self, test_case):
        """Hit /info with the scenario's method and user; return the status."""
        _junk, token = self.client.auth(test_case['auth_user_name'])
        resp = self.client.send_request(test_case['http_method'],
                                        self.info_url, token)
        return resp.status

    def _cleanup(self):
        # nothing is ever created
        pass

    def _convert_data(self, data):
        return dict(zip(TEST_CASE_INFO_FORMAT, data))

    def test_rbac_info(self):
        skips = (tf.skip, tf.skip2, tf.skip3, tf.skip_if_not_v3,
                 tf.skip_if_no_reseller_admin)
        if any(skips):
            raise unittest.SkipTest
        self.info_url = self._get_info_url()
        scenario = RBAC_INFO_GET + RBAC_INFO_HEAD + RBAC_INFO_OPTIONS
        shuffle(scenario)
        self._run_scenario(scenario)

    def test_rbac_info_with_service_prefix(self):
        skips = (tf.skip, tf.skip2, tf.skip3, tf.skip_if_not_v3,
                 tf.skip_service_tokens, tf.skip_if_no_reseller_admin)
        if any(skips):
            raise unittest.SkipTest
        self.info_url = self._get_info_url()
        scenario = (RBAC_INFO_GET_WITH_SERVICE_PREFIX +
                    RBAC_INFO_HEAD_WITH_SERVICE_PREFIX +
                    RBAC_INFO_OPTIONS_WITH_SERVICE_PREFIX)
        shuffle(scenario)
        self._run_scenario(scenario)
class TestContainerACL(BaseTestAC):
    """Container ACL scenarios; ACL header values are interpolated with
    real Keystone ids before each run."""

    def _convert_data(self, data):
        test_case = super(TestContainerACL, self)._convert_data(data)
        headers = test_case['prep_container_header']
        if headers is not None:
            # fill real user/project ids into the ACL header values
            for key in headers:
                headers[key] = headers[key] % self.id_info
        return test_case

    def test_container_acl(self):
        skips = (tf.skip, tf.skip2, tf.skip3, tf.skip_if_not_v3,
                 tf.skip_if_no_reseller_admin)
        if any(skips):
            raise unittest.SkipTest
        self.id_info = KeystoneClient().get_id_info()
        scenario = (ACL_PUT + ACL_DELETE + ACL_GET +
                    ACL_HEAD + ACL_POST + ACL_OPTIONS)
        shuffle(scenario)
        self._run_scenario(scenario)
# Allow running this test module directly (normally driven by a test runner).
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
larroy/mxnet | cpp-package/scripts/OpWrapperGenerator.py | 4 | 17414 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
# This is a python script that generates operator wrappers such as FullyConnected,
# based on current libmxnet.dll. This script is written so that we don't need to
# write new operator wrappers when new ones are added to the library.
from ctypes import *
from ctypes.util import find_library
import os
import logging
import platform
import re
import sys
import tempfile
import filecmp
import shutil
import codecs
def gen_enum_value(value):
    """Turn an enum value name such as 'avg' into its C++ enumerator
    name 'kAvg' (leading 'k', first letter upper-cased, rest unchanged)."""
    first, rest = value[0], value[1:]
    return 'k' + first.upper() + rest
class EnumType:
    """A C++ ``enum class`` generated from an operator argument whose
    type string is an enum literal such as ``"{'avg', 'max', 'sum'}"``."""
    name = ''
    enumValues = []

    def __init__(self, typeName = 'ElementWiseOpType', \
                 typeString = "{'avg', 'max', 'sum'}"):
        """Parse *typeString* and remember the enum's name and values.

        :param typeName: name to give the generated C++ enum class
        :param typeString: mxnet type description; must look like
                           ``{'a', 'b', ...}`` to be treated as an enum
        """
        self.name = typeName
        # always give the instance its own list; otherwise the non-enum
        # branch would leave instances sharing the mutable class attribute
        self.enumValues = []
        if (typeString[0] == '{'):  # is a enum type
            isEnum = True
            # parse enum: comma-separated quoted names between the braces
            self.enumValues = typeString[typeString.find('{') + 1:typeString.find('}')].split(',')
            for i in range(0, len(self.enumValues)):
                self.enumValues[i] = self.enumValues[i].strip().strip("'")
        else:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning("trying to parse none-enum type as enum: %s" % typeString)

    def GetDefinitionString(self, indent = 0):
        """Return the C++ ``enum class`` definition text."""
        indentStr = ' ' * indent
        ret = indentStr + 'enum class %s {\n' % self.name
        for i in range(0, len(self.enumValues)):
            ret = ret + indentStr + '  %s = %d' % (gen_enum_value(self.enumValues[i]), i)
            if (i != len(self.enumValues) -1):
                ret = ret + ","
            ret = ret + "\n"
        ret = ret + "};\n"
        return ret

    def GetDefaultValueString(self, value = ''):
        """Return the qualified C++ enumerator for *value*, e.g. ``Foo::kBar``."""
        return self.name + "::" + gen_enum_value(value)

    def GetEnumStringArray(self, indent = 0):
        """Return a static C array of the enum's string names, used to map
        an enum value back to the string mxnet expects."""
        indentStr = ' ' * indent
        ret = indentStr + 'static const char *%sValues[] = {\n' % self.name
        for i in range(0, len(self.enumValues)):
            ret = ret + indentStr + '  "%s"' % self.enumValues[i]
            if (i != len(self.enumValues) -1):
                ret = ret + ","
            ret = ret + "\n"
        ret = ret + indentStr + "};\n"
        return ret

    def GetConvertEnumVariableToString(self, variable=''):
        """Return the C++ expression mapping *variable* to its string name."""
        return "%sValues[int(%s)]" % (self.name, variable)
class Arg:
    """One operator argument: its C++ type, description, and (optionally)
    its default value and generated enum type."""
    # mxnet type-description prefix -> C++ type
    typeDict = {'boolean':'bool',\
        'boolean or None':'dmlc::optional<bool>',\
        'Shape(tuple)':'Shape',\
        'Symbol':'Symbol',\
        'NDArray':'Symbol',\
        'NDArray-or-Symbol':'Symbol',\
        'Symbol[]':'const std::vector<Symbol>&',\
        'Symbol or Symbol[]':'const std::vector<Symbol>&',\
        'NDArray[]':'const std::vector<Symbol>&',\
        'caffe-layer-parameter':'::caffe::LayerParameter',\
        'NDArray-or-Symbol[]':'const std::vector<Symbol>&',\
        'float':'mx_float',\
        'real_t':'mx_float',\
        'int':'int',\
        'int (non-negative)': 'uint32_t',\
        'long (non-negative)': 'uint64_t',\
        'int or None':'dmlc::optional<int>',\
        'float or None':'dmlc::optional<float>',\
        'long':'int64_t',\
        'double':'double',\
        'double or None':'dmlc::optional<double>',\
        'Shape or None':'dmlc::optional<Shape>',\
        'string':'const std::string&',\
        'tuple of <float>':'nnvm::Tuple<mx_float>'}
    name = ''
    type = ''
    description = ''
    isEnum = False
    enum = None
    hasDefault = False
    defaultString = ''

    def __init__(self, opName = '', argName = '', typeString = '', descString = ''):
        """Parse one argument description.

        :param opName: operator name (used to derive enum type names)
        :param argName: argument name
        :param typeString: mxnet type description, e.g.
                           ``"float, optional, default=0.0001"``
        :param descString: human-readable description
        """
        self.name = argName
        self.description = descString
        if (typeString[0] == '{'): # is enum type
            self.isEnum = True
            self.enum = EnumType(self.ConstructEnumTypeName(opName, argName), typeString)
            self.type = self.enum.name
        else:
            try:
                self.type = self.typeDict[typeString.split(',')[0]]
            except KeyError:
                # was a bare 'except:'; only a missing typeDict entry is
                # expected here
                print('argument "%s" of operator "%s" has unknown type "%s"' % (argName, opName, typeString))
        if typeString.find('default=') != -1:
            self.hasDefault = True
            self.defaultString = typeString.split('default=')[1].strip().strip("'")
            # translate the python-side default into a C++ default expression
            if typeString.startswith('string'):
                self.defaultString = self.MakeCString(self.defaultString)
            elif self.isEnum:
                self.defaultString = self.enum.GetDefaultValueString(self.defaultString)
            elif self.defaultString == 'None':
                self.defaultString = self.type + '()'
            elif self.type == "bool":
                if self.defaultString == "1" or self.defaultString == "True":
                    self.defaultString = "true"
                else:
                    self.defaultString = "false"
            elif self.defaultString[0] == '(':
                self.defaultString = 'Shape' + self.defaultString
            elif self.defaultString[0] == '[':
                self.defaultString = 'Shape(' + self.defaultString[1:-1] + ")"
            elif self.type == 'dmlc::optional<int>':
                self.defaultString = self.type + '(' + self.defaultString + ')'
            elif self.type == 'dmlc::optional<bool>':
                self.defaultString = self.type + '(' + self.defaultString + ')'
            elif typeString.startswith('caffe-layer-parameter'):
                self.defaultString = 'textToCaffeLayerParameter(' + self.MakeCString(self.defaultString) + ')'
                # (a dead local assignment 'hasCaffe = True' was removed here)

    def MakeCString(self, text):
        """Escape newlines/tabs in *text* and wrap it in double quotes.
        (parameter renamed from 'str' to avoid shadowing the builtin)"""
        text = text.replace('\n', "\\n")
        text = text.replace('\t', "\\t")
        return '\"' + text + '\"'

    def ConstructEnumTypeName(self, opName = '', argName = ''):
        """Build an enum type name: capitalized op name + CamelCased
        argument name, e.g. ('softmax', 'act_type') -> 'SoftmaxActType'."""
        a = opName[0].upper()
        # format ArgName so instead of act_type it returns ActType
        argNameWords = argName.split('_')
        argName = ''
        for an in argNameWords:
            argName = argName + an[0].upper() + an[1:]
        typeName = a + opName[1:] + argName
        return typeName
class Op:
    """One mxnet operator and its arguments; knows how to emit the C++
    wrapper function for itself."""
    name = ''
    description = ''
    args = []

    def __init__(self, name = '', description = '', args = None):
        """Store the operator's metadata and order its arguments.

        :param name: operator name
        :param description: operator description
        :param args: list of `Arg` objects; a 'symbol_name' argument is
                     prepended and arguments with defaults are moved to
                     the end (a provided list is modified in place)
        """
        self.name = name
        self.description = description
        # a mutable default ([]) would be shared across calls and would
        # accumulate one 'symbol_name' Arg per constructed Op
        if args is None:
            args = []
        # add a 'name' argument
        nameArg = Arg(self.name, \
                      'symbol_name', \
                      'string', \
                      'name of the resulting symbol')
        args.insert(0, nameArg)
        # reorder arguments, put those with default value to the end
        orderedArgs = []
        for arg in args:
            if not arg.hasDefault:
                orderedArgs.append(arg)
        for arg in args:
            if arg.hasDefault:
                orderedArgs.append(arg)
        self.args = orderedArgs

    def WrapDescription(self, desc = ''):
        """Split *desc* into lines of at most 80 characters, breaking on
        spaces where possible."""
        ret = []
        lines = desc.split('\n')
        for line in lines:
            line = line.strip()
            if len(line) <= 80:
                ret.append(line.strip())
            else:
                while len(line) > 80:
                    pos = line.rfind(' ', 0, 80)+1
                    if pos <= 0:
                        pos = line.find(' ')
                    if pos < 0:
                        pos = len(line)
                    ret.append(line[:pos].strip())
                    line = line[pos:]
                # the original dropped the final <=80-char remainder of a
                # long line; keep it
                if line.strip():
                    ret.append(line.strip())
        return ret

    def GenDescription(self, desc = '', \
                       firstLineHead = ' * \\brief ', \
                       otherLineHead = ' * '):
        """Render *desc* as a doxygen comment body using the given line
        prefixes."""
        ret = ''
        descs = self.WrapDescription(desc)
        ret = ret + firstLineHead
        if len(descs) == 0:
            return ret.rstrip()
        ret = (ret + descs[0]).rstrip() + '\n'
        for i in range(1, len(descs)):
            ret = ret + (otherLineHead + descs[i]).rstrip() + '\n'
        return ret

    def GetOpDefinitionString(self, use_name, indent=0):
        """Emit the complete C++ wrapper function for this operator.

        :param use_name: if True, emit the overload taking an explicit
                         symbol name; otherwise the unnamed overload
        :param indent: indentation level of the generated code
        """
        ret = ''
        indentStr = ' ' * indent
        # define enums if any (only once, in the named overload)
        for arg in self.args:
            if arg.isEnum and use_name:
                # comments
                ret = ret + self.GenDescription(arg.description, \
                                        '/*! \\brief ', \
                                        ' * ')
                ret = ret + " */\n"
                # definition
                ret = ret + arg.enum.GetDefinitionString(indent) + '\n'
        # create function comments
        ret = ret + self.GenDescription(self.description, \
                                    '/*!\n * \\brief ', \
                                    ' * ')
        for arg in self.args:
            if arg.name != 'symbol_name' or use_name:
                ret = ret + self.GenDescription(arg.name + ' ' + arg.description, \
                                        ' * \\param ', \
                                        ' * ')
        ret = ret + " * \\return new symbol\n"
        ret = ret + " */\n"
        # create function header
        declFirstLine = indentStr + 'inline Symbol %s(' % self.name
        ret = ret + declFirstLine
        argIndentStr = ' ' * len(declFirstLine)
        arg_start = 0 if use_name else 1
        if len(self.args) > arg_start:
            ret = ret + self.GetArgString(self.args[arg_start])
        for i in range(arg_start+1, len(self.args)):
            ret = ret + ',\n'
            ret = ret + argIndentStr + self.GetArgString(self.args[i])
        ret = ret + ') {\n'
        # create function body
        # if there is enum, generate static enum<->string mapping
        for arg in self.args:
            if arg.isEnum:
                ret = ret + arg.enum.GetEnumStringArray(indent + 2)
        # now generate code
        ret = ret + indentStr + '  return Operator(\"%s\")\n' % self.name
        for arg in self.args:   # set params
            if arg.type == 'Symbol' or \
                    arg.type == 'const std::string&' or \
                    arg.type == 'const std::vector<Symbol>&':
                continue
            v = arg.name
            if arg.isEnum:
                v = arg.enum.GetConvertEnumVariableToString(v)
            ret = ret + indentStr + ' ' * 11 + \
                  '.SetParam(\"%s\", %s)\n' % (arg.name, v)
        symbols = ''
        inputAlreadySet = False
        for arg in self.args:   # set single-Symbol inputs
            if arg.type != 'Symbol':
                continue
            inputAlreadySet = True
            ret = ret + indentStr + ' ' * 11 + \
                  '.SetInput(\"%s\", %s)\n' % (arg.name, arg.name)
        for arg in self.args:   # set input arrays vector<Symbol>
            if arg.type != 'const std::vector<Symbol>&':
                continue
            if (inputAlreadySet):
                logging.error("op %s has both Symbol[] and Symbol inputs!" % self.name)
            inputAlreadySet = True
            symbols = arg.name
            ret = ret + '(%s)\n' % symbols
        ret = ret + indentStr + ' ' * 11
        if use_name:
            ret = ret + '.CreateSymbol(symbol_name);\n'
        else:
            ret = ret + '.CreateSymbol();\n'
        ret = ret + indentStr + '}\n'
        return ret

    def GetArgString(self, arg):
        """Render one C++ parameter, with its default value if any."""
        ret = '%s %s' % (arg.type, arg.name)
        if arg.hasDefault:
            ret = ret + ' = ' + arg.defaultString
        return ret
def ParseAllOps():
    """
    Enumerate every operator registered in the libmxnet shared library
    named by sys.argv[1] and return the generated C++ wrapper code as one
    string: first all overloads taking an explicit symbol name, then the
    unnamed overloads.

    C API entry points used:

    MXNET_DLL int MXSymbolListAtomicSymbolCreators(mx_uint *out_size,
                                                   AtomicSymbolCreator **out_array);
    MXNET_DLL int MXSymbolGetAtomicSymbolInfo(AtomicSymbolCreator creator,
                                              const char **name,
                                              const char **description,
                                              mx_uint *num_args,
                                              const char ***arg_names,
                                              const char ***arg_type_infos,
                                              const char ***arg_descriptions,
                                              const char **key_var_num_args);
    """
    # load the library and declare the two C entry points' argtypes so
    # ctypes marshals the out-parameters correctly
    cdll.libmxnet = cdll.LoadLibrary(sys.argv[1])
    ListOP = cdll.libmxnet.MXSymbolListAtomicSymbolCreators
    GetOpInfo = cdll.libmxnet.MXSymbolGetAtomicSymbolInfo
    ListOP.argtypes=[POINTER(c_int), POINTER(POINTER(c_void_p))]
    GetOpInfo.argtypes=[c_void_p, \
        POINTER(c_char_p), \
        POINTER(c_char_p), \
        POINTER(c_int), \
        POINTER(POINTER(c_char_p)), \
        POINTER(POINTER(c_char_p)), \
        POINTER(POINTER(c_char_p)), \
        POINTER(c_char_p), \
        POINTER(c_char_p)
        ]
    nOps = c_int()
    opHandlers = POINTER(c_void_p)()
    # NOTE(review): 'r' is the C status code; presumably 0 on success but
    # it is not checked here — confirm before relying on it.
    r = ListOP(byref(nOps), byref(opHandlers))
    ret = ''
    ret2 = ''
    for i in range(0, nOps.value):
        handler = opHandlers[i]
        name = c_char_p()
        description = c_char_p()
        nArgs = c_int()
        argNames = POINTER(c_char_p)()
        argTypes = POINTER(c_char_p)()
        argDescs = POINTER(c_char_p)()
        varArgName = c_char_p()
        return_type = c_char_p()
        GetOpInfo(handler, byref(name), byref(description), \
            byref(nArgs), byref(argNames), byref(argTypes), \
            byref(argDescs), byref(varArgName), byref(return_type))
        if name.value.decode('utf-8').startswith('_'): # get rid of functions like __init__
            continue
        args = []
        # NOTE(review): this inner loop reuses the outer index name 'i';
        # harmless because the outer range iterator re-binds 'i' on the
        # next pass, but a distinct name would be clearer.
        for i in range(0, nArgs.value):
            arg = Arg(name.value.decode('utf-8'),
                      argNames[i].decode('utf-8'),
                      argTypes[i].decode('utf-8'),
                      argDescs[i].decode('utf-8'))
            args.append(arg)
        op = Op(name.value.decode('utf-8'), description.value.decode('utf-8'), args)
        ret = ret + op.GetOpDefinitionString(True) + "\n"
        ret2 = ret2 + op.GetOpDefinitionString(False) + "\n"
    return ret + ret2
if __name__ == "__main__":
    # Usage: OpWrapperGenerator.py <path-to-libmxnet>
    # Regenerates ../include/mxnet-cpp/op.h from the operators registered
    # in the given libmxnet library.  The header is only replaced when its
    # content actually changed, keeping incremental builds fast.
    temp_file_name = ""
    output_file = '../include/mxnet-cpp/op.h'
    try:
        # generate file header
        patternStr = ("/*!\n"
                      "* Copyright (c) 2016 by Contributors\n"
                      "* \\file op.h\n"
                      "* \\brief definition of all the operators\n"
                      "* \\author Chuntao Hong, Xin Li\n"
                      "*/\n"
                      "\n"
                      "#ifndef MXNET_CPP_OP_H_\n"
                      "#define MXNET_CPP_OP_H_\n"
                      "\n"
                      "#include <string>\n"
                      "#include <vector>\n"
                      "#include \"mxnet-cpp/base.h\"\n"
                      "#include \"mxnet-cpp/shape.h\"\n"
                      "#include \"mxnet-cpp/op_util.h\"\n"
                      "#include \"mxnet-cpp/operator.h\"\n"
                      "#include \"dmlc/optional.h\"\n"
                      "#include \"nnvm/tuple.h\"\n"
                      "\n"
                      "namespace mxnet {\n"
                      "namespace cpp {\n"
                      "\n"
                      "%s"
                      "} //namespace cpp\n"
                      "} //namespace mxnet\n"
                      "#endif  // MXNET_CPP_OP_H_\n")
        # Generate a temporary file name; closing the NamedTemporaryFile
        # deletes the placeholder, only the unique name is kept.
        tf = tempfile.NamedTemporaryFile()
        temp_file_name = tf.name
        tf.close()
        with codecs.open(temp_file_name, 'w', 'utf-8') as f:
            f.write(patternStr % ParseAllOps())
    except Exception:
        # on failure, leave neither a stale header nor a stray temp file
        # behind; guard the removals so they can't mask the real error
        if os.path.exists(output_file):
            os.remove(output_file)
        if temp_file_name and os.path.exists(temp_file_name):
            os.remove(temp_file_name)
        raise  # bare raise preserves the original traceback
    if os.path.exists(output_file):
        if filecmp.cmp(temp_file_name, output_file):
            # header already up to date: keep it and drop the temp file
            # (the original leaked the temp file in this case)
            os.remove(temp_file_name)
        else:
            os.remove(output_file)
    if not os.path.exists(output_file):
        shutil.move(temp_file_name, output_file)
| apache-2.0 |
flyapen/UgFlu | flumotion/test/test_porter.py | 2 | 5495 | # -*- Mode: Python; test-case-name: flumotion.test.test_porter -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
from flumotion.common import testsuite
from flumotion.component.misc.porter import porter
class FakeTransport:
    """In-memory stand-in for a twisted transport.

    Captures everything written to it and tracks connection state so the
    tests can assert on protocol behaviour.
    """
    connected = True
    _fileno = 5

    def __init__(self, protocol):
        self.protocol = protocol
        self.written = ''

    def loseConnection(self):
        """Mark the transport closed and notify the protocol."""
        self.connected = False
        self.protocol.connectionLost(None)

    def sendFileDescriptor(self, fd, data):
        """Accept and ignore a file-descriptor pass."""
        pass

    def write(self, data):
        """Append outgoing data to the captured buffer."""
        self.written = self.written + data

    def fileno(self):
        return self._fileno
class FakePorter:
    """Stand-in porter that only knows the path '/existing' and records
    whether a destination lookup was attempted."""
    foundDestination = False

    def findDestination(self, path):
        """Return an avatar for '/existing', otherwise None."""
        self.foundDestination = True
        if path != '/existing':
            return None
        return FakeAvatar()
class FakeBroker:
    # minimal PB broker stand-in: just owns a fake transport
    def __init__(self):
        self.transport = FakeTransport(self)
class FakeMind:
    # minimal PB mind stand-in exposing mind.broker.transport
    def __init__(self):
        self.broker = FakeBroker()
class FakeAvatar:
    # minimal porter avatar stand-in; always reports itself attached
    avatarId = 'testAvatar'
    def __init__(self):
        self.mind = FakeMind()
    def isAttached(self):
        return True
class TestPorterProtocol(testsuite.TestCase):
    """Tests for the base porter protocol's request-line handling.

    Uses assertTrue/assertFalse instead of the deprecated
    failUnless/failIf unittest aliases.
    """

    def setUp(self):
        self.p = FakePorter()
        self.pp = porter.HTTPPorterProtocol(self.p)
        self.t = FakeTransport(self.pp)
        self.pp.transport = self.t
        self.assertTrue(self.t.connected)
        self.assertFalse(self.p.foundDestination)

    def testNoIdentifier(self):
        # a complete first line with no usable identifier must disconnect
        self.pp.dataReceived('first ')
        self.assertTrue(self.t.connected)
        self.pp.dataReceived('line\n')
        self.assertFalse(self.t.connected)

    def testBreakDelimiter(self):
        # a \r\n delimiter split across packets is still recognized
        self.pp.dataReceived('first line')
        self.assertTrue(self.t.connected)
        self.pp.dataReceived('\r')
        self.pp.dataReceived('\n')
        self.assertFalse(self.t.connected)
class TestHTTPPorterProtocol(testsuite.TestCase):
    """Tests for HTTP request routing in the HTTP porter protocol.

    Uses assertTrue/assertFalse instead of the deprecated
    failUnless/failIf unittest aliases.
    """

    def setUp(self):
        self.p = FakePorter()
        self.pp = porter.HTTPPorterProtocol(self.p)
        self.t = FakeTransport(self.pp)
        self.pp.transport = self.t
        self.assertTrue(self.t.connected)
        self.assertFalse(self.p.foundDestination)

    def testWrongLocation(self):
        # a URL without a path never reaches findDestination
        self.pp.dataReceived('GET ')
        self.assertTrue(self.t.connected)
        self.pp.dataReceived('http://localhost ')
        self.pp.dataReceived('HTTP/1.1\r\n')
        self.assertFalse(self.t.connected)
        self.assertFalse(self.p.foundDestination)

    def testRightLocationNotFound(self):
        # an unknown path gets a 404 response before disconnect
        self.pp.dataReceived('GET ')
        self.assertTrue(self.t.connected)
        self.pp.dataReceived('http://localhost:8800/notfound ')
        self.pp.dataReceived('HTTP/1.1\r\n')
        self.assertFalse(self.t.connected)
        self.assertTrue(self.p.foundDestination)
        self.assertTrue(self.t.written)
        self.assertTrue('404' in self.t.written)

    def testRightLocationFound(self):
        # a known path is handed off silently: nothing written back here
        self.pp.dataReceived('GET ')
        self.assertTrue(self.t.connected)
        self.pp.dataReceived('http://localhost:8800/existing ')
        self.pp.dataReceived('HTTP/1.1\r\n')
        self.assertFalse(self.t.connected)
        self.assertTrue(self.p.foundDestination)
        self.assertFalse(self.t.written)
class TestHTTPPorterProtocolParser(testsuite.TestCase):
    """Tests for parseLine: extracting the path from an HTTP request line.

    Uses assertEqual instead of the deprecated assertEquals alias.
    """

    def setUp(self):
        self.p = FakePorter()
        self.pp = porter.HTTPPorterProtocol(self.p)
        self.t = FakeTransport(self.pp)
        self.pp.transport = self.t

    def tearDown(self):
        self.t.loseConnection()

    def testSimpleParse(self):
        # bare paths, with and without \r, HTTP/1.0 and 1.1
        result = self.pp.parseLine('GET /test HTTP/1.0\r\n')
        self.assertEqual(result, '/test')
        result = self.pp.parseLine('GET /test HTTP/1.1\n')
        self.assertEqual(result, '/test')
        result = self.pp.parseLine('GET / HTTP/1.0\r\n')
        self.assertEqual(result, '/')

    def testParseWithHost(self):
        # absolute URLs: host (and port) must be stripped
        result = self.pp.parseLine(
            'GET http://some.server.somewhere/test HTTP/1.1\n')
        self.assertEqual(result, '/test')
        result = self.pp.parseLine(
            'GET http://some.server.somewhere:1234/ HTTP/1.1\n')
        self.assertEqual(result, '/')

    def testParseWithParams(self):
        # query strings must be stripped from the returned path
        result = self.pp.parseLine(
            'GET http://some.server.somewhere:1234/test?'
            'arg1=val1&arg2=val2 HTTP/1.1\n')
        self.assertEqual(result, '/test')
        result = self.pp.parseLine(
            'GET /test?arg1=val1&arg2=val2 HTTP/1.1\n')
        self.assertEqual(result, '/test')
        result = self.pp.parseLine(
            'GET /?arg1=val1&arg2=val2 HTTP/1.1\n')
        self.assertEqual(result, '/')
| gpl-2.0 |
NAMD/mediacloud | justice/external/babel/messages/extract.py | 67 | 23063 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Basic infrastructure for extracting localizable messages from source files.
This module defines an extensible system for collecting localizable message
strings from a variety of sources. A native extractor for Python source files
is builtin, extractors for other sources can be added using very simple plugins.
The main entry points into the extraction functionality are the functions
`extract_from_dir` and `extract_from_file`.
"""
import os
try:
set
except NameError:
from sets import Set as set
import sys
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
__all__ = ['extract', 'extract_from_dir', 'extract_from_file']
__docformat__ = 'restructuredtext en'
# setuptools entry-point group under which extraction plugins register
GROUP_NAME = 'babel.extractors'
# Default gettext-style keywords mapped to the 1-based argument positions
# holding translatable strings; None means the single/first argument.
DEFAULT_KEYWORDS = {
    '_': None,
    'gettext': None,
    'ngettext': (1, 2),
    'ugettext': None,
    'ungettext': (1, 2),
    'dgettext': (2,),
    'dngettext': (2, 3),
    'N_': None
}
# By default only Python source files are searched.
DEFAULT_MAPPING = [('**.py', 'python')]
# Warning printed when a keyword is called with an empty msgid.
empty_msgid_warning = (
'%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
'returns the header entry with meta information, not the empty string.')
def _strip_comment_tags(comments, tags):
"""Helper function for `extract` that strips comment tags from strings
in a list of comment lines. This functions operates in-place.
"""
def _strip(line):
for tag in tags:
if line.startswith(tag):
return line[len(tag):].strip()
return line
comments[:] = map(_strip, comments)
def extract_from_dir(dirname=os.getcwd(), method_map=DEFAULT_MAPPING,
options_map=None, keywords=DEFAULT_KEYWORDS,
comment_tags=(), callback=None, strip_comment_tags=False):
"""Extract messages from any source files found in the given directory.
This function generates tuples of the form:
``(filename, lineno, message, comments)``
Which extraction method is used per file is determined by the `method_map`
parameter, which maps extended glob patterns to extraction method names.
For example, the following is the default mapping:
>>> method_map = [
... ('**.py', 'python')
... ]
This basically says that files with the filename extension ".py" at any
level inside the directory should be processed by the "python" extraction
method. Files that don't match any of the mapping patterns are ignored. See
the documentation of the `pathmatch` function for details on the pattern
syntax.
The following extended mapping would also use the "genshi" extraction
method on any file in "templates" subdirectory:
>>> method_map = [
... ('**/templates/**.*', 'genshi'),
... ('**.py', 'python')
... ]
The dictionary provided by the optional `options_map` parameter augments
these mappings. It uses extended glob patterns as keys, and the values are
dictionaries mapping options names to option values (both strings).
The glob patterns of the `options_map` do not necessarily need to be the
same as those used in the method mapping. For example, while all files in
the ``templates`` folders in an application may be Genshi applications, the
options for those files may differ based on extension:
>>> options_map = {
... '**/templates/**.txt': {
... 'template_class': 'genshi.template:TextTemplate',
... 'encoding': 'latin-1'
... },
... '**/templates/**.html': {
... 'include_attrs': ''
... }
... }
:param dirname: the path to the directory to extract messages from
:param method_map: a list of ``(pattern, method)`` tuples that maps of
extraction method names to extended glob patterns
:param options_map: a dictionary of additional options (optional)
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of tags of translator comments to search for
and include in the results
:param callback: a function that is called for every file that message are
extracted from, just before the extraction itself is
performed; the function is passed the filename, the name
of the extraction method and and the options dictionary as
positional arguments, in that order
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
:return: an iterator over ``(filename, lineno, funcname, message)`` tuples
:rtype: ``iterator``
:see: `pathmatch`
"""
if options_map is None:
options_map = {}
absname = os.path.abspath(dirname)
for root, dirnames, filenames in os.walk(absname):
for subdir in dirnames:
if subdir.startswith('.') or subdir.startswith('_'):
dirnames.remove(subdir)
dirnames.sort()
filenames.sort()
for filename in filenames:
filename = relpath(
os.path.join(root, filename).replace(os.sep, '/'),
dirname
)
for pattern, method in method_map:
if pathmatch(pattern, filename):
filepath = os.path.join(absname, filename)
options = {}
for opattern, odict in options_map.items():
if pathmatch(opattern, filename):
options = odict
if callback:
callback(filename, method, options)
for lineno, message, comments in \
extract_from_file(method, filepath,
keywords=keywords,
comment_tags=comment_tags,
options=options,
strip_comment_tags=
strip_comment_tags):
yield filename, lineno, message, comments
break
def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS,
                      comment_tags=(), options=None, strip_comment_tags=False):
    """Extract messages from a specific file.

    The file is opened in universal-newline mode and is always closed
    before returning.  Returns the list of ``(lineno, message, comments)``
    tuples produced by `extract`.

    :param method: a string specifying the extraction method (e.g. "python")
    :param filename: the path to the file to extract messages from
    :param keywords: a dictionary mapping keywords (i.e. names of functions
                     that should be recognized as translation functions) to
                     tuples that specify which of their arguments contain
                     localizable strings
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :param strip_comment_tags: a flag that if set to `True` causes all
                               comment tags to be removed from the collected
                               comments.
    :return: the list of extracted messages
    :rtype: `list`
    """
    fp = open(filename, 'U')
    try:
        return list(extract(method, fp, keywords, comment_tags, options,
                            strip_comment_tags))
    finally:
        fp.close()
def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(),
            options=None, strip_comment_tags=False):
    """Extract messages from the given file-like object using the specified
    extraction method.
    This function returns a list of tuples of the form:
    ``(lineno, message, comments)``
    The implementation dispatches the actual extraction to plugins, based on the
    value of the ``method`` parameter.
    >>> source = '''# foo module
    ... def run(argv):
    ...    print _('Hello, world!')
    ... '''
    >>> from StringIO import StringIO
    >>> for message in extract('python', StringIO(source)):
    ...     print message
    (3, u'Hello, world!', [])
    :param method: a string specifying the extraction method (.e.g. "python");
                   if this is a simple name, the extraction function will be
                   looked up by entry point; if it is an explicit reference
                   to a function (of the form ``package.module:funcname`` or
                   ``package.module.funcname``), the corresponding function
                   will be imported and used
    :param fileobj: the file-like object the messages should be extracted from
    :param keywords: a dictionary mapping keywords (i.e. names of functions
                     that should be recognized as translation functions) to
                     tuples that specify which of their arguments contain
                     localizable strings
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional)
    :param strip_comment_tags: a flag that if set to `True` causes all comment
                               tags to be removed from the collected comments.
    :return: the list of extracted messages
    :rtype: `list`
    :raise ValueError: if the extraction method is not registered
    """
    func = None
    if ':' in method or '.' in method:
        # explicit reference: "package.module:funcname" or
        # "package.module.funcname" — import the function directly
        if ':' not in method:
            lastdot = method.rfind('.')
            module, attrname = method[:lastdot], method[lastdot + 1:]
        else:
            module, attrname = method.split(':', 1)
        func = getattr(__import__(module, {}, {}, [attrname]), attrname)
    else:
        try:
            from pkg_resources import working_set
        except ImportError:
            # pkg_resources is not available, so we resort to looking up the
            # builtin extractors directly
            builtin = {'ignore': extract_nothing, 'python': extract_python}
            func = builtin.get(method)
        else:
            # look the simple method name up in the entry-point group
            for entry_point in working_set.iter_entry_points(GROUP_NAME,
                                                             method):
                func = entry_point.load(require=True)
                break
    if func is None:
        raise ValueError('Unknown extraction method %r' % method)
    results = func(fileobj, keywords.keys(), comment_tags,
                   options=options or {})
    for lineno, funcname, messages, comments in results:
        if funcname:
            # which 1-based argument positions hold localizable strings
            spec = keywords[funcname] or (1,)
        else:
            spec = (1,)
        if not isinstance(messages, (list, tuple)):
            messages = [messages]
        if not messages:
            continue
        # Validate the messages against the keyword's specification
        msgs = []
        invalid = False
        # last_index is 1 based like the keyword spec
        last_index = len(messages)
        for index in spec:
            if last_index < index:
                # Not enough arguments
                invalid = True
                break
            message = messages[index - 1]
            if message is None:
                invalid = True
                break
            msgs.append(message)
        if invalid:
            continue
        first_msg_index = spec[0] - 1
        if not messages[first_msg_index]:
            # An empty string msgid isn't valid, emit a warning
            where = '%s:%i' % (hasattr(fileobj, 'name') and \
                                   fileobj.name or '(unknown)', lineno)
            print >> sys.stderr, empty_msgid_warning % where
            continue
        # a single message is yielded bare, not as a 1-tuple
        messages = tuple(msgs)
        if len(messages) == 1:
            messages = messages[0]
        if strip_comment_tags:
            _strip_comment_tags(comments, comment_tags)
        yield lineno, messages, comments
def extract_nothing(fileobj, keywords, comment_tags, options):
    """No-op extractor.

    Accepts the standard extractor signature (so it can be registered like
    any other extraction method) but never yields a message: the result is
    always an empty list, regardless of input.
    """
    del fileobj, keywords, comment_tags, options  # intentionally unused
    return []
def extract_python(fileobj, keywords, comment_tags, options):
    """Extract messages from Python source code.

    Drives the stdlib tokenizer over *fileobj* and tracks a small state
    machine: when a NAME token matching one of *keywords* is seen, the
    following parenthesised argument list is collected (string arguments are
    concatenated, other arguments become ``None``), and translator comments
    immediately preceding the call are attached.

    :param fileobj: the seekable, file-like object the messages should be
                    extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional); only the
                    ``encoding`` key is read here
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    # State: funcname is the keyword currently being parsed (None when idle);
    # call_stack is -1 outside any keyword call, 0 at the keyword's own
    # argument list, >0 inside nested parens; buf accumulates string pieces
    # of the current argument.
    funcname = lineno = message_lineno = None
    call_stack = -1
    buf = []
    messages = []
    translator_comments = []
    in_def = in_translator_comments = False
    comment_tag = None
    # Source encoding: PEP 263 magic comment wins, then the 'encoding'
    # option, then Latin-1 as a decode-anything fallback.
    encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
    tokens = generate_tokens(fileobj.readline)
    for tok, value, (lineno, _), _, _ in tokens:
        if call_stack == -1 and tok == NAME and value in ('def', 'class'):
            in_def = True
        elif tok == OP and value == '(':
            if in_def:
                # Avoid false positives for declarations such as:
                # def gettext(arg='message'):
                in_def = False
                continue
            if funcname:
                message_lineno = lineno
                call_stack += 1
        elif in_def and tok == OP and value == ':':
            # End of a class definition without parens
            in_def = False
            continue
        elif call_stack == -1 and tok == COMMENT:
            # Strip the comment token from the line
            value = value.decode(encoding)[1:].strip()
            if in_translator_comments and \
                    translator_comments[-1][0] == lineno - 1:
                # We're already inside a translator comment, continue appending
                translator_comments.append((lineno, value))
                continue
            # If execution reaches this point, let's see if comment line
            # starts with one of the comment tags
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    in_translator_comments = True
                    translator_comments.append((lineno, value))
                    break
        elif funcname and call_stack == 0:
            # Inside the argument list of a recognized translation call.
            if tok == OP and value == ')':
                # Call finished: flush the pending argument and emit.
                if buf:
                    messages.append(''.join(buf))
                    del buf[:]
                else:
                    messages.append(None)
                if len(messages) > 1:
                    messages = tuple(messages)
                else:
                    messages = messages[0]
                # Comments don't apply unless they immediately precede the
                # message
                if translator_comments and \
                        translator_comments[-1][0] < message_lineno - 1:
                    translator_comments = []
                yield (message_lineno, funcname, messages,
                       [comment[1] for comment in translator_comments])
                # Reset all per-call state for the next keyword occurrence.
                funcname = lineno = message_lineno = None
                call_stack = -1
                messages = []
                translator_comments = []
                in_translator_comments = False
            elif tok == STRING:
                # Unwrap quotes in a safe manner, maintaining the string's
                # encoding
                # https://sourceforge.net/tracker/?func=detail&atid=355470&
                # aid=617979&group_id=5470
                value = eval('# coding=%s\n%s' % (encoding, value),
                             {'__builtins__':{}}, {})
                if isinstance(value, str):
                    value = value.decode(encoding)
                buf.append(value)
            elif tok == OP and value == ',':
                # Argument boundary: non-string arguments become None.
                if buf:
                    messages.append(''.join(buf))
                    del buf[:]
                else:
                    messages.append(None)
                if translator_comments:
                    # We have translator comments, and since we're on a
                    # comma(,) user is allowed to break into a new line
                    # Let's increase the last comment's lineno in order
                    # for the comment to still be a valid one
                    old_lineno, old_comment = translator_comments.pop()
                    translator_comments.append((old_lineno+1, old_comment))
        elif call_stack > 0 and tok == OP and value == ')':
            call_stack -= 1
        elif funcname and call_stack == -1:
            # Keyword name was not followed by '(': not a call, forget it.
            funcname = None
        elif tok == NAME and value in keywords:
            funcname = value
def extract_javascript(fileobj, keywords, comment_tags, options):
    """Extract messages from JavaScript source code.

    Mirrors :func:`extract_python` but drives the babel JS lexer instead of
    the Python tokenizer; string concatenation with ``+`` is folded into a
    single argument, and calls that yield no string argument are skipped.

    :param fileobj: the seekable, file-like object the messages should be
                    extracted from
    :param keywords: a list of keywords (i.e. function names) that should be
                     recognized as translation functions
    :param comment_tags: a list of translator tags to search for and include
                         in the results
    :param options: a dictionary of additional options (optional); only the
                    ``encoding`` key is read here (default UTF-8)
    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples
    :rtype: ``iterator``
    """
    from babel.messages.jslexer import tokenize, unquote_string
    # State machine mirrors extract_python: call_stack is -1 outside a
    # recognized call, 0 at its argument list, >0 inside nested parens.
    funcname = message_lineno = None
    messages = []
    last_argument = None
    translator_comments = []
    concatenate_next = False
    encoding = options.get('encoding', 'utf-8')
    # last_token lets us reject `function gettext(...)` declarations below.
    last_token = None
    call_stack = -1
    for token in tokenize(fileobj.read().decode(encoding)):
        if token.type == 'operator' and token.value == '(':
            if funcname:
                message_lineno = token.lineno
                call_stack += 1
        elif call_stack == -1 and token.type == 'linecomment':
            # Strip the leading '//' and keep appending while consecutive
            # comment lines continue a translator comment.
            value = token.value[2:].strip()
            if translator_comments and \
               translator_comments[-1][0] == token.lineno - 1:
                translator_comments.append((token.lineno, value))
                continue
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    translator_comments.append((token.lineno, value.strip()))
                    break
        elif token.type == 'multilinecomment':
            # only one multi-line comment may precede a translation
            translator_comments = []
            value = token.value[2:-2].strip()
            for comment_tag in comment_tags:
                if value.startswith(comment_tag):
                    lines = value.splitlines()
                    if lines:
                        lines[0] = lines[0].strip()
                        lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
                        for offset, line in enumerate(lines):
                            translator_comments.append((token.lineno + offset,
                                                        line))
                    break
        elif funcname and call_stack == 0:
            # Inside the argument list of a recognized translation call.
            if token.type == 'operator' and token.value == ')':
                if last_argument is not None:
                    messages.append(last_argument)
                if len(messages) > 1:
                    messages = tuple(messages)
                elif messages:
                    messages = messages[0]
                else:
                    messages = None
                # Comments don't apply unless they immediately precede the
                # message
                if translator_comments and \
                   translator_comments[-1][0] < message_lineno - 1:
                    translator_comments = []
                # Unlike extract_python, calls with no string argument at
                # all are dropped entirely.
                if messages is not None:
                    yield (message_lineno, funcname, messages,
                           [comment[1] for comment in translator_comments])
                funcname = message_lineno = last_argument = None
                concatenate_next = False
                translator_comments = []
                messages = []
                call_stack = -1
            elif token.type == 'string':
                new_value = unquote_string(token.value)
                if concatenate_next:
                    # Previous token was '+': fold into the same argument.
                    last_argument = (last_argument or '') + new_value
                    concatenate_next = False
                else:
                    last_argument = new_value
            elif token.type == 'operator':
                if token.value == ',':
                    if last_argument is not None:
                        messages.append(last_argument)
                        last_argument = None
                    else:
                        messages.append(None)
                    concatenate_next = False
                elif token.value == '+':
                    concatenate_next = True
        elif call_stack > 0 and token.type == 'operator' \
             and token.value == ')':
            call_stack -= 1
        elif funcname and call_stack == -1:
            funcname = None
        elif call_stack == -1 and token.type == 'name' and \
             token.value in keywords and \
             (last_token is None or last_token.type != 'name' or
              last_token.value != 'function'):
            # A keyword name, and not a `function gettext(...)` declaration.
            funcname = token.value
        last_token = token
| lgpl-3.0 |
CydarLtd/ansible | lib/ansible/modules/cloud/amazon/ec2_key.py | 52 | 8584 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible release metadata consumed by the docs/build tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: maintain an ec2 key pair.
description:
- maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the key pair.
required: true
key_material:
description:
- Public key material.
required: false
force:
description:
- Force overwrite of already existing key pair if key has changed.
required: false
default: true
version_added: "2.3"
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
wait:
description:
- Wait for the specified action to complete before returning.
required: false
default: false
aliases: []
version_added: "1.6"
wait_timeout:
description:
- How long before wait gives up, in seconds
required: false
default: 300
aliases: []
version_added: "1.6"
extends_documentation_fragment:
- aws
- ec2
author: "Vincent Viallet (@zbal)"
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
ec2_key:
name: example
# Creates a new ec2 key pair named `example` if not present using provided key
# material. This could use the 'file' lookup plugin to pull this off disk.
- name: example2 ec2 key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
state: present
# Given example2 is already existing, the key will not be replaced because the
# force flag was set to `false`
- name: example2 ec2 key
ec2_key:
name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
force: false
state: present
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example3 ec2 key
ec2_key:
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
ec2_key:
name: example
state: absent
'''
# Optional boto dependency: its absence is reported via module.fail_json()
# in main() rather than as an ImportError at module load time.
try:
    import boto.ec2
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
from ansible.module_utils._text import to_bytes
import random
import string
def main():
    """Ansible entry point: ensure an EC2 key pair is present or absent.

    Reads the module parameters (name, key_material, force, state, wait,
    wait_timeout), connects to EC2 via boto, and creates, replaces or
    deletes the named key pair accordingly.  Check mode is supported: the
    AWS calls that mutate state are skipped but ``changed`` is still
    reported.  Exits via module.exit_json()/fail_json().
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        key_material=dict(required=False),
        force = dict(required=False, type='bool', default=True),
        state = dict(default='present', choices=['present', 'absent']),
        wait = dict(type='bool', default=False),
        wait_timeout = dict(default=300),
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    name = module.params['name']
    state = module.params.get('state')
    key_material = module.params.get('key_material')
    force = module.params.get('force')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    changed = False
    ec2 = ec2_connect(module)
    # find the key if present
    key = ec2.get_key_pair(name)
    # Ensure requested key is absent
    if state == 'absent':
        if key:
            '''found a match, delete it'''
            if not module.check_mode:
                try:
                    key.delete()
                    if wait:
                        # Poll until AWS no longer reports the key, up to
                        # wait_timeout seconds.
                        start = time.time()
                        action_complete = False
                        while (time.time() - start) < wait_timeout:
                            if not ec2.get_key_pair(name):
                                action_complete = True
                                break
                            time.sleep(1)
                        if not action_complete:
                            module.fail_json(msg="timed out while waiting for the key to be removed")
                except Exception as e:
                    module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
            key = None
            changed = True
    # Ensure requested key is present
    elif state == 'present':
        if key:
            # existing key found
            if key_material and force:
                # EC2's fingerprints are non-trivial to generate, so push this key
                # to a temporary name and make ec2 calculate the fingerprint for us.
                #
                # http://blog.jbrowne.com/?p=23
                # https://forums.aws.amazon.com/thread.jspa?messageID=352828
                # find an unused name
                test = 'empty'
                while test:
                    randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
                    tmpkeyname = "ansible-" + ''.join(randomchars)
                    test = ec2.get_key_pair(tmpkeyname)
                # create tmp key
                tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
                # get tmp key fingerprint
                tmpfingerprint = tmpkey.fingerprint
                # delete tmp key
                tmpkey.delete()
                # Only replace the real key when the fingerprints differ.
                if key.fingerprint != tmpfingerprint:
                    if not module.check_mode:
                        key.delete()
                        # NOTE(review): unlike the create path below, this
                        # re-import does not apply to_bytes() to
                        # key_material — confirm whether that is intended.
                        key = ec2.import_key_pair(name, key_material)
                        if wait:
                            start = time.time()
                            action_complete = False
                            while (time.time() - start) < wait_timeout:
                                if ec2.get_key_pair(name):
                                    action_complete = True
                                    break
                                time.sleep(1)
                            if not action_complete:
                                module.fail_json(msg="timed out while waiting for the key to be re-created")
                    changed = True
            pass
        # if the key doesn't exist, create it now
        else:
            '''no match found, create it'''
            if not module.check_mode:
                if key_material:
                    '''We are providing the key, need to import'''
                    key = ec2.import_key_pair(name, to_bytes(key_material))
                else:
                    '''
                    No material provided, let AWS handle the key creation and
                    retrieve the private key
                    '''
                    key = ec2.create_key_pair(name)
                if wait:
                    start = time.time()
                    action_complete = False
                    while (time.time() - start) < wait_timeout:
                        if ec2.get_key_pair(name):
                            action_complete = True
                            break
                        time.sleep(1)
                    if not action_complete:
                        module.fail_json(msg="timed out while waiting for the key to be created")
            changed = True
    if key:
        data = {
            'name': key.name,
            'fingerprint': key.fingerprint
        }
        # NOTE(review): assumes .material is always an attribute of the boto
        # KeyPair object (AWS only returns private material on creation) —
        # confirm for imported/looked-up keys.
        if key.material:
            data.update({'private_key': key.material})
        module.exit_json(changed=changed, key=data)
    else:
        module.exit_json(changed=changed, key=None)
# import module snippets
# Legacy Ansible pattern: these wildcard imports supply AnsibleModule,
# ec2_argument_spec, ec2_connect, time, etc. used by main() above.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
gnowledge/OTM2 | opentreemap/treemap/migrations/0013_auto__del_field_tree_created_by__del_field_plot_created_by.py | 3 | 15159 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Apply the migration: drop the created_by foreign-key columns
        # from both the tree and plot tables.
        # Deleting field 'Tree.created_by'
        db.delete_column(u'treemap_tree', 'created_by_id')
        # Deleting field 'Plot.created_by'
        db.delete_column(u'treemap_plot', 'created_by_id')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Tree.created_by'
raise RuntimeError("Cannot reverse this migration. 'Tree.created_by' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Plot.created_by'
raise RuntimeError("Cannot reverse this migration. 'Plot.created_by' and its values cannot be restored.")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.audit': {
'Meta': {'object_name': 'Audit'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'previous_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'ref_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Audit']", 'null': 'True'}),
'requires_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.boundary': {
'Meta': {'object_name': 'Boundary'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.fieldpermission': {
'Meta': {'object_name': 'FieldPermission'},
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'permission_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"})
},
u'treemap.importevent': {
'Meta': {'object_name': 'ImportEvent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'imported_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"}),
'imported_on': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'treemap.instance': {
'Meta': {'object_name': 'Instance'},
'basemap_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basemap_type': ('django.db.models.fields.CharField', [], {'default': "u'google'", 'max_length': '255'}),
'boundaries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Boundary']", 'null': 'True', 'blank': 'True'}),
'center': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857'}),
'default_role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'default_role'", 'to': u"orm['treemap.Role']"}),
'geo_rev': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'treemap.instancespecies': {
'Meta': {'object_name': 'InstanceSpecies'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']"})
},
u'treemap.plot': {
'Meta': {'object_name': 'Plot'},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.ImportEvent']", 'null': 'True', 'blank': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'owner_orig_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'width': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.reputationmetric': {
'Meta': {'object_name': 'ReputationMetric'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'approval_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'denial_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'direct_write_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'treemap.role': {
'Meta': {'object_name': 'Role'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rep_thresh': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.species': {
'Meta': {'object_name': 'Species'},
'bloom_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'cultivar_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fact_sheet': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fall_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'flower_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fruit_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'itree_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'max_dbh': ('django.db.models.fields.IntegerField', [], {'default': '200'}),
'max_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
'native_status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'palatable_human': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'plant_guide': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'wildlife_value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.tree': {
'Meta': {'object_name': 'Tree'},
'canopy_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_planted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_removed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'diameter': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'import_event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.ImportEvent']", 'null': 'True', 'blank': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'plot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Plot']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']", 'null': 'True', 'blank': 'True'})
},
u'treemap.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Role']", 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
}
}
complete_apps = ['treemap'] | gpl-3.0 |
DevicePilot/synth | synth/devices/helpers/dark_sky.py | 1 | 6316 | #T!/usr/bin/env python
#
# Looks up historical weather data in Dark Sky
#
# Copyright (c) 2019 DevicePilot Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json, http, urllib
import time
import logging
# Paths are relative to the synth working directory; adjusted below when this
# module is run directly as a script from its own folder.
CACHE_FILE = "../synth_logs/dark_sky_cache.txt"
KEY_FILE = "../synth_accounts/default.json"
API_DOMAIN = "api.darksky.net"
# Time quanta, in seconds.
HOUR = 60 * 60
DAY = HOUR * 24
if __name__ == "__main__":
    # Running as a script from synth/devices/helpers/: hop up to the repo root.
    CACHE_FILE = "../../../" + CACHE_FILE
    KEY_FILE = "../../../" + KEY_FILE
# ==== Dark Sky API ====
def clear_caches():
    """Reset the module-level cache to its empty shape (one sub-cache per kind)."""
    global caches
    caches = {"weather" : {}}
# Module-level initialisation: start with an empty cache, then try to load a
# previously persisted cache and the Dark Sky API key from disk.
clear_caches()
try:
    logging.info("Powered by Dark Sky (tm)") # Requirement of service to display this
    f = open(CACHE_FILE)
    caches = json.loads(f.read())
    f.close()
    logging.info("Used existing Dark Sky cache "+CACHE_FILE)
except:
    # NOTE(review): bare except deliberately treats any failure (missing
    # file, bad JSON) as "no cache" and starts fresh.
    logging.warning("No existing Dark Sky cache")
account_key = None
try:
    f = open(KEY_FILE)
    kf = json.loads(f.read())
    f.close()
    account_key = kf["dark_sky_key"]
except:
    # Lookups will still run but fail server-side with account_key = None.
    logging.error("Can't open Dark Sky key file")
def set_headers():
    """Return the HTTP headers for Dark Sky API requests.

    Only a JSON content type is required; authentication travels in the
    request URL rather than in a header.
    """
    return {"Content-Type": "application/json"}
def add_to_cache(cache, key, contents):
    """Store *contents* under *key* in the named sub-cache (in memory only;
    write_cache() persists to disk separately)."""
    caches[cache][key] = contents
def write_cache():
    """Persist the in-memory cache to CACHE_FILE.

    A no-op (with an info log) when CACHE_FILE is None, which main() uses
    to disable disk persistence during testing.
    """
    if CACHE_FILE:
        s = json.dumps(caches)
        # Warn (but still write) once the serialized cache passes ~10 MB.
        if len(s) > 1e7:
            logging.warning("Dark Sky cache file size is getting large: "+str(len(s))+" bytes")
        open(CACHE_FILE, "wt").write(s)
    else:
        logging.info("Not writing Dark Sky back to cache")
def round_time(epoch_seconds):
    """Snap a Unix timestamp down to the start of its hour.

    Quantizing requests to hourly resolution means that sim runs made a few
    minutes apart hit the same cache entries instead of each triggering a
    fresh Dark Sky lookup.
    """
    whole_hours = int(epoch_seconds / HOUR)
    return whole_hours * HOUR
def round_angle(angle):
    """Quantize an angle to 3 decimal places (roughly 100 m — ample for weather)."""
    thousandths = int(angle * 1000)
    return thousandths / 1000.0
def extract_and_cache(DS_results, latitude, longitude, epoch_seconds=None):
    """Convert one Dark Sky reading into our field names and cache it.

    Dark Sky returns the same property set for its "currently" reading and
    for each hourly reading; a single Time Machine request therefore yields
    a whole day of hourly readings, and caching them all cuts API reads by
    up to 24x.  The cache is keyed on the caller-requested (rounded)
    coordinates, not whatever location Dark Sky echoes back.

    When *epoch_seconds* is None the reading's own "time" field is used for
    the cache key; otherwise the supplied timestamp is.
    """
    reading_time = DS_results["time"] if epoch_seconds is None else epoch_seconds
    cache_key = str((latitude, longitude, round_time(reading_time)))
    # (our field name, Dark Sky field name, default) — SI units were requested.
    field_map = [
        ("external_temperature",        "temperature",       0.0),  # C
        ("wind_speed",                  "windSpeed",         0.0),  # m/s
        ("precipitation_intensity",     "precipIntensity",   0.0),  # mm/hour
        ("precipitation_probability",   "precipProbability", 0.0),  # 0..1
        ("cloud_cover",                 "cloudCover",        0.5),  # 0..1
        ("humidity",                    "humidity",          0.5),  # 0..1
    ]
    result = {ours: DS_results.get(theirs, default)
              for ours, theirs, default in field_map}
    add_to_cache("weather", cache_key, result)
    return result
def get_weather(latitude, longitude, epoch_seconds):
    """Return the weather-properties dict for (latitude, longitude, time).

    Served from the in-memory cache when possible; otherwise performs a
    Dark Sky request and caches every hourly reading from the response
    (see extract_and_cache).  Raises on any HTTP/parse failure.
    """
    epoch_seconds = round_time(epoch_seconds)
    latitude = round_angle(latitude)
    longitude = round_angle(longitude)
    cache_key = str((latitude, longitude, epoch_seconds))
    if cache_key in caches["weather"]:
        return caches["weather"][cache_key]
    logging.info("Looking up " + cache_key + " in Dark Sky")
    time_start = time.time()
    conn = http.client.HTTPSConnection(API_DOMAIN)
    URL = "/forecast/"+str(account_key)+"/"+str(latitude)+","+str(longitude)+","+str(epoch_seconds)+"?units=si&exclude=minutely,daily,flags"  # Using exclude apparently makes it a bit faster?
    conn.request('GET', URL, None, set_headers())
    resp = conn.getresponse()
    result = resp.read().decode('utf-8')  # We need a string not bytes[]
    time_result = time.time()
    data = None  # so the error handler below never hits an unbound name
    try:
        data = json.loads(result)
        result = extract_and_cache(data["currently"], latitude, longitude, epoch_seconds)  # Requested reading
        for r in data["hourly"]["data"]:
            extract_and_cache(r, latitude, longitude)  # Also cache info for other hours that DS has given us
        write_cache()
    except Exception:
        # BUG FIX: the original bare `except:` unconditionally logged
        # json.dumps(data); when json.loads() itself raised, `data` was
        # unbound and the resulting NameError masked the real failure.
        logging.error(URL)
        logging.error(str(result))
        if data is not None:
            logging.error(json.dumps(data))
        raise
    time_end = time.time()
    t_fetch = time_result - time_start
    t_process = time_end - time_result
    if t_fetch > 1 or t_process > 1:
        logging.warning("(Dark Sky took "+str(t_fetch)+"s to fetch and "+str(t_process)+"s to process)")
    return result
def main():
    """Self-test: fetch 48 hours of weather for Cambridge UK and print it."""
    global CACHE_FILE, caches
    logging.basicConfig(level = logging.NOTSET)
    logging.info("Starting")
    t = 1552898857  # 08:47 on 18/03/2019
    (lat, lon) = (52.2053, 0.1218)  # Cambridge UK
    CACHE_FILE = None  # Don't read or write cache file during testing (but still cache in memory)
    clear_caches()
    for h in range(48):
        result = get_weather(lat, lon, t + h * HOUR)
        print(result)
# Run the self-test loop when executed as a script.
if __name__ == "__main__":
    main()
| mit |
BrixInMotion/RaspiRobot-Webcontrol | robot_controller.py | 1 | 13931 | #! /usr/bin/env python
# Copyright (c) 2014, Dawn Robotics Ltd
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Dawn Robotics Ltd nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import math
import time
import Queue
import mini_driver
import threading
import time
import DrivebyXMC
#---------------------------------------------------------------------------------------------------
class RobotController:
    """Top-level robot controller.

    Drives the wheel motors and reads sensors through the Arduino "mini
    driver" board, while camera/claw/elevator servo commands go out over the
    DrivebyXMC bus.  Joystick-style motion commands time out after
    MOTION_COMMAND_TIMEOUT seconds, zeroing all speeds.
    """

    # Servo angle limits (degrees) and the centred position.
    MIN_ANGLE = 0.0
    MAX_ANGLE = 180.0
    CENTRE_ANGLE = (MIN_ANGLE + MAX_ANGLE)/2.0

    MAX_UPDATE_TIME_DIFF = 0.25                      # clamp on dt used in update() (seconds)
    TIME_BETWEEN_SERVO_SETTING_UPDATES = 1.0         # seconds
    TIME_BETWEEN_SENSOR_CONFIGURATION_UPDATES = 0.5  # seconds

    JOYSTICK_DEAD_ZONE = 0.1        # normalised stick deflection treated as zero
    MAX_ABS_NECK_SPEED = 30.0       # Degrees per second

    MOTION_COMMAND_TIMEOUT = 2.0    # If no commands for the motors are received in this time then
                                    # the motors (drive and servo) are set to zero speed

    #-----------------------------------------------------------------------------------------------
    def __init__( self, robotConfig ):
        """Initialise the XMC bus, connect to the mini driver and reset state.

        Raises Exception if the mini driver cannot be contacted.
        """
        print ("init bus...")
        DrivebyXMC.initialize_bus()
        self.miniDriver = mini_driver.MiniDriver()
        connected = self.miniDriver.connect()
        if not connected:
            raise Exception( "Unable to connect to the mini driver" )

        self.robotConfig = robotConfig
        self.leftMotorSpeed = 0
        self.rightMotorSpeed = 0
        self.panAngle = self.CENTRE_ANGLE
        self.tiltAngle = self.CENTRE_ANGLE
        self.panSpeed = 0.0
        self.tiltSpeed = 0.0

        self.lastServoSettingsSendTime = 0.0
        self.lastSensorConfigurationSendTime = 0.0
        self.lastUpdateTime = 0.0
        self.lastMotionCommandTime = time.time()

        # Optional user-supplied sensor module running on the Pi itself
        self.piSensorModuleName = ""
        self.piSensorModule = None
        self.piSensorReader = None
        self.piSensorDict = {}

    #-----------------------------------------------------------------------------------------------
    def __del__( self ):
        self.disconnect()

    #-----------------------------------------------------------------------------------------------
    def disconnect( self ):
        """Release the connection to the mini driver."""
        self.miniDriver.disconnect()

    #-----------------------------------------------------------------------------------------------
    def getStatusDict( self ):
        """Return a status snapshot (battery, preset speeds, sensors) for clients."""
        presetMaxAbsMotorSpeed, presetMaxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
        statusDict = {
            "batteryVoltage" : self.miniDriver.getBatteryVoltageReading().data,
            "presetMaxAbsMotorSpeed" : presetMaxAbsMotorSpeed,
            "presetMaxAbsTurnSpeed" : presetMaxAbsTurnSpeed,
            "sensors" : self.getSensorDict()
        }
        return statusDict

    #-----------------------------------------------------------------------------------------------
    def getSensorDict( self ):
        """Gather the latest mini driver readings plus any Pi-attached sensor values."""
        sensorDict = {
            "batteryVoltage" : self.miniDriver.getBatteryVoltageReading(),
            "digital" : self.miniDriver.getDigitalReadings(),
            "analog" : self.miniDriver.getAnalogReadings(),
            "ultrasonic" : self.miniDriver.getUltrasonicReading(),
            "encoders" : self.miniDriver.getEncodersReading(),
        }
        # Pi sensor values (if a reader is active) overlay the defaults
        sensorDict.update( self.piSensorDict )
        return sensorDict

    #-----------------------------------------------------------------------------------------------
    def normaliseJoystickData( self, joystickX, joystickY ):
        """Clamp a joystick vector to unit length and zero it inside the dead zone."""
        stickVectorLength = math.sqrt( joystickX**2 + joystickY**2 )
        if stickVectorLength > 1.0:
            joystickX /= stickVectorLength
            joystickY /= stickVectorLength

        if stickVectorLength < self.JOYSTICK_DEAD_ZONE:
            joystickX = 0.0
            joystickY = 0.0

        return ( joystickX, joystickY )

    #-----------------------------------------------------------------------------------------------
    def centreNeck( self ):
        """Return the pan/tilt neck (and the XMC camera) to the centre position."""
        self.panAngle = self.CENTRE_ANGLE
        self.tiltAngle = self.CENTRE_ANGLE
        self.panSpeed = 0.0
        self.tiltSpeed = 0.0
        DrivebyXMC.CentreCamera()

    #-----------------------------------------------------------------------------------------------
    def setMotorJoystickPos( self, joystickX, joystickY ):
        """Convert a joystick position into differential drive motor speeds."""
        joystickX, joystickY = self.normaliseJoystickData( joystickX, joystickY )

        if self.robotConfig.usePresetMotorSpeeds:
            maxAbsMotorSpeed, maxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
        else:
            maxAbsMotorSpeed = self.robotConfig.customMaxAbsMotorSpeed
            maxAbsTurnSpeed = self.robotConfig.customMaxAbsTurnSpeed

        # Set forward speed from joystickY
        leftMotorSpeed = maxAbsMotorSpeed*joystickY
        rightMotorSpeed = maxAbsMotorSpeed*joystickY

        # Set turn speed from joystickX
        leftMotorSpeed += maxAbsTurnSpeed*joystickX
        rightMotorSpeed -= maxAbsTurnSpeed*joystickX

        leftMotorSpeed = max( -maxAbsMotorSpeed, min( leftMotorSpeed, maxAbsMotorSpeed ) )
        rightMotorSpeed = max( -maxAbsMotorSpeed, min( rightMotorSpeed, maxAbsMotorSpeed ) )

        # NOTE(review): only the left motor is scaled by leftMotorScale —
        # presumably a trim factor to compensate motor mismatch; confirm.
        self.leftMotorSpeed = leftMotorSpeed*self.robotConfig.leftMotorScale
        self.rightMotorSpeed = rightMotorSpeed

        self.lastMotionCommandTime = time.time()

    #-----------------------------------------------------------------------------------------------
    def setMotorSpeeds( self, leftMotorSpeed, rightMotorSpeed ):
        """Directly set the drive motor speeds, clamped to the configured maximum."""
        if self.robotConfig.usePresetMotorSpeeds:
            maxAbsMotorSpeed, maxAbsTurnSpeed = self.miniDriver.getPresetMotorSpeeds()
        else:
            maxAbsMotorSpeed = self.robotConfig.customMaxAbsMotorSpeed
            maxAbsTurnSpeed = self.robotConfig.customMaxAbsTurnSpeed

        self.leftMotorSpeed = max( -maxAbsMotorSpeed, min( leftMotorSpeed, maxAbsMotorSpeed ) )
        self.rightMotorSpeed = max( -maxAbsMotorSpeed, min( rightMotorSpeed, maxAbsMotorSpeed ) )
        print ("leftMotorSpeed: ", leftMotorSpeed, " rightMotorSpeed: ", rightMotorSpeed)
        self.lastMotionCommandTime = time.time()

    #-----------------------------------------------------------------------------------------------
    def setNeckJoystickPos( self, joystickX, joystickY ):
        """Convert a joystick position into pan/tilt neck speeds and forward
        the raw co-ordinates to the XMC camera servos."""
        #print 'Servo X: ', joystickX,' Y: ', joystickY
        DrivebyXMC.Camera_servos(joystickX, joystickY)  # send co-ordinates to DrivebyXMC

        joystickX, joystickY = self.normaliseJoystickData( joystickX, joystickY )

        # Set pan and tilt angle speeds (inverted axes)
        self.panSpeed = -self.MAX_ABS_NECK_SPEED*joystickX
        self.tiltSpeed = -self.MAX_ABS_NECK_SPEED*joystickY

        self.lastMotionCommandTime = time.time()

    #-----------------------------------------------------------------------------------------------
    def setClawJoystickPos( self, joystickX, joystickY ):
        """Forward joystick co-ordinates to the claw servos on the XMC bus."""
        DrivebyXMC.Claw_servos(joystickX, joystickY)  # send co-ordinates to DrivebyXMC

    #-----------------------------------------------------------------------------------------------
    def setElevatorJoystickPos( self, joystickX, joystickY ):
        """Forward joystick co-ordinates to the elevator/claw-turn servos on the XMC bus."""
        DrivebyXMC.Elevator_clawturn(joystickX, joystickY)  # send co-ordinates to DrivebyXMC

    #-----------------------------------------------------------------------------------------------
    def setNeckAngles( self, panAngle, tiltAngle ):
        """Set absolute neck angles (degrees), clamped to the servo range."""
        self.panAngle = max( self.MIN_ANGLE, min( panAngle, self.MAX_ANGLE ) )
        self.tiltAngle = max( self.MIN_ANGLE, min( tiltAngle, self.MAX_ANGLE ) )
        self.panSpeed = 0.0
        self.tiltSpeed = 0.0

        self.lastMotionCommandTime = time.time()

    #-----------------------------------------------------------------------------------------------
    def _loadPiSensorModule( self ):
        """(Re)load the user-configured Pi sensor module, replacing any existing reader.

        Import or construction failures are logged and leave the previous
        module cleared rather than raising.
        """
        if self.robotConfig.piSensorModuleName != "":
            # Try to import the new sensor module
            newSensorModule = None
            try:
                newSensorModule = __import__( self.robotConfig.piSensorModuleName, fromlist=[''] )
            except Exception as e:
                logging.error( "Caught exception when trying to import Pi sensor module" )
                logging.error( str( e ) )

            if newSensorModule != None:
                # We have a new sensor module. Shutdown any existing sensor reader
                if self.piSensorReader != None:
                    self.piSensorReader.shutdown()
                    self.piSensorReader = None

                # Remove reference to existing sensor module
                self.piSensorModule = None
                self.piSensorModuleName = ""

                # Try to create the new Pi sensor reader
                newSensorReader = None
                try:
                    newSensorReader = newSensorModule.PiSensorReader()
                except Exception as e:
                    logging.error( "Caught exception when trying to create Pi sensor reader" )
                    logging.error( str( e ) )

                if newSensorReader != None:
                    self.piSensorModule = newSensorModule
                    self.piSensorModuleName = self.robotConfig.piSensorModuleName
                    self.piSensorReader = newSensorReader

    #-----------------------------------------------------------------------------------------------
    def update( self ):
        """Periodic tick: apply command timeouts, integrate neck motion,
        push drive/servo commands and read sensors."""
        if not self.miniDriver.isConnected():
            return

        curTime = time.time()
        timeDiff = min( curTime - self.lastUpdateTime, self.MAX_UPDATE_TIME_DIFF )

        # Turn off the motors if we haven't received a motion command for a while
        if curTime - self.lastMotionCommandTime > self.MOTION_COMMAND_TIMEOUT:
            self.leftMotorSpeed = 0.0
            self.rightMotorSpeed = 0.0
            self.panSpeed = 0.0
            self.tiltSpeed = 0.0

        # Update the pan and tilt angles
        self.panAngle += self.panSpeed*timeDiff
        self.tiltAngle += self.tiltSpeed*timeDiff
        self.panAngle = max( self.MIN_ANGLE, min( self.panAngle, self.MAX_ANGLE ) )
        self.tiltAngle = max( self.MIN_ANGLE, min( self.tiltAngle, self.MAX_ANGLE ) )

        # Update the mini driver
        #self.miniDriver.setOutputs(
        #    self.leftMotorSpeed, self.rightMotorSpeed, self.panAngle, self.tiltAngle )
        #self.miniDriver.update()
        # Drive commands now go over the XMC bus instead of the mini driver.
        DrivebyXMC.Drive(self.leftMotorSpeed, self.rightMotorSpeed)

        # Send servo settings if needed
        if curTime - self.lastServoSettingsSendTime >= self.TIME_BETWEEN_SERVO_SETTING_UPDATES:
            self.miniDriver.setPanServoLimits(
                self.robotConfig.panPulseWidthMin,
                self.robotConfig.panPulseWidthMax )
            self.miniDriver.setTiltServoLimits(
                self.robotConfig.tiltPulseWidthMin,
                self.robotConfig.tiltPulseWidthMax )

            self.lastServoSettingsSendTime = curTime

        # Send sensor configuration if needed
        if curTime - self.lastSensorConfigurationSendTime >= self.TIME_BETWEEN_SENSOR_CONFIGURATION_UPDATES:
            self.miniDriver.setSensorConfiguration( self.robotConfig.miniDriverSensorConfiguration )

            self.lastSensorConfigurationSendTime = curTime

        # Change the Pi sensor module if needed
        if self.robotConfig.piSensorModuleName != self.piSensorModuleName:
            self._loadPiSensorModule()

        # Read from any sensors attached to the Pi
        if self.piSensorReader != None:
            self.piSensorDict = {}
            try:
                self.piSensorDict = self.piSensorReader.readSensors()
            except Exception as e:
                logging.error( "Caught exception when trying to read from Pi sensor reader" )
                logging.error( str( e ) )

        self.lastUpdateTime = curTime
| bsd-3-clause |
Raag079/self-driving-car | Term03-PathPlanning-SemanticSegmentation-and-Systems/P3-Capstone-Project/ros/src/waypoint_loader/waypoint_loader.py | 2 | 2683 | #!/usr/bin/env python
import os
import csv
import math
from geometry_msgs.msg import Quaternion
from styx_msgs.msg import Lane, Waypoint
import tf
import rospy
# Expected CSV columns, and the deceleration constant used when tapering
# speeds towards the final waypoint (see WaypointLoader.decelerate).
CSV_HEADER = ['x', 'y', 'z', 'yaw']
MAX_DECEL = 1.0
class WaypointLoader(object):
    """ROS node that loads base waypoints from a CSV file and publishes them
    (with velocities tapered to zero near the end) on /base_waypoints."""

    def __init__(self):
        rospy.init_node('waypoint_loader', log_level=rospy.DEBUG)
        self.pub = rospy.Publisher('/base_waypoints', Lane, queue_size=1)
        # Target velocity and the CSV path come from private ROS parameters.
        self.velocity = rospy.get_param('~velocity')
        self.new_waypoint_loader(rospy.get_param('~path'))
        rospy.spin()

    def new_waypoint_loader(self, path):
        """Load and publish the waypoints at `path`, or log an error."""
        if os.path.isfile(path):
            waypoints = self.load_waypoints(path)
            self.publish(waypoints)
            rospy.loginfo('Waypoint Loded')
        else:
            rospy.logerr('%s is not a file', path)

    def quaternion_from_yaw(self, yaw):
        # Yaw-only rotation (roll = pitch = 0).
        return tf.transformations.quaternion_from_euler(0., 0., yaw)

    def get_velocity(self, velocity):
        # NOTE(review): unused helper — load_waypoints multiplies by 0.27778
        # inline, which is the same km/h -> m/s conversion as dividing by 3.6.
        return velocity/3.6

    def load_waypoints(self, fname):
        """Parse the CSV into Waypoint messages and apply the deceleration profile."""
        waypoints = []
        with open(fname) as wfile:
            reader = csv.DictReader(wfile, CSV_HEADER)
            for wp in reader:
                p = Waypoint()
                p.pose.pose.position.x = float(wp['x'])
                p.pose.pose.position.y = float(wp['y'])
                p.pose.pose.position.z = float(wp['z'])
                q = self.quaternion_from_yaw(float(wp['yaw']))
                p.pose.pose.orientation = Quaternion(*q)
                p.twist.twist.linear.x = float(self.velocity*0.27778)  # km/h -> m/s
                waypoints.append(p)
        return self.decelerate(waypoints)

    def distance(self, p1, p2):
        # Euclidean distance between two geometry points.
        x, y, z = p1.x - p2.x, p1.y - p2.y, p1.z - p2.z
        return math.sqrt(x*x + y*y + z*z)

    def decelerate(self, waypoints):
        """Cap velocities so the vehicle can stop at the final waypoint.

        Uses v = sqrt(2 * a * d), the maximum speed that still allows a stop
        within distance d at deceleration a.  NOTE(review): the extra * 3.6
        factor looks like a km/h conversion although twist velocities are
        set in m/s above — confirm intended units.
        """
        last = waypoints[-1]
        last.twist.twist.linear.x = 0.
        # Walk backwards from the end, lowering speeds where necessary.
        for wp in waypoints[:-1][::-1]:
            dist = self.distance(wp.pose.pose.position, last.pose.pose.position)
            vel = math.sqrt(2 * MAX_DECEL * dist) * 3.6
            if vel < 1.:
                vel = 0.
            wp.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
        return waypoints

    def publish(self, waypoints):
        """Publish the waypoint list on /base_waypoints at 40 Hz until shutdown."""
        rate = rospy.Rate(40)
        while not rospy.is_shutdown():
            lane = Lane()
            lane.header.frame_id = '/world'
            lane.header.stamp = rospy.Time(0)
            lane.waypoints = waypoints
            self.pub.publish(lane)
            rate.sleep()
# Start the node when run as a script; ROS interrupts surface as
# ROSInterruptException.
if __name__ == '__main__':
    try:
        WaypointLoader()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint node.')
| mit |
clofresh/xbmc-vhx | resources/lib/requests/structures.py | 5 | 1725 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
class CaseInsensitiveDict(dict):
    """Case-insensitive Dictionary

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header.

    Lookups are case-insensitive while the original key case is preserved
    for iteration.  Missing keys fall through to ``None`` instead of
    raising ``KeyError``.
    """

    @property
    def lower_keys(self):
        # Lazily (re)build the lowercase-key -> real-key index; it is
        # invalidated whenever the dict is mutated.
        if not hasattr(self, '_lower_keys') or not self._lower_keys:
            # keys() works on both Python 2 and 3 (the original used
            # iterkeys(), which does not exist on Python 3).
            self._lower_keys = dict((k.lower(), k) for k in self.keys())
        return self._lower_keys

    def _clear_lower_keys(self):
        if hasattr(self, '_lower_keys'):
            self._lower_keys.clear()

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        self._clear_lower_keys()

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # BUG FIX: the original called self._lower_keys.clear() directly,
        # which raised AttributeError when the index had never been built.
        self._clear_lower_keys()

    def __contains__(self, key):
        return key.lower() in self.lower_keys

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None
        if key in self:
            return dict.__getitem__(self, self.lower_keys[key.lower()])

    def get(self, key, default=None):
        if key in self:
            return self[key]
        else:
            return default
class LookupDict(dict):
    """A dict whose item lookups read instance attributes.

    Unknown keys fall through to ``None`` rather than raising ``KeyError``.
    """

    def __init__(self, name=None):
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        return "<lookup '%s'>" % self.name

    def __getitem__(self, key):
        # Fall through for unknown keys, so lookups default to None.
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
stshine/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
# Column indices of the start/end timestamp and energy fields in a heartbeat
# log row (see read_heartbeat_log).
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
# Profiler whose heartbeats carry the energy readings used for power plots.
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
# Per-trial summary file name and the line indices of its time/energy/power
# fields (see find_best_executions).
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
    """Attach some text labels above each bar.

    NOTE(review): the label text is the empty string, so this currently only
    reserves space above the bars — presumably a placeholder for value labels.
    """
    for rect in rects:
        ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
    """Plot the raw totals for a configuration.

    Writes "<config>.png" to output_dir: a twin-axis bar chart with time on
    the left axis and energy on the right, one bar pair per profiler.

    Keyword arguments:
    config -- configuration name
    plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
    max_time, max_time_std, max_energy, max_energy_std -- single values
    normalize -- True/False
    """
    plot_data = sorted(plot_data)
    keys = [p for (p, tt, tts, te, tes) in plot_data]
    total_times = [tt for (p, tt, tts, te, tes) in plot_data]
    total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
    total_energies = [te for (p, tt, tts, te, tes) in plot_data]
    total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
    fig, ax1 = plt.subplots()
    ind = np.arange(len(keys))  # the x locations for the groups
    width = 0.35  # the width of the bars
    # add some text for labels, title and axes ticks
    ax1.set_title('Time/Energy Data for Configuration ' + config)
    ax1.set_xticks(ind + width)
    ax1.set_xticklabels(keys, rotation=45)
    fig.set_tight_layout(True)
    fig.set_size_inches(len(plot_data) / 1.5, 8)
    ax2 = ax1.twinx()
    # Normalize
    if normalize:
        # NOTE: '/=' on these list names rebinds them to numpy arrays.
        # Stddevs are scaled by the same factor as the corresponding totals.
        total_times_std /= np.sum(total_times)
        total_times /= np.sum(total_times)
        total_energies_std /= np.sum(total_energies)
        total_energies /= np.sum(total_energies)
        ax1.set_ylabel('Time (Normalized)')
        ax2.set_ylabel('Energy (Normalized)')
    else:
        # Divide by 1e6: ns -> ms for time and (presumably) uJ -> J for
        # energy, matching the axis labels below — TODO confirm input units.
        total_times_std /= np.array(1000000.0)
        total_times /= np.array(1000000.0)
        total_energies_std /= np.array(1000000.0)
        total_energies /= np.array(1000000.0)
        ax1.set_ylabel('Time (ms)')
        ax2.set_ylabel('Energy (Joules)')
    rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
    rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
    ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
    # set axis
    x1, x2, y1, y2 = plt.axis()
    if normalize:
        ax1.set_ylim(ymin=0, ymax=1)
        ax2.set_ylim(ymin=0, ymax=1)
    else:
        # Shared limits (passed in) keep scales comparable across configs.
        ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
        ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
    autolabel(rects1, ax1)
    autolabel(rects2, ax2)
    # plt.show()
    plt.savefig(path.join(output_dir, config + ".png"))
    plt.close(fig)
def create_raw_total_data(config_data):
    """Aggregate per-trial totals into per-profiler mean/stddev statistics.

    Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]

    Keyword arguments:
    config_data -- (trial, trial_data)
    """
    # Trials may issue different numbers of heartbeats, so totals are
    # collected per trial first and averaged afterwards.
    times_by_profiler = {}     # profiler -> [per-trial total time]
    energies_by_profiler = {}  # profiler -> [per-trial total energy]
    for (_trial, trial_data) in config_data:
        for (profiler, ts, te, es, ee) in trial_data:
            # Total the durations and energy deltas for this trial.
            times_by_profiler.setdefault(profiler, []).append(np.sum(te - ts))
            energies_by_profiler.setdefault(profiler, []).append(np.sum(ee - es))
    # Reduce each profiler's per-trial totals to mean and stddev.
    stats = []
    for profiler in times_by_profiler.keys():
        trial_times = times_by_profiler[profiler]
        trial_energies = energies_by_profiler[profiler]
        stats.append((profiler,
                      np.mean(trial_times),
                      np.std(trial_times),
                      np.mean(trial_energies),
                      np.std(trial_energies)))
    return stats
def plot_all_raw_totals(config_list, output_dir):
    """Plot column charts of the raw total time/energy spent in each profiler category.

    Renders each configuration twice — once normalized and once in raw units —
    into 'raw_totals_normalized/' and 'raw_totals/' under output_dir.

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
    os.makedirs(raw_total_norm_out_dir)
    raw_total_out_dir = path.join(output_dir, 'raw_totals')
    os.makedirs(raw_total_out_dir)
    # (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
    raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
    mean_times = []
    mean_times_std = []
    mean_energies = []
    mean_energies_std = []
    for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
        for (p, tt, tts, te, tes) in profiler_tup:
            mean_times.append(tt)
            mean_times_std.append(tts)
            mean_energies.append(te)
            mean_energies_std.append(tes)
    # get consistent max time/energy values across plots
    max_t = np.max(mean_times)
    max_t_std = np.max(mean_times_std)
    max_e = np.max(mean_energies)
    max_e_std = np.max(mean_energies_std)
    [plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
     for data in raw_totals_data]
    [plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
     for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
    """Plot time series for a single trial.

    One horizontal activity band per profiler plus an instantaneous-power
    line (right axis) from the energy profiler.  Writes
    "ts_<config>_<trial>.png" to output_dir.  (Python 2 code: uses xrange
    and list-returning map/filter.)

    Keyword arguments:
    config -- the config name
    trial -- the trial name
    trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
    max_end_time -- single value to use as max X axis value (for consistency across trials)
    output_dir -- the output directory
    """
    # TODO: Some profilers may have parallel tasks - need to identify this on plots
    max_end_time = max_end_time / 1000000.0  # ns -> ms for the X axis
    trial_data = sorted(trial_data)
    fig, ax1 = plt.subplots()
    keys = [p for (p, ts, te, es, ee) in trial_data]
    # add some text for labels, title and axes ticks
    ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
    ax1.set_xlabel('Time (ms)')
    ax1.grid(True)
    width = 8  # the width of the bars
    # one band per profiler, spaced 10 units apart on the Y axis
    ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
    ax1.set_yticklabels(keys)
    ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
    ax1.set_xlim(xmin=0, xmax=max_end_time)
    fig.set_tight_layout(True)
    fig.set_size_inches(16, len(trial_data) / 3)
    i = 10
    for (p, ts, te, es, ee) in trial_data:
        # one (start, duration) segment per heartbeat, in ms
        xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
        ax1.broken_barh(xranges, (i - 0.5 * width, width))
        i += 10
    # place a vbar at the final time for this trial
    last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
    plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
    power_times = []
    power_values = []
    for (p, ts, te, es, ee) in trial_data:
        if p == ENERGY_PROFILER_NAME:
            # instantaneous power samples from the energy profiler's heartbeats
            power_times = te / 1000000.0
            power_values = (ee - es) / ((te - ts) / 1000.0)
    ax2 = ax1.twinx()
    ax2.set_xlim(xmin=0, xmax=max_end_time)
    ax2.set_ylim(ymin=0, ymax=max_power)
    ax2.set_ylabel('Power (Watts)')
    ax2.plot(power_times, power_values, color='r')
    # plt.show()
    plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
    plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
    """Compute average power from start/end energy and start/end times.

    Works element-wise on numpy arrays as well as scalars.  The /1000.0 on
    the time delta matches the log units used elsewhere in this script
    (times presumably in ns, energies in uJ, giving Watts — confirm).
    """
    energy_delta = ee - es
    scaled_time_delta = (te - ts) / 1000.0
    return energy_delta / scaled_time_delta
def plot_all_time_series(config_list, output_dir):
    """Plot per-trial time-series charts (profiler activity plus power).

    First computes global axis limits so every trial plot shares the same
    scales, then renders each trial into 'time_series/' under output_dir.
    (Python 2 code: map/filter return lists.)

    Keyword arguments:
    config_list -- [(config, result of process_config_dir(...))]
    output_dir -- where to write plots to
    """
    time_series_out_dir = path.join(output_dir, 'time_series')
    os.makedirs(time_series_out_dir)
    max_end_times = []
    max_power_values = []
    for (c, cd) in config_list:
        for (t, td) in cd:
            trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
            max_end_times.append(np.nanmax(trial_max_end_times))
            for (p, ts, te, es, ee) in td:
                # We only care about the energy profiler (others aren't reliable for instant power anyway)
                if p == ENERGY_PROFILER_NAME and len(te) > 0:
                    max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
    max_time = np.nanmax(max_end_times)
    max_power = np.nanmax(np.array(max_power_values)) * 1.2  # leave a little space at the top
    for (config, config_data) in config_list:
        [plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
         for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
    """Read a heartbeat log file.

    Return: (profiler name, [start times], [end times], [start energies], [end energies])

    Keyword arguments:
    profiler_hb_log -- the file to read
    """
    with warnings.catch_warnings():
        try:
            warnings.simplefilter("ignore")
            # Pull only the start/end time and energy columns, as uint64.
            time_start, time_end, energy_start, energy_end = \
                np.loadtxt(profiler_hb_log,
                           dtype=np.dtype('uint64'),
                           skiprows=1,
                           usecols=(HB_LOG_IDX_START_TIME,
                                    HB_LOG_IDX_END_TIME,
                                    HB_LOG_IDX_START_ENERGY,
                                    HB_LOG_IDX_END_ENERGY),
                           unpack=True,
                           ndmin=1)
        except ValueError:
            # an empty log (no heartbeats) yields empty series
            time_start, time_end, energy_start, energy_end = [], [], [], []
    # Presumably file names look like "<prefix>-<profiler>.<ext>"; take the
    # middle component as the profiler name — TODO confirm naming scheme.
    name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
    return (name,
            np.atleast_1d(time_start),
            np.atleast_1d(time_end),
            np.atleast_1d(energy_start),
            np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
    """Process trial directory.

    Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
    Time and energy are normalized to 0 start values.

    Keyword arguments:
    trial_dir -- the directory for this trial
    """
    # Read every "*.log" file in the trial directory (Python 2: map/filter
    # return lists, so log_data can be traversed repeatedly below).
    log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
                   filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
    # Find the earliest timestamps and energy readings
    min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
    min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
    # Normalize timing/energy data to start values of 0
    return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
    """Process a configuration directory.

    Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]

    Keyword arguments:
    config_dir -- the directory for this configuration - contains subdirectories for each trial
    """
    # one entry per trial subdirectory
    return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
    """Process log directory.

    Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]

    Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
    """
    # Config directory names contain an underscore; the config label is the
    # part after the first '_' — presumably "<prefix>_<config>"; confirm.
    return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
            for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
    """Get the best time, energy, and power from the characterization summaries.

    Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))

    Keyword arguments:
    log_dir -- the log directory; contains one subdirectory per configuration,
               each with per-trial subdirectories holding a SUMMARY_OUTPUT file.
    """
    DEFAULT = ('', '', 1000000000.0)
    min_time = DEFAULT
    min_energy = DEFAULT
    min_power = DEFAULT
    for config_dir in os.listdir(log_dir):
        for trial_dir in os.listdir(path.join(log_dir, config_dir)):
            # `with` ensures each summary file is closed promptly.
            with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
                lines = s.readlines()
                # Each metric lives on a fixed line as "label: value".
                time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
                energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
                power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
                if time < min_time[2]:
                    min_time = (config_dir, trial_dir, time)
                if energy < min_energy[2]:
                    min_energy = (config_dir, trial_dir, energy)
                # BUG FIX: the original compared `power < min_power`, i.e. a
                # float against the whole (config, trial, power) tuple, so
                # the best-power entry was never tracked correctly.
                if power < min_power[2]:
                    min_power = (config_dir, trial_dir, power)
    return (min_time, min_energy, min_power)
def main():
    """This script processes the log files from the "characterize.py" script and produces visualizations.

    (Python 2 code: uses print statements.)
    """
    # Default log directory
    directory = 'heartbeat_logs'
    # Default output directory
    output_dir = 'plots'
    # Default android
    android = False
    # Parsing the input of the script
    parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
    parser.add_argument("-d", "--directory",
                        default=directory,
                        help="Heartbeat log directory \"-d heartbeat_logs\"")
    parser.add_argument("-o", "--output",
                        default=output_dir,
                        help="Specify the log output directory, for example \"-o plots\"")
    parser.add_argument("--android",
                        action="store_true",
                        dest="android",
                        default=False,
                        help="Specify if processing results from Android")
    args = parser.parse_args()
    if args.directory:
        directory = args.directory
    if args.output:
        output_dir = args.output
    if args.android:
        android = args.android
    if not os.path.exists(directory):
        print "Input directory does not exist: " + directory
        sys.exit(1)
    # Refuse to clobber an existing output directory.
    if os.path.exists(output_dir):
        print "Output directory already exists: " + output_dir
        sys.exit(1)
    res = process_logs(directory)
    if not android:
        # Summary files are only read for non-Android runs.
        best = find_best_executions(directory)
        print 'Best time:', best[0]
        print 'Best energy:', best[1]
        print 'Best power:', best[2]
    os.makedirs(output_dir)
    plot_all_raw_totals(res, output_dir)
    plot_all_time_series(res, output_dir)
# Entry point when run as a script.
if __name__ == "__main__":
    main()
| mpl-2.0 |
Big-B702/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/utils/Python_AES.py | 359 | 2121 | """Pure-Python AES implementation."""
from cryptomath import *
from AES import *
from rijndael import rijndael
def new(key, mode, IV):
    """Create a pure-Python AES cipher object for the given key, mode and IV."""
    return Python_AES(key, mode, IV)
class Python_AES(AES):
    """Pure-Python AES in CBC mode, backed by the rijndael implementation.

    self.IV is advanced to the last ciphertext block after every call, so
    successive encrypt()/decrypt() calls chain correctly across a stream.
    (Python 2 code: operates on byte strings via stringToBytes/bytesToString
    and relies on integer division in range(len(...)/16).)
    """
    def __init__(self, key, mode, IV):
        AES.__init__(self, key, mode, IV, "python")
        self.rijndael = rijndael(key, 16)
        self.IV = IV

    def encrypt(self, plaintext):
        """CBC-encrypt `plaintext` (length must be a multiple of 16 bytes)."""
        # NOTE(review): base-class encrypt() presumably only validates
        # arguments/state — confirm against the AES base class.
        AES.encrypt(self, plaintext)

        plaintextBytes = stringToBytes(plaintext)
        chainBytes = stringToBytes(self.IV)

        #CBC Mode: For each block...
        for x in range(len(plaintextBytes)/16):

            #XOR with the chaining block
            blockBytes = plaintextBytes[x*16 : (x*16)+16]
            for y in range(16):
                blockBytes[y] ^= chainBytes[y]
            blockString = bytesToString(blockBytes)

            #Encrypt it
            encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString))

            #Overwrite the input with the output
            for y in range(16):
                plaintextBytes[(x*16)+y] = encryptedBytes[y]

            #Set the next chaining block
            chainBytes = encryptedBytes

        # carry the chain forward for the next call
        self.IV = bytesToString(chainBytes)
        return bytesToString(plaintextBytes)

    def decrypt(self, ciphertext):
        """CBC-decrypt `ciphertext` (length must be a multiple of 16 bytes)."""
        AES.decrypt(self, ciphertext)

        ciphertextBytes = stringToBytes(ciphertext)
        chainBytes = stringToBytes(self.IV)

        #CBC Mode: For each block...
        for x in range(len(ciphertextBytes)/16):

            #Decrypt it
            blockBytes = ciphertextBytes[x*16 : (x*16)+16]
            blockString = bytesToString(blockBytes)
            decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString))

            #XOR with the chaining block and overwrite the input with output
            for y in range(16):
                decryptedBytes[y] ^= chainBytes[y]
                ciphertextBytes[(x*16)+y] = decryptedBytes[y]

            #Set the next chaining block (the pre-decryption ciphertext block)
            chainBytes = blockBytes

        self.IV = bytesToString(chainBytes)
        return bytesToString(ciphertextBytes)
| apache-2.0 |
programadorjc/django | tests/staticfiles_tests/settings.py | 147 | 1039 | from __future__ import unicode_literals
import os.path
from django.utils._os import upath
# Filesystem anchors for the staticfiles test suite: TEST_ROOT is this
# package's directory; TESTFILES_PATH points at a fixture static dir.
TEST_ROOT = os.path.dirname(upath(__file__))
TESTFILES_PATH = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test')
# Settings overrides used by the staticfiles tests (applied elsewhere by the
# suite); all paths point at fixture directories under TEST_ROOT.
TEST_SETTINGS = {
    'DEBUG': True,
    'MEDIA_URL': '/media/',
    'STATIC_URL': '/static/',
    'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
    'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
    'STATICFILES_DIRS': [
        os.path.join(TEST_ROOT, 'project', 'documents'),
        # (prefix, path) pair: files from `path` are served under 'prefix/'.
        ('prefix', os.path.join(TEST_ROOT, 'project', 'prefixed')),
    ],
    'STATICFILES_FINDERS': [
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
        'django.contrib.staticfiles.finders.DefaultStorageFinder',
    ],
    'INSTALLED_APPS': [
        'django.contrib.staticfiles',
        'staticfiles_tests',
        'staticfiles_tests.apps.test',
        'staticfiles_tests.apps.no_label',
    ],
}
| bsd-3-clause |
Salat-Cx65/python-for-android | python3-alpha/python3-src/Tools/unicode/comparecodecs.py | 45 | 1434 | #!/usr/bin/env python3
""" Compare the output of two codecs.
(c) Copyright 2005, Marc-Andre Lemburg (mal@lemburg.com).
Licensed to PSF under a Contributor Agreement.
"""
import sys
def compare_codecs(encoding1, encoding2):
    """Compare two codecs and print any encoding/decoding mismatches.

    Encodes (almost) every Unicode code point with both codecs and decodes
    every single-byte value 0-255 with both codecs, printing one line per
    mismatch and a summary at the end.  A value a codec cannot handle is
    represented by the sentinel '<undefined>', so "one codec fails, the
    other succeeds" also counts as a mismatch.
    """
    print('Comparing encoding/decoding of %r and %r' % (encoding1, encoding2))
    mismatch = 0
    # Check encoding
    # NOTE(review): range(sys.maxunicode) skips the very last code point
    # (U+10FFFF); kept as-is to preserve the tool's historical behavior.
    for i in range(sys.maxunicode):
        u = chr(i)
        try:
            c1 = u.encode(encoding1)
        except UnicodeError:
            c1 = '<undefined>'
        try:
            c2 = u.encode(encoding2)
        except UnicodeError:
            c2 = '<undefined>'
        if c1 != c2:
            print(' * encoding mismatch for 0x%04X: %-14r != %r' % \
                  (i, c1, c2))
            mismatch += 1
    # Check decoding.
    # BUG FIX: in Python 3 only bytes can be decoded; the old code built a
    # str with chr(i) and called .decode() on it, which raised
    # AttributeError (not caught by `except UnicodeError`) and crashed.
    for i in range(256):
        c = bytes([i])
        try:
            u1 = c.decode(encoding1)
        except UnicodeError:
            u1 = '<undefined>'
        try:
            u2 = c.decode(encoding2)
        except UnicodeError:
            u2 = '<undefined>'
        if u1 != u2:
            print(' * decoding mismatch for 0x%04X: %-14r != %r' % \
                  (i, u1, u2))
            mismatch += 1
    if mismatch:
        print()
        print('Found %i mismatches' % mismatch)
    else:
        print('-> Codecs are identical.')
# Script usage: comparecodecs.py <encoding1> <encoding2>
if __name__ == '__main__':
    compare_codecs(sys.argv[1], sys.argv[2])
| apache-2.0 |
jasonwzhy/django | django/contrib/staticfiles/management/commands/runserver.py | 248 | 1361 | from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.management.commands.runserver import \
Command as RunserverCommand
class Command(RunserverCommand):
    help = "Starts a lightweight Web server for development and also serves static files."

    def add_arguments(self, parser):
        """Register the static-file flags on top of runserver's own arguments."""
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--nostatic', action="store_false", dest='use_static_handler', default=True,
            help='Tells Django to NOT automatically serve static files at STATIC_URL.')
        parser.add_argument(
            '--insecure', action="store_true", dest='insecure_serving', default=False,
            help='Allows serving static files even if DEBUG is False.')

    def get_handler(self, *args, **options):
        """
        Return the default handler, wrapped in a StaticFilesHandler when
        static files should be served — i.e. --nostatic was not given and
        either DEBUG is on or --insecure was passed.
        """
        handler = super(Command, self).get_handler(*args, **options)
        serve_static = options.get('use_static_handler', True)
        allow_insecure = options.get('insecure_serving', False)
        if serve_static and (settings.DEBUG or allow_insecure):
            handler = StaticFilesHandler(handler)
        return handler
| bsd-3-clause |
rodgerd/cobbler | koan/register.py | 12 | 6112 | """
registration tool for cobbler.
Copyright 2009 Red Hat, Inc.
Michael DeHaan <mdehaan@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import random
import os
import traceback
try:
from optparse import OptionParser
except:
from opt_parse import OptionParser # importing this for backwards compat with 2.2
import exceptions
try:
import subprocess as sub_process
except:
import sub_process
import time
import errno
import sys
import xmlrpclib
import glob
import socket
import utils
import string
import pprint
# usage: cobbler-register [--server=server] [--hostname=hostname] --profile=foo
def main():
"""
Command line stuff...
"""
p = OptionParser()
p.add_option("-s", "--server",
dest="server",
default=os.environ.get("COBBLER_SERVER",""),
help="attach to this cobbler server")
p.add_option("-f", "--fqdn",
dest="hostname",
default="",
help="override the discovered hostname")
p.add_option("-p", "--port",
dest="port",
default="2515",
help="cobbler xmlrpc port (default 25151)")
p.add_option("-P", "--profile",
dest="profile",
default="",
help="assign this profile to this system")
p.add_option("-b", "--batch",
dest="batch",
action="store_true",
help="indicates this is being run from a script")
(options, args) = p.parse_args()
#if not os.getuid() == 0:
# print "koan requires root access"
# return 3
try:
k = Register()
k.server = options.server
k.port = options.port
k.profile = options.profile
k.hostname = options.hostname
k.batch = options.batch
k.run()
except Exception, e:
(xa, xb, tb) = sys.exc_info()
try:
getattr(e,"from_koan")
print str(e)[1:-1] # nice exception, no traceback needed
except:
print xa
print xb
print string.join(traceback.format_list(traceback.extract_tb(tb)))
return 1
return 0
#=======================================================
class InfoException(exceptions.Exception):
    """
    Custom exception for tracking of fatal errors.

    main() checks for the `from_koan` marker attribute to decide whether the
    error is "nice" (print the message only) or needs a full traceback.
    """
    def __init__(self,value,**args):
        # Interpolate any keyword arguments into the message template.
        self.value = value % args
        self.from_koan = 1
    def __str__(self):
        return repr(self.value)
#=======================================================
class Register:
    """Collects registration parameters (filled in by main()) and performs
    the actual system registration against a remote cobbler server."""
    def __init__(self):
        """
        Constructor. Arguments will be filled in by optparse...
        """
        self.server = ""
        self.port = ""
        self.profile = ""
        self.hostname = ""
        self.batch = ""
    #---------------------------------------------------
    def run(self):
        """
        Commence with the registration already.

        Connects to the cobbler server, gathers local network info, resolves
        a hostname/system name, validates the requested profile remotely and
        finally registers the new system.  Raises InfoException on any fatal
        precondition failure.
        """
        # not really required, but probably best that ordinary users don't try
        # to run this not knowing what it does.
        if os.getuid() != 0:
            raise InfoException("root access is required to register")
        print "- preparing to koan home"
        self.conn = utils.connect_to_server(self.server, self.port)
        reg_info = {}
        print "- gathering network info"
        netinfo = utils.get_network_info()
        reg_info["interfaces"] = netinfo
        print "- checking hostname"
        sysname = ""
        if self.hostname != "" and self.hostname != "*AUTO*":
            hostname = self.hostname
            sysname = self.hostname
        else:
            # '*AUTO*' (or no override): derive the name from the fqdn; fall
            # back to a timestamp-based name when the fqdn is unusable.
            hostname = socket.getfqdn()
            if hostname == "localhost.localdomain":
                if self.hostname == '*AUTO*':
                    hostname = ""
                    sysname = str(time.time())
                else:
                    raise InfoException("must specify --fqdn, could not discover")
            if sysname == "":
                sysname = hostname
        if self.profile == "":
            raise InfoException("must specify --profile")
        # we'll do a profile check here just to avoid some log noise on the remote end.
        # network duplication checks and profile checks also happen on the remote end.
        avail_profiles = self.conn.get_profiles()
        matched_profile = False
        for x in avail_profiles:
            if x.get("name","") == self.profile:
                matched_profile=True
                break
        reg_info['name'] = sysname
        reg_info['profile'] = self.profile
        reg_info['hostname'] = hostname
        if not matched_profile:
            raise InfoException("no such remote profile, see 'koan --list-profiles'")
        if not self.batch:
            self.conn.register_new_system(reg_info)
            print "- registration successful, new system name: %s" % sysname
        else:
            # In batch mode a failed registration is reported but not fatal.
            try:
                self.conn.register_new_system(reg_info)
                print "- registration successful, new system name: %s" % sysname
            except:
                traceback.print_exc()
                print "- registration failed, ignoring because of --batch"
        return
if __name__ == "__main__":
    # BUG FIX: main() computes a return code (0 on success, 1 on error) that
    # was previously discarded, so the script always exited 0 even when
    # registration failed.  Propagate it as the process exit status.
    sys.exit(main())
| gpl-2.0 |
manu-chroma/whatsapp-parser | test/test_chat.py | 1 | 1133 | import sys
import unittest
from wp_parser.wp_chat import main as parser
# for capturing print() output
from contextlib import contextmanager
try:
from StringIO import StringIO
except ImportError: #python3
from io import StringIO
@contextmanager
def captured_output():
    """Temporarily swap sys.stdout/sys.stderr for StringIO buffers.

    Yields the pair (stdout_buffer, stderr_buffer); the real streams are
    restored when the block exits, even on exception.
    """
    saved_out, saved_err = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = saved_out, saved_err
class TestChat(unittest.TestCase):

    def test_chat_1(self):
        """Run the parser on the sample chat for both root users and compare
        its captured stdout against the recorded expected output files."""
        for suffix in ('One', 'Two'):
            root = 'Username{}'.format(suffix)
            # Mock command-line arguments for the parser entry point.
            argv = ['-f', 'test/testChat2.txt', '--root', root]
            with captured_output() as (captured, _err):
                parser(argv)
            with open('test/out/testChat2_{}.out'.format(root)) as fh:
                expected = fh.read()
            self.assertEqual(captured.getvalue().strip(), expected)
| mit |
yongshengwang/hue | desktop/core/ext-py/Django-1.6.10/tests/many_to_one/tests.py | 59 | 21778 | from __future__ import absolute_import
from copy import deepcopy
import datetime
from django.core.exceptions import MultipleObjectsReturned, FieldError
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, Reporter
class ManyToOneTests(TestCase):
    """Tests for many-to-one (ForeignKey) relations, exercised through the
    Article -> Reporter models: creation, assignment, forward/reverse
    lookups, cascading deletes and related regressions."""
    def setUp(self):
        # Create a few Reporters.
        self.r = Reporter(first_name='John', last_name='Smith', email='john@example.com')
        self.r.save()
        self.r2 = Reporter(first_name='Paul', last_name='Jones', email='paul@example.com')
        self.r2.save()
        # Create an Article.
        self.a = Article(id=None, headline="This is a test",
                         pub_date=datetime.date(2005, 7, 27), reporter=self.r)
        self.a.save()
    def test_get(self):
        # Article objects have access to their related Reporter objects.
        r = self.a.reporter
        self.assertEqual(r.id, self.r.id)
        # These are strings instead of unicode strings because that's what was used in
        # the creation of this reporter (and we haven't refreshed the data from the
        # database, which always returns unicode strings).
        self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
    def test_create(self):
        # You can also instantiate an Article by passing the Reporter's ID
        # instead of a Reporter object.
        a3 = Article(id=None, headline="Third article",
                     pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        a3.save()
        self.assertEqual(a3.reporter.id, self.r.id)
        # Similarly, the reporter ID can be a string.
        a4 = Article(id=None, headline="Fourth article",
                     pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        a4.save()
        self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
    def test_add(self):
        # Create an Article via the Reporter object.
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        self.assertEqual(repr(new_article), "<Article: John's second story>")
        self.assertEqual(new_article.reporter.id, self.r.id)
        # Create a new article, and add it to the article set.
        new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
        self.r.article_set.add(new_article2)
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        # Add the same article to a different article set - check that it moves.
        self.r2.article_set.add(new_article2)
        self.assertEqual(new_article2.reporter.id, self.r2.id)
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
        # Adding an object of the wrong type raises TypeError.
        with six.assertRaisesRegex(self, TypeError, "'Article' instance expected, got <Reporter.*"):
            self.r.article_set.add(self.r2)
        self.assertQuerysetEqual(self.r.article_set.all(),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
    def test_assign(self):
        # FK assignment both directly on the instance and via the reverse
        # set descriptor.
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        new_article2 = self.r2.article_set.create(headline="Paul's story",
                                                  pub_date=datetime.date(2006, 1, 17))
        # Assign the article to the reporter directly using the descriptor.
        new_article2.reporter = self.r
        new_article2.save()
        self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r2.article_set.all(), [])
        # Set the article back again using set descriptor.
        self.r2.article_set = [new_article, new_article2]
        self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
        self.assertQuerysetEqual(self.r2.article_set.all(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
            ])
        # Funny case - assignment notation can only go so far; because the
        # ForeignKey cannot be null, existing members of the set must remain.
        self.r.article_set = [new_article]
        self.assertQuerysetEqual(self.r.article_set.all(),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
        # Reporter cannot be null - there should not be a clear or remove method
        self.assertFalse(hasattr(self.r2.article_set, 'remove'))
        self.assertFalse(hasattr(self.r2.article_set, 'clear'))
    def test_selects(self):
        # Forward traversal of the FK in filters, including multi-hop lookups.
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        new_article2 = self.r2.article_set.create(headline="Paul's story",
                                                  pub_date=datetime.date(2006, 1, 17))
        # Reporter objects have access to their related Article objects.
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
                                 ["<Article: This is a test>"])
        self.assertEqual(self.r.article_set.count(), 2)
        self.assertEqual(self.r2.article_set.count(), 1)
        # Get articles by id
        self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
                                 ["<Article: This is a test>"])
        self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
                                 ["<Article: This is a test>"])
        # Query on an article property
        self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
                                 ["<Article: This is a test>"])
        # The API automatically follows relationships as far as you need.
        # Use double underscores to separate relationships.
        # This works as many levels deep as you want. There's no limit.
        # Find all Articles for any Reporter whose first name is "John".
        self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # Check that implied __exact also works
        self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # Query twice over the related field.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John',
                                   reporter__last_name__exact='Smith'),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # The underlying query only makes one join when a related table is referenced twice.
        queryset = Article.objects.filter(reporter__first_name__exact='John',
                                          reporter__last_name__exact='Smith')
        self.assertNumQueries(1, list, queryset)
        self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
        # The automatically joined table has a predictable name.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John').extra(
                where=["many_to_one_reporter.last_name='Smith'"]),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John'
                                  ).extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith']),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # Find all Articles for a Reporter.
        # Use direct ID check, pk check, and object comparison
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__id__exact=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__pk=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter=self.r),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__in=[self.r.id,self.r2.id]).distinct(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__in=[self.r,self.r2]).distinct(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        # You can also use a queryset instead of a literal list of instances.
        # The queryset must be reduced to a list of values using values(),
        # then converted into a query
        self.assertQuerysetEqual(
            Article.objects.filter(
                reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
            ).distinct(),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
    def test_reverse_selects(self):
        # Reverse traversal: filtering Reporters through their articles.
        a3 = Article.objects.create(id=None, headline="Third article",
                                    pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        a4 = Article.objects.create(id=None, headline="Fourth article",
                                    pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        # Reporters can be queried
        self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
                                 ["<Reporter: John Smith>"])
        # Reporters can query in opposite direction of ForeignKey definition
        self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a.id,a3.id]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a.id,a3]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a,a3]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__headline__startswith='T'),
            ["<Reporter: John Smith>", "<Reporter: John Smith>"],
            ordered=False
        )
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__headline__startswith='T').distinct(),
            ["<Reporter: John Smith>"])
        # Counting in the opposite direction works in conjunction with distinct()
        self.assertEqual(
            Reporter.objects.filter(article__headline__startswith='T').count(), 2)
        self.assertEqual(
            Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
        # Queries can go round in circles.
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__first_name__startswith='John'),
            [
                "<Reporter: John Smith>",
                "<Reporter: John Smith>",
                "<Reporter: John Smith>",
            ],
            ordered=False
        )
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
            ["<Reporter: John Smith>"])
        # Check that implied __exact also works.
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter=self.r).distinct(),
            ["<Reporter: John Smith>"])
        # It's possible to use values() calls across many-to-one relations.
        # (Note, too, that we clear the ordering here so as not to drag the
        # 'headline' field into the columns being used to determine uniqueness)
        d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
        self.assertEqual([d],
            list(Article.objects.filter(reporter=self.r).distinct().order_by()
                 .values('reporter__first_name', 'reporter__last_name')))
    def test_select_related(self):
        # Check that Article.objects.select_related().dates() works properly when
        # there are multiple Articles with the same date but different foreign-key
        # objects (Reporters).
        r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='royko@suntimes.com')
        r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='jkass@tribune.com')
        Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
        Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'day')),
            [
                datetime.date(1980, 4, 23),
                datetime.date(2005, 7, 27),
            ])
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'month')),
            [
                datetime.date(1980, 4, 1),
                datetime.date(2005, 7, 1),
            ])
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'year')),
            [
                datetime.date(1980, 1, 1),
                datetime.date(2005, 1, 1),
            ])
    def test_delete(self):
        # Deleting a reporter cascades to his articles.
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        new_article2 = self.r2.article_set.create(headline="Paul's story",
                                                  pub_date=datetime.date(2006, 1, 17))
        a3 = Article.objects.create(id=None, headline="Third article",
                                    pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        a4 = Article.objects.create(id=None, headline="Fourth article",
                                    pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        # If you delete a reporter, his articles will be deleted.
        self.assertQuerysetEqual(Article.objects.all(),
            [
                "<Article: Fourth article>",
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: Third article>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
            [
                "<Reporter: John Smith>",
                "<Reporter: Paul Jones>",
            ])
        self.r2.delete()
        self.assertQuerysetEqual(Article.objects.all(),
            [
                "<Article: Fourth article>",
                "<Article: John's second story>",
                "<Article: Third article>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
                                 ["<Reporter: John Smith>"])
        # You can delete using a JOIN in the query.
        Reporter.objects.filter(article__headline__startswith='This').delete()
        self.assertQuerysetEqual(Reporter.objects.all(), [])
        self.assertQuerysetEqual(Article.objects.all(), [])
    def test_regression_12876(self):
        # Regression for #12876 -- Model methods that include queries that
        # recursive don't cause recursion depth problems under deepcopy.
        self.r.cached_query = Article.objects.filter(reporter=self.r)
        self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
    def test_explicit_fk(self):
        # Create a new Article with get_or_create using an explicit value
        # for a ForeignKey.
        a2, created = Article.objects.get_or_create(id=None,
                                                    headline="John's second test",
                                                    pub_date=datetime.date(2011, 5, 7),
                                                    reporter_id=self.r.id)
        self.assertTrue(created)
        self.assertEqual(a2.reporter.id, self.r.id)
        # You can specify filters containing the explicit FK value.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter_id__exact=self.r.id),
            [
                "<Article: John's second test>",
                "<Article: This is a test>",
            ])
        # Create an Article by Paul for the same date.
        a3 = Article.objects.create(id=None, headline="Paul's commentary",
                                    pub_date=datetime.date(2011, 5, 7),
                                    reporter_id=self.r2.id)
        self.assertEqual(a3.reporter.id, self.r2.id)
        # Get should respect explicit foreign keys as well.
        self.assertRaises(MultipleObjectsReturned,
                          Article.objects.get, reporter_id=self.r.id)
        self.assertEqual(repr(a3),
                         repr(Article.objects.get(reporter_id=self.r2.id,
                                                  pub_date=datetime.date(2011, 5, 7))))
    def test_manager_class_caching(self):
        # The dynamically-built related manager class is cached and shared.
        r1 = Reporter.objects.create(first_name='Mike')
        r2 = Reporter.objects.create(first_name='John')
        # Same twice
        self.assertTrue(r1.article_set.__class__ is r1.article_set.__class__)
        # Same as each other
        self.assertTrue(r1.article_set.__class__ is r2.article_set.__class__)
    def test_create_relation_with_ugettext_lazy(self):
        # Lazy translation strings are resolved when saved through the FK manager.
        reporter = Reporter.objects.create(first_name='John',
                                           last_name='Smith',
                                           email='john.smith@example.com')
        lazy = ugettext_lazy('test')
        reporter.article_set.create(headline=lazy,
                                    pub_date=datetime.date(2011, 6, 10))
        notlazy = six.text_type(lazy)
        article = reporter.article_set.get()
        self.assertEqual(article.headline, notlazy)
    def test_values_list_exception(self):
        # Bad field names in values_list() raise FieldError with the list of
        # valid choices (including extra() select aliases).
        expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
        self.assertRaisesMessage(FieldError,
                                 expected_message % ', '.join(Reporter._meta.get_all_field_names()),
                                 Article.objects.values_list,
                                 'reporter__notafield')
        self.assertRaisesMessage(FieldError,
                                 expected_message % ', '.join(['EXTRA',] + Article._meta.get_all_field_names()),
                                 Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list,
                                 'notafield')
| apache-2.0 |
sahutd/youtube-dl | youtube_dl/extractor/ellentv.py | 107 | 2708 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class EllenTVIE(InfoExtractor):
    """Resolves EllenTV / ellentube video pages to their Kaltura entries."""
    _VALID_URL = r'https?://(?:www\.)?(?:ellentv|ellentube)\.com/videos/(?P<id>[a-z0-9_-]+)'
    _TEST = {
        'url': 'http://www.ellentv.com/videos/0-ipq1gsai/',
        'md5': '8e3c576bf2e9bfff4d76565f56f94c9c',
        'info_dict': {
            'id': '0_ipq1gsai',
            'ext': 'mp4',
            'title': 'Fast Fingers of Fate',
            'description': 'md5:587e79fbbd0d73b148bc596d99ce48e6',
            'timestamp': 1428035648,
            'upload_date': '20150403',
            'uploader_id': 'batchUser',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Fetch the widgets.ellentube.com page for this video; it contains
        # the Kaltura player configuration.
        widget_page = self._download_webpage(
            'http://widgets.ellentube.com/videos/%s' % video_id,
            video_id)
        # Partner id plus entry id together identify the video on Kaltura.
        partner_id = self._search_regex(
            r"var\s+partnerId\s*=\s*'([^']+)", widget_page, 'partner id')
        kaltura_id = self._search_regex(
            [r'id="kaltura_player_([^"]+)"',
             r"_wb_entry_id\s*:\s*'([^']+)",
             r'data-kaltura-entry-id="([^"]+)'],
            widget_page, 'kaltura id')
        # Hand the resolved ids off to the Kaltura extractor.
        return self.url_result('kaltura:%s:%s' % (partner_id, kaltura_id), 'Kaltura')
class EllenTVClipsIE(InfoExtractor):
    """Extracts an EllenTV episode page as a playlist of Kaltura clips."""
    IE_NAME = 'EllenTV:clips'
    _VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
    _TEST = {
        'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
        'info_dict': {
            'id': 'meryl-streep-vanessa-hudgens',
            'title': 'Meryl Streep, Vanessa Hudgens',
        },
        'playlist_mincount': 7,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        clips = self._extract_playlist(webpage)
        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._og_search_title(webpage),
            'entries': self._extract_entries(clips)
        }

    def _extract_playlist(self, webpage):
        """Pull the addClips(...) JSON fragment out of the page and parse it."""
        fragment = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
        try:
            # Re-wrap the captured fragment so it parses as a JSON array again.
            return json.loads("[{" + fragment + "}]")
        except ValueError as exc:
            raise ExtractorError('Failed to download JSON', cause=exc)

    def _extract_entries(self, playlist):
        """Map each clip record to a kaltura: URL result."""
        entries = []
        for item in playlist:
            entries.append(self.url_result(
                'kaltura:%s:%s' % (item['kaltura_partner_id'], item['kaltura_entry_id']),
                'Kaltura'))
        return entries
| unlicense |
salguarnieri/intellij-community | python/helpers/epydoc/docwriter/dotgraph.py | 91 | 53233 | # epydoc -- Graph generation
#
# Copyright (C) 2005 Edward Loper
# Author: Edward Loper <edloper@loper.org>
# URL: <http://epydoc.sf.net>
#
# $Id: dotgraph.py 1663 2007-11-07 15:29:47Z dvarrazzo $
"""
Render Graphviz directed graphs as images. Below are some examples.
.. importgraph::
.. classtree:: epydoc.apidoc.APIDoc
.. packagetree:: epydoc
:see: `The Graphviz Homepage
<http://www.research.att.com/sw/tools/graphviz/>`__
"""
__docformat__ = 'restructuredtext'
import re
import sys
from epydoc import log
from epydoc.apidoc import *
from epydoc.util import *
from epydoc.compat import * # Backwards compatibility
# colors for graphs of APIDocs
# (hex RGB fill/link colors used when rendering the dot graphs below;
# exact usage is in the rendering code later in this module)
MODULE_BG = '#d8e8ff'
CLASS_BG = '#d8ffe8'
SELECTED_BG = '#ffd0d0'
BASECLASS_BG = '#e0b0a0'
SUBCLASS_BG = '#e0b0a0'
ROUTINE_BG = '#e8d0b0' # maybe?
INH_LINK_COLOR = '#800000'
######################################################################
#{ Dot Graphs
######################################################################
DOT_COMMAND = 'dot'
"""The command that should be used to spawn dot"""
class DotGraph:
"""
A ``dot`` directed graph. The contents of the graph are
constructed from the following instance variables:
- `nodes`: A list of `DotGraphNode`\\s, encoding the nodes
that are present in the graph. Each node is characterized
a set of attributes, including an optional label.
- `edges`: A list of `DotGraphEdge`\\s, encoding the edges
that are present in the graph. Each edge is characterized
by a set of attributes, including an optional label.
- `node_defaults`: Default attributes for nodes.
- `edge_defaults`: Default attributes for edges.
- `body`: A string that is appended as-is in the body of
the graph. This can be used to build more complex dot
graphs.
The `link()` method can be used to resolve crossreference links
within the graph. In particular, if the 'href' attribute of any
node or edge is assigned a value of the form ``<name>``, then it
will be replaced by the URL of the object with that name. This
applies to the `body` as well as the `nodes` and `edges`.
To render the graph, use the methods `write()` and `render()`.
Usually, you should call `link()` before you render the graph.
"""
_uids = set()
"""A set of all uids that that have been generated, used to ensure
that each new graph has a unique uid."""
DEFAULT_NODE_DEFAULTS={'fontsize':10, 'fontname': 'Helvetica'}
DEFAULT_EDGE_DEFAULTS={'fontsize':10, 'fontname': 'Helvetica'}
def __init__(self, title, body='', node_defaults=None,
edge_defaults=None, caption=None):
"""
Create a new `DotGraph`.
"""
self.title = title
"""The title of the graph."""
self.caption = caption
"""A caption for the graph."""
self.nodes = []
"""A list of the nodes that are present in the graph.
:type: ``list`` of `DotGraphNode`"""
self.edges = []
"""A list of the edges that are present in the graph.
:type: ``list`` of `DotGraphEdge`"""
self.body = body
"""A string that should be included as-is in the body of the
graph.
:type: ``str``"""
self.node_defaults = node_defaults or self.DEFAULT_NODE_DEFAULTS
"""Default attribute values for nodes."""
self.edge_defaults = edge_defaults or self.DEFAULT_EDGE_DEFAULTS
"""Default attribute values for edges."""
self.uid = re.sub(r'\W', '_', title).lower()
"""A unique identifier for this graph. This can be used as a
filename when rendering the graph. No two `DotGraph`\s will
have the same uid."""
# Encode the title, if necessary.
if isinstance(self.title, unicode):
self.title = self.title.encode('ascii', 'xmlcharrefreplace')
# Make sure the UID isn't too long.
self.uid = self.uid[:30]
# Make sure the UID is unique
if self.uid in self._uids:
n = 2
while ('%s_%s' % (self.uid, n)) in self._uids: n += 1
self.uid = '%s_%s' % (self.uid, n)
self._uids.add(self.uid)
def to_html(self, image_file, image_url, center=True):
"""
Return the HTML code that should be uesd to display this graph
(including a client-side image map).
:param image_url: The URL of the image file for this graph;
this should be generated separately with the `write()` method.
"""
# If dotversion >1.8.10, then we can generate the image and
# the cmapx with a single call to dot. Otherwise, we need to
# run dot twice.
if get_dot_version() > [1,8,10]:
cmapx = self._run_dot('-Tgif', '-o%s' % image_file, '-Tcmapx')
if cmapx is None: return '' # failed to render
else:
if not self.write(image_file):
return '' # failed to render
cmapx = self.render('cmapx') or ''
# Decode the cmapx (dot uses utf-8)
try:
cmapx = cmapx.decode('utf-8')
except UnicodeDecodeError:
log.debug('%s: unable to decode cmapx from dot; graph will '
'not have clickable regions' % image_file)
cmapx = ''
title = plaintext_to_html(self.title or '')
caption = plaintext_to_html(self.caption or '')
if title or caption:
css_class = 'graph-with-title'
else:
css_class = 'graph-without-title'
if len(title)+len(caption) > 80:
title_align = 'left'
table_width = ' width="600"'
else:
title_align = 'center'
table_width = ''
if center: s = '<center>'
if title or caption:
s += ('<table border="0" cellpadding="0" cellspacing="0" '
'class="graph"%s>\n <tr><td align="center">\n' %
table_width)
s += (' %s\n <img src="%s" alt=%r usemap="#%s" '
'ismap="ismap" class="%s" />\n' %
(cmapx.strip(), image_url, title, self.uid, css_class))
if title or caption:
s += ' </td></tr>\n <tr><td align=%r>\n' % title_align
if title:
s += '<span class="graph-title">%s</span>' % title
if title and caption:
s += ' -- '
if caption:
s += '<span class="graph-caption">%s</span>' % caption
s += '\n </td></tr>\n</table><br />'
if center: s += '</center>'
return s
def link(self, docstring_linker):
"""
Replace any href attributes whose value is ``<name>`` with
the url of the object whose name is ``<name>``.
"""
# Link xrefs in nodes
self._link_href(self.node_defaults, docstring_linker)
for node in self.nodes:
self._link_href(node.attribs, docstring_linker)
# Link xrefs in edges
self._link_href(self.edge_defaults, docstring_linker)
for edge in self.nodes:
self._link_href(edge.attribs, docstring_linker)
# Link xrefs in body
def subfunc(m):
url = docstring_linker.url_for(m.group(1))
if url: return 'href="%s"%s' % (url, m.group(2))
else: return ''
self.body = re.sub("href\s*=\s*['\"]?<([\w\.]+)>['\"]?\s*(,?)",
subfunc, self.body)
def _link_href(self, attribs, docstring_linker):
"""Helper for `link()`"""
if 'href' in attribs:
m = re.match(r'^<([\w\.]+)>$', attribs['href'])
if m:
url = docstring_linker.url_for(m.group(1))
if url: attribs['href'] = url
else: del attribs['href']
def write(self, filename, language='gif'):
"""
Render the graph using the output format `language`, and write
the result to `filename`.
:return: True if rendering was successful.
"""
result = self._run_dot('-T%s' % language,
'-o%s' % filename)
# Decode into unicode, if necessary.
if language == 'cmapx' and result is not None:
result = result.decode('utf-8')
return (result is not None)
def render(self, language='gif'):
"""
Use the ``dot`` command to render this graph, using the output
format `language`. Return the result as a string, or ``None``
if the rendering failed.
"""
return self._run_dot('-T%s' % language)
def _run_dot(self, *options):
try:
result, err = run_subprocess((DOT_COMMAND,)+options,
self.to_dotfile())
if err: log.warning("Graphviz dot warning(s):\n%s" % err)
except OSError, e:
log.warning("Unable to render Graphviz dot graph:\n%s" % e)
#log.debug(self.to_dotfile())
return None
return result
def to_dotfile(self):
"""
Return the string contents of the dot file that should be used
to render this graph.
"""
lines = ['digraph %s {' % self.uid,
'node [%s]' % ','.join(['%s="%s"' % (k,v) for (k,v)
in self.node_defaults.items()]),
'edge [%s]' % ','.join(['%s="%s"' % (k,v) for (k,v)
in self.edge_defaults.items()])]
if self.body:
lines.append(self.body)
lines.append('/* Nodes */')
for node in self.nodes:
lines.append(node.to_dotfile())
lines.append('/* Edges */')
for edge in self.edges:
lines.append(edge.to_dotfile())
lines.append('}')
# Default dot input encoding is UTF-8
return u'\n'.join(lines).encode('utf-8')
class DotGraphNode:
    """
    A single node in a `DotGraph`.  Nodes are identified by
    automatically-assigned sequential numeric ids.
    """
    _next_id = 0 # Class-wide counter used to assign unique node ids.

    def __init__(self, label=None, html_label=None, **attribs):
        """
        :param label: A plain-text label for the node.
        :param html_label: An HTML-like label for the node (rendered
            by dot between angle brackets).  At most one of `label`
            and `html_label` may be given.
        :param attribs: Any additional dot attributes for the node.
        :raise ValueError: If both `label` and `html_label` are given.
        """
        if label is not None and html_label is not None:
            raise ValueError('Use label or html_label, not both.')
        if label is not None: attribs['label'] = label
        self._html_label = html_label
        self._attribs = attribs
        self.id = self.__class__._next_id
        self.__class__._next_id += 1
        self.port = None # Preferred port for edges to/from this node.

    def __getitem__(self, attr):
        return self._attribs[attr]

    def __setitem__(self, attr, val):
        if attr == 'html_label':
            # The two label forms are mutually exclusive.  Bug fix:
            # use pop() with a default, so that assigning an
            # html_label to a node that has no plain-text label no
            # longer raises KeyError.
            self._attribs.pop('label', None)
            self._html_label = val
        else:
            if attr == 'label': self._html_label = None
            self._attribs[attr] = val

    def to_dotfile(self):
        """
        Return the dot commands that should be used to render this node.
        """
        attribs = ['%s="%s"' % (k,v) for (k,v) in self._attribs.items()
                   if v is not None]
        if self._html_label:
            attribs.insert(0, 'label=<%s>' % (self._html_label,))
        if attribs: attribs = ' [%s]' % (','.join(attribs))
        return 'node%d%s' % (self.id, attribs)
class DotGraphEdge:
    """A directed edge between two `DotGraphNode`\s in a `DotGraph`."""

    def __init__(self, start, end, label=None, **attribs):
        """
        :type start: `DotGraphNode`
        :type end: `DotGraphNode`
        """
        assert isinstance(start, DotGraphNode)
        assert isinstance(end, DotGraphNode)
        if label is not None: attribs['label'] = label
        self.start = start       #: :type: `DotGraphNode`
        self.end = end           #: :type: `DotGraphNode`
        self._attribs = attribs

    def __getitem__(self, attr):
        return self._attribs[attr]

    def __setitem__(self, attr, val):
        self._attribs[attr] = val

    def to_dotfile(self):
        """
        Return the dot commands that should be used to render this edge.
        """
        # Fall back on the endpoint nodes' preferred ports when the
        # edge does not specify its own.
        # NOTE(review): `headport` is taken from the *start* node and
        # `tailport` from the *end* node, which looks reversed relative
        # to Graphviz's tail=source/head=target convention -- confirm
        # this is intentional before changing it.
        attribs = self._attribs.copy()
        if self.start.port is not None:
            attribs.setdefault('headport', self.start.port)
        if self.end.port is not None:
            attribs.setdefault('tailport', self.end.port)
        # Convert the attributes to a dot attribute list.
        pairs = ['%s="%s"' % (key, val) for (key, val) in attribs.items()
                 if val is not None]
        attr_str = ''
        if pairs:
            attr_str = ' [%s]' % ','.join(pairs)
        # Return the dotfile edge.
        return 'node%d -> node%d%s' % (self.start.id, self.end.id, attr_str)
######################################################################
#{ Specialized Nodes for UML Graphs
######################################################################
class DotGraphUmlClassNode(DotGraphNode):
    """
    A specialized dot graph node used to display `ClassDoc`\s using
    UML notation.  The node is rendered as a table with three cells:
    the top cell contains the class name; the middle cell contains a
    list of attributes; and the bottom cell contains a list of
    operations::
         +-------------+
         |  ClassName  |
         +-------------+
         | x: int      |
         | ...         |
         +-------------+
         | f(self, x)  |
         | ...         |
         +-------------+
    `DotGraphUmlClassNode`\s may be *collapsed*, in which case they are
    drawn as a simple box containing the class name::
         +-------------+
         |  ClassName  |
         +-------------+
    Attributes with types corresponding to documented classes can
    optionally be converted into edges, using `link_attributes()`.
    :todo: Add more options?
      - show/hide operation signature
      - show/hide operation signature types
      - show/hide operation signature return type
      - show/hide attribute types
      - use qualifiers
    """

    def __init__(self, class_doc, linker, context, collapsed=False,
                 bgcolor=CLASS_BG, **options):
        """
        Create a new `DotGraphUmlClassNode` based on the class
        `class_doc`.
        :Parameters:
            `linker` : `markup.DocstringLinker`
                Used to look up URLs for classes.
            `context` : `APIDoc`
                The context in which this node will be drawn; dotted
                names will be contextualized to this context.
            `collapsed` : ``bool``
                If true, then display this node as a simple box.
            `bgcolor` : ```str```
                The background color for this node.
            `options` : ``dict``
                A set of options used to control how the node should
                be displayed.
        :Keywords:
          - `show_private_vars`: If false, then private variables
            are filtered out of the attributes & operations lists.
            (Default: *False*)
          - `show_magic_vars`: If false, then magic variables
            (such as ``__init__`` and ``__add__``) are filtered out of
            the attributes & operations lists.  (Default: *True*)
          - `show_inherited_vars`: If false, then inherited variables
            are filtered out of the attributes & operations lists.
            (Default: *False*)
          - `max_attributes`: The maximum number of attributes that
            should be listed in the attribute box.  If the class has
            more than this number of attributes, some will be
            elided.  Ellipsis is marked with ``'...'``.
          - `max_operations`: The maximum number of operations that
            should be listed in the operation box.
          - `add_nodes_for_linked_attributes`: If true, then
            `link_attributes()` will create new a collapsed node for
            the types of a linked attributes if no node yet exists for
            that type.
        """
        if not isinstance(class_doc, ClassDoc):
            raise TypeError('Expected a ClassDoc as 1st argument')
        self.class_doc = class_doc
        """The class represented by this node."""
        self.linker = linker
        """Used to look up URLs for classes."""
        self.context = context
        """The context in which the node will be drawn."""
        self.bgcolor = bgcolor
        """The background color of the node."""
        self.options = options
        """Options used to control how the node is displayed."""
        self.collapsed = collapsed
        """If true, then draw this node as a simple box."""
        self.attributes = []
        """The list of VariableDocs for attributes"""
        self.operations = []
        """The list of VariableDocs for operations"""
        self.qualifiers = []
        """List of (key_label, port) tuples."""
        self.edges = []
        """List of edges used to represent this node's attributes.
        These should not be added to the `DotGraph`; this node will
        generate their dotfile code directly."""
        # Initialize operations & attributes lists.
        show_private = options.get('show_private_vars', False)
        show_magic = options.get('show_magic_vars', True)
        show_inherited = options.get('show_inherited_vars', False)
        # Sort each variable into operations vs. attributes, applying
        # the visibility filters selected by the options above.
        for var in class_doc.sorted_variables:
            name = var.canonical_name[-1]
            if ((not show_private and var.is_public == False) or
                (not show_magic and re.match('__\w+__$', name)) or
                (not show_inherited and var.container != class_doc)):
                pass # var is filtered out by the display options above
            elif isinstance(var.value, RoutineDoc):
                self.operations.append(var)
            else:
                self.attributes.append(var)
        # Initialize our dot node settings.
        tooltip = self._summary(class_doc)
        if tooltip:
            # dot chokes on a \n in the attribute...
            tooltip = " ".join(tooltip.split())
        else:
            # No summary available; fall back on the dotted name (it is
            # formatted with %s when the node is rendered).
            tooltip = class_doc.canonical_name
        DotGraphNode.__init__(self, tooltip=tooltip,
                              width=0, height=0, shape='plaintext',
                              href=linker.url_for(class_doc) or NOOP_URL)

    #/////////////////////////////////////////////////////////////////
    #{ Attribute Linking
    #/////////////////////////////////////////////////////////////////

    SIMPLE_TYPE_RE = re.compile(
        r'^([\w\.]+)$')
    """A regular expression that matches descriptions of simple types."""

    COLLECTION_TYPE_RE = re.compile(
        r'^(list|set|sequence|tuple|collection) of ([\w\.]+)$')
    """A regular expression that matches descriptions of collection types."""

    MAPPING_TYPE_RE = re.compile(
        r'^(dict|dictionary|map|mapping) from ([\w\.]+) to ([\w\.]+)$')
    """A regular expression that matches descriptions of mapping types."""

    MAPPING_TO_COLLECTION_TYPE_RE = re.compile(
        r'^(dict|dictionary|map|mapping) from ([\w\.]+) to '
        r'(list|set|sequence|tuple|collection) of ([\w\.]+)$')
    """A regular expression that matches descriptions of mapping types
    whose value type is a collection."""

    OPTIONAL_TYPE_RE = re.compile(
        r'^(None or|optional) ([\w\.]+)$|^([\w\.]+) or None$')
    """A regular expression that matches descriptions of optional types."""

    def link_attributes(self, nodes):
        """
        Convert any attributes with type descriptions corresponding to
        documented classes to edges.  The following type descriptions
        are currently handled:
          - Dotted names: Create an attribute edge to the named type,
            labelled with the variable name.
          - Collections: Create an attribute edge to the named type,
            labelled with the variable name, and marked with '*' at the
            type end of the edge.
          - Mappings: Create an attribute edge to the named type,
            labelled with the variable name, connected to the class by
            a qualifier box that contains the key type description.
          - Optional: Create an attribute edge to the named type,
            labelled with the variable name, and marked with '0..1' at
            the type end of the edge.
        The edges created by `link_attributes()` are handled internally
        by `DotGraphUmlClassNode`; they should *not* be added directly
        to the `DotGraph`.
        :param nodes: A dictionary mapping from `ClassDoc`\s to
            `DotGraphUmlClassNode`\s, used to look up the nodes for
            attribute types.  If the ``add_nodes_for_linked_attributes``
            option is used, then new nodes will be added to this
            dictionary for any types that are not already listed.
            These added nodes must be added to the `DotGraph`.
        """
        # Try to convert each attribute var into a graph edge.  If
        # _link_attribute returns true, then it succeeded, so remove
        # that var from our attribute list; otherwise, leave that var
        # in our attribute list.
        self.attributes = [var for var in self.attributes
                           if not self._link_attribute(var, nodes)]

    def _link_attribute(self, var, nodes):
        """
        Helper for `link_attributes()`: try to convert the attribute
        variable `var` into an edge, and add that edge to
        `self.edges`.  Return ``True`` iff the variable was
        successfully converted to an edge (in which case, it should be
        removed from the attributes list).
        """
        type_descr = self._type_descr(var) or self._type_descr(var.value)
        # Simple type.
        m = self.SIMPLE_TYPE_RE.match(type_descr)
        if m and self._add_attribute_edge(var, nodes, m.group(1)):
            return True
        # Collection type.
        m = self.COLLECTION_TYPE_RE.match(type_descr)
        if m and self._add_attribute_edge(var, nodes, m.group(2),
                                          headlabel='*'):
            return True
        # Optional type.
        m = self.OPTIONAL_TYPE_RE.match(type_descr)
        if m and self._add_attribute_edge(var, nodes, m.group(2) or m.group(3),
                                          headlabel='0..1'):
            return True
        # Mapping type.
        m = self.MAPPING_TYPE_RE.match(type_descr)
        if m:
            port = 'qualifier_%s' % var.name
            if self._add_attribute_edge(var, nodes, m.group(3),
                                        tailport='%s:e' % port):
                self.qualifiers.append( (m.group(2), port) )
                return True
        # Mapping to collection type.
        m = self.MAPPING_TO_COLLECTION_TYPE_RE.match(type_descr)
        if m:
            port = 'qualifier_%s' % var.name
            if self._add_attribute_edge(var, nodes, m.group(4), headlabel='*',
                                        tailport='%s:e' % port):
                self.qualifiers.append( (m.group(2), port) )
                return True
        # We were unable to link this attribute.
        return False

    def _add_attribute_edge(self, var, nodes, type_str, **attribs):
        """
        Helper for `link_attributes()`: try to add an edge for the
        given attribute variable `var`.  Return ``True`` if
        successful.
        """
        # Use the type string to look up a corresponding ValueDoc.
        type_doc = self.linker.docindex.find(type_str, var)
        if not type_doc: return False
        # Make sure the type is a class.
        if not isinstance(type_doc, ClassDoc): return False
        # Get the type ValueDoc's node.  If it doesn't have one (and
        # add_nodes_for_linked_attributes=True), then create it.
        type_node = nodes.get(type_doc)
        if not type_node:
            if self.options.get('add_nodes_for_linked_attributes', True):
                type_node = DotGraphUmlClassNode(type_doc, self.linker,
                                                 self.context, collapsed=True)
                nodes[type_doc] = type_node
            else:
                return False
        # Add an edge from self to the target type node.
        # [xx] should I set constraint=false here?
        attribs.setdefault('headport', 'body')
        attribs.setdefault('tailport', 'body')
        url = self.linker.url_for(var) or NOOP_URL
        self.edges.append(DotGraphEdge(self, type_node, label=var.name,
                        arrowhead='open', href=url,
                        tooltip=var.canonical_name, labeldistance=1.5,
                        **attribs))
        return True

    #/////////////////////////////////////////////////////////////////
    #{ Helper Methods
    #/////////////////////////////////////////////////////////////////

    def _summary(self, api_doc):
        """Return a plaintext summary for `api_doc`"""
        if not isinstance(api_doc, APIDoc): return ''
        if api_doc.summary in (None, UNKNOWN): return ''
        summary = api_doc.summary.to_plaintext(None).strip()
        return plaintext_to_html(summary)

    # Declared with the pre-decorator classmethod idiom.
    _summary = classmethod(_summary)

    def _type_descr(self, api_doc):
        """Return a plaintext type description for `api_doc`"""
        if not hasattr(api_doc, 'type_descr'): return ''
        if api_doc.type_descr in (None, UNKNOWN): return ''
        type_descr = api_doc.type_descr.to_plaintext(self.linker).strip()
        return plaintext_to_html(type_descr)

    def _tooltip(self, var_doc):
        """Return a tooltip for `var_doc`."""
        return (self._summary(var_doc) or
                self._summary(var_doc.value) or
                var_doc.canonical_name)

    #/////////////////////////////////////////////////////////////////
    #{ Rendering
    #/////////////////////////////////////////////////////////////////

    def _attribute_cell(self, var_doc):
        """Return the pseudo-HTML table row for one attribute."""
        # Construct the label
        label = var_doc.name
        type_descr = (self._type_descr(var_doc) or
                      self._type_descr(var_doc.value))
        if type_descr: label += ': %s' % type_descr
        # Get the URL
        url = self.linker.url_for(var_doc) or NOOP_URL
        # Construct & return the pseudo-html code
        return self._ATTRIBUTE_CELL % (url, self._tooltip(var_doc), label)

    def _operation_cell(self, var_doc):
        """
        Return the pseudo-HTML table row for one operation (method).
        :todo: do 'word wrapping' on the signature, by starting a new
               row in the table, if necessary.  How to indent the new
               line?  Maybe use align=right?  I don't think dot has a
               .
        :todo: Optionally add return type info?
        """
        # Construct the label (aka function signature)
        func_doc = var_doc.value
        args = [self._operation_arg(n, d, func_doc) for (n, d)
                in zip(func_doc.posargs, func_doc.posarg_defaults)]
        args = [plaintext_to_html(arg) for arg in args]
        if func_doc.vararg: args.append('*'+func_doc.vararg)
        if func_doc.kwarg: args.append('**'+func_doc.kwarg)
        label = '%s(%s)' % (var_doc.name, ', '.join(args))
        # Get the URL
        url = self.linker.url_for(var_doc) or NOOP_URL
        # Construct & return the pseudo-html code
        return self._OPERATION_CELL % (url, self._tooltip(var_doc), label)

    def _operation_arg(self, name, default, func_doc):
        """Render a single formal argument (with its default, if any).
        :todo: Handle tuple args better
        :todo: Optionally add type info?
        """
        if default is None:
            return '%s' % name
        else:
            pyval_repr = default.summary_pyval_repr().to_plaintext(None)
            return '%s=%s' % (name, pyval_repr)

    def _qualifier_cell(self, key_label, port):
        """Return the pseudo-HTML row for a mapping-key qualifier box."""
        return self._QUALIFIER_CELL % (port, self.bgcolor, key_label)

    #: args: (url, tooltip, label)
    _ATTRIBUTE_CELL = '''
    <TR><TD ALIGN="LEFT" HREF="%s" TOOLTIP="%s">%s</TD></TR>
    '''

    #: args: (url, tooltip, label)
    _OPERATION_CELL = '''
    <TR><TD ALIGN="LEFT" HREF="%s" TOOLTIP="%s">%s</TD></TR>
    '''

    #: args: (port, bgcolor, label)
    _QUALIFIER_CELL = '''
    <TR><TD VALIGN="BOTTOM" PORT="%s" BGCOLOR="%s" BORDER="1">%s</TD></TR>
    '''

    #: A spacer row drawn between qualifier boxes.
    _QUALIFIER_DIV = '''
    <TR><TD VALIGN="BOTTOM" HEIGHT="10" WIDTH="10" FIXEDSIZE="TRUE"></TD></TR>
    '''

    #: Args: (rowspan, bgcolor, classname, attributes, operations, qualifiers)
    _LABEL = '''
    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0" CELLPADDING="0">
      <TR><TD ROWSPAN="%s">
        <TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0"
               CELLPADDING="0" PORT="body" BGCOLOR="%s">
          <TR><TD>%s</TD></TR>
          <TR><TD><TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
            %s</TABLE></TD></TR>
          <TR><TD><TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0">
            %s</TABLE></TD></TR>
        </TABLE>
      </TD></TR>
      %s
    </TABLE>'''

    #: Args: (bgcolor, classname) -- used when the node is collapsed.
    _COLLAPSED_LABEL = '''
    <TABLE CELLBORDER="0" BGCOLOR="%s" PORT="body">
      <TR><TD>%s</TD></TR>
    </TABLE>'''

    def _get_html_label(self):
        """Assemble and return the full HTML-like label for this node."""
        # Get the class name & contextualize it.
        classname = self.class_doc.canonical_name
        classname = classname.contextualize(self.context.canonical_name)
        # If we're collapsed, display the node as a single box.
        if self.collapsed:
            return self._COLLAPSED_LABEL % (self.bgcolor, classname)
        # Construct the attribute list.  (If it's too long, truncate)
        attrib_cells = [self._attribute_cell(a) for a in self.attributes]
        max_attributes = self.options.get('max_attributes', 15)
        if len(attrib_cells) == 0:
            attrib_cells = ['<TR><TD></TD></TR>']
        # Replace the middle of the list with an ellipsis row, keeping
        # the final entry visible.
        elif len(attrib_cells) > max_attributes:
            attrib_cells[max_attributes-2:-1] = ['<TR><TD>...</TD></TR>']
        attributes = ''.join(attrib_cells)
        # Construct the operation list.  (If it's too long, truncate)
        oper_cells = [self._operation_cell(a) for a in self.operations]
        max_operations = self.options.get('max_operations', 15)
        if len(oper_cells) == 0:
            oper_cells = ['<TR><TD></TD></TR>']
        elif len(oper_cells) > max_operations:
            oper_cells[max_operations-2:-1] = ['<TR><TD>...</TD></TR>']
        operations = ''.join(oper_cells)
        # Construct the qualifier list & determine the rowspan.
        if self.qualifiers:
            rowspan = len(self.qualifiers)*2+2
            div = self._QUALIFIER_DIV
            qualifiers = div+div.join([self._qualifier_cell(l,p) for
                                       (l,p) in self.qualifiers])+div
        else:
            rowspan = 1
            qualifiers = ''
        # Put it all together.
        return self._LABEL % (rowspan, self.bgcolor, classname,
                              attributes, operations, qualifiers)

    def to_dotfile(self):
        """Return the dot commands used to render this node (and, when
        expanded, its attribute edges)."""
        attribs = ['%s="%s"' % (k,v) for (k,v) in self._attribs.items()]
        attribs.append('label=<%s>' % self._get_html_label())
        s = 'node%d%s' % (self.id, ' [%s]' % (','.join(attribs)))
        # Collapsed nodes suppress their attribute edges.
        if not self.collapsed:
            for edge in self.edges:
                s += '\n' + edge.to_dotfile()
        return s
class DotGraphUmlModuleNode(DotGraphNode):
    """
    A specialized dot graph node used to display `ModuleDoc`\s using
    UML notation.  Simple module nodes look like::
        .----.
        +------------+
        | modulename |
        +------------+
    Packages nodes are drawn with their modules & subpackages nested
    inside::
        .----.
        +----------------------------------------+
        | packagename                            |
        |                                        |
        |  .----.       .----.       .----.      |
        |  +---------+  +---------+  +---------+ |
        |  | module1 |  | module2 |  | module3 | |
        |  +---------+  +---------+  +---------+ |
        |                                        |
        +----------------------------------------+
    """
    def __init__(self, module_doc, linker, context, collapsed=False,
                 excluded_submodules=(), **options):
        """
        Create a new `DotGraphUmlModuleNode` for `module_doc`.

        :param linker: Used to look up URLs for modules.
        :param context: The context in which this node will be drawn.
        :param collapsed: If true, draw packages as simple boxes
            rather than nesting their submodules inside them.
        :param excluded_submodules: Submodules that should not be
            drawn inside this node.
        """
        self.module_doc = module_doc                  # Module shown by this node.
        self.linker = linker                          # URL lookup helper.
        self.context = context                        # Context for coloring.
        self.collapsed = collapsed                    # Draw as a simple box?
        self.options = options                        # Extra display options.
        self.excluded_submodules = excluded_submodules
        DotGraphNode.__init__(self, shape='plaintext',
                              href=linker.url_for(module_doc) or NOOP_URL,
                              tooltip=module_doc.canonical_name)

    #: Expects: (color, color, url, tooltip, body)
    _MODULE_LABEL = '''
    <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0" ALIGN="LEFT">
    <TR><TD ALIGN="LEFT" VALIGN="BOTTOM" HEIGHT="8" WIDTH="16"
            FIXEDSIZE="true" BGCOLOR="%s" BORDER="1" PORT="tab"></TD></TR>
    <TR><TD ALIGN="LEFT" VALIGN="TOP" BGCOLOR="%s" BORDER="1" WIDTH="20"
            PORT="body" HREF="%s" TOOLTIP="%s">%s</TD></TR>
    </TABLE>'''

    #: Expects: (name, body_rows)
    _NESTED_BODY = '''
    <TABLE BORDER="0" CELLBORDER="0" CELLPADDING="0" CELLSPACING="0">
    <TR><TD ALIGN="LEFT">%s</TD></TR>
    %s
    </TABLE>'''

    #: Expects: (cells,)
    _NESTED_BODY_ROW = '''
    <TR><TD>
      <TABLE BORDER="0" CELLBORDER="0"><TR>%s</TR></TABLE>
    </TD></TR>'''

    def _get_html_label(self, package):
        """
        :Return: (label, depth, width) where:
          - ``label`` is the HTML label
          - ``depth`` is the depth of the package tree (for coloring)
          - ``width`` is the max width of the HTML label, roughly in
            units of characters.
        """
        MAX_ROW_WIDTH = 80 # unit is roughly characters.
        pkg_name = package.canonical_name
        pkg_url = self.linker.url_for(package) or NOOP_URL
        # Leaf modules (and collapsed packages) are simple boxes.
        if (not package.is_package or len(package.submodules) == 0 or
            self.collapsed):
            pkg_color = self._color(package, 1)
            label = self._MODULE_LABEL % (pkg_color, pkg_color,
                                          pkg_url, pkg_name, pkg_name[-1])
            return (label, 1, len(pkg_name[-1])+3)
        # Get the label for each submodule, and divide them into rows.
        row_list = ['']
        row_width = 0
        max_depth = 0
        max_row_width = len(pkg_name[-1])+3
        for submodule in package.submodules:
            if submodule in self.excluded_submodules: continue
            # Get the submodule's label.
            label, depth, width = self._get_html_label(submodule)
            # Check if we should start a new row.
            if row_width > 0 and width+row_width > MAX_ROW_WIDTH:
                row_list.append('')
                row_width = 0
            # Add the submodule's label to the row.
            row_width += width
            row_list[-1] += '<TD ALIGN="LEFT">%s</TD>' % label
            # Update our max's.
            max_depth = max(depth, max_depth)
            max_row_width = max(row_width, max_row_width)
        # Figure out which color to use.
        # NOTE(review): this uses `depth` from the *last* loop iteration
        # (and would raise NameError if every submodule were excluded);
        # `max_depth+1` may have been intended -- confirm before changing.
        pkg_color = self._color(package, depth+1)
        # Assemble & return the label.
        rows = ''.join([self._NESTED_BODY_ROW % r for r in row_list])
        body = self._NESTED_BODY % (pkg_name, rows)
        label = self._MODULE_LABEL % (pkg_color, pkg_color,
                                      pkg_url, pkg_name, body)
        return label, max_depth+1, max_row_width

    _COLOR_DIFF = 24 # Darkening step applied per level of nesting.

    def _color(self, package, depth):
        """Return the background color for `package`, darkened
        according to its nesting `depth`."""
        if package == self.context: return SELECTED_BG
        else:
            # Parse the base color.
            # Bug fix: the arguments to re.match() were swapped (and the
            # raw-string 'r' prefix had been folded into the literal);
            # the intent is to test whether MODULE_BG looks like an
            # '#rrggbb' color string.
            if re.match(r'#[0-9a-fA-F]{6}$', MODULE_BG):
                base = int(MODULE_BG[1:], 16)
            else:
                base = int('d8e8ff', 16)
            red = (base & 0xff0000) >> 16
            green = (base & 0x00ff00) >> 8
            blue = (base & 0x0000ff)
            # Make it darker with each level of depth. (but not *too*
            # dark -- package name needs to be readable)
            red = max(64, red-(depth-1)*self._COLOR_DIFF)
            green = max(64, green-(depth-1)*self._COLOR_DIFF)
            blue = max(64, blue-(depth-1)*self._COLOR_DIFF)
            # Convert it back to a color string
            return '#%06x' % ((red<<16)+(green<<8)+blue)

    def to_dotfile(self):
        """Return the dot commands that should be used to render this
        node."""
        attribs = ['%s="%s"' % (k,v) for (k,v) in self._attribs.items()]
        label, depth, width = self._get_html_label(self.module_doc)
        attribs.append('label=<%s>' % label)
        return 'node%d%s' % (self.id, ' [%s]' % (','.join(attribs)))
######################################################################
#{ Graph Generation Functions
######################################################################
def package_tree_graph(packages, linker, context=None, **options):
    """
    Return a `DotGraph` that graphically displays the package
    hierarchies for the given packages.
    """
    # Prefer the UML-style rendering (the default) when the installed
    # dot version supports it.
    if options.get('style', 'uml') == 'uml':
        if get_dot_version() >= [2]:
            return uml_package_tree_graph(packages, linker, context,
                                          **options)
        elif 'style' in options:
            # Only warn when the caller explicitly asked for UML.
            log.warning('UML style package trees require dot version 2.0+')

    graph = DotGraph('Package Tree for %s' % name_list(packages, context),
                     body='ranksep=.3\n;nodesep=.1\n',
                     edge_defaults={'dir':'none'})

    # Honor a non-default rank direction.
    rankdir = options.get('dir', 'TB')
    if rankdir != 'TB': # default: top-to-bottom
        graph.body += 'rankdir=%s\n' % rankdir

    # Transitively collect every module under the given packages.
    queue = list(packages)
    modules = set(packages)
    for module in queue:
        queue.extend(module.submodules)
        modules.update(module.submodules)

    # Add a node for each module.
    nodes = add_valdoc_nodes(graph, modules, linker, context)

    # Add an edge for each package/submodule relationship.
    for module in modules:
        for submodule in module.submodules:
            graph.edges.append(DotGraphEdge(nodes[module], nodes[submodule],
                                            headport='tab'))

    return graph
def uml_package_tree_graph(packages, linker, context=None, **options):
    """
    Return a `DotGraph` that graphically displays the package
    hierarchies for the given packages as a nested set of UML
    symbols.
    """
    graph = DotGraph('Package Tree for %s' % name_list(packages, context))

    # Keep only the "root" packages: those not contained (directly or
    # indirectly) in another package from the list.
    root_packages = []
    for candidate in packages:
        dominated = any(other is not candidate and
                        other.canonical_name.dominates(
                            candidate.canonical_name)
                        for other in packages)
        if not dominated:
            root_packages.append(candidate)

    # If the context is a variable, then get its value.
    if isinstance(context, VariableDoc) and context.value is not UNKNOWN:
        context = context.value

    # Return a graph with one node for each root package.
    for package in root_packages:
        graph.nodes.append(DotGraphUmlModuleNode(package, linker, context))
    return graph
######################################################################
def class_tree_graph(bases, linker, context=None, **options):
    """
    Return a `DotGraph` that graphically displays the class
    hierarchy for the given classes.  Options:
      - exclude
      - dir: LR|RL|BT requests a left-to-right, right-to-left, or
        bottom-to- top, drawing.  (corresponds to the dot option
        'rankdir'
    """
    if isinstance(bases, ClassDoc): bases = [bases]
    graph = DotGraph('Class Hierarchy for %s' % name_list(bases, context),
                     body='ranksep=0.3\n',
                     edge_defaults={'sametail':True, 'dir':'none'})

    # Options
    rankdir = options.get('dir', 'TB')
    if rankdir != 'TB': # default: top-down
        graph.body += 'rankdir=%s\n' % rankdir
    exclude = options.get('exclude', ())

    # Find all superclasses & subclasses of the given classes: first
    # walk downward through the subclass relation...
    classes = set(bases)
    queue = list(bases)
    for cls in queue:
        if isinstance(cls, ClassDoc):
            if cls.subclasses not in (None, UNKNOWN):
                children = cls.subclasses
                if exclude:
                    children = [c for c in children if c not in exclude]
                queue.extend(children)
                classes.update(children)

    # ...then upward through the base-class relation.
    queue = list(bases)
    for cls in queue:
        if isinstance(cls, ClassDoc):
            if cls.bases not in (None, UNKNOWN):
                parents = cls.bases
                if exclude:
                    parents = [p for p in parents if p not in exclude]
                queue.extend(parents)
                classes.update(parents)

    # Add a node for each class (skipping the `object` root).
    classes = [c for c in classes
               if isinstance(c, ClassDoc) and c.pyval is not object]
    nodes = add_valdoc_nodes(graph, classes, linker, context)

    # Add an edge for each base/subclass relationship.
    edge_pairs = set()
    for cls in classes:
        for subcls in cls.subclasses:
            if cls in nodes and subcls in nodes:
                edge_pairs.add((nodes[cls], nodes[subcls]))
    graph.edges = [DotGraphEdge(src, dst) for (src, dst) in edge_pairs]

    return graph
######################################################################
def uml_class_tree_graph(class_doc, linker, context=None, **options):
    """
    Return a `DotGraph` that graphically displays the class hierarchy
    for the given class, using UML notation.  Options:
      - max_attributes
      - max_operations
      - show_private_vars
      - show_magic_vars
      - link_attributes
      - exclude
    """
    nodes = {} # ClassDoc -> DotGraphUmlClassNode
    exclude = options.get('exclude', ())

    # Create nodes for class_doc and all its bases.
    for cls in class_doc.mro():
        if cls.pyval is object: continue # don't include `object`.
        if cls in exclude: break # stop if we get to an excluded class.
        if cls == class_doc: color = SELECTED_BG
        else: color = BASECLASS_BG
        nodes[cls] = DotGraphUmlClassNode(cls, linker, context,
                                          show_inherited_vars=False,
                                          collapsed=False, bgcolor=color)

    # Create nodes for all class_doc's subclasses (breadth-first,
    # extending `queue` while iterating over it).
    queue = [class_doc]
    for cls in queue:
        if (isinstance(cls, ClassDoc) and
            cls.subclasses not in (None, UNKNOWN)):
            for subcls in cls.subclasses:
                subcls_name = subcls.canonical_name[-1] # NOTE(review): unused
                if subcls not in nodes and subcls not in exclude:
                    queue.append(subcls)
                    nodes[subcls] = DotGraphUmlClassNode(
                        subcls, linker, context, collapsed=True,
                        bgcolor=SUBCLASS_BG)

    # Only show variables in the class where they're defined for
    # *class_doc*.
    mro = class_doc.mro()
    for name, var in class_doc.variables.items():
        i = mro.index(var.container)
        # Remove the overridden variable from every *more distant*
        # ancestor's node, so it appears only once in the diagram.
        for base in mro[i+1:]:
            if base.pyval is object: continue # don't include `object`.
            overridden_var = base.variables.get(name)
            if overridden_var and overridden_var.container == base:
                try:
                    if isinstance(overridden_var.value, RoutineDoc):
                        nodes[base].operations.remove(overridden_var)
                    else:
                        nodes[base].attributes.remove(overridden_var)
                except ValueError:
                    pass # var is filtered (eg private or magic)

    # Keep track of which nodes are part of the inheritance graph
    # (since link_attributes might add new nodes)
    inheritance_nodes = set(nodes.values())

    # Turn attributes into links.
    if options.get('link_attributes', True):
        for node in nodes.values():
            node.link_attributes(nodes)
            # Make sure that none of the new attribute edges break the
            # rank ordering assigned by inheritance.
            for edge in node.edges:
                if edge.end in inheritance_nodes:
                    edge['constraint'] = 'False'

    # Construct the graph.
    graph = DotGraph('UML class diagram for %s' % class_doc.canonical_name,
                     body='ranksep=.2\n;nodesep=.3\n')
    graph.nodes = nodes.values()

    # Add inheritance edges.
    for node in inheritance_nodes:
        for base in node.class_doc.bases:
            if base in nodes:
                graph.edges.append(DotGraphEdge(nodes[base], node,
                        dir='back', arrowtail='empty',
                        headport='body', tailport='body',
                        color=INH_LINK_COLOR, weight=100,
                        style='bold'))

    # And we're done!
    return graph
######################################################################
def import_graph(modules, docindex, linker, context=None, **options):
    """Return a `DotGraph` whose edges point from each imported module to
    the module that imports it (arrow direction matches the default
    right-to-left rank layout)."""
    graph = DotGraph('Import Graph', body='ranksep=.3\n;nodesep=.3\n')
    # Options
    rankdir = options.get('dir', 'RL')
    if rankdir != 'TB':  # default: right-to-left.
        graph.body += 'rankdir=%s\n' % rankdir
    # Add a node for each module.
    nodes = add_valdoc_nodes(graph, modules, linker, context)
    # Collect (imported, importer) node pairs; a set removes duplicates.
    edge_pairs = set()
    for importer in modules:
        if importer.imports in (None, UNKNOWN):
            continue
        for dotted_name in importer.imports:
            # Strip trailing name components until the prefix resolves to
            # a module (e.g. "a.b.c" might be variable c of module a.b).
            for prefix_len in range(len(dotted_name), 0, -1):
                target = docindex.find(dotted_name[:prefix_len], context)
                if isinstance(target, ModuleDoc):
                    if target in nodes and importer in nodes:
                        edge_pairs.add((nodes[target], nodes[importer]))
                    break
    graph.edges = [DotGraphEdge(imported, importer)
                   for (imported, importer) in edge_pairs]
    return graph
######################################################################
def call_graph(api_docs, docindex, linker, context=None, **options):
    """
    Build a dot graph of caller->callee edges between the routines named
    by ``api_docs``, using the profiling data stored on ``docindex``.

    :param options:
        - ``dir``: rankdir for the graph.  (default=LR)
        - ``add_callers``: also include callers for any of the
          routines in ``api_docs``.  (default=False)
        - ``add_callees``: also include callees for any of the
          routines in ``api_docs``.  (default=False)
    :todo: Add an ``exclude`` option?
    """
    if docindex.callers is None:
        log.warning("No profiling information for call graph!")
        return DotGraph('Call Graph') # return None instead?
    if isinstance(context, VariableDoc):
        context = context.value
    # Get the set of requested functions.
    functions = []
    for api_doc in api_docs:
        # If it's a variable, get its value.
        if isinstance(api_doc, VariableDoc):
            api_doc = api_doc.value
        # Add the value to the functions list.  Namespaces (modules,
        # classes) contribute every routine-valued variable they contain.
        if isinstance(api_doc, RoutineDoc):
            functions.append(api_doc)
        elif isinstance(api_doc, NamespaceDoc):
            for vardoc in api_doc.variables.values():
                if isinstance(vardoc.value, RoutineDoc):
                    functions.append(vardoc.value)
    # Filter out functions with no callers/callees?
    # [xx] this isn't quite right, especially if the add_callers or
    # add_callees options are false.
    functions = [f for f in functions if
                 (f in docindex.callers) or (f in docindex.callees)]
    # Add any callers/callees of the selected functions
    func_set = set(functions)
    if options.get('add_callers', False) or options.get('add_callees', False):
        for func_doc in functions:
            if options.get('add_callers', False):
                func_set.update(docindex.callers.get(func_doc, ()))
            if options.get('add_callees', False):
                func_set.update(docindex.callees.get(func_doc, ()))
    graph = DotGraph('Call Graph for %s' % name_list(api_docs, context),
                     node_defaults={'shape':'box', 'width': 0, 'height': 0})
    # Options
    if options.get('dir', 'LR') != 'TB': # default: left-to-right
        graph.body += 'rankdir=%s\n' % options.get('dir', 'LR')
    nodes = add_valdoc_nodes(graph, func_set, linker, context)
    # Find the edges.  Only edges whose both endpoints made it into
    # ``nodes`` are drawn; callers/callees outside the set are skipped.
    edges = set()
    for func_doc in functions:
        for caller in docindex.callers.get(func_doc, ()):
            if caller in nodes:
                edges.add( (nodes[caller], nodes[func_doc]) )
        for callee in docindex.callees.get(func_doc, ()):
            if callee in nodes:
                edges.add( (nodes[func_doc], nodes[callee]) )
    graph.edges = [DotGraphEdge(src,dst) for (src,dst) in edges]
    return graph
######################################################################
#{ Dot Version
######################################################################
# Cached result of get_dot_version(); None until first queried.
_dot_version = None
_DOT_VERSION_RE = re.compile(r'dot version ([\d\.]+)')
def get_dot_version():
    """Return the installed graphviz ``dot`` version as a list of ints
    (e.g. ``[2, 38]``), or ``[0]`` if dot is missing or unrecognized.
    The result is cached in the module-level ``_dot_version``."""
    global _dot_version
    if _dot_version is None:
        try:
            out, err = run_subprocess([DOT_COMMAND, '-V'])
            # dot prints its version banner on stderr; fall back to stdout.
            version_info = err or out
            m = _DOT_VERSION_RE.match(version_info)
            if m:
                _dot_version = [int(x) for x in m.group(1).split('.')]
            else:
                # Use a *list*, consistent with the success case: callers
                # compare with e.g. ``dot_version >= [2]``, and the old
                # tuple fallback ``(0,)`` made that comparison always true
                # on Python 2 (tuple > list by type name).
                _dot_version = [0]
        except OSError:
            _dot_version = [0]
        log.info('Detected dot version %s' % _dot_version)
    return _dot_version
######################################################################
#{ Helper Functions
######################################################################
def add_valdoc_nodes(graph, val_docs, linker, context):
    """
    Add one `DotGraphNode` to ``graph`` for each value doc in ``val_docs``
    (in canonical-name order) and return a mapping from value doc to node.
    Each node is labeled with the doc's name relative to ``context`` and
    styled by `specialize_valdoc_node`.

    :todo: Use different node styles for different subclasses of APIDoc
    """
    nodes = {}
    # The enumerate() index in the original was never used.
    for val_doc in sorted(val_docs, key=lambda d: d.canonical_name):
        label = val_doc.canonical_name.contextualize(context.canonical_name)
        node = nodes[val_doc] = DotGraphNode(label)
        graph.nodes.append(node)
        specialize_valdoc_node(node, val_doc, context, linker.url_for(val_doc))
    return nodes
NOOP_URL = 'javascript:void(0);'
MODULE_NODE_HTML = '''
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0"
CELLPADDING="0" PORT="table" ALIGN="LEFT">
<TR><TD ALIGN="LEFT" VALIGN="BOTTOM" HEIGHT="8" WIDTH="16" FIXEDSIZE="true"
BGCOLOR="%s" BORDER="1" PORT="tab"></TD></TR>
<TR><TD ALIGN="LEFT" VALIGN="TOP" BGCOLOR="%s" BORDER="1"
PORT="body" HREF="%s" TOOLTIP="%s">%s</TD></TR>
</TABLE>'''.strip()
def specialize_valdoc_node(node, val_doc, context, url):
    """
    Update the style attributes of `node` to reflect its type
    and context.
    """
    # We can only use html-style nodes if dot_version>2.
    dot_version = get_dot_version()
    # If val_doc or context is a variable, get its value.
    if isinstance(val_doc, VariableDoc) and val_doc.value is not UNKNOWN:
        val_doc = val_doc.value
    if isinstance(context, VariableDoc) and context.value is not UNKNOWN:
        context = context.value
    # Set the URL.  (Do this even if it points to the page we're
    # currently on; otherwise, the tooltip is ignored.)
    node['href'] = url or NOOP_URL
    # Modules: rendered as an html "folder tab" label (needs dot >= 2).
    if isinstance(val_doc, ModuleDoc) and dot_version >= [2]:
        node['shape'] = 'plaintext'
        if val_doc == context: color = SELECTED_BG
        else: color = MODULE_BG
        node['tooltip'] = node['label']
        node['html_label'] = MODULE_NODE_HTML % (color, color, url,
                                                 val_doc.canonical_name,
                                                 node['label'])
        node['width'] = node['height'] = 0
        node.port = 'body'
    # Routines: rounded box, label shown as a call ("name()").
    elif isinstance(val_doc, RoutineDoc):
        node['shape'] = 'box'
        node['style'] = 'rounded'
        node['width'] = 0
        node['height'] = 0
        node['label'] = '%s()' % node['label']
        node['tooltip'] = node['label']
        if val_doc == context:
            node['fillcolor'] = SELECTED_BG
            node['style'] = 'filled,rounded,bold'
    # Everything else: a plain box, highlighted when it is the context.
    else:
        node['shape'] = 'box'
        node['width'] = 0
        node['height'] = 0
        node['tooltip'] = node['label']
        if val_doc == context:
            node['fillcolor'] = SELECTED_BG
            node['style'] = 'filled,bold'
def name_list(api_docs, context=None):
    """Return an English-style listing of the docs' names relative to
    ``context``: '', 'a', 'a and b', or 'a, b, and c'."""
    ctx_name = None if context is None else context.canonical_name
    names = []
    for doc in api_docs:
        names.append(str(doc.canonical_name.contextualize(ctx_name)))
    if not names:
        return ''
    if len(names) == 1:
        return names[0]
    if len(names) == 2:
        return '%s and %s' % (names[0], names[1])
    return '%s, and %s' % (', '.join(names[:-1]), names[-1])
| apache-2.0 |
SAM-IT-SA/odoo | openerp/modules/graph.py | 260 | 7763 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules dependency graph. """
import os, sys, imp
from os.path import join as opj
import itertools
import zipimport
import openerp
import openerp.osv as osv
import openerp.tools as tools
import openerp.tools.osutil as osutil
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
import zipfile
import openerp.release as release
import re
import base64
from zipfile import PyZipFile, ZIP_DEFLATED
from cStringIO import StringIO
import logging
_logger = logging.getLogger(__name__)
class Graph(dict):
    """ Modules dependency graph.
    The graph is a mapping from module name to Nodes.
    """
    def add_node(self, name, info):
        """Attach module ``name`` as a child of its deepest dependency
        already present in the graph (creating placeholder nodes for
        unknown dependencies) and return the resulting Node."""
        max_depth, father = 0, None
        for d in info['depends']:
            n = self.get(d) or Node(d, self, None) # lazy creation, do not use default value for get()
            if n.depth >= max_depth:
                father = n
                max_depth = n.depth
        if father:
            return father.add_child(name, info)
        else:
            return Node(name, self, info)
    def update_from_db(self, cr):
        """Fill each node with its ir_module_module row (id, state, demo
        flag, installed version); modules without a row keep defaults."""
        if not len(self):
            return
        # update the graph with values from the database (if exist)
        ## First, we set the default values for each package in graph
        additional_data = dict((key, {'id': 0, 'state': 'uninstalled', 'dbdemo': False, 'installed_version': None}) for key in self.keys())
        ## Then we get the values from the database
        cr.execute('SELECT name, id, state, demo AS dbdemo, latest_version AS installed_version'
                   ' FROM ir_module_module'
                   ' WHERE name IN %s',(tuple(additional_data),)
                   )
        ## and we update the default values with values from the database
        additional_data.update((x['name'], x) for x in cr.dictfetchall())
        for package in self.values():
            for k, v in additional_data[package.name].items():
                setattr(package, k, v)
    def add_module(self, cr, module, force=None):
        """Convenience wrapper around add_modules() for a single module."""
        self.add_modules(cr, [module], force)
    def add_modules(self, cr, module_list, force=None):
        """Add installable modules to the graph in dependency order and
        refresh them from the database; return how many were added."""
        if force is None:
            force = []
        packages = []
        len_graph = len(self)
        for module in module_list:
            # This will raise an exception if no/unreadable descriptor file.
            # NOTE The call to load_information_from_description_file is already
            # done by db.initialize, so it is possible to not do it again here.
            info = openerp.modules.module.load_information_from_description_file(module)
            if info and info['installable']:
                packages.append((module, info)) # TODO directly a dict, like in get_modules_with_version
            else:
                _logger.warning('module %s: not installable, skipped', module)
        dependencies = dict([(p, info['depends']) for p, info in packages])
        current, later = set([p for p, info in packages]), set()
        # Repeatedly take the head of the queue; if its dependencies are
        # all in the graph it is added, otherwise it is deferred to the
        # back.  The loop stops when a full pass defers everything
        # (current > later is no longer a strict superset).
        while packages and current > later:
            package, info = packages[0]
            deps = info['depends']
            # if all dependencies of 'package' are already in the graph, add 'package' in the graph
            # (written with all() instead of the original opaque
            # reduce(lambda x, y: x and y in self, deps, True), which
            # computes the same thing)
            if all(dep in self for dep in deps):
                if package not in current:
                    packages.pop(0)
                    continue
                later.clear()
                current.remove(package)
                node = self.add_node(package, info)
                for kind in ('init', 'demo', 'update'):
                    if package in tools.config[kind] or 'all' in tools.config[kind] or kind in force:
                        setattr(node, kind, True)
            else:
                later.add(package)
                packages.append((package, info))
            packages.pop(0)
        self.update_from_db(cr)
        for package in later:
            unmet_deps = [p for p in dependencies[package] if p not in self]
            _logger.error('module %s: Unmet dependencies: %s', package, ', '.join(unmet_deps))
        result = len(self) - len_graph
        if result != len(module_list):
            _logger.warning('Some modules were not loaded.')
        return result
    def __iter__(self):
        # Yield modules level by level (breadth-first by depth), sorted by
        # name within each level.
        level = 0
        done = set(self.keys())
        while done:
            level_modules = sorted((name, module) for name, module in self.items() if module.depth==level)
            for name, module in level_modules:
                done.remove(name)
                yield module
            level += 1
    def __str__(self):
        return '\n'.join(str(n) for n in self if n.depth == 0)
class Node(object):
    """ One module in the modules dependency graph.
    Node acts as a per-module singleton. A node is constructed via
    Graph.add_module() or Graph.add_modules(). Some of its fields are from
    ir_module_module (set by Graph.update_from_db()).
    """
    def __new__(cls, name, graph, info):
        # Per-module singleton: reuse the node already registered in the
        # graph for this name, otherwise create and register a new one.
        if name in graph:
            inst = graph[name]
        else:
            inst = object.__new__(cls)
            graph[name] = inst
        return inst
    def __init__(self, name, graph, info):
        # __init__ runs again on every Node(...) call, even when __new__
        # returned an existing instance, so only fill in what is missing.
        self.name = name
        self.graph = graph
        self.info = info or getattr(self, 'info', {})
        if not hasattr(self, 'children'):
            self.children = []
        if not hasattr(self, 'depth'):
            self.depth = 0
    @property
    def data(self):
        # The module's descriptor info dict.
        return self.info
    def add_child(self, name, info):
        """Register (or reuse) the node for ``name`` as a child of this
        node, propagating depth and any init/update/demo flags."""
        node = Node(name, self.graph, info)
        node.depth = self.depth + 1
        if node not in self.children:
            self.children.append(node)
        for attr in ('init', 'update', 'demo'):
            if hasattr(self, attr):
                setattr(node, attr, True)
        # Sort with key= instead of the removed positional cmp-function
        # form (children.sort(lambda x, y: cmp(x.name, y.name))): same
        # ordering, and works on both Python 2 and 3.
        self.children.sort(key=lambda x: x.name)
        return node
    def __setattr__(self, name, value):
        super(Node, self).__setattr__(name, value)
        # init/update/demo flags are mirrored into the global config and
        # cascade to all children; depth changes cascade incremented.
        if name in ('init', 'update', 'demo'):
            tools.config[name][self.name] = 1
            for child in self.children:
                setattr(child, name, value)
        if name == 'depth':
            for child in self.children:
                setattr(child, name, value + 1)
    def __iter__(self):
        # Depth-first traversal over all descendants.
        return itertools.chain(iter(self.children), *map(iter, self.children))
    def __str__(self):
        return self._pprint()
    def _pprint(self, depth=0):
        # ASCII tree rendering used by Graph.__str__().
        s = '%s\n' % self.name
        for c in self.children:
            s += '%s`-> %s' % ('   ' * depth, c._pprint(depth+1))
        return s
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jortel/gofer | test/unit/test_proxy.py | 1 | 1174 | # Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from unittest import TestCase
from mock import patch
from gofer import Options
from gofer.proxy import Agent
from gofer.rmi.container import Container
class TestProxy(TestCase):
    def test_init(self):
        """Agent(url, address, **options) must build a Container wired
        with exactly the url, address and options it was given."""
        url = 'qpid+amqp://host'
        address = 'xyz'
        options = {'A': 1, 'B': 2}
        _agent = Agent(url, address, **options)
        _options = Options(options)
        # The original used assertTrue(_agent, Container): the second
        # argument of assertTrue is the failure *message*, so the type
        # was never actually checked.  assertIsInstance does the check.
        self.assertIsInstance(_agent, Container)
        self.assertEqual(_agent._Container__url, url)
        self.assertEqual(_agent._Container__address, address)
        self.assertEqual(_agent._Container__options.__dict__, _options.__dict__)
| lgpl-2.1 |
ingadhoc/odoo | addons/account_accountant/__openerp__.py | 313 | 1878 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Accounting and Finance',
'version' : '1.1',
'author' : 'OpenERP SA',
'category': 'Accounting & Finance',
'sequence': 10,
'summary': 'Financial and Analytic Accounting',
'description': """
Accounting Access Rights
========================
It gives the Administrator user access to all accounting features such as journal items and the chart of accounts.
It assigns manager and user access rights to the Administrator and only user rights to the Demo user.
""",
'website': 'https://www.odoo.com/page/accounting',
'depends' : ['account_voucher'],
'data': [
'security/account_security.xml',
'account_accountant_data.xml'
],
'demo': ['account_accountant_demo.xml'],
'test': [],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dvitme/odoomrp-wip | mrp_production_project_estimated_cost/models/project_project.py | 16 | 1028 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields
class ProjectProject(models.Model):
    # Extend project.project with a flag marking projects that were
    # created automatically (by other modules' code) rather than by hand.
    _inherit = 'project.project'
    automatic_creation = fields.Boolean('Automatic Creation')
| agpl-3.0 |
vrv/tensorflow | tensorflow/python/ops/distributions/gamma.py | 74 | 10671 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
# Public API of this module (controls `from ... import *`).
__all__ = [
    "Gamma",
    "GammaWithSoftplusConcentrationRate",
]
class Gamma(distribution.Distribution):
  """Gamma distribution.
  The Gamma distribution is defined over positive real numbers using
  parameters `concentration` (aka "alpha") and `rate` (aka "beta").
  #### Mathematical Details
  The probability density function (pdf) is,
  ```none
  pdf(x; alpha, beta, x > 0) = x**(alpha - 1) exp(-x beta) / Z
  Z = Gamma(alpha) beta**alpha
  ```
  where:
  * `concentration = alpha`, `alpha > 0`,
  * `rate = beta`, `beta > 0`,
  * `Z` is the normalizing constant, and,
  * `Gamma` is the [gamma function](
    https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative density function (cdf) is,
  ```none
  cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta x) / Gamma(alpha)
  ```
  where `GammaInc` is the [lower incomplete Gamma function](
  https://en.wikipedia.org/wiki/Incomplete_gamma_function).
  The parameters can be intuited via their relationship to mean and stddev,
  ```none
  concentration = alpha = (mean / stddev)**2
  rate = beta = mean / stddev**2 = concentration / mean
  ```
  Distribution parameters are automatically broadcast in all functions; see
  examples for details.
  WARNING: This distribution may draw 0-valued samples for small `concentration`
  values. See note in `tf.random_gamma` docstring.
  #### Examples
  ```python
  dist = Gamma(concentration=3.0, rate=2.0)
  dist2 = Gamma(concentration=[3.0, 4.0], rate=[2.0, 3.0])
  ```
  """
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="Gamma"):
    """Construct Gamma with `concentration` and `rate` parameters.
    The parameters `concentration` and `rate` must be shaped in a way that
    supports broadcasting (e.g. `concentration + rate` is a valid operation).
    Args:
      concentration: Floating point tensor, the concentration params of the
        distribution(s). Must contain only positive values.
      rate: Floating point tensor, the inverse scale params of the
        distribution(s). Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.
    Raises:
      TypeError: if `concentration` and `rate` are different dtypes.
    """
    # Capture the constructor arguments for the base class's `parameters`
    # property (must run before any other locals are created).
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]):
      # The positivity assertions only gate the graph when
      # validate_args=True; otherwise no dependencies are added.
      with ops.control_dependencies([
          check_ops.assert_positive(concentration),
          check_ops.assert_positive(rate),
      ] if validate_args else []):
        self._concentration = array_ops.identity(
            concentration, name="concentration")
        self._rate = array_ops.identity(rate, name="rate")
        check_ops.assert_same_float_dtype(
            [self._concentration, self._rate])
    super(Gamma, self).__init__(
        dtype=self._concentration.dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        parameters=parameters,
        graph_parents=[self._concentration,
                       self._rate],
        name=name)
  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters take the same (sample) shape.
    return dict(
        zip(("concentration", "rate"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))
  @property
  def concentration(self):
    """Concentration parameter."""
    return self._concentration
  @property
  def rate(self):
    """Rate parameter."""
    return self._rate
  def _batch_shape_tensor(self):
    # Batch shape is the broadcast of the two parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.concentration),
        array_ops.shape(self.rate))
  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.concentration.get_shape(),
        self.rate.get_shape())
  def _event_shape_tensor(self):
    # Scalar distribution: events have empty shape.
    return constant_op.constant([], dtype=dtypes.int32)
  def _event_shape(self):
    return tensor_shape.scalar()
  @distribution_util.AppendDocstring(
      """Note: See `tf.random_gamma` docstring for sampling details and
      caveats.""")
  def _sample_n(self, n, seed=None):
    return random_ops.random_gamma(
        shape=[n],
        alpha=self.concentration,
        beta=self.rate,
        dtype=self.dtype,
        seed=seed)
  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()
  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))
  def _log_cdf(self, x):
    return math_ops.log(self._cdf(x))
  def _cdf(self, x):
    x = self._maybe_assert_valid_sample(x)
    # Note that igamma returns the regularized incomplete gamma function,
    # which is what we want for the CDF.
    return math_ops.igamma(self.concentration, self.rate * x)
  def _log_unnormalized_prob(self, x):
    # (alpha - 1) log(x) - beta x
    x = self._maybe_assert_valid_sample(x)
    return (self.concentration - 1.) * math_ops.log(x) - self.rate * x
  def _log_normalization(self):
    # log Z = log Gamma(alpha) - alpha log(beta)
    return (math_ops.lgamma(self.concentration)
            - self.concentration * math_ops.log(self.rate))
  def _entropy(self):
    # Standard Gamma entropy:
    # alpha - log(beta) + log Gamma(alpha) + (1 - alpha) digamma(alpha).
    return (self.concentration
            - math_ops.log(self.rate)
            + math_ops.lgamma(self.concentration)
            + ((1. - self.concentration) *
               math_ops.digamma(self.concentration)))
  def _mean(self):
    return self.concentration / self.rate
  def _variance(self):
    return self.concentration / math_ops.square(self.rate)
  def _stddev(self):
    return math_ops.sqrt(self.concentration) / self.rate
  @distribution_util.AppendDocstring(
      """The mode of a gamma distribution is `(shape - 1) / rate` when
      `shape > 1`, and `NaN` otherwise.  If `self.allow_nan_stats` is `False`,
      an exception will be raised rather than returning `NaN`.""")
  def _mode(self):
    mode = (self.concentration - 1.) / self.rate
    if self.allow_nan_stats:
      nan = array_ops.fill(
          self.batch_shape_tensor(),
          np.array(np.nan, dtype=self.dtype.as_numpy_dtype()),
          name="nan")
      return array_ops.where(self.concentration > 1., mode, nan)
    else:
      return control_flow_ops.with_dependencies([
          check_ops.assert_less(
              array_ops.ones([], self.dtype),
              self.concentration,
              message="mode not defined when any concentration <= 1"),
      ], mode)
  def _maybe_assert_valid_sample(self, x):
    # Samples must be positive; the assertion is only wired into the graph
    # when validate_args=True.
    check_ops.assert_same_float_dtype(tensors=[x], dtype=self.dtype)
    if not self.validate_args:
      return x
    return control_flow_ops.with_dependencies([
        check_ops.assert_positive(x),
    ], x)
class GammaWithSoftplusConcentrationRate(Gamma):
  """`Gamma` with softplus of `concentration` and `rate`."""
  def __init__(self,
               concentration,
               rate,
               validate_args=False,
               allow_nan_stats=True,
               name="GammaWithSoftplusConcentrationRate"):
    parameters = locals()
    with ops.name_scope(name, values=[concentration, rate]):
      # softplus maps unconstrained reals to (0, inf), guaranteeing the
      # positivity both Gamma parameters require.
      super(GammaWithSoftplusConcentrationRate, self).__init__(
          concentration=nn.softplus(concentration,
                                    name="softplus_concentration"),
          rate=nn.softplus(rate, name="softplus_rate"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    # Record the *raw* (pre-softplus) constructor arguments.
    self._parameters = parameters
@kullback_leibler.RegisterKL(Gamma, Gamma)
def _kl_gamma_gamma(g0, g1, name=None):
  """Calculate the batched KL divergence KL(g0 || g1) with g0 and g1 Gamma.
  Args:
    g0: instance of a Gamma distribution object.
    g1: instance of a Gamma distribution object.
    name: (optional) Name to use for created operations.
      Default is "kl_gamma_gamma".
  Returns:
    kl_gamma_gamma: `Tensor`. The batchwise KL(g0 || g1).
  """
  with ops.name_scope(name, "kl_gamma_gamma", values=[
      g0.concentration, g0.rate, g1.concentration, g1.rate]):
    # Result from:
    #   http://www.fil.ion.ucl.ac.uk/~wpenny/publications/densities.ps
    # For derivation see:
    #   http://stats.stackexchange.com/questions/11646/kullback-leibler-divergence-between-two-gamma-distributions   pylint: disable=line-too-long
    # Closed-form KL between two Gammas in (concentration, rate)
    # parameterization; all terms broadcast over the batch shape.
    return (((g0.concentration - g1.concentration)
             * math_ops.digamma(g0.concentration))
            + math_ops.lgamma(g1.concentration)
            - math_ops.lgamma(g0.concentration)
            + g1.concentration * math_ops.log(g0.rate)
            - g1.concentration * math_ops.log(g1.rate)
            + g0.concentration * (g1.rate / g0.rate - 1.))
| apache-2.0 |
Outernet-Project/librarian-football | setup.py | 1 | 1157 | import os
from setuptools import setup, find_packages
import librarian_football as pkg
def read(fname):
    """ Return content of specified file, resolved relative to this
    file's directory. """
    # Use a context manager so the handle is closed promptly; the
    # original left the file open until garbage collection.
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Package version is sourced from the package itself (single source of truth).
VERSION = pkg.__version__
setup(
    name='librarian-football',
    version=VERSION,
    license='BSD',
    packages=[pkg.__name__],
    include_package_data=True,
    long_description=read('README.rst'),
    install_requires=[
        'librarian_core',
        'librarian_content',
        'requests'
    ],
    # Private git sources for the librarian_* requirements above.
    dependency_links=[
        'git+ssh://git@github.com/Outernet-Project/librarian-core.git#egg=librarian_core-0.1',
        'git+ssh://git@github.com/Outernet-Project/librarian-content.git#egg=librarian_content-0.1',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        # NOTE(review): 'Topic :: Applicaton' is misspelled and is not a
        # valid trove classifier -- confirm the intended classifier.
        'Topic :: Applicaton',
        'Framework :: Bottle',
        'Environment :: Web Environment',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
) | gpl-3.0 |
chaen/DIRAC | Core/DISET/private/ServiceConfiguration.py | 8 | 3606 | # $HeadURL$
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities import Network, List
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.Core.DISET.private.Protocols import gDefaultProtocol
class ServiceConfiguration:
  """Resolve a DISET service's configuration (port, protocol, URL, thread
  limits, ...) from the Configuration Service sections matching any of the
  service's names, falling back to hard-coded defaults."""

  def __init__( self, nameList ):
    # The first name is the service's primary name; the rest are aliases.
    self.serviceName = nameList[0]
    self.serviceURL = None
    self.nameList = nameList
    self.pathList = []
    for svcName in nameList:
      self.pathList.append( PathFinder.getServiceSection( svcName ) )

  def getOption( self, optionName ):
    """Return the first CS value found for optionName, searching each
    service section in order; absolute paths ('/...') bypass the sections.
    Returns None when the option is unset."""
    if optionName[0] == "/":
      return gConfigurationData.extractOptionFromCFG( optionName )
    for path in self.pathList:
      value = gConfigurationData.extractOptionFromCFG( "%s/%s" % ( path, optionName ) )
      if value:
        return value
    return None

  def getAddress( self ):
    # Empty host means "listen on all interfaces".
    return ( "", self.getPort() )

  def getHandlerLocation( self ):
    return self.getOption( "HandlerPath" )

  def getName( self ):
    return self.serviceName

  def setURL( self, sURL ):
    self.serviceURL = sURL

  def __getCSURL( self, URL = None ):
    # CS-configured URL wins over the supplied fallback.
    optionValue = self.getOption( "URL" )
    if optionValue:
      return optionValue
    return URL

  def registerAlsoAs( self ):
    optionValue = self.getOption( "RegisterAlsoAs" )
    if optionValue:
      return List.fromChar( optionValue )
    else:
      return []

  # In all the numeric getters below: int() raises TypeError when the
  # option is unset (None) and ValueError when it is malformed.  The
  # original bare "except:" clauses also swallowed KeyboardInterrupt
  # and SystemExit; catching only what int() can raise is equivalent
  # for valid configurations and safer otherwise.

  def getMaxThreads( self ):
    try:
      return int( self.getOption( "MaxThreads" ) )
    except ( TypeError, ValueError ):
      return 15

  def getMinThreads( self ):
    try:
      return int( self.getOption( "MinThreads" ) )
    except ( TypeError, ValueError ):
      return 1

  def getMaxWaitingPetitions( self ):
    try:
      return int( self.getOption( "MaxWaitingPetitions" ) )
    except ( TypeError, ValueError ):
      return 500

  def getMaxMessagingConnections( self ):
    try:
      return int( self.getOption( "MaxMessagingConnections" ) )
    except ( TypeError, ValueError ):
      return 20

  def getMaxThreadsForMethod( self, actionType, method ):
    try:
      return int( self.getOption( "ThreadLimit/%s/%s" % ( actionType, method ) ) )
    except ( TypeError, ValueError ):
      return 15

  def getCloneProcesses( self ):
    try:
      return int( self.getOption( "CloneProcesses" ) )
    except ( TypeError, ValueError ):
      return 1

  def getPort( self ):
    try:
      return int( self.getOption( "Port" ) )
    except ( TypeError, ValueError ):
      return 9876

  def getProtocol( self ):
    optionValue = self.getOption( "Protocol" )
    if optionValue:
      return optionValue
    return gDefaultProtocol

  def getHostname( self ):
    hostname = self.getOption( "/DIRAC/Hostname" )
    if not hostname:
      return Network.getFQDN()
    return hostname

  def getURL( self ):
    """
    Build the service URL (cached in self.serviceURL once computed)
    """
    if self.serviceURL:
      return self.serviceURL
    protocol = self.getProtocol()
    serviceURL = self.__getCSURL()
    if serviceURL:
      if not serviceURL.startswith( protocol ):
        # Rewrite the scheme of the configured URL to this service's
        # selected protocol.
        urlFields = serviceURL.split( ":" )
        urlFields[0] = protocol
        serviceURL = ":".join( urlFields )
      self.setURL( serviceURL )
      return serviceURL
    hostName = self.getHostname()
    port = self.getPort()
    serviceURL = "%s://%s:%s/%s" % ( protocol,
                                     hostName,
                                     port,
                                     self.getName() )
    if serviceURL[-1] == "/":
      serviceURL = serviceURL[:-1]
    self.setURL( serviceURL )
    return serviceURL

  def getContextLifeTime( self ):
    optionValue = self.getOption( "ContextLifeTime" )
    try:
      return int( optionValue )
    except ( TypeError, ValueError ):
      return 21600
| gpl-3.0 |
jmcarp/regulations-parser | regparser/layer/table_of_contents.py | 4 | 1240 | #vim: set encoding=utf-8
from layer import Layer
from regparser.tree.struct import Node
class TableOfContentsLayer(Layer):
    def check_toc_candidacy(self, node):
        """ A node may carry a table of contents only when every child has
        a title.  Children that are empty subparts are transparent: the
        check applies to their own children instead. """
        for child in node.children:
            if child.node_type == Node.EMPTYPART:
                if not all(sub.title for sub in child.children):
                    return False
            elif not child.title:
                return False
        return True

    def process(self, node):
        """ Produce the table-of-contents entries for an eligible node,
        flattening empty subparts into their contained sections.  Returns
        None when the node is not eligible. """
        if not self.check_toc_candidacy(node):
            return None
        entries = []
        for child in node.children:
            if child.node_type == Node.EMPTYPART:
                for sub in child.children:
                    entries.append({'index': sub.label, 'title': sub.title})
            else:
                entries.append({'index': child.label, 'title': child.title})
        return entries
| cc0-1.0 |
facebookresearch/Detectron | tools/generate_testdev_from_test.py | 1 | 3208 | #!/usr/bin/env python
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Given a full set of results (boxes, masks, or keypoints) on the 2017 COCO
test set, this script extracts the results subset that corresponds to 2017
test-dev. The test-dev subset can then be submitted to the COCO evaluation
server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import os
import sys
from detectron.datasets.dataset_catalog import get_ann_fn
from detectron.utils.timer import Timer
def parse_args():
    """Parse command-line flags.

    Prints usage and exits with status 1 when invoked with no arguments
    at all (i.e. only the program name in sys.argv).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--json', dest='json_file',
        help='detections json file',
        default='', type=str)
    parser.add_argument(
        '--output-dir', dest='output_dir',
        help='output directory',
        default='/tmp', type=str)
    # No flags at all: show help rather than silently using defaults.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def convert(json_file, output_dir):
    """Extract the test-dev subset from full COCO 2017 test-set results.

    Reads a detections json covering coco_2017_test, keeps only the entries
    whose image ids belong to coco_2017_test-dev, and writes the filtered
    results to ``output_dir`` as ``<name>_test-dev<ext>``.

    Args:
        json_file: path to the detections json file (list of result dicts,
            each with an 'image_id' key).
        output_dir: directory that receives the filtered json file.
    """
    print('Reading: {}'.format(json_file))
    with open(json_file, 'r') as fid:
        dt = json.load(fid)
    print('done!')

    test_image_info = get_ann_fn('coco_2017_test')
    with open(test_image_info, 'r') as fid:
        info_test = json.load(fid)
    image_test = info_test['images']
    image_test_id = [i['id'] for i in image_test]
    print('{} has {} images'.format(test_image_info, len(image_test_id)))

    test_dev_image_info = get_ann_fn('coco_2017_test-dev')
    with open(test_dev_image_info, 'r') as fid:
        info_testdev = json.load(fid)
    image_testdev = info_testdev['images']
    # Use a set for O(1) membership tests: the original list made the
    # filtering loop below O(len(dt) * len(image_testdev)).
    image_testdev_id = {i['id'] for i in image_testdev}
    print('{} has {} images'.format(test_dev_image_info, len(image_testdev_id)))

    dt_testdev = []
    print('Filtering test-dev from test...')
    t = Timer()
    t.tic()
    for i in range(len(dt)):
        if i % 1000 == 0:
            print('{}/{}'.format(i, len(dt)))
        if dt[i]['image_id'] in image_testdev_id:
            dt_testdev.append(dt[i])
    # '{:.2f}' prints the elapsed seconds with two decimals; the previous
    # '{:2}' was a width spec, almost certainly a typo.
    print('Done filtering ({:.2f}s)!'.format(t.toc()))

    filename, file_extension = os.path.splitext(os.path.basename(json_file))
    filename = filename + '_test-dev'
    filename = os.path.join(output_dir, filename + file_extension)
    with open(filename, 'w') as fid:
        # json.dump returns None, so do not rebind a name to its result
        # (the original assigned it to info_test, shadowing real data).
        json.dump(dt_testdev, fid)
    print('Done writing: {}!'.format(filename))
if __name__ == '__main__':
    # Parse flags and run the conversion when executed as a script.
    args = parse_args()
    convert(args.json_file, args.output_dir)
| apache-2.0 |
snnn/tensorflow | tensorflow/contrib/layers/python/layers/layers.py | 6 | 138784 | # -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Higher level ops for building layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import convolutional as convolutional_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import normalization as normalization_layers
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training import moving_averages
# TODO(b/28426988): Replace legacy_* fns migrated from slim.
# TODO(b/28426988): Remove legacy_* when all uses have migrated to new API.
# Public API of this module; names not listed here are implementation
# details even when importable.
__all__ = [
    'avg_pool2d', 'avg_pool3d', 'batch_norm', 'bias_add', 'conv1d', 'conv2d',
    'conv3d', 'conv2d_in_plane', 'conv2d_transpose', 'conv3d_transpose',
    'convolution', 'convolution1d', 'convolution2d', 'convolution2d_in_plane',
    'convolution2d_transpose', 'convolution3d', 'convolution3d_transpose',
    'dense_to_sparse', 'dropout', 'elu', 'flatten', 'fully_connected', 'GDN',
    'gdn', 'images_to_sequence', 'layer_norm', 'linear', 'pool', 'max_pool2d',
    'max_pool3d', 'one_hot_encoding', 'relu', 'relu6', 'repeat',
    'scale_gradient', 'separable_conv2d', 'separable_convolution2d',
    'sequence_to_images', 'softmax', 'spatial_softmax', 'stack', 'unit_norm',
    'legacy_fully_connected', 'legacy_linear', 'legacy_relu', 'maxout'
]

# Canonical TensorFlow data-format strings: channels-first ('NC...') vs
# channels-last ('N...C') layouts for 2D (HW) and 3D (DHW) spatial data.
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
DATA_FORMAT_NCDHW = 'NCDHW'
DATA_FORMAT_NDHWC = 'NDHWC'
@add_arg_scope
def avg_pool2d(inputs,
               kernel_size,
               stride=2,
               padding='VALID',
               data_format=DATA_FORMAT_NHWC,
               outputs_collections=None,
               scope=None):
  """2D average pooling over the spatial dimensions of `inputs`.

  Pooling is applied independently per image and per channel.

  Args:
    inputs: A 4-D tensor, `[batch, height, width, channels]` for `NHWC` or
      `[batch, channels, height, width]` for `NCHW`.
    kernel_size: `[kernel_height, kernel_width]`, or a single int used for
      both dimensions.
    stride: `[stride_height, stride_width]`, or a single int used for both.
    padding: `'VALID'` or `'SAME'`.
    data_format: `'NHWC'` (default) or `'NCHW'`.
    outputs_collections: Collections in which to place the output tensor.
    scope: Optional name_scope name.

  Returns:
    The pooled `Tensor`.

  Raises:
    ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with ops.name_scope(scope, 'AvgPool2D', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    # Translate the TF-style format string to the keras-layers vocabulary.
    channel_layout = ('channels_first'
                      if data_format and data_format.startswith('NC')
                      else 'channels_last')
    pooling = pooling_layers.AveragePooling2D(
        pool_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=channel_layout,
        _scope=sc)
    pooled = pooling.apply(inputs)
    return utils.collect_named_outputs(outputs_collections, sc, pooled)
@add_arg_scope
def avg_pool3d(inputs,
               kernel_size,
               stride=2,
               padding='VALID',
               data_format=DATA_FORMAT_NDHWC,
               outputs_collections=None,
               scope=None):
  """3D average pooling over the depth/height/width dimensions of `inputs`.

  Pooling is applied independently per volume and per channel.

  Args:
    inputs: A 5-D tensor, `[batch, depth, height, width, channels]` for
      `NDHWC` or `[batch, channels, depth, height, width]` for `NCDHW`.
    kernel_size: `[kernel_depth, kernel_height, kernel_width]`, or a single
      int used for all three dimensions.
    stride: `[stride_depth, stride_height, stride_width]`, or a single int
      used for all three.
    padding: `'VALID'` or `'SAME'`.
    data_format: `'NDHWC'` (default) or `'NCDHW'`.
    outputs_collections: Collections in which to place the output tensor.
    scope: Optional name_scope name.

  Returns:
    The pooled `Tensor`.

  Raises:
    ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
  """
  if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
    raise ValueError('data_format has to be either NCDHW or NDHWC.')
  with ops.name_scope(scope, 'AvgPool3D', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    # Translate the TF-style format string to the keras-layers vocabulary.
    channel_layout = ('channels_first'
                      if data_format and data_format.startswith('NC')
                      else 'channels_last')
    pooling = pooling_layers.AveragePooling3D(
        pool_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=channel_layout,
        _scope=sc)
    pooled = pooling.apply(inputs)
    return utils.collect_named_outputs(outputs_collections, sc, pooled)
def _fused_batch_norm(inputs,
                      decay=0.999,
                      center=True,
                      scale=False,
                      epsilon=0.001,
                      activation_fn=None,
                      param_initializers=None,
                      param_regularizers=None,
                      updates_collections=ops.GraphKeys.UPDATE_OPS,
                      is_training=True,
                      reuse=None,
                      variables_collections=None,
                      outputs_collections=None,
                      trainable=True,
                      data_format=DATA_FORMAT_NHWC,
                      zero_debias_moving_mean=False,
                      scope=None):
  """Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"

  Sergey Ioffe, Christian Szegedy

  Can be used as a normalizer function for conv2d and fully_connected.

  Note: when training, the moving_mean and moving_variance need to be updated.
  By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
  need to be added as a dependency to the `train_op`. For example:

  ```python
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(loss)
  ```

  One can set updates_collections=None to force the updates in place, but that
  can have a speed penalty, especially in distributed settings.

  Args:
    inputs: A tensor with 2 or more dimensions, where the first dimension has
      `batch_size`. The normalization is over all but the last dimension if
      `data_format` is `NHWC` and the second dimension if `data_format` is
      `NCHW`.
    decay: Decay for the moving average. Reasonable values for `decay` are
      close to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9,
      etc. Lower `decay` value (recommend trying `decay`=0.9) if model
      experiences reasonably good training performance but poor validation
      and/or test performance.
    center: If True, add offset of `beta` to normalized tensor. If False,
      `beta` is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can
      be disabled since the scaling can be done by the next layer.
    epsilon: Small float added to variance to avoid dividing by zero.
    activation_fn: Activation function, default set to None to skip it and
      maintain a linear activation.
    param_initializers: Optional initializers for beta, gamma, moving mean and
      moving variance.
    param_regularizers: Optional regularizer for beta and gamma.
    updates_collections: Collections to collect the update ops for
      computation. The updates_ops need to be executed with the train_op.
      If None, a control dependency would be added to make sure the updates
      are computed in place.
    is_training: Whether or not the layer is in training mode. In training
      mode it would accumulate the statistics of the moments into
      `moving_mean` and `moving_variance` using an exponential moving average
      with the given `decay`. When it is not in training mode then it would
      use the values of the `moving_mean` and the `moving_variance`.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional collections for the variables.
    outputs_collections: Collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    zero_debias_moving_mean: Use zero_debias for moving_mean.
    scope: Optional scope for `variable_scope`.

  Returns:
    A `Tensor` representing the output of the operation.

  Raises:
    ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If the rank of `inputs` is undefined.
    ValueError: If the rank of `inputs` is neither 2 or 4.
    ValueError: If rank or `C` dimension of `inputs` is undefined.
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with variable_scope.variable_scope(
      scope, 'BatchNorm', [inputs], reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    original_shape = inputs.get_shape()
    original_inputs = inputs
    original_rank = original_shape.ndims
    if original_rank is None:
      raise ValueError('Inputs %s has undefined rank' % inputs.name)
    elif original_rank not in [2, 4]:
      raise ValueError('Inputs %s has unsupported rank.'
                       ' Expected 2 or 4 but got %d' % (inputs.name,
                                                        original_rank))
    if original_rank == 2:
      # The fused kernel only handles 4-D input; view a 2-D [N, C] tensor as
      # [N, 1, 1, C] (or [N, C, 1, 1] for NCHW) and undo the reshape at the
      # end of this function.
      channels = inputs.get_shape()[-1].value
      if channels is None:
        raise ValueError('`C` dimension must be known but is None')
      new_shape = [-1, 1, 1, channels]
      if data_format == DATA_FORMAT_NCHW:
        new_shape = [-1, channels, 1, 1]
      inputs = array_ops.reshape(inputs, new_shape)
    inputs_shape = inputs.get_shape()
    if data_format == DATA_FORMAT_NHWC:
      params_shape = inputs_shape[-1:]
    else:
      params_shape = inputs_shape[1:2]
    if not params_shape.is_fully_defined():
      raise ValueError('Inputs %s has undefined `C` dimension %s.' %
                       (inputs.name, params_shape))

    # Allocate parameters for the beta and gamma of the normalization.
    beta_collections = utils.get_variable_collections(variables_collections,
                                                      'beta')
    # Float32 required to avoid precision-loss when using fp16 input/output
    variable_dtype = dtypes.float32
    if not param_initializers:
      param_initializers = {}
    if not param_regularizers:
      param_regularizers = {}
    beta_regularizer = param_regularizers.get('beta')
    gamma_regularizer = param_regularizers.get('gamma')

    if center:
      beta_initializer = param_initializers.get('beta',
                                                init_ops.zeros_initializer())
      beta = variables.model_variable(
          'beta',
          shape=params_shape,
          dtype=variable_dtype,
          initializer=beta_initializer,
          regularizer=beta_regularizer,
          collections=beta_collections,
          trainable=trainable)
    else:
      # center=False: use a constant zero offset so the fused op still has a
      # beta argument.
      beta = array_ops.constant(0.0, dtype=variable_dtype, shape=params_shape)

    if scale:
      gamma_collections = utils.get_variable_collections(
          variables_collections, 'gamma')
      gamma_initializer = param_initializers.get('gamma',
                                                 init_ops.ones_initializer())
      gamma = variables.model_variable(
          'gamma',
          shape=params_shape,
          dtype=variable_dtype,
          initializer=gamma_initializer,
          regularizer=gamma_regularizer,
          collections=gamma_collections,
          trainable=trainable)
    else:
      # scale=False: use a constant unit scale.
      gamma = array_ops.constant(1.0, dtype=variable_dtype, shape=params_shape)

    # Create moving_mean and moving_variance variables and add them to the
    # appropriate collections. We disable variable partitioning while creating
    # them, because assign_moving_average is not yet supported for partitioned
    # variables (this needs to be handled carefully, as it may break
    # the checkpoint backward compatibility).
    with variable_scope.variable_scope(
        variable_scope.get_variable_scope()) as local_scope:
      local_scope.set_partitioner(None)
      moving_mean_collections = utils.get_variable_collections(
          variables_collections, 'moving_mean')
      moving_mean_initializer = param_initializers.get(
          'moving_mean', init_ops.zeros_initializer())
      moving_mean = variables.model_variable(
          'moving_mean',
          shape=params_shape,
          dtype=variable_dtype,
          initializer=moving_mean_initializer,
          trainable=False,
          collections=moving_mean_collections)
      moving_variance_collections = utils.get_variable_collections(
          variables_collections, 'moving_variance')
      moving_variance_initializer = param_initializers.get(
          'moving_variance', init_ops.ones_initializer())
      moving_variance = variables.model_variable(
          'moving_variance',
          shape=params_shape,
          dtype=variable_dtype,
          initializer=moving_variance_initializer,
          trainable=False,
          collections=moving_variance_collections)

    # Branch bodies for utils.smart_cond below: training mode computes batch
    # statistics, inference mode reads the moving averages instead.
    def _fused_batch_norm_training():
      return nn.fused_batch_norm(
          inputs, gamma, beta, epsilon=epsilon, data_format=data_format)

    def _fused_batch_norm_inference():
      return nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          mean=moving_mean,
          variance=moving_variance,
          epsilon=epsilon,
          is_training=False,
          data_format=data_format)

    outputs, mean, variance = utils.smart_cond(
        is_training, _fused_batch_norm_training, _fused_batch_norm_inference)

    # If `is_training` doesn't have a constant value, because it is a
    # `Tensor`, a `Variable` or `Placeholder` then is_training_value will be
    # None and `need_updates` will be true.
    is_training_value = utils.constant_value(is_training)
    need_updates = is_training_value is None or is_training_value
    if need_updates:
      if updates_collections is None:
        # Force the moving-average updates to run before the output is
        # returned (a control dependency), instead of deferring them to a
        # collection.
        no_updates = lambda: outputs

        def _force_updates():
          """Internal function forces updates moving_vars if is_training."""
          update_moving_mean = moving_averages.assign_moving_average(
              moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
          update_moving_variance = moving_averages.assign_moving_average(
              moving_variance, variance, decay, zero_debias=False)
          with ops.control_dependencies(
              [update_moving_mean, update_moving_variance]):
            return array_ops.identity(outputs)

        outputs = utils.smart_cond(is_training, _force_updates, no_updates)
      else:
        # Defer the moving-average updates: add them to updates_collections
        # so the caller can run them alongside the train op.
        moving_vars_fn = lambda: (moving_mean, moving_variance)

        def _delay_updates():
          """Internal function that delay updates moving_vars if is_training."""
          update_moving_mean = moving_averages.assign_moving_average(
              moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
          update_moving_variance = moving_averages.assign_moving_average(
              moving_variance, variance, decay, zero_debias=False)
          return update_moving_mean, update_moving_variance

        update_mean, update_variance = utils.smart_cond(
            is_training, _delay_updates, moving_vars_fn)
        ops.add_to_collections(updates_collections, update_mean)
        ops.add_to_collections(updates_collections, update_variance)

    outputs.set_shape(inputs_shape)
    if original_shape.ndims == 2:
      # Undo the 2-D -> 4-D reshape applied above.
      outputs = array_ops.reshape(outputs, array_ops.shape(original_inputs))
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def batch_norm(inputs,
               decay=0.999,
               center=True,
               scale=False,
               epsilon=0.001,
               activation_fn=None,
               param_initializers=None,
               param_regularizers=None,
               updates_collections=ops.GraphKeys.UPDATE_OPS,
               is_training=True,
               reuse=None,
               variables_collections=None,
               outputs_collections=None,
               trainable=True,
               batch_weights=None,
               fused=None,
               data_format=DATA_FORMAT_NHWC,
               zero_debias_moving_mean=False,
               scope=None,
               renorm=False,
               renorm_clipping=None,
               renorm_decay=0.99,
               adjustment=None):
  """Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.

  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift"

  Sergey Ioffe, Christian Szegedy

  Can be used as a normalizer function for conv2d and fully_connected. The
  normalization is over all but the last dimension if `data_format` is `NHWC`
  and all but the second dimension if `data_format` is `NCHW`. In case of a 2D
  tensor this corresponds to the batch dimension, while in case of a 4D tensor
  this corresponds to the batch and space dimensions.

  Note: when training, the moving_mean and moving_variance need to be updated.
  By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
  need to be added as a dependency to the `train_op`. For example:

  ```python
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(loss)
  ```

  One can set updates_collections=None to force the updates in place, but that
  can have a speed penalty, especially in distributed settings.

  Args:
    inputs: A tensor with 2 or more dimensions, where the first dimension has
      `batch_size`. The normalization is over all but the last dimension if
      `data_format` is `NHWC` and the second dimension if `data_format` is
      `NCHW`.
    decay: Decay for the moving average. Reasonable values for `decay` are
      close to 1.0, typically in the multiple-nines range: 0.999, 0.99, 0.9,
      etc. Lower `decay` value (recommend trying `decay`=0.9) if model
      experiences reasonably good training performance but poor validation
      and/or test performance. Try zero_debias_moving_mean=True for improved
      stability.
    center: If True, add offset of `beta` to normalized tensor. If False,
      `beta` is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can
      be disabled since the scaling can be done by the next layer.
    epsilon: Small float added to variance to avoid dividing by zero.
    activation_fn: Activation function, default set to None to skip it and
      maintain a linear activation.
    param_initializers: Optional initializers for beta, gamma, moving mean and
      moving variance.
    param_regularizers: Optional regularizer for beta and gamma.
    updates_collections: Collections to collect the update ops for
      computation. The updates_ops need to be executed with the train_op.
      If None, a control dependency would be added to make sure the updates
      are computed in place.
    is_training: Whether or not the layer is in training mode. In training
      mode it would accumulate the statistics of the moments into
      `moving_mean` and `moving_variance` using an exponential moving average
      with the given `decay`. When it is not in training mode then it would
      use the values of the `moving_mean` and the `moving_variance`.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional collections for the variables.
    outputs_collections: Collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    batch_weights: An optional tensor of shape `[batch_size]`,
      containing a frequency weight for each batch item. If present,
      then the batch normalization uses weighted mean and
      variance. (This can be used to correct for bias in training
      example selection.)
    fused: if `None` or `True`, use a faster, fused implementation if
      possible. If `False`, use the system recommended implementation.
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    zero_debias_moving_mean: Use zero_debias for moving_mean. It creates a new
      pair of variables 'moving_mean/biased' and 'moving_mean/local_step'.
    scope: Optional scope for `variable_scope`.
    renorm: Whether to use Batch Renormalization
      (https://arxiv.org/abs/1702.03275). This adds extra variables during
      training. The inference is the same for either value of this parameter.
    renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
      scalar `Tensors` used to clip the renorm correction. The correction
      `(r, d)` is used as `corrected_value = normalized_value * r + d`, with
      `r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax,
      rmin, dmax are set to inf, 0, inf, respectively.
    renorm_decay: Momentum used to update the moving means and standard
      deviations with renorm. Unlike `momentum`, this affects training
      and should be neither too small (which would add noise) nor too large
      (which would give stale estimates). Note that `decay` is still applied
      to get the means and variances for inference.
    adjustment: A function taking the `Tensor` containing the (dynamic) shape
      of the input tensor and returning a pair (scale, bias) to apply to the
      normalized values (before gamma and beta), only during training. For
      example,
        `adjustment = lambda shape: (
          tf.random_uniform(shape[-1:], 0.93, 1.07),
          tf.random_uniform(shape[-1:], -0.1, 0.1))`
      will scale the normalized value by up to 7% up or down, then shift the
      result by up to 0.1 (with independent scaling and bias for each feature
      but shared across all examples), and finally apply gamma and/or beta.
      If `None`, no adjustment is applied.

  Returns:
    A `Tensor` representing the output of the operation.

  Raises:
    ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If the rank of `inputs` is undefined.
    ValueError: If rank or channels dimension of `inputs` is undefined.
  """
  if fused is None:
    fused = True

  # Only use _fused_batch_norm if all of the following three
  # conditions are true:
  # (1) fused is set True;
  # (2) it is possible to use (currently it doesn't support batch weights,
  #   renorm, and the case when rank is neither 2 nor 4);
  # (3) it is used with zero_debias_moving_mean, or an input shape of rank 2,
  #   or non-default updates_collections (not implemented in
  #   normalization_layers.BatchNormalization yet); otherwise use the fused
  #   implementation in normalization_layers.BatchNormalization.
  inputs = ops.convert_to_tensor(inputs)
  rank = inputs.get_shape().ndims
  possible_to_fuse = (
      batch_weights is None and not renorm and rank in [2, 4] and
      adjustment is None)
  if fused and possible_to_fuse and (
      zero_debias_moving_mean or rank == 2 or
      updates_collections is not ops.GraphKeys.UPDATE_OPS):
    return _fused_batch_norm(
        inputs,
        decay=decay,
        center=center,
        scale=scale,
        epsilon=epsilon,
        activation_fn=activation_fn,
        param_initializers=param_initializers,
        param_regularizers=param_regularizers,
        updates_collections=updates_collections,
        is_training=is_training,
        reuse=reuse,
        variables_collections=variables_collections,
        outputs_collections=outputs_collections,
        trainable=trainable,
        data_format=data_format,
        zero_debias_moving_mean=zero_debias_moving_mean,
        scope=scope)

  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')

  layer_variable_getter = _build_variable_getter()
  with variable_scope.variable_scope(
      scope,
      'BatchNorm', [inputs],
      reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)

    # Determine whether we can use the core layer class.
    if (batch_weights is None and
        updates_collections is ops.GraphKeys.UPDATE_OPS and
        not zero_debias_moving_mean):
      # Use the core layer class.
      axis = 1 if data_format == DATA_FORMAT_NCHW else -1
      if not param_initializers:
        param_initializers = {}
      beta_initializer = param_initializers.get('beta',
                                                init_ops.zeros_initializer())
      gamma_initializer = param_initializers.get('gamma',
                                                 init_ops.ones_initializer())
      moving_mean_initializer = param_initializers.get(
          'moving_mean', init_ops.zeros_initializer())
      moving_variance_initializer = param_initializers.get(
          'moving_variance', init_ops.ones_initializer())
      if not param_regularizers:
        param_regularizers = {}
      beta_regularizer = param_regularizers.get('beta')
      gamma_regularizer = param_regularizers.get('gamma')
      layer = normalization_layers.BatchNormalization(
          axis=axis,
          momentum=decay,
          epsilon=epsilon,
          center=center,
          scale=scale,
          beta_initializer=beta_initializer,
          gamma_initializer=gamma_initializer,
          moving_mean_initializer=moving_mean_initializer,
          moving_variance_initializer=moving_variance_initializer,
          beta_regularizer=beta_regularizer,
          gamma_regularizer=gamma_regularizer,
          trainable=trainable,
          renorm=renorm,
          renorm_clipping=renorm_clipping,
          renorm_momentum=renorm_decay,
          adjustment=adjustment,
          name=sc.name,
          _scope=sc,
          _reuse=reuse,
          fused=fused)
      outputs = layer.apply(inputs, training=is_training)

      # Add variables to collections.
      _add_variable_to_collections(layer.moving_mean, variables_collections,
                                   'moving_mean')
      _add_variable_to_collections(layer.moving_variance,
                                   variables_collections, 'moving_variance')
      if layer.beta is not None:
        _add_variable_to_collections(layer.beta, variables_collections,
                                     'beta')
      if layer.gamma is not None:
        _add_variable_to_collections(layer.gamma, variables_collections,
                                     'gamma')

      if activation_fn is not None:
        outputs = activation_fn(outputs)
      return utils.collect_named_outputs(outputs_collections, sc.name,
                                         outputs)

    # Not supported by layer class: batch_weights argument,
    # and custom updates_collections. In that case, use the legacy BN
    # implementation.
    # Custom updates collections are not supported because the update logic
    # is different in this case, in particular w.r.t. "forced updates" and
    # update op reuse.
    if renorm:
      raise ValueError('renorm is not supported with batch_weights, '
                       'updates_collections or zero_debias_moving_mean')
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims
    if inputs_rank is None:
      raise ValueError('Inputs %s has undefined rank.' % inputs.name)
    dtype = inputs.dtype.base_dtype
    if batch_weights is not None:
      batch_weights = ops.convert_to_tensor(batch_weights)
      inputs_shape[0:1].assert_is_compatible_with(batch_weights.get_shape())
      # Reshape batch weight values so they broadcast across inputs.
      nshape = [-1] + [1 for _ in range(inputs_rank - 1)]
      batch_weights = array_ops.reshape(batch_weights, nshape)

    if data_format == DATA_FORMAT_NCHW:
      moments_axes = [0] + list(range(2, inputs_rank))
      params_shape = inputs_shape[1:2]
      # For NCHW format, rather than relying on implicit broadcasting, we
      # explicitly reshape the params to params_shape_broadcast when computing
      # the moments and the batch normalization.
      params_shape_broadcast = list(
          [1, inputs_shape[1].value] + [1 for _ in range(2, inputs_rank)])
    else:
      moments_axes = list(range(inputs_rank - 1))
      params_shape = inputs_shape[-1:]
      params_shape_broadcast = None
    if not params_shape.is_fully_defined():
      raise ValueError('Inputs %s has undefined channels dimension %s.' %
                       (inputs.name, params_shape))

    # Allocate parameters for the beta and gamma of the normalization.
    beta, gamma = None, None
    if not param_initializers:
      param_initializers = {}
    if center:
      beta_collections = utils.get_variable_collections(variables_collections,
                                                        'beta')
      beta_initializer = param_initializers.get('beta',
                                                init_ops.zeros_initializer())
      beta = variables.model_variable(
          'beta',
          shape=params_shape,
          dtype=dtype,
          initializer=beta_initializer,
          collections=beta_collections,
          trainable=trainable)
    if scale:
      gamma_collections = utils.get_variable_collections(
          variables_collections, 'gamma')
      gamma_initializer = param_initializers.get('gamma',
                                                 init_ops.ones_initializer())
      gamma = variables.model_variable(
          'gamma',
          shape=params_shape,
          dtype=dtype,
          initializer=gamma_initializer,
          collections=gamma_collections,
          trainable=trainable)

    # Create moving_mean and moving_variance variables and add them to the
    # appropriate collections. We disable variable partitioning while creating
    # them, because assign_moving_average is not yet supported for partitioned
    # variables (this needs to be handled carefully, as it may break
    # the checkpoint backward compatibility).
    with variable_scope.variable_scope(
        variable_scope.get_variable_scope()) as local_scope:
      local_scope.set_partitioner(None)
      moving_mean_collections = utils.get_variable_collections(
          variables_collections, 'moving_mean')
      moving_mean_initializer = param_initializers.get(
          'moving_mean', init_ops.zeros_initializer())
      moving_mean = variables.model_variable(
          'moving_mean',
          shape=params_shape,
          dtype=dtype,
          initializer=moving_mean_initializer,
          trainable=False,
          collections=moving_mean_collections)
      moving_variance_collections = utils.get_variable_collections(
          variables_collections, 'moving_variance')
      moving_variance_initializer = param_initializers.get(
          'moving_variance', init_ops.ones_initializer())
      moving_variance = variables.model_variable(
          'moving_variance',
          shape=params_shape,
          dtype=dtype,
          initializer=moving_variance_initializer,
          trainable=False,
          collections=moving_variance_collections)

    # If `is_training` doesn't have a constant value, because it is a
    # `Tensor`, a `Variable` or `Placeholder` then is_training_value will be
    # None and `needs_moments` will be true.
    is_training_value = utils.constant_value(is_training)
    need_moments = is_training_value is None or is_training_value
    if need_moments:
      # Calculate the moments based on the individual batch.
      if batch_weights is None:
        if data_format == DATA_FORMAT_NCHW:
          # keep_dims, then flatten: moments come back broadcastable against
          # NCHW input, but the moving-average variables are 1-D.
          mean, variance = nn.moments(inputs, moments_axes, keep_dims=True)
          mean = array_ops.reshape(mean, [-1])
          variance = array_ops.reshape(variance, [-1])
        else:
          mean, variance = nn.moments(inputs, moments_axes)
      else:
        if data_format == DATA_FORMAT_NCHW:
          mean, variance = nn.weighted_moments(
              inputs, moments_axes, batch_weights, keepdims=True)
          mean = array_ops.reshape(mean, [-1])
          variance = array_ops.reshape(variance, [-1])
        else:
          mean, variance = nn.weighted_moments(inputs, moments_axes,
                                               batch_weights)

      moving_vars_fn = lambda: (moving_mean, moving_variance)
      if updates_collections is None:
        # Force the moving-average updates to run in place via control
        # dependencies on the returned mean/variance.
        def _force_updates():
          """Internal function forces updates moving_vars if is_training."""
          update_moving_mean = moving_averages.assign_moving_average(
              moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
          update_moving_variance = moving_averages.assign_moving_average(
              moving_variance, variance, decay, zero_debias=False)
          with ops.control_dependencies(
              [update_moving_mean, update_moving_variance]):
            return array_ops.identity(mean), array_ops.identity(variance)

        mean, variance = utils.smart_cond(is_training, _force_updates,
                                          moving_vars_fn)
      else:
        # Defer the updates: collect the update ops so the caller can run
        # them alongside the train op.
        def _delay_updates():
          """Internal function that delay updates moving_vars if is_training."""
          update_moving_mean = moving_averages.assign_moving_average(
              moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
          update_moving_variance = moving_averages.assign_moving_average(
              moving_variance, variance, decay, zero_debias=False)
          return update_moving_mean, update_moving_variance

        update_mean, update_variance = utils.smart_cond(
            is_training, _delay_updates, moving_vars_fn)
        ops.add_to_collections(updates_collections, update_mean)
        ops.add_to_collections(updates_collections, update_variance)
        # Use computed moments during training and moving_vars otherwise.
        vars_fn = lambda: (mean, variance)
        mean, variance = utils.smart_cond(is_training, vars_fn,
                                          moving_vars_fn)
    else:
      mean, variance = moving_mean, moving_variance
    if data_format == DATA_FORMAT_NCHW:
      mean = array_ops.reshape(mean, params_shape_broadcast)
      variance = array_ops.reshape(variance, params_shape_broadcast)
      if beta is not None:
        beta = array_ops.reshape(beta, params_shape_broadcast)
      if gamma is not None:
        gamma = array_ops.reshape(gamma, params_shape_broadcast)

    # Compute batch_normalization.
    outputs = nn.batch_normalization(inputs, mean, variance, beta, gamma,
                                     epsilon)
    outputs.set_shape(inputs_shape)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def bias_add(inputs,
             activation_fn=None,
             initializer=init_ops.zeros_initializer(),
             regularizer=None,
             reuse=None,
             variables_collections=None,
             outputs_collections=None,
             trainable=True,
             data_format=DATA_FORMAT_NHWC,
             scope=None):
  """Creates a `biases` variable and adds it to `inputs`.

  Usable as a `normalizer_fn` for `conv2d` and `fully_connected`.

  Args:
    inputs: A tensor of rank at least 2 whose channel dimension is statically
      known, e.g. `[batch_size, depth]` or `[None, None, None, depth]`.
    activation_fn: Optional activation applied after the bias; `None` (the
      default) keeps the output linear.
    initializer: Initializer for the bias variable; defaults to zeros.
    regularizer: Optional regularizer for the bias (e.g. the result of
      `l1_regularizer` or `l2_regularizer`).
    reuse: Whether to reuse the layer's variables; requires `scope` to be set.
    variables_collections: Optional collections for the bias variable.
    outputs_collections: Collections the output tensor is added to.
    trainable: If `True`, the bias is added to
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    data_format: Either 'NHWC' or 'NCHW'.
    scope: Optional scope for `variable_scope`.

  Returns:
    A tensor representing `inputs` plus the bias.

  Raises:
    ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If `data_format` is `NCHW` and rank of `inputs` is not 4.
    ValueError: If the rank of `inputs` is undefined.
    ValueError: If rank or `C` dimension of `inputs` is undefined.
  """
  # Validate before creating any scope or variables.
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with variable_scope.variable_scope(
      scope, 'BiasAdd', [inputs], reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    shape = inputs.get_shape()
    rank = shape.ndims
    if rank is None:
      raise ValueError('Dims of shape must be known but is None')
    if data_format == DATA_FORMAT_NCHW and rank != 4:
      raise ValueError('Data format NCHW only supports 4D Tensor')
    # Channel axis is second for NCHW, last for NHWC.
    channel_axis = 1 if data_format == DATA_FORMAT_NCHW else -1
    depth = shape[channel_axis].value
    if depth is None:
      raise ValueError('`C` dimension must be known but is None')
    bias_collections = utils.get_variable_collections(variables_collections,
                                                      'biases')
    biases = variables.model_variable(
        'biases',
        shape=[depth],
        dtype=dtype,
        initializer=initializer,
        regularizer=regularizer,
        collections=bias_collections,
        trainable=trainable)
    outputs = nn.bias_add(inputs, biases, data_format=data_format)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
# TODO(jbms): change `rate` parameter to `dilation_rate` for consistency with
# underlying op.
@add_arg_scope
def convolution(inputs,
                num_outputs,
                kernel_size,
                stride=1,
                padding='SAME',
                data_format=None,
                rate=1,
                activation_fn=nn.relu,
                normalizer_fn=None,
                normalizer_params=None,
                weights_initializer=initializers.xavier_initializer(),
                weights_regularizer=None,
                biases_initializer=init_ops.zeros_initializer(),
                biases_regularizer=None,
                reuse=None,
                variables_collections=None,
                outputs_collections=None,
                trainable=True,
                scope=None,
                conv_dims=None):
  """Adds an N-D convolution followed by an optional batch_norm layer.

  It is required that 1 <= N <= 3.

  `convolution` creates a variable called `weights`, representing the
  convolutional kernel, that is convolved (actually cross-correlated) with the
  `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
  provided (such as `batch_norm`), it is then applied. Otherwise, if
  `normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added the activations. Finally, if
  `activation_fn` is not `None`, it is applied to the activations as well.

  Performs atrous convolution with input stride/dilation rate equal to `rate`
  if a value > 1 for any dimension of `rate` is specified.  In this case
  `stride` values != 1 are not supported.

  Args:
    inputs: A Tensor of rank N+2 of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format starts
      with "NC".
    num_outputs: Integer, the number of output filters.
    kernel_size: A sequence of N positive integers specifying the spatial
      dimensions of the filters.  Can be a single integer to specify the same
      value for all spatial dimensions.
    stride: A sequence of N positive integers specifying the stride at which to
      compute output.  Can be a single integer to specify the same value for all
      spatial dimensions.  Specifying any `stride` value != 1 is incompatible
      with specifying any `rate` value != 1.
    padding: One of `"VALID"` or `"SAME"`.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    rate: A sequence of N positive integers specifying the dilation rate to use
      for atrous convolution.  Can be a single integer to specify the same
      value for all spatial dimensions.  Specifying any `rate` value != 1 is
      incompatible with specifying any `stride` value != 1.
    activation_fn: Activation function. The default value is a ReLU function.
      Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: Collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for `variable_scope`.
    conv_dims: Optional convolution dimensionality, when set it would use the
      corresponding convolution (e.g. 2 for Conv 2D, 3 for Conv 3D, ..). When
      leaved to None it would select the convolution dimensionality based on
      the input rank (i.e. Conv ND, with N = input_rank - 2).

  Returns:
    A tensor representing the output of the operation.

  Raises:
    ValueError: If `data_format` is invalid.
    ValueError: If the rank of `inputs` is unknown or not in [3, 5].
    ValueError: Both 'rate' and `stride` are not uniformly 1.
  """
  if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
    raise ValueError('Invalid data_format: %r' % (data_format,))

  # Core layers name their variables `kernel`/`bias`; rename them to the
  # contrib convention `weights`/`biases` so checkpoints stay compatible.
  layer_variable_getter = _build_variable_getter({
      'bias': 'biases',
      'kernel': 'weights'
  })

  with variable_scope.variable_scope(
      scope, 'Conv', [inputs], reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    input_rank = inputs.get_shape().ndims
    if input_rank is None:
      # An unknown static rank makes it impossible to choose the convolution
      # dimensionality (previously this surfaced as a confusing TypeError
      # when formatting the error message below).
      raise ValueError('Rank of inputs must be known')
    if conv_dims is not None and conv_dims + 2 != input_rank:
      raise ValueError('Convolution expects input with rank %d, got %d' %
                       (conv_dims + 2, input_rank))
    if input_rank == 3:
      layer_class = convolutional_layers.Convolution1D
    elif input_rank == 4:
      layer_class = convolutional_layers.Convolution2D
    elif input_rank == 5:
      layer_class = convolutional_layers.Convolution3D
    else:
      # Formatted message for consistency with the other raises above.
      raise ValueError('Convolution not supported for input with rank %d' %
                       input_rank)
    df = ('channels_first'
          if data_format and data_format.startswith('NC') else 'channels_last')
    layer = layer_class(
        filters=num_outputs,
        kernel_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=df,
        dilation_rate=rate,
        activation=None,
        # Biases are redundant when a normalizer replaces them, and are
        # skipped entirely if no initializer is given.  Pass a real bool
        # rather than the (truthy) initializer object.
        use_bias=normalizer_fn is None and biases_initializer is not None,
        kernel_initializer=weights_initializer,
        bias_initializer=biases_initializer,
        kernel_regularizer=weights_regularizer,
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        trainable=trainable,
        name=sc.name,
        dtype=inputs.dtype.base_dtype,
        _scope=sc,
        _reuse=reuse)
    outputs = layer.apply(inputs)

    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.use_bias:
      _add_variable_to_collections(layer.bias, variables_collections, 'biases')

    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)

    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def convolution1d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  # Thin wrapper: delegate to the generic N-D `convolution` with the
  # dimensionality pinned to 1 (expects rank-3 inputs).
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride=stride,
      padding=padding,
      data_format=data_format,
      rate=rate,
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,
      normalizer_params=normalizer_params,
      weights_initializer=weights_initializer,
      weights_regularizer=weights_regularizer,
      biases_initializer=biases_initializer,
      biases_regularizer=biases_regularizer,
      reuse=reuse,
      variables_collections=variables_collections,
      outputs_collections=outputs_collections,
      trainable=trainable,
      scope=scope,
      conv_dims=1)

# Share the generic convolution docstring.
convolution1d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution2d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  # Thin wrapper: delegate to the generic N-D `convolution` with the
  # dimensionality pinned to 2 (expects rank-4 inputs).
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride=stride,
      padding=padding,
      data_format=data_format,
      rate=rate,
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,
      normalizer_params=normalizer_params,
      weights_initializer=weights_initializer,
      weights_regularizer=weights_regularizer,
      biases_initializer=biases_initializer,
      biases_regularizer=biases_regularizer,
      reuse=reuse,
      variables_collections=variables_collections,
      outputs_collections=outputs_collections,
      trainable=trainable,
      scope=scope,
      conv_dims=2)

# Share the generic convolution docstring.
convolution2d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution3d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  # Thin wrapper: delegate to the generic N-D `convolution` with the
  # dimensionality pinned to 3 (expects rank-5 inputs).
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride=stride,
      padding=padding,
      data_format=data_format,
      rate=rate,
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,
      normalizer_params=normalizer_params,
      weights_initializer=weights_initializer,
      weights_regularizer=weights_regularizer,
      biases_initializer=biases_initializer,
      biases_regularizer=biases_regularizer,
      reuse=reuse,
      variables_collections=variables_collections,
      outputs_collections=outputs_collections,
      trainable=trainable,
      scope=scope,
      conv_dims=3)

# Share the generic convolution docstring.
convolution3d.__doc__ = convolution.__doc__
@add_arg_scope
def convolution2d_in_plane(
    inputs,
    kernel_size,
    stride=1,
    padding='SAME',
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Performs the same in-plane convolution to each channel independently.

  This is useful for performing various simple channel-independent convolution
  operations such as image gradients:

    image = tf.constant(..., shape=(16, 240, 320, 3))
    vert_gradients = layers.conv2d_in_plane(image,
                                            kernel=[1, -1],
                                            kernel_size=[2, 1])
    horz_gradients = layers.conv2d_in_plane(image,
                                            kernel=[1, -1],
                                            kernel_size=[1, 2])

  Args:
    inputs: A 4-D tensor with dimensions [batch_size, height, width, channels].
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the filter. Can be an int if both values are the same.
    stride: A list of length 2 `[stride_height, stride_width]`.
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: The padding type to use, either 'SAME' or 'VALID'.
    activation_fn: Activation function. The default value is a ReLU function.
      Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: Collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for `variable_scope`.

  Returns:
    A `Tensor` representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:
    dtype = inputs.dtype.base_dtype
    kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
    stride_h, stride_w = utils.two_element_tuple(stride)
    num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
    # A single [h, w, 1, 1] kernel: one spatial filter shared by all channels.
    weights_shape = [kernel_h, kernel_w, 1, 1]
    weights_collections = utils.get_variable_collections(
        variables_collections, 'weights')
    weights = variables.model_variable(
        'weights',
        shape=weights_shape,
        dtype=dtype,
        initializer=weights_initializer,
        regularizer=weights_regularizer,
        collections=weights_collections,
        trainable=trainable)
    # Tile the shared kernel across the channel axis and apply it with a
    # depthwise convolution, so each channel is filtered independently with
    # the identical (shared) weights.
    depthwise_weights = array_ops.tile(weights, [1, 1, num_filters_in, 1])
    outputs = nn.depthwise_conv2d(inputs, depthwise_weights,
                                  [1, stride_h, stride_w, 1], padding)
    if normalizer_fn is not None:
      # A normalizer replaces the bias term.
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)
    else:
      if biases_initializer is not None:
        biases_collections = utils.get_variable_collections(
            variables_collections, 'biases')
        biases = variables.model_variable(
            'biases',
            shape=[
                num_filters_in,
            ],
            dtype=dtype,
            initializer=biases_initializer,
            regularizer=biases_regularizer,
            collections=biases_collections,
            trainable=trainable)
        outputs = nn.bias_add(outputs, biases)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def convolution2d_transpose(
    inputs,
    num_outputs,
    kernel_size,
    stride=1,
    padding='SAME',
    data_format=DATA_FORMAT_NHWC,
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Adds a convolution2d_transpose with an optional batch normalization layer.

  The function creates a variable called `weights`, representing the
  kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
  second variable called 'biases' is added to the result of the operation.

  Args:
    inputs: A 4-D `Tensor` of type `float` and shape
      `[batch, height, width, in_channels]` for `NHWC` data format or
      `[batch, in_channels, height, width]` for `NCHW` data format.
    num_outputs: Integer, the number of output filters.
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the filters. Can be an int if both values are the same.
    stride: A list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same.  Note that presently
      both strides must have the same value.
    padding: One of 'VALID' or 'SAME'.
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    activation_fn: Activation function. The default value is a ReLU function.
      Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: Collection to add the outputs.
    trainable: Whether or not the variables should be trainable or not.
    scope: Optional scope for variable_scope.

  Returns:
    A tensor representing the output of the operation.

  Raises:
    ValueError: If 'kernel_size' is not a list of length 2.
    ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If `C` dimension of `inputs` is None.
  """
  # Core layers name their variables `kernel`/`bias`; rename them to the
  # contrib convention `weights`/`biases` for checkpoint compatibility.
  layer_variable_getter = _build_variable_getter({
      'bias': 'biases',
      'kernel': 'weights'
  })

  with variable_scope.variable_scope(
      scope,
      'Conv2d_transpose', [inputs],
      reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
      raise ValueError('data_format has to be either NCHW or NHWC.')

    inputs = ops.convert_to_tensor(inputs)

    df = ('channels_first'
          if data_format and data_format.startswith('NC') else 'channels_last')
    # Delegate the actual op to the core Keras-style layer; `_scope`/`_reuse`
    # keep variable creation inside this function's variable scope.
    layer = convolutional_layers.Convolution2DTranspose(
        filters=num_outputs,
        kernel_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=df,
        activation=None,
        # Skip the bias when a normalizer replaces it, or when no bias
        # initializer is supplied.
        use_bias=not normalizer_fn and biases_initializer,
        kernel_initializer=weights_initializer,
        bias_initializer=biases_initializer,
        kernel_regularizer=weights_regularizer,
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        trainable=trainable,
        name=sc.name,
        dtype=inputs.dtype.base_dtype,
        _scope=sc,
        _reuse=reuse)
    outputs = layer.apply(inputs)

    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.bias is not None:
      _add_variable_to_collections(layer.bias, variables_collections, 'biases')

    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)

    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def convolution3d_transpose(
    inputs,
    num_outputs,
    kernel_size,
    stride=1,
    padding='SAME',
    data_format=DATA_FORMAT_NDHWC,
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Adds a convolution3d_transpose with an optional batch normalization layer.

  The function creates a variable called `weights`, representing the
  kernel, that is convolved with the input. If `normalizer_fn` is `None`, a
  second variable called 'biases' is added to the result of the operation.

  Args:
    inputs: A 5-D `Tensor` of type `float` and shape
      `[batch, depth, height, width, in_channels]` for `NDHWC` data format or
      `[batch, in_channels, depth, height, width]` for `NCDHW` data format.
    num_outputs: Integer, the number of output filters.
    kernel_size: A list of length 3 holding the [kernel_depth, kernel_height,
      kernel_width] of the filters. Can be an int if all values are the same.
    stride: A list of length 3: [stride_depth, stride_height, stride_width].
      Can be an int if all strides are the same.  Note that presently
      all strides must have the same value.
    padding: One of 'VALID' or 'SAME'.
    data_format: A string. `NDHWC` (default) and `NCDHW` are supported.
    activation_fn: Activation function. The default value is a ReLU function.
      Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: Collection to add the outputs.
    trainable: Whether or not the variables should be trainable or not.
    scope: Optional scope for variable_scope.

  Returns:
    A tensor representing the output of the operation.

  Raises:
    ValueError: If 'kernel_size' is not a list of length 3.
    ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
    ValueError: If `C` dimension of `inputs` is None.
  """
  # Core layers name their variables `kernel`/`bias`; rename them to the
  # contrib convention `weights`/`biases` for checkpoint compatibility.
  layer_variable_getter = _build_variable_getter({
      'bias': 'biases',
      'kernel': 'weights'
  })

  with variable_scope.variable_scope(
      scope,
      'Conv3d_transpose', [inputs],
      reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
      raise ValueError('data_format has to be either NCDHW or NDHWC.')

    inputs = ops.convert_to_tensor(inputs)

    df = ('channels_first'
          if data_format and data_format.startswith('NC') else 'channels_last')
    # Delegate the actual op to the core Keras-style layer; `_scope`/`_reuse`
    # keep variable creation inside this function's variable scope.
    layer = convolutional_layers.Convolution3DTranspose(
        filters=num_outputs,
        kernel_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=df,
        activation=None,
        # Skip the bias when a normalizer replaces it, or when no bias
        # initializer is supplied.
        use_bias=not normalizer_fn and biases_initializer,
        kernel_initializer=weights_initializer,
        bias_initializer=biases_initializer,
        kernel_regularizer=weights_regularizer,
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        trainable=trainable,
        name=sc.name,
        dtype=inputs.dtype.base_dtype,
        _scope=sc,
        _reuse=reuse)
    outputs = layer.apply(inputs)

    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.bias is not None:
      _add_variable_to_collections(layer.bias, variables_collections, 'biases')

    if normalizer_fn is not None:
      normalizer_params = normalizer_params or {}
      outputs = normalizer_fn(outputs, **normalizer_params)

    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
    tensor: An `int` `Tensor` to be converted to a `Sparse`.
    eos_token: An integer.
      It is part of the target label that signifies the end of a sentence.
    outputs_collections: Collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    A `SparseTensor` holding the non-`eos_token` entries of `tensor`.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    # Positions of every entry that is not the end-of-sentence token.
    eos = constant_op.constant(eos_token, tensor.dtype)
    indices = array_ops.where(math_ops.not_equal(tensor, eos))
    values = array_ops.gather_nd(tensor, indices)
    dense_shape = array_ops.shape(tensor, out_type=dtypes.int64)
    sparse = sparse_tensor.SparseTensor(indices, values, dense_shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, sparse)
@add_arg_scope
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None,
            seed=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`.  The scaling is so that the expected
  sum is unchanged.

  Args:
    inputs: The tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: Collection to add the outputs.
    scope: Optional scope for name_scope.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.

  Returns:
    A tensor representing the output of the operation.
  """
  with variable_scope.variable_scope(
      scope, 'Dropout', [inputs], custom_getter=_model_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    # The core layer takes a drop rate, not a keep probability.
    dropout_layer = core_layers.Dropout(
        rate=1 - keep_prob,
        noise_shape=noise_shape,
        seed=seed,
        name=sc.name,
        _scope=sc)
    outputs = dropout_layer.apply(inputs, training=is_training)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def flatten(inputs, outputs_collections=None, scope=None):
  """Flattens the input while maintaining the batch_size.

    Assumes that the first dimension represents the batch.

  Args:
    inputs: A tensor of size [batch_size, ...].
    outputs_collections: Collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    A flattened tensor with shape [batch_size, k].
  Raises:
    ValueError: If inputs rank is unknown or less than 2.
  """
  with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
    # Delegate to the core layer, which collapses all non-batch dims.
    flattened = core_layers.flatten(ops.convert_to_tensor(inputs))
    return utils.collect_named_outputs(outputs_collections, sc, flattened)
def _sparse_inner_flatten(inputs, new_rank):
  """Flattens a `SparseTensor`'s innermost dims; helper for `inner_flatten`."""
  rank = inputs.dense_shape.get_shape().as_list()[0]
  if rank < new_rank:
    raise ValueError(
        'Inputs has rank less than new_rank. {} must have rank at least'
        ' {}. Received rank {}, shape {}'.format(inputs, new_rank, rank,
                                                 inputs.get_shape()))

  # Keep the first `new_rank - 1` dims; collapse the rest into one dim.
  kept_dims = inputs.dense_shape[:new_rank - 1]
  collapsed_dim = math_ops.reduce_prod(inputs.dense_shape[new_rank - 1:])
  target_shape = array_ops.concat((kept_dims, [collapsed_dim]), 0)
  return sparse_ops.sparse_reshape(inputs, target_shape)
def _dense_inner_flatten(inputs, new_rank):
  """Flattens a dense `Tensor`'s innermost dims; helper for `inner_flatten`."""
  # Fail at run time (not silently reshape) if the input rank is too small.
  rank_assertion = check_ops.assert_rank_at_least(
      inputs, new_rank, message='inputs has rank less than new_rank')
  with ops.control_dependencies([rank_assertion]):
    # First `new_rank - 1` dims are preserved; -1 collapses the remainder.
    outer_dimensions = array_ops.strided_slice(
        array_ops.shape(inputs), [0], [new_rank - 1])
    new_shape = array_ops.concat((outer_dimensions, [-1]), 0)
    reshaped = array_ops.reshape(inputs, new_shape)

  # if `new_rank` is an integer, try to calculate new shape.
  # (Propagates whatever static shape information is available so downstream
  # shape inference still works after the dynamic reshape above.)
  if isinstance(new_rank, six.integer_types):
    static_shape = inputs.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      static_shape = static_shape.as_list()
      static_outer_dims = static_shape[:new_rank - 1]
      static_inner_dims = static_shape[new_rank - 1:]
      flattened_dimension = 1
      for inner_dim in static_inner_dims:
        if inner_dim is None:
          # Any unknown inner dim makes the flattened dim unknown too.
          flattened_dimension = None
          break
        flattened_dimension *= inner_dim
      reshaped.set_shape(static_outer_dims + [flattened_dimension])
  return reshaped
@add_arg_scope
def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
  """Flattens inner dimensions of `inputs`, returns a Tensor with `new_rank`.

  For example:
  '''
      x = tf.random_uniform(shape=[1, 2, 3, 4, 5, 6])
      y = _inner_flatten(x, 4)
      assert y.get_shape().as_list() == [1, 2, 3, (4 * 5 * 6)]
  '''
  This layer will fail at run time if `new_rank` is greater than the current
  rank of `inputs`.

  Args:
    inputs: A `Tensor` or `SparseTensor`.
    new_rank: The desired rank of the returned `Tensor` or `SparseTensor`.
    output_collections: Collection to which the outputs will be added.
    scope: Optional scope for `name_scope`.
  Returns:
    A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
    with innermost dimensions flattened to obtain rank `new_rank`.

  Raises:
    TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
  """
  with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:
    # Dispatch on the tensor kind; both helpers implement the same contract.
    if isinstance(inputs, sparse_tensor.SparseTensor):
      result = _sparse_inner_flatten(inputs, new_rank)
    else:
      result = _dense_inner_flatten(ops.convert_to_tensor(inputs), new_rank)
    return utils.collect_named_outputs(output_collections, sc, result)
def _model_variable_getter(
    getter,
    name,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=True,
    collections=None,
    caching_device=None,
    partitioner=None,
    rename=None,
    use_resource=None,
    synchronization=tf_variables.VariableSynchronization.AUTO,
    aggregation=tf_variables.VariableAggregation.NONE,
    **_):
  """Getter that uses model_variable for compatibility with core layers.

  If `rename` maps the variable's leaf name (the component after the last
  '/'), the variable is created under the renamed path instead.  Any extra
  keyword arguments are silently ignored.
  """
  if rename:
    path = name.split('/')
    leaf = path[-1]
    if leaf in rename:
      path[-1] = rename[leaf]
      name = '/'.join(path)
  return variables.model_variable(
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      collections=collections,
      trainable=trainable,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=getter,
      use_resource=use_resource,
      synchronization=synchronization,
      aggregation=aggregation)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
  """Adds variable (or all its parts) to all collections with that name.

  A `PartitionedVariable` is expanded into its parts so each part is added
  individually; duplicates already present in a collection are skipped.
  """
  collections = utils.get_variable_collections(collections_set,
                                               collections_name) or []
  if isinstance(variable, tf_variables.PartitionedVariable):
    variables_list = list(variable)
  else:
    variables_list = [variable]
  for collection in collections:
    for var in variables_list:
      # Guard against double-insertion when called more than once.
      if var not in ops.get_collection(collection):
        ops.add_to_collection(collection, var)
@add_arg_scope
def fully_connected(inputs,
                    num_outputs,
                    activation_fn=nn.relu,
                    normalizer_fn=None,
                    normalizer_params=None,
                    weights_initializer=initializers.xavier_initializer(),
                    weights_regularizer=None,
                    biases_initializer=init_ops.zeros_initializer(),
                    biases_regularizer=None,
                    reuse=None,
                    variables_collections=None,
                    outputs_collections=None,
                    trainable=True,
                    scope=None):
  """Adds a fully connected layer.
  `fully_connected` creates a variable called `weights`, representing a fully
  connected weight matrix, which is multiplied by the `inputs` to produce a
  `Tensor` of hidden units. If a `normalizer_fn` is provided (such as
  `batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
  None and a `biases_initializer` is provided then a `biases` variable would be
  created and added the hidden units. Finally, if `activation_fn` is not `None`,
  it is applied to the hidden units as well.
  Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened
  prior to the initial matrix multiply by `weights`.
  Args:
    inputs: A tensor of at least rank 2 and static value for the last dimension;
      i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
    num_outputs: Integer or long, the number of output units in the layer.
    activation_fn: Activation function. The default value is a ReLU function.
      Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the weights.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collections per variable.
    outputs_collections: Collection to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.
  Returns:
    The tensor variable representing the result of the series of operations.
  Raises:
    ValueError: If x has rank less than 2 or if its last dimension is not set.
  """
  if not isinstance(num_outputs, six.integer_types):
    raise ValueError('num_outputs should be int or long, got %s.' %
                     (num_outputs,))
  # The core Dense layer names its variables 'kernel' and 'bias'; rename them
  # to the contrib-layers conventions 'weights' and 'biases'.
  layer_variable_getter = _build_variable_getter({
      'bias': 'biases',
      'kernel': 'weights'
  })
  with variable_scope.variable_scope(
      scope,
      'fully_connected', [inputs],
      reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    # The Dense layer does the matmul only; activation and normalization are
    # applied below so the normalizer can run *before* the activation.
    # `use_bias` is truthy iff no normalizer is set and a biases_initializer
    # was supplied.
    layer = core_layers.Dense(
        units=num_outputs,
        activation=None,
        use_bias=not normalizer_fn and biases_initializer,
        kernel_initializer=weights_initializer,
        bias_initializer=biases_initializer,
        kernel_regularizer=weights_regularizer,
        bias_regularizer=biases_regularizer,
        activity_regularizer=None,
        trainable=trainable,
        name=sc.name,
        dtype=inputs.dtype.base_dtype,
        _scope=sc,
        _reuse=reuse)
    outputs = layer.apply(inputs)
    # Add variables to collections.
    _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
    if layer.bias is not None:
      _add_variable_to_collections(layer.bias, variables_collections, 'biases')
    # Apply normalizer function / layer.
    if normalizer_fn is not None:
      if not normalizer_params:
        normalizer_params = {}
      outputs = normalizer_fn(outputs, **normalizer_params)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
class GDN(base.Layer):
  """Generalized divisive normalization layer.

  Based on the papers:

    "Density Modeling of Images using a Generalized Normalization
    Transformation"
    Johannes Ballé, Valero Laparra, Eero P. Simoncelli
    https://arxiv.org/abs/1511.06281

    "End-to-end Optimized Image Compression"
    Johannes Ballé, Valero Laparra, Eero P. Simoncelli
    https://arxiv.org/abs/1611.01704

  Implements an activation function that is essentially a multivariate
  generalization of a particular sigmoid-type function:

  ```
  y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]))
  ```

  where `i` and `j` run over channels. This implementation never sums across
  spatial dimensions. It is similar to local response normalization, but much
  more flexible, as `beta` and `gamma` are trainable parameters.

  Arguments:
    inverse: If `False` (default), compute GDN response. If `True`, compute IGDN
      response (one step of fixed point iteration to invert GDN; the division
      is replaced by multiplication).
    beta_min: Lower bound for beta, to prevent numerical error from causing
      square root of zero or negative values.
    gamma_init: The gamma matrix will be initialized as the identity matrix
      multiplied with this value. If set to zero, the layer is effectively
      initialized to the identity operation, since beta is initialized as one.
      A good default setting is somewhere between 0 and 0.5.
    reparam_offset: Offset added to the reparameterization of beta and gamma.
      The reparameterization of beta and gamma as their square roots lets the
      training slow down when their values are close to zero, which is desirable
      as small values in the denominator can lead to a situation where gradient
      noise on beta/gamma leads to extreme amounts of noise in the GDN
      activations. However, without the offset, we would get zero gradients if
      any elements of beta or gamma were exactly zero, and thus the training
      could get stuck. To prevent this, we add this small constant. The default
      value was empirically determined as a good starting point. Making it
      bigger potentially leads to more gradient noise on the activations, making
      it too small may lead to numerical precision issues.
    data_format: Format of input tensor. Currently supports `'channels_first'`
      and `'channels_last'`.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True`, also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require `reuse=True` in such
      cases.

  Properties:
    inverse: Boolean, whether GDN is computed (`True`) or IGDN (`False`).
    data_format: Format of input tensor. Currently supports `'channels_first'`
      and `'channels_last'`.
    beta: The beta parameter as defined above (1D `Tensor`).
    gamma: The gamma parameter as defined above (2D `Tensor`).
  """

  def __init__(self,
               inverse=False,
               beta_min=1e-6,
               gamma_init=.1,
               reparam_offset=2**-18,
               data_format='channels_last',
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(GDN, self).__init__(
        trainable=trainable,
        name=name,
        activity_regularizer=activity_regularizer,
        **kwargs)
    self.inverse = inverse
    self._beta_min = beta_min
    self._gamma_init = gamma_init
    self._reparam_offset = reparam_offset
    self.data_format = data_format
    self._channel_axis()  # trigger ValueError early
    self.input_spec = base.InputSpec(min_ndim=3, max_ndim=5)

  def _channel_axis(self):
    """Returns the channel axis index; raises ValueError for bad formats."""
    try:
      return {'channels_first': 1, 'channels_last': -1}[self.data_format]
    except KeyError:
      raise ValueError('Unsupported `data_format` for GDN layer: {}.'.format(
          self.data_format))

  @staticmethod
  def _lower_bound(inputs, bound, name=None):
    """Same as tf.maximum, but with helpful gradient for inputs < bound.

    The gradient is overwritten so that it is passed through if the input is
    not hitting the bound. If it is, only gradients that push `inputs` higher
    than the bound are passed through. No gradients are passed through to the
    bound.

    Args:
      inputs: input tensor
      bound: lower bound for the input tensor
      name: name for this op

    Returns:
      tf.maximum(inputs, bound)
    """
    with ops.name_scope(name, 'GDNLowerBound', [inputs, bound]) as scope:
      inputs = ops.convert_to_tensor(inputs, name='inputs')
      bound = ops.convert_to_tensor(bound, name='bound')
      # 'GDNLowerBound' is registered at module level (see the
      # ops.RegisterGradient call after this class).
      with ops.get_default_graph().gradient_override_map({
          'Maximum': 'GDNLowerBound'
      }):
        return math_ops.maximum(inputs, bound, name=scope)

  @staticmethod
  def _lower_bound_grad(op, grad):
    """Gradient for `_lower_bound`.

    Args:
      op: the tensorflow op for which to calculate a gradient
      grad: gradient with respect to the output of the op

    Returns:
      gradients with respect to the inputs of the op
    """
    inputs = op.inputs[0]
    bound = op.inputs[1]
    # Pass through when unclipped, or when the gradient would push the input
    # back above the bound (grad < 0 increases the input under descent).
    pass_through_if = math_ops.logical_or(inputs >= bound, grad < 0)
    return [math_ops.cast(pass_through_if, grad.dtype) * grad, None]

  def build(self, input_shape):
    channel_axis = self._channel_axis()
    input_shape = tensor_shape.TensorShape(input_shape)
    num_channels = input_shape[channel_axis].value
    if num_channels is None:
      raise ValueError('The channel dimension of the inputs to `GDN` '
                       'must be defined.')
    self._input_rank = input_shape.ndims
    self.input_spec = base.InputSpec(
        ndim=input_shape.ndims, axes={
            channel_axis: num_channels
        })

    # beta/gamma are stored reparameterized as sqrt(value + pedestal); they
    # are squared (and the pedestal subtracted) on use. See the class
    # docstring for the rationale behind `reparam_offset`.
    pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
    beta_bound = array_ops.constant(
        (self._beta_min + self._reparam_offset**2)**.5, dtype=self.dtype)
    gamma_bound = array_ops.constant(self._reparam_offset, dtype=self.dtype)

    def beta_initializer(shape, dtype=None, partition_info=None):
      """Initializes reparameterized beta so that effective beta == 1."""
      del partition_info  # unused
      pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
      return math_ops.sqrt(array_ops.ones(shape, dtype=dtype) + pedestal)

    def gamma_initializer(shape, dtype=None, partition_info=None):
      """Initializes reparam. gamma so effective gamma == gamma_init * I."""
      del partition_info  # unused
      assert len(shape) == 2
      assert shape[0] == shape[1]
      eye = linalg_ops.eye(shape[0], dtype=dtype)
      pedestal = array_ops.constant(self._reparam_offset**2, dtype=self.dtype)
      return math_ops.sqrt(self._gamma_init * eye + pedestal)

    beta = self.add_variable(
        'reparam_beta',
        shape=[num_channels],
        initializer=beta_initializer,
        dtype=self.dtype,
        trainable=True)
    beta = self._lower_bound(beta, beta_bound)
    self.beta = math_ops.square(beta) - pedestal

    gamma = self.add_variable(
        'reparam_gamma',
        shape=[num_channels, num_channels],
        initializer=gamma_initializer,
        dtype=self.dtype,
        trainable=True)
    gamma = self._lower_bound(gamma, gamma_bound)
    self.gamma = math_ops.square(gamma) - pedestal

    self.built = True

  def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    ndim = self._input_rank

    # Treat gamma as a 1x1(x1) convolution kernel over channels.
    shape = self.gamma.get_shape().as_list()
    gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)

    # Compute normalization pool.
    if self.data_format == 'channels_first':
      norm_pool = nn.convolution(
          math_ops.square(inputs),
          gamma,
          'VALID',
          data_format='NC' + 'DHW'[-(ndim - 2):])
      # bias_add in NCHW mode only handles 4-D tensors, so reshape around it
      # for 3-D and 5-D inputs.
      if ndim == 3:
        norm_pool = array_ops.expand_dims(norm_pool, 2)
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
        norm_pool = array_ops.squeeze(norm_pool, [2])
      elif ndim == 5:
        shape = array_ops.shape(norm_pool)
        # Fixed: `shape` is a 1-D tensor, so the flattened target shape must
        # be built with concat; `shape[:3] + [-1]` would be *elementwise
        # addition* (subtracting 1 from the first three dims), not
        # concatenation, and the reshape would fail at run time.
        flat_shape = array_ops.concat([shape[:3], [-1]], axis=0)
        norm_pool = array_ops.reshape(norm_pool, flat_shape)
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
        norm_pool = array_ops.reshape(norm_pool, shape)
      else:  # ndim == 4
        norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NCHW')
    else:  # channels_last
      norm_pool = nn.convolution(math_ops.square(inputs), gamma, 'VALID')
      norm_pool = nn.bias_add(norm_pool, self.beta, data_format='NHWC')
    norm_pool = math_ops.sqrt(norm_pool)

    if self.inverse:
      outputs = inputs * norm_pool
    else:
      outputs = inputs / norm_pool
    outputs.set_shape(inputs.get_shape())
    return outputs

  def compute_output_shape(self, input_shape):
    channel_axis = self._channel_axis()
    input_shape = tensor_shape.TensorShape(input_shape)
    # Fixed: TensorShape exposes `.ndims` (used elsewhere in this class),
    # not `.ndim`; the previous attribute access raised AttributeError.
    if not 3 <= input_shape.ndims <= 5:
      raise ValueError('`input_shape` must be of rank 3 to 5, inclusive.')
    if input_shape[channel_axis].value is None:
      raise ValueError(
          'The channel dimension of `input_shape` must be defined.')
    return input_shape
# Register the custom gradient so that GDN._lower_bound's
# gradient_override_map({'Maximum': 'GDNLowerBound'}) resolves to it.
ops.RegisterGradient('GDNLowerBound')(GDN._lower_bound_grad)  # pylint:disable=protected-access
def gdn(inputs,
        inverse=False,
        beta_min=1e-6,
        gamma_init=.1,
        reparam_offset=2**-18,
        data_format='channels_last',
        activity_regularizer=None,
        trainable=True,
        name=None,
        reuse=None):
  """Functional interface for GDN layer.

  Based on the papers:

    "Density Modeling of Images using a Generalized Normalization
    Transformation"
    Johannes Ballé, Valero Laparra, Eero P. Simoncelli
    https://arxiv.org/abs/1511.06281

    "End-to-end Optimized Image Compression"
    Johannes Ballé, Valero Laparra, Eero P. Simoncelli
    https://arxiv.org/abs/1611.01704

  Implements an activation function that is essentially a multivariate
  generalization of a particular sigmoid-type function:

  ```
  y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]))
  ```

  where `i` and `j` run over channels. This implementation never sums across
  spatial dimensions. It is similar to local response normalization, but much
  more flexible, as `beta` and `gamma` are trainable parameters.

  Args:
    inputs: Tensor input.
    inverse: If `False` (default), compute GDN response. If `True`, compute
      IGDN response (one step of fixed point iteration to invert GDN; the
      division is replaced by multiplication).
    beta_min: Lower bound for beta, to prevent numerical error from causing
      square root of zero or negative values.
    gamma_init: The gamma matrix will be initialized as the identity matrix
      multiplied with this value. If set to zero, the layer is effectively
      initialized to the identity operation, since beta is initialized as one.
      A good default setting is somewhere between 0 and 0.5.
    reparam_offset: Offset added to the reparameterization of beta and gamma.
      See the `GDN` class documentation for a full discussion of this
      parameter; the default was empirically determined as a good starting
      point.
    data_format: Format of input tensor. Currently supports `'channels_first'`
      and `'channels_last'`.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True`, also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require `reuse=True` in such
      cases.
    reuse: Boolean, whether to reuse the weights of a previous layer by the
      same name.

  Returns:
    Output tensor.
  """
  # Thin wrapper: build a GDN layer object and immediately apply it.
  return GDN(
      inverse=inverse,
      beta_min=beta_min,
      gamma_init=gamma_init,
      reparam_offset=reparam_offset,
      data_format=data_format,
      activity_regularizer=activity_regularizer,
      trainable=trainable,
      name=name,
      dtype=inputs.dtype.base_dtype,
      _scope=name,
      _reuse=reuse).apply(inputs)
@add_arg_scope
def layer_norm(inputs,
               center=True,
               scale=True,
               activation_fn=None,
               reuse=None,
               variables_collections=None,
               outputs_collections=None,
               trainable=True,
               begin_norm_axis=1,
               begin_params_axis=-1,
               scope=None):
  """Adds a Layer Normalization layer.
  Based on the paper:
    "Layer Normalization"
    Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
    https://arxiv.org/abs/1607.06450.
  Can be used as a normalizer function for conv2d and fully_connected.
  Given a tensor `inputs` of rank `R`, moments are calculated and normalization
  is performed over axes `begin_norm_axis ... R - 1`. Scaling and centering,
  if requested, is performed over axes `begin_params_axis .. R - 1`.
  By default, `begin_norm_axis = 1` and `begin_params_axis = -1`,
  meaning that normalization is performed over all but the first axis
  (the `HWC` if `inputs` is `NHWC`), while the `beta` and `gamma` trainable
  parameters are calculated for the rightmost axis (the `C` if `inputs` is
  `NHWC`). Scaling and recentering is performed via broadcast of the
  `beta` and `gamma` parameters with the normalized tensor.
  The shapes of `beta` and `gamma` are `inputs.shape[begin_params_axis:]`,
  and this part of the inputs' shape must be fully defined.
  Args:
    inputs: A tensor having rank `R`. The normalization is performed over
      axes `begin_norm_axis ... R - 1` and centering and scaling parameters
      are calculated over `begin_params_axis ... R - 1`.
    center: If True, add offset of `beta` to normalized tensor. If False, `beta`
      is ignored.
    scale: If True, multiply by `gamma`. If False, `gamma` is
      not used. When the next layer is linear (also e.g. `nn.relu`), this can be
      disabled since the scaling can be done by the next layer.
    activation_fn: Activation function, default set to None to skip it and
      maintain a linear activation.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional collections for the variables.
    outputs_collections: Collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    begin_norm_axis: The first normalization dimension: normalization will be
      performed along dimensions `begin_norm_axis : rank(inputs)`
    begin_params_axis: The first parameter (beta, gamma) dimension: scale
      and centering parameters will have dimensions
      `begin_params_axis : rank(inputs)` and will be broadcast with the
      normalized inputs accordingly.
    scope: Optional scope for `variable_scope`.
  Returns:
    A `Tensor` representing the output of the operation, having the same
    shape and dtype as `inputs`.
  Raises:
    ValueError: If the rank of `inputs` is not known at graph build time,
      or if `inputs.shape[begin_params_axis:]` is not fully defined at
      graph build time.
  """
  with variable_scope.variable_scope(
      scope, 'LayerNorm', [inputs], reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    inputs_shape = inputs.shape
    inputs_rank = inputs_shape.ndims
    if inputs_rank is None:
      raise ValueError('Inputs %s has undefined rank.' % inputs.name)
    dtype = inputs.dtype.base_dtype
    # Resolve a negative begin_norm_axis relative to the static rank so the
    # range() below is well defined. begin_params_axis may stay negative; it
    # is only used for slicing the shape.
    if begin_norm_axis < 0:
      begin_norm_axis = inputs_rank + begin_norm_axis
    if begin_params_axis >= inputs_rank or begin_norm_axis >= inputs_rank:
      raise ValueError('begin_params_axis (%d) and begin_norm_axis (%d) '
                       'must be < rank(inputs) (%d)' %
                       (begin_params_axis, begin_norm_axis, inputs_rank))
    params_shape = inputs_shape[begin_params_axis:]
    if not params_shape.is_fully_defined():
      raise ValueError(
          'Inputs %s: shape(inputs)[%s:] is not fully defined: %s' %
          (inputs.name, begin_params_axis, inputs_shape))
    # Allocate parameters for the beta and gamma of the normalization.
    beta, gamma = None, None
    if center:
      beta_collections = utils.get_variable_collections(variables_collections,
                                                        'beta')
      beta = variables.model_variable(
          'beta',
          shape=params_shape,
          dtype=dtype,
          initializer=init_ops.zeros_initializer(),
          collections=beta_collections,
          trainable=trainable)
    if scale:
      gamma_collections = utils.get_variable_collections(
          variables_collections, 'gamma')
      gamma = variables.model_variable(
          'gamma',
          shape=params_shape,
          dtype=dtype,
          initializer=init_ops.ones_initializer(),
          collections=gamma_collections,
          trainable=trainable)
    # Calculate the moments on the last axis (layer activations).
    norm_axes = list(range(begin_norm_axis, inputs_rank))
    mean, variance = nn.moments(inputs, norm_axes, keep_dims=True)
    # Compute layer normalization using the batch_normalization function.
    # The tiny epsilon keeps the rsqrt inside batch_normalization finite when
    # the variance is (near) zero.
    variance_epsilon = 1e-12
    outputs = nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=beta,
        scale=gamma,
        variance_epsilon=variance_epsilon)
    # batch_normalization can lose static shape information; restore it.
    outputs.set_shape(inputs_shape)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def images_to_sequence(inputs,
                       data_format=DATA_FORMAT_NHWC,
                       outputs_collections=None,
                       scope=None):
  """Converts a batch of images into a batch of sequences.

  Each image column becomes one sequence step; images in the batch (and their
  rows) are merged into the batch dimension of the sequence.

  Args:
    inputs: a (num_images, height, width, depth) tensor
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.

  Raises:
    ValueError: If `data_format` is not either NCHW or NHWC.

  Returns:
    (width, num_images*height, depth) sequence tensor
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with ops.name_scope(scope, 'ImagesToSequence', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    if data_format and data_format.startswith('NC'):
      # Work in channels-last layout internally.
      inputs = array_ops.transpose(inputs, [0, 2, 3, 1])
    # Width and depth must be statically known; batch and height may be
    # dynamic, so fetch them from the runtime shape.
    _, _, width, depth = inputs.get_shape().as_list()
    dynamic_shape = array_ops.shape(inputs)
    num_images, height = dynamic_shape[0], dynamic_shape[1]
    column_major = array_ops.transpose(inputs, [2, 0, 1, 3])
    sequence = array_ops.reshape(column_major,
                                 [width, num_images * height, depth])
    return utils.collect_named_outputs(outputs_collections, sc, sequence)
@add_arg_scope
def max_pool2d(inputs,
               kernel_size,
               stride=2,
               padding='VALID',
               data_format=DATA_FORMAT_NHWC,
               outputs_collections=None,
               scope=None):
  """Adds a 2D Max Pooling op.

  Pooling is applied per image over the spatial dimensions only, never over
  the batch or channel dimensions.

  Args:
    inputs: A 4-D tensor of shape `[batch_size, height, width, channels]` if
      `data_format` is `NHWC`, and `[batch_size, channels, height, width]` if
      `data_format` is `NCHW`.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of the
      pooling kernel over which the op is computed. Can be an int if both
      values are the same.
    stride: A list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: The padding method, either 'VALID' or 'SAME'.
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.

  Returns:
    A `Tensor` representing the results of the pooling operation.

  Raises:
    ValueError: If `data_format` is neither `NHWC` nor `NCHW`.
    ValueError: If 'kernel_size' is not a 2-D list
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  with ops.name_scope(scope, 'MaxPool2D', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    # Translate the NHWC/NCHW constant into the core-layers vocabulary.
    if data_format and data_format.startswith('NC'):
      channel_layout = 'channels_first'
    else:
      channel_layout = 'channels_last'
    pooling = pooling_layers.MaxPooling2D(
        pool_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=channel_layout,
        _scope=sc)
    pooled = pooling.apply(inputs)
    return utils.collect_named_outputs(outputs_collections, sc, pooled)
@add_arg_scope
def max_pool3d(inputs,
               kernel_size,
               stride=2,
               padding='VALID',
               data_format=DATA_FORMAT_NDHWC,
               outputs_collections=None,
               scope=None):
  """Adds a 3D Max Pooling op.

  Pooling is applied per volume over the spatial dimensions only, never over
  the batch or channel dimensions.

  Args:
    inputs: A 5-D tensor of shape `[batch_size, depth, height, width, channels]`
      if `data_format` is `NDHWC`, and `[batch_size, channels, depth, height,
      width]` if `data_format` is `NCDHW`.
    kernel_size: A list of length 3: [kernel_depth, kernel_height, kernel_width]
      of the pooling kernel over which the op is computed. Can be an int if
      all three values are the same.
    stride: A list of length 3: [stride_depth, stride_height, stride_width].
      Can be an int if all strides are the same. Note that presently
      all strides must have the same value.
    padding: The padding method, either 'VALID' or 'SAME'.
    data_format: A string. `NDHWC` (default) and `NCDHW` are supported.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.

  Returns:
    A `Tensor` representing the results of the pooling operation.

  Raises:
    ValueError: If `data_format` is neither `NDHWC` nor `NCDHW`.
    ValueError: If 'kernel_size' is not a 3-D list
  """
  if data_format not in (DATA_FORMAT_NCDHW, DATA_FORMAT_NDHWC):
    raise ValueError('data_format has to be either NCDHW or NDHWC.')
  with ops.name_scope(scope, 'MaxPool3D', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    # Translate the NDHWC/NCDHW constant into the core-layers vocabulary.
    if data_format and data_format.startswith('NC'):
      channel_layout = 'channels_first'
    else:
      channel_layout = 'channels_last'
    pooling = pooling_layers.MaxPooling3D(
        pool_size=kernel_size,
        strides=stride,
        padding=padding,
        data_format=channel_layout,
        _scope=sc)
    pooled = pooling.apply(inputs)
    return utils.collect_named_outputs(outputs_collections, sc, pooled)
@add_arg_scope
def pool(inputs,
         kernel_size,
         pooling_type,
         padding='VALID',
         data_format=None,
         dilation_rate=1,
         stride=1,
         outputs_collections=None,
         scope=None):
  """Adds a pooling op of arbitrary spatial rank.

  Args:
    inputs: Tensor of rank N+2, of shape
      `[batch_size] + input_spatial_shape + [num_channels]` if data_format
      does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format
      starts with "NC". Pooling happens over the spatial dimensions only.
    kernel_size: Sequence of N ints >= 1. Can also be a single integer to
      specify the same value for all spatial dimensions.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    padding: The padding algorithm, must be "SAME" or "VALID".
    data_format: A string or None. Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC"). For N=1, the valid values are "NWC"
      (default) and "NCW". For N=2, the valid values are "NHWC" (default) and
      "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilation_rate: Optional. Dilation rate. Sequence of N ints >= 1. Defaults
      to [1]*N. Can also be a single integer to specify the same value for
      all spatial dimensions. If any value of dilation_rate is > 1, then all
      values of stride must be 1.
    stride: Optional. Sequence of N ints >= 1. Defaults to [1]*N. Can also be
      a single integer to specify the same value for all spatial dimensions.
      If any value of stride is > 1, then all values of dilation_rate must
      be 1.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.

  Returns:
    A `Tensor` representing the results of the pooling operation.

  Raises:
    ValueError: If arguments are invalid.
  """
  with ops.name_scope(scope, '%s_pool' % (pooling_type.lower()),
                      [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    input_rank = inputs.get_shape().ndims
    if input_rank is None:
      raise ValueError('Rank of inputs must be known')
    if input_rank < 3:
      raise ValueError('Rank of inputs must be >= 3')
    # Everything beyond batch and channel is spatial.
    num_spatial_dims = input_rank - 2
    window_shape = utils.n_positive_integers(num_spatial_dims, kernel_size)
    dilations = utils.n_positive_integers(num_spatial_dims, dilation_rate)
    strides = utils.n_positive_integers(num_spatial_dims, stride)
    pooled = nn.pool(
        input=inputs,
        window_shape=window_shape,
        pooling_type=pooling_type,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilations,
        strides=strides,
        name=sc)
    return utils.collect_named_outputs(outputs_collections, sc, pooled)
@add_arg_scope
def one_hot_encoding(labels,
                     num_classes,
                     on_value=1.0,
                     off_value=0.0,
                     outputs_collections=None,
                     scope=None):
  """Transforms numeric labels into one-hot vectors using `tf.one_hot`.

  Args:
    labels: [batch_size] target labels.
    num_classes: Total number of classes.
    on_value: A scalar defining the on-value.
    off_value: A scalar defining the off-value.
    outputs_collections: Collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    One-hot encoding of the labels.
  """
  with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
    labels = ops.convert_to_tensor(labels)
    # one_hot expects int64 indices; widen int32 labels.
    if labels.dtype == dtypes.int32:
      labels = standard_ops.to_int64(labels)
    encoded = standard_ops.one_hot(
        labels, num_classes, on_value=on_value, off_value=off_value)
    return utils.collect_named_outputs(outputs_collections, sc, encoded)
def _apply_activation(y, activation_fn, output_collections):
  """Applies `activation_fn` to `y` (if given) and records the result.

  The (possibly activated) tensor is added to `output_collections` plus the
  standard ACTIVATIONS collection, then returned.
  """
  if activation_fn is not None:
    y = activation_fn(y)
  target_collections = list(output_collections or [])
  target_collections.append(ops.GraphKeys.ACTIVATIONS)
  ops.add_to_collections(target_collections, y)
  return y
def repeat(inputs, repetitions, layer, *args, **kwargs):
  """Applies the same layer with the same arguments repeatedly.

  ```python
  y = repeat(x, 3, conv2d, 64, [3, 3], scope='conv1')
  # It is equivalent to:

  x = conv2d(x, 64, [3, 3], scope='conv1/conv1_1')
  x = conv2d(x, 64, [3, 3], scope='conv1/conv1_2')
  y = conv2d(x, 64, [3, 3], scope='conv1/conv1_3')
  ```

  If the `scope` argument is not given in `kwargs`, it is set to
  `layer.__name__`, or `layer.func.__name__` (for `functools.partial`
  objects). If neither `__name__` nor `func.__name__` is available, the
  layers are called with `scope='repeat'`.

  Args:
    inputs: A `Tensor` suitable for layer.
    repetitions: Int, number of repetitions.
    layer: A layer with arguments `(inputs, *args, **kwargs)`
    *args: Extra args for the layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    A tensor result of applying the layer, repetitions times.

  Raises:
    ValueError: If the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  with variable_scope.variable_scope(scope, 'Repeat', [inputs]):
    inputs = ops.convert_to_tensor(inputs)
    if scope is None:
      # Derive a scope name from the callable; functools.partial objects
      # expose the wrapped function as `.func`.
      if hasattr(layer, '__name__'):
        scope = layer.__name__
      elif hasattr(layer, 'func') and hasattr(layer.func, '__name__'):
        scope = layer.func.__name__  # In case layer is a functools.partial.
      else:
        scope = 'repeat'
    outputs = inputs
    for i in range(repetitions):
      # Number sub-scopes from 1 so repetitions do not share variables.
      kwargs['scope'] = scope + '_' + str(i + 1)
      outputs = layer(outputs, *args, **kwargs)
  return outputs
def _scale_gradient_shape(op):
"""Shape helper function for scale_gradient function below."""
return [op.inputs[0].shape]
def _scale_gradient_grad(op, grad):
"""Python gradient helper function for scale_gradient function below."""
return [grad * op.inputs[1], None]
# The Defun decorator wires in the custom gradient and shape functions above;
# the forward computation itself is the identity.
@function.Defun(
    python_grad_func=_scale_gradient_grad, shape_func=_scale_gradient_shape)
def scale_gradient(inputs, gradient_multiplier):
  """Identity operation, but with the gradient multiplied by a tensor.
  The TensorFlow gradient system will compute the gradient with respect to
  `inputs` as the product of the gradient with respect to the `output`
  multiplied by a specified `gradient_multiplier` tensor. If
  `gradient_multiplier` is equal to 1, then this results in the true gradient.
  Otherwise, it results in a scaled gradient.
  This can be useful for adjusting the relative learning rate of different
  parameter tensors when performing gradient descent, and because this rescaling
  can be inserted at arbitrary locations within a graph, is often more
  convenient to apply than simply rescaling the final computed gradients.
  Args:
    inputs: Tensor to be output.
    gradient_multiplier: Tensor by which to multiply the gradient with respect
      to `output` to compute the gradient with respect to `inputs`. Its shape
      must be broadcastable to the shape of `inputs`.
  Returns:
    output Tensor, equal to `inputs`.
  """
  # gradient_multiplier is implicitly saved by decorator, and only used for
  # gradient computation.
  del gradient_multiplier
  return inputs
@add_arg_scope
def separable_convolution2d(
    inputs,
    num_outputs,
    kernel_size,
    depth_multiplier=1,
    stride=1,
    padding='SAME',
    data_format=DATA_FORMAT_NHWC,
    rate=1,
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    pointwise_initializer=None,
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None):
  """Adds a depth-separable 2D convolution with optional batch_norm layer.
  This op first performs a depthwise convolution that acts separately on
  channels, creating a variable called `depthwise_weights`. If `num_outputs`
  is not None, it adds a pointwise convolution that mixes channels, creating a
  variable called `pointwise_weights`. Then, if `normalizer_fn` is None,
  it adds bias to the result, creating a variable called 'biases', otherwise,
  the `normalizer_fn` is applied. It finally applies an activation function
  to produce the end result.
  Args:
    inputs: A tensor of size [batch_size, height, width, channels].
    num_outputs: The number of pointwise convolution output filters. If is
      None, then we skip the pointwise convolution stage.
    kernel_size: A list of length 2: [kernel_height, kernel_width] of
      of the filters. Can be an int if both values are the same.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    stride: A list of length 2: [stride_height, stride_width], specifying the
      depthwise convolution stride. Can be an int if both strides are the same.
    padding: One of 'VALID' or 'SAME'.
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
    rate: A list of length 2: [rate_height, rate_width], specifying the dilation
      rates for atrous convolution. Can be an int if both rates are the same.
      If any value is larger than one, then both stride values need to be one.
    activation_fn: Activation function. The default value is a ReLU function.
      Explicitly set it to None to skip it and maintain a linear activation.
    normalizer_fn: Normalization function to use instead of `biases`. If
      `normalizer_fn` is provided then `biases_initializer` and
      `biases_regularizer` are ignored and `biases` are not created nor added.
      default set to None for no normalizer function
    normalizer_params: Normalization function parameters.
    weights_initializer: An initializer for the depthwise weights.
    pointwise_initializer: An initializer for the pointwise weights.
      default set to None, means use weights_initializer.
    weights_regularizer: Optional regularizer for the weights.
    biases_initializer: An initializer for the biases. If None skip biases.
    biases_regularizer: Optional regularizer for the biases.
    reuse: Whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: Optional list of collections for all the variables or
      a dictionary containing a different list of collection per variable.
    outputs_collections: Collection to add the outputs.
    trainable: Whether or not the variables should be trainable or not.
    scope: Optional scope for variable_scope.
  Returns:
    A `Tensor` representing the output of the operation.
  Raises:
    ValueError: If `data_format` is invalid.
  """
  if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
    raise ValueError('data_format has to be either NCHW or NHWC.')
  # Route the core layer's variable names onto the contrib naming convention
  # so they land in the expected collections below.
  layer_variable_getter = _build_variable_getter({
      'bias': 'biases',
      'depthwise_kernel': 'depthwise_weights',
      'pointwise_kernel': 'pointwise_weights'
  })
  with variable_scope.variable_scope(
      scope,
      'SeparableConv2d', [inputs],
      reuse=reuse,
      custom_getter=layer_variable_getter) as sc:
    inputs = ops.convert_to_tensor(inputs)
    if pointwise_initializer is None:
      # Fall back to the depthwise initializer for the pointwise weights.
      pointwise_initializer = weights_initializer
    # Translate 'NCHW'/'NHWC' into the core layers' data_format vocabulary.
    df = ('channels_first'
          if data_format and data_format.startswith('NC') else 'channels_last')
    if num_outputs is not None:
      # Apply separable conv using the SeparableConvolution2D layer.
      layer = convolutional_layers.SeparableConvolution2D(
          filters=num_outputs,
          kernel_size=kernel_size,
          strides=stride,
          padding=padding,
          data_format=df,
          dilation_rate=utils.two_element_tuple(rate),
          activation=None,
          depth_multiplier=depth_multiplier,
          # Bias only when no normalizer is used and an initializer was given
          # (note: this passes the truthiness of the initializer object).
          use_bias=not normalizer_fn and biases_initializer,
          depthwise_initializer=weights_initializer,
          pointwise_initializer=pointwise_initializer,
          bias_initializer=biases_initializer,
          depthwise_regularizer=weights_regularizer,
          pointwise_regularizer=weights_regularizer,
          bias_regularizer=biases_regularizer,
          activity_regularizer=None,
          trainable=trainable,
          name=sc.name,
          dtype=inputs.dtype.base_dtype,
          _scope=sc,
          _reuse=reuse)
      outputs = layer.apply(inputs)
      # Add variables to collections.
      _add_variable_to_collections(layer.depthwise_kernel,
                                   variables_collections, 'weights')
      _add_variable_to_collections(layer.pointwise_kernel,
                                   variables_collections, 'weights')
      if layer.bias is not None:
        _add_variable_to_collections(layer.bias, variables_collections,
                                     'biases')
      if normalizer_fn is not None:
        normalizer_params = normalizer_params or {}
        outputs = normalizer_fn(outputs, **normalizer_params)
    else:
      # Actually apply depthwise conv instead of separable conv.
      dtype = inputs.dtype.base_dtype
      kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
      stride_h, stride_w = utils.two_element_tuple(stride)
      num_filters_in = utils.channel_dimension(
          inputs.get_shape(), df, min_rank=4)
      weights_collections = utils.get_variable_collections(
          variables_collections, 'weights')
      depthwise_shape = [kernel_h, kernel_w, num_filters_in, depth_multiplier]
      depthwise_weights = variables.model_variable(
          'depthwise_weights',
          shape=depthwise_shape,
          dtype=dtype,
          initializer=weights_initializer,
          regularizer=weights_regularizer,
          trainable=trainable,
          collections=weights_collections)
      # Build the 4-D strides vector in the layout matching data_format.
      strides = [1, 1, stride_h,
                 stride_w] if data_format.startswith('NC') else [
                     1, stride_h, stride_w, 1
                 ]
      outputs = nn.depthwise_conv2d(
          inputs,
          depthwise_weights,
          strides,
          padding,
          rate=utils.two_element_tuple(rate),
          data_format=data_format)
      # Depthwise-only path: output depth is input depth times the multiplier.
      num_outputs = depth_multiplier * num_filters_in
      if normalizer_fn is not None:
        normalizer_params = normalizer_params or {}
        outputs = normalizer_fn(outputs, **normalizer_params)
      else:
        if biases_initializer is not None:
          biases_collections = utils.get_variable_collections(
              variables_collections, 'biases')
          biases = variables.model_variable(
              'biases',
              shape=[
                  num_outputs,
              ],
              dtype=dtype,
              initializer=biases_initializer,
              regularizer=biases_regularizer,
              trainable=trainable,
              collections=biases_collections)
          outputs = nn.bias_add(outputs, biases, data_format=data_format)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
@add_arg_scope
def sequence_to_images(inputs,
                       height,
                       output_data_format='channels_last',
                       outputs_collections=None,
                       scope=None):
  """Convert a batch of sequences into a batch of images.

  The second dimension of `inputs` is interpreted as the true batch size
  multiplied by the image `height`, so the sequence is folded back into
  `height`-tall images.

  Args:
    inputs: (num_steps, num_batches, depth) sequence tensor
    height: the height of the images
    output_data_format: Format of output tensor.
      Currently supports `'channels_first'` and `'channels_last'`.
    outputs_collections: The collections to which the outputs are added.
    scope: Optional scope for name_scope.

  Returns:
    A tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'SequenceToImages', [inputs]) as ns:
    inputs = ops.convert_to_tensor(inputs)
    width, batch_times_height, depth = inputs.get_shape().as_list()
    # A statically-unknown batch dimension becomes -1 so reshape infers it.
    num_batches = (-1 if batch_times_height is None
                   else batch_times_height // height)
    folded = array_ops.reshape(inputs,
                               [width, num_batches, height, depth])
    perm = ([1, 3, 2, 0] if output_data_format == 'channels_first'
            else [1, 2, 0, 3])
    outputs = array_ops.transpose(folded, perm)
    return utils.collect_named_outputs(outputs_collections, ns, outputs)
@add_arg_scope
def softmax(logits, scope=None):
  """Applies softmax along the last dimension of an N-dimensional tensor.

  For two-dimensional logits this reduces to tf.nn.softmax. The last
  dimension must have a statically-known number of elements (the number of
  classes).

  Args:
    logits: N-dimensional `Tensor` with logits, where N > 1.
    scope: Optional scope for variable_scope.

  Returns:
    A `Tensor` with same shape and type as logits.
  """
  # TODO(jrru): Add axis argument which defaults to last dimension.
  with variable_scope.variable_scope(scope, 'softmax', [logits]):
    num_classes = utils.last_dimension(logits.get_shape(), min_rank=2)
    flat_logits = array_ops.reshape(logits, [-1, num_classes])
    probabilities = nn.softmax(flat_logits)
    probabilities = array_ops.reshape(probabilities, array_ops.shape(logits))
    if not context.executing_eagerly():
      # Graph mode loses the static shape through the reshape; restore it.
      probabilities.set_shape(logits.get_shape())
    return probabilities
@add_arg_scope
def spatial_softmax(features,
                    temperature=None,
                    name=None,
                    variables_collections=None,
                    trainable=True,
                    data_format='NHWC'):
  """Computes the spatial softmax of a convolutional feature map.
  First computes the softmax over the spatial extent of each channel of a
  convolutional feature map. Then computes the expected 2D position of the
  points of maximal activation for each channel, resulting in a set of
  feature keypoints [x1, y1, ... xN, yN] for all N channels.
  Read more here:
  "Learning visual feature spaces for robotic manipulation with
  deep spatial autoencoders." Finn et al., http://arxiv.org/abs/1509.06113.
  Args:
    features: A `Tensor` of size [batch_size, W, H, num_channels]; the
      convolutional feature map.
    temperature: Softmax temperature (optional). If None, a learnable
      temperature is created.
    name: A name for this operation (optional).
    variables_collections: Collections for the temperature variable.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    data_format: A string. `NHWC` (default) and `NCHW` are supported.
  Returns:
    feature_keypoints: A `Tensor` with size [batch_size, num_channels * 2];
      the expected 2D locations of each channel's feature keypoint (normalized
      to the range (-1,1)). The inner dimension is arranged as
      [x1, y1, ... xN, yN].
  Raises:
    ValueError: If unexpected data_format specified.
    ValueError: If num_channels dimension is unspecified.
  """
  with variable_scope.variable_scope(name, 'spatial_softmax'):
    # Spatial sizes are taken dynamically; the channel count must be static.
    shape = array_ops.shape(features)
    static_shape = features.shape
    if data_format == DATA_FORMAT_NHWC:
      height, width, num_channels = shape[1], shape[2], static_shape[3]
    elif data_format == DATA_FORMAT_NCHW:
      num_channels, height, width = static_shape[1], shape[2], shape[3]
    else:
      raise ValueError('data_format has to be either NCHW or NHWC.')
    # NOTE: `.value` is the TF1 `Dimension` API — None means statically
    # unknown channel count.
    if num_channels.value is None:
      raise ValueError('The num_channels dimension of the inputs to '
                       '`spatial_softmax` should be defined. Found `None`.')
    with ops.name_scope('spatial_softmax_op', 'spatial_softmax_op', [features]):
      # Create tensors for x and y coordinate values, scaled to range [-1, 1].
      pos_x, pos_y = array_ops.meshgrid(
          math_ops.lin_space(-1., 1., num=height),
          math_ops.lin_space(-1., 1., num=width),
          indexing='ij')
      pos_x = array_ops.reshape(pos_x, [height * width])
      pos_y = array_ops.reshape(pos_y, [height * width])
      if temperature is None:
        temp_initializer = init_ops.ones_initializer()
      else:
        temp_initializer = init_ops.constant_initializer(temperature)
      if not trainable:
        temp_collections = None
      else:
        temp_collections = utils.get_variable_collections(
            variables_collections, 'temperature')
      # Rebinds the `temperature` argument to a (possibly learnable) variable.
      temperature = variables.model_variable(
          'temperature',
          shape=(),
          dtype=dtypes.float32,
          initializer=temp_initializer,
          collections=temp_collections,
          trainable=trainable)
      # Flatten to (batch * channels, height * width); NHWC input is first
      # transposed into NCHW ordering so channels group with the batch.
      if data_format == 'NCHW':
        features = array_ops.reshape(features, [-1, height * width])
      else:
        features = array_ops.reshape(
            array_ops.transpose(features, [0, 3, 1, 2]), [-1, height * width])
      # Expected (x, y) position per channel under the softmax distribution.
      softmax_attention = nn.softmax(features / temperature)
      expected_x = math_ops.reduce_sum(
          pos_x * softmax_attention, [1], keepdims=True)
      expected_y = math_ops.reduce_sum(
          pos_y * softmax_attention, [1], keepdims=True)
      expected_xy = array_ops.concat([expected_x, expected_y], 1)
      feature_keypoints = array_ops.reshape(expected_xy,
                                            [-1, num_channels.value * 2])
      feature_keypoints.set_shape([None, num_channels.value * 2])
  return feature_keypoints
def stack(inputs, layer, stack_args, **kwargs):
  """Builds a stack of layers by applying layer repeatedly using stack_args.

  Each element of `stack_args` supplies the positional arguments for one
  application of `layer`, and every application runs in its own numbered
  sub-scope. For example:

  ```python
  y = stack(x, fully_connected, [32, 64, 128], scope='fc')
  # It is equivalent to:
  x = fully_connected(x, 32, scope='fc/fc_1')
  x = fully_connected(x, 64, scope='fc/fc_2')
  y = fully_connected(x, 128, scope='fc/fc_3')
  ```

  If `scope` is not given in `kwargs`, it falls back to `layer.__name__`,
  then `layer.func.__name__` (for `functools.partial` objects), and finally
  to `'stack'`.

  Args:
    inputs: A `Tensor` suitable for layer.
    layer: A layer with arguments `(inputs, *args, **kwargs)`
    stack_args: A list/tuple of parameters for each call of layer.
    **kwargs: Extra kwargs for the layer.

  Returns:
    A `Tensor` result of applying the stacked layers.

  Raises:
    ValueError: If the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  if not isinstance(stack_args, (list, tuple)):
    raise ValueError('stack_args need to be a list or tuple')
  with variable_scope.variable_scope(scope, 'Stack', [inputs]):
    outputs = ops.convert_to_tensor(inputs)
    if scope is None:
      # Derive a scope name from the callable, unwrapping functools.partial.
      scope = getattr(
          layer, '__name__',
          getattr(getattr(layer, 'func', None), '__name__', 'stack'))
    for index, layer_args in enumerate(stack_args, start=1):
      kwargs['scope'] = '%s_%d' % (scope, index)
      if not isinstance(layer_args, (list, tuple)):
        layer_args = [layer_args]
      outputs = layer(outputs, *layer_args, **kwargs)
    return outputs
@add_arg_scope
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
  """Normalizes the given input across the specified dimension to unit length.

  Note that the rank of `input` must be known.

  Args:
    inputs: A `Tensor` of arbitrary size.
    dim: The dimension along which the input is normalized.
    epsilon: A small value to add to the inputs to avoid dividing by zero.
    scope: Optional scope for variable_scope.

  Returns:
    The normalized `Tensor`.

  Raises:
    ValueError: If `dim` is negative or not smaller than the rank of
      `inputs`, or if the rank of `inputs` is unknown.
  """
  with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
    if not inputs.get_shape():
      raise ValueError('The input rank must be known.')
    input_rank = len(inputs.get_shape().as_list())
    # `dim` indexes an existing axis, so it must lie in [0, input_rank).
    # (Fixed message: 0 is accepted, so "non-negative", not "positive".)
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be non-negative but smaller than the input rank.')
    # L2 length along `dim`; epsilon inside the sqrt guards against a
    # division by zero for all-zero slices.
    lengths = math_ops.sqrt(
        epsilon + math_ops.reduce_sum(math_ops.square(inputs), dim, True))
    # Tile the per-slice lengths back to the full input shape: ones before
    # `dim`, the dynamic size at `dim`, and ones after it.
    multiples = []
    if dim > 0:
      multiples.append(array_ops.ones([dim], dtypes.int32))
    multiples.append(
        array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
    if dim < (input_rank - 1):
      multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
    multiples = array_ops.concat(multiples, 0)
    return math_ops.div(inputs, array_ops.tile(lengths, multiples))
@add_arg_scope
def maxout(inputs, num_units, axis=-1, scope=None):
  """Adds a maxout op from https://arxiv.org/abs/1302.4389

  "Maxout Networks" by Goodfellow, Warde-Farley, Mirza, Courville and
  Bengio. The `axis` dimension (usually the filter/channel dimension) is
  split into `num_units` groups and the maximum is taken within each group,
  shrinking that dimension from `num_channels` to `num_units`. This can also
  be used after fully-connected layers to reduce the number of features.

  Arguments:
    inputs: Tensor input
    num_units: Specifies how many features will remain after maxout
      in the `axis` dimension (usually channel).
      This must be a factor of number of features.
    axis: The dimension where max pooling will be performed. Default is the
      last dimension.
    scope: Optional scope for variable_scope.

  Returns:
    A `Tensor` representing the results of the pooling operation.

  Raises:
    ValueError: if num_units is not multiple of number of features.
  """
  with variable_scope.variable_scope(scope, 'MaxOut', [inputs]):
    inputs = ops.convert_to_tensor(inputs)
    static_shape = inputs.get_shape().as_list()
    num_channels = static_shape[axis]
    if num_channels % num_units:
      raise ValueError('number of features({}) is not '
                       'a multiple of num_units({})'.format(
                           num_channels, num_units))
    # Split the maxout axis into (num_units, group_size) so the maximum can
    # be reduced over the trailing group dimension.
    static_shape[axis] = num_units
    static_shape.append(num_channels // num_units)
    # Any statically-unknown dimension (e.g. the batch) is replaced by its
    # dynamic size so the reshape stays valid for arbitrary batch sizes.
    target_shape = [
        array_ops.shape(inputs)[i] if size is None else size
        for i, size in enumerate(static_shape)
    ]
    return math_ops.reduce_max(
        array_ops.reshape(inputs, target_shape), -1, keepdims=False)
def poincare_normalize(x, axis=1, epsilon=1e-5, name=None):
  """Projects `x` into the Poincare ball of norm <= 1.0 - epsilon.

  https://en.wikipedia.org/wiki/Poincare_ball_model

  Used in "Poincare Embeddings for Learning Hierarchical Representations"
  (Maximilian Nickel, Douwe Kiela, https://arxiv.org/pdf/1705.08039.pdf).

  Each 1-D slice along `axis` whose norm exceeds 1 - epsilon is rescaled by
  (1 - epsilon) / ||x||; slices already inside the ball are left unchanged.

  Args:
    x: A `Tensor`.
    axis: Axis along which to normalize. A scalar or a vector of
      integers.
    epsilon: A small deviation from the edge of the unit sphere for numerical
      stability.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same shape as `x`.
  """
  with ops.name_scope(name, 'poincare_normalize', [x]) as name:
    x = ops.convert_to_tensor(x, name='x')
    squared_norm = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
    inv_norm = math_ops.rsqrt(squared_norm)
    # Scale factor (1 - epsilon)/||x||, capped at 1 so in-ball points are
    # untouched.
    scale = math_ops.minimum((1. - epsilon) * inv_norm, 1.)
    return math_ops.multiply(x, scale, name=name)
def legacy_fully_connected(x,
                           num_output_units,
                           activation_fn=None,
                           weight_init=initializers.xavier_initializer(),
                           bias_init=init_ops.zeros_initializer(),
                           name=None,
                           weight_collections=(ops.GraphKeys.WEIGHTS,),
                           bias_collections=(ops.GraphKeys.BIASES,),
                           output_collections=(ops.GraphKeys.ACTIVATIONS,),
                           trainable=True,
                           weight_regularizer=None,
                           bias_regularizer=None):
  # pylint: disable=anomalous-backslash-in-string
  r"""Adds the parameters for a fully connected layer and returns the output.
  A fully connected layer is generally defined as a matrix multiply:
  `y = f(w * x + b)` where `f` is given by `activation_fn`. If
  `activation_fn` is `None`, the result of `y = w * x + b` is
  returned.
  If `x` has shape [\\(\text{dim}_0, \text{dim}_1, ..., \text{dim}_n\\)]
  with more than 2 dimensions (\\(n > 1\\)), then we repeat the matrix
  multiply along the first dimensions. The result r is a tensor of shape
  [\\(\text{dim}_0, ..., \text{dim}_{n-1},\\) `num_output_units`],
  where \\( r_{i_0, ..., i_{n-1}, k} =
  \sum_{0 \leq j < \text{dim}_n} x_{i_0, ... i_{n-1}, j} \cdot w_{j, k}\\).
  This is accomplished by reshaping `x` to 2-D
  [\\(\text{dim}_0 \cdot ... \cdot \text{dim}_{n-1}, \text{dim}_n\\)]
  before the matrix multiply and afterwards reshaping it to
  [\\(\text{dim}_0, ..., \text{dim}_{n-1},\\) `num_output_units`].
  This op creates `w` and optionally `b`. Bias (`b`) can be disabled by setting
  `bias_init` to `None`.
  The variable creation is compatible with `tf.variable_scope` and so can be
  reused with `tf.variable_scope` or `tf.make_template`.
  Most of the details of variable creation can be controlled by specifying the
  initializers (`weight_init` and `bias_init`) and in which collections to place
  the created variables (`weight_collections` and `bias_collections`; note that
  the variables are always added to the `VARIABLES` collection). The output of
  the layer can be placed in custom collections using `output_collections`.
  The collections arguments default to `WEIGHTS`, `BIASES` and `ACTIVATIONS`,
  respectively.
  A per layer regularization can be specified by setting `weight_regularizer`
  and `bias_regularizer`, which are applied to the weights and biases
  respectively, and whose output is added to the `REGULARIZATION_LOSSES`
  collection.
  Args:
    x: The input `Tensor`.
    num_output_units: The size of the output.
    activation_fn: Activation function, default set to None to skip it and
      maintain a linear activation.
    weight_init: An optional weight initialization, defaults to
      `xavier_initializer`.
    bias_init: An initializer for the bias, defaults to 0. Set to `None` in
      order to disable bias.
    name: The name for this operation is used to name operations and to find
      variables. If specified it must be unique for this scope, otherwise a
      unique name starting with "fully_connected" will be created. See
      `tf.variable_scope` for details.
    weight_collections: List of graph collections to which weights are added.
    bias_collections: List of graph collections to which biases are added.
    output_collections: List of graph collections to which outputs are added.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    weight_regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`. Used for weights.
    bias_regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`. Used for biases.
  Returns:
    The output of the fully connected layer.
  Raises:
    ValueError: If x has rank less than 2 or if its last dimension is not set.
  """
  with variable_scope.variable_scope(name, 'fully_connected', [x]):
    x = ops.convert_to_tensor(x)
    # The rank and the size of the last dimension must be statically known.
    dims = x.get_shape().dims
    if dims is None:
      raise ValueError('dims of x must be known but is None')
    if len(dims) < 2:
      raise ValueError('rank of x must be at least 2 not: %d' % len(dims))
    num_input_units = dims[-1].value
    if num_input_units is None:
      raise ValueError('last dimension of x must be known but is None')
    dtype = x.dtype.base_dtype
    # Variables additionally go into GLOBAL_VARIABLES so they are saved.
    weight_collections = set(
        list(weight_collections or []) + [ops.GraphKeys.GLOBAL_VARIABLES])
    w = variable_scope.get_variable(
        'weights',
        shape=[num_input_units, num_output_units],
        dtype=dtype,
        initializer=weight_init,
        collections=weight_collections,
        regularizer=weight_regularizer,
        trainable=trainable)
    # Flatten any leading dimensions so a single 2-D matmul suffices.
    x_2_dim = x if len(dims) <= 2 else array_ops.reshape(
        x, [-1, num_input_units])
    y = standard_ops.matmul(x_2_dim, w)
    if bias_init is not None:
      bias_collections = set(
          list(bias_collections or []) + [ops.GraphKeys.GLOBAL_VARIABLES])
      b = variable_scope.get_variable(
          'bias',
          shape=[num_output_units],
          dtype=dtype,
          initializer=bias_init,
          collections=bias_collections,
          regularizer=bias_regularizer,
          trainable=trainable)
      y = nn.bias_add(y, b)
    if len(dims) > 2:
      # Restore the original leading dimensions, then re-attach as much
      # static shape information as is known.
      out_shape = array_ops.unstack(array_ops.shape(x))
      out_shape[-1] = num_output_units
      y = array_ops.reshape(y, array_ops.stack(out_shape))
      static_shape = x.get_shape().as_list()
      static_shape[-1] = num_output_units
      y.set_shape(static_shape)
    return _apply_activation(y, activation_fn, output_collections)
# TODO(eiderm): Verify and fix autocomplete in colab (also relu6).
# Simple aliases which pre-bind the activation_fn parameter of
# fully_connected / legacy_fully_connected.
elu = functools.partial(fully_connected, activation_fn=nn.elu)
legacy_relu = functools.partial(legacy_fully_connected, activation_fn=nn.relu)
legacy_linear = functools.partial(legacy_fully_connected, activation_fn=None)
relu = functools.partial(fully_connected, activation_fn=nn.relu)
relu6 = functools.partial(fully_connected, activation_fn=nn.relu6)
linear = functools.partial(fully_connected, activation_fn=None)
# Short names for the convolution layers defined earlier in this module.
conv1d = convolution1d
conv2d = convolution2d
conv3d = convolution3d
conv2d_transpose = convolution2d_transpose
conv3d_transpose = convolution3d_transpose
conv2d_in_plane = convolution2d_in_plane
separable_conv2d = separable_convolution2d
| apache-2.0 |
WillieMaddox/numpy | numpy/distutils/fcompiler/nag.py | 228 | 1403 | from __future__ import division, absolute_import, print_function
import sys
from numpy.distutils.fcompiler import FCompiler
compilers = ['NAGFCompiler']
class NAGFCompiler(FCompiler):
    # numpy.distutils wrapper for the NAGWare Fortran 95 compiler (f95).
    compiler_type = 'nag'
    description = 'NAGWare Fortran 95 Compiler'
    # Parsed by the base class from the output of the version_cmd below.
    version_pattern = r'NAGWare Fortran 95 compiler Release (?P<version>[^\s]*)'
    executables = {
        'version_cmd' : ["<F90>", "-V"],
        'compiler_f77' : ["f95", "-fixed"],
        'compiler_fix' : ["f95", "-fixed"],
        'compiler_f90' : ["f95"],
        'linker_so' : ["<F90>"],
        'archiver' : ["ar", "-cr"],
        'ranlib' : ["ranlib"]
    }
    def get_flags_linker_so(self):
        """Linker flags for shared objects; macOS needs NAG-specific flags."""
        if sys.platform=='darwin':
            # Build a flat-namespace bundle and avoid the shared NAG runtime.
            return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress']
        return ["-Wl,-shared"]
    def get_flags_opt(self):
        """Highest NAG optimization level."""
        return ['-O4']
    def get_flags_arch(self):
        """Architecture flags; -target=native only exists before release 5.1."""
        # NOTE(review): lexicographic version compare ('5.1') only works for
        # single-digit release numbers — confirm against supported versions.
        version = self.get_version()
        if version and version < '5.1':
            return ['-target=native']
        else:
            return ['']
    def get_flags_debug(self):
        """Debug flags: line info, NaN-initialized reals, runtime checks (-C)."""
        return ['-g', '-gline', '-g90', '-nan', '-C']
if __name__ == '__main__':
    # Smoke test: instantiate the NAG compiler wrapper and print whatever
    # compiler version it detects on this machine.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    compiler = new_fcompiler(compiler='nag')
    compiler.customize()
    print(compiler.get_version())
| bsd-3-clause |
kvar/ansible | lib/ansible/plugins/cliconf/exos.py | 31 | 9970 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: exos
short_description: Use exos cliconf to run command on Extreme EXOS platform
description:
- This exos plugin provides low level abstraction apis for
sending and receiving CLI commands from Extreme EXOS network devices.
version_added: "2.6"
"""
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.plugins.cliconf import CliconfBase
class Cliconf(CliconfBase):
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
diff = {}
device_operations = self.get_device_operations()
option_values = self.get_option_values()
if candidate is None and device_operations['supports_generate_diff']:
raise ValueError("candidate configuration is required to generate diff")
if diff_match not in option_values['diff_match']:
raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))
if diff_replace not in option_values['diff_replace']:
raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))
# prepare candidate configuration
candidate_obj = NetworkConfig(indent=1)
candidate_obj.load(candidate)
if running and diff_match != 'none' and diff_replace != 'config':
# running configuration
running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines)
configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)
else:
configdiffobjs = candidate_obj.items
diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
return diff
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'exos'
reply = self.run_commands({'command': 'show switch detail', 'output': 'text'})
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'ExtremeXOS version (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'System Type: +(\S+)', data)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'SysName: +(\S+)', data)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
def get_default_flag(self):
# The flag to modify the command to collect configuration with defaults
return 'detail'
def get_config(self, source='running', format='text', flags=None):
options_values = self.get_option_values()
if format not in options_values['format']:
raise ValueError("'format' value %s is invalid. Valid values are %s" % (format, ','.join(options_values['format'])))
lookup = {'running': 'show configuration', 'startup': 'debug cfgmgr show configuration file'}
if source not in lookup:
raise ValueError("fetching configuration from %s is not supported" % source)
cmd = {'command': lookup[source], 'output': 'text'}
if source == 'startup':
reply = self.run_commands({'command': 'show switch', 'format': 'text'})
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Config Selected: +(\S+)\.cfg', data, re.MULTILINE)
if match:
cmd['command'] += match.group(1)
else:
# No Startup(/Selected) Config
return {}
cmd['command'] += ' '.join(to_list(flags))
cmd['command'] = cmd['command'].strip()
return self.run_commands(cmd)[0]
def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(operations, candidate, commit, replace, comment)
results = []
requests = []
if commit:
for line in to_list(candidate):
if not isinstance(line, Mapping):
line = {'command': line}
results.append(self.send_command(**line))
requests.append(line['command'])
else:
raise ValueError('check mode is not supported')
resp['request'] = requests
resp['response'] = results
return resp
def get(self, command, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
if output:
command = self._get_command_with_output(command, output)
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {'command': cmd}
output = cmd.pop('output', None)
if output:
cmd['command'] = self._get_command_with_output(cmd['command'], output)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc is True:
raise
out = getattr(e, 'err', e)
if out is not None:
try:
out = to_text(out, errors='surrogate_or_strict').strip()
except UnicodeError:
raise ConnectionError(message=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
if output and output == 'json':
try:
out = json.loads(out)
except ValueError:
raise ConnectionError('Response was not valid JSON, got {0}'.format(
to_text(out)
))
responses.append(out)
return responses
def get_device_operations(self):
return {
'supports_diff_replace': False, # identify if config should be merged or replaced is supported
'supports_commit': False, # identify if commit is supported by device or not
'supports_rollback': False, # identify if rollback is supported or not
'supports_defaults': True, # identify if fetching running config with default is supported
'supports_commit_comment': False, # identify if adding comment to commit is supported of not
'supports_onbox_diff': False, # identify if on box diff capability is supported or not
'supports_generate_diff': True, # identify if diff capability is supported within plugin
'supports_multiline_delimiter': False, # identify if multiline delimiter is supported within config
'supports_diff_match': True, # identify if match is supported
'supports_diff_ignore_lines': True, # identify if ignore line in diff is supported
'supports_config_replace': False, # identify if running config replace with candidate config is supported
'supports_admin': False, # identify if admin configure mode is supported or not
'supports_commit_label': False, # identify if commit label is supported or not
'supports_replace': False
}
def get_option_values(self):
    """Return the set of valid values for each configurable plugin option."""
    option_values = {
        'format': ['text', 'json'],
        'diff_match': ['line', 'strict', 'exact', 'none'],
        'diff_replace': ['line', 'block'],
        'output': ['text', 'json'],
    }
    return option_values
def get_capabilities(self):
    """Return a JSON string describing the RPCs, device operations and
    option values supported by this cliconf plugin.

    :return: JSON-encoded capabilities document.
    """
    result = super(Cliconf, self).get_capabilities()
    # Bug fix: advertise the correctly spelled 'run_commands' RPC.  The
    # previous value 'run_commmands' (three m's) could never match the
    # actual RPC name, so capability checks for it always failed.
    result['rpc'] += ['run_commands', 'get_default_flag', 'get_diff']
    result['device_operations'] = self.get_device_operations()
    result['device_info'] = self.get_device_info()
    result.update(self.get_option_values())
    return json.dumps(result)
def _get_command_with_output(self, command, output):
    """Return *command* rewritten so the device produces *output* format.

    JSON output is obtained by piping the command through the on-device
    ``cli2json.py`` helper script; text commands are returned unchanged.

    :param command: the CLI command to run.
    :param output: requested format; must be one of the plugin's valid
        'output' option values.
    :raises ValueError: if *output* is not a supported value.
    """
    # Look the valid values up once instead of twice (the original called
    # get_option_values() again just to build the error message).
    valid_outputs = self.get_option_values().get('output')
    if output not in valid_outputs:
        # Fixed error message grammar (was "'output' value is %s is invalid").
        raise ValueError("'output' value %s is invalid. Valid values are %s" % (output, ','.join(valid_outputs)))
    if output == 'json' and not command.startswith('run script cli2json.py'):
        cmd = 'run script cli2json.py %s' % command
    else:
        cmd = command
    return cmd
| gpl-3.0 |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/util/timeout.py | 713 | 9596 | from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
    """Return the current wall-clock time in seconds since the epoch.

    Kept as a module-level indirection (rather than calling ``time.time``
    directly at the use sites) so unit tests can mock it out.
    """
    return time.time()
class Timeout(object):
    """ Timeout configuration.
    Timeouts can be defined as a default for a pool::
        timeout = Timeout(connect=2.0, read=7.0)
        http = PoolManager(timeout=timeout)
        response = http.request('GET', 'http://example.com/')
    Or per-request (which overrides the default for the pool)::
        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
    Timeouts can be disabled by setting all the parameters to ``None``::
        no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/, timeout=no_timeout)
    :param total:
        This combines the connect and read timeouts into one; the read timeout
        will be set to the time leftover from the connect attempt. In the
        event that both a connect timeout and a total are specified, or a read
        timeout and a total are specified, the shorter timeout will be applied.
        Defaults to None.
    :type total: integer, float, or None
    :param connect:
        The maximum amount of time to wait for a connection attempt to a server
        to succeed. Omitting the parameter will default the connect timeout to
        the system default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout for connection attempts.
    :type connect: integer, float, or None
    :param read:
        The maximum amount of time to wait between consecutive
        read operations for a response from the server. Omitting
        the parameter will default the read timeout to the system
        default, probably `the global default timeout in socket.py
        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
        None will set an infinite timeout.
    :type read: integer, float, or None
    .. note::
        Many factors can affect the total amount of time for urllib3 to return
        an HTTP response.
        For example, Python's DNS resolver does not obey the timeout specified
        on the socket. Other factors that can affect total request time include
        high CPU load, high swap, the program running at a low priority level,
        or other behaviors.
        In addition, the read and total timeouts only measure the time between
        read operations on the socket connecting the client and the server,
        not the total amount of time for the request to return a complete
        response. For most requests, the timeout is raised because the server
        has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will not trigger, even though the request will take
        several minutes to complete.
        If your goal is to cut off any request after a set amount of wall clock
        time, consider having a second "watcher" thread to cut off a slow
        request.
    """
    #: A sentinel object representing the default timeout value
    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
    def __init__(self, total=None, connect=_Default, read=_Default):
        # Validate eagerly so a misconfigured timeout fails at construction
        # time rather than deep inside a request.
        self._connect = self._validate_timeout(connect, 'connect')
        self._read = self._validate_timeout(read, 'read')
        self.total = self._validate_timeout(total, 'total')
        # Set by start_connect(); used to compute the elapsed connect time.
        self._start_connect = None
    def __str__(self):
        return '%s(connect=%r, read=%r, total=%r)' % (
            type(self).__name__, self._connect, self._read, self.total)
    @classmethod
    def _validate_timeout(cls, value, name):
        """ Check that a timeout attribute is valid.
        :param value: The timeout value to validate
        :param name: The name of the timeout attribute to validate. This is
            used to specify in error messages.
        :return: The validated and casted version of the given value.
        :raises ValueError: If the type is not an integer or a float, or if it
            is a numeric value less than zero.
        """
        # The sentinel and the "disabled"/default markers pass through as-is.
        if value is _Default:
            return cls.DEFAULT_TIMEOUT
        if value is None or value is cls.DEFAULT_TIMEOUT:
            return value
        # Must be coercible to a number.
        try:
            float(value)
        except (TypeError, ValueError):
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))
        # Negative timeouts are meaningless; the comparison itself can raise
        # TypeError for non-orderable types on Python 3.
        try:
            if value < 0:
                raise ValueError("Attempted to set %s timeout to %s, but the "
                                 "timeout cannot be set to a value less "
                                 "than 0." % (name, value))
        except TypeError: # Python 3
            raise ValueError("Timeout value %s was %s, but it must be an "
                             "int or float." % (name, value))
        return value
    @classmethod
    def from_float(cls, timeout):
        """ Create a new Timeout from a legacy timeout value.
        The timeout value used by httplib.py sets the same timeout on the
        connect(), and recv() socket requests. This creates a :class:`Timeout`
        object that sets the individual timeouts to the ``timeout`` value
        passed to this function.
        :param timeout: The legacy timeout value.
        :type timeout: integer, float, sentinel default object, or None
        :return: Timeout object
        :rtype: :class:`Timeout`
        """
        return Timeout(read=timeout, connect=timeout)
    def clone(self):
        """ Create a copy of the timeout object
        Timeout properties are stored per-pool but each request needs a fresh
        Timeout object to ensure each one has its own start/stop configured.
        :return: a copy of the timeout object
        :rtype: :class:`Timeout`
        """
        # We can't use copy.deepcopy because that will also create a new object
        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
        # detect the user default.
        return Timeout(connect=self._connect, read=self._read,
                       total=self.total)
    def start_connect(self):
        """ Start the timeout clock, used during a connect() attempt
        :raises urllib3.exceptions.TimeoutStateError: if you attempt
            to start a timer that has been started already.
        """
        if self._start_connect is not None:
            raise TimeoutStateError("Timeout timer has already been started.")
        self._start_connect = current_time()
        return self._start_connect
    def get_connect_duration(self):
        """ Gets the time elapsed since the call to :meth:`start_connect`.
        :return: Elapsed time.
        :rtype: float
        :raises urll3.exceptions.TimeoutStateError: if you attempt
            to get duration for a timer that hasn't been started.
        """
        if self._start_connect is None:
            raise TimeoutStateError("Can't get connect duration for timer "
                                    "that has not started.")
        return current_time() - self._start_connect
    @property
    def connect_timeout(self):
        """ Get the value to use when setting a connection timeout.
        This will be a positive float or integer, the value None
        (never timeout), or the default system timeout.
        :return: Connect timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        """
        if self.total is None:
            return self._connect
        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
            return self.total
        # The effective connect budget is the tighter of the per-connect
        # value and the overall total.
        return min(self._connect, self.total)
    @property
    def read_timeout(self):
        """ Get the value for the read timeout.
        This assumes some time has elapsed in the connection timeout and
        computes the read timeout appropriately.
        If self.total is set, the read timeout is dependent on the amount of
        time taken by the connect timeout. If the connection time has not been
        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
        raised.
        :return: Value to use for the read timeout.
        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
            has not yet been called on this object.
        """
        if (self.total is not None and
                self.total is not self.DEFAULT_TIMEOUT and
                self._read is not None and
                self._read is not self.DEFAULT_TIMEOUT):
            # In case the connect timeout has not yet been established.
            if self._start_connect is None:
                return self._read
            # Remaining total budget, capped by the per-read value; never
            # negative.
            return max(0, min(self.total - self.get_connect_duration(),
                              self._read))
        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
            return max(0, self.total - self.get_connect_duration())
        else:
            return self._read
| agpl-3.0 |
django-leonardo/django-leonardo | leonardo/settings.py | 1 | 11033 |
from __future__ import absolute_import
import os
import six
import logging
import warnings
from django.apps import apps
from django import VERSION
from leonardo.conf.spec import DJANGO_CONF
from leonardo.base import leonardo, default
from leonardo.utils.settings import (get_conf_from_module, merge,
get_loaded_modules)
from importlib import import_module # noqa
from django.utils.module_loading import module_has_submodule # noqa
_file_path = os.path.abspath(os.path.dirname(__file__)).split('/')
BASE_DIR = '/'.join(_file_path[0:-2])
from leonardo.conf.default import *
# Media/static locations relative to the computed project BASE_DIR.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
# Django >= 1.8 configures templating via the unified TEMPLATES setting;
# older versions use the split TEMPLATE_* settings (else branch below).
if VERSION[:2] >= (1, 8):
    TEMPLATES = [
        {
            'BACKEND': 'django.template.backends.django.DjangoTemplates',
            'DIRS': [
                os.path.join(
                    os.path.dirname(os.path.realpath(__file__)), 'templates')
            ],
            'OPTIONS': {
                'context_processors': default.context_processors,
                'loaders': [
                    'dbtemplates.loader.Loader',
                    'django.template.loaders.filesystem.Loader',
                    'django.template.loaders.app_directories.Loader',
                    'horizon.loaders.TemplateLoader',
                ],
                'debug': True
            },
        },
    ]
else:
    TEMPLATE_DIRS = [
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'templates')
    ]
    TEMPLATE_CONTEXT_PROCESSORS = default.context_processors
    TEMPLATE_LOADERS = (
        'dbtemplates.loader.Loader',
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
        'horizon.loaders.TemplateLoader',
    )
# Layered settings overrides: obsolete project-local location first, then the
# current project settings, then an optional local_settings module.
try:
    # obsolete location since 1.0.3 use `leonardo_site.settings`
    from leonardo_site.local.settings import *
    warnings.warn(
        'leonardo_site.local.settings is obsolete use new location')
except ImportError:
    pass
try:
    # full settings
    # TODO support configurable from local_settings
    # LEONARDO_PROJECT_NAME = 'leonardo_site'
    from leonardo_site.settings import *
except ImportError:
    pass
try:
    # local settings
    from local_settings import *
except ImportError:
    warnings.warn(
        'local_settings was not found in $PYTHONPATH !')
# In production, wrap the template loaders in the cached loader and turn
# template debugging off.
if not DEBUG:
    if VERSION[:2] >= (1, 8):
        TEMPLATES[0]['OPTIONS']['loaders'] = [
            ('django.template.loaders.cached.Loader', [
                'dbtemplates.loader.Loader',
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
                'horizon.loaders.TemplateLoader',
            ])]
        TEMPLATES[0]['OPTIONS']['debug'] = False
    else:
        # Debugging stuff
        TEMPLATE_DEBUG = DEBUG
        # NOTE(review): this pre-1.8 branch mutates TEMPLATES, which is only
        # defined in the Django >= 1.8 branch above -- it looks like it should
        # be updating TEMPLATE_LOADERS instead; confirm before relying on it.
        TEMPLATES[0]['OPTIONS']['loaders'] = [
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
            'horizon.loaders.TemplateLoader',
            'dbtemplates.loader.Loader'
        ]
        COMPRESS_ENABLED = False
# Core Leonardo apps are always present in APPS.
APPS = merge(APPS, default.core)
if 'media' in APPS:
    # Point django-filer at Leonardo's own Image model.
    FILER_IMAGE_MODEL = 'leonardo.module.media.models.Image'
try:
    from leonardo.conf.horizon import *
    from leonardo.conf.static import *
except Exception as e:
    warnings.warn(
        'Could not import static packages %s' % str(e))
# Optionally enable the leonardo_system app and flag its presence so the
# Horizon dashboard can show/hide the system management UI.
if LEONARDO_SYSTEM_MODULE:
    APPS = merge(APPS, ['leonardo_system'])
    HORIZON_CONFIG['system_module'] = True
else:
    HORIZON_CONFIG['system_module'] = False
# Load all Leonardo modules exactly once (before the Django app registry is
# populated) and fold each module's configuration into this settings module.
# Only the warning text is changed here ("Skiped" -> "Skipped"); the loading
# logic is order-sensitive and intentionally left untouched.
if not apps.ready:
    # load directly specified apps
    leonardo.get_app_modules(APPS)
    # propagate settings to leonardo
    leonardo.MODULES_AUTOLOAD = LEONARDO_MODULE_AUTO_INCLUDE
    # load all modules
    leonardo.load_modules()
    # just propagate all loaded modules to settings
    LEONARDO_MODULES = leonardo.get_modules()
    # iterate over sorted modules
    for mod, mod_cfg in LEONARDO_MODULES:
        try:
            # import all settings keys from module
            if module_has_submodule(mod, "settings"):
                try:
                    settings_mod = import_module(
                        '{0}.settings'.format(mod.__name__))
                    # Copy every public name from the module's settings into
                    # this module's namespace.
                    for k in dir(settings_mod):
                        if not k.startswith("_"):
                            val = getattr(settings_mod, k, None)
                            globals()[k] = val
                            locals()[k] = val
                except Exception as e:
                    warnings.warn(
                        'Exception "{}" raised during loading '
                        'settings from {}'.format(str(e), mod))
            # go through django keys and merge it to main settings
            for key in DJANGO_CONF.keys():
                updated_value = mod_cfg.get_value(key, globals()[key])
                globals()[key] = updated_value
                locals()[key] = updated_value
                # map value to leonardo but under our internal name
                setattr(leonardo, DJANGO_CONF[key], updated_value)
            if mod_cfg.urls_conf:
                MODULE_URLS[mod_cfg.urls_conf] = {'is_public': mod_cfg.public}
            # TODO move to utils.settings
            # support for one level nested in config dictionary
            for config_key, config_value in six.iteritems(mod_cfg.config):
                if isinstance(config_value, dict):
                    CONSTANCE_CONFIG_GROUPS.update({config_key: config_value})
                    # Flatten the nested group into the module config.
                    for c_key, c_value in six.iteritems(config_value):
                        mod_cfg.config[c_key] = c_value
                    # remove from main dict
                    mod_cfg.config.pop(config_key)
                else:
                    if isinstance(mod_cfg.optgroup, six.string_types):
                        CONSTANCE_CONFIG_GROUPS.update({
                            mod_cfg.optgroup: mod_cfg.config})
                    else:
                        if 'ungrouped' in CONSTANCE_CONFIG_GROUPS:
                            CONSTANCE_CONFIG_GROUPS[
                                'ungrouped'].update(mod_cfg.config)
                        else:
                            CONSTANCE_CONFIG_GROUPS['ungrouped'] = \
                                mod_cfg.config
            # import and update absolute overrides
            for model, method in six.iteritems(mod_cfg.absolute_url_overrides):
                try:
                    _mod = import_module(".".join(method.split('.')[:-1]))
                    ABSOLUTE_URL_OVERRIDES[model] = getattr(
                        _mod, method.split('.')[-1])
                except Exception as e:
                    raise e
            # Navigation extensions are imported purely for their side effects.
            for nav_extension in mod_cfg.navigation_extensions:
                try:
                    import_module(nav_extension)
                except ImportError:
                    pass
            CONSTANCE_CONFIG.update(mod_cfg.config)
            # Register the module's template dirs and context processors.
            if VERSION[:2] >= (1, 8):
                TEMPLATES[0]['DIRS'] = merge(
                    TEMPLATES[0]['DIRS'], mod_cfg.dirs)
                cp = TEMPLATES[0]['OPTIONS']['context_processors']
                TEMPLATES[0]['OPTIONS']['context_processors'] = merge(
                    cp, mod_cfg.context_processors)
            else:
                TEMPLATE_CONTEXT_PROCESSORS = merge(
                    TEMPLATE_CONTEXT_PROCESSORS, mod_cfg.context_processors)
                TEMPLATE_DIRS = merge(TEMPLATE_DIRS, mod_cfg.dirs)
            # collect grouped widgets
            if isinstance(mod_cfg.optgroup, six.string_types):
                if len(mod_cfg.widgets) > 0:
                    WIDGETS[mod_cfg.optgroup] = merge(
                        getattr(WIDGETS, mod_cfg.optgroup, []), mod_cfg.widgets)
            else:
                if len(mod_cfg.widgets) > 0 and DEBUG:
                    warnings.warn('You have ungrouped widgets'
                                  ', please specify your ``optgroup``'
                                  'which categorize your widgets in %s' % mod)
                if len(mod_cfg.widgets) > 0:
                    WIDGETS['ungrouped'] = merge(
                        getattr(WIDGETS, 'ungrouped', []), mod_cfg.widgets)
        except Exception as e:
            warnings.warn(
                'Exception "{}" raised during loading '
                'module {}'.format(str(e), mod))
else:
    # Typo fix in the user-facing warning: "Skiped" -> "Skipped".
    warnings.warn("Leonardo modules are already loaded. Skipped now.")
# Expose the collected widget registry on the leonardo singleton.
setattr(leonardo, 'widgets', WIDGETS)
# FINALLY OVERRIDE ALL
try:
    # full settings
    from leonardo_site.local.settings import *
except ImportError:
    pass
try:
    # full settings
    from leonardo_site.settings import *
except ImportError:
    pass
try:
    # local settings
    from local_settings import *
except ImportError:
    warnings.warn(
        'Missing local_settings !')
# and again merge core with others
APPS = merge(APPS, default.core)
# go through django keys and merge it to main settings
for key in DJANGO_CONF.keys():
    # map value to leonardo but under our internal name
    setattr(leonardo, DJANGO_CONF[key], globals()[key])
# Optional developer tooling: enable django-debug-toolbar when installed.
if DEBUG:
    try:
        import debug_toolbar
        INSTALLED_APPS = merge(INSTALLED_APPS, ['debug_toolbar'])
        from leonardo.conf.debug import *
    except ImportError:
        if DEBUG:
            warnings.warn('DEBUG is set to True but, DEBUG tools '
                          'is not installed please run '
                          '"pip install django-leonardo[debug]"')
# async messages
try:
    import async_messages
    INSTALLED_APPS = merge(INSTALLED_APPS, ['async_messages'])
    MIDDLEWARE_CLASSES = merge(MIDDLEWARE_CLASSES,
                               ['async_messages.middleware.AsyncMiddleware'])
except ImportError:
    pass
# django contrib redirects
if 'django.contrib.redirects' in INSTALLED_APPS:
    MIDDLEWARE_CLASSES += ['django.contrib.redirects.middleware.RedirectFallbackMiddleware']
# use js files instead of horizon
HORIZON_CONFIG['js_files'] = leonardo.js_files
HORIZON_CONFIG['js_compress_files'] = leonardo.js_compress_files
HORIZON_CONFIG['js_spec_files'] = leonardo.js_spec_files
HORIZON_CONFIG['css_files'] = leonardo.css_files
HORIZON_CONFIG['scss_files'] = leonardo.scss_files
HORIZON_CONFIG['angular_modules'] = leonardo.angular_modules
HORIZON_CONFIG['page_actions'] = leonardo.page_actions
HORIZON_CONFIG['widget_actions'] = leonardo.widget_actions
from leonardo.conf.static import find_static_files  # noqa
# populate HORIZON_CONFIG with auto-discovered JavaScript sources, mock files,
# specs files and external templates.
find_static_files(HORIZON_CONFIG)
leonardo.js_files = merge(HORIZON_CONFIG['js_files'], leonardo.js_files)
# path horizon config
from horizon import conf
conf.HORIZON_CONFIG = HORIZON_CONFIG
if DEBUG:
    logging.basicConfig(level=logging.DEBUG)
| bsd-3-clause |
valentin-krasontovitsch/ansible | lib/ansible/modules/remote_management/oneview/oneview_ethernet_network_facts.py | 125 | 4863 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_ethernet_network_facts
short_description: Retrieve the facts about one or more of the OneView Ethernet Networks
description:
- Retrieve the facts about one or more of the Ethernet Networks from OneView.
version_added: "2.4"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Ethernet Network name.
options:
description:
- "List with options to gather additional facts about an Ethernet Network and related resources.
Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather paginated and filtered facts about Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
params:
start: 1
count: 3
sort: 'name:descending'
filter: 'purpose=General'
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: Ethernet network name
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name with options
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: eth1
options:
- associatedProfiles
- associatedUplinkGroups
delegate_to: localhost
- debug: var=enet_associated_profiles
- debug: var=enet_associated_uplink_groups
'''
RETURN = '''
ethernet_networks:
description: Has all the OneView facts about the Ethernet Networks.
returned: Always, but can be null.
type: dict
enet_associated_profiles:
description: Has all the OneView facts about the profiles which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
enet_associated_uplink_groups:
description: Has all the OneView facts about the uplink sets which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class EthernetNetworkFactsModule(OneViewModuleBase):
    """Gather facts about OneView Ethernet Networks.

    When ``name`` is given, facts are limited to that network and the
    optional ``associatedProfiles`` / ``associatedUplinkGroups`` facts may
    also be collected; otherwise all networks matching ``params`` are
    returned.
    """

    argument_spec = {
        'name': {'type': 'str'},
        'options': {'type': 'list'},
        'params': {'type': 'dict'},
    }

    def __init__(self):
        super(EthernetNetworkFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
        self.resource_client = self.oneview_client.ethernet_networks

    def execute_module(self):
        """Entry point invoked by OneViewModuleBase.run()."""
        facts = {}
        name = self.module.params['name']
        if name:
            networks = self.resource_client.get_by('name', name)
            if self.module.params.get('options') and networks:
                facts = self.__optional_facts(networks[0])
        else:
            networks = self.resource_client.get_all(**self.facts_params)
        facts['ethernet_networks'] = networks
        return dict(changed=False, ansible_facts=facts)

    def __optional_facts(self, network):
        """Collect the extra facts requested via the ``options`` parameter."""
        facts = {}
        if self.options.get('associatedProfiles'):
            facts['enet_associated_profiles'] = self.__profiles_using(network)
        if self.options.get('associatedUplinkGroups'):
            facts['enet_associated_uplink_groups'] = self.__uplink_groups_using(network)
        return facts

    def __profiles_using(self, network):
        """Return the server profiles that use the given Ethernet network."""
        profile_uris = self.resource_client.get_associated_profiles(network['uri'])
        return [self.oneview_client.server_profiles.get(uri) for uri in profile_uris]

    def __uplink_groups_using(self, network):
        """Return the uplink sets that use the given Ethernet network."""
        uplink_uris = self.resource_client.get_associated_uplink_groups(network['uri'])
        return [self.oneview_client.uplink_sets.get(uri) for uri in uplink_uris]
def main():
    """CLI entry point: build the facts module and hand control to it."""
    module = EthernetNetworkFactsModule()
    module.run()


if __name__ == '__main__':
    main()
| gpl-3.0 |
pigeonflight/strider-plone | docker/appengine/lib/django-1.3/django/contrib/messages/storage/user_messages.py | 308 | 2303 | """
Storages used to assist in the deprecation of contrib.auth User messages.
"""
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
class UserMessagesStorage(BaseStorage):
    """
    Read-only storage backed by the legacy ``user.message_set`` API.

    It can only retrieve and delete messages that were attached to the
    authenticated contrib.auth User; attempting to add raises.
    """
    session_key = '_messages'

    def _get_messages_queryset(self):
        """
        Return the QuerySet of the request user's messages, or ``None``
        when ``request.user`` is not a contrib.auth User.
        """
        user = getattr(self.request, 'user', None)
        if isinstance(user, User):
            return user._message_set.all()
        # Implicitly returns None for anonymous / non-User request.user.

    def add(self, *args, **kwargs):
        raise NotImplementedError('This message storage is read-only.')

    def _get(self, *args, **kwargs):
        """
        Retrieve the list of messages assigned to the User.  This backend
        never stores anything, so ``all_retrieved`` is always False.
        """
        queryset = self._get_messages_queryset()
        if queryset is None:
            # Read-only, optional storage: hand back an empty list (not
            # None) so FallbackStorage keeps consulting the other storages.
            return [], False
        return [Message(constants.INFO, m.message) for m in queryset], False

    def _store(self, messages, *args, **kwargs):
        """
        Delete any messages assigned to the User and return *messages*
        untouched (nothing is ever stored in this read-only storage).
        """
        queryset = self._get_messages_queryset()
        if queryset is not None:
            queryset.delete()
        return messages
class LegacyFallbackStorage(FallbackStorage):
    """
    Works like ``FallbackStorage`` but also handles retrieving (and clearing)
    contrib.auth User messages.
    """
    # Consult the legacy user-message storage first, then the standard chain.
    storage_classes = (UserMessagesStorage,) + FallbackStorage.storage_classes
| mit |
AlperSaltabas/OR_Tools_Google_API | examples/python/labeled_dice.py | 34 | 4106 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Labeled dice problem in Google CP Solver.
From Jim Orlin 'Colored letters, labeled dice: a logic puzzle'
http://jimorlin.wordpress.com/2009/02/17/colored-letters-labeled-dice-a-logic-puzzle/
'''
My daughter Jenn bough a puzzle book, and showed me a cute puzzle. There
are 13 words as follows: BUOY, CAVE, CELT, FLUB, FORK, HEMP, JUDY,
JUNK, LIMN, QUIP, SWAG, VISA, WISH.
There are 24 different letters that appear in the 13 words. The question
is: can one assign the 24 letters to 4 different cubes so that the
four letters of each word appears on different cubes. (There is one
letter from each word on each cube.) It might be fun for you to try
it. I'll give a small hint at the end of this post. The puzzle was
created by Humphrey Dudley.
'''
Jim Orlin's followup 'Update on Logic Puzzle':
http://jimorlin.wordpress.com/2009/02/21/update-on-logic-puzzle/
Compare with the following models:
* ECLiPSe: http://hakank.org/eclipse/labeled_dice.ecl
* Comet : http://www.hakank.org/comet/labeled_dice.co
* Gecode : http://hakank.org/gecode/labeled_dice.cpp
* SICStus: http://hakank.org/sicstus/labeled_dice.pl
* Zinc : http://hakank.org/minizinc/labeled_dice.zinc
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main():
  """Build the CP model for the labeled-dice puzzle, enumerate all solutions
  and print search statistics (Python 2 / or-tools pywrapcp)."""
  # Create the solver.
  solver = pywrapcp.Solver("Labeled dice")
  #
  # data
  #
  n = 4
  m = 24
  A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, Y = (
      range(m))
  letters = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
             "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "Y"]
  num_words = 13
  words = [
      [B, U, O, Y],
      [C, A, V, E],
      [C, E, L, T],
      [F, L, U, B],
      [F, O, R, K],
      [H, E, M, P],
      [J, U, D, Y],
      [J, U, N, K],
      [L, I, M, N],
      [Q, U, I, P],
      [S, W, A, G],
      [V, I, S, A],
      [W, I, S, H]
  ]
  #
  # declare variables
  #
  # dice[i] is the die (0..3) on which letter i is printed.
  dice = [solver.IntVar(0, n - 1, "dice[%i]" % i) for i in range(m)]
  #
  # constraints
  #
  # the letters in a word must be on a different die
  for i in range(num_words):
    solver.Add(solver.AllDifferent([dice[words[i][j]] for j in range(n)]))
  # there must be exactly 6 letters of each die
  for i in range(n):
    # b[j] == 1 iff letter j is assigned to die i.
    b = [solver.IsEqualCstVar(dice[j], i) for j in range(m)]
    solver.Add(solver.Sum(b) == 6)
  #
  # solution and search
  #
  solution = solver.Assignment()
  solution.Add(dice)
  db = solver.Phase(dice,
                    solver.CHOOSE_FIRST_UNBOUND,
                    solver.ASSIGN_MIN_VALUE)
  #
  # result
  #
  solver.NewSearch(db)
  num_solutions = 0
  while solver.NextSolution():
    num_solutions += 1
    # print "dice:", [(letters[i],dice[i].Value()) for i in range(m)]
    for d in range(n):
      print "die %i:" % d,
      for i in range(m):
        if dice[i].Value() == d:
          print letters[i],
      print
    print "The words with the cube label:"
    for i in range(num_words):
      for j in range(n):
        print "%s (%i)" % (letters[words[i][j]], dice[words[i][j]].Value()),
      print
    print
  solver.EndSearch()
  print
  print "num_solutions:", num_solutions
  print "failures:", solver.Failures()
  print "branches:", solver.Branches()
  print "WallTime:", solver.WallTime()
if __name__ == "__main__":
  main()
| apache-2.0 |
Ictp/indico | bin/utils/changeCreatorToReservation.py | 1 | 4132 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
import getopt, sys
from indico.core.db import DBMgr
from MaKaC.plugins.RoomBooking.default.factory import Factory
from MaKaC.plugins.RoomBooking.default.reservation import Reservation
from MaKaC.rb_reservation import ReservationBase
from MaKaC.rb_location import CrossLocationQueries
from MaKaC.user import AvatarHolder
def changeCreator(oldUser, newUser):
    """Move every room-booking reservation created by *oldUser* to *newUser*.

    Rewrites the ``createdBy`` field of each matching reservation and moves
    the corresponding entries in the per-user reservation index, then
    commits the room-booking DAL transaction.  Both ids must exist in the
    AvatarHolder, otherwise the function prints a message and returns.
    (Python 2 / ZODB script.)
    """
    dbi = DBMgr.getInstance()
    dbi.startRequest()
    Factory.getDALManager().connect()
    # check if the users exist
    if AvatarHolder().getById(oldUser) is None:
        print "There is no user with id %s"%oldUser
        return
    if AvatarHolder().getById(newUser) is None:
        print "There is no user with id %s"%newUser
        return
    resvEx = ReservationBase()
    resvEx.createdBy = oldUser
    allResv4OldUser = CrossLocationQueries.getReservations( resvExample = resvEx)
    if allResv4OldUser == []:
        print "No reservations for user %s"%oldUser
        return
#    resvs = ReservationBase.getReservations()
#    allResv4OldUser = [x for x in allResv if x.createdBy == oldUser]
    # getReservations() may return a single object; normalise to a list.
    if type(allResv4OldUser) is not list:
        allResv4OldUser = [allResv4OldUser]
    # Modify reservations
    for r in allResv4OldUser:
        r.createdBy = newUser
        #print r.createdBy, r.id
    # Update index
    userReservationsIndexBTree = Reservation.getUserReservationsIndexRoot()
    newUserResvs = userReservationsIndexBTree.get( newUser )
    if newUserResvs == None:
        newUserResvs = [] # New list of reservations for this room
        userReservationsIndexBTree.insert( newUser, newUserResvs )
    newUserResvs.extend( allResv4OldUser )
    # Reassign a copy so the BTree sees a new value for the key.
    userReservationsIndexBTree[newUser] = newUserResvs[:]
    if userReservationsIndexBTree.has_key(oldUser):
        userReservationsIndexBTree.pop(oldUser)
    # Mark the BTree as changed so ZODB persists the in-place mutation.
    userReservationsIndexBTree._p_changed = 1
    # close DB connection
    Factory.getDALManager().commit()
    Factory.getDALManager().disconnect()
    dbi.endRequest()
    print "%s reservations have moved from creator %s to creator %s" % (len(allResv4OldUser), oldUser, newUser)
def listResv4User(user):
dbi = DBMgr.getInstance()
dbi.startRequest()
Factory.getDALManager().connect()
resvEx = ReservationBase()
resvEx.createdBy = user
allResv = CrossLocationQueries.getReservations( resvExample = resvEx)
print "User %s has %s resevations created by him/her"%(user, len(allResv))
Factory.getDALManager().disconnect()
dbi.endRequest()
def usage():
    # Print a short CLI synopsis: -o <old user id> -n <new user id>.
    print "Usage: %s -o 44 -n 55" % sys.argv[0]
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "o:n:l:", ["old=", "new=", "list="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(0)
oldUser = None
newUser = None
userToList = None
for o, a in opts:
if o == "-o":
oldUser = str(a)
elif o == "-n":
newUser = str(a)
elif o == "-l":
userToList = str(a)
else:
assert False, "unhandled option"
if userToList:
listResv4User(userToList)
elif oldUser and newUser:
changeCreator(oldUser, newUser)
else:
usage()
sys.exit(0)
if __name__ == "__main__":
main()
| gpl-3.0 |
shownomercy/django | tests/gis_tests/geo3d/tests.py | 199 | 17484 | from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.db.models import Extent3D, Union
from django.contrib.gis.db.models.functions import (
AsGeoJSON, AsKML, Length, Perimeter, Scale, Translate,
)
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import (
RemovedInDjango20Warning, RemovedInDjango110Warning,
)
from .models import (
City3D, Interstate2D, Interstate3D, InterstateProj2D, InterstateProj3D,
MultiPoint3D, Point2D, Point3D, Polygon2D, Polygon3D,
)
# LayerMapping needs GDAL; only import it when the bindings are available.
if HAS_GDAL:
    from django.contrib.gis.utils import LayerMapping, LayerMapError

# Paths to the fixture data shipped with the GIS test suite.
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')

# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
    ('Houston', (-95.363151, 29.763374, 18)),
    ('Dallas', (-96.801611, 32.782057, 147)),
    ('Oklahoma City', (-97.521157, 34.464642, 380)),
    ('Wellington', (174.783117, -41.315268, 14)),
    ('Pueblo', (-104.609252, 38.255001, 1433)),
    ('Lawrence', (-95.235060, 38.971823, 251)),
    ('Chicago', (-87.650175, 41.850385, 181)),
    ('Victoria', (-123.305196, 48.462611, 15)),
)

# Reference mapping of city name to its altitude (Z value).
city_dict = {name: coords for name, coords in city_data}

# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
# Each entry is (name, 3D WKT linestring, expected tuple of Z values).
interstate_data = (
    ('I-45',
     'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
     '-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
     '-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
     '-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
     '-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
     '-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
     '-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
     '-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
     '-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
     '-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
     '-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
     (11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
      15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
      15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
      15.435),
     ),
)

# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above). Tuple of (2D WKT, per-vertex Z values).
bbox_data = (
    'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
    '942051.75 4208366.38,941527.97 4225693.20))',
    (21.71, 13.21, 9.12, 16.40, 21.71)
)
class Geo3DLoadingHelper(object):
    """Mixin providing fixture loaders shared by the 3D geometry test cases."""

    def _load_interstate_data(self):
        """Create 2D/3D and geographic/projected variants of each interstate."""
        for name, wkt, _expected_z in interstate_data:
            geo_line = GEOSGeometry(wkt, srid=4269)
            # Drop the Z coordinate to build the flat (2D) counterpart.
            flat_line = LineString([coord[:2] for coord in geo_line.coords],
                                   srid=4269)
            Interstate3D.objects.create(name=name, line=geo_line)
            InterstateProj3D.objects.create(name=name, line=geo_line)
            Interstate2D.objects.create(name=name, line=flat_line)
            InterstateProj2D.objects.create(name=name, line=flat_line)

    def _load_city_data(self):
        """Insert one City3D row per entry in ``city_data``."""
        for name, coords in city_data:
            City3D.objects.create(name=name, point=Point(*coords, srid=4326))

    def _load_polygon_data(self):
        """Create matching 2D and 3D bounding-box polygons."""
        wkt, z_values = bbox_data
        flat_poly = GEOSGeometry(wkt, srid=32140)
        # Zip each 2D exterior-ring vertex with its elevation to lift it to 3D.
        solid_poly = Polygon(
            tuple((x, y, z) for (x, y), z in zip(flat_poly[0].coords, z_values)),
            srid=32140,
        )
        Polygon2D.objects.create(name='2D BBox', poly=flat_poly)
        Polygon3D.objects.create(name='3D BBox', poly=solid_poly)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_storage")
class Geo3DTest(Geo3DLoadingHelper, TestCase):
    """
    Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
    tries to test the features that can handle 3D and that are also
    available within GeoDjango. For more information, see the PostGIS docs
    on the routines that support 3D:

    http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
    """

    def test_3d_hasz(self):
        """
        Make sure data is 3D and has expected Z values -- shouldn't change
        because of coordinate system.
        """
        self._load_interstate_data()
        for name, line, exp_z in interstate_data:
            interstate = Interstate3D.objects.get(name=name)
            interstate_proj = InterstateProj3D.objects.get(name=name)
            # Z values must survive storage in both geographic and
            # projected coordinate systems.
            for i in [interstate, interstate_proj]:
                self.assertTrue(i.line.hasz)
                self.assertEqual(exp_z, tuple(i.line.z))

        self._load_city_data()
        for name, pnt_data in city_data:
            city = City3D.objects.get(name=name)
            # The third component of the fixture tuple is the altitude.
            z = pnt_data[2]
            self.assertTrue(city.point.hasz)
            self.assertEqual(z, city.point.z)

    def test_3d_polygons(self):
        """
        Test the creation of polygon 3D models.
        """
        self._load_polygon_data()
        p3d = Polygon3D.objects.get(name='3D BBox')
        self.assertTrue(p3d.poly.hasz)
        self.assertIsInstance(p3d.poly, Polygon)
        self.assertEqual(p3d.poly.srid, 32140)

    def test_3d_layermapping(self):
        """
        Testing LayerMapping on 3D models.
        """
        point_mapping = {'point': 'POINT'}
        mpoint_mapping = {'mpoint': 'MULTIPOINT'}

        # The VRT is 3D, but should still be able to map sans the Z.
        lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point2D.objects.count())

        # The city shapefile is 2D, and won't be able to fill the coordinates
        # in the 3D model -- thus, a LayerMapError is raised.
        self.assertRaises(LayerMapError, LayerMapping,
                          Point3D, city_file, point_mapping, transform=False)

        # 3D model should take 3D data just fine.
        lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point3D.objects.count())

        # Making sure LayerMapping.make_multi works right, by converting
        # a Point25D into a MultiPoint25D.
        lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
        lm.save()
        self.assertEqual(3, MultiPoint3D.objects.count())

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_kml(self):
        """
        Test GeoQuerySet.kml() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.kml(precision=6).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_geojson(self):
        """
        Test GeoQuerySet.geojson() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.geojson(precision=6).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))

    @skipUnlessDBFeature("supports_3d_functions")
    def test_union(self):
        """
        Testing the Union aggregate of 3D models.
        """
        # PostGIS query that returned the reference EWKT for this test:
        # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
        self._load_city_data()
        ref_ewkt = (
            'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,'
            '-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,'
            '-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
        )
        ref_union = GEOSGeometry(ref_ewkt)
        union = City3D.objects.aggregate(Union('point'))['point__union']
        self.assertTrue(union.hasz)
        # Ordering of points in the resulting geometry may vary between
        # implementations, so compare as sets of per-point EWKT strings.
        self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})

    @skipUnlessDBFeature("supports_3d_functions")
    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_extent(self):
        """
        Testing the Extent3D aggregate for 3D models.
        """
        self._load_city_data()
        # `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
        ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
        extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
        extent2 = City3D.objects.extent3d()

        def check_extent3d(extent3d, tol=6):
            # Compare each component of the (xmin, ymin, zmin, xmax, ymax,
            # zmax) tuple within `tol` decimal places.
            for ref_val, ext_val in zip(ref_extent3d, extent3d):
                self.assertAlmostEqual(ref_val, ext_val, tol)

        for e3d in [extent1, extent2]:
            check_extent3d(e3d)
        # An empty queryset yields no extent at all.
        self.assertIsNone(City3D.objects.none().extent3d())
        self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d'])

    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnlessDBFeature("supports_3d_functions")
    def test_perimeter(self):
        """
        Testing GeoQuerySet.perimeter() on 3D fields.
        """
        self._load_polygon_data()
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        self.assertAlmostEqual(ref_perim_2d,
                               Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
                               tol)
        self.assertAlmostEqual(ref_perim_3d,
                               Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
                               tol)

    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnlessDBFeature("supports_3d_functions")
    def test_length(self):
        """
        Testing GeoQuerySet.length() on 3D fields.
        """
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        #  FROM geo3d_interstate[2d|3d];`
        self._load_interstate_data()
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        self.assertAlmostEqual(ref_length_2d,
                               Interstate2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               Interstate3D.objects.length().get(name='I-45').length.m,
                               tol)

        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        self.assertAlmostEqual(ref_length_2d,
                               InterstateProj2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               InterstateProj3D.objects.length().get(name='I-45').length.m,
                               tol)

    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnlessDBFeature("supports_3d_functions")
    def test_scale(self):
        """
        Testing GeoQuerySet.scale() on Z values.
        """
        self._load_city_data()
        # Mapping of City name to reference Z values.
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.scale(1.0, 1.0, zscale):
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)

    @ignore_warnings(category=RemovedInDjango20Warning)
    @skipUnlessDBFeature("supports_3d_functions")
    def test_translate(self):
        """
        Testing GeoQuerySet.translate() on Z values.
        """
        self._load_city_data()
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.translate(0, 0, ztrans):
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_functions")
class Geo3DFunctionsTests(Geo3DLoadingHelper, TestCase):
    """
    Function-based (annotate) counterparts of the Geo3DTest GeoQuerySet
    method tests, using the django.contrib.gis.db.models.functions API.
    """

    def test_kml(self):
        """
        Test KML() function with Z values.
        """
        self._load_city_data()
        h = City3D.objects.annotate(kml=AsKML('point', precision=6)).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))

    def test_geojson(self):
        """
        Test GeoJSON() function with Z values.
        """
        self._load_city_data()
        h = City3D.objects.annotate(geojson=AsGeoJSON('point', precision=6)).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))

    def test_perimeter(self):
        """
        Testing Perimeter() function on 3D fields.
        """
        self._load_polygon_data()
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        poly2d = Polygon2D.objects.annotate(perimeter=Perimeter('poly')).get(name='2D BBox')
        self.assertAlmostEqual(ref_perim_2d, poly2d.perimeter.m, tol)
        poly3d = Polygon3D.objects.annotate(perimeter=Perimeter('poly')).get(name='3D BBox')
        self.assertAlmostEqual(ref_perim_3d, poly3d.perimeter.m, tol)

    def test_length(self):
        """
        Testing Length() function on 3D fields.
        """
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        #  FROM geo3d_interstate[2d|3d];`
        self._load_interstate_data()
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        inter2d = Interstate2D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
        inter3d = Interstate3D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)

        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        inter2d = InterstateProj2D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
        inter3d = InterstateProj3D.objects.annotate(length=Length('line')).get(name='I-45')
        self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)

    def test_scale(self):
        """
        Testing Scale() function on Z values.
        """
        self._load_city_data()
        # Mapping of City name to reference Z values.
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.annotate(scale=Scale('point', 1.0, 1.0, zscale)):
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)

    def test_translate(self):
        """
        Testing Translate() function on Z values.
        """
        self._load_city_data()
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.annotate(translate=Translate('point', 0, 0, ztrans)):
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
| bsd-3-clause |
da1z/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/disasm.py | 102 | 24409 | #!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Binary code disassembly.
@group Disassembler loader:
Disassembler, Engine
@group Disassembler engines:
BeaEngine, CapstoneEngine, DistormEngine,
LibdisassembleEngine, PyDasmEngine
"""
from __future__ import with_statement
__revision__ = "$Id$"

# Public API of this module: the factory class plus one adaptor per engine.
__all__ = [
    'Disassembler',
    'Engine',
    'BeaEngine',
    'CapstoneEngine',
    'DistormEngine',
    'LibdisassembleEngine',
    'PyDasmEngine',
]

from winappdbg.textio import HexDump
from winappdbg import win32

import ctypes
import warnings

# lazy imports
# Each engine adaptor imports its backing library on first use (inside
# _import_dependencies) and caches the module object in one of these
# globals, so importing this module never requires any disassembler
# library to actually be installed.
BeaEnginePython = None
distorm3 = None
pydasm = None
libdisassemble = None
capstone = None
#==============================================================================
class Engine (object):
    """
    Base class for disassembly engine adaptors.

    @type name: str
    @cvar name: Engine name to use with the L{Disassembler} class.

    @type desc: str
    @cvar desc: User friendly name of the disassembler engine.

    @type url: str
    @cvar url: Download URL.

    @type supported: set(str)
    @cvar supported: Set of supported processor architectures.
        For more details see L{win32.version._get_arch}.

    @type arch: str
    @ivar arch: Name of the processor architecture.
    """

    name = "<insert engine name here>"
    desc = "<insert engine description here>"
    url = "<insert download url here>"
    supported = set()

    def __init__(self, arch = None):
        """
        @type  arch: str
        @param arch: Name of the processor architecture.
            If not provided the current processor architecture is assumed.
            For more details see L{win32.version._get_arch}.

        @raise NotImplementedError: This disassembler doesn't support the
            requested processor architecture, or its dependencies are not
            installed.
        """
        self.arch = self._validate_arch(arch)
        try:
            self._import_dependencies()
        except ImportError:
            # Translate a missing backing library into the documented
            # NotImplementedError contract, with a download hint.
            msg = "%s is not installed or can't be found. Download it from: %s"
            msg = msg % (self.name, self.url)
            raise NotImplementedError(msg)

    def _validate_arch(self, arch = None):
        """
        @type  arch: str
        @param arch: Name of the processor architecture.
            If not provided the current processor architecture is assumed.
            For more details see L{win32.version._get_arch}.

        @rtype:  str
        @return: Name of the processor architecture.

        @raise NotImplementedError: This disassembler doesn't support the
            requested processor architecture.
        """

        # Use the default architecture if none specified.
        if not arch:
            arch = win32.arch

        # Validate the architecture.
        if arch not in self.supported:
            msg = "The %s engine cannot decode %s code."
            msg = msg % (self.name, arch)
            raise NotImplementedError(msg)

        # Return the architecture.
        return arch

    def _import_dependencies(self):
        """
        Loads the dependencies for this disassembler.

        @raise ImportError: This disassembler cannot find or load the
            necessary dependencies to make it work.
        """
        # BUGFIX: this previously raised SyntaxError, which is reserved for
        # parser errors and is not part of this class's documented contract.
        # NotImplementedError is the conventional signal for an abstract
        # method, and is what L{Disassembler}'s engine probing catches.
        raise NotImplementedError("Subclasses MUST implement this method!")

    def decode(self, address, code):
        """
        @type  address: int
        @param address: Memory address where the code was read from.

        @type  code: str
        @param code: Machine code to disassemble.

        @rtype:  list of tuple( long, int, str, str )
        @return: List of tuples. Each tuple represents an assembly instruction
            and contains:
             - Memory address of instruction.
             - Size of instruction in bytes.
             - Disassembly line of instruction.
             - Hexadecimal dump of instruction.

        @raise NotImplementedError: This disassembler could not be loaded.
            This may be due to missing dependencies.
        """
        raise NotImplementedError()
#==============================================================================
class BeaEngine (Engine):
    """
    Integration with the BeaEngine disassembler by Beatrix.

    @see: U{https://sourceforge.net/projects/winappdbg/files/additional%20packages/BeaEngine/}
    """

    name = "BeaEngine"
    desc = "BeaEngine disassembler by Beatrix"
    url = "https://sourceforge.net/projects/winappdbg/files/additional%20packages/BeaEngine/"

    supported = set((
        win32.ARCH_I386,
        win32.ARCH_AMD64,
    ))

    def _import_dependencies(self):

        # Load the BeaEngine ctypes wrapper.
        # The module object is cached in a module-level global so the
        # import only happens once per process.
        global BeaEnginePython
        if BeaEnginePython is None:
            import BeaEnginePython

    def decode(self, address, code):
        """
        Disassemble `code` as read from `address`; see L{Engine.decode}
        for the return format. Bytes that cannot be decoded are emitted
        as "db XXh" pseudo-instructions.
        """
        addressof = ctypes.addressof

        # Instance the code buffer.
        buffer = ctypes.create_string_buffer(code)
        buffer_ptr = addressof(buffer)

        # Instance the disassembler structure, pointing its instruction
        # pointer (EIP) at the start of the buffer and marking the end of
        # the readable region (SecurityBlock).
        Instruction = BeaEnginePython.DISASM()
        Instruction.VirtualAddr = address
        Instruction.EIP = buffer_ptr
        Instruction.SecurityBlock = buffer_ptr + len(code)
        if self.arch == win32.ARCH_I386:
            Instruction.Archi = 0
        else:
            # NOTE(review): 0x40 presumably selects 64-bit decoding in
            # BeaEngine — confirm against the BeaEngine headers.
            Instruction.Archi = 0x40
        Instruction.Options = ( BeaEnginePython.Tabulation      +
                                BeaEnginePython.NasmSyntax      +
                                BeaEnginePython.SuffixedNumeral +
                                BeaEnginePython.ShowSegmentRegs )

        # Prepare for looping over each instruction.
        # Bind frequently used attributes to locals to keep the loop tight.
        result         = []
        Disasm         = BeaEnginePython.Disasm
        InstructionPtr = addressof(Instruction)
        hexdump        = HexDump.hexadecimal
        append         = result.append
        OUT_OF_BLOCK   = BeaEnginePython.OUT_OF_BLOCK
        UNKNOWN_OPCODE = BeaEnginePython.UNKNOWN_OPCODE

        # For each decoded instruction...
        while True:

            # Calculate the current offset into the buffer.
            offset = Instruction.EIP - buffer_ptr

            # If we've gone past the buffer, break the loop.
            if offset >= len(code):
                break

            # Decode the current instruction.
            InstrLength = Disasm(InstructionPtr)

            # If BeaEngine detects we've gone past the buffer, break the loop.
            if InstrLength == OUT_OF_BLOCK:
                break

            # The instruction could not be decoded.
            if InstrLength == UNKNOWN_OPCODE:

                # Output a single byte as a "db" instruction.
                char = "%.2X" % ord(buffer[offset])
                result.append((
                    Instruction.VirtualAddr,
                    1,
                    "db %sh" % char,
                    char,
                ))
                Instruction.VirtualAddr += 1
                Instruction.EIP += 1

            # The instruction was decoded but reading past the buffer's end.
            # This can happen when the last instruction is a prefix without an
            # opcode. For example: decode(0, '\x66')
            elif offset + InstrLength > len(code):

                # Output each byte as a "db" instruction.
                # NOTE(review): this slice spans len(code) bytes starting at
                # `offset` rather than only the remaining bytes; it appears to
                # rely on the outer while-condition to terminate — confirm
                # this is intended before touching it.
                for char in buffer[ offset : offset + len(code) ]:
                    char = "%.2X" % ord(char)
                    result.append((
                        Instruction.VirtualAddr,
                        1,
                        "db %sh" % char,
                        char,
                    ))
                    Instruction.VirtualAddr += 1
                    Instruction.EIP += 1

            # The instruction was decoded correctly.
            else:

                # Output the decoded instruction.
                append((
                    Instruction.VirtualAddr,
                    InstrLength,
                    Instruction.CompleteInstr.strip(),
                    hexdump(buffer.raw[offset:offset+InstrLength]),
                ))
                Instruction.VirtualAddr += InstrLength
                Instruction.EIP += InstrLength

        # Return the list of decoded instructions.
        return result
#==============================================================================
class DistormEngine(Engine):
    """
    Integration with the diStorm disassembler by Gil Dabah.

    @see: U{https://code.google.com/p/distorm3}
    """

    name = "diStorm"
    desc = "diStorm disassembler by Gil Dabah"
    url = "https://code.google.com/p/distorm3"

    supported = set((
        win32.ARCH_I386,
        win32.ARCH_AMD64,
    ))

    def _import_dependencies(self):

        # Bind the distorm3 module lazily, caching it at module level.
        # Older releases shipped under the name "distorm", so fall back
        # to that if the new package name is missing.
        global distorm3
        if distorm3 is None:
            try:
                import distorm3
            except ImportError:
                import distorm as distorm3

        # Cache the decoder callable for decode().
        self.__decode = distorm3.Decode

        # Pick the bits-mode flag matching the validated architecture.
        mode_flags = {
            win32.ARCH_I386:  distorm3.Decode32Bits,
            win32.ARCH_AMD64: distorm3.Decode64Bits,
        }
        self.__flag = mode_flags[self.arch]

    def decode(self, address, code):
        # diStorm already returns (address, size, text, hexdump) tuples.
        return self.__decode(address, code, self.__flag)
#==============================================================================
class PyDasmEngine(Engine):
    """
    Integration with PyDasm: Python bindings to libdasm.

    @see: U{https://code.google.com/p/libdasm/}
    """

    name = "PyDasm"
    desc = "PyDasm: Python bindings to libdasm"
    url = "https://code.google.com/p/libdasm/"

    supported = set((
        win32.ARCH_I386,
    ))

    def _import_dependencies(self):

        # Lazily bind the libdasm wrapper module (cached at module level).
        global pydasm
        if pydasm is None:
            import pydasm

    def decode(self, address, code):
        # Walk the buffer one instruction at a time, collecting
        # (address, size, disassembly, hexdump) tuples.
        decoded = []
        pos = 0
        while pos < len(code):

            # Feed up to 32 bytes to the decoder (the x86 maximum
            # instruction length is well below that).
            insn = pydasm.get_instruction(code[pos:pos + 32],
                                          pydasm.MODE_32)

            # Memory address of the instruction being decoded.
            here = address + pos

            if insn and insn.length + pos <= len(code):
                # Valid opcode that fits in the remaining buffer.
                text = pydasm.get_instruction_string(insn,
                                                     pydasm.FORMAT_INTEL,
                                                     here)
                size = insn.length
                dump = HexDump.hexadecimal(code[pos:pos + size])
            else:
                # Illegal opcode, or an opcode longer than what is left of
                # the buffer: emit one raw byte as a "db" pseudo-instruction.
                dump = '%.2X' % ord(code[pos])
                text = 'db 0x%s' % dump
                size = 1

            decoded.append((here, size, text, dump))

            # Advance to the next instruction.
            pos += size

        # Return the list of decoded instructions.
        return decoded
#==============================================================================
class LibdisassembleEngine(Engine):
    """
    Integration with Immunity libdisassemble.

    @see: U{http://www.immunitysec.com/resources-freesoftware.shtml}
    """

    name = "Libdisassemble"
    desc = "Immunity libdisassemble"
    url = "http://www.immunitysec.com/resources-freesoftware.shtml"

    supported = set((
        win32.ARCH_I386,
    ))

    def _import_dependencies(self):

        # Libdisassemble ships without an installer or an __init__.py, so
        # users install it by hand wherever they like. Try the packaged
        # layout first, then a bare copy-and-paste of the files.
        global libdisassemble
        if libdisassemble is None:
            try:
                import libdisassemble.disassemble as libdisassemble
            except ImportError:
                import disassemble as libdisassemble

    def decode(self, address, code):
        # Decode sequentially, emitting one
        # (address, size, disassembly, hexdump) tuple per instruction.
        instructions = []
        pos = 0
        while pos < len(code):

            # Feed up to 32 bytes to the decoder.
            op = libdisassemble.Opcode(code[pos:pos + 32])
            size = op.getSize()

            instructions.append((
                address + pos,
                size,
                op.printOpcode('INTEL'),
                HexDump.hexadecimal(code[pos:pos + size]),
            ))

            # Advance to the next instruction.
            pos += size

        # Return the list of decoded instructions.
        return instructions
#==============================================================================
class CapstoneEngine (Engine):
    """
    Integration with the Capstone disassembler by Nguyen Anh Quynh.

    @see: U{http://www.capstone-engine.org/}
    """

    name = "Capstone"
    desc = "Capstone disassembler by Nguyen Anh Quynh"
    url = "http://www.capstone-engine.org/"

    supported = set((
        win32.ARCH_I386,
        win32.ARCH_AMD64,
        win32.ARCH_THUMB,
        win32.ARCH_ARM,
        win32.ARCH_ARM64,
    ))

    def _import_dependencies(self):

        # Load the Capstone bindings (cached in a module-level global).
        global capstone
        if capstone is None:
            import capstone

        # Load the (arch, mode) constant pair for the requested architecture.
        self.__constants = {
            win32.ARCH_I386:
                (capstone.CS_ARCH_X86,   capstone.CS_MODE_32),
            win32.ARCH_AMD64:
                (capstone.CS_ARCH_X86,   capstone.CS_MODE_64),
            win32.ARCH_THUMB:
                (capstone.CS_ARCH_ARM,   capstone.CS_MODE_THUMB),
            win32.ARCH_ARM:
                (capstone.CS_ARCH_ARM,   capstone.CS_MODE_ARM),
            win32.ARCH_ARM64:
                (capstone.CS_ARCH_ARM64, capstone.CS_MODE_ARM),
        }

        # Test for the bug in early versions of Capstone.
        # If found, warn the user about it.
        # (Early bindings returned raw structures instead of CsInsn objects;
        # decode() below compensates when self.__bug is set.)
        try:
            self.__bug = not isinstance(
                capstone.cs_disasm_quick(
                    capstone.CS_ARCH_X86, capstone.CS_MODE_32, "\x90", 1)[0],
                capstone.capstone.CsInsn)
        except AttributeError:
            self.__bug = False
        if self.__bug:
            warnings.warn(
                "This version of the Capstone bindings is unstable,"
                " please upgrade to a newer one!",
                RuntimeWarning, stacklevel=4)

    def decode(self, address, code):
        """
        Disassemble `code` as read from `address`; see L{Engine.decode}
        for the return format. Undecodable bytes are emitted as "db"/"dcb"
        pseudo-instructions depending on the architecture.
        """

        # Get the constants for the requested architecture.
        arch, mode = self.__constants[self.arch]

        # Get the decoder function outside the loop.
        decoder = capstone.cs_disasm_quick

        # If the buggy version of the bindings are being used, we need to catch
        # all exceptions broadly. If not, we only need to catch CsError.
        if self.__bug:
            CsError = Exception
        else:
            CsError = capstone.CsError

        # Create the variables for the instruction length, mnemonic and
        # operands. That way they won't be created within the loop,
        # minimizing the chances data might be overwritten.
        # This only makes sense for the buggy vesion of the bindings, normally
        # memory accesses are safe).
        length = mnemonic = op_str = None

        # For each instruction...
        result = []
        offset = 0
        while offset < len(code):

            # Disassemble a single instruction, because disassembling multiple
            # instructions may cause excessive memory usage (Capstone allocates
            # approximately 1K of metadata per each decoded instruction).
            instr = None
            try:
                instr = decoder(
                    arch, mode, code[offset:offset+16], address+offset, 1)[0]
            except IndexError:
                pass   # No instructions decoded.
            except CsError:
                pass   # Any other error.

            # On success add the decoded instruction.
            if instr is not None:

                # Get the instruction length, mnemonic and operands.
                # Copy the values quickly before someone overwrites them,
                # if using the buggy version of the bindings (otherwise it's
                # irrelevant in which order we access the properties).
                length   = instr.size
                mnemonic = instr.mnemonic
                op_str   = instr.op_str

                # Concatenate the mnemonic and the operands.
                if op_str:
                    disasm = "%s %s" % (mnemonic, op_str)
                else:
                    disasm = mnemonic

                # Get the instruction bytes as a hexadecimal dump.
                hexdump = HexDump.hexadecimal( code[offset:offset+length] )

            # On error add a "define constant" instruction.
            # The exact instruction depends on the architecture.
            else:

                # The number of bytes to skip depends on the architecture.
                # On Intel processors we'll skip one byte, since we can't
                # really know the instruction length. On the rest of the
                # architectures we always know the instruction length.
                if self.arch in (win32.ARCH_I386, win32.ARCH_AMD64):
                    length = 1
                else:
                    # NOTE(review): assumes fixed 4-byte instructions here;
                    # Thumb mode can use 2-byte encodings — confirm intended.
                    length = 4

                # Get the skipped bytes as a hexadecimal dump.
                skipped = code[offset:offset+length]
                hexdump = HexDump.hexadecimal(skipped)

                # Build the "define constant" instruction.
                # On Intel processors it's "db".
                # On ARM processors it's "dcb".
                if self.arch in (win32.ARCH_I386, win32.ARCH_AMD64):
                    mnemonic = "db "
                else:
                    mnemonic = "dcb "
                bytes = []
                for b in skipped:
                    if b.isalpha():
                        bytes.append("'%s'" % b)
                    else:
                        bytes.append("0x%x" % ord(b))
                op_str = ", ".join(bytes)
                disasm = mnemonic + op_str

            # Add the decoded instruction to the list.
            result.append((
                address + offset,
                length,
                disasm,
                hexdump,
            ))

            # Update the offset.
            offset += length

        # Return the list of decoded instructions.
        return result
#==============================================================================
# TODO: use a lock to access __decoder
# TODO: look in sys.modules for whichever disassembler is already loaded
class Disassembler (object):
    """
    Generic disassembler. Uses a set of adapters to decide which library to
    load for which supported platform.

    @type engines: tuple( L{Engine} )
    @cvar engines: Set of supported engines. If you implement your own adapter
        you can add its class here to make it available to L{Disassembler}.
        Supported disassemblers are:
    """

    engines = (
        DistormEngine,  # diStorm engine goes first for backwards compatibility
        BeaEngine,
        CapstoneEngine,
        LibdisassembleEngine,
        PyDasmEngine,
    )

    # Add the list of supported disassemblers to the docstring.
    __doc__ += "\n"
    for e in engines:
        __doc__ += " - %s - %s (U{%s})\n" % (e.name, e.desc, e.url)
    del e

    # Cache of already loaded disassemblers, keyed by (engine name, arch).
    __decoder = {}

    def __new__(cls, arch = None, engine = None):
        """
        Factory class. You can't really instance a L{Disassembler} object,
        instead one of the adapter L{Engine} subclasses is returned.

        @type  arch: str
        @param arch: (Optional) Name of the processor architecture.
            If not provided the current processor architecture is assumed.
            For more details see L{win32.version._get_arch}.

        @type  engine: str
        @param engine: (Optional) Name of the disassembler engine.
            If not provided a compatible one is loaded automatically.
            See: L{Engine.name}

        @raise NotImplementedError: No compatible disassembler was found that
            could decode machine code for the requested architecture. This may
            be due to missing dependencies.

        @raise ValueError: An unknown engine name was supplied.
        """

        # Use the default architecture if none specified.
        if not arch:
            arch = win32.arch

        # Return a compatible engine if none specified.
        # (BUGFIX: removed a dead "found = False" assignment that used to sit
        # here; the flag was never read in this branch.)
        if not engine:
            for clazz in cls.engines:
                try:
                    if arch in clazz.supported:
                        selected = (clazz.name, arch)
                        try:
                            decoder = cls.__decoder[selected]
                        except KeyError:
                            decoder = clazz(arch)
                            cls.__decoder[selected] = decoder
                        return decoder
                except NotImplementedError:
                    # This engine can't be loaded (e.g. missing dependency):
                    # silently try the next one.
                    pass
            msg = "No disassembler engine available for %s code." % arch
            raise NotImplementedError(msg)

        # Return the specified engine.
        selected = (engine, arch)
        try:
            decoder = cls.__decoder[selected]
        except KeyError:
            # Engine names are matched case-insensitively.
            found = False
            engineLower = engine.lower()
            for clazz in cls.engines:
                if clazz.name.lower() == engineLower:
                    found = True
                    break
            if not found:
                msg = "Unsupported disassembler engine: %s" % engine
                raise ValueError(msg)
            if arch not in clazz.supported:
                msg = "The %s engine cannot decode %s code." % selected
                raise NotImplementedError(msg)
            decoder = clazz(arch)
            cls.__decoder[selected] = decoder
        return decoder
| apache-2.0 |
pombreda/xhtml2pdf | sx/w3c/cssSpecial.py | 1 | 12815 | # -*- coding: ISO-8859-1 -*-
#############################################
## (C)opyright by Dirk Holtwick, 2002-2007 ##
## All rights reserved ##
#############################################
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
"""
Helper for complex CSS definitons like font, margin, padding and border
Optimized for use with PISA
"""
import types
import logging
log = logging.getLogger("ho.css")
def toList(value):
    """Return *value* unchanged if it already is a list, else wrap it in one."""
    # isinstance replaces the Python 2-only ``type(value) != types.ListType``
    # comparison; it also parses and runs identically on Python 3 and accepts
    # list subclasses.
    if isinstance(value, list):
        return value
    return [value]
# Recognised keywords for the ``font-style`` component of the ``font``
# shorthand.  Only key membership is tested; the mapped values are unused.
_styleTable = {
    "normal": "",
    "italic": "",
    "oblique": "",
}
# Recognised keywords for the ``font-variant`` component (values unused).
_variantTable = {
    "normal": None,
    "small-caps": None,
}
# CSS ``font-weight`` keywords and numeric strings mapped to numeric weights.
_weightTable = {
    "light": 300,
    "lighter": 300, # fake relativness for now
    "normal": 400,
    "bold": 700,
    "bolder": 700, # fake relativness for now
    "100": 100,
    "200": 200,
    "300": 300,
    "400": 400,
    "500": 500,
    "600": 600,
    "700": 700,
    "800": 800,
    "900": 900,
    #wx.LIGHT: 300,
    #wx.NORMAL: 400,
    #wx.BOLD: 700,
}
# Absolute font-size keywords mapped to scale factors relative to "medium".
_absSizeTable = {
    "xx-small" : 3./5.,
    "x-small": 3./4.,
    "small": 8./9.,
    "medium": 1./1.,
    "large": 6./5.,
    "x-large": 3./2.,
    "xx-large": 2./1.,
    "xxx-large": 3./1.,
    "larger": 1.25, # XXX Not totaly CSS conform:
    "smaller": 0.75, # http://www.w3.org/TR/CSS21/fonts.html#propdef-font-size
}
# CSS border-style keywords; the value is 1 when the style draws a visible
# border and 0 when it does not ("none"/"hidden").
_borderStyleTable = {
    "none": 0,
    "hidden": 0,
    "dotted": 1,
    "dashed": 1,
    "solid": 1,
    "double": 1,
    "groove": 1,
    "ridge": 1,
    "inset": 1,
    "outset": 1,
}
'''
_relSizeTable = {
'pt':
# pt: absolute point size
# Note: this is 1/72th of an inch
(lambda value, pt: value),
'px':
# px: pixels, relative to the viewing device
# Note: approximate at the size of a pt
(lambda value, pt: value),
'ex':
# ex: proportional to the 'x-height' of the parent font
# Note: can't seem to dervie this value from wx.Font methods,
# so we'll approximate by calling it 1/2 a pt
(lambda value, pt: 2 * value),
'pc':
# pc: 12:1 pica:point size
# Note: this is 1/6th of an inch
(lambda value, pt: 12*value),
'in':
# in: 72 inches per point
(lambda value, pt: 72*value),
'cm':
# in: 72 inches per point, 2.54 cm per inch
(lambda value, pt,_r=72./2.54: _r*value),
'mm':
# in: 72 inches per point, 25.4 mm per inch
(lambda value, pt,_r=72./25.4: _r*value),
'%':
# %: percentage of the parent's pointSize
(lambda value, pt: 0.01 * pt * value),
'em':
# em: proportional to the 'font-size' of the parent font
(lambda value, pt: pt * value),
}
'''
def getNextPart(parts):
    """Pop and return the first element of *parts*, or None when it is empty."""
    return parts.pop(0) if parts else None
def isSize(value):
    """Return True when *value* looks like a CSS size: a parsed
    (value, unit) tuple or the literal string "0"."""
    # isinstance replaces the Python 2-only ``types.TupleType`` comparison,
    # and the result is normalized to bool (callers only test truthiness).
    return bool(value) and (isinstance(value, tuple) or value == "0")
def splitBorder(parts):
    """
    Split a CSS ``border`` shorthand value list into (width, style, color).

    Each component is optional; components that are not present are returned
    as None.  *parts* is consumed from the front.
    """
    width = style = color = None
    part = getNextPart(parts)
    # Width: a parsed size tuple or "0".
    if isSize(part):
        width = part
        part = getNextPart(parts)
    # Style: must be a known border-style keyword.
    # ``in`` replaces the Python 2-only ``dict.has_key``.
    if part and part.lower() in _borderStyleTable:
        style = part
        part = getNextPart(parts)
    # Color: whatever comes next.
    if part:
        color = part
    if len(parts) > 1:
        log.warn("Border not split up correctly, rest: %r", parts)
    return (width, style, color)
def _expandBoxParts(parts):
    """
    Expand a CSS box shorthand value list (1 to 4 values) into the tuple
    (top, right, bottom, left) following the CSS expansion rules.

    Returns None when the number of values is not in 1..4.
    """
    if len(parts) == 1:
        top = bottom = left = right = parts[0]
    elif len(parts) == 2:
        top = bottom = parts[0]
        left = right = parts[1]
    elif len(parts) == 3:
        top = parts[0]
        left = right = parts[1]
        bottom = parts[2]
    elif len(parts) == 4:
        top, right, bottom, left = parts
    else:
        return None
    return top, right, bottom, left

def parseSpecialRules(declarations, debug=0):
    """
    Expand CSS shorthand declarations into their long-hand equivalents.

    *declarations* is a list of (name, value, important) triples as produced
    by the CSS parser.  The shorthands ``font``, ``background``, ``margin``,
    ``padding``, ``border``, ``border-width``, ``border-color``,
    ``border-style`` and ``border-<side>`` are replaced by the corresponding
    individual properties; everything else is passed through unchanged.
    A new declaration list is returned.
    """
    dd = []
    for d in declarations:
        if debug:
            log.debug("CSS special IN: %r", d)
        name, parts, last = d
        parts = toList(parts)
        if name == "font":
            # [ [<style> || <variant> || <weight>]? <size> [/ <line-height>]? <family> ]
            part = getNextPart(parts)
            # ``in`` replaces the Python 2-only ``dict.has_key`` throughout.
            if part and part in _styleTable:
                dd.append(("font-style", part, last))
                part = getNextPart(parts)
            if part and part in _variantTable:
                dd.append(("font-variant", part, last))
                part = getNextPart(parts)
            if part and part in _weightTable:
                dd.append(("font-weight", part, last))
                part = getNextPart(parts)
            # A (size, '/', line-height) tuple means both were given.
            if isinstance(part, tuple) and len(part) == 3:
                fontSize, slash, lineHeight = part
                assert slash == '/'
                dd.append(("font-size", fontSize, last))
                dd.append(("line-height", lineHeight, last))
            else:
                dd.append(("font-size", part, last))
            # Whatever remains is the font family list.
            dd.append(("font-face", parts, last))
        elif name == "background":
            part = getNextPart(parts)
            # Color: anything that is not an url() reference.
            if part and (not part.startswith("url")):
                dd.append(("background-color", part, last))
                part = getNextPart(parts)
            if part:
                dd.append(("background-url", part, last))
            # XXX Incomplete! Error in url()!
        elif name in ("margin", "padding"):
            box = _expandBoxParts(parts)
            if box is None:
                continue
            top, right, bottom, left = box
            dd.append((name + "-left", left, last))
            dd.append((name + "-right", right, last))
            dd.append((name + "-top", top, last))
            dd.append((name + "-bottom", bottom, last))
        elif name in ("border-width", "border-color", "border-style"):
            box = _expandBoxParts(parts)
            if box is None:
                continue
            top, right, bottom, left = box
            suffix = name[len("border"):]  # "-width", "-color" or "-style"
            dd.append(("border-left" + suffix, left, last))
            dd.append(("border-right" + suffix, right, last))
            dd.append(("border-top" + suffix, top, last))
            dd.append(("border-bottom" + suffix, bottom, last))
        elif name == "border":
            width, style, color = splitBorder(parts)
            # Keep the original grouping: all widths, then styles, then colors.
            if width is not None:
                for side in ("left", "right", "top", "bottom"):
                    dd.append(("border-%s-width" % side, width, last))
            if style is not None:
                for side in ("left", "right", "top", "bottom"):
                    dd.append(("border-%s-style" % side, style, last))
            if color is not None:
                for side in ("left", "right", "top", "bottom"):
                    dd.append(("border-%s-color" % side, color, last))
        elif name in ("border-top", "border-bottom", "border-left", "border-right"):
            side = name[7:]  # strip the "border-" prefix
            width, style, color = splitBorder(parts)
            if width is not None:
                dd.append(("border-" + side + "-width", width, last))
            if style is not None:
                dd.append(("border-" + side + "-style", style, last))
            if color is not None:
                dd.append(("border-" + side + "-color", color, last))
        else:
            # Not a shorthand: pass the declaration through untouched.
            dd.append(d)
    if debug and dd:
        log.debug("CSS special OUT:\n%s", "\n".join([repr(d) for d in dd]))
    return dd
#import re
#_rxhttp = re.compile(r"url\([\'\"]?http\:\/\/[^\/]", re.IGNORECASE|re.DOTALL)
def cleanupCSS(src):
    """Hook for sanitising raw CSS before parsing; currently a no-op."""
    # A regex stripping remote url(http://...) references used to live here
    # (see the commented-out _rxhttp above); the source is returned untouched.
    return src
gcrahay/fir_irma_plugin | fir_irma/urls.py | 1 | 1221 | from django.conf.urls import include, url
from fir_irma.settings import settings
# API endpoints proxied to the IRMA backend.  View callables are given as
# dotted-path strings; NOTE(review): string view references were deprecated
# in Django 1.8 and removed in 1.10 -- confirm the Django version targeted.
api_urlpatterns = [
    url(r'^$', 'fir_irma.views.not_found', name='base'),
    url(r'^scans$', 'fir_irma.views.irma_scan_new'),
    url(r'^scans/(?P<scan_id>[^/]+)/files$', 'fir_irma.views.irma_scan_upload'),
    url(r'^scans/(?P<scan_id>[^/]+)/launch$', 'fir_irma.views.irma_scan_launch'),
    url(r'^scans/(?P<scan_id>[^/]+)(?P<tail>(?:.*)?)$', 'fir_irma.views.irma_scan_generic'),
    url(r'^probes$', 'fir_irma.views.irma_probes'),
    url(r'^search/files', 'fir_irma.views.irma_search'),
]
# User-facing pages, Angular view templates and the generated JS app.
common_urlpatterns = [
    url(r'^(?P<sub>selection|upload|search|maintenance|)$', 'fir_irma.views.irma_index', name='index'),
    url(r'^scan/(?P<scan_id>[a-zA-Z0-9\-]+)(?:/.*)?$', 'fir_irma.views.irma_redirect_index', name='details'),
    url(r'^views/(?P<name>maintenance|selection|search|details|scan|upload)\.html$', 'fir_irma.views.irma_view',
        name='view'),
    url(r'^js/irma.js$', 'fir_irma.views.irma_app', name='app'),
]
# The API is always mounted under /api/v1/; the UI is mounted at the root
# only when it is enabled in the plugin settings.
urlpatterns = [
    url(r'^api/v1/', include(api_urlpatterns, namespace='api')),
]
if settings.IRMA_HAS_UI:
    urlpatterns += [
        url(r'^', include(common_urlpatterns, namespace='ui')),
    ]
| apache-2.0 |
chunywang/crosswalk-test-suite | cordova/cordova-feature-android-tests/feature/app_multiple_apk_false.py | 14 | 2665 | #!/usr/bin/env python
#
# Copyright (c) 2016 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Zhu, Yongyong <yongyongx.zhu@intel.com>
import unittest
import os
import sys
import commands
import comm
class TestAppMultipleApkFalse(unittest.TestCase):
    """End-to-end check of a Cordova build with multiple-APK support disabled."""

    def test_multiple_apk_false(self):
        # Build, install, launch, stop and uninstall a sample app, asserting
        # at each step through the shared ``comm`` helper module.
        comm.setUp()
        app_name = "testMultipleApk"
        # NOTE(review): the leading space in " com.example." looks like a
        # typo -- sibling suites use "com.example." -- confirm before fixing.
        pkg_name = " com.example." + app_name.lower()
        # Inject a link into the page body so the packaged app is non-trivial.
        content = "<a href='http://www.intel.com'>Intel</a>\n</body>"
        key = "</body>"
        replace_index_list = [key, content]
        # Last argument "false" disables multiple-APK generation.
        comm.create(
            app_name,
            pkg_name,
            comm.MODE,
            None,
            replace_index_list,
            self, None, "false")
        comm.build(app_name, 0, self, True, False)
        # Embedded mode bundles the Crosswalk runtime, hence the larger
        # expected APK size range (values are in MB per comm.checkFileSize).
        if comm.MODE == "embedded":
            comm.checkFileSize(os.path.join(comm.testapp_path, "%s.apk" % app_name), 40, 50, self)
        else:
            comm.checkFileSize(os.path.join(comm.testapp_path, "%s.apk" % app_name), 1, 5, self)
        comm.app_install(app_name, pkg_name, self)
        comm.app_launch(app_name, pkg_name, self)
        comm.app_stop(pkg_name, self)
        comm.app_uninstall(pkg_name, self)


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
linktlh/Toontown-journey | otp/ai/GarbageLeakServerEventAggregator.py | 6 | 1454 | from direct.showbase.DirectObject import DirectObject
from direct.showbase import GarbageReport
class GarbageLeakServerEventAggregator(DirectObject):
    """Listens for garbage-cycle count announcements and periodically reports
    the number of new leaks per description to the server via the time manager."""

    def __init__(self, cr):
        # cr: client repository; kept so destroy() can release it.
        self.cr = cr
        # Name of the scheduled doMethodLater task, or None when not running.
        self._doLaterName = None
        # Per-description counts already reported to the server.
        self._sentLeakDesc2num = {}
        # Most recent per-description counts announced by GarbageReport.
        self._curLeakDesc2num = {}
        self.accept(GarbageReport.GarbageCycleCountAnnounceEvent, self._handleCycleCounts)
        return

    def destroy(self):
        """Stop reporting, drop event subscriptions and release the repository."""
        self._stopSending()
        self.ignoreAll()
        del self.cr

    def _handleCycleCounts(self, desc2num):
        # Event handler: stash the latest counts and (re)start the reporter.
        self._curLeakDesc2num = desc2num
        self._startSending()

    def _startSending(self):
        # Send immediately, then repeat hourly; no-op if already scheduled.
        if not self._doLaterName:
            self._sendLeaks()
            self._doLaterName = uniqueName('%s-sendGarbageLeakInfo' % self.__class__.__name__)
            self.doMethodLater(60 * 60.0, self._sendLeaks, self._doLaterName)

    def _stopSending(self):
        # Cancel the scheduled task, if any.
        if self._doLaterName:
            self.removeTask(self._doLaterName)
            self._doLaterName = None
        return

    def _sendLeaks(self, task = None):
        # Report only the delta since the last successful send per description.
        for desc, curNum in self._curLeakDesc2num.iteritems():
            self._sentLeakDesc2num.setdefault(desc, 0)
            num = curNum - self._sentLeakDesc2num[desc]
            if num > 0:
                base.cr.timeManager.d_setClientGarbageLeak(num, desc)
                self._sentLeakDesc2num[desc] = curNum
        # When invoked from the task system, keep the task repeating.
        if task:
            return task.again
LabAdvComp/tukey_middleware | tools/utils.py | 1 | 4144 | # Copyright 2013 Open Cloud Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
import os
from novaclient import utils as novaclient_utils
from tukey_middleware.modules.ids import Client
class TokenFileClient(object):
    ''' Handles saving the id_service_auth_token from the id client and
    providing with statement functionality.

    On entry the cached token (if any) is read from ``path`` and used to
    build the client; on exit the client's current token is written back.
    '''

    def __init__(self, args, path='~/.id_service_auth_token'):
        # path: token cache file, or None to disable caching entirely.
        self.path = path
        self.args = args
        self.client = None

    def __enter__(self):
        # A missing/unreadable cache file (or path=None) simply means no
        # cached token; the client is then built without one.
        try:
            if self.path is None:
                raise IOError
            with open(os.path.expanduser(self.path)) as id_file:
                id_service_auth_token = id_file.read()
        except IOError:
            id_service_auth_token = None
        self.client = client_from_args(self.args, id_service_auth_token)
        return self.client

    def __exit__(self, exit_type, value, traceback):
        # Persist the (possibly refreshed) token for the next invocation.
        # NOTE(review): only KeyError is swallowed here; a client without an
        # ``id_auth_token`` attribute would raise AttributeError instead, and
        # self.path may be None at this point -- confirm the intended failure
        # modes against the Client implementation.
        try:
            if self.client.id_auth_token:
                with open(os.path.expanduser(self.path),
                        'w+') as id_file:
                    id_file.write(self.client.id_auth_token)
        except KeyError:
            pass
def err(msg):
    """Write *msg* to standard error, followed by a newline."""
    # sys.stderr.write replaces the Python 2-only ``print >> sys.stderr``
    # statement; the emitted bytes are identical and the helper now parses
    # on Python 3 as well.
    sys.stderr.write("%s\n" % (msg,))
def client_from_args(args, id_service_auth_token=None):
    ''' From args as returned by argparse.ArgumentParser.parse_args()
    format arguments to tukey_middleware.modules.ids.Client and return new
    object '''
    # Collect the keyword options first so the constructor call stays short.
    options = dict(
        os_username=args.os_username,
        os_tenant_name=args.os_tenant_name,
        os_password=args.os_password,
        os_auth_url=args.os_auth_url,
        os_auth_token=args.os_auth_token,
        interface=args.interface,
        id_service_auth_token=id_service_auth_token,
        swift_auth_url=args.swift_auth_url,
        swift_tenant=args.swift_tenant,
        swift_username=args.swift_username,
        swift_password=args.swift_password,
    )
    return Client(args.id_service, **options)
def add_openstack_env(arg_parser):
    ''' Add OS_* environment variable backed options to an
    argparse.ArgumentParser.

    Each ``--os-*`` option defaults to the matching OS_* (or legacy NOVA_*)
    environment variable; the underscore spellings are accepted but hidden
    from help output for backwards compatibility.
    '''
    arg_parser.add_argument('--os-username', metavar='<auth-user-name>',
        default=novaclient_utils.env('OS_USERNAME', 'NOVA_USERNAME'),
        help='Defaults to env[OS_USERNAME].')
    arg_parser.add_argument('--os_username', help=argparse.SUPPRESS)
    arg_parser.add_argument('--os-password', metavar='<auth-password>',
        default=novaclient_utils.env('OS_PASSWORD', 'NOVA_PASSWORD'),
        help='Defaults to env[OS_PASSWORD].')
    arg_parser.add_argument('--os_password', help=argparse.SUPPRESS)
    arg_parser.add_argument('--os-tenant-name', metavar='<auth-tenant-name>',
        default=novaclient_utils.env('OS_TENANT_NAME', 'NOVA_PROJECT_ID'),
        help='Defaults to env[OS_TENANT_NAME].')
    arg_parser.add_argument('--os_tenant_name', help=argparse.SUPPRESS)
    arg_parser.add_argument('--os-auth-url', metavar='<auth-url>',
        default=novaclient_utils.env('OS_AUTH_URL', 'NOVA_URL'),
        help='Defaults to env[OS_AUTH_URL].')
    arg_parser.add_argument('--os_auth_url', help=argparse.SUPPRESS)
    arg_parser.add_argument('--os-auth-token', metavar='<auth-token>',
        default=novaclient_utils.env('OS_AUTH_TOKEN', 'NOVA_AUTH_TOKEN'),
        help='Defaults to env[OS_AUTH_TOKEN].')
    arg_parser.add_argument('--os_auth_token', help=argparse.SUPPRESS)
    # Swift credentials have no environment-variable defaults.
    arg_parser.add_argument("--swift-auth-url", type=str)
    arg_parser.add_argument("--swift-username", type=str)
    arg_parser.add_argument("--swift-tenant", type=str)
    arg_parser.add_argument("--swift-password", type=str)
| apache-2.0 |
viniciuschiele/flask-webapi | flask_webapi/fields.py | 1 | 23621 | """
Provides a set of classes to serialize Python objects.
"""
import copy
import datetime
import decimal
import uuid
from collections import OrderedDict
from werkzeug.utils import cached_property
from .exceptions import ValidationError
from .utils import dateparse, formatting, html, missing, timezone
from .validators import LengthValidator, RangeValidator
# Developer-facing assertion text used by Field._fail when a subclass asks
# for an error key missing from its ``error_messages`` dictionary.
MISSING_ERROR_MESSAGE = 'ValidationError raised by `{class_name}`, but error key `{key}` does ' \
                        'not exist in the `error_messages` dictionary.'
class Field(object):
    """
    Base class for all serializer fields.

    A field converts between primitive wire values (``load``) and Python
    values (``dump``), applying required/null handling, defaults and the
    configured validators.
    """

    default_error_messages = {
        'required': 'This field is required.',
        'null': 'This field may not be null.',
        'validator_failed': 'Invalid value.'
    }

    def __init__(self, dump_only=False, load_only=False, required=None, default=missing, allow_none=None,
                 dump_to=None, load_from=None, error_messages=None, validators=None):
        """
        :param dump_only: Skip this field when loading input data.
        :param load_only: Skip this field when dumping output data.
        :param required: Whether input must contain the field; defaults to
            True unless a ``default`` is provided.
        :param default: Value used when the field is absent (``missing``
            means no default); may be a callable.
        :param allow_none: Whether None is accepted on load; defaults to
            True only when ``default`` is None.
        :param dump_to: Output key name (defaults to the bound field name).
        :param load_from: Input key name (defaults to the bound field name).
        :param error_messages: Overrides merged over the class-level
            ``default_error_messages``.
        :param validators: Extra callables applied after ``_load``.
        """
        self.dump_only = dump_only
        self.load_only = load_only
        self.default = default
        self.dump_to = dump_to
        self.load_from = load_from
        self.validators = validators or []

        # If `required` is unset, then use `True` unless a default is provided.
        if required is None:
            self.required = default is missing
        else:
            self.required = required

        # If `allow_none` is unset, allow None only when it is the default.
        # (The former unconditional `self.allow_none = allow_none` assignment
        # was dead code: this branch always overwrote it.)
        if allow_none is None:
            self.allow_none = default is None
        else:
            self.allow_none = allow_none

        # Collect default error messages from self and all parent classes,
        # letting subclasses and the constructor argument override.
        messages = {}
        for cls in reversed(self.__class__.__mro__):
            messages.update(getattr(cls, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages

        # Set by `bind` when the field is attached to a serializer.
        self.field_name = None
        self.parent = None

    @cached_property
    def root(self):
        """
        Returns the top-level serializer for this field.
        """
        root = self
        while root.parent is not None:
            root = root.parent
        return root

    def bind(self, field_name, parent):
        """Attach this field to *parent* under *field_name*, filling in the
        wire names when they were not given explicitly."""
        self.field_name = field_name
        self.parent = parent

        if not self.dump_to:
            self.dump_to = field_name

        if not self.load_from:
            self.load_from = field_name

    def get_attribute(self, instance):
        """Fetch this field's value from *instance* (dict or object),
        returning ``missing`` when absent."""
        if isinstance(instance, dict):
            return instance.get(self.field_name, missing)
        else:
            return getattr(instance, self.field_name, missing)

    def get_value(self, dictionary):
        """Fetch this field's raw input value from *dictionary*.

        HTML form input cannot distinguish empty from absent, so an empty
        string is coerced to None or ``missing`` where that is acceptable.
        """
        value = dictionary.get(self.load_from, missing)

        if html.is_html_input(dictionary):
            if value == '' and self.allow_none:
                return None
            elif value == '' and not self.required:
                return missing

        return value

    def get_default(self):
        """Return the resolved default value (calling it when callable),
        or ``missing`` when no default is configured."""
        if self.default is missing:
            return missing

        if callable(self.default):
            return self.default()

        return self.default

    def dump(self, value):
        """Serialize *value* to a primitive, handling missing/None."""
        if value is missing:
            return self.get_default()

        if value is None:
            return None

        return self._dump(value)

    def load(self, data):
        """Deserialize *data*, applying required/None rules and validators.

        :raises ValidationError: When the data is missing but required,
            None but not allowed, or rejected by a validator.
        """
        if data is missing:
            # Partial serializers skip absent fields entirely.
            if getattr(self.root, 'partial', False):
                return missing

            if self.required:
                self._fail('required')

            return self.get_default()

        if data is None:
            if not self.allow_none:
                self._fail('null')

            return None

        validated_data = self._load(data)
        self._validate(validated_data)
        return validated_data

    def _dump(self, value):
        # Subclasses implement the type-specific serialization.
        raise NotImplementedError()

    def _load(self, value):
        # Subclasses implement the type-specific deserialization.
        raise NotImplementedError()

    def _fail(self, key, **kwargs):
        """Raise a ValidationError for the message registered under *key*,
        or AssertionError when the key is unknown (a programming error)."""
        try:
            message = self.error_messages[key]
            message = formatting.format_error_message(message, **kwargs)
            if isinstance(message, dict):
                raise ValidationError(**message)
            raise ValidationError(message)
        except KeyError:
            class_name = self.__class__.__name__
            message = formatting.format_error_message(MISSING_ERROR_MESSAGE, class_name=class_name, key=key)
            raise AssertionError(message)

    def _validate(self, data):
        """Run all validators over *data*, collecting their errors into a
        single ValidationError; dict-shaped errors are re-raised as-is."""
        errors = []
        for validator in self.validators:
            try:
                # A validator may signal failure by returning False...
                if validator(data) is False:
                    self._fail('validator_failed')
            except ValidationError as err:
                # ...or by raising ValidationError itself.
                if isinstance(err.message, dict):
                    raise
                errors.append(err)
        if errors:
            raise ValidationError(errors)
class BooleanField(Field):
    """Normalizes truthy/falsy wire values ('t', '1', 0.0, ...) to bool."""

    default_error_messages = {
        'invalid': '"{value}" is not a valid boolean.'
    }

    TRUE_VALUES = {'t', 'T', 'true', 'True', 'TRUE', '1', 1, True}
    FALSE_VALUES = {'f', 'F', 'false', 'False', 'FALSE', '0', 0, 0.0, False}

    def _load(self, value):
        # Unhashable inputs make the set membership test raise TypeError;
        # they are reported as invalid just like unrecognized values.
        try:
            is_true = value in self.TRUE_VALUES
            is_false = value in self.FALSE_VALUES
        except TypeError:
            is_true = is_false = False
        if is_true:
            return True
        if is_false:
            return False
        self._fail('invalid', value=value)

    def _dump(self, value):
        if value in self.TRUE_VALUES:
            return True
        elif value in self.FALSE_VALUES:
            return False
        # Anything else falls back to plain truthiness.
        return bool(value)
class DateField(Field):
    """Loads date objects / ISO date strings into ``datetime.date``."""

    default_error_messages = {
        'invalid': 'Date has wrong format.',
        'datetime': 'Expected a date but got a datetime.',
    }

    def _load(self, value):
        # datetime is a subclass of date, so reject it explicitly first.
        if isinstance(value, datetime.datetime):
            self._fail('datetime')
        if isinstance(value, datetime.date):
            return value
        try:
            parsed = dateparse.parse_date(value)
        except (ValueError, TypeError):
            parsed = None
        if parsed is None:
            self._fail('invalid')
        return parsed

    def _dump(self, value):
        return value.isoformat()
class DateTimeField(Field):
    """Loads datetime objects / ISO datetime strings into ``datetime.datetime``,
    optionally coercing naive values into a configured timezone."""

    default_error_messages = {
        'invalid': 'Datetime has wrong format.',
        'date': 'Expected a datetime but got a date.',
    }

    # Class-level default; may be shadowed per instance via the constructor.
    default_timezone = None

    def __init__(self, default_timezone=None, *args, **kwargs):
        """
        :param default_timezone: tzinfo applied to naive datetimes on load;
            None keeps them naive.
        """
        super().__init__(*args, **kwargs)

        if default_timezone is not None:
            self.default_timezone = default_timezone

    def _load(self, value):
        if isinstance(value, datetime.datetime):
            return self._enforce_timezone(value)

        # date is accepted by isinstance but is not a datetime: reject.
        if isinstance(value, datetime.date):
            self._fail('date')

        try:
            parsed = dateparse.parse_datetime(value)
            if parsed is not None:
                return self._enforce_timezone(parsed)
        except (ValueError, TypeError):
            pass

        self._fail('invalid')

    def _dump(self, value):
        # Render UTC offsets as the conventional trailing 'Z'.
        value = value.isoformat()
        if value.endswith('+00:00'):
            value = value[:-6] + 'Z'
        return value

    def _enforce_timezone(self, value):
        """
        When `self.default_timezone` is `None`, always return naive datetimes.
        When `self.default_timezone` is not `None`, always return aware datetimes.
        """
        if self.default_timezone is not None and not timezone.is_aware(value):
            return timezone.make_aware(value, self.default_timezone)
        return value
class DecimalField(Field):
    """Loads numeric input into ``decimal.Decimal`` with optional digit,
    decimal-place and range constraints."""

    default_error_messages = {
        'invalid': 'A valid number is required.',
        'max_value': 'Ensure this value is less than or equal to {max_value}.',
        'min_value': 'Ensure this value is greater than or equal to {min_value}.',
        'max_digits': 'Ensure that there are no more than {max_digits} digits in total.',
        'max_decimal_places': 'Ensure that there are no more than {max_decimal_places} decimal places.',
        'max_whole_digits': 'Ensure that there are no more than {max_whole_digits} digits before the decimal point.',
        'max_string_length': 'String value too large.'
    }

    MAX_STRING_LENGTH = 1000  # Guard against malicious string inputs.

    def __init__(self, max_digits=None, decimal_places=None, max_value=None, min_value=None, **kwargs):
        """
        :param max_digits: Maximum total number of digits allowed.
        :param decimal_places: Maximum digits after the decimal point; also
            used to quantize output.
        :param max_value: Upper bound enforced via RangeValidator.
        :param min_value: Lower bound enforced via RangeValidator.
        """
        super(DecimalField, self).__init__(**kwargs)
        self.max_digits = max_digits
        self.decimal_places = decimal_places
        self.max_value = max_value
        self.min_value = min_value

        # Digits before the point = total digits minus decimal places.
        if self.max_digits is not None and self.decimal_places is not None:
            self.max_whole_digits = self.max_digits - self.decimal_places
        else:
            self.max_whole_digits = None

        if self.min_value is not None or self.max_value is not None:
            self.validators.append(RangeValidator(min_value, max_value, self.error_messages))

    def _load(self, value):
        """
        Validate that the input is a decimal number and return a Decimal
        instance.
        :param value: The value to be decoded.
        """
        value = str(value)

        if len(value) > self.MAX_STRING_LENGTH:
            self._fail('max_string_length')

        try:
            value = decimal.Decimal(value)

            # Check for NaN and for infinity and negative infinity.
            if value.is_nan() or value.is_infinite():
                self._fail('invalid')

            return self._validate_precision(value)
        except decimal.DecimalException:
            self._fail('invalid')

    def _dump(self, value):
        # Non-Decimal inputs go through str() to avoid binary-float artifacts.
        if not isinstance(value, decimal.Decimal):
            value = decimal.Decimal(str(value))

        quantized = self._quantize(value)

        # ':f' keeps fixed-point notation (never scientific).
        return '{0:f}'.format(quantized)

    def _quantize(self, value):
        """
        Quantize the decimal value to the configured precision.
        """
        context = decimal.getcontext().copy()
        context.prec = self.max_digits
        return value.quantize(
            decimal.Decimal('.1') ** self.decimal_places,
            context=context)

    def _validate_precision(self, value):
        """
        Ensure that there are no more than max_digits in the number, and no
        more than decimal_places digits after the decimal point.
        Override this method to disable the precision validation for input
        values or to enhance it in any way you need to.
        """
        sign, digittuple, exponent = value.as_tuple()

        if exponent >= 0:
            # 1234500.0
            total_digits = len(digittuple) + exponent
            whole_digits = total_digits
            decimal_places = 0
        elif len(digittuple) > abs(exponent):
            # 123.45
            total_digits = len(digittuple)
            whole_digits = total_digits - abs(exponent)
            decimal_places = abs(exponent)
        else:
            # 0.001234
            total_digits = abs(exponent)
            whole_digits = 0
            decimal_places = total_digits

        if self.max_digits is not None and total_digits > self.max_digits:
            self._fail('max_digits', max_digits=self.max_digits)

        if self.decimal_places is not None and decimal_places > self.decimal_places:
            self._fail('max_decimal_places', max_decimal_places=self.decimal_places)

        if self.max_whole_digits is not None and whole_digits > self.max_whole_digits:
            self._fail('max_whole_digits', max_whole_digits=self.max_whole_digits)

        return value
class DelimitedListField(Field):
    """
    A list field whose wire format is a single delimited string (e.g. "a,b,c").

    Each element is loaded/dumped through the given ``child`` field.
    """

    default_error_messages = {
        'invalid': 'A valid string is required.',
        'empty': 'This list may not be empty.'
    }

    delimiter = ','

    def __init__(self, child, allow_empty=True, delimiter=None, **kwargs):
        """
        Initializes a new instance of `DelimitedListField`.
        :param Field child: A field instance used to load/dump each element.
        :param bool allow_empty: Whether an empty list is accepted.
        :param str delimiter: Delimiter between values (defaults to ``','``).
        """
        super().__init__(**kwargs)
        self.child = child
        self.allow_empty = allow_empty
        self.delimiter = delimiter or self.delimiter

    def _load(self, value):
        if not isinstance(value, str):
            self._fail('invalid')

        if value == '':
            values = []
        else:
            values = value.split(self.delimiter)

        # Check the parsed list for emptiness; the original tested
        # ``len(value)`` (the raw string), which happens to coincide but
        # obscures the intent.
        if not self.allow_empty and not values:
            self._fail('empty')

        return [self.child.load(v) for v in values]

    def _dump(self, value):
        values = [self.child.dump(item) for item in value]
        return self.delimiter.join(str(v) for v in values)
class EnumField(Field):
    """
    A field that provides a set of enumerated values which an attribute must be constrained to.
    """

    default_error_messages = {
        'invalid': '"{value}" is not a valid choice.'
    }

    def __init__(self, enum_type, **kwargs):
        """
        :param enum_type: The enum.Enum subclass whose members are the only
            accepted values.  NOTE(review): an empty enum would raise
            IndexError here -- presumably never constructed that way.
        """
        super().__init__(**kwargs)
        self.enum_type = enum_type
        # we get the type of the first member
        # to convert the input value to this format.
        # if we don't convert it will raise an
        # exception if the input value type is not the
        # same as member type.
        self.member_type = type(list(self.enum_type)[0].value)

    def _load(self, value):
        """Return the enum member for *value*; accepts either a member
        instance or anything convertible to the member value type."""
        try:
            if type(value) is self.enum_type:
                return value

            # converts the input value to make sure
            # it is the same type as the member's type
            member_value = self.member_type(value)

            return self.enum_type(member_value)
        except (ValueError, TypeError):
            self._fail('invalid', value=value)

    def _dump(self, value):
        """Return the underlying member value for *value* (member instance
        or raw value convertible to the member type)."""
        if type(value) is self.enum_type:
            return value.value

        # converts the input value to make sure
        # it is the same type as the member's type
        value = self.member_type(value)

        return self.enum_type(value).value
class IntegerField(Field):
    """Loads and dumps int values, with optional min/max range validation."""

    default_error_messages = {
        'invalid': 'A valid integer is required.',
        'max_value': 'Must be at most {max_value}.',
        'min_value': 'Must be at least {min_value}.',
        'max_string_length': 'String value too large.'
    }

    MAX_STRING_LENGTH = 1000  # Guard against malicious string inputs.

    def __init__(self, min_value=None, max_value=None, **kwargs):
        super().__init__(**kwargs)
        self.min_value = min_value
        self.max_value = max_value
        bounded = min_value is not None or max_value is not None
        if bounded:
            self.validators.append(RangeValidator(min_value, max_value, self.error_messages))

    def _load(self, value):
        # Refuse absurdly long strings before handing them to int().
        if isinstance(value, str) and len(value) > self.MAX_STRING_LENGTH:
            self._fail('max_string_length')
        try:
            return int(value)
        except (ValueError, TypeError):
            self._fail('invalid')

    def _dump(self, value):
        return int(value)
class FloatField(Field):
    """Loads and dumps float values, with optional min/max range validation."""

    default_error_messages = {
        'invalid': 'A valid number is required.',
        'max_value': 'Ensure this value is less than or equal to {max_value}.',
        'min_value': 'Ensure this value is greater than or equal to {min_value}.',
        'max_string_length': 'String value too large.'
    }

    MAX_STRING_LENGTH = 1000  # Guard against malicious string inputs.

    def __init__(self, min_value=None, max_value=None, **kwargs):
        super().__init__(**kwargs)
        self.min_value = min_value
        self.max_value = max_value
        bounded = min_value is not None or max_value is not None
        if bounded:
            self.validators.append(RangeValidator(min_value, max_value, self.error_messages))

    def _load(self, value):
        # Refuse absurdly long strings before handing them to float().
        if isinstance(value, str) and len(value) > self.MAX_STRING_LENGTH:
            self._fail('max_string_length')
        try:
            return float(value)
        except (TypeError, ValueError):
            self._fail('invalid')

    def _dump(self, value):
        return float(value)
class ListField(Field):
    """A homogeneous list whose elements are handled by a child field."""

    default_error_messages = {
        'invalid': 'Not a valid list.',
        'empty': 'This list may not be empty.'
    }

    def __init__(self, child, allow_empty=True, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.child = child
        self.allow_empty = allow_empty

    def get_value(self, dictionary):
        raw = dictionary.get(self.load_from, missing)
        if raw is missing:
            return missing
        if html.is_html_input(dictionary):
            # HTML form data may repeat the key; collect every occurrence.
            return dictionary.getlist(self.load_from)
        return raw

    def _load(self, value):
        """
        List of dicts of native values <- List of dicts of primitive datatypes.
        """
        if not isinstance(value, list):
            self._fail('invalid')
        if not value and not self.allow_empty:
            self._fail('empty')
        loaded = []
        failures = {}
        for index, element in enumerate(value):
            try:
                loaded.append(self.child.load(element))
            except ValidationError as error:
                # Collect per-index errors so all of them are reported at once.
                failures[index] = error
        if failures:
            raise ValidationError(failures)
        return loaded

    def _dump(self, value):
        """
        List of object instances -> List of dicts of primitive datatypes.
        """
        return [self.child.dump(element) for element in value]
class StringField(Field):
    """Field for text values, with optional trimming and length bounds."""

    default_error_messages = {
        'blank': 'This field may not be blank.',
        'max_length': 'Longer than maximum length {max_length}.',
        'min_length': 'Shorter than minimum length {min_length}.'
    }

    def __init__(self, allow_blank=False, trim_whitespace=True, min_length=None, max_length=None, **kwargs):
        """Length bounds, when given, install a LengthValidator."""
        super().__init__(**kwargs)
        self.allow_blank = allow_blank
        self.trim_whitespace = trim_whitespace
        self.min_length = min_length
        self.max_length = max_length
        if not (min_length is None and max_length is None):
            validator = LengthValidator(
                self.min_length, self.max_length,
                error_messages=self.error_messages)
            self.validators.append(validator)

    def get_value(self, dictionary):
        """Fetch the raw value, mapping HTML '' to blank/None/missing."""
        value = dictionary.get(self.load_from, missing)
        if html.is_html_input(dictionary) and value == '':
            if self.allow_none:
                return '' if self.allow_blank else None
            if not self.required:
                return '' if self.allow_blank else missing
        return value

    def _load(self, value):
        """Coerce to str, optionally strip, then enforce the blank policy."""
        text = str(value)
        if self.trim_whitespace:
            text = text.strip()
        if text or self.allow_blank:
            return text
        # Blank and blanks disallowed: map to None when None is permitted.
        if self.allow_none:
            return None
        self._fail('blank')

    def _dump(self, value):
        """Serialize *value* by coercing it to a built-in str."""
        return str(value)
class UUIDField(Field):
    """Field that parses hex strings into ``uuid.UUID`` objects."""

    default_error_messages = {
        'invalid': '"{value}" is not a valid UUID.',
    }

    def _load(self, value):
        """Return *value* as a UUID, accepting UUID instances or hex strings."""
        if isinstance(value, uuid.UUID):
            return value
        try:
            parsed = uuid.UUID(hex=value)
        except (AttributeError, ValueError):
            self._fail('invalid', value=value)
        else:
            return parsed

    def _dump(self, value):
        """Serialize the UUID in its canonical string form."""
        return str(value)
class SchemaMeta(type):
    """Metaclass that collects Field attributes into ``_declared_fields``."""

    def __new__(mcs, name, bases, attrs):
        # Harvest declared fields before the class body is materialized.
        attrs['_declared_fields'] = mcs._get_declared_fields(bases, attrs)
        return super(SchemaMeta, mcs).__new__(mcs, name, bases, attrs)

    @classmethod
    def _get_declared_fields(mcs, bases, attrs):
        """Pop Field instances out of *attrs*, prepending inherited ones."""
        own = [(key, attrs.pop(key))
               for key, value in list(attrs.items())
               if isinstance(value, Field)]
        inherited = []
        # Earlier bases take precedence, so walk the MRO order backwards.
        for base in reversed(bases):
            if hasattr(base, '_declared_fields'):
                inherited = list(base._declared_fields.items()) + inherited
        return OrderedDict(inherited + own)
class Schema(Field, metaclass=SchemaMeta):
    """Composite field: a named set of fields (de)serialized together.

    Declared Field attributes are gathered by SchemaMeta into
    ``_declared_fields`` and deep-copied per instance by ``refresh()``.
    """

    default_error_messages = {
        'invalid': 'Invalid data. Expected a dictionary, but got {datatype}.'
    }

    def __init__(self, only=None, partial=False, *args, **kwargs):
        """*only* optionally restricts the active field names; *partial* is
        stored for use by subclasses/hooks."""
        super().__init__(*args, **kwargs)
        only = only or ()
        if not isinstance(only, (list, tuple)):
            raise AssertionError('`only` has to be a list or tuple')
        self.only = only or ()
        self.partial = partial
        self.fields = {}
        # used to cache the load only
        # and dump only fields.
        self._load_fields = []
        self._dump_fields = []
        self.refresh()

    def loads(self, data):
        """Load a list of primitive values into a list of native values."""
        instance = [self.load(value) for value in data]
        return self.post_loads(instance, data)

    def dumps(self, instances):
        """Dump a list of instances into a list of primitive values."""
        data = [self.dump(instance) for instance in instances]
        data = self.post_dumps(data, instances)
        return data

    # Post-processing hooks; subclasses may override.  Defaults are no-ops.
    def post_load(self, data, original_data):
        return data

    def post_loads(self, data, original_data):
        return data

    def post_dump(self, data, original_data):
        return data

    def post_dumps(self, data, original_data):
        return data

    def post_validate(self, data):
        # Schema-level validation hook; raise ValidationError to report.
        pass

    def refresh(self):
        """Rebuild ``self.fields`` from the declared fields, re-bind them,
        and recompute the cached load/dump field lists (honoring ``only``)."""
        self.fields = copy.deepcopy(self._declared_fields)
        if self.only:
            field_names = set(self.only)
        else:
            field_names = set(self.fields)
        self._load_fields = []
        self._dump_fields = []
        for field_name, field in self.fields.items():
            field.bind(field_name, self)
            if field.field_name in field_names:
                if field.load_only:
                    self._load_fields.append(field)
                elif field.dump_only:
                    self._dump_fields.append(field)
                else:
                    # Regular fields participate in both directions.
                    self._load_fields.append(field)
                    self._dump_fields.append(field)

    def _load(self, data):
        # Dict of native values <- dict of primitives; per-field errors are
        # collected and raised together as one ValidationError.
        if not isinstance(data, dict):
            self._fail('invalid', datatype=type(data).__name__)
        result = dict()
        errors = dict()
        for field in self._load_fields:
            try:
                value = field.get_value(data)
                value = field.load(value)
                if value is not missing:
                    result[field.field_name] = value
            except ValidationError as err:
                errors[field.field_name] = err
        if errors:
            raise ValidationError(errors)
        return self.post_load(result, data)

    def _dump(self, instance):
        # Dict of primitives built from the instance's attributes.
        result = dict()
        for field in self._dump_fields:
            value = field.get_attribute(instance)
            value = field.dump(value)
            if value is not missing:
                result[field.dump_to] = value
        return self.post_dump(result, instance)

    def _validate(self, data):
        # Run field validators and the post_validate hook, merging their
        # errors into a single ValidationError keyed by field name.
        errors = []
        try:
            super()._validate(data)
        except ValidationError as err:
            errors.append(err)
        try:
            self.post_validate(data)
        except ValidationError as err:
            errors.append(err)
        d = {}
        for error in errors:
            if isinstance(error.message, dict):
                d.update(error.message)
            else:
                # Non-dict messages are reported under a generic key.
                d['_serializer'] = error
        if d:
            raise ValidationError(d)
| mit |
damahou/sagewui | sagewui_kernels/sage/interfaces.py | 1 | 19866 | # -*- coding: utf-8 -*
"""
sagewui_kernels.sage interfaces
AUTHORS:
- William Stein
- J Miguel Farto
"""
#############################################################################
#
# Copyright (C) 2009 William Stein <wstein@gmail.com>
# Copyright (C) 2015 J Miguel Farto <jmfarto@gmail.com>
# Distributed under the terms of the GNU General Public License (GPL)
# The full text of the GPL is available at:
# http://www.gnu.org/licenses/
#
#############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import chr
from builtins import object
from builtins import str
import os
import re
import shutil
import stat
import tempfile
from time import time as walltime
from base64 import b64encode
import pexpect
class SageServerABC(object):
    """
    Abstract interface for a controlled Python process that executes
    code.  Concrete implementations override every method that raises
    NotImplementedError below.
    """

    def __init__(self, **kwds):
        """Initialize this worksheet process."""
        raise NotImplementedError

    def __repr__(self):
        """Return string representation of this worksheet process."""
        return "Worksheet process"

    def __getstate__(self):
        """Support pickling by exposing an empty state dictionary."""
        return {}

    def interrupt(self):
        """
        Ask the currently running computation to stop; success is not
        guaranteed.  Use ``self.is_computing()`` to check whether it worked.
        """
        raise NotImplementedError

    def quit(self):
        """Quit this worksheet process."""
        raise NotImplementedError

    def start(self):
        """Start this worksheet process running."""
        raise NotImplementedError

    def update(self):
        """Update this worksheet process.  Doing nothing is an acceptable
        default, so this base implementation is intentionally empty."""

    def is_computing(self):
        """
        Return True if a computation is currently running in this
        worksheet subprocess.

        OUTPUT:

        - ``bool``
        """
        raise NotImplementedError

    def is_started(self):
        """
        Return True if this worksheet subprocess has already been started.

        OUTPUT:

        - ``bool``
        """
        raise NotImplementedError

    def execute(self, string, data=None):
        """
        Start executing ``string`` in this subprocess.

        INPUT:

        - ``string`` -- code to be executed.

        - ``data`` -- string or None; when given, an absolute path on the
          server host filesystem.  Some implementations ignore it.
        """
        raise NotImplementedError

    def output_status(self):
        """
        Return an OutputStatus object describing output produced since the
        last executed command, files created, and whether computation is
        now done.
        """
        raise NotImplementedError
class SageServerExpect(SageServerABC):
    """
    A controlled Python process that executes code using expect.

    INPUT:

    - ``process_limits`` -- None or a ProcessLimits objects as defined by
      the ``sagenb.interfaces.ProcessLimits`` object.
    """

    # Execution modes accepted by execute(); 'raw' skips output labelling.
    modes = ['raw', 'python', 'sage']

    def __init__(self,
                 process_limits=None,
                 timeout=0.05,
                 python='sage --python',
                 init_code=None,
                 sage_code=None):
        """
        Initialize this worksheet process.
        """
        self._output_status = OutputStatus('', [], True)
        self._expect = None              # pexpect.spawn handle, set by start()
        self._is_started = False
        self._is_computing = True
        self._timeout = timeout          # pexpect expect() timeout (seconds)
        # The subprocess' sys.ps1 is set to this string, so finding it in
        # the output stream marks the end of a computation.
        self._prompt = "__SAGE__"
        self._all_tempdirs = []          # every tempdir ever handed out
        self._process_limits = process_limits
        self._max_walltime = None
        self._start_walltime = None
        self._data_dir = None
        self._python = python
        self._so_far = ''                # raw output accumulated so far
        self._start_label = None         # "START<n>" marker of current cell
        self._tempdir = ''
        if sage_code is None:
            sage_code = os.path.join(os.path.split(__file__)[0], 'sage_code')
        self._init_script = os.path.join(sage_code, 'init.py')
        # Source (executed inside the subprocess) defining process_limit(),
        # which applies a soft rlimit only when it fits under the hard limit.
        limit_code = '\n'.join((
            'import resource',
            'def process_limit(lim, rlimit, alt_rlimit=None):',
            '    if lim is not None:',
            '        rlimit = getattr(resource, rlimit, alt_rlimit)',
            '        if rlimit is not None:',
            '            hard_lim = resource.getrlimit(rlimit)[1]',
            '            if hard_lim == resource.RLIM_INFINITY or '
            'lim <= hard_lim:',
            '                resource.setrlimit(rlimit, (lim, hard_lim))',
            '',
            '',
        ))
        if process_limits is not None:
            # Template appends one process_limit(...) call per resource.
            lim_tpt = '{}process_limit({}, "RLIMIT_{}", alt_rlimit={}'\
                ')\n'
            limit_code = lim_tpt.format(limit_code, process_limits.max_vmem,
                                        'VMEM', 'resource.RLIMIT_AS')
            limit_code = lim_tpt.format(limit_code, process_limits.max_cputime,
                                        'CPU', None)
            limit_code = lim_tpt.format(limit_code,
                                        process_limits.max_processes,
                                        'NPROC', None)
        # NOTE(review): if init_code is None, the literal text 'None' is
        # formatted into the bootstrap code -- presumably callers always
        # pass init_code; confirm before relying on the default.
        init_code = '{}{}\n\n_support_.sys.ps1 = "{}"'.format(
            limit_code, init_code, self._prompt)
        if process_limits and process_limits.max_walltime:
            self._max_walltime = process_limits.max_walltime
        # The first execute() call implicitly starts the subprocess.
        self.execute(init_code, mode='raw')
        self.execute('print("INIT OK")', mode='python')

    def command(self):
        """Shell command used to launch the subprocess."""
        return '{} -i {}'.format(self._python, self._init_script)

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self._cleanup_tempfiles()
        except:
            pass
        try:
            self._cleanup_data_dir()
        except:
            pass

    def _cleanup_data_dir(self):
        # Restore owner-only permissions on the linked data directory.
        if self._data_dir is not None:
            os.chmod(self._data_dir, stat.S_IRWXU)

    def _cleanup_tempfiles(self):
        # Remove every temporary directory handed out by get_tmpdir().
        for X in self._all_tempdirs:
            try:
                shutil.rmtree(X, ignore_errors=True)
            except:
                pass

    def __repr__(self):
        """
        Return string representation of this worksheet process.
        """
        return "Pexpect implementation of worksheet process"

    def interrupt(self):
        """
        Send an interrupt signal to the currently running computation
        in the controlled process. This may or may not succeed. Call
        ``self.is_computing()`` to find out if it did.
        """
        if self._expect is None:
            return
        try:
            self._expect.sendline(chr(3))  # chr(3) == Ctrl-C
        except:
            pass

    def quit(self):
        """
        Quit this worksheet process.
        """
        if self._expect is None:
            return
        try:
            self._expect.sendline(chr(3))  # send ctrl-c
            self._expect.sendline('quit_sage()')
        except:
            pass
        try:
            # Kill the whole process group, then the child itself.
            os.killpg(self._expect.pid, 9)
            os.kill(self._expect.pid, 9)
        except OSError:
            pass
        self._expect = None
        self._is_started = False
        self._is_computing = False
        self._start_walltime = None
        self._cleanup_tempfiles()
        self._cleanup_data_dir()

    def start(self):
        """
        Start this worksheet process running.
        """
        self._expect = pexpect.spawn(self.command())
        self._expect.setecho(False)
        self._is_started = True
        self._is_computing = False
        self._number = 0        # counter used to label each execution
        self._read()            # drain any startup output
        self._start_walltime = walltime()

    def update(self):
        """
        This should be called periodically by the server processes.
        It does things like checking for timeouts, etc.
        """
        self._check_for_walltimeout()

    def _check_for_walltimeout(self):
        """
        Check if the walltimeout has been reached, and if so, kill
        this worksheet process.
        """
        if (self._is_started and
                self._max_walltime and self._start_walltime and
                walltime() - self._start_walltime > self._max_walltime):
            self.quit()

    def is_computing(self):
        """
        Return True if a computation is currently running in this worksheet
        subprocess.

        OUTPUT:

        - ``bool``
        """
        return self._is_computing

    def is_started(self):
        """
        Return true if this worksheet subprocess has already been started.

        OUTPUT:

        - ``bool``
        """
        return self._is_started

    def get_tmpdir(self):
        """
        Return two strings (local, remote), where local is the name
        of a pre-created temporary directory, and remote is the name
        of the same directory but on the machine on which the actual
        worksheet process is running.

        OUTPUT:

        - local directory

        - remote directory
        """
        # In this implementation the remote process is just running
        # as the same user on the local machine.
        s = tempfile.mkdtemp()
        return (s, s)

    def execute(self, code, data=None, mode='sage', print_time=False):
        """
        Start executing the given code in this subprocess.

        INPUT:

        - ``code`` -- a code containing code to be executed.

        - ``data`` -- a code or None; if given, must specify an
          absolute path on the server host filesystem. This may
          be ignored by some worksheet process implementations.
        """
        if self._expect is None:
            self.start()
        if self._expect is None:
            raise RuntimeError(
                "unable to start subprocess using command '%s'" % self.command(
                ))
        if mode != 'raw':
            # Label the coming output so output_status() can locate it.
            self._number += 1
            self._start_label = 'START{}'.format(self._number)
        local, remote = self.get_tmpdir()
        code = '_support_.os.chdir("{}")\n{}'.format(remote, code)
        if data is not None:
            # make a symbolic link from the data directory into local tmp
            # directory
            self._data = os.path.split(data)[1]
            self._data_dir = data
            os.chmod(data, stat.S_IRWXO | stat.S_IRWXU | stat.S_IRWXG)
            os.symlink(data, os.path.join(local, self._data))
        else:
            self._data = ''
        self._tempdir = local
        self._so_far = ''
        self._is_computing = True
        self._all_tempdirs.append(self._tempdir)
        try:
            # base64-encode the code so arbitrary text survives the pty.
            self._expect.sendline(
                '_support_.execute_code('
                '"{}", globals(), mode="{}", start_label="{}", '
                'print_time={})'.format(
                    b64encode(code.encode('utf-8')).decode('utf-8'),
                    mode, self._start_label, print_time))
        except OSError as msg:
            self._is_computing = False
            self._so_far = str(msg)

    def _read(self):
        # Pump pexpect until the timeout; output lands in self._expect.before.
        try:
            self._expect.expect(pexpect.EOF, self._timeout)
            # got EOF subprocess must have crashed; cleanup
            print("got EOF subprocess must have crashed...")
            print(self._expect.before)
            self.quit()
        except:
            # A timeout here is the normal case: the subprocess is alive.
            pass

    def output_status(self):
        """
        Return OutputStatus object, which includes output from the
        subprocess from the last executed command up until now,
        information about files that were created, and whether
        computing is now done.

        OUTPUT:

        - ``OutputStatus`` object.
        """
        self._read()
        if self._expect is None:
            self._is_computing = False
        else:
            self._so_far = self._expect.before.decode('utf-8')
        # A complete "START<n> ... __SAGE__" span means the cell finished.
        v = re.findall('{}.*{}'.format(self._start_label, self._prompt),
                       self._so_far, re.DOTALL)
        if len(v) > 0:
            self._is_computing = False
            s = v[0][len(self._start_label):-len(self._prompt)]
        else:
            # Otherwise return whatever partial output follows the label.
            v = re.findall('{}.*'.format(self._start_label),
                           self._so_far, re.DOTALL)
            if len(v) > 0:
                s = v[0][len(self._start_label):]
            else:
                s = ''
        if s.endswith(self._prompt):
            s = s[:-len(self._prompt)]
        files = []
        if os.path.exists(self._tempdir):
            # Report files created by the computation, excluding the
            # symlinked data directory itself.
            files = [os.path.join(self._tempdir, x) for x in os.listdir(
                self._tempdir) if x != self._data]
        return OutputStatus(s, files, not self._is_computing)
class SageServerExpectRemote(SageServerExpect):
    """
    Worksheet process that runs computations as another user, possibly on
    another machine, over ssh.

    Requirements:

    1. passwordless ssh login from the server account to the remote user
       account, and
    2. a filesystem both users can write to (possibly mounted at
       different paths on each side).

    VULNERABILITIES: while a computation runs, a malicious user may be
    able to see other users' input; as soon as any calculation finishes
    the result files are moved back to a protected place on the notebook
    server and everything but the input file is deleted, limiting the
    damage.

    INPUT:

    - ``user_at_host`` -- string of the form 'username@host' for which
      ``ssh user@host`` needs no password (set up via ``ssh-keygen`` and
      ``~/.ssh/authorized_keys``; file permissions must be correct).

    - ``local_directory`` -- (default: None) directory on the local
      machine writable by the notebook server and read/writable by the
      remote account.  When None, tries :envvar:`SAGENB_TMPDIR`, then
      :envvar:`TMPDIR`, then falls back to ``/tmp``.

    - ``remote_directory`` -- (default: None) the path at which
      ``local_directory`` is mounted on the remote machine, when it
      differs.

    - ``process_limits`` -- None or a ProcessLimits objects as defined by
      the ``sagenb.interfaces.ProcessLimits`` object.
    """

    def __init__(self,
                 user_at_host,
                 local_directory=None,
                 remote_directory=None,
                 **kwargs):
        self._user_at_host = user_at_host
        if local_directory is None:
            # First environment variable that is set wins.
            for envvar in ("SAGENB_TMPDIR", "TMPDIR"):
                local_directory = os.environ.get(envvar)
                if local_directory is not None:
                    break
            else:
                local_directory = "/tmp"
        self._local_directory = local_directory
        self._remote_directory = (
            local_directory if remote_directory is None else remote_directory)
        SageServerExpect.__init__(self, **kwargs)

    def command(self):
        """Wrap the base command in an ssh invocation on the remote host."""
        base = SageServerExpect.command(self)
        return 'ssh -t {} "{}"'.format(self._user_at_host, base)

    def get_tmpdir(self):
        """
        Return (local, remote) names of a fresh shared temporary
        directory: *local* as seen by the notebook server, *remote* as
        seen by the worksheet process on the other machine.
        """
        local = tempfile.mkdtemp(dir=self._local_directory)
        suffix = local[len(self._local_directory):].lstrip(os.path.sep)
        remote = os.path.join(self._remote_directory, suffix)
        # World read/writable so the remote worksheet process can write it.
        os.chmod(local, stat.S_IRWXO | stat.S_IRWXU | stat.S_IRWXG)
        return (local, remote)
class OutputStatus(object):
    """
    Snapshot of the output state of code running in a worksheet process.

    Attributes:

    - ``output`` -- string of output produced so far
    - ``filenames`` -- list of names of files created by the execution
    - ``done`` -- bool; True once the computation has finished
    - ``tempdir`` -- optional directory name the caller must delete once
      the computation is done
    """

    def __init__(self, output, filenames, done, tempdir=None):
        self.output = output
        self.filenames = filenames
        self.done = done
        self.tempdir = tempdir

    def __repr__(self):
        """Return a human-readable summary of this status."""
        template = (
            "Output Status:\n\toutput: '%s'\n\tfilenames: %s\n\tdone: %s")
        return template % (self.output, self.filenames, self.done)
class ProcessLimits(object):
    """
    Resource limits applied to a worksheet process.

    INPUT:

    - ``max_vmem`` -- virtual memory cap, in megabytes
    - ``max_cputime`` -- cpu-time cap in seconds; the process is killed
      after using this much cpu time
    - ``max_walltime`` -- wall-clock cap in seconds; the process is
      killed after this much time elapses
    - ``max_processes`` -- cap on the number of processes the worksheet
      process may create
    """

    def __init__(self, max_vmem=None, max_cputime=None,
                 max_walltime=None, max_processes=None):
        self.max_vmem = max_vmem
        self.max_cputime = max_cputime
        self.max_walltime = max_walltime
        self.max_processes = max_processes

    def __repr__(self):
        """Return a multi-line summary of all configured limits."""
        parts = [
            'Process limit object:',
            '\n\tmax_vmem = %s MB' % self.max_vmem,
            '\n\tmax_cputime = %s' % self.max_cputime,
            '\n\tmax_walltime = %s' % self.max_walltime,
            '\n\tmax_processes = %s' % self.max_processes,
        ]
        return ''.join(parts)
| gpl-3.0 |
uclouvain/osis | base/models/enums/link_type.py | 2 | 1444 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.utils.translation import gettext_lazy as _
from base.models.utils.utils import ChoiceEnum
class LinkTypes(ChoiceEnum):
    # Types of links between education-group elements; each value is a
    # lazily translated human-readable label used for display.
    REFERENCE = _("Reference")
| agpl-3.0 |
Yellowen/Owrang | patches/june_2013/p03_buying_selling_for_price_list.py | 1 | 1032 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint
import MySQLdb
def execute():
    """Patch: populate Price List.buying_or_selling from Item Price rows.

    A price list is marked "Selling" when any of its item prices has the
    selling flag set, otherwise "Buying".  When the legacy buying/selling
    columns no longer exist (MySQL error 1054, unknown column), every
    price list with an empty buying_or_selling defaults to "Selling".
    """
    webnotes.reload_doc("setup", "doctype", "price_list")
    webnotes.reload_doc("setup", "doctype", "item_price")
    try:
        for price_list in webnotes.conn.sql_list("""select name from `tabPrice List`"""):
            buying, selling = False, False
            # OR together the flags over every distinct (buying, selling)
            # pair.  NOTE(review): 'buying' is accumulated but never used
            # below -- only 'selling' decides the final value.
            for b, s in webnotes.conn.sql("""select distinct buying, selling
                from `tabItem Price` where price_list_name=%s""", price_list):
                buying = buying or cint(b)
                selling = selling or cint(s)
            buying_or_selling = "Selling" if selling else "Buying"
            webnotes.conn.set_value("Price List", price_list, "buying_or_selling", buying_or_selling)
    except MySQLdb.OperationalError, e:
        if e.args[0] == 1054:
            # Columns already dropped: assume selling for all price lists.
            webnotes.conn.sql("""update `tabPrice List` set buying_or_selling='Selling'
                where ifnull(buying_or_selling, '')='' """)
        else:
            raise e
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.