repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
kzhong1991/Flight-AR.Drone-2 | src/3rdparty/Qt4.8.4/src/3rdparty/webkit/Source/ThirdParty/gtest/scripts/fuse_gtest_files.py | 314 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include <gtest/...>'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*<(gtest/.+)>')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  Args:
    directory: root directory that relative_path is resolved against.
    relative_path: the file path relative to the given directory.
  """
  if not os.path.isfile(os.path.join(directory, relative_path)):
    # Single-argument print() behaves identically on Python 2 and 3,
    # unlike the old 'print x' statement form.
    print('ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                      directory))
    print('Please either specify a valid project root directory '
          'or omit it on the command line.')
    sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """
  # Both seed files must be present for the fusion to work; check the
  # header seed first so its error message is the one printed first.
  for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
    VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  Asks the user for confirmation before overwriting an existing file
  (aborting on refusal), and creates any missing ancestor directories.

  Args:
    output_dir: directory the output file will be written into.
    relative_path: the output file path relative to output_dir.
  """
  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    print('%s already exists in directory %s - overwrite it? (y/N) ' %
          (relative_path, output_dir))
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      # print() syntax works on both Python 2 and 3; the old
      # "print 'ABORTED.'" statement is a syntax error on Python 3.
      print('ABORTED.')
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """
  # Validate (and prepare directories for) both generated files.
  for output in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
    VerifyOutputFile(output_dir, output)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
  # open() replaces the file() builtin, which was removed in Python 3.
  output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  # The built-in set type replaces sets.Set; the sets module was
  # deprecated in Python 2.6 and removed in Python 3.
  processed_files = set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file."""
    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return
    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.
    for line in open(os.path.join(gtest_root, gtest_header_path), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        # It's '#include <gtest/...>' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        # Otherwise we copy the line unchanged to the output file.
        output_file.write(line)

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
  # The built-in set type replaces sets.Set (the sets module was removed
  # in Python 3).
  processed_files = set()

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file."""
    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return
    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.  open() replaces
    # the file() builtin, which was removed in Python 3.
    for line in open(os.path.join(gtest_root, gtest_source_file), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
          # It's '#include <gtest/gtest-spi.h>'.  This file is not
          # #included by <gtest/gtest.h>, so we need to process it.
          ProcessFile(GTEST_SPI_H_SEED)
        else:
          # It's '#include <gtest/foo.h>' where foo is not gtest-spi.
          # We treat it as '#include <gtest/gtest.h>', as all other
          # gtest headers are being fused into gtest.h and cannot be
          # #included directly.
          # There is no need to #include <gtest/gtest.h> more than once.
          if not GTEST_H_SEED in processed_files:
            processed_files.add(GTEST_H_SEED)
            output_file.write('#include <%s>\n' % (GTEST_H_OUTPUT,))
      else:
        m = INCLUDE_SRC_FILE_REGEX.match(line)
        if m:
          # It's '#include "src/foo"' - let's process it recursively.
          ProcessFile(m.group(1))
        else:
          output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
  # open() replaces the file() builtin, which was removed in Python 3.
  output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
  FuseGTestAllCcToFile(gtest_root, output_file)
  output_file.close()
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc."""
  # Validate both locations up front so we fail before writing anything.
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  # Generate the fused header first, then the fused source file.
  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Parses the command line and runs the fusion; aborts on bad usage."""
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(sys.argv[1], sys.argv[2])
  else:
    # print() with a single argument works on both Python 2 and 3;
    # the bare 'print __doc__' statement is Python 2 only.
    print(__doc__)
    sys.exit(1)
# Allow the script to be run directly as well as imported as a module.
if __name__ == '__main__':
  main()
| bsd-3-clause |
tanmaykm/edx-platform | lms/djangoapps/notes/views.py | 142 | 1741 | """
Views to support the edX Notes feature.
"""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_with_access
from courseware.tabs import EnrolledTab
from notes.models import Note
from notes.utils import notes_enabled_for_course
from xmodule.annotator_token import retrieve_token
from django.utils.translation import ugettext_noop
@login_required
def notes(request, course_id):
    """Renders the signed-in student's notes page for the given course."""
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)

    # The notes feature can be turned off per course.
    if not notes_enabled_for_course(course):
        raise Http404

    student = request.user
    student_notes = Note.objects.filter(
        course_id=course_key, user=student).order_by('-created', 'uri')

    context = {
        'course': course,
        'notes': student_notes,
        'student': student,
        'storage': course.annotation_storage_url,
        'token': retrieve_token(student.email, course.annotation_token_secret),
        'default_tab': 'myNotes',
    }
    return render_to_response('notes.html', context)
class NotesTab(EnrolledTab):
    """A tab for the course notes."""

    type = 'notes'
    title = ugettext_noop("My Notes")
    view_name = "notes"

    @classmethod
    def is_enabled(cls, course, user=None):
        """The tab is shown only when the base tab is enabled, the platform
        feature flag is set, and the course opted in via advanced_modules."""
        if super(NotesTab, cls).is_enabled(course, user):
            return (settings.FEATURES.get('ENABLE_STUDENT_NOTES')
                    and "notes" in course.advanced_modules)
        return False
| agpl-3.0 |
r0k3/arctic | tests/integration/store/test_pickle_store.py | 1 | 3584 | import bson
from datetime import datetime as dt, timedelta
from mock import patch
import numpy as np
import re
from arctic.arctic import Arctic
def test_save_read_bson(library):
    """A plain BSON-encodable dict round-trips unchanged."""
    original = {'foo': dt(2015, 1, 1), 'bar': ['a', 'b', ['x', 'y', 'z']]}
    library.write('BLOB', original)
    assert library.read('BLOB').data == original
def test_save_read_big_encodable(library):
    """A 20MB document (bigger than one BSON chunk) round-trips unchanged."""
    original = {'foo': 'a' * 1024 * 1024 * 20}
    library.write('BLOB', original)
    assert library.read('BLOB').data == original
def test_save_read_bson_object(library):
    """A dict containing a non-BSON-encodable class round-trips unchanged."""
    original = {'foo': dt(2015, 1, 1), 'object': Arctic}
    library.write('BLOB', original)
    assert library.read('BLOB').data == original
def test_get_info_bson_object(library):
    """Non-BSON-encodable payloads are routed to the PickleStore handler."""
    library.write('BLOB', {'foo': dt(2015, 1, 1), 'object': Arctic})
    assert library.get_info('BLOB')['handler'] == 'PickleStore'
def test_bson_large_object(library):
    """A payload above the 16MB BSON document limit round-trips unchanged."""
    # tobytes() replaces ndarray.tostring(), which is deprecated since
    # NumPy 1.19 and removed in NumPy 2.0; the produced bytes are identical.
    blob = {'foo': dt(2015, 1, 1), 'object': Arctic,
            'large_thing': np.random.rand(int(2.1 * 1024 * 1024)).tobytes()}
    assert len(blob['large_thing']) > 16 * 1024 * 1024
    library.write('BLOB', blob)
    saved_blob = library.read('BLOB').data
    assert blob == saved_blob
def test_bson_leak_objects_delete(library):
    """Deleting a symbol removes both its chunk and its version documents."""
    library.write('BLOB', {'foo': dt(2015, 1, 1), 'object': Arctic})
    assert library._collection.count() == 1
    assert library._collection.versions.count() == 1

    library.delete('BLOB')
    assert library._collection.count() == 0
    assert library._collection.versions.count() == 0
def test_bson_leak_objects_prune_previous(library):
    """Pruning drops the chunks of old-enough versions without leaking them."""
    blob = {'foo': dt(2015, 1, 1), 'object': Arctic}

    # First version: patch bson.ObjectId so the document id is dated
    # yesterday, making this version old enough to be pruned later.
    yesterday = dt.utcnow() - timedelta(days=1, seconds=1)
    _id = bson.ObjectId.from_datetime(yesterday)
    with patch("bson.ObjectId", return_value=_id):
        library.write('BLOB', blob)
    assert library._collection.count() == 1
    assert library._collection.versions.count() == 1

    # Second version (~130 minutes old) written with pruning disabled:
    # both version documents and the original chunk must remain.
    _id = bson.ObjectId.from_datetime(dt.utcnow() - timedelta(minutes=130))
    with patch("bson.ObjectId", return_value=_id):
        library.write('BLOB', {}, prune_previous_version=False)
    assert library._collection.count() == 1
    assert library._collection.versions.count() == 2

    # This write should prune the oldest version in the chunk collection
    library.write('BLOB', {})
    assert library._collection.count() == 0
    assert library._collection.versions.count() == 2
def test_prune_previous_doesnt_kill_other_objects(library):
    """Pruning must not delete chunks still referenced by a live version."""
    blob = {'foo': dt(2015, 1, 1), 'object': Arctic}

    # First version, backdated to yesterday; pruning disabled.
    yesterday = dt.utcnow() - timedelta(days=1, seconds=1)
    _id = bson.ObjectId.from_datetime(yesterday)
    with patch("bson.ObjectId", return_value=_id):
        library.write('BLOB', blob, prune_previous_version=False)
    assert library._collection.count() == 1
    assert library._collection.versions.count() == 1

    # Second version of the *same* blob, ten hours old.  The chunk count
    # stays at 1, so both versions presumably share one stored chunk.
    _id = bson.ObjectId.from_datetime(dt.utcnow() - timedelta(hours=10))
    with patch("bson.ObjectId", return_value=_id):
        library.write('BLOB', blob, prune_previous_version=False)
    assert library._collection.count() == 1
    assert library._collection.versions.count() == 2

    # This write should prune the oldest version in the chunk collection,
    # but the shared chunk must survive because version 2 still uses it.
    library.write('BLOB', {})
    assert library._collection.count() == 1
    assert library._collection.versions.count() == 2

    # Once the remaining blob version is deleted, its chunk goes too.
    library._delete_version('BLOB', 2)
    assert library._collection.count() == 0
    assert library._collection.versions.count() == 1
| lgpl-2.1 |
tensorflow/models | research/slim/nets/cifarnet.py | 3 | 4629 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a variant of the CIFAR-10 model definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
def trunc_normal(stddev):
  """Returns a truncated-normal weights initializer with the given stddev."""
  return tf.truncated_normal_initializer(stddev=stddev)
def cifarnet(images, num_classes=10, is_training=False,
             dropout_keep_prob=0.5,
             prediction_fn=slim.softmax,
             scope='CifarNet'):
  """Creates a variant of the CifarNet model.

  Note that since the output is a set of 'logits', the values fall in the
  interval of (-infinity, infinity). Consequently, to convert the outputs to a
  probability distribution over the characters, one will need to convert them
  using the softmax function:

        logits = cifarnet.cifarnet(images, is_training=False)
        probabilities = tf.nn.softmax(logits)
        predictions = tf.argmax(logits, 1)

  Args:
    images: A batch of `Tensors` of size [batch_size, height, width, channels].
    num_classes: the number of classes in the dataset. If 0 or None, the logits
      layer is omitted and the input features to the logits layer are returned
      instead.
    is_training: specifies whether or not we're currently training the model.
      This variable will determine the behaviour of the dropout layer.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    scope: Optional variable_scope.

  Returns:
    net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the input to the logits layer if num_classes
      is 0 or None.
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
  # Each named layer's output is also recorded in end_points so callers
  # can tap intermediate activations.
  end_points = {}

  with tf.variable_scope(scope, 'CifarNet', [images]):
    # Conv/pool block 1, followed by local response normalization.
    net = slim.conv2d(images, 64, [5, 5], scope='conv1')
    end_points['conv1'] = net
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
    end_points['pool1'] = net
    net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')
    # Conv block 2: here normalization is applied *before* pooling.
    net = slim.conv2d(net, 64, [5, 5], scope='conv2')
    end_points['conv2'] = net
    net = tf.nn.lrn(net, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')
    net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
    end_points['pool2'] = net
    # Two fully connected layers with dropout in between.
    net = slim.flatten(net)
    end_points['Flatten'] = net
    net = slim.fully_connected(net, 384, scope='fc3')
    end_points['fc3'] = net
    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                       scope='dropout3')
    net = slim.fully_connected(net, 192, scope='fc4')
    end_points['fc4'] = net
    # With num_classes falsy (0 or None) the logits layer is skipped and
    # the 192-d feature vector is returned instead.
    if not num_classes:
      return net, end_points
    logits = slim.fully_connected(
        net,
        num_classes,
        biases_initializer=tf.zeros_initializer(),
        weights_initializer=trunc_normal(1 / 192.0),
        weights_regularizer=None,
        activation_fn=None,
        scope='logits')

    end_points['Logits'] = logits
    end_points['Predictions'] = prediction_fn(logits, scope='Predictions')

  return logits, end_points
# Expected input resolution (CIFAR images are 32x32 pixels); exposed as a
# function attribute for callers that size their input pipeline from it.
cifarnet.default_image_size = 32
def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  # Conv layers: truncated-normal init, ReLU, no weight decay.
  conv_defaults = slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu)
  with conv_defaults:
    # FC layers additionally get biases at 0.1 and L2 regularization.
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as combined_scope:
      return combined_scope
| apache-2.0 |
kthordarson/youtube-dl-ruv | youtube_dl/extractor/sapo.py | 178 | 4498 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
)
class SapoIE(InfoExtractor):
    IE_DESC = 'SAPO Vídeos'
    # Accepts optional v2/www subdomains and the .pt/.cv/.ao/.mz/.tl
    # country TLDs; video ids are exactly 20 alphanumeric characters.
    _VALID_URL = r'https?://(?:(?:v2|www)\.)?videos\.sapo\.(?:pt|cv|ao|mz|tl)/(?P<id>[\da-zA-Z]{20})'

    _TESTS = [
        {
            'url': 'http://videos.sapo.pt/UBz95kOtiWYUMTA5Ghfi',
            'md5': '79ee523f6ecb9233ac25075dee0eda83',
            'note': 'SD video',
            'info_dict': {
                'id': 'UBz95kOtiWYUMTA5Ghfi',
                'ext': 'mp4',
                'title': 'Benfica - Marcas na Hitória',
                'description': 'md5:c9082000a128c3fd57bf0299e1367f22',
                'duration': 264,
                'uploader': 'tiago_1988',
                'upload_date': '20080229',
                'categories': ['benfica', 'cabral', 'desporto', 'futebol', 'geovanni', 'hooijdonk', 'joao', 'karel', 'lisboa', 'miccoli'],
            },
        },
        {
            'url': 'http://videos.sapo.pt/IyusNAZ791ZdoCY5H5IF',
            'md5': '90a2f283cfb49193fe06e861613a72aa',
            'note': 'HD video',
            'info_dict': {
                'id': 'IyusNAZ791ZdoCY5H5IF',
                'ext': 'mp4',
                'title': 'Codebits VII - Report',
                'description': 'md5:6448d6fd81ce86feac05321f354dbdc8',
                'duration': 144,
                'uploader': 'codebits',
                'upload_date': '20140427',
                'categories': ['codebits', 'codebits2014'],
            },
        },
        {
            'url': 'http://v2.videos.sapo.pt/yLqjzPtbTimsn2wWBKHz',
            'md5': 'e5aa7cc0bdc6db9b33df1a48e49a15ac',
            'note': 'v2 video',
            'info_dict': {
                'id': 'yLqjzPtbTimsn2wWBKHz',
                'ext': 'mp4',
                'title': 'Hipnose Condicionativa 4',
                'description': 'md5:ef0481abf8fb4ae6f525088a6dadbc40',
                'duration': 692,
                'uploader': 'sapozen',
                'upload_date': '20090609',
                'categories': ['condicionativa', 'heloisa', 'hipnose', 'miranda', 'sapo', 'zen'],
            },
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # All metadata comes from the per-video RSS feed.
        item = self._download_xml(
            'http://rd3.videos.sapo.pt/%s/rss2' % video_id, video_id).find('./channel/item')

        title = item.find('./title').text
        # SAPO-specific fields live in the videos.sapo.pt/mrss namespace;
        # the thumbnail uses the Yahoo Media RSS namespace instead.
        description = item.find('./{http://videos.sapo.pt/mrss/}synopse').text
        thumbnail = item.find('./{http://search.yahoo.com/mrss/}content').get('url')
        duration = parse_duration(item.find('./{http://videos.sapo.pt/mrss/}time').text)
        uploader = item.find('./{http://videos.sapo.pt/mrss/}author').text
        upload_date = unified_strdate(item.find('./pubDate').text)
        view_count = int(item.find('./{http://videos.sapo.pt/mrss/}views').text)
        comment_count = int(item.find('./{http://videos.sapo.pt/mrss/}comment_count').text)
        tags = item.find('./{http://videos.sapo.pt/mrss/}tags').text
        categories = tags.split() if tags else []
        # The m18 flag marks adult-only content.
        age_limit = 18 if item.find('./{http://videos.sapo.pt/mrss/}m18').text == 'true' else 0

        video_url = item.find('./{http://videos.sapo.pt/mrss/}videoFile').text
        # videoSize is of the form "WIDTHxHEIGHT".
        video_size = item.find('./{http://videos.sapo.pt/mrss/}videoSize').text.split('x')
        formats = [{
            'url': video_url,
            'ext': 'mp4',
            'format_id': 'sd',
            'width': int(video_size[0]),
            'height': int(video_size[1]),
        }]

        # The HD variant is reached by rewriting the trailing /mov/1 path
        # segment of the SD URL to /mov/39.
        if item.find('./{http://videos.sapo.pt/mrss/}HD').text == 'true':
            formats.append({
                'url': re.sub(r'/mov/1$', '/mov/39', video_url),
                'ext': 'mp4',
                'format_id': 'hd',
                'width': 1280,
                'height': 720,
            })

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'uploader': uploader,
            'upload_date': upload_date,
            'view_count': view_count,
            'comment_count': comment_count,
            'categories': categories,
            'age_limit': age_limit,
            'formats': formats,
        }
| unlicense |
JacobJacob/volatility | volatility/plugins/iehistory.py | 44 | 6520 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2010, 2011, 2012 Michael Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
## http://www.docslide.com/forensic-analysis-of-internet-explorer-activity-files/
## http://libmsiecf.googlecode.com/files/MSIE%20Cache%20File%20%28index.dat%29%20format.pdf
import volatility.obj as obj
import volatility.plugins.taskmods as taskmods
import volatility.utils as utils
import volatility.win32.tasks as tasks
class _URL_RECORD(obj.CType):
    """A class for URL and LEAK records"""

    def is_valid(self):
        # Plausible records pass the base check and have a positive
        # length below 32 KB.
        return obj.CType.is_valid(self) and 0 < self.Length < 32768

    @property
    def Length(self):
        # The on-disk field stores the record length in 0x80-byte units.
        return self.m('Length') * 0x80

    def has_data(self):
        """Determine if a record has data"""
        ## for LEAK records the DataOffset is sometimes 0xdeadbeef
        return (0 < self.DataOffset < self.Length
                and self.Url.split(":")[0]
                not in ["PrivacIE", "ietld", "iecompat", "Visited"])
class IEHistoryVTypes(obj.ProfileModification):
    """Apply structures for IE history parsing"""
    # Only applies to Windows profiles.
    conditions = {'os': lambda x: x == 'windows'}

    def modification(self, profile):
        """Registers the URL/LEAK and REDR record layouts and classes."""
        profile.vtypes.update({
            '_URL_RECORD' : [ None, {
                'Signature' : [ 0, ['String', dict(length = 4)]],
                # Stored in 0x80-byte units; scaled by the Length property.
                'Length' : [ 0x4, ['unsigned int']],
                'LastModified' : [ 0x08, ['WinTimeStamp', dict(is_utc = True)]], # secondary
                'LastAccessed' : [ 0x10, ['WinTimeStamp', dict(is_utc = True)]], # primary
                'UrlOffset' : [ 0x34, ['unsigned char']],
                'FileOffset' : [ 0x3C, ['unsigned int']],
                'DataOffset' : [ 0x44, ['unsigned int']],
                'DataSize': [ 0x48, ['unsigned int']],
                # String fields are located via the *Offset members,
                # relative to the start of the record.
                'Url' : [ lambda x : x.obj_offset + x.UrlOffset, ['String', dict(length = 4096)]],
                'File' : [ lambda x : x.obj_offset + x.FileOffset, ['String', dict(length = 4096)]],
                'Data' : [ lambda x : x.obj_offset + x.DataOffset, ['String', dict(length = 4096)]],
            }],
            '_REDR_RECORD' : [ None, {
                'Signature' : [ 0, ['String', dict(length = 4)]],
                'Length' : [ 0x4, ['unsigned int']],
                'Url' : [ 0x10, ['String', dict(length = 4096)]],
            }],
        })
        profile.object_classes.update({
            '_URL_RECORD' : _URL_RECORD,
            # REDR records reuse _URL_RECORD for is_valid()/Length.
            '_REDR_RECORD': _URL_RECORD,
        })
class IEHistory(taskmods.DllList):
    """Reconstruct Internet Explorer cache / history"""

    def __init__(self, config, *args, **kwargs):
        taskmods.DllList.__init__(self, config, *args, **kwargs)
        # Deleted (LEAK) and redirected (REDR) records are opt-in.
        config.add_option("LEAK", short_option = 'L',
                          default = False, action = 'store_true',
                          help = 'Find LEAK records (deleted)')
        config.add_option("REDR", short_option = 'R',
                          default = False, action = 'store_true',
                          help = 'Find REDR records (redirected)')

    def calculate(self):
        """Yields (process, record) pairs for every valid cache record
        found in the memory of the selected processes."""
        kernel_space = utils.load_as(self._config)

        ## Select the tags to scan for. Always find visited URLs,
        ## but make freed and redirected records optional.
        tags = ["URL "]
        if self._config.LEAK:
            tags.append("LEAK")
        if self._config.REDR:
            tags.append("REDR")

        ## Define the record type based on the tag
        tag_records = {
            "URL " : "_URL_RECORD",
            "LEAK" : "_URL_RECORD",
            "REDR" : "_REDR_RECORD"}

        ## Enumerate processes based on the --pid and --offset
        for proc in self.filter_tasks(tasks.pslist(kernel_space)):
            ## Acquire a process specific AS
            ps_as = proc.get_process_address_space()

            for hit in proc.search_process_memory(tags):
                ## Get a preview of the data to see what tag was detected
                tag = ps_as.read(hit, 4)

                ## Create the appropriate object type based on the tag
                record = obj.Object(tag_records[tag], offset = hit, vm = ps_as)
                if record.is_valid():
                    yield proc, record

    def render_text(self, outfd, data):
        """Writes one human-readable section per record to outfd."""
        for process, record in data:
            outfd.write("*" * 50 + "\n")
            outfd.write("Process: {0} {1}\n".format(process.UniqueProcessId, process.ImageFileName))
            outfd.write("Cache type \"{0}\" at {1:#x}\n".format(record.Signature, record.obj_offset))
            outfd.write("Record length: {0:#x}\n".format(record.Length))
            outfd.write("Location: {0}\n".format(record.Url))
            ## Extended fields are available for these records
            if record.obj_name == "_URL_RECORD":
                outfd.write("Last modified: {0}\n".format(record.LastModified))
                outfd.write("Last accessed: {0}\n".format(record.LastAccessed))
                outfd.write("File Offset: {0:#x}, Data Offset: {1:#x}, Data Length: {2:#x}\n".format(record.Length, record.FileOffset, record.DataOffset, record.DataSize))
                if record.FileOffset > 0:
                    outfd.write("File: {0}\n".format(record.File))
                if record.has_data():
                    outfd.write("Data: {0}\n".format(record.Data))

    def render_csv(self, outfd, data):
        """Writes one signature,modified,accessed,url row per record."""
        for process, record in data:
            # Timestamps exist only for URL/LEAK records, not REDR.
            if record.obj_name == "_URL_RECORD":
                t1 = str(record.LastModified or '')
                t2 = str(record.LastAccessed or '')
            else:
                t1 = t2 = ""
            outfd.write("{0},{1},{2},{3}\n".format(record.Signature, t1.strip(), t2.strip(), record.Url))
| gpl-2.0 |
mjg/Impressive | src/osdfont.py | 2 | 11590 | ##### OSD FONT RENDERER ########################################################
# Reference type objects used to classify ForceUnicode() input.  On
# Python 2 these are unicode and str; on Python 3 they are str and bytes.
typesUnicodeType = type(u'unicode')
typesStringType = type(b'bytestring')
# force a string or sequence of ordinals into a unicode string
def ForceUnicode(s, charset='iso8859-15'):
    """Converts s into a unicode string.

    Accepts a unicode/text string (returned unchanged), a byte string
    (decoded with `charset`, ignoring undecodable bytes), or a sequence
    of code points.  Raises TypeError for anything else.
    """
    # isinstance() (instead of an exact type() comparison against
    # module-level type aliases) also accepts text/bytes subclasses.
    if isinstance(s, type(u'')):
        return s
    if isinstance(s, type(b'')):
        return s.decode(charset, 'ignore')
    if isinstance(s, (tuple, list, range)):
        try:
            code_point_to_char = unichr  # Python 2
        except NameError:
            code_point_to_char = chr     # Python 3
        return u''.join(map(code_point_to_char, s))
    raise TypeError("string argument not convertible to Unicode")
# search a system font path for a font file
def SearchFont(root, name):
    """Looks for `name` under `root`, descending up to four directory
    levels; returns the first matching file path or None."""
    if not os.path.isdir(root):
        return None
    wildcard_prefix = ""
    # Each iteration adds one "*/" level to the glob pattern; the length
    # bound stops the search after the depth-4 attempt.
    while len(wildcard_prefix) < 10:
        candidates = [path for path in glob.glob(root + wildcard_prefix + name)
                      if os.path.isfile(path)]
        if candidates:
            return candidates[0]
        wildcard_prefix += "*/"
    return None
# load a system font
def LoadFont(dirs, name, size):
    """Loads a TrueType font by name, searching `dirs` if a direct load
    fails.  Returns an ImageFont instance, or None if nothing loads."""
    # first try to load the font directly
    try:
        return ImageFont.truetype(name, size, encoding='unic')
    except Exception:
        # 'except Exception' rather than a bare 'except:' so that
        # KeyboardInterrupt/SystemExit are not swallowed.
        pass
    # no need to search further on Windows
    if os.name == 'nt':
        return None
    # start search for the font
    for font_dir in dirs:  # renamed from 'dir' to avoid shadowing the builtin
        fontfile = SearchFont(font_dir + "/", name)
        if fontfile:
            try:
                return ImageFont.truetype(fontfile, size, encoding='unic')
            except Exception:
                pass
    return None
# alignment constants
# The horizontal values double as the divisor in AlignTextEx()
# (x - width // align): Right shifts by the full line width, Center by
# half of it, and Left (0) is special-cased to no shift.
Left = 0
Right = 1
Center = 2
# Vertical direction / automatic placement selectors.
Down = 0
Up = 1
Auto = -1
# font renderer class
class GLFont:
    def __init__(self, width, height, name, size, search_path=[],
                 default_charset='iso8859-15', extend=1, blur=1):
        """Creates a glyph atlas of width x height pixels for the font.

        name may be a single font name/path or a sequence of candidates;
        extend and blur are the number of outline/blur filter passes.
        NOTE(review): the mutable default for search_path is harmless
        here (it is only read), but is still a Python pitfall.
        """
        self.width = width
        self.height = height
        self._i_extend = range(extend)
        self._i_blur = range(blur)
        # Pixel margin reserved around each glyph for the extend/blur
        # filters plus one guard pixel.
        self.feather = extend + blur + 1
        # Row-packing cursor into the atlas (see AllocateGlyphBox).
        self.current_x = 0
        self.current_y = 0
        self.max_height = 0
        self.boxes = {}    # character -> GlyphBox (atlas placement)
        self.widths = {}   # character -> advance width in pixels
        self.line_height = 0
        self.default_charset = default_charset
        # NOTE(review): basestring is Python 2 only - confirm the project
        # still targets Python 2 before running this under Python 3.
        if isinstance(name, basestring):
            self.font = LoadFont(search_path, name, size)
        else:
            for check_name in name:
                self.font = LoadFont(search_path, check_name, size)
                if self.font: break
        if not self.font:
            raise IOError("font file not found")
        # img holds the luminance+alpha atlas uploaded to the GL texture;
        # alpha holds the extended/blurred outline pasted in as alpha.
        self.img = Image.new('LA', (width, height))
        self.alpha = Image.new('L', (width, height))
        self.extend = ImageFilter.MaxFilter()
        self.blur = ImageFilter.Kernel((3, 3), [1,2,1,2,4,2,1,2,1])
        self.tex = gl.make_texture(gl.TEXTURE_2D, filter=gl.NEAREST)
        # Pre-render the printable ASCII range.
        self.AddString(range(32, 128))
        # Batched-drawing state used by BeginDraw()/EndDraw().
        self.vertices = None
        self.index_buffer = None
        self.index_buffer_capacity = 0
    def AddCharacter(self, c):
        """Renders glyph c into the atlas and records its box and width."""
        w, h = self.font.getsize(c)
        try:
            # Some PIL versions report an additional glyph offset that
            # must be added to the size; older ones lack getoffset().
            ox, oy = self.font.getoffset(c)
            w += ox
            h += oy
        except AttributeError:
            pass
        self.line_height = max(self.line_height, h)
        # Leave a feather-sized margin on every side for the filters.
        size = (w + 2 * self.feather, h + 2 * self.feather)
        glyph = Image.new('L', size)
        draw = ImageDraw.Draw(glyph)
        draw.text((self.feather, self.feather), c, font=self.font, fill=255)
        del draw
        box = self.AllocateGlyphBox(*size)
        self.img.paste(glyph, (box.orig_x, box.orig_y))
        # The alpha plane gets an extended and blurred copy of the glyph,
        # which EndDraw() uses for the dark halo behind the text.
        for i in self._i_extend: glyph = glyph.filter(self.extend)
        for i in self._i_blur: glyph = glyph.filter(self.blur)
        self.alpha.paste(glyph, (box.orig_x, box.orig_y))
        self.boxes[c] = box
        self.widths[c] = w
        del glyph
    def AddString(self, s, charset=None, fail_silently=False):
        """Ensures every character of s has a glyph in the atlas.

        The GL texture is re-uploaded only if at least one new glyph was
        rendered.  With fail_silently, an overflowing atlas (ValueError
        from AllocateGlyphBox) is ignored instead of propagated.
        """
        update_count = 0
        try:
            for c in ForceUnicode(s, self.GetCharset(charset)):
                if c in self.widths:
                    continue  # glyph already rendered
                self.AddCharacter(c)
                update_count += 1
        except ValueError:
            if fail_silently:
                pass
            else:
                raise
        if not update_count: return
        self.img.putalpha(self.alpha)
        gl.load_texture(gl.TEXTURE_2D, self.tex, self.img)
    def AllocateGlyphBox(self, w, h):
        """Reserves a w x h pixel region in the atlas (simple row packer).

        Raises ValueError when the atlas bitmap cannot fit the glyph.
        """
        # Start a new row when the current one cannot fit the glyph.
        if self.current_x + w > self.width:
            self.current_x = 0
            self.current_y += self.max_height
            self.max_height = 0
        if self.current_y + h > self.height:
            raise ValueError("bitmap too small for all the glyphs")
        box = self.GlyphBox()
        # Pixel position and size inside the atlas.
        box.orig_x = self.current_x
        box.orig_y = self.current_y
        box.size_x = w
        box.size_y = h
        # Normalized (0..1) texture coordinates of the box corners.
        box.x0 = self.current_x / float(self.width)
        box.y0 = self.current_y / float(self.height)
        box.x1 = (self.current_x + w) / float(self.width)
        box.y1 = (self.current_y + h) / float(self.height)
        # NOTE(review): PixelX/PixelY are module-level globals, presumably
        # the size of one screen pixel in normalized units - confirm.
        box.dsx = w * PixelX
        box.dsy = h * PixelY
        self.current_x += w
        self.max_height = max(self.max_height, h)
        return box
def GetCharset(self, charset=None):
if charset: return charset
return self.default_charset
def SplitText(self, s, charset=None):
return ForceUnicode(s, self.GetCharset(charset)).split(u'\n')
    def GetLineHeight(self):
        """Returns the height in pixels of a single text line."""
        return self.line_height
def GetTextWidth(self, s, charset=None):
return max([self.GetTextWidthEx(line) for line in self.SplitText(s, charset)])
def GetTextHeight(self, s, charset=None):
return len(self.SplitText(s, charset)) * self.line_height
def GetTextSize(self, s, charset=None):
lines = self.SplitText(s, charset)
return (max([self.GetTextWidthEx(line) for line in lines]), len(lines) * self.line_height)
def GetTextWidthEx(self, u):
if u: return sum([self.widths.get(c, 0) for c in u])
else: return 0
def GetTextHeightEx(self, u=[]):
return self.line_height
def AlignTextEx(self, x, u, align=Left):
if not align: return x
return x - self.GetTextWidthEx(u) // align
    class FontShader(GLShader):
        # Textured-quad shader for glyph rendering: aPosAndTexCoord.xy is
        # the screen position in 0..1 (y pointing down), .zw the atlas
        # texture coordinate; uColor modulates the sampled atlas texel.
        vs = """
            attribute highp vec4 aPosAndTexCoord;
            varying mediump vec2 vTexCoord;
            void main() {
                gl_Position = vec4(vec2(-1.0, 1.0) + aPosAndTexCoord.xy * vec2(2.0, -2.0), 0.0, 1.0);
                vTexCoord = aPosAndTexCoord.zw;
            }
        """
        fs = """
            uniform lowp sampler2D uTex;
            uniform lowp vec4 uColor;
            varying mediump vec2 vTexCoord;
            void main() {
                gl_FragColor = uColor * texture2D(uTex, vTexCoord);
            }
        """
        attributes = { 0: 'aPosAndTexCoord' }
        uniforms = ['uColor']
def BeginDraw(self):
    """Start batching glyph quads; Draw() appends into self.vertices
    until EndDraw() flushes the batch."""
    self.vertices = []
def EndDraw(self, color=(1.0, 1.0, 1.0), alpha=1.0, beveled=True):
    """Flush the vertex batch collected since BeginDraw() to the GPU.

    Draws every batched glyph quad in one or two passes: with
    beveled=True a darkening pass is blended in first so the text gets a
    dark halo.  color/alpha tint the glyphs.  Resets self.vertices to
    None so a later Draw() starts its own batch.
    """
    if not self.vertices:
        self.vertices = None
        return
    # 16 floats per character: 4 vertices of (x, y, u, v) each
    char_count = len(self.vertices) // 16
    # indices are 16-bit and 4 vertices/char -> at most 16383 characters
    if char_count > 16383:
        print("Internal Error: too many characters (%d) to display in one go, truncating." % char_count, file=sys.stderr)
        char_count = 16383
    # create an index buffer large enough for the text
    if not(self.index_buffer) or (self.index_buffer_capacity < char_count):
        # round the capacity up to a multiple of 64 characters
        self.index_buffer_capacity = (char_count + 63) & (~63)
        data = []
        # two triangles per glyph quad (0-2-1, 1-2-3)
        for b in range(0, self.index_buffer_capacity * 4, 4):
            data.extend([b+0, b+2, b+1, b+1, b+2, b+3])
        if not self.index_buffer:
            self.index_buffer = gl.GenBuffers()
        gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, self.index_buffer)
        gl.BufferData(gl.ELEMENT_ARRAY_BUFFER, data=data, type=gl.UNSIGNED_SHORT, usage=gl.DYNAMIC_DRAW)
    else:
        # existing buffer already big enough: just bind it
        gl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, self.index_buffer)
    # set the vertex buffer (client-side array, so unbind ARRAY_BUFFER)
    vbuf = (c_float * len(self.vertices))(*self.vertices)
    gl.BindBuffer(gl.ARRAY_BUFFER, 0)
    gl.set_enabled_attribs(0)
    gl.VertexAttribPointer(0, 4, gl.FLOAT, False, 0, vbuf)
    # draw it
    shader = self.FontShader.get_instance().use()
    gl.BindTexture(gl.TEXTURE_2D, self.tex)
    if beveled:
        # first pass darkens the background under the glyphs (halo)
        gl.BlendFunc(gl.ZERO, gl.ONE_MINUS_SRC_ALPHA)
        gl.Uniform4f(shader.uColor, 0.0, 0.0, 0.0, alpha)
        gl.DrawElements(gl.TRIANGLES, char_count * 6, gl.UNSIGNED_SHORT, 0)
        gl.BlendFunc(gl.ONE, gl.ONE)
    # main pass: premultiplied-alpha style color
    gl.Uniform4f(shader.uColor, color[0] * alpha, color[1] * alpha, color[2] * alpha, 1.0)
    gl.DrawElements(gl.TRIANGLES, char_count * 6, gl.UNSIGNED_SHORT, 0)
    # restore the conventional blend mode for subsequent rendering
    gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
    self.vertices = None
def Draw(self, origin, text, charset=None, align=Left, color=(1.0, 1.0, 1.0), alpha=1.0, beveled=True, bold=False):
    """Draw (or batch) multi-line text at origin (pixel coordinates).

    If called outside a BeginDraw()/EndDraw() pair the text is flushed
    immediately; otherwise quads are only appended to the current batch
    and color/alpha/beveled take effect at the explicit EndDraw().
    ``bold`` fakes boldness by re-drawing the text shifted one pixel to
    the right (only when not beveled).
    """
    own_draw = (self.vertices is None)
    if own_draw:
        self.BeginDraw()
    lines = self.SplitText(text, charset)
    x0, y = origin
    # compensate for the feather (blur) border baked around each glyph
    x0 -= self.feather
    y -= self.feather
    for line in lines:
        sy = y * PixelY
        x = self.AlignTextEx(x0, line, align)
        for c in line:
            # skip characters that are not present in the atlas
            if not c in self.widths: continue
            self.boxes[c].add_vertices(self.vertices, x * PixelX, sy)
            x += self.widths[c]
        y += self.line_height
    if bold and not(beveled):
        # second pass one pixel to the right; bold=False stops recursion
        self.Draw((origin[0] + 1, origin[1]), text, charset=charset, align=align, color=color, alpha=alpha, beveled=False, bold=False)
    if own_draw:
        self.EndDraw(color, alpha, beveled)
class GlyphBox:
    """One glyph's cell in the font atlas.

    Callers populate the coordinate attributes (x0/y0/x1/y1 texture
    corners and dsx/dsy on-screen extents) after construction.
    """

    def add_vertices(self, vertex_list, sx=0.0, sy=0.0):
        """Append this glyph's four corner vertices to vertex_list.

        Each vertex is four floats -- screen position followed by the
        texture coordinate -- in top-left, top-right, bottom-left,
        bottom-right order.
        """
        right = sx + self.dsx
        bottom = sy + self.dsy
        vertex_list.extend((
            sx,    sy,     self.x0, self.y0,
            right, sy,     self.x1, self.y0,
            sx,    bottom, self.x0, self.y1,
            right, bottom, self.x1, self.y1,
        ))
# high-level draw function
def DrawOSD(x, y, text, halign=Auto, valign=Auto, alpha=1.0):
    """Draw OSD text using the global OSDFont, with automatic alignment.

    Negative x/y are measured from the right/bottom screen edge and flip
    the auto alignment accordingly.  Draws nothing when no OSD font is
    loaded, text is empty, or alpha is negligible.
    """
    if not(OSDFont) or not(text) or (alpha <= 0.004): return
    if alpha > 1.0: alpha = 1.0
    if halign == Auto:
        if x < 0:
            # negative x: anchor to the right screen edge
            x += ScreenWidth
            halign = Right
        else:
            halign = Left
    # HalfScreen mode apparently shifts left-aligned OSD text into the
    # right half of the screen -- TODO confirm where HalfScreen is set
    if HalfScreen and (halign == Left):
        x += ScreenWidth // 2
    if valign == Auto:
        if y < 0:
            # negative y: anchor to the bottom screen edge
            y += ScreenHeight
            valign = Up
        else:
            valign = Down
    if valign != Down:
        # valign is used as a divisor: presumably Up=1, Center=2
        y -= OSDFont.GetLineHeight() // valign
    OSDFont.Draw((x, y), text, align=halign, alpha=alpha)
# very high-level draw function
def DrawOSDEx(position, text, alpha_factor=1.0):
    """Draw OSD text at one of the predefined screen slots.

    ``position`` encodes a slot: bit 0 selects top (0) / bottom (1), the
    remaining bits select left (0), right (1), or horizontal center
    (>= 2).  alpha_factor scales the global OSDAlpha.
    """
    xpos = position >> 1
    # +OSDMargin from the top, or -OSDMargin (i.e. from the bottom)
    y = (1 - 2 * (position & 1)) * OSDMargin
    if xpos < 2:
        # left (+margin) or right (-margin); DrawOSD auto-aligns on sign
        x = (1 - 2 * xpos) * OSDMargin
        halign = Auto
    else:
        x = ScreenWidth // 2
        halign = Center
    DrawOSD(x, y, text, halign, alpha = OSDAlpha * alpha_factor)
RequiredShaders.append(GLFont.FontShader)
| gpl-2.0 |
Barbarian1010/pychess | lib/pychess/Utils/lutils/TranspositionTable.py | 20 | 4110 | from pychess.Utils.const import hashfALPHA, hashfBETA, hashfEXACT, hashfBAD
from pychess.Utils.lutils.ldata import MATE_VALUE, MAXPLY
from ctypes import create_string_buffer, memset
from struct import Struct, pack_into, unpack_from
# Store hash entries in buckets of 4. An entry consists of:
# key 32 bits derived from the board hash
# search_id counter used to determine entry's age
# hashf bound type (one of the hashf* constants)
# depth search depth
# score search score
# move best move (or cutoff move)
entryType = Struct('=I B B H h H')
class TranspositionTable:
    """Fixed-size chess transposition table plus move-ordering heuristics.

    Hash entries are stored in buckets of four packed structs (see
    entryType above), addressed by the board hash.  The class also keeps
    the killer-move, hash-move and butterfly (history) tables used for
    move ordering during search.
    """

    def __init__ (self, maxSize):
        # maxSize is in bytes; round down to a whole number of 4-entry buckets
        assert maxSize > 0
        self.buckets = maxSize // (4 * entryType.size)
        self.data = create_string_buffer(self.buckets * 4 * entryType.size)
        self.search_id = 0
        # killer and hash-move tables indexed by ply (up to 80 plies)
        self.killer1 = [-1]*80
        self.killer2 = [-1]*80
        self.hashmove = [-1]*80
        # butterfly (history) counters indexed by from/to square pair
        self.butterfly = [0]*(64*64)

    def clear (self):
        """Zero the hash entries and reset all move-ordering tables."""
        memset(self.data, 0, self.buckets * 4 * entryType.size)
        self.killer1 = [-1]*80
        self.killer2 = [-1]*80
        self.hashmove = [-1]*80
        self.butterfly = [0]*(64*64)

    def newSearch (self):
        """Advance the age counter so older entries can be recognized."""
        self.search_id = (self.search_id + 1) & 0xff
        #TODO: consider clearing butterfly table

    def probe (self, board, depth, alpha, beta):
        """Look up board in the table.

        Returns (move, score, flag) on a hit -- flag is hashfBAD when the
        stored depth is insufficient and the entry is only usable for
        move ordering -- or None on a miss.
        """
        baseIndex = (board.hash % self.buckets) * 4
        # 32-bit verification key derived from the remaining hash bits
        key = (board.hash // self.buckets) & 0xffffffff
        for i in range(baseIndex, baseIndex + 4):
            tkey, search_id, hashf, tdepth, score, move = entryType.unpack_from(self.data, i * entryType.size)
            if tkey == key:
                # Mate score bounds are guaranteed to be accurate at any depth.
                if tdepth < depth and abs(score) < MATE_VALUE-MAXPLY:
                    return move, score, hashfBAD
                if hashf == hashfEXACT:
                    return move, score, hashf
                if hashf == hashfALPHA and score <= alpha:
                    return move, alpha, hashf
                if hashf == hashfBETA and score >= beta:
                    return move, beta, hashf

    def record (self, board, move, score, hashf, depth):
        """Store (move, score, hashf, depth) for board, replacing the
        least relevant entry in the bucket if necessary."""
        baseIndex = (board.hash % self.buckets) * 4
        key = (board.hash // self.buckets) & 0xffffffff
        # We always overwrite *something*: an empty slot, this position's last entry, or else the least relevant.
        staleIndex = baseIndex
        staleRelevance = 0xffff
        for i in range(baseIndex, baseIndex + 4):
            tkey, search_id, thashf, tdepth, tscore, tmove = entryType.unpack_from(self.data, i * entryType.size)
            if tkey == 0 or tkey == key:
                staleIndex = i
                break
            # NOTE(review): the bonuses appear to *protect* exact and much
            # older entries from replacement (higher relevance survives) --
            # confirm that is the intended aging policy.
            relevance = (0x8000 if search_id != self.search_id and thashf == hashfEXACT else 0) + \
                        (0x4000 if ((self.search_id - search_id) & 0xff) > 1 else 0) + \
                        tdepth
            if relevance < staleRelevance:
                staleIndex = i
                staleRelevance = relevance
        entryType.pack_into(self.data, staleIndex * entryType.size, key, self.search_id, hashf, depth, score, move)

    def addKiller (self, ply, move):
        """Remember a quiet cutoff move for this ply (two slots kept)."""
        if self.killer1[ply] == -1:
            self.killer1[ply] = move
        elif move != self.killer1[ply]:
            self.killer2[ply] = move

    def isKiller (self, ply, move):
        """Return a move-ordering bonus if move is a killer at this ply
        (or, with a smaller bonus, two plies earlier); 0 otherwise."""
        if self.killer1[ply] == move:
            return 10
        elif self.killer2[ply] == move:
            return 8
        if ply >= 2:
            if self.killer1[ply-2] == move:
                return 6
            elif self.killer2[ply-2] == move:
                return 4
        return 0

    def setHashMove (self, ply, move):
        # Remember the best move from the hash table for this ply.
        self.hashmove[ply] = move

    def isHashMove (self, ply, move):
        return self.hashmove[ply] == move

    def addButterfly (self, move, depth):
        # History heuristic: weight by 2^depth; the low 12 bits of the
        # move encode the from/to squares.
        self.butterfly[move & 0xfff] += 1 << depth

    def getButterfly (self, move):
        return self.butterfly[move & 0xfff]
| gpl-3.0 |
pinterest/kingpin | kingpin/logging_utils/__init__.py | 1 | 6156 | # Copyright 2016 Pinterest, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Some utility functions we use for python logging, built on
top of Python native logging package.
It provides formatted layout in the logging file.
In KingPin, this is used by zk_update_montior and zk_download_data processes.
Usage:
from kingpin.logging_utils import initialize_logger
initialize_logger(logger_name='', logger_filename='',
log_to_stderr=False, log_dir="/var/log")
"""
import logging
import logging.handlers
import os
import sys
__INITIALIZED = {}
#: Log only `STDERR_LEVEL` and greater.
#: Integer (based on constants in :py:mod:`logging <python:logging>`) to
#: indicate minimum severity to output::
#:
#: >>> [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
#: logging.FATAL]
#: [10, 20, 30, 40, 50]
#:
#: Note that we default to :py:const:`logging.INFO`.
#: Can be overridden with environment variable ``PINLOG_STDERR_LEVEL`` by
#: setting it to `DEBUG`, `INFO`, `WARNING`, `ERROR` or `FATAL`.
STDERR_LEVEL = getattr(logging, os.environ.get("PINLOG_STDERR_LEVEL", "INFO"))
#: Nothing below this loglevel is logged (in ``stderr``, a file, ``syslog``,
#: # etc). This uses the same format as :py:const:`STDERR_LEVEL`.
#: Can be overridden with environment variable ``PINLOG_MIN_LOG_LEVEL``.
MIN_LOG_LEVEL = getattr(logging, os.environ.get("PINLOG_MIN_LOG_LEVEL", "INFO"))
#: Logfiles are written into this directory. If not set log files are not
#: written to disk.
#: Can be overridden with environment variable ``PINLOG_LOG_DIR``.
LOG_DIR = os.getenv("PINLOG_LOG_DIR")
#: Log files are written to this filename.
#: Can be overridden with environment variable ``PINLOG_LOG_FILE``.
LOG_FILE = os.getenv("PINLOG_LOG_FILE", "/var/log")
def __generate_filename():
    """Generate a log filename based on pid and program name.

    The conventional filename format is ``<program name>.<pid>.log``
    (e.g. "hello_world.1234.log"); a placeholder program name is used
    when ``sys.argv[0]`` is empty.

    Returns:
        A string for the log filename.
    """
    basename = sys.argv[0].split("/")[-1] or "UNKNOWN-PROGRAM-NAME"
    return "{program_name}.{pid}.log".format(program_name=basename,
                                             pid=os.getpid())
def __get_file_formatter():
    """Get logging formatter with Google logging like format.

    Each line in the log should look like:
        [DIWEF]mmdd hh:mm:ss.mmm threadid file:line] <message>

    Returns:
        Formatter object for use in logging handlers.
    """
    # [IWEF]mmdd hh:mm:ss.mmm threadid file:line] <message>
    ASCII_TIME_FORMAT = "%m%d %H:%M:%S"  # mmdd hh:mm:ss
    # %(msecs)03d zero-pads the millisecond field to a fixed width.  The
    # previous "%(msecs)s" rendered the raw float (e.g. ".7" for 7ms),
    # producing variable-width timestamps that do not sort correctly.
    LINE_FORMAT = ("%(levelname).1s"            # [DIWEF]
                   "%(asctime)s.%(msecs)03d "   # ASCII_TIME_FORMAT + millis
                   "%(threadName)s "            # threadid
                   "%(pathname)s:%(lineno)d] "  # file:line]
                   "%(message)s")               # <message>
    return logging.Formatter(fmt=LINE_FORMAT, datefmt=ASCII_TIME_FORMAT)
def initialize_logger(logger_name='', logger_filename=LOG_FILE, log_to_stderr=False, log_dir=LOG_DIR):
    """Initialize global log.

    Before using the log object, run ``initialize_logger()`` to set up
    handlers and formatting. You should almost never run this
    directly: it is up to ``apprunner.runapp()`` to set up the log
    calling this function.

    Args:
        ``logger_name``: A string specifying the name to use while
            outputting log lines.
        ``logger_filename``: The filename to use within ``LOG_DIR``. If
            not set will use the format indicated by
            :meth:`__generate_filename`.
        ``log_to_stderr``: If true we log to stderr
        ``log_dir``: the directory logs go to.

    Returns the logger object.
    """
    global __INITIALIZED
    logger = logging.getLogger(logger_name)
    # Re-initializing would duplicate handlers; return the cached logger.
    if __INITIALIZED.get(logger_name, False):
        return logger
    # Use custom formatter that logs at microseconds level and follows glog
    # format.
    # Translate logging levels.
    stderr_level = STDERR_LEVEL
    logging_level = MIN_LOG_LEVEL
    if log_to_stderr:
        # Setup console handler.
        console_handler = logging.StreamHandler()
        line_format = ""
        line_format += ("%(levelname).1s"  # [DIWEF]
                        "%(asctime)s "     # "%H:%M:%S"
                        "[%(name)s] ")     # [<name>]
        line_format += "%(message)s"       # <message>
        console_handler.setFormatter(logging.Formatter(line_format, datefmt="%H:%M:%S"))
        console_handler.setLevel(max(logging_level, stderr_level))
        logger.addHandler(console_handler)
    if log_dir:
        # Timed rotating file handler has hourly rotation enabled by default.
        # NOTE(review): LOG_FILE defaults to "/var/log", so with default
        # arguments the log *file* name becomes log_dir + "/var/log" --
        # confirm callers always pass a real filename.
        if logger_filename:
            filename = os.path.join(log_dir, logger_filename)
        else:
            filename = os.path.join(log_dir, __generate_filename())
        rotating_file_handler = logging.handlers.TimedRotatingFileHandler(filename, utc=True, when='h')
        rotating_file_handler.setFormatter(__get_file_formatter())
        logger.addHandler(rotating_file_handler)
    logger.setLevel(logging_level)
    # We don't want to propagate log messages. Until we really start having
    # loggers that are initialized with different handlers.
    logger.propagate = False
    __INITIALIZED[logger_name] = True
    return logger
SaschaMester/delicium | build/android/pylib/perf/thermal_throttle.py | 38 | 4529 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from pylib import android_commands
from pylib.device import device_utils
class OmapThrottlingDetector(object):
    """Class to detect and track thermal throttling on an OMAP 4."""

    OMAP_TEMP_FILE = ('/sys/devices/platform/omap/omap_temp_sensor.0/'
                      'temperature')

    @staticmethod
    def IsSupported(device):
        """A device supports this detector iff the OMAP sensor file exists."""
        return device.FileExists(OmapThrottlingDetector.OMAP_TEMP_FILE)

    def __init__(self, device):
        self._device = device

    @staticmethod
    def BecameThrottled(log_line):
        """True when log_line reports the start of throttling."""
        return 'omap_thermal_throttle' in log_line

    @staticmethod
    def BecameUnthrottled(log_line):
        """True when log_line reports the end of throttling."""
        return 'omap_thermal_unthrottle' in log_line

    @staticmethod
    def GetThrottlingTemperature(log_line):
        """Extract the throttling temperature in deg C from log_line, if any."""
        if 'throttle_delayed_work_fn' not in log_line:
            return None
        # the kernel logs the temperature in millidegrees Celsius
        millidegrees = [token for token in log_line.split() if token.isdigit()]
        return float(millidegrees[0]) / 1000.0

    def GetCurrentTemperature(self):
        """Read the current SoC temperature in deg C from the sensor file."""
        raw = self._device.ReadFile(OmapThrottlingDetector.OMAP_TEMP_FILE)
        return float(raw) / 1000.0
class ExynosThrottlingDetector(object):
    """Class to detect and track thermal throttling on an Exynos 5."""

    @staticmethod
    def IsSupported(device):
        """A device supports this detector iff the Exynos 5 sysfs bus exists."""
        return device.FileExists('/sys/bus/exynos5-core')

    def __init__(self, device):
        # Nothing to keep: every query on this detector is log-line based.
        pass

    @staticmethod
    def BecameThrottled(log_line):
        """True when log_line reports the start of throttling."""
        return 'exynos_tmu: Throttling interrupt' in log_line

    @staticmethod
    def BecameUnthrottled(log_line):
        """True when log_line reports the end of throttling."""
        return 'exynos_thermal_unthrottle: not throttling' in log_line

    @staticmethod
    def GetThrottlingTemperature(_log_line):
        """Exynos log lines carry no temperature; always None."""
        return None

    @staticmethod
    def GetCurrentTemperature():
        """No temperature sensor is read for Exynos; always None."""
        return None
class ThermalThrottle(object):
    """Class to detect and track thermal throttling.

    Usage:
        Wait for IsThrottled() to be False before running test
        After running test call HasBeenThrottled() to find out if the
        test run was affected by thermal throttling.
    """

    def __init__(self, device):
        # TODO(jbudorick) Remove once telemetry gets switched over.
        if isinstance(device, android_commands.AndroidCommands):
            device = device_utils.DeviceUtils(device)
        self._device = device
        self._throttled = False
        self._detector = None
        # Pick the SoC-specific detector; stays None on unsupported devices,
        # in which case every query reports "not throttled".
        if OmapThrottlingDetector.IsSupported(device):
            self._detector = OmapThrottlingDetector(device)
        elif ExynosThrottlingDetector.IsSupported(device):
            self._detector = ExynosThrottlingDetector(device)

    def HasBeenThrottled(self):
        """True if there has been any throttling since the last call to
        HasBeenThrottled or IsThrottled.
        """
        return self._ReadLog()

    def IsThrottled(self):
        """True if currently throttled."""
        self._ReadLog()
        return self._throttled

    def _ReadLog(self):
        # Drain dmesg ('-c' clears the kernel ring buffer, so each call only
        # sees new lines) and scan for throttling transitions.  Returns
        # whether any transition was observed during this call.
        if not self._detector:
            return False
        has_been_throttled = False
        serial_number = str(self._device)
        log = self._device.RunShellCommand('dmesg -c')
        degree_symbol = unichr(0x00B0)  # Python 2 file; U+00B0 DEGREE SIGN
        for line in log:
            if self._detector.BecameThrottled(line):
                if not self._throttled:
                    logging.warning('>>> Device %s thermally throttled', serial_number)
                self._throttled = True
                has_been_throttled = True
            elif self._detector.BecameUnthrottled(line):
                if self._throttled:
                    logging.warning('>>> Device %s thermally unthrottled', serial_number)
                self._throttled = False
                has_been_throttled = True
            temperature = self._detector.GetThrottlingTemperature(line)
            if temperature is not None:
                logging.info(u'Device %s thermally throttled at %3.1f%sC',
                             serial_number, temperature, degree_symbol)
        if logging.getLogger().isEnabledFor(logging.DEBUG):
            # Print current temperature of CPU SoC.
            temperature = self._detector.GetCurrentTemperature()
            if temperature is not None:
                logging.debug(u'Current SoC temperature of %s = %3.1f%sC',
                              serial_number, temperature, degree_symbol)
            # Print temperature of battery, to give a system temperature
            dumpsys_log = self._device.RunShellCommand('dumpsys battery')
            for line in dumpsys_log:
                if 'temperature' in line:
                    # battery temperature is reported in tenths of a degree
                    btemp = float([s for s in line.split() if s.isdigit()][0]) / 10.0
                    logging.debug(u'Current battery temperature of %s = %3.1f%sC',
                                  serial_number, btemp, degree_symbol)
        return has_been_throttled
| bsd-3-clause |
varuntiwari27/rally | todo-api/flask/lib/python2.7/site-packages/pip/_vendor/packaging/version.py | 1151 | 11556 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        parsed = Version(version)
    except InvalidVersion:
        # Not PEP 440 compliant; fall back to the setuptools-style scheme.
        parsed = LegacyVersion(version)
    return parsed
class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.

    Raised by :class:`Version` when the string does not match the PEP 440
    grammar; :func:`parse` catches it to fall back to
    :class:`LegacyVersion`.
    """
class _BaseVersion(object):
    """Shared comparison machinery for Version and LegacyVersion.

    Subclasses set ``self._key`` to a totally-ordered tuple; hashing and
    every rich comparison delegate to that key.
    """

    def __hash__(self):
        return hash(self._key)

    def _compare(self, other, method):
        # Comparisons are only defined between _BaseVersion instances.
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return method(self._key, other._key)

    def __lt__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs < rhs)

    def __le__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs <= rhs)

    def __eq__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs == rhs)

    def __ne__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs != rhs)

    def __ge__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs >= rhs)

    def __gt__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs > rhs)
class LegacyVersion(_BaseVersion):
    """A version string that does not conform to PEP 440.

    Ordering uses the pre-PEP 440 setuptools scheme (_legacy_cmpkey),
    which always sorts legacy versions before any valid Version.
    """

    def __init__(self, version):
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

    def __str__(self):
        return self._version

    def __repr__(self):
        return "<LegacyVersion({0})>".format(repr(str(self)))

    @property
    def public(self):
        # Legacy versions have no local segment; the whole string is public.
        return self._version

    @property
    def base_version(self):
        return self._version

    @property
    def local(self):
        return None

    @property
    def is_prerelease(self):
        # Pre/post-release status cannot be determined for legacy versions.
        return False

    @property
    def is_postrelease(self):
        return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
    """Yield normalized, string-comparable components of a legacy version.

    Numeric runs are zero-padded to width 8 so they compare correctly as
    strings; everything else is prefixed with "*" so it sorts before
    numbers.  A trailing "*final" marker makes alpha/beta/candidate tags
    order before the final release.
    """
    for raw in _legacy_version_component_re.split(s):
        part = _legacy_version_replacement_map.get(raw, raw)

        # drop empty fragments and the "." separators
        if not part or part == ".":
            continue

        if part[:1] in "0123456789":
            # pad for numeric comparison
            yield part.zfill(8)
        else:
            yield "*" + part

    # ensure that alpha/beta/candidate are before final
    yield "*final"
def _legacy_cmpkey(version):
    """Build the sort key for a non-PEP 440 (legacy) version string."""
    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the defacto standard originally implemented by setuptools,
    # as before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version setuptools prior to
    # it's adoption of the packaging library.
    parts = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()

            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()

        parts.append(part)
    parts = tuple(parts)

    return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
    """A PEP 440-compliant version, parsed into comparable components."""

    _regex = re.compile(
        r"^\s*" + VERSION_PATTERN + r"\s*$",
        re.VERBOSE | re.IGNORECASE,
    )

    def __init__(self, version):
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(
                match.group("pre_l"),
                match.group("pre_n"),
            ),
            post=_parse_letter_version(
                match.group("post_l"),
                # post releases have two syntaxes: "1.0-1" and "1.0.post1"
                match.group("post_n1") or match.group("post_n2"),
            ),
            dev=_parse_letter_version(
                match.group("dev_l"),
                match.group("dev_n"),
            ),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        # Reassemble the canonical string form from the parsed pieces.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        # Pre-release
        if self._version.pre is not None:
            parts.append("".join(str(x) for x in self._version.pre))

        # Post-release
        if self._version.post is not None:
            parts.append(".post{0}".format(self._version.post[1]))

        # Development release
        if self._version.dev is not None:
            parts.append(".dev{0}".format(self._version.dev[1]))

        # Local version segment
        if self._version.local is not None:
            parts.append(
                "+{0}".format(".".join(str(x) for x in self._version.local))
            )

        return "".join(parts)

    @property
    def public(self):
        # Everything up to (but excluding) the "+<local>" segment.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch + release only: pre/post/dev/local markers are dropped.
        parts = []

        # Epoch
        if self._version.epoch != 0:
            parts.append("{0}!".format(self._version.epoch))

        # Release segment
        parts.append(".".join(str(x) for x in self._version.release))

        return "".join(parts)

    @property
    def local(self):
        # The local segment as a string, or None when absent.
        version_string = str(self)
        if "+" in version_string:
            return version_string.split("+", 1)[1]

    @property
    def is_prerelease(self):
        return bool(self._version.dev or self._version.pre)

    @property
    def is_postrelease(self):
        return bool(self._version.post)
def _parse_letter_version(letter, number):
    """Normalize a (letter, number) release-segment pair.

    Returns a ("canonical letter", int) tuple, or None when neither part
    is present.  Alternate spellings (alpha, beta, c, pre, preview, rev,
    r) are folded to their canonical forms, a missing numeral is an
    implicit 0, and a bare number is treated as an implicit post release
    (e.g. "1.0-1").
    """
    # preferred spellings for the alternate forms
    canonical = {
        "alpha": "a",
        "beta": "b",
        "c": "rc",
        "pre": "rc",
        "preview": "rc",
        "rev": "post",
        "r": "post",
    }

    if letter:
        letter = letter.lower()
        letter = canonical.get(letter, letter)
        # a pre-release without a numeral carries an implicit 0
        return letter, int(number) if number is not None else 0

    if not letter and number:
        # implicit post release syntax (e.g. 1.0-1)
        return "post", int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is None:
        return None
    segments = _local_version_seperators.split(local)
    # numeric segments become ints so they compare numerically
    return tuple(int(seg) if seg.isdigit() else seg.lower()
                 for seg in segments)
def _cmpkey(epoch, release, pre, post, dev, local):
    """Build the tuple used to order Version objects per PEP 440 rules."""
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll use a reverse the list, drop all the now
    # leading zeros until we come to something non zero, then take the rest
    # re-reverse it back into the correct order and make it a tuple and use
    # that for our sorting key.
    release = tuple(
        reversed(list(
            itertools.dropwhile(
                lambda x: x == 0,
                reversed(release),
            )
        ))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        pre = -Infinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        pre = Infinity

    # Versions without a post segment should sort before those with one.
    if post is None:
        post = -Infinity

    # Versions without a development segment should sort after those with one.
    if dev is None:
        dev = Infinity

    if local is None:
        # Versions without a local segment should sort before those with one.
        local = -Infinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        local = tuple(
            (i, "") if isinstance(i, int) else (-Infinity, i)
            for i in local
        )

    return epoch, release, pre, post, dev, local
| apache-2.0 |
amisrs/angular-flask | angular_flask/lib/python2.7/site-packages/jinja2/testsuite/loader.py | 411 | 8162 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.loader
~~~~~~~~~~~~~~~~~~~~~~~
Test the loaders.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import tempfile
import shutil
import unittest
from jinja2.testsuite import JinjaTestCase, dict_loader, \
package_loader, filesystem_loader, function_loader, \
choice_loader, prefix_loader
from jinja2 import Environment, loaders
from jinja2._compat import PYPY, PY2
from jinja2.loaders import split_template_path
from jinja2.exceptions import TemplateNotFound
class LoaderTestCase(JinjaTestCase):
    """Behavioral tests for each bundled loader, using the shared fixture
    loaders imported at module level."""

    def test_dict_loader(self):
        env = Environment(loader=dict_loader)
        tmpl = env.get_template('justdict.html')
        assert tmpl.render().strip() == 'FOO'
        self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')

    def test_package_loader(self):
        env = Environment(loader=package_loader)
        tmpl = env.get_template('test.html')
        assert tmpl.render().strip() == 'BAR'
        self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')

    def test_filesystem_loader(self):
        env = Environment(loader=filesystem_loader)
        tmpl = env.get_template('test.html')
        assert tmpl.render().strip() == 'BAR'
        tmpl = env.get_template('foo/test.html')
        assert tmpl.render().strip() == 'FOO'
        self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')

    def test_choice_loader(self):
        env = Environment(loader=choice_loader)
        tmpl = env.get_template('justdict.html')
        assert tmpl.render().strip() == 'FOO'
        tmpl = env.get_template('test.html')
        assert tmpl.render().strip() == 'BAR'
        self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')

    def test_function_loader(self):
        env = Environment(loader=function_loader)
        tmpl = env.get_template('justfunction.html')
        assert tmpl.render().strip() == 'FOO'
        self.assert_raises(TemplateNotFound, env.get_template, 'missing.html')

    def test_prefix_loader(self):
        env = Environment(loader=prefix_loader)
        tmpl = env.get_template('a/test.html')
        assert tmpl.render().strip() == 'BAR'
        tmpl = env.get_template('b/justdict.html')
        assert tmpl.render().strip() == 'FOO'
        self.assert_raises(TemplateNotFound, env.get_template, 'missing')

    def test_caching(self):
        changed = False
        class TestLoader(loaders.BaseLoader):
            def get_source(self, environment, template):
                # the third element is the up-to-date callback: the cached
                # template is considered stale once `changed` flips to True
                return u'foo', None, lambda: not changed
        # cache_size=-1: unlimited cache, invalidated only by the callback
        env = Environment(loader=TestLoader(), cache_size=-1)
        tmpl = env.get_template('template')
        assert tmpl is env.get_template('template')
        changed = True
        assert tmpl is not env.get_template('template')
        changed = False

        # cache_size=0 disables caching entirely
        env = Environment(loader=TestLoader(), cache_size=0)
        assert env.get_template('template') \
               is not env.get_template('template')

        # LRU behavior with a 2-entry cache: re-touching 'one' last makes
        # 'two' the eviction victim when 'three' is loaded
        env = Environment(loader=TestLoader(), cache_size=2)
        t1 = env.get_template('one')
        t2 = env.get_template('two')
        assert t2 is env.get_template('two')
        assert t1 is env.get_template('one')
        t3 = env.get_template('three')
        assert 'one' in env.cache
        assert 'two' not in env.cache
        assert 'three' in env.cache

    def test_dict_loader_cache_invalidates(self):
        # DictLoader templates are re-checked against the mapping contents
        mapping = {'foo': "one"}
        env = Environment(loader=loaders.DictLoader(mapping))
        assert env.get_template('foo').render() == "one"
        mapping['foo'] = "two"
        assert env.get_template('foo').render() == "two"

    def test_split_template_path(self):
        assert split_template_path('foo/bar') == ['foo', 'bar']
        assert split_template_path('./foo/bar') == ['foo', 'bar']
        # paths escaping the template root are rejected
        self.assert_raises(TemplateNotFound, split_template_path, '../foo')
class ModuleLoaderTestCase(JinjaTestCase):
archive = None
def compile_down(self, zip='deflated', py_compile=False):
super(ModuleLoaderTestCase, self).setup()
log = []
self.reg_env = Environment(loader=prefix_loader)
if zip is not None:
self.archive = tempfile.mkstemp(suffix='.zip')[1]
else:
self.archive = tempfile.mkdtemp()
self.reg_env.compile_templates(self.archive, zip=zip,
log_function=log.append,
py_compile=py_compile)
self.mod_env = Environment(loader=loaders.ModuleLoader(self.archive))
return ''.join(log)
def teardown(self):
super(ModuleLoaderTestCase, self).teardown()
if hasattr(self, 'mod_env'):
if os.path.isfile(self.archive):
os.remove(self.archive)
else:
shutil.rmtree(self.archive)
self.archive = None
def test_log(self):
log = self.compile_down()
assert 'Compiled "a/foo/test.html" as ' \
'tmpl_a790caf9d669e39ea4d280d597ec891c4ef0404a' in log
assert 'Finished compiling templates' in log
assert 'Could not compile "a/syntaxerror.html": ' \
'Encountered unknown tag \'endif\'' in log
def _test_common(self):
tmpl1 = self.reg_env.get_template('a/test.html')
tmpl2 = self.mod_env.get_template('a/test.html')
assert tmpl1.render() == tmpl2.render()
tmpl1 = self.reg_env.get_template('b/justdict.html')
tmpl2 = self.mod_env.get_template('b/justdict.html')
assert tmpl1.render() == tmpl2.render()
def test_deflated_zip_compile(self):
self.compile_down(zip='deflated')
self._test_common()
def test_stored_zip_compile(self):
self.compile_down(zip='stored')
self._test_common()
def test_filesystem_compile(self):
self.compile_down(zip=None)
self._test_common()
def test_weak_references(self):
self.compile_down()
tmpl = self.mod_env.get_template('a/test.html')
key = loaders.ModuleLoader.get_template_key('a/test.html')
name = self.mod_env.loader.module.__name__
assert hasattr(self.mod_env.loader.module, key)
assert name in sys.modules
# unset all, ensure the module is gone from sys.modules
self.mod_env = tmpl = None
try:
import gc
gc.collect()
except:
pass
assert name not in sys.modules
# This test only makes sense on non-pypy python 2
if PY2 and not PYPY:
def test_byte_compilation(self):
log = self.compile_down(py_compile=True)
assert 'Byte-compiled "a/test.html"' in log
tmpl1 = self.mod_env.get_template('a/test.html')
mod = self.mod_env.loader.module. \
tmpl_3c4ddf650c1a73df961a6d3d2ce2752f1b8fd490
assert mod.__file__.endswith('.pyc')
def test_choice_loader(self):
log = self.compile_down()
self.mod_env.loader = loaders.ChoiceLoader([
self.mod_env.loader,
loaders.DictLoader({'DICT_SOURCE': 'DICT_TEMPLATE'})
])
tmpl1 = self.mod_env.get_template('a/test.html')
self.assert_equal(tmpl1.render(), 'BAR')
tmpl2 = self.mod_env.get_template('DICT_SOURCE')
self.assert_equal(tmpl2.render(), 'DICT_TEMPLATE')
def test_prefix_loader(self):
    """A PrefixLoader may mount the module loader under a path prefix."""
    self.compile_down()
    self.mod_env.loader = loaders.PrefixLoader({
        'MOD': self.mod_env.loader,
        'DICT': loaders.DictLoader({'test.html': 'DICT_TEMPLATE'}),
    })
    # Compiled template resolves under its prefix ...
    self.assert_equal(
        self.mod_env.get_template('MOD/a/test.html').render(), 'BAR')
    # ... and the dict template under its own.
    self.assert_equal(
        self.mod_env.get_template('DICT/test.html').render(),
        'DICT_TEMPLATE')
def suite():
    """Collect all loader test cases into a single test suite."""
    result = unittest.TestSuite()
    for case in (LoaderTestCase, ModuleLoaderTestCase):
        result.addTest(unittest.makeSuite(case))
    return result
| mit |
jferreir/mbed | workspace_tools/export/simplicityv3.py | 36 | 5565 | """
mbed SDK
Copyright (c) 2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import split,splitext, basename
class Folder:
    """A node in a simple tree of folder names.

    Used to record the directory structure of the project's source files
    so the exporter template can emit nested folder entries.
    """

    def __init__(self, name):
        self.name = name        # this path component's name
        self.children = []      # direct child Folder nodes, in add order

    def contains(self, folderName):
        """Return True if a direct child named *folderName* exists."""
        # Delegate to findChild instead of duplicating the scan.
        return self.findChild(folderName) is not None

    def __str__(self):
        retval = self.name + " "
        if len(self.children) > 0:
            retval += "[ "
            for child in self.children:
                retval += child.__str__()
            retval += " ]"
        return retval

    def findChild(self, folderName):
        """Return the direct child named *folderName*, or None."""
        for child in self.children:
            if child.name == folderName:
                return child
        return None

    def addChild(self, folderName):
        """Add a direct child named *folderName* and return it.

        Empty names are rejected (returns None).  Adding an existing name
        is idempotent: the already-present child is returned.  The original
        scanned the child list up to three times per call; one lookup is
        enough.
        """
        if folderName == '':
            return None
        child = self.findChild(folderName)
        if child is None:
            child = Folder(folderName)
            self.children.append(child)
        return child
class SimplicityV3(Exporter):
    """Exporter that generates a Simplicity Studio v3 project (.slsproj)."""

    NAME = 'SimplicityV3'
    TOOLCHAIN = 'GCC_ARM'

    # Boards this exporter supports.
    TARGETS = [
        'EFM32GG_STK3700',
        'EFM32ZG_STK3200',
        'EFM32LG_STK3600',
        'EFM32WG_STK3800',
        'EFM32HG_STK3400'
    ]

    # Simplicity Studio part identifiers per target.
    PARTS = {
        'EFM32GG_STK3700': 'com.silabs.mcu.si32.efm32.efm32gg.efm32gg990f1024',
        'EFM32ZG_STK3200': 'com.silabs.mcu.si32.efm32.efm32zg.efm32zg222f32',
        'EFM32LG_STK3600': 'com.silabs.mcu.si32.efm32.efm32lg.efm32lg990f256',
        'EFM32WG_STK3800': 'com.silabs.mcu.si32.efm32.efm32wg.efm32wg990f256',
        'EFM32HG_STK3400': 'com.silabs.mcu.si32.efm32.efm32hg.efm32hg322f64'
    }

    # Simplicity Studio kit identifiers per target.
    KITS = {
        'EFM32GG_STK3700': 'com.silabs.kit.si32.efm32.efm32gg.stk3700',
        'EFM32ZG_STK3200': 'com.silabs.kit.si32.efm32.efm32zg.stk3200',
        'EFM32LG_STK3600': 'com.silabs.kit.si32.efm32.efm32lg.stk3600',
        'EFM32WG_STK3800': 'com.silabs.kit.si32.efm32.efm32wg.stk3800',
        'EFM32HG_STK3400': 'com.silabs.kit.si32.efm32.efm32hg.slstk3400a'
    }

    FILE_TYPES = {
        'c_sources': '1',
        'cpp_sources': '1',
        's_sources': '1'
    }

    # Toolchain-default libraries that must not be listed explicitly.
    EXCLUDED_LIBS = [
        'm',
        'c',
        'gcc',
        'nosys',
        'supc++',
        'stdc++'
    ]

    DOT_IN_RELATIVE_PATH = False

    # NOTE(review): kept as a class attribute for backward compatibility,
    # but generate() now rebinds a fresh tree on the instance so repeated
    # exports no longer accumulate each other's folders in shared state.
    orderedPaths = Folder("Root")

    def check_and_add_path(self, path):
        """Register every folder level of *path* in the ordered path tree."""
        base = self.orderedPaths
        for level in path.split('/'):
            # addChild is idempotent and returns the (possibly existing)
            # child, so a single call per level suffices.
            base = base.addChild(level)

    def generate(self):
        """Write a '<program>.slsproj' project file for the current target."""
        # "make" wants Unix paths
        self.resources.win_to_unix()

        # Start from a clean tree for this export (fixes state shared
        # through the class attribute across instances/calls).
        self.orderedPaths = Folder("Root")

        main_files = []

        # Register the directory of every source file; files living at the
        # project root are collected separately as "main" files.
        for r_type in ['s_sources', 'c_sources', 'cpp_sources']:
            r = getattr(self.resources, r_type)
            if r:
                for source in r:
                    self.check_and_add_path(split(source)[0])
                    if '/' not in source:
                        main_files.append(source)

        # Link against all project libraries except the toolchain defaults.
        # The original redefined EXCLUDED_LIBS locally, shadowing the
        # identical class attribute; use the class attribute instead.
        libraries = []
        for lib in self.resources.libraries:
            l, _ = splitext(basename(lib))
            if l[3:] not in self.EXCLUDED_LIBS:  # l[3:] strips "lib" prefix
                libraries.append(l[3:])

        # Split NAME=VALUE macros into (name, value) pairs.  maxsplit=1
        # keeps any '=' characters inside the value intact.
        defines = []
        for define in self.get_symbols():
            if '=' in define:
                name, value = define.split('=', 1)
                defines.append((name, value))
            else:
                defines.append((define, ''))

        self.check_and_add_path(split(self.resources.linker_script)[0])

        ctx = {
            'name': self.program_name,
            'main_files': main_files,
            'recursiveFolders': self.orderedPaths,
            'object_files': self.resources.objects,
            'include_paths': self.resources.inc_dirs,
            'library_paths': self.resources.lib_dirs,
            'linker_script': self.resources.linker_script,
            'libraries': libraries,
            'symbols': self.get_symbols(),
            'defines': defines,
            'part': self.PARTS[self.target],
            'kit': self.KITS[self.target],
            'loopcount': 0
        }

        # Strip main folder from include paths because ssproj is not
        # capable of handling it.
        if '.' in ctx['include_paths']:
            ctx['include_paths'].remove('.')

        self.gen_file('simplicityv3_slsproj.tmpl', ctx,
                      '%s.slsproj' % self.program_name)
| apache-2.0 |
Aleksei-Badyaev/rst2db | abstrys/cmd_rst2db.py | 1 | 3650 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rst2db.py
# =========
#
# A reStructuredText to DocBook conversion tool, using Python's docutils
# library.
#
# by Eron Hennessey
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
import os
import sys
from abstrys.docutils_ext.docbook_writer import DocBookWriter
from docutils.core import publish_string
DESCRIPTION = 'rst2db - convert reStructuredText to DocBook'
def printerr(error_text):
    """Write *error_text* to stderr with an ``ERROR --`` prefix."""
    sys.stderr.write(f"ERROR -- {error_text}\n")
def process_cmd_args():
    """Build the command-line parser and parse ``sys.argv``.

    Returns the parsed argparse namespace with ``input_filename``,
    ``output_filename``, ``template_filename``, ``root_element`` and
    ``lang`` attributes.
    """
    parser = ArgumentParser(
        description=DESCRIPTION,
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument(
        'input_filename', metavar='INPUT',
        help='Path to input ReST file.')
    parser.add_argument(
        '-o', '--output', dest='output_filename', metavar='OUTPUT',
        help='Path to output DocBook file.')
    parser.add_argument(
        '-t', '--template', dest='template_filename', metavar='TEMPLATE',
        help='Path to template DocBook file.')
    parser.add_argument(
        '-e', '--element', dest='root_element', default='section',
        metavar='ROOT',
        help='Root element of the resulting DocBook file.')
    parser.add_argument(
        '-l', '--lang', dest='lang',
        help='Language code of the resulting DocBook file.')
    return parser.parse_args()
def run():
    """Convert the input ReST file to DocBook and return an exit status.

    Parses the command line, feeds the input file through docutils with
    the DocBook writer, and writes the result either to the requested
    output file or to stdout.

    Returns:
        0 on success or keyboard interrupt, 1 on any other error.
    """
    program_name = os.path.basename(sys.argv[0])
    try:
        params = process_cmd_args()
        if not os.path.exists(params.input_filename):
            printerr("File doesn't exist: %s" % params.input_filename)
            sys.exit(1)
        # Read the source first; the context manager guarantees the
        # handle is closed (the original leaked an open file object).
        with open(params.input_filename, 'rb') as input_file:
            input_file_contents = input_file.read()
        # set up the writer
        if params.output_filename is not None:
            # If there's an output filename, use its basename as the root
            # element's ID.
            (_, filename) = os.path.split(params.output_filename)
            (doc_id, _) = os.path.splitext(filename)
            docutils_writer = DocBookWriter(params.root_element,
                                            doc_id,
                                            lang=params.lang)
        else:
            docutils_writer = DocBookWriter(params.root_element,
                                            lang=params.lang)
        # get the docbook output.
        overrides = {'input_encoding': 'utf-8',
                     'output_encoding': 'utf-8'}
        docbook_contents = publish_string(input_file_contents,
                                          writer=docutils_writer,
                                          settings_overrides=overrides)
        # publish_string returns bytes when output_encoding is set;
        # decode so the result can be written to text-mode streams
        # (writing bytes to sys.stdout or a 'w+' file raises TypeError
        # on Python 3, which the broad handler below would mask).
        if isinstance(docbook_contents, bytes):
            docbook_contents = docbook_contents.decode('utf-8')
        # if there's an output file, write to that. Otherwise, write to stdout.
        if params.output_filename is None:
            sys.stdout.write(docbook_contents)
        else:
            with open(params.output_filename, 'w+') as output_file:
                output_file.write(docbook_contents)
        # that's it, we're done here!
        return 0
    except KeyboardInterrupt:
        # Ctrl-C is a deliberate user action, not an error condition.
        return 0
    except Exception as e:
        # Top-level CLI boundary: report the failure and signal the shell.
        indent = len(program_name) * ' '
        sys.stderr.write(program_name + ': ' + repr(e) + '\n')
        sys.stderr.write(indent + ' for help use --help\n')
        return 1
if __name__ == "__main__":
    # Script entry point: propagate run()'s status code to the shell.
    sys.exit(run())
| bsd-3-clause |
Smelly-London/Smelly-London | NLTK_textmine/map.py | 2 | 162511 | mapping = {'b19956587': 'City of Westminster', 'b18047129': 'Kensington and Chelsea', 'b17999418': 'City of Westminster', 'b1823687x': 'Tower Hamlets', 'b19878230': 'Redbridge', 'b18238142': 'Hammersmith and Fulham', 'b19954724': 'Tower Hamlets', 'b19783747': 'Ealing', 'b19882804': 'Merton', 'b2027483x': 'Harrow', 'b19879726': 'Richmond upon Thame', 'b19884126': 'City of London', 'b18247271': 'City of Westminster', 'b19956769': 'City of Westminster', 'b1978921x': 'Enfield', 'b18249383': 'Haringey', 'b19883316': 'Tower Hamlets', 'b19970110': 'Kingston upon Thames', 'b18244075': 'Camden', 'b19880959': 'Brent', 'b18253106': 'City of London', 'b19821918': 'Havering', 'b18252503': 'London County Council', 'b19787443': 'Barking and Dagenham', 'b19783358': 'Ealing', 'b19796286': 'Brent', 'b19791835': 'Barnet', 'b1799732x': 'Southwark', 'b2005709x': 'City of Westminster', 'b20274786': 'Harrow', 'b1982418x': 'Kensington and Chelsea', 'b2005614x': 'Islington', 'b18237472': 'Islington', 'b1987439x': 'Southwark', 'b1978711x': 'Croydon', 'b19793881': 'Havering', 'b19823691': 'Greenwich', 'b18111671': 'Southwark', 'b19885416': 'Hackney', 'b18219287': 'Southwark', 'b19882373': 'Sutton', 'b18245651': 'City of Westminster', 'b19969351': 'Hounslow', 'b18245389': 'City of Westminster', 'b18244920': 'Lewisham', 'b18236698': 'Tower Hamlets', 'b19953847': 'Tower Hamlets', 'b19821645': 'Bromley', 'b19874649': 'Newham', 'b18250610': 'Wandsworth', 'b18245031': 'Lewisham', 'b1982225x': 'Hackney', 'b19824233': 'Kensington and Chelsea', 'b19823514': 'Greenwich', 'b19788824': 'Enfield', 'b19795063': 'Enfield', 'b19788538': 'Barnet', 'b18111099': 'Lewisham', 'b19881770': 'Haringey', 'b19785161': 'Bexley', 'b18039807': 'Hackney', 'b19885064': 'Lewisham', 'b19969764': 'Hounslow', 'b19790533': 'Barnet', 'b19788794': 'Enfield', 'b1821888x': 'Southwark', 'b18222353': 'Islington', 'b1987800x': 'Ealing', 'b19784065': 'Barking and 
Dagenham', 'b18235347': 'Hammersmith and Fulham', 'b1979339x': 'Bromley', 'b18238920': 'Camden', 'b19790284': 'Barnet', 'b1988042x': 'Hounslow', 'b18247714': 'City of Westminster', 'b19784740': 'Barking and Dagenham', 'b19955443': 'City of Westminster', 'b19874820': 'Newham', 'b19875514': 'Southwark', 'b18251882': 'Camden', 'b18246400': 'Southwark', 'b18018415': 'Greenwich', 'b18250737': 'Wandsworth', 'b19783917': 'Ealing', 'b20056369': 'Islington', 'b19880133': 'Hillingdon', 'b19877602': 'Hounslow', 'b19884333': 'City of London', 'b18251250': 'Wandsworth', 'b18254342': 'City of Westminster', 'b18254615': 'City of Westminster', 'b1995511x': 'City of Westminster', 'b19791227': 'Havering', 'b19796705': 'Brent', 'b19796870': 'Brent', 'b18250828': 'Wandsworth', 'b19793571': 'Bromley', 'b1988381x': 'Sutton', 'b18122218': 'Southwark', 'b17998426': 'Greenwich', 'b19792165': 'Havering', 'b18237642': 'Greenwich', 'b19824439': 'Harrow', 'b18239407': 'Islington', 'b1978904x': 'Enfield', 'b18237319': 'Islington', 'b18248718': 'Islington', 'b19823472': 'Greenwich', 'b19970328': 'Richmond upon Thame', 'b1988588x': 'Hackney', 'b19882142': 'Merton', 'b19879064': 'Richmond upon Thame', 'b19786633': 'Croydon', 'b19786189': 'Croydon', 'b19876567': 'Waltham Forest', 'b19968577': 'Hounslow', 'b19792116': 'Barnet', 'b18038190': 'Islington', 'b19797473': 'Haringey', 'b19787327': 'Barking and Dagenham', 'b18039583': 'Tower Hamlets', 'b18250440': 'Wandsworth', 'b19969296': 'Newham', 'b19790843': 'Barnet', 'b19786451': 'Bexley', 'b19786980': 'Croydon', 'b19956393': 'City of Westminster', 'b18038220': 'City of Westminster', 'b18249449': 'Haringey', 'b19875356': 'Kingston upon Thames', 'b18236996': 'Lewisham', 'b1978417x': 'Barking and Dagenham', 'b20056552': 'Hackney', 'b18237861': 'Greenwich', 'b19956022': 'Southwark', 'b18121299': 'City of London', 'b19787583': 'Barking and Dagenham', 'b18236194': 'Kensington and Chelsea', 'b19792931': 'Waltham Forest', 'b18222109': 'Greenwich', 
'b19953616': 'Hammersmith and Fulham', 'b19877857': 'Ealing', 'b18248925': 'Camden', 'b19823162': 'Tower Hamlets', 'b19788496': 'Barnet', 'b18253933': 'Tower Hamlets', 'b19884680': 'City of London', 'b18044761': 'Southwark', 'b19875721': 'Camden', 'b19955613': 'Southwark', 'b19953471': 'Hammersmith and Fulham', 'b19822340': 'Hackney', 'b18219548': 'Southwark', 'b18235645': 'Kensington and Chelsea', 'b19784351': 'Barking and Dagenham', 'b19787911': 'Ealing', 'b19786074': 'Croydon', 'b19795233': 'City of Westminster', 'b18254111': 'Camden', 'b19791033': 'Ealing', 'b18223503': 'Kensington and Chelsea', 'b18111427': 'Kensington and Chelsea', 'b19952879': 'Southwark', 'b1987747x': 'Merton', 'b18235426': 'Hammersmith and Fulham', 'b1823673x': 'Tower Hamlets', 'b1825147x': 'Camden', 'b19876075': 'Redbridge', 'b19878928': 'Richmond upon Thame', 'b18246655': 'Southwark', 'b19821451': 'Bromley', 'b18018312': 'City of Westminster', 'b19952703': 'Tower Hamlets', 'b19880303': 'Hounslow', 'b19874923': 'Kingston upon Thames', 'b18248548': 'Islington', 'b1978918x': 'Enfield', 'b19976185': 'Kingston upon Thames', 'b19785409': 'Bromley', 'b19785951': 'Bromley', 'b19881435': 'Newham', 'b18239638': 'Lambeth', 'b19969934': 'Kingston upon Thames', 'b19793765': 'Bromley', 'b19797862': 'Hillingdon', 'b19976239': 'Kingston upon Thames', 'b1804377x': 'Tower Hamlets', 'b17996685': 'Greenwich', 'b18221622': 'Greenwich', 'b19877298': 'Merton', 'b19790077': 'Barnet', 'b18238269': 'Hammersmith and Fulham', 'b19879520': 'Waltham Forest', 'b19956563': 'City of Westminster', 'b19954700': 'Tower Hamlets', 'b18238129': 'Hammersmith and Fulham', 'b19878217': 'Redbridge', 'b19883158': 'Kingston upon Thames', 'b1811684x': 'Camden', 'b19793182': 'Bromley', 'b19879295': 'Richmond upon Thame', 'b19884102': 'City of London', 'b18247295': 'City of Westminster', 'b18253349': 'City of London', 'b19789452': 'Enfield', 'b19875290': 'Kingston upon Thames', 'b19883481': 'Tower Hamlets', 'b19956708': 'City of 
Westminster', 'b18238695': 'Camden', 'b18236856': 'Tower Hamlets', 'b18251274': 'Wandsworth', 'b19970171': 'Kingston upon Thames', 'b19880935': 'Brent', 'b18244014': 'Haringey', 'b20274816': 'Harrow', 'b18253167': 'City of London', 'b18047105': 'Lambeth', 'b19789233': 'Enfield', 'b19821931': 'Lambeth', 'b18252564': 'London County Council', 'b19787467': 'Barking and Dagenham', 'b1822099x': 'Kensington and Chelsea', 'b18251183': 'Wandsworth', 'b19791586': 'Hillingdon', 'b19783371': 'Ealing', 'b1988235x': 'Sutton', 'b19791859': 'Barnet', 'b18219044': 'Southwark', 'b19882683': 'Merton', 'b18237101': 'Lewisham', 'b20056308': 'Islington', 'b18237459': 'Islington', 'b18239614': 'Lambeth', 'b18044372': 'Greenwich', 'b18236509': 'Wandsworth', 'b17997306': 'Southwark', 'b19882427': 'Sutton', 'b18245638': 'City of Westminster', 'b19786840': 'Croydon', 'b19969338': 'Hounslow', 'b19823757': 'Greenwich', 'b19968322': 'Hounslow', 'b18251067': 'Wandsworth', 'b1995301x': 'Southwark', 'b19953860': 'Tower Hamlets', 'b18116565': 'Camden', 'b19876245': 'Redbridge', 'b18244907': 'Lewisham', 'b19879222': 'Richmond upon Thame', 'b18250634': 'Wandsworth', 'b1979051x': 'Barnet', 'b19823289': 'Greenwich', 'b19790673': 'Barnet', 'b19788800': 'Enfield', 'b18110873': 'Hammersmith and Fulham', 'b19824257': 'Kensington and Chelsea', 'b19823575': 'Greenwich', 'b1824919x': 'Haringey', 'b18222596': 'Tower Hamlets', 'b19795002': 'Enfield', 'b18044487': 'Lewisham', 'b19794605': 'Hillingdon', 'b19794356': 'Hillingdon', 'b18252989': 'London County Council', 'b19821591': 'Bromley', 'b19969740': 'Hounslow', 'b19953628': 'Hammersmith and Fulham', 'b18247908': 'City of Westminster', 'b19884746': 'City of London', 'b19952867': 'Southwark', 'b1988011x': 'Richmond upon Thame', 'b17996478': 'Greenwich', 'b18235360': 'Hammersmith and Fulham', 'b1821860x': 'Southwark', 'b19877596': 'Hounslow', 'b19877043': 'Waltham Forest', 'b19784727': 'Barking and Dagenham', 'b18247842': 'City of Westminster', 'b19955133': 
'City of Westminster', 'b19881836': 'Haringey', 'b19875575': 'Camden', 'b18249991': 'Hackney', 'b19876737': 'Waltham Forest', 'b18251869': 'Camden', 'b19822157': 'Hackney', 'b19783978': 'Barnet', 'b18246461': 'Southwark', 'b19877626': 'Hounslow', 'b19880443': 'Hounslow', 'b19784533': 'Barking and Dagenham', 'b19793376': 'Bromley', 'b19788113': 'Ealing', 'b19884357': 'City of London', 'b18247593': 'City of Westminster', 'b18209439': 'Islington', 'b18239055': 'Camden', 'b19791203': 'Havering', 'b1995542x': 'City of Westminster', 'b18254329': 'City of Westminster', 'b1987988x': 'Richmond upon Thame', 'b1799844x': 'Greenwich', 'b19793510': 'Bromley', 'b18248482': 'Islington', 'b18018439': 'Greenwich', 'b19954554': 'Lewisham', 'b1987599x': 'Redbridge', 'b18122231': 'Southwark', 'b18246990': 'Southwark', 'b18239420': 'Islington', 'b18248731': 'Islington', 'b18237666': 'Greenwich', 'b19956952': 'Southwark', 'b19797187': 'Haringey', 'b19824415': 'Harrow', 'b19876877': 'Waltham Forest', 'b19796857': 'Brent', 'b19882166': 'Merton', 'b19969107': 'Barnet', 'b18237599': 'Greenwich', 'b19797321': 'Haringey', 'b18252771': 'London County Council', 'b19794988': 'Enfield', 'b1987537x': 'Harrow', 'b19792281': 'Havering', 'b19796584': 'Brent', 'b1825357x': 'City of London', 'b19790867': 'Barnet', 'b19786475': 'Bexley', 'b1979745x': 'Haringey', 'b19824087': 'Kensington and Chelsea', 'b18117302': 'Kensington and Chelsea', 'b1978661x': 'Croydon', 'b18249462': 'Haringey', 'b17997537': 'Southwark', 'b18122097': 'Southwark', 'b1825391x': 'City of London', 'b18121548': 'Greenwich', 'b18237848': 'Greenwich', 'b19884916': 'Hillingdon', 'b19956046': 'Southwark', 'b1824905x': 'Haringey', 'b19794629': 'Hillingdon', 'b19822170': 'Hackney', 'b19796328': 'Brent', 'b18251043': 'Wandsworth', 'b18238488': 'Camden', 'b19953677': 'Hammersmith and Fulham', 'b19791446': 'Hillingdon', 'b18244737': 'Lewisham', 'b19877833': 'Ealing', 'b19783498': 'Ealing', 'b19788472': 'Barnet', 'b19823149': 'Tower Hamlets', 
'b19784119': 'Barking and Dagenham', 'b18042995': 'Tower Hamlets', 'b19955637': 'Southwark', 'b18044748': 'Southwark', 'b19875745': 'Camden', 'b19787704': 'Ealing', 'b19822364': 'Hackney', 'b18253799': 'City of London', 'b19953495': 'Hammersmith and Fulham', 'b18111221': 'Hackney', 'b1821938x': 'Southwark', 'b18235992': 'Kensington and Chelsea', 'b18235621': 'Hammersmith and Fulham', 'b19788617': 'Enfield', 'b18106286': 'Kensington and Chelsea', 'b18042193': 'Wandsworth', 'b18251456': 'Camden', 'b18254135': 'Hammersmith and Fulham', 'b18239997': 'Kensington and Chelsea', 'b18236716': 'Tower Hamlets', 'b19791057': 'Ealing', 'b1996948x': 'Camden', 'b1988574x': 'Hackney', 'b18223564': 'Kensington and Chelsea', 'b20106919': 'Wandsworth', 'b19952818': 'Tower Hamlets', 'b18245171': 'City of Westminster', 'b18248901': 'Camden', 'b1822278x': 'Islington', 'b1979521x': 'Enfield', 'b18239195': 'Islington', 'b19955091': 'City of Westminster', 'b19785641': 'Bromley', 'b19881630': 'Newham', 'b19876506': 'Waltham Forest', 'b19881186': 'Sutton', 'b19876051': 'Redbridge', 'b19954943': 'Tower Hamlets', 'b19878904': 'Richmond upon Thame', 'b18246679': 'Southwark', 'b19821438': 'Bromley', 'b19791938': 'Barnet', 'b1987456x': 'Newham', 'b19877456': 'Merton', 'b19880327': 'Hounslow', 'b19793583': 'Bromley', 'b19952727': 'Tower Hamlets', 'b19955893': 'Southwark', 'b19789324': 'Enfield', 'b19785392': 'Bexley', 'b19794162': 'Havering', 'b19881459': 'Newham', 'b19881988': 'Haringey', 'b19885295': 'Hackney', 'b18250324': 'Wandsworth', 'b19793741': 'Bromley', 'b18235438': 'Hammersmith and Fulham', 'b18220423': 'Wandsworth', 'b18108878': 'Southwark', 'b19976252': 'Kingston upon Thames', 'b19792839': 'Waltham Forest', 'b1987490x': 'Kingston upon Thames', 'b17999637': 'City of London', 'b19795397': 'Barnet', 'b18238245': 'Hammersmith and Fulham', 'b19790090': 'Barnet', 'b19880583': 'Hounslow', 'b19879507': 'Waltham Forest', 'b18247106': 'Southwark', 'b17998323': 'Hammersmith and Fulham', 
'b18124021': 'Camden', 'b18249619': 'Tower Hamlets', 'b19883171': 'Kingston upon Thames', 'b18238105': 'Hammersmith and Fulham', 'b19970481': 'Kingston upon Thames', 'b19954761': 'Tower Hamlets', 'b19878278': 'Redbridge', 'b1995654x': 'City of Westminster', 'b18253362': 'City of London', 'b19789439': 'Enfield', 'b18252096': 'Camden', 'b18251213': 'Wandsworth', 'b19883353': 'Tower Hamlets', 'b19880911': 'Brent', 'b17950612': 'Southwark', 'b18253143': 'City of London', 'b18116796': 'Camden', 'b1978370x': 'Ealing', 'b1997128x': 'Newham', 'b19786281': 'Bexley', 'b19821955': 'Hackney', 'b19877067': 'Waltham Forest', 'b19791872': 'Barnet', 'b19875022': 'Kingston upon Thames', 'b19957233': 'Tower Hamlets', 'b18239675': 'Lambeth', 'b18237125': 'Lewisham', 'b20056989': 'Southwark', 'b19956721': 'City of Westminster', 'b20057052': 'Wandsworth', 'b19882403': 'Sutton', 'b19969314': 'Newham', 'b18245614': 'City of Westminster', 'b19968309': 'Hounslow', 'b18245341': 'City of Westminster', 'b18248226': 'City of Westminster', 'b19823770': 'Greenwich', 'b18247866': 'City of Westminster', 'b1825455x': 'City of Westminster', 'b20056187': 'Islington', 'b18252230': 'Camden', 'b18252540': 'London County Council', 'b19821608': 'Bromley', 'b18244968': 'Lewisham', 'b1979437x': 'Hillingdon', 'b19969284': 'Newham', 'b19874686': 'Newham', 'b19792244': 'Havering', 'b1988476x': 'Port of London', 'b18121159': 'Camden', 'b19824270': 'Kensington and Chelsea', 'b19823551': 'Greenwich', 'b19788861': 'Enfield', 'b18222572': 'Lambeth', 'b19795026': 'Enfield', 'b18044359': 'Greenwich', 'b18236522': 'Wandsworth', 'b18039844': 'Hackney', 'b19957257': 'Tower Hamlets', 'b1995380x': 'Tower Hamlets', 'b1979065x': 'Barnet', 'b18247921': 'City of Westminster', 'b19876269': 'Redbridge', 'b19875083': 'Kingston upon Thames', 'b18235384': 'Hammersmith and Fulham', 'b18246199': 'Tower Hamlets', 'b19784259': 'Barking and Dagenham', 'b19788289': 'Ealing', 'b18247829': 'City of Westminster', 'b19784703': 'Barking and 
Dagenham', 'b18218593': 'Southwark', 'b19955157': 'City of Westminster', 'b19875551': 'Camden', 'b19954530': 'Lewisham', 'b18251845': 'Camden', 'b19822601': 'Hackney', 'b19876191': 'Redbridge', 'b19880467': 'Hounslow', 'b1987702x': 'Waltham Forest', 'b19783954': 'Barnet', 'b19793352': 'Bromley', 'b19793686': 'Bromley', 'b19884370': 'City of London', 'b19788137': 'Ealing', 'b18218623': 'Southwark', 'b19976082': 'Kingston upon Thames', 'b18239079': 'Camden', 'b18220034': 'Tower Hamlets', 'b18209452': 'Islington', 'b19791264': 'Hillingdon', 'b17996454': 'Southwark', 'b19794083': 'Havering', 'b19785860': 'Bromley', 'b19793534': 'Bromley', 'b19881848': 'Haringey', 'b19785070': 'Bromley', 'b19885179': 'Hackney', 'b18246448': 'Southwark', 'b19881952': 'Haringey', 'b19878047': 'Ealing', 'b1997081x': 'Haringey', 'b19954578': 'Lewisham', 'b17996983': 'Wandsworth', 'b17998463': 'Southwark', 'b19879313': 'Richmond upon Thame', 'b18247313': 'City of Westminster', 'b18239444': 'Islington', 'b18237605': 'Greenwich', 'b19824476': 'Harrow', 'b19876853': 'Waltham Forest', 'b19821384': 'Bromley', 'b19790132': 'Barnet', 'b19796833': 'Brent', 'b18253556': 'City of London', 'b19789373': 'Enfield', 'b19786670': 'Croydon', 'b19797308': 'Haringey', 'b19794964': 'Enfield', 'b18039546': 'Tower Hamlets', 'b18250506': 'Wandsworth', 'b18253702': 'City of London', 'b19786499': 'Bexley', 'b1996853x': 'Sutton', 'b18238713': 'Camden', 'b18249401': 'Haringey', 'b18236959': 'Tower Hamlets', 'b18118173': 'Hackney', 'b19875319': 'Kingston upon Thames', 'b20056515': 'City of London', 'b19884977': 'Southwark', 'b19953653': 'Hammersmith and Fulham', 'b19796304': 'Brent', 'b19953124': 'Southwark', 'b18244750': 'Lewisham', 'b19976227': 'Kingston upon Thames', 'b18219366': 'Southwark', 'b19823125': 'Tower Hamlets', 'b1995606x': 'Southwark', 'b19784132': 'Barking and Dagenham', 'b18253970': 'Tower Hamlets', 'b18249073': 'Haringey', 'b19787728': 'Ealing', 'b19822832': 'Tower Hamlets', 'b19875769': 'Camden', 
'b1987781x': 'Ealing', 'b1979146x': 'Hillingdon', 'b18111208': 'Wandsworth', 'b1825102x': 'Wandsworth', 'b1811717x': 'Kensington and Chelsea', 'b18219500': 'Southwark', 'b18235979': 'Kensington and Chelsea', 'b18235608': 'Hammersmith and Fulham', 'b18120611': 'Lewisham', 'b18236777': 'Tower Hamlets', 'b19791070': 'City of Westminster', 'b17997112': 'Southwark', 'b1987652x': 'Waltham Forest', 'b18223540': 'Kensington and Chelsea', 'b18250774': 'Wandsworth', 'b19952831': 'Tower Hamlets', 'b18245407': 'City of Westminster', 'b18245158': 'City of Westminster', 'b19874546': 'Newham', 'b20240880': 'Kensington and Chelsea', 'b19881617': 'Newham', 'b18043550': 'Southwark', 'b19876038': 'Redbridge', 'b19954967': 'Tower Hamlets', 'b19885763': 'Hackney', 'b19969466': 'Camden', 'b19784399': 'Barking and Dagenham', 'b19952740': 'Tower Hamlets', 'b19788630': 'Enfield', 'b19792475': 'Havering', 'b19881472': 'Newham', 'b19785914': 'Bromley', 'b19794411': 'Hillingdon', 'b18221610': 'Greenwich', 'b18250300': 'Wandsworth', 'b19794149': 'Havering', 'b1824712x': 'Southwark', 'b17999613': 'City of Westminster', 'b19954293': 'Lambeth', 'b19878783': 'Ealing', 'b19884497': 'City of London', 'b19955212': 'City of Westminster', 'b19874960': 'Kingston upon Thames', 'b19954748': 'Tower Hamlets', 'b19954037': 'Lambeth', 'b19883110': 'Kingston upon Thames', 'b19878254': 'Redbridge', 'b19880704': 'Hounslow', 'b19783723': 'Ealing', 'b18116619': 'Hackney', 'b19879787': 'Richmond upon Thame', 'b18253301': 'City of London', 'b1824483x': 'Islington', 'b19789142': 'Enfield', 'b19789415': 'Enfield', 'b19976276': 'Kingston upon Thames', 'b18254512': 'City of Westminster', 'b18118549': 'Hackney', 'b18251237': 'Wandsworth', 'b1824967x': 'Tower Hamlets', 'b20274853': 'Harrow', 'b1988414x': 'Port of London', 'b18248676': 'Islington', 'b19956526': 'City of Westminster', 'b18239389': 'Islington', 'b19791896': 'Barnet', 'b19882646': 'Merton', 'b19885969': 'Hackney', 'b18237149': 'Lewisham', 'b1982371x': 
'Greenwich', 'b18237411': 'Islington', 'b19954712': 'Tower Hamlets', 'b17996739': 'Southwark', 'b19956290': 'City of Westminster', 'b1824869x': 'Islington', 'b19882397': 'Sutton', 'b19882464': 'Sutton', 'b1987912x': 'Richmond upon Thame', 'b1995721x': 'Tower Hamlets', 'b18248202': 'City of Westminster', 'b18245365': 'City of Westminster', 'b19874376': 'Islington', 'b19821979': 'Hackney', 'b18252217': 'Camden', 'b19956976': 'Southwark', 'b19821621': 'Bromley', 'b19975922': 'Sutton', 'b18219196': 'Southwark', 'b18245092': 'Lewisham', 'b19784892': 'Bromley', 'b19824294': 'Kensington and Chelsea', 'b18239900': 'Lambeth', 'b19788848': 'Enfield', 'b19882816': 'Merton', 'b19792268': 'Havering', 'b18236546': 'Wandsworth', 'b18244853': 'Lewisham', 'b20057076': 'City of Westminster', 'b19952995': 'Southwark', 'b20056709': 'Hackney', 'b18245894': 'Tower Hamlets', 'b18247945': 'City of Westminster', 'b18253891': 'City of London', 'b18106249': 'Kensington and Chelsea', 'b18222882': 'Tower Hamlets', 'b19884709': 'City of London', 'b1823852x': 'Camden', 'b19876208': 'Redbridge', 'b19822996': 'Tower Hamlets', 'b19953823': 'Tower Hamlets', 'b18246230': 'Tower Hamlets', 'b19877006': 'Waltham Forest', 'b19877559': 'Hounslow', 'b18247805': 'City of Westminster', 'b19784272': 'Barking and Dagenham', 'b18106894': 'Hammersmith and Fulham', 'b19955170': 'City of Westminster', 'b19822194': 'Hackney', 'b19794319': 'Havering', 'b19791793': 'Barnet', 'b19822662': 'Tower Hamlets', 'b18111567': 'City of London', 'b20106890': 'Southwark', 'b19793339': 'Bromley', 'b18106092': 'Southwark', 'b18218647': 'Southwark', 'b19788150': 'Ealing', 'b19795403': 'Barnet', 'b19883365': 'Tower Hamlets', 'b18209476': 'Tower Hamlets', 'b18220010': 'Tower Hamlets', 'b19955509': 'Southwark', 'b19791240': 'Havering', 'b18239018': 'Camden', 'b20106762': 'Lewisham', 'b1987683x': 'Waltham Forest', 'b18248445': 'City of Westminster', 'b18118616': 'Lambeth', 'b19823460': 'Greenwich', 'b18251249': 'Wandsworth', 
'b19785057': 'Bromley', 'b18252850': 'London County Council', 'b19878412': 'Redbridge', 'b19878060': 'Ealing', 'b19885155': 'Hackney', 'b1979681x': 'Brent', 'b19969697': 'Hounslow', 'b19879337': 'Richmond upon Thame', 'b18247374': 'City of Westminster', 'b18250312': 'Wandsworth', 'b18237629': 'Greenwich', 'b19824452': 'Harrow', 'b18252758': 'London County Council', 'b19793030': 'Waltham Forest', 'b19790119': 'Barnet', 'b1824631x': 'Tower Hamlets', 'b19786657': 'Croydon', 'b19789609': 'Enfield', 'b19786128': 'Croydon', 'b18047282': 'Kensington and Chelsea', 'b19797497': 'Haringey', 'b19794940': 'Enfield', 'b19797369': 'Haringey', 'b18043501': 'Islington', 'b18249796': 'Hackney', 'b18221506': 'Kensington and Chelsea', 'b19957026': 'Southwark', 'b18237083': 'Lewisham', 'b1978935x': 'Enfield', 'b19956885': 'Southwark', 'b19875848': 'Camden', 'b19883250': 'Newham', 'b20274828': 'Harrow', 'b18249425': 'Haringey', 'b18238737': 'Camden', 'b18236972': 'Lewisham', 'b1803956x': 'Tower Hamlets', 'b19884953': 'Southwark', 'b19956083': 'City of Westminster', 'b18252667': 'London County Council', 'b19787522': 'Barking and Dagenham', 'b19787297': 'Croydon', 'b18251006': 'Wandsworth', 'b18244774': 'Lewisham', 'b19783450': 'Ealing', 'b19796365': 'Brent', 'b19953100': 'Southwark', 'b18048444': 'Camden', 'b19791409': 'Hillingdon', 'b18219342': 'Southwark', 'b19975715': 'Richmond upon Thame', 'b18253957': 'Tower Hamlets', 'b18236285': 'Wandsworth', 'b17997288': 'Southwark', 'b19787741': 'Ealing', 'b19822856': 'Tower Hamlets', 'b18246953': 'Southwark', 'b18235955': 'Kensington and Chelsea', 'b18219524': 'Southwark', 'b18237885': 'Greenwich', 'b18236029': 'Kensington and Chelsea', 'b18236753': 'Tower Hamlets', 'b17997136': 'Southwark', 'b18235487': 'Hammersmith and Fulham', 'b19790752': 'Barnet', 'b18250750': 'Wandsworth', 'b19823459': 'Greenwich', 'b18245134': 'Lewisham', 'b19874522': 'Newham', 'b19788435': 'Barnet', 'b19823101': 'Tower Hamlets', 'b19824336': 'Kensington and Chelsea', 
'b19881678': 'Haringey', 'b19822273': 'Hackney', 'b18043537': 'Kensington and Chelsea', 'b19876543': 'Waltham Forest', 'b19876014': 'Redbridge', 'b19954906': 'Tower Hamlets', 'b19885702': 'Hackney', 'b19952764': 'Tower Hamlets', 'b19792451': 'Havering', 'b19788381': 'Barnet', 'b18106791': 'Wandsworth', 'b1824676x': 'Southwark', 'b19881496': 'Newham', 'b19794435': 'Hillingdon', 'b19794125': 'Havering', 'b19883766': 'Sutton', 'b19790326': 'Barnet', 'b18250361': 'Wandsworth', 'b1978451x': 'Barking and Dagenham', 'b17999674': 'Camden', 'b18238282': 'Hammersmith and Fulham', 'b19877766': 'Hounslow', 'b18106134': 'Hammersmith and Fulham', 'b19874947': 'Kingston upon Thames', 'b18209804': 'Southwark', 'b18251584': 'Camden', 'b19883134': 'Kingston upon Thames', 'b18249656': 'Tower Hamlets', 'b19954013': 'Lambeth', 'b19970997': 'Richmond upon Thame', 'b18250981': 'Wandsworth', 'b19878503': 'Redbridge', 'b19793479': 'Bromley', 'b19880728': 'Hounslow', 'b18253325': 'City of London', 'b18116632': 'Hackney', 'b19789166': 'Enfield', 'b19792876': 'Waltham Forest', 'b18254536': 'City of Westminster', 'b19970195': 'Merton', 'b19970663': 'Richmond upon Thame', 'b19879416': 'Richmond upon Thame', 'b18240045': 'Greenwich', 'b18253180': 'City of London', 'b19789294': 'Enfield', 'b18248652': 'Islington', 'b19956502': 'City of Westminster', 'b1997629x': 'Kingston upon Thames', 'b1978983x': 'Bexley', 'b19957270': 'Tower Hamlets', 'b19879106': 'Richmond upon Thame', 'b18237162': 'Lewisham', 'b19883390': 'Tower Hamlets', 'b18252059': 'Camden', 'b19882919': 'Richmond upon Thame', 'b19882440': 'Sutton', 'b19885490': 'Hackney', 'b18250567': 'Wandsworth', 'b19790922': 'Barnet', 'b19786359': 'Bexley', 'b18245304': 'City of Westminster', 'b19786827': 'Croydon', 'b18248263': 'City of Westminster', 'b1988266x': 'Merton', 'b18252588': 'London County Council', 'b19883602': 'Tower Hamlets', 'b19824105': 'Kensington and Chelsea', 'b18250592': 'Wandsworth', 'b19821992': 'Hackney', 'b19787194': 
'Croydon', 'b18252278': 'Camden', 'b17998013': 'Hammersmith and Fulham', 'b19975909': 'Sutton', 'b18250695': 'Wandsworth', 'b19790697': 'Barnet', 'b19792207': 'Havering', 'b18238014': 'Hammersmith and Fulham', 'b20056497': 'City of London', 'b19953598': 'Hammersmith and Fulham', 'b18120866': 'Tower Hamlets', 'b18238506': 'Camden', 'b20057015': 'Southwark', 'b19877997': 'Ealing', 'b19952971': 'Southwark', 'b18248883': 'Camden', 'b18247969': 'City of Westminster', 'b19784077': 'Barking and Dagenham', 'b20056722': 'Hackney', 'b19884722': 'City of London', 'b19875046': 'Kingston upon Thames', 'b18249139': 'Haringey', 'b18251614': 'Camden', 'b19822467': 'Hackney', 'b19876609': 'Waltham Forest', 'b19876221': 'Redbridge', 'b19953379': 'Hammersmith and Fulham', 'b1823656x': 'Wandsworth', 'b19877572': 'Hounslow', 'b19784211': 'Barking and Dagenham', 'b18106304': 'Kensington and Chelsea', 'b18237708': 'Greenwich', 'b19875599': 'Camden', 'b19794332': 'Havering', 'b18111543': 'City of Westminster', 'b18223667': 'Kensington and Chelsea', 'b19793911': 'Havering', 'b19793315': 'Bromley', 'b19788174': 'Ealing', 'b18218660': 'Southwark', 'b19795427': 'Barnet', 'b18239031': 'Camden', 'b20106749': 'Wandsworth', 'b19876488': 'Waltham Forest', 'b18246151': 'Tower Hamlets', 'b18247684': 'City of Westminster', 'b19968905': 'Hounslow', 'b1988493x': 'Kensington and Chelsea', 'b18220290': 'Hammersmith and Fulham', 'b18123740': 'Lewisham', 'b18252874': 'London County Council', 'b19785033': 'Bromley', 'b19881800': 'Haringey', 'b18246485': 'Southwark', 'b18246370': 'Southwark', 'b19885131': 'Hackney', 'b19877687': 'Hounslow', 'b19976331': 'Kingston upon Thames', 'b19971102': 'Waltham Forest', 'b18248792': 'Islington', 'b1825245x': 'London County Council', 'b19792992': 'Waltham Forest', 'b18238348': 'Hammersmith and Fulham', 'b19883894': 'Sutton', 'b1821891x': 'Southwark', 'b19786104': 'Croydon', 'b19789622': 'Bexley', 'b18220435': 'Wandsworth', 'b19792190': 'Havering', 'b19797345': 'Haringey', 
'b19968255': 'Hounslow', 'b18039509': 'Tower Hamlets', 'b19794927': 'Enfield', 'b19954463': 'Lewisham', 'b19783887': 'Ealing', 'b18247428': 'City of Westminster', 'b18247350': 'City of Westminster', 'b19879350': 'Richmond upon Thame', 'b19792293': 'Havering', 'b20056266': 'Islington', 'b18110848': 'Islington', 'b19956319': 'City of Westminster', 'b1987523x': 'Kingston upon Thames', 'b19883274': 'Redbridge', 'b18236911': 'Tower Hamlets', 'b18238750': 'Camden', 'b19970274': 'Richmond upon Thame', 'b1822152x': 'Kensington and Chelsea', 'b19796699': 'Brent', 'b18253519': 'City of London', 'b1995704x': 'Southwark', 'b18253040': 'London County Council', 'b19821839': 'Bexley', 'b19787509': 'Barking and Dagenham', 'b18252643': 'London County Council', 'b19796432': 'Brent', 'b19953161': 'Southwark', 'b19796341': 'Brent', 'b19791422': 'Hillingdon', 'b19975739': 'Richmond upon Thame', 'b18219329': 'Southwark', 'b19787765': 'Ealing', 'b19786396': 'Bexley', 'b18239493': 'Islington', 'b18111713': 'Southwark', 'b19882051': 'Merton', 'b18048390': 'Wandsworth', 'b18246977': 'Southwark', 'b18235931': 'Kensington and Chelsea', 'b19968280': 'Hounslow', 'b18237204': 'Lewisham', 'b18239936': 'Lambeth', 'b18120659': 'Greenwich', 'b19953781': 'Tower Hamlets', 'b18236005': 'Kensington and Chelsea', 'b1995492x': 'Tower Hamlets', 'b19874509': 'Newham', 'b19790776': 'Barnet', 'b18245110': 'Lewisham', 'b19788411': 'Barnet', 'b19968139': 'Hounslow', 'b19824312': 'Kensington and Chelsea', 'b18043513': 'Lewisham', 'b19881125': 'Sutton', 'b19881654': 'Haringey', 'b19969429': 'Camden', 'b19885726': 'Hackney', 'b1799715x': 'Southwark', 'b19952788': 'Tower Hamlets', 'b19792438': 'Havering', 'b1978353x': 'Ealing', 'b19785483': 'Bromley', 'b19794459': 'Hillingdon', 'b19794101': 'Havering', 'b18250348': 'Wandsworth', 'b1988445x': 'Port of London', 'b19784685': 'Barking and Dagenham', 'b19955030': 'Tower Hamlets', 'b19875678': 'Camden', 'b19955789': 'Southwark', 'b1995525x': 'City of Westminster', 
'b17999121': 'City of Westminster', 'b19954256': 'Lambeth', 'b19878746': 'Redbridge', 'b18246746': 'Southwark', 'b19880522': 'Hounslow', 'b19877213': 'Merton', 'b19877742': 'Hounslow', 'b1979034x': 'Barnet', 'b19880297': 'Hounslow', 'b18247167': 'City of Westminster', 'b18106110': 'Hammersmith and Fulham', 'b18254731': 'City of Westminster', 'b19976100': 'Kingston upon Thames', 'b19787881': 'Ealing', 'b19791161': 'Havering', 'b18209828': 'Southwark', 'b18254081': 'Hammersmith and Fulham', 'b19878564': 'Redbridge', 'b19954074': 'Lambeth', 'b19878291': 'Redbridge', 'b19883079': 'Kingston upon Thames', 'b19880741': 'Hounslow', 'b19793108': 'Waltham Forest', 'b19793455': 'Bromley', 'b18116656': 'Hackney', 'b20057106': 'City of Westminster', 'b19792852': 'Waltham Forest', 'b18239353': 'Islington', 'b17998529': 'Hammersmith and Fulham', 'b18239341': 'Islington', 'b18237563': 'Greenwich', 'b18248639': 'Islington', 'b19968735': 'Sutton', 'b19954785': 'Tower Hamlets', 'b19882609': 'Sutton', 'b19885921': 'Hackney', 'b19879167': 'Richmond upon Thame', 'b1824824x': 'City of Westminster', 'b19786591': 'Croydon', 'b17998748': 'Islington', 'b20056928': 'Southwark', 'b18252072': 'Camden', 'b19956782': 'Southwark', 'b18239699': 'Lambeth', 'b18236169': 'Kensington and Chelsea', 'b19790909': 'Barnet', 'b19975697': 'Bromley', 'b19957063': 'Southwark', 'b19882932': 'Richmond upon Thame', 'b19824129': 'Kensington and Chelsea', 'b19786372': 'Bexley', 'b19789816': 'Bexley', 'b18245328': 'City of Westminster', 'b19874339': 'Harrow', 'b18117065': 'Hammersmith and Fulham', 'b18252254': 'Camden', 'b18039339': 'Camden', 'b19792220': 'Havering', 'b18121469': 'Kensington and Chelsea', 'b18218866': 'Southwark', 'b19956125': 'City of Westminster', 'b19883511': 'Tower Hamlets', 'b18197267': 'Hammersmith and Fulham', 'b1987506x': 'Kingston upon Thames', 'b18238567': 'Camden', 'b17997380': 'Southwark', 'b18244634': 'Lewisham', 'b19953574': 'Hammersmith and Fulham', 'b18245584': 'City of Westminster', 
'b18245857': 'Tower Hamlets', 'b1997596x': 'Sutton', 'b19952958': 'Southwark', 'b18247982': 'City of Westminster', 'b20056746': 'Hackney', 'b19784053': 'Barking and Dagenham', 'b19879155': 'Richmond upon Thame', 'b18249115': 'Haringey', 'b18251638': 'Camden', 'b19822406': 'Hackney', 'b18244981': 'Lewisham', 'b19953318': 'Hammersmith and Fulham', 'b19823319': 'Greenwich', 'b19883213': 'Kingston upon Thames', 'b19953008': 'Southwark', 'b18238774': 'Camden', 'b19784235': 'Barking and Dagenham', 'b19784946': 'Bromley', 'b19795622': 'Hillingdon', 'b19795191': 'Enfield', 'b19791756': 'Barnet', 'b19793935': 'Havering', 'b1978899x': 'Enfield', 'b18218684': 'Southwark', 'b19788198': 'Ealing', 'b19795993': 'Hillingdon', 'b19795440': 'Barnet', 'b18246175': 'Tower Hamlets', 'b19876440': 'Waltham Forest', 'b18238981': 'Camden', 'b19877511': 'Hounslow', 'b1982449x': 'Harrow', 'b19881514': 'Newham', 'b18248408': 'City of Westminster', 'b18249802': 'Hackney', 'b19785549': 'Bromley', 'b19881824': 'Haringey', 'b19953914': 'Tower Hamlets', 'b19881356': 'Newham', 'b19968929': 'Hounslow', 'b1997047x': 'Ealing', 'b19885118': 'Hackney', 'b18252813': 'London County Council', 'b18246357': 'Tower Hamlets', 'b18250087': 'Hackney', 'b19880194': 'Hillingdon', 'b19793662': 'Bromley', 'b19976318': 'Kingston upon Thames', 'b19971473': 'Merton', 'b19797102': 'Brent', 'b1978501x': 'Bromley', 'b19970870': 'Richmond upon Thame', 'b18250889': 'Wandsworth', 'b18221890': 'Kensington and Chelsea', 'b19790156': 'Barnet', 'b19793595': 'Bromley', 'b1979048x': 'Barnet', 'b18220411': 'Wandsworth', 'b18039522': 'Tower Hamlets', 'b19794903': 'Enfield', 'b17999352': 'Southwark', 'b19878175': 'Redbridge', 'b18209427': 'Islington', 'b17998190': 'Camden', 'b19879805': 'Richmond upon Thame', 'b19879374': 'Richmond upon Thame', 'b18247404': 'City of Westminster', 'b19884242': 'City of London', 'b18253283': 'City of London', 'b19956332': 'City of Westminster', 'b19970250': 'Merton', 'b19791999': 'Barnet', 'b18236935': 
'Tower Hamlets', 'b1995444x': 'Lewisham', 'b18118446': 'Hackney', 'b1825116x': 'Wandsworth', 'b18253064': 'London County Council', 'b19786165': 'Croydon', 'b19789312': 'Enfield', 'b19789646': 'Bexley', 'b19821815': 'Bromley', 'b19796419': 'Brent', 'b19953148': 'Southwark', 'b19783413': 'Ealing', 'b18048407': 'Wandsworth', 'b18219305': 'Southwark', 'b19975752': 'Richmond upon Thame', 'b18239572': 'Lambeth', 'b20056242': 'Islington', 'b1978756x': 'Barking and Dagenham', 'b18236248': 'Wandsworth', 'b19787789': 'Ealing', 'b1825262x': 'London County Council', 'b18111737': 'Southwark', 'b18246916': 'Southwark', 'b18110964': 'Southwark', 'b18235918': 'Kensington and Chelsea', 'b19882038': 'Merton', 'b18248184': 'City of Westminster', 'b18121895': 'Southwark', 'b18237228': 'Lewisham', 'b19823678': 'Greenwich', 'b18236066': 'Kensington and Chelsea', 'b18236790': 'Tower Hamlets', 'b18244841': 'Lewisham', 'b17997173': 'Southwark', 'b18250713': 'Wandsworth', 'b19790715': 'Barnet', 'b19969545': 'Harrow', 'b18248986': 'Haringey', 'b18245468': 'City of Westminster', 'b19823411': 'Greenwich', 'b19824373': 'Harrow', 'b19822236': 'Hackney', 'b19881101': 'Sutton', 'b18111191': 'Wandsworth', 'b19969405': 'Camden', 'b18121329': 'Camden', 'b19792414': 'Havering', 'b19788691': 'Enfield', 'b19884886': 'City of London', 'b19795294': 'Barnet', 'b18239912': 'Lambeth', 'b18222250': 'Islington', 'b19794472': 'Hillingdon', 'b1987876x': 'Redbridge', 'b19790363': 'Barnet', 'b19955017': 'Tower Hamlets', 'b19875654': 'Camden', 'b19880509': 'Hounslow', 'b18246722': 'Southwark', 'b17999108': 'Southwark', 'b19884473': 'City of London', 'b19788034': 'Ealing', 'b19877729': 'Hounslow', 'b19784557': 'Barking and Dagenham', 'b18106171': 'Islington', 'b19955273': 'City of Westminster', 'b19976124': 'Kingston upon Thames', 'b19791148': 'Haringey', 'b19785975': 'Croydon', 'b18254718': 'City of Westminster', 'b18218878': 'Southwark', 'b19880765': 'Hounslow', 'b19783784': 'Ealing', 'b1995590x': 'Southwark', 
'b19793121': 'Bromley', 'b18223114': 'Tower Hamlets', 'b18254573': 'City of Westminster', 'b19789129': 'Enfield', 'b19969090': 'Barnet', 'b18116711': 'Hackney', 'b19879490': 'Waltham Forest', 'b18248615': 'Islington', 'b18249632': 'Tower Hamlets', 'b18239365': 'Islington', 'b19824579': 'Harrow', 'b19785203': 'Bexley', 'b1995427x': 'Lambeth', 'b19885301': 'Hackney', 'b19878813': 'Richmond upon Thame', 'b19878540': 'Redbridge', 'b19954050': 'Lambeth', 'b19882622': 'Sutton', 'b1979096x': 'Barnet', 'b19786785': 'Croydon', 'b19885908': 'Hackney', 'b1825052x': 'Wandsworth', 'b17998761': 'Islington', 'b19879143': 'Richmond upon Thame', 'b1803830x': 'City of Westminster', 'b18222079': 'Greenwich', 'b19789786': 'Bexley', 'b19968437': 'Richmond upon Thame', 'b18117107': 'Islington', 'b18252011': 'Camden', 'b19783760': 'Ealing', 'b19882956': 'Camden', 'b18253726': 'City of London', 'b19969399': 'Camden', 'b19882488': 'Sutton', 'b19789877': 'Bexley', 'b1997131x': 'Newham', 'b19786311': 'Bexley', 'b19824142': 'Kensington and Chelsea', 'b19876889': 'Waltham Forest', 'b18117041': 'Hammersmith and Fulham', 'b19883687': 'Tower Hamlets', 'b18249565': 'Lambeth', 'b17997410': 'Southwark', 'b18238099': 'Hammersmith and Fulham', 'b2005676x': 'Hackney', 'b18253660': 'City of London', 'b1978403x': 'Barking and Dagenham', 'b18218842': 'Southwark', 'b19876816': 'Waltham Forest', 'b18197280': 'Camden', 'b19883535': 'Tower Hamlets', 'b18251389': 'Camden', 'b19953550': 'Hammersmith and Fulham', 'b1982242x': 'Hackney', 'b18118306': 'Camden', 'b19953082': 'Southwark', 'b18244610': 'Lewisham', 'b19823083': 'Tower Hamlets', 'b19875009': 'Kingston upon Thames', 'b18044682': 'Tower Hamlets', 'b18249176': 'Haringey', 'b18222390': 'Hammersmith and Fulham', 'b18042120': 'Hackney', 'b19822935': 'Tower Hamlets', 'b19824555': 'Harrow', 'b19953884': 'Tower Hamlets', 'b19953331': 'Hammersmith and Fulham', 'b18219469': 'Southwark', 'b19975946': 'Haringey', 'b19823332': 'Greenwich', 'b19788204': 'Ealing', 
'b19784788': 'Bromley', 'b18044530': 'Lewisham', 'b18236479': 'Wandsworth', 'b19955480': 'Southwark', 'b18018270': 'Wandsworth', 'b18120751': 'Wandsworth', 'b1825438x': 'City of Westminster', 'b19822686': 'Tower Hamlets', 'b18254470': 'City of Westminster', 'b18245870': 'Tower Hamlets', 'b19952934': 'Southwark', 'b19952685': 'Tower Hamlets', 'b19793959': 'Havering', 'b18246898': 'Southwark', 'b1978496x': 'Bromley', 'b19795464': 'Barnet', 'b1979177x': 'Barnet', 'b19881599': 'Newham', 'b19785781': 'Bromley', 'b20106786': 'Lewisham', 'b1822362x': 'Kensington and Chelsea', 'b19883377': 'Tower Hamlets', 'b19885684': 'Hackney', 'b2010683x': 'Lewisham', 'b18246114': 'Tower Hamlets', 'b18245274': 'City of Westminster', 'b19877535': 'Hounslow', 'b19874716': 'Newham', 'b18245705': 'City of Westminster', 'b18124124': 'Hackney', 'b18244993': 'Lewisham', 'b19874844': 'Newham', 'b18249826': 'Hackney', 'b18246333': 'Tower Hamlets', 'b19968942': 'Hounslow', 'b19785562': 'Bromley', 'b18252837': 'London County Council', 'b18250063': 'Hackney', 'b19792608': 'Waltham Forest', 'b19968152': 'Hounslow', 'b19793601': 'Bromley', 'b19792955': 'Waltham Forest', 'b19797126': 'Brent', 'b19971412': 'Merton', 'b1988137x': 'Newham', 'b19794733': 'Enfield', 'b18221877': 'Camden', 'b18238300': 'Hammersmith and Fulham', 'b19877390': 'Merton', 'b19797382': 'Haringey', 'b19878151': 'Redbridge', 'b19883055': 'Kingston upon Thames', 'b19954426': 'Lewisham', 'b18249772': 'Hackney', 'b17999376': 'Camden', 'b18253696': 'City of London', 'b18247465': 'City of Westminster', 'b1978966x': 'Bexley', 'b19884229': 'City of London', 'b19879829': 'Richmond upon Thame', 'b18247398': 'City of Westminster', 'b19883948': 'Sutton', 'b18249486': 'Haringey', 'b19883237': 'Kingston upon Thames', 'b19875393': 'Southwark', 'b19970237': 'Merton', 'b19796651': 'Brent', 'b19791975': 'Barnet', 'b19823320': 'Greenwich', 'b19786141': 'Croydon', 'b18253003': 'London County Council', 'b19969855': 'Redbridge', 'b1825326x': 'City of 
London', 'b19789981': 'Bexley', 'b19796389': 'Brent', 'b19787546': 'Barking and Dagenham', 'b19787236': 'Croydon', 'b19877894': 'Ealing', 'b19783437': 'Ealing', 'b1824693x': 'Southwark', 'b19975776': 'Richmond upon Thame', 'b18116504': 'Camden', 'b18237022': 'Lewisham', 'b18237794': 'Greenwich', 'b20056229': 'Islington', 'b18239559': 'Lambeth', 'b19956356': 'City of Westminster', 'b2005659x': 'Hackney', 'b17997999': 'Hammersmith and Fulham', 'b18236261': 'Wandsworth', 'b18219585': 'Southwark', 'b19882543': 'Sutton', 'b1979647x': 'Brent', 'b18248160': 'City of Westminster', 'b1988543x': 'Hackney', 'b19824683': 'Harrow', 'b18237241': 'Islington', 'b18252606': 'London County Council', 'b18252357': 'Camden', 'b18244865': 'Lewisham', 'b19790739': 'Barnet', 'b19792384': 'Havering', 'b19953288': 'Hammersmith and Fulham', 'b19968176': 'Hounslow', 'b19823435': 'Greenwich', 'b18218982': 'Southwark', 'b19821499': 'Bromley', 'b18235797': 'Kensington and Chelsea', 'b19955601': 'Southwark', 'b18250907': 'Wandsworth', 'b1982435x': 'Harrow', 'b18038475': 'Islington', 'b18222237': 'Camden', 'b18236042': 'Kensington and Chelsea', 'b18239973': 'Lambeth', 'b18120696': 'City of Westminster', 'b19789798': 'Bexley', 'b1988056x': 'Hounslow', 'b19794496': 'Hillingdon', 'b18235530': 'Hammersmith and Fulham', 'b19955078': 'City of Westminster', 'b19875630': 'Camden', 'b19822741': 'Tower Hamlets', 'b19822212': 'Hackney', 'b19881162': 'Sutton', 'b19877705': 'Hounslow', 'b19877250': 'Merton', 'b18106158': 'Islington', 'b18218726': 'Southwark', 'b18106626': 'Islington', 'b18254779': 'City of Westminster', 'b19787844': 'Ealing', 'b19976148': 'Kingston upon Thames', 'b19791124': 'Haringey', 'b19785999': 'Croydon', 'b18111312': 'Greenwich', 'b19793418': 'Bromley', 'b19793145': 'Bromley', 'b19880789': 'Hounslow', 'b18254597': 'City of Westminster', 'b18209798': 'Southwark', 'b19881782': 'Haringey', 'b19785197': 'Bexley', 'b1996982x': 'Redbridge', 'b18246709': 'Southwark', 'b19878709': 'Redbridge', 
'b19954219': 'Lambeth', 'b18246254': 'Tower Hamlets', 'b18247076': 'Southwark', 'b18239304': 'Islington', 'b19955923': 'Southwark', 'b19968772': 'Sutton', 'b19785227': 'Bexley', 'b19824518': 'Harrow', 'b1988509x': 'Hackney', 'b19970420': 'Ealing', 'b19878837': 'Richmond upon Thame', 'b19885325': 'Hackney', 'b18250294': 'Wandsworth', 'b19879611': 'Waltham Forest', 'b17998785': 'Islington', 'b19786554': 'Bexley', 'b1825374x': 'City of London', 'b19786086': 'Croydon', 'b19971333': 'Merton', 'b18237496': 'Islington', 'b19797242': 'Haringey', 'b18117120': 'Southwark', 'b18252035': 'Camden', 'b19790946': 'Barnet', 'b19957117': 'Southwark', 'b19786888': 'Croydon', 'b19786335': 'Bexley', 'b19789506': 'Enfield', 'b19789853': 'Bexley', 'b19824166': 'Kensington and Chelsea', 'b18117028': 'Camden', 'b20083865': 'Southwark', 'b18252291': 'Camden', 'b18249541': 'Lambeth', 'b18250580': 'Wandsworth', 'b18118094': 'Kensington and Chelsea', 'b18251055': 'Wandsworth', 'b18236182': 'Kensington and Chelsea', 'b19956162': 'City of Westminster', 'b18222006': 'Kensington and Chelsea', 'b19787686': 'Barking and Dagenham', 'b1810874x': 'Tower Hamlets', 'b19953537': 'Hammersmith and Fulham', 'b19783590': 'Ealing', 'b19883559': 'Tower Hamlets', 'b20056783': 'Hackney', 'b19784016': 'Barking and Dagenham', 'b18253817': 'City of London', 'b19884783': 'City of London', 'b18249152': 'Haringey', 'b19787066': 'Croydon', 'b19877237': 'Merton', 'b18251675': 'Camden', 'b19876944': 'Waltham Forest', 'b19822911': 'Tower Hamlets', 'b1811832x': 'Hammersmith and Fulham', 'b19793091': 'Waltham Forest', 'b1988297x': 'Kingston upon Thames', 'b18219445': 'Southwark', 'b19823356': 'Greenwich', 'b19784909': 'Bromley', 'b18239894': 'Lambeth', 'b18044554': 'Southwark', 'b18251717': 'Camden', 'b18236455': 'Wandsworth', 'b19822480': 'Hackney', 'b20106816': 'Lewisham', 'b19876634': 'Waltham Forest', 'b18223606': 'Kensington and Chelsea', 'b18223357': 'Kensington and Chelsea', 'b19885520': 'Hackney', 'b19876713': 
'Waltham Forest', 'b19793972': 'Havering', 'b19952661': 'Tower Hamlets', 'b18042983': 'Tower Hamlets', 'b19952910': 'Southwark', 'b1987473x': 'Newham', 'b19874480': 'Harrow', 'b18248391': 'City of Westminster', 'b19795488': 'Barnet', 'b18239092': 'Islington', 'b19876427': 'Waltham Forest', 'b19881575': 'Newham', 'b19785768': 'Bromley', 'b18041450': 'Hackney', 'b19885660': 'Hackney', 'b18246138': 'Tower Hamlets', 'b18250105': 'Hackney', 'b19788228': 'Ealing', 'b1824581x': 'Tower Hamlets', 'b19792591': 'Waltham Forest', 'b18245729': 'City of Westminster', 'b19968966': 'Hounslow', 'b19822005': 'Hackney', 'b19876646': 'Waltham Forest', 'b19794289': 'Havering', 'b19794538': 'Hillingdon', 'b18246394': 'Southwark', 'b19881319': 'Sutton', 'b19790442': 'Barnet', 'b18221932': 'Kensington and Chelsea', 'b19793625': 'Bromley', 'b18253623': 'City of London', 'b18253908': 'City of London', 'b19792621': 'Waltham Forest', 'b18220186': 'Lewisham', 'b19880182': 'Hillingdon', 'b1824984x': 'Hackney', 'b19792979': 'Waltham Forest', 'b19794757': 'Enfield', 'b18238324': 'Hammersmith and Fulham', 'b18238853': 'Camden', 'b18221853': 'Camden', 'b19953033': 'Southwark', 'b19790193': 'Barnet', 'b1825004x': 'Hackney', 'b19874868': 'Newham', 'b18247660': 'City of Westminster', 'b18251298': 'Wandsworth', 'b19783528': 'Ealing', 'b19954402': 'Lewisham', 'b19954153': 'Lambeth', 'b1988378x': 'Sutton', 'b18249711': 'Hackney', 'b19877110': 'Waltham Forest', 'b19880042': 'Richmond upon Thame', 'b18221580': 'City of Westminster', 'b17996879': 'Kensington and Chelsea', 'b18247441': 'City of Westminster', 'b19884205': 'City of London', 'b19879842': 'Richmond upon Thame', 'b19971436': 'Merton', 'b19791951': 'Barnet', 'b19796675': 'Brent', 'b19883961': 'Sutton', 'b19970213': 'Merton', 'b18118409': 'Greenwich', 'b1979308x': 'Waltham Forest', 'b18106948': 'Hammersmith and Fulham', 'b18253027': 'London County Council', 'b18220320': 'Hammersmith and Fulham', 'b18251080': 'Wandsworth', 'b19796456': 'Brent', 
'b19791483': 'Hillingdon', 'b17998049': 'Tower Hamlets', 'b19957178': 'Southwark', 'b18219615': 'Southwark', 'b19882786': 'Merton', 'b18237009': 'Lewisham', 'b18248007': 'City of Westminster', 'b18116528': 'Camden', 'b19956800': 'Southwark', 'b18239535': 'Islington', 'b1987926x': 'Richmond upon Thame', 'b19882567': 'Sutton', 'b18248147': 'City of Westminster', 'b19823988': 'Kensington and Chelsea', 'b18237265': 'Islington', 'b19797503': 'Haringey', 'b19792025': 'Barnet', 'b19823630': 'Greenwich', 'b19821852': 'Enfield', 'b19787212': 'Croydon', 'b1995637x': 'City of Westminster', 'b18252370': 'Camden', 'b18244804': 'Lewisham', 'b19969582': 'Harrow', 'b18120623': 'City of London', 'b19792360': 'Havering', 'b19823186': 'Tower Hamlets', 'b1823995x': 'Lambeth', 'b18236200': 'Wandsworth', 'b18237915': 'Greenwich', 'b1996819x': 'Hounslow', 'b1823866x': 'Camden', 'b19822583': 'Hackney', 'b18235517': 'Hammersmith and Fulham', 'b19955054': 'Tower Hamlets', 'b1825150x': 'Camden', 'b19875617': 'Camden', 'b19822765': 'Tower Hamlets', 'b19876099': 'Redbridge', 'b19877274': 'Merton', 'b19880546': 'Hounslow', 'b19880236': 'Hounslow', 'b19795348': 'Barnet', 'b19884436': 'City of London', 'b19788071': 'Ealing', 'b19784594': 'Barking and Dagenham', 'b19976161': 'Kingston upon Thames', 'b18254755': 'City of Westminster', 'b19791100': 'Haringey', 'b19787868': 'Ealing', 'b18246096': 'Tower Hamlets', 'b19883067': 'Kingston upon Thames', 'b18249334': 'Haringey', 'b19793169': 'Bromley', 'b19797886': 'Hillingdon', 'b18247246': 'City of Westminster', 'b18252977': 'London County Council', 'b19878722': 'Redbridge', 'b19954232': 'Lambeth', 'b18246278': 'Tower Hamlets', 'b19792566': 'Waltham Forest', 'b17998232': 'Hammersmith and Fulham', 'b19879453': 'Waltham Forest', 'b18247052': 'Southwark', 'b18238907': 'Camden', 'b18240008': 'Greenwich', 'b19955947': 'Southwark', 'b19968759': 'Sutton', 'b19797084': 'Brent', 'b19824531': 'Harrow', 'b18239328': 'Islington', 'b19954098': 'Lambeth', 
'b19785240': 'Bexley', 'b19878850': 'Richmond upon Thame', 'b19790272': 'Barnet', 'b19969806': 'Redbridge', 'b19885349': 'Hackney', 'b19878588': 'Redbridge', 'b19879672': 'Waltham Forest', 'b1978952x': 'Enfield', 'b19786530': 'Bexley', 'b19971357': 'Merton', 'b20056941': 'Southwark', 'b19797266': 'Haringey', 'b18117144': 'Southwark', 'b18121998': 'Tower Hamlets', 'b18247416': 'City of Westminster', 'b19956496': 'City of Westminster', 'b18117004': 'Hammersmith and Fulham', 'b18237800': 'Greenwich', 'b19970407': 'Ealing', 'b18238051': 'Hammersmith and Fulham', 'b19875149': 'Kingston upon Thames', 'b18249528': 'Lambeth', 'b19879180': 'Richmond upon Thame', 'b20056412': 'Islington', 'b19956149': 'City of Westminster', 'b19956630': 'City of Westminster', 'b19787662': 'Barking and Dagenham', 'b18238580': 'Camden', 'b19883572': 'Tower Hamlets', 'b19970006': 'Kingston upon Thames', 'b19877912': 'Ealing', 'b19880844': 'Hounslow', 'b19783577': 'Ealing', 'b19953045': 'Southwark', 'b18244658': 'Lewisham', 'b19882993': 'Kingston upon Thames', 'b18253490': 'City of London', 'b18253763': 'City of London', 'b18253830': 'City of London', 'b18252412': 'London County Council', 'b19787005': 'Croydon', 'b18251699': 'Camden', 'b18248305': 'City of Westminster', 'b19879635': 'Waltham Forest', 'b19822972': 'Tower Hamlets', 'b19882191': 'Merton', 'b1979602x': 'Hillingdon', 'b19975983': 'Sutton', 'b19953744': 'Hammersmith and Fulham', 'b1982239x': 'Hackney', 'b19795130': 'Enfield', 'b18044578': 'Kensington and Chelsea', 'b19883201': 'Kingston upon Thames', 'b19791732': 'Barnet', 'b20106877': 'Lewisham', 'b19786578': 'Croydon', 'b18249759': 'Hackney', 'b19793996': 'Havering', 'b18223370': 'Kensington and Chelsea', 'b19885507': 'Hackney', 'b19969247': 'Newham', 'b18245523': 'City of Westminster', 'b19824026': 'Kensington and Chelsea', 'b1982337x': 'Greenwich', 'b18245833': 'Tower Hamlets', 'b18245298': 'City of Westminster', 'b18248809': 'Islington', 'b19952648': 'Tower Hamlets', 'b18222973': 
'Wandsworth', 'b18019092': 'Camden', 'b19881551': 'Newham', 'b19876403': 'Waltham Forest', 'b19785744': 'Bromley', 'b19881083': 'Sutton', 'b1799701x': 'Wandsworth', 'b18223084': 'Tower Hamlets', 'b19885647': 'Hackney', 'b18250129': 'Hackney', 'b18245742': 'Tower Hamlets', 'b19874753': 'Newham', 'b19784922': 'Bromley', 'b19784296': 'Barking and Dagenham', 'b19788241': 'Ealing', 'b18251997': 'Camden', 'b19822029': 'Hackney', 'b19785525': 'Bromley', 'b19881885': 'Haringey', 'b19881332': 'Sutton', 'b18222845': 'Tower Hamlets', 'b19794514': 'Hillingdon', 'b1822183x': 'Camden', 'b18045224': 'Kensington and Chelsea', 'b1799908x': 'Southwark', 'b18250804': 'Wandsworth', 'b19790466': 'Barnet', 'b1988459x': 'Port of London', 'b1824760x': 'City of Westminster', 'b19792645': 'Waltham Forest', 'b1987540x': 'Southwark', 'b1987666x': 'Waltham Forest', 'b18254639': 'City of Westminster', 'b1979454x': 'Hillingdon', 'b19794770': 'Enfield', 'b19954396': 'Lewisham', 'b18238877': 'Camden', 'b19880686': 'Hounslow', 'b19784430': 'Barking and Dagenham', 'b18250336': 'Wandsworth', 'b19955558': 'Southwark', 'b18249863': 'Hackney', 'b19874807': 'Newham', 'b19878448': 'Redbridge', 'b18249735': 'Hackney', 'b19878114': 'Ealing', 'b19883018': 'Kingston upon Thames', 'b18246515': 'Southwark', 'b19783802': 'Ealing', 'b19880066': 'Richmond upon Thame', 'b19877134': 'Merton', 'b19879866': 'Richmond upon Thame', 'b19795099': 'Enfield', 'b18253222': 'City of London', 'b19971187': 'Newham', 'b19792918': 'Waltham Forest', 'b18254251': 'City of Westminster', 'b19883900': 'Sutton', 'b19970766': 'Richmond upon Thame', 'b19796614': 'Brent', 'b19793200': 'Bromley', 'b18047269': 'Lewisham', 'b18253593': 'City of London', 'b18220307': 'Hammersmith and Fulham', 'b19796924': 'Brent', 'b19882762': 'Merton', 'b1978627x': 'Croydon', 'b19957154': 'Southwark', 'b19879246': 'Richmond upon Thame', 'b18116541': 'Camden', 'b18248020': 'City of Westminster', 'b19792827': 'Waltham Forest', 'b19795142': 'Enfield', 
'b18239511': 'Islington', 'b20056886': 'Southwark', 'b19956824': 'Southwark', 'b18252175': 'Camden', 'b1825231x': 'Camden', 'b19882506': 'Sutton', 'b18248123': 'City of Westminster', 'b19789956': 'Bexley', 'b19968206': 'Hounslow', 'b19823964': 'Islington', 'b1823706x': 'Lewisham', 'b19824646': 'Harrow', 'b19797527': 'Haringey', 'b20056084': 'Wandsworth', 'b18237289': 'Islington', 'b19792001': 'Barnet', 'b19787273': 'Croydon', 'b19975843': 'Richmond upon Thame', 'b1823575x': 'Kensington and Chelsea', 'b19874583': 'Newham', 'b2005662x': 'Hackney', 'b19971096': 'Waltham Forest', 'b18245195': 'City of Westminster', 'b19792347': 'Havering', 'b19824397': 'Harrow', 'b18218945': 'Southwark', 'b18236224': 'Wandsworth', 'b18197309': 'London County Council', 'b1982256x': 'Hackney', 'b18236881': 'Tower Hamlets', 'b1823608x': 'Kensington and Chelsea', 'b18245997': 'Tower Hamlets', 'b19956320': 'City of Westminster', 'b18222766': 'City of Westminster', 'b19787492': 'Barking and Dagenham', 'b19875186': 'Kingston upon Thames', 'b18249292': 'Haringey', 'b18237782': 'Greenwich', 'b18244828': 'Lewisham', 'b19953707': 'Hammersmith and Fulham', 'b19783383': 'Ealing', 'b19953252': 'Southwark', 'b18235578': 'Hammersmith and Fulham', 'b19784600': 'Barking and Dagenham', 'b19795580': 'Hillingdon', 'b18110885': 'Hammersmith and Fulham', 'b19955704': 'Southwark', 'b19791690': 'Barnet', 'b19822704': 'Tower Hamlets', 'b19793832': 'Barnet', 'b1811135x': 'Greenwich', 'b19788058': 'Ealing', 'b19788721': 'Enfield', 'b18106663': 'Hackney', 'b19787807': 'Ealing', 'b19795361': 'Barnet', 'b18254007': 'Tower Hamlets', 'b19878023': 'Ealing', 'b18251523': 'Camden', 'b19956101': 'City of Westminster', 'b18223412': 'Kensington and Chelsea', 'b19794186': 'Havering', 'b18246072': 'Tower Hamlets', 'b19821542': 'Bromley', 'b18252953': 'London County Council', 'b18246217': 'Tower Hamlets', 'b19880212': 'Hillingdon', 'b19955960': 'Southwark', 'b18247039': 'Southwark', 'b19797060': 'Brent', 'b19785264': 'Bexley', 
'b18250920': 'Wandsworth', 'b19885362': 'Hackney', 'b19878874': 'Richmond upon Thame', 'b1996979x': 'Redbridge', 'b19790259': 'Barnet', 'b19786517': 'Bexley', 'b19789725': 'Bexley', 'b19789099': 'Enfield', 'b19786049': 'Croydon', 'b19971370': 'Merton', 'b1995461x': 'Lewisham', 'b19797205': 'Haringey', 'b18238543': 'Camden', 'b18240021': 'Greenwich', 'b19879477': 'Waltham Forest', 'b19789543': 'Enfield', 'b19878308': 'Redbridge', 'b18249504': 'Lambeth', 'b18238075': 'Hammersmith and Fulham', 'b19875964': 'Redbridge', 'b18253854': 'City of London', 'b19879659': 'Waltham Forest', 'b19884011': 'City of London', 'b19956617': 'City of Westminster', 'b18252783': 'London County Council', 'b19796559': 'Brent', 'b19953069': 'Southwark', 'b19883596': 'Tower Hamlets', 'b19876166': 'Redbridge', 'b18251328': 'Wandsworth', 'b18118367': 'Greenwich', 'b19787649': 'Barking and Dagenham', 'b19877936': 'Ealing', 'b18244105': 'London County Council', 'b19783553': 'Ealing', 'b19880868': 'Hounslow', 'b1823754x': 'Greenwich', 'b19975612': 'Newham', 'b18253787': 'City of London', 'b19789890': 'Bexley', 'b18246783': 'Southwark', 'b19787029': 'Croydon', 'b18252436': 'London County Council', 'b1997002x': 'Kingston upon Thames', 'b19822959': 'Tower Hamlets', 'b19953392': 'Hammersmith and Fulham', 'b1978739x': 'Barking and Dagenham', 'b18108702': 'Camden', 'b18219408': 'Southwark', 'b18219159': 'Southwark', 'b19970341': 'Richmond upon Thame', 'b18239705': 'Lambeth', 'b18044591': 'Wandsworth', 'b18236418': 'Wandsworth', 'b18239857': 'Lambeth', 'b18246886': 'Southwark', 'b18236145': 'Kensington and Chelsea', 'b20106853': 'Lewisham', 'b19882245': 'Merton', 'b19787042': 'Croydon', 'b19790892': 'Barnet', 'b18248822': 'Islington', 'b19885568': 'Hackney', 'b19969260': 'Newham', 'b19879088': 'Richmond upon Thame', 'b18253878': 'City of London', 'b18251262': 'Wandsworth', 'b19824075': 'Kensington and Chelsea', 'b19953938': 'Tower Hamlets', 'b19881538': 'Newham', 'b19785896': 'Bromley', 'b19785720': 
'Bromley', 'b18251651': 'Camden', 'b19885623': 'Hackney', 'b1824550x': 'City of Westminster', 'b18250142': 'Hackney', 'b1825049x': 'Wandsworth', 'b18245766': 'Tower Hamlets', 'b19874777': 'Newham', 'b19788265': 'Ealing', 'b19788915': 'Enfield', 'b18106857': 'Wandsworth', 'b19795117': 'Enfield', 'b19785094': 'Bexley', 'b19876683': 'Waltham Forest', 'b18252898': 'London County Council', 'b19822042': 'Hackney', 'b19794575': 'Hillingdon', 'b18250002': 'Hackney', 'b19885192': 'Hackney', 'b19790405': 'Barnet', 'b1987733x': 'Merton', 'b18045248': 'Kensington and Chelsea', 'b19792669': 'Waltham Forest', 'b18249085': 'Haringey', 'b19954372': 'Lewisham', 'b19794794': 'Enfield', 'b18246680': 'Southwark', 'b19878680': 'Redbridge', 'b18238816': 'Camden', 'b18221816': 'Camden', 'b19880662': 'Hounslow', 'b1996965x': 'Harrow', 'b19877481': 'Merton', 'b18247623': 'City of Westminster', 'b19884576': 'City of London', 'b19784417': 'Barking and Dagenham', 'b19955571': 'Southwark', 'b1995539x': 'City of Westminster', 'b18249887': 'Hackney', 'b18254494': 'City of Westminster', 'b19875423': 'Southwark', 'b19878461': 'Redbridge', 'b19970560': 'Kingston upon Thames', 'b19954116': 'Lambeth', 'b18246539': 'Southwark', 'b19880005': 'Richmond upon Thame', 'b19783826': 'Ealing', 'b19877158': 'Merton', 'b18253209': 'City of London', 'b18247489': 'City of Westminster', 'b19791318': 'Hillingdon', 'b18254275': 'City of Westminster', 'b18220149': 'Lewisham', 'b19791914': 'Barnet', 'b19970705': 'Richmond upon Thame', 'b19793224': 'Bromley', 'b19789397': 'Enfield', 'b18047245': 'Lambeth', 'b18248597': 'Islington', 'b18239286': 'Islington', 'b18220368': 'Wandsworth', 'b19882749': 'Merton', 'b1988252x': 'Sutton', 'b19796948': 'Brent', 'b1996822x': 'Hounslow', 'b17998001': 'Hammersmith and Fulham', 'b19957130': 'Southwark', 'b1978997x': 'Bexley', 'b1824810x': 'City of Westminster', 'b18237733': 'Greenwich', 'b18248044': 'City of Westminster', 'b19956848': 'Southwark', 'b19823800': 'Greenwich', 
'b1982466x': 'Harrow', 'b19883298': 'Redbridge', 'b18252151': 'Camden', 'b19875885': 'Camden', 'b1978725x': 'Croydon', 'b19883924': 'Sutton', 'b19969016': 'Hounslow', 'b19786256': 'Croydon', 'b19797540': 'Haringey', 'b19823940': 'Islington', 'b18252333': 'Camden', 'b1987778x': 'Hounslow', 'b19796493': 'Brent', 'b19975867': 'Enfield', 'b18250798': 'Wandsworth', 'b18218969': 'Southwark', 'b19823496': 'Greenwich', 'b19956265': 'City of Westminster', 'b19792323': 'Havering', 'b19875228': 'Kingston upon Thames', 'b18249310': 'Haringey', 'b19883432': 'Tower Hamlets', 'b18238622': 'Camden', 'b18244592': 'Lewisham', 'b18245973': 'Tower Hamlets', 'b1979079x': 'Barnet', 'b19884801': 'City of London', 'b18237952': 'Hammersmith and Fulham', 'b20056606': 'Hackney', 'b19792499': 'Lewisham', 'b19875162': 'Kingston upon Thames', 'b18251158': 'Wandsworth', 'b18249279': 'Haringey', 'b19822546': 'Hackney', 'b19953720': 'Hammersmith and Fulham', 'b19953239': 'Southwark', 'b1810664x': 'Tower Hamlets', 'b1821874x': 'Southwark', 'b19784624': 'Barking and Dagenham', 'b19883742': 'Sutton', 'b19795567': 'Barnet', 'b19955765': 'Southwark', 'b19822728': 'Tower Hamlets', 'b19791677': 'Barnet', 'b18111117': 'Lewisham', 'b19793819': 'Barnet', 'b18235773': 'Kensington and Chelsea', 'b1982323x': 'Greenwich', 'b19788708': 'Enfield', 'b19787820': 'Ealing', 'b19795300': 'Barnet', 'b18236601': 'Tower Hamlets', 'b18251547': 'Camden', 'b18223436': 'Kensington and Chelsea', 'b19785331': 'Bexley', 'b18235554': 'Hammersmith and Fulham', 'b18235803': 'Kensington and Chelsea', 'b18246059': 'Tower Hamlets', 'b19955182': 'City of Westminster', 'b19876142': 'Redbridge', 'b19881277': 'Sutton', 'b19821566': 'Bromley', 'b1997095x': 'Richmond upon Thame', 'b19880273': 'Hounslow', 'b18254688': 'City of Westminster', 'b19955984': 'Southwark', 'b19976070': 'Kingston upon Thames', 'b19968796': 'Waltham Forest', 'b19797047': 'Brent', 'b19785288': 'Bexley', 'b18039765': 'Tower Hamlets', 'b19885386': 'Hackney', 
'b19969843': 'Redbridge', 'b19878898': 'Richmond upon Thame', 'b18250944': 'Wandsworth', 'b19821797': 'Bromley', 'b1825293x': 'London County Council', 'b19885234': 'Hackney', 'b19790235': 'Barnet', 'b19882348': 'Merton', 'b19971394': 'Merton', 'b18117181': 'Kensington and Chelsea', 'b19878096': 'Ealing', 'b19880492': 'Hounslow', 'b19884382': 'City of London', 'b18247015': 'Southwark', 'b20056539': 'Hackney', 'b18247544': 'City of Westminster', 'b18038384': 'Southwark', 'b19874340': 'City of Westminster', 'b19956988': 'Southwark', 'b19956459': 'City of Westminster', 'b19875940': 'Redbridge', 'b1995458x': 'Lewisham', 'b19954633': 'Lewisham', 'b19796791': 'Brent', 'b19878321': 'Redbridge', 'b19822200': 'Hackney', 'b19954657': 'Lewisham', 'b19783619': 'Ealing', 'b19884035': 'City of Westminster', 'b1799827x': 'Hammersmith and Fulham', 'b18253416': 'City of London', 'b19789701': 'Bexley', 'b19786062': 'Croydon', 'b19787625': 'Barking and Dagenham', 'b19956678': 'City of Westminster', 'b18118380': 'Greenwich', 'b18251304': 'Wandsworth', 'b19970043': 'Kingston upon Thames', 'b19880807': 'Hounslow', 'b19796572': 'Brent', 'b17998499': 'Camden', 'b19975673': 'Bromley', 'b2005645x': 'Islington', 'b20274920': 'Harrow', 'b18121950': 'Tower Hamlets', 'b18237381': 'Islington', 'b19789567': 'Enfield', 'b18236388': 'Wandsworth', 'b19880571': 'Hounslow', 'b18044608': 'Islington', 'b19783280': 'Ealing', 'b1987795x': 'Ealing', 'b18122024': 'Tower Hamlets', 'b18246837': 'Southwark', 'b19784569': 'Barking and Dagenham', 'b18239729': 'Lambeth', 'b18121482': 'City of Westminster', 'b19956186': 'City of Westminster', 'b18239870': 'Lambeth', 'b17997720': 'Tower Hamlets', 'b18244786': 'Lewisham', 'b18223333': 'Kensington and Chelsea', 'b19882269': 'Merton', 'b18111580': 'Port of London', 'b19953811': 'Tower Hamlets', 'b1988560x': 'Hackney', 'b19885817': 'Hackney', 'b19885544': 'Hackney', 'b18248846': 'Islington', 'b1824578x': 'Tower Hamlets', 'b19786931': 'Croydon', 'b18245560': 'City of 
Westminster', 'b19824051': 'Kensington and Chelsea', 'b19823009': 'Tower Hamlets', 'b19785872': 'Bromley', 'b19785707': 'Bromley', 'b19881046': 'Sutton', 'b18250166': 'Hackney', 'b19788939': 'Enfield', 'b19874790': 'Newham', 'b1996920x': 'Newham', 'b19795178': 'Enfield', 'b19786001': 'Croydon', 'b19794551': 'Hillingdon', 'b1823883x': 'Camden', 'b19794228': 'Havering', 'b19969673': 'Harrow', 'b19790429': 'Barnet', 'b1988039x': 'Hounslow', 'b19792682': 'Waltham Forest', 'b19884631': 'City of London', 'b19955376': 'City of Westminster', 'b1825195x': 'Camden', 'b17999042': 'City of London', 'b19878667': 'Redbridge', 'b1822359x': 'Kensington and Chelsea', 'b18238385': 'Camden', 'b19954359': 'Lewisham', 'b19877316': 'Merton', 'b18253611': 'City of London', 'b19880649': 'Hounslow', 'b19884552': 'City of London', 'b19784478': 'Barking and Dagenham', 'b18251481': 'Camden', 'b19875447': 'Southwark', 'b19955510': 'Southwark', 'b19791082': 'Haringey', 'b19785963': 'Bromley', 'b19822066': 'Hackney', 'b19783516': 'Ealing', 'b18118604': 'Lambeth', 'b19880029': 'Richmond upon Thame', 'b18246552': 'Southwark', 'b19878400': 'Redbridge', 'b19877171': 'Merton', 'b19793790': 'Bromley', 'b19793029': 'Waltham Forest', 'b19797576': 'Haringey', 'b19971497': 'Merton', 'b18239122': 'Islington', 'b19791331': 'Hillingdon', 'b18254214': 'City of Westminster', 'b1995413x': 'Lambeth', 'b18220162': 'Lewisham', 'b19953513': 'Hammersmith and Fulham', 'b1978384x': 'Ealing', 'b18253088': 'London County Council', 'b19793248': 'Bromley', 'b18239262': 'Islington', 'b18219561': 'Southwark', 'b18116917': 'Hammersmith and Fulham', 'b17996892': 'Kensington and Chelsea', 'b19970547': 'Kingston upon Thames', 'b18247209': 'City of Westminster', 'b18122565': 'Hammersmith and Fulham', 'b19879209': 'Richmond upon Thame', 'b19879751': 'Richmond upon Thame', 'b19882725': 'Merton', 'b19882890': 'Merton', 'b18248068': 'City of Westminster', 'b19971230': 'Newham', 'b19956861': 'Southwark', 'b19823824': 'Greenwich', 
'b18252138': 'Camden', 'b20056849': 'Hackney', 'b19970729': 'Richmond upon Thame', 'b19969077': 'Barnet', 'b19882099': 'Merton', 'b1823771x': 'Greenwich', 'b19792049': 'Barnet', 'b19786724': 'Croydon', 'b19824609': 'Harrow', 'b19786232': 'Croydon', 'b19823927': 'Islington', 'b19789919': 'Bexley', 'b19968243': 'Hounslow', 'b19797564': 'Haringey', 'b18252680': 'London County Council', 'b19796961': 'Brent', 'b19975806': 'Richmond upon Thame', 'b1824595x': 'Tower Hamlets', 'b1810616x': 'Islington', 'b18218908': 'Southwark', 'b19956241': 'City of Westminster', 'b18238609': 'Camden', 'b19883419': 'Tower Hamlets', 'b19875204': 'Kingston upon Thames', 'b17997264': 'Southwark', 'b17997914': 'Camden', 'b18237976': 'Hammersmith and Fulham', 'b20056667': 'Hackney', 'b19884825': 'City of London', 'b1979230x': 'Havering', 'b18249255': 'Haringey', 'b18251171': 'Wandsworth', 'b19796213': 'Brent', 'b19822522': 'Hackney', 'b19791537': 'Hillingdon', 'b19953215': 'Southwark', 'b19784648': 'Barking and Dagenham', 'b19788897': 'Enfield', 'b19823216': 'Hackney', 'b19784867': 'Bromley', 'b19955741': 'Southwark', 'b19795543': 'Barnet', 'b18236595': 'Tower Hamlets', 'b19822297': 'Hackney', 'b19791653': 'Barnet', 'b1822345x': 'Kensington and Chelsea', 'b18045091': 'Camden', 'b18235712': 'Kensington and Chelsea', 'b19788095': 'Ealing', 'b1978854x': 'Barnet', 'b19788769': 'Enfield', 'b19795324': 'Barnet', 'b18251560': 'Camden', 'b18254044': 'Tower Hamlets', 'b18236625': 'Tower Hamlets', 'b19876786': 'Waltham Forest', 'b18246035': 'Tower Hamlets', 'b18235827': 'Kensington and Chelsea', 'b18245043': 'Lewisham', 'b1979387x': 'Havering', 'b19874637': 'Newham', 'b18249929': 'Hackney', 'b18238610': 'Camden', 'b1997100x': 'Richmond upon Thame', 'b19785112': 'Bexley', 'b19881708': 'Haringey', 'b19881253': 'Sutton', 'b19821505': 'Bromley', 'b19885015': 'Islington', 'b1825021x': 'Hackney', 'b19954839': 'Tower Hamlets', 'b18239377': 'Islington', 'b19976057': 'Kingston upon Thames', 'b18237502': 
'Islington', 'b19797023': 'Brent', 'b19824592': 'Harrow', 'b19794058': 'Havering', 'b18250968': 'Wandsworth', 'b19790211': 'Barnet', 'b19793492': 'Bromley', 'b18247787': 'City of Westminster', 'b1988025x': 'Hounslow', 'b17996971': 'Wandsworth', 'b1978367x': 'Ealing', 'b19879921': 'Richmond upon Thame', 'b18247568': 'City of Westminster', 'b18240069': 'Greenwich', 'b1825343x': 'City of London', 'b19881071': 'Sutton', 'b19883626': 'Tower Hamlets', 'b19878345': 'Redbridge', 'b19875927': 'Redbridge', 'b1799925x': 'Hackney', 'b19796778': 'Brent', 'b19884837': 'City of London', 'b18238038': 'Hammersmith and Fulham', 'b1978398x': 'Barking and Dagenham', 'b19879696': 'Waltham Forest', 'b1987943x': 'Waltham Forest', 'b19884059': 'City of Westminster', 'b19787601': 'Barking and Dagenham', 'b19787352': 'Barking and Dagenham', 'b18251365': 'Camden', 'b19970067': 'Kingston upon Thames', 'b19877973': 'Ealing', 'b19880820': 'Hounslow', 'b19796511': 'Brent', 'b19953021': 'Southwark', 'b20274944': 'Harrow', 'b19789580': 'Enfield', 'b18237368': 'Islington', 'b19956472': 'City of Westminster', 'b18236364': 'Wandsworth', 'b18246813': 'Southwark', 'b18245237': 'City of Westminster', 'b19969193': 'Newham', 'b1997565x': 'Richmond upon Thame', 'b20056473': 'Islington', 'b18239742': 'Lambeth', 'b19956654': 'City of Westminster', 'b19953689': 'Hammersmith and Fulham', 'b18236108': 'Kensington and Chelsea', 'b19882282': 'Merton', 'b17998827': 'Kensington and Chelsea', 'b19969223': 'Newham', 'b19880108': 'Richmond upon Thame', 'b1825018x': 'Hackney', 'b19885830': 'Hackney', 'b18245547': 'City of Westminster', 'b19874406': 'Lewisham', 'b18248317': 'City of Westminster', 'b19786918': 'Croydon', 'b19823022': 'Tower Hamlets', 'b19786098': 'Croydon', 'b1823981x': 'Lambeth', 'b19824038': 'Kensington and Chelsea', 'b18252473': 'London County Council', 'b19953975': 'Tower Hamlets', 'b19821773': 'Bromley', 'b19792517': 'Lewisham', 'b1824886x': 'Islington', 'b19784983': 'Bromley', 'b19795154': 
'Enfield', 'b19794204': 'Havering', 'b18045285': 'Islington', 'b18235451': 'Hammersmith and Fulham', 'b1978837x': 'Barnet', 'b19952880': 'Southwark', 'b19784181': 'Barking and Dagenham', 'b19884618': 'City of London', 'b19955352': 'City of Westminster', 'b18253982': 'Tower Hamlets', 'b19955686': 'Southwark', 'b19785859': 'Bromley', 'b1982208x': 'Hackney', 'b19876312': 'Waltham Forest', 'b19880625': 'Hounslow', 'b19877377': 'Merton', 'b19884539': 'City of London', 'b19784326': 'Barking and Dagenham', 'b19787984': 'Ealing', 'b18251973': 'Camden', 'b18254457': 'City of Westminster', 'b1988106x': 'Sutton', 'b18254184': 'City of Westminster', 'b19785586': 'Kensington and Chelsea', 'b19881393': 'Newham', 'b19783863': 'Ealing', 'b19877195': 'Merton', 'b19793005': 'Waltham Forest', 'b19783632': 'Ealing', 'b18236583': 'Wandsworth', 'b1978532x': 'Bexley', 'b18254238': 'City of Westminster', 'b18239109': 'Islington', 'b19786323': 'Bexley', 'b1824645x': 'Southwark', 'b18108933': 'Islington', 'b1799939x': 'Islington', 'b19954335': 'Lewisham', 'b19878643': 'Redbridge', 'b17999066': 'City of London', 'b19879593': 'Waltham Forest', 'b18247192': 'City of Westminster', 'b19793261': 'Bromley', 'b18239249': 'Islington', 'b19955820': 'Southwark', 'b1988428x': 'Port of London', 'b19881939': 'Haringey', 'b19968619': 'Hounslow', 'b19954487': 'Lewisham', 'b19885222': 'Hackney', 'b18246576': 'Southwark', 'b19970523': 'Kingston upon Thames', 'b19882701': 'Merton', 'b1825360x': 'City of London', 'b19788952': 'Enfield', 'b19879775': 'Richmond upon Thame', 'b18247222': 'City of Westminster', 'b18248081': 'City of Westminster', 'b19971217': 'Newham', 'b20056862': 'Hackney', 'b18237770': 'Greenwich', 'b18252114': 'Camden', 'b19823848': 'Greenwich', 'b19876968': 'Waltham Forest', 'b19790004': 'Bexley', 'b19786219': 'Croydon', 'b19969053': 'Barnet', 'b20274889': 'Harrow', 'b19789245': 'Enfield', 'b19823903': 'Islington', 'b19824622': 'Harrow', 'b19968267': 'Hounslow', 'b19797588': 'Haringey', 
'b18039212': 'Wandsworth', 'b18238191': 'Hammersmith and Fulham', 'b18249681': 'Tower Hamlets', 'b19796985': 'Brent', 'b20274774': 'Brent', 'b1823799x': 'Hammersmith and Fulham', 'b20056394': 'Islington', 'b19956228': 'City of Westminster', 'b19875265': 'Kingston upon Thames', 'b18236820': 'Tower Hamlets', 'b18249358': 'Haringey', 'b1997582x': 'Richmond upon Thame', 'b20056138': 'Islington', 'b19884849': 'City of London', 'b20056643': 'Hackney', 'b19787169': 'Croydon', 'b19875125': 'Kingston upon Thames', 'b18249231': 'Haringey', 'b19791513': 'Hillingdon', 'b18251110': 'Wandsworth', 'b19796237': 'Brent', 'b19953768': 'Hammersmith and Fulham', 'b1988347x': 'Tower Hamlets', 'b18219032': 'Southwark', 'b19823277': 'Greenwich', 'b19784843': 'Bromley', 'b19788563': 'Barnet', 'b19784661': 'Barking and Dagenham', 'b19875691': 'Camden', 'b18111154': 'Tower Hamlets', 'b19793856': 'Havering', 'b18235736': 'Kensington and Chelsea', 'b18245936': 'Tower Hamlets', 'b18245687': 'City of Westminster', 'b18106687': 'Hackney', 'b1979552x': 'Barnet', 'b19876762': 'Waltham Forest', 'b1979163x': 'Hillingdon', 'b19791185': 'Havering', 'b18236649': 'Tower Hamlets', 'b18246011': 'Tower Hamlets', 'b18223473': 'Kensington and Chelsea', 'b18235591': 'Hammersmith and Fulham', 'b18250646': 'Wandsworth', 'b18235840': 'Kensington and Chelsea', 'b18245067': 'Lewisham', 'b19874613': 'Newham', 'b18249905': 'Hackney', 'b1979700x': 'Brent', 'b19876105': 'Redbridge', 'b19785136': 'Bexley', 'b19821529': 'Bromley', 'b19881721': 'Haringey', 'b19954815': 'Tower Hamlets', 'b19969739': 'Hounslow', 'b19885039': 'Tower Hamlets', 'b19792724': 'Waltham Forest', 'b18197292': 'City of London', 'b19976033': 'Kingston upon Thames', 'b18118318': 'Camden', 'b19794071': 'Havering', 'b18247325': 'City of Westminster', 'b1988123x': 'Sutton', 'b19969880': 'Redbridge', 'b18247763': 'City of Westminster', 'b19874625': 'Newham', 'b1988364x': 'Tower Hamlets', 'b17999728': 'City of Westminster', 'b19879908': 'Richmond upon 
Thame', 'b18247507': 'City of Westminster', 'b19823654': 'Greenwich', 'b19971023': 'Waltham Forest', 'b19878369': 'Redbridge', 'b19883869': 'Sutton', 'b19970882': 'Richmond upon Thame', 'b19954670': 'Lewisham', 'b19875903': 'Camden', 'b19796754': 'Brent', 'b19783656': 'Ealing', 'b19884072': 'Tower Hamlets', 'b19786025': 'Croydon', 'b19789038': 'Enfield', 'b19970080': 'Kingston upon Thames', 'b18251341': 'Camden', 'b19796535': 'Brent', 'b19957099': 'Southwark', 'b18252904': 'London County Council', 'b18122243': 'Southwark', 'b18116930': 'Hammersmith and Fulham', 'b18248743': 'Islington', 'b20274968': 'Harrow', 'b18237344': 'Islington', 'b18237691': 'Greenwich', 'b18121913': 'Southwark', 'b19784454': 'Barking and Dagenham', 'b19884163': 'City of London', 'b18246795': 'Southwark', 'b18122061': 'Southwark', 'b19882117': 'Merton', 'b19879015': 'Richmond upon Thame', 'b19786682': 'Croydon', 'b19875988': 'Redbridge', 'b18239766': 'Lambeth', 'b19955534': 'Southwark', 'b19875460': 'Southwark', 'b1982175x': 'Bromley', 'b19885854': 'Hackney', 'b19885581': 'Hackney', 'b18248330': 'City of Westminster', 'b18245213': 'City of Westminster', 'b19823046': 'Tower Hamlets', 'b18038256': 'Greenwich', 'b19786979': 'Croydon', 'b18252497': 'London County Council', 'b18236340': 'Wandsworth', 'b18044645': 'Hackney', 'b18048341': 'Camden', 'b18235694': 'Kensington and Chelsea', 'b1987442x': 'Greenwich', 'b18237836': 'Greenwich', 'b19788976': 'Enfield', 'b19792530': 'Waltham Forest', 'b19884989': 'Lewisham', 'b18222602': 'Tower Hamlets', 'b18239833': 'Lambeth', 'b1978708x': 'Croydon', 'b19797448': 'Haringey', 'b18236121': 'Kensington and Chelsea', 'b18238415': 'Camden', 'b18235475': 'Hammersmith and Fulham', 'b19971035': 'Waltham Forest', 'b19955339': 'City of Westminster', 'b19884679': 'City of London', 'b19953409': 'Hammersmith and Fulham', 'b19822315': 'Hackney', 'b19881009': 'Sutton', 'b19785835': 'Bromley', 'b19953951': 'Tower Hamlets', 'b19877353': 'Merton', 'b19877420': 'Merton', 
'b19880601': 'Hounslow', 'b19880352': 'Hounslow', 'b19884515': 'City of London', 'b19784302': 'Barking and Dagenham', 'b19787960': 'Ealing', 'b19875484': 'Southwark', 'b18251912': 'Camden', 'b18254433': 'City of Westminster', 'b19794265': 'Havering', 'b19784971': 'Bromley', 'b19793066': 'Waltham Forest', 'b1987957x': 'Waltham Forest', 'b19791379': 'Hillingdon', 'b19876592': 'Waltham Forest', 'b19954311': 'Lewisham', 'b18018634': 'City of London', 'b19788502': 'Barnet', 'b19878990': 'Richmond upon Thame', 'b19793285': 'Bromley', 'b18248536': 'Islington', 'b19874996': 'Kingston upon Thames', 'b19955807': 'Southwark', 'b18239225': 'Islington', 'b19881915': 'Haringey', 'b19785306': 'Bexley', 'b19794484': 'Hillingdon', 'b19790399': 'Barnet', 'b1987862x': 'Redbridge', 'b18250397': 'Wandsworth', 'b19885209': 'Hackney', 'b19969946': 'Kingston upon Thames', 'b19878199': 'Redbridge', 'b19879714': 'Waltham Forest', 'b19789488': 'Enfield', 'b18239596': 'Lambeth', 'b18237757': 'Greenwich', 'b19792888': 'Waltham Forest', 'b19876907': 'Waltham Forest', 'b19823861': 'Greenwich', 'b20056801': 'Hackney', 'b19883985': 'Sutton', 'b19791355': 'Hillingdon', 'b19790028': 'Bexley', 'b19793480': 'Bromley', 'b19786761': 'Croydon', 'b19789269': 'Enfield', 'b18047154': 'Islington', 'b19792086': 'Barnet', 'b18252394': 'Camden', 'b19794873': 'Enfield', 'b19883183': 'Kingston upon Thames', 'b18238178': 'Hammersmith and Fulham', 'b19882853': 'Merton', 'b18219536': 'Southwark', 'b19956757': 'City of Westminster', 'b19956204': 'City of Westminster', 'b19883328': 'Tower Hamlets', 'b18249371': 'Haringey', 'b19883456': 'Tower Hamlets', 'b18236807': 'Tower Hamlets', 'b19880984': 'Sutton', 'b17997951': 'Hammersmith and Fulham', 'b19882580': 'Sutton', 'b18252539': 'London County Council', 'b19884862': 'City of London', 'b20056114': 'Islington', 'b19823782': 'Greenwich', 'b19875101': 'Kingston upon Thames', 'b18251134': 'Wandsworth', 'b18249474': 'Haringey', 'b19787418': 'Barking and Dagenham', 
'b18249218': 'Haringey', 'b19791574': 'Hillingdon', 'b19874492': 'Harrow', 'b19796250': 'Brent', 'b19783309': 'Ealing', 'b18219019': 'Southwark', 'b19795051': 'Enfield', 'b19795506': 'Barnet', 'b18044451': 'Camden', 'b19791616': 'Hillingdon', 'b19822789': 'Tower Hamlets', 'b18237435': 'Islington', 'b1825066x': 'Wandsworth', 'b19885441': 'Hackney', 'b17998980': 'Lewisham', 'b20056321': 'Islington', 'b19882324': 'Merton', 'b18245912': 'Tower Hamlets', 'b18245663': 'City of Westminster', 'b18248299': 'City of Westminster', 'b18236662': 'Tower Hamlets', 'b1978482x': 'Bromley', 'b19876294': 'Redbridge', 'b1987487x': 'Newham', 'b19876749': 'Waltham Forest', 'b18237046': 'Lewisham', 'b18235864': 'Kensington and Chelsea', 'b19790624': 'Barnet', 'b19824208': 'Kensington and Chelsea', 'b18245006': 'Lewisham', 'b19823253': 'Greenwich', 'b19823526': 'Greenwich', 'b18249966': 'Hackney', 'b18245444': 'City of Westminster', 'b19876129': 'Redbridge', 'b18019225': 'City of Westminster', 'b19881745': 'Haringey', 'b19822121': 'Hackney', 'b19881216': 'Sutton', 'b18039832': 'Hackney', 'b19954876': 'Tower Hamlets', 'b19794678': 'Hillingdon', 'b18246291': 'Tower Hamlets', 'b17992473': 'Greenwich', 'b19885052': 'Lewisham', 'b1824774x': 'City of Westminster', 'b19969715': 'Hounslow', 'b19790569': 'Barnet', 'b19792700': 'Waltham Forest', 'b1978515x': 'Bexley', 'b19878424': 'Redbridge', 'b18243952': 'City of Westminster', 'b19794010': 'Havering', 'b18250257': 'Hackney', 'b19955479': 'Southwark', 'b17999212': 'Hackney', 'b17999741': 'Wandsworth', 'b18246436': 'Southwark', 'b19880169': 'Hillingdon', 'b19877651': 'Hounslow', 'b19882877': 'Merton', 'b19879969': 'Richmond upon Thame', 'b18247520': 'City of Westminster', 'b18254378': 'City of Westminster', 'b1979728x': 'Haringey', 'b19968814': 'Waltham Forest', 'b19874352': 'City of Westminster', 'b19878382': 'Redbridge', 'b19883845': 'Sutton', 'b19954694': 'Tower Hamlets', 'b19883663': 'Tower Hamlets', 'b19796882': 'Brent', 'b18249589': 
'Lambeth', 'b18251031': 'Wandsworth', 'b19793546': 'Bromley', 'b19884096': 'City of London', 'b19789014': 'Enfield', 'b1997601x': 'Kingston upon Thames', 'b19796080': 'Brent', 'b19883705': 'Tower Hamlets', 'b19879386': 'Richmond upon Thame', 'b17998165': 'Camden', 'b1823978x': 'Lambeth', 'b18237320': 'Islington', 'b19956435': 'City of Westminster', 'b19956927': 'Southwark', 'b19878941': 'Richmond upon Thame', 'b19882130': 'Merton', 'b19879039': 'Richmond upon Thame', 'b19968541': 'Hounslow', 'b19956691': 'City of Westminster', 'b19792141': 'Barnet', 'b19797424': 'Haringey', 'b19787315': 'Croydon', 'b18252709': 'London County Council', 'b19975788': 'Richmond upon Thame', 'b19885878': 'Hackney', 'b18250415': 'Wandsworth', 'b19786402': 'Bexley', 'b19786955': 'Croydon', 'b19790818': 'Barnet', 'b19874443': 'Southwark', 'b19784570': 'Barking and Dagenham', 'b18252382': 'Camden', 'b18236327': 'Wandsworth', 'b19788010': 'Ealing', 'b18238786': 'Camden', 'b18044669': 'Camden', 'b18235670': 'Kensington and Chelsea', 'b18045133': 'City of Westminster', 'b19792554': 'Waltham Forest', 'b18121536': 'Greenwich', 'b18237812': 'Greenwich', 'b19955297': 'City of Westminster', 'b1803827x': 'City of Westminster', 'b1982306x': 'Tower Hamlets', 'b19877869': 'Ealing', 'b18236492': 'Wandsworth', 'b18238439': 'Camden', 'b17997744': 'Tower Hamlets', 'b18235414': 'Hammersmith and Fulham', 'b19952843': 'Tower Hamlets', 'b18245493': 'City of Westminster', 'b19952594': 'Tower Hamlets', 'b18248974': 'Haringey', 'b19784144': 'Barking and Dagenham', 'b18106584': 'Lewisham', 'b19884655': 'City of London', 'b18249000': 'Haringey', 'b19955649': 'Southwark', 'b1825441x': 'City of Westminster', 'b19953422': 'Hammersmith and Fulham', 'b19785811': 'Bromley', 'b19881022': 'Sutton', 'b19789932': 'Bexley', 'b19822339': 'Hackney', 'b19877407': 'Merton', 'b19880376': 'Hounslow', 'b19795269': 'Camden', 'b19788332': 'Barnet', 'b19784491': 'Barking and Dagenham', 'b19787947': 'Ealing', 'b18251936': 'Camden', 
'b18254147': 'City of Westminster', 'b1987571x': 'Camden', 'b1987635x': 'Waltham Forest', 'b19793042': 'Waltham Forest', 'b18239146': 'Islington', 'b19791392': 'Hillingdon', 'b18108970': 'Lewisham', 'b19878606': 'Redbridge', 'b18246606': 'Southwark', 'b1988008x': 'Richmond upon Thame', 'b19954992': 'Tower Hamlets', 'b18018348': 'Tower Hamlets', 'b17999029': 'City of London', 'b1988526x': 'Hackney', 'b19879556': 'Waltham Forest', 'b18248512': 'Islington', 'b18239201': 'Islington', 'b19955868': 'Southwark', 'b19881976': 'Haringey', 'b19785367': 'Bexley', 'b1988168x': 'Haringey', 'b19954190': 'Lambeth', 'b19789464': 'Enfield', 'b19976203': 'Kingston upon Thames', 'b18253398': 'City of London', 'b19971254': 'Newham', 'b20056825': 'Hackney', 'b19876920': 'Waltham Forest', 'b19823885': 'Waltham Forest', 'b17999686': 'Camden', 'b18238233': 'Hammersmith and Fulham', 'b1824726x': 'City of Westminster', 'b19790041': 'Bexley', 'b1988283x': 'Merton', 'b19956599': 'City of Westminster', 'b19789208': 'Enfield', 'b19794812': 'Enfield', 'b19954736': 'Tower Hamlets', 'b19783759': 'Ealing', 'b18238154': 'Hammersmith and Fulham', 'b1997078x': 'Haringey', 'b19884138': 'City of London', 'b19879738': 'Richmond upon Thame', 'b17998086': 'City of Westminster', 'b20056357': 'Islington', 'b18254123': 'Camden', 'b18237198': 'Lewisham', 'b19956770': 'Southwark', 'b1979180x': 'Barnet', 'b18249395': 'Haringey', 'b18236868': 'Tower Hamlets', 'b19970122': 'Kingston upon Thames', 'b19883304': 'Redbridge', 'b19880960': 'Brent', 'b18253647': 'City of London', 'b20056175': 'Islington', 'b19787431': 'Barking and Dagenham', 'b18252515': 'London County Council', 'b19787121': 'Croydon', 'b19796274': 'Brent', 'b19783322': 'Ealing', 'b19791550': 'Hillingdon', 'b1824404x': 'City of London', 'b19795075': 'Enfield', 'b20057088': 'City of Westminster', 'b19882300': 'Merton', 'b1824502x': 'Lewisham', 'b19793893': 'Havering', 'b17998967': 'Lewisham', 'b18219299': 'Southwark', 'b19969363': 'Camden', 'b18245390': 
'City of Westminster', 'b19824191': 'Kensington and Chelsea', 'b19874388': 'Hammersmith and Fulham', 'b19953859': 'Tower Hamlets', 'b18236686': 'Tower Hamlets', 'b19876725': 'Waltham Forest', 'b1811166x': 'Southwark', 'b19821657': 'Bromley', 'b1823561x': 'Hammersmith and Fulham', 'b18235888': 'Kensington and Chelsea', 'b19874650': 'Newham', 'b18250609': 'Wandsworth', 'b19790600': 'Barnet', 'b19788526': 'Barnet', 'b19788836': 'Enfield', 'b19784806': 'Bromley', 'b19823502': 'Greenwich', 'b19824221': 'Kensington and Chelsea', 'b1824564x': 'City of Westminster', 'b19785173': 'Bexley', 'b19822108': 'Hackney', 'b19881769': 'Haringey', 'b19794654': 'Hillingdon', 'b18111087': 'Camden', 'b18118227': 'Islington', 'b19885076': 'Tower Hamlets', 'b19954852': 'Tower Hamlets', 'b19794381': 'Hillingdon', 'b19969776': 'Redbridge', 'b18039819': 'Hackney', 'b18239754': 'Lambeth', 'b19788782': 'Enfield', 'b18222833': 'Tower Hamlets', 'b19794034': 'Havering', 'b18250270': 'Hackney', 'b18235335': 'Hammersmith and Fulham', 'b18238932': 'Camden', 'b19877092': 'Waltham Forest', 'b18247726': 'City of Westminster', 'b19784776': 'Barking and Dagenham', 'b18247891': 'City of Westminster', 'b19955108': 'City of Westminster', 'b19955455': 'City of Westminster', 'b19875526': 'Southwark', 'b18249942': 'Hackney', 'b18251894': 'Camden', 'b19878011': 'Ealing', 'b19954505': 'Lewisham', 'b18246412': 'Southwark', 'b19793388': 'Bromley', 'b19877675': 'Hounslow', 'b1979356x': 'Bromley', 'b19880418': 'Hounslow', 'b19783929': 'Ealing', 'b19879945': 'Richmond upon Thame', 'b19884308': 'City of London', 'b19791239': 'Havering', 'b18254603': 'City of Westminster', 'b19971060': 'Waltham Forest', 'b18254354': 'City of Westminster', 'b18018324': 'City of Westminster', 'b18250853': 'Wandsworth', 'b19796717': 'Brent', 'b19883821': 'Sutton', 'b19796869': 'Brent', 'b19783693': 'Ealing', 'b18246667': 'Southwark', 'b19789075': 'Enfield', 'b1824709x': 'Southwark', 'b19955406': 'City of Westminster', 'b19885003': 
'Greenwich', 'b18250427': 'Wandsworth', 'b18122206': 'Southwark', 'b17998141': 'Camden', 'b1821941x': 'Southwark', 'b19885829': 'Hackney', 'b1996917x': 'Newham', 'b18248706': 'Islington', 'b18237307': 'Islington', 'b18237654': 'Greenwich', 'b18239419': 'Islington', 'b19956903': 'Southwark', 'b19824427': 'Harrow', 'b19882154': 'Merton', 'b19879052': 'Richmond upon Thame', 'b19789695': 'Bexley', 'b19797400': 'Haringey', 'b18245201': 'City of Westminster', 'b18252722': 'London County Council', 'b18244889': 'Lewisham', 'b19787339': 'Barking and Dagenham', 'b19885891': 'Hackney', 'b18250476': 'Wandsworth', 'b19790831': 'Barnet', 'b19822509': 'Hackney', 'b20056965': 'Southwark', 'b19874467': 'Harrow', 'b18245250': 'City of Westminster', 'b19786426': 'Bexley', 'b18248378': 'City of Westminster', 'b18117338': 'Southwark', 'b1822264x': 'City of London', 'b18038219': 'Camden', 'b19785380': 'Bexley', 'b18236303': 'Wandsworth', 'b18235657': 'Kensington and Chelsea', 'b18237873': 'Greenwich', 'b19956010': 'Southwark', 'b19792578': 'Waltham Forest', 'b20056540': 'Hackney', 'b18251079': 'Wandsworth', 'b19787595': 'Barking and Dagenham', 'b18238452': 'Camden', 'b19953197': 'Southwark', 'b19877845': 'Ealing', 'b18248950': 'Haringey', 'b1978434x': 'Barking and Dagenham', 'b18253921': 'Southwark', 'b18106560': 'Lewisham', 'b18106420': 'Camden', 'b19955662': 'Southwark', 'b19784168': 'Barking and Dagenham', 'b19953999': 'Tower Hamlets', 'b18249024': 'Haringey', 'b19875733': 'Camden', 'b19822352': 'Hackney', 'b19953446': 'Hammersmith and Fulham', 'b18045157': 'Lambeth', 'b18048304': 'Camden', 'b19788319': 'Ealing', 'b19788642': 'Enfield', 'b18106705': 'Hackney', 'b19795245': 'Hackney', 'b18254160': 'City of Westminster', 'b19955595': 'Southwark', 'b19787923': 'Ealing', 'b18251407': 'Camden', 'b18223515': 'Kensington and Chelsea', 'b18111415': 'Greenwich', 'b18254305': 'City of Westminster', 'b18254652': 'City of Westminster', 'b1988395x': 'Sutton', 'b1988140x': 'Newham', 'b18254299': 
'City of Westminster', 'b19785690': 'Bromley', 'b19821463': 'Bromley', 'b19878953': 'Richmond upon Thame', 'b19877468': 'Merton', 'b19880315': 'Hounslow', 'b19952715': 'Tower Hamlets', 'b19885799': 'Hackney', 'b19955844': 'Southwark', 'b18248573': 'Islington', 'b19976197': 'Kingston upon Thames', 'b19785434': 'Bromley', 'b19785343': 'Bexley', 'b1824662x': 'Southwark', 'b19885246': 'Hackney', 'b19878485': 'Redbridge', 'b19793716': 'Bromley', 'b19969909': 'Redbridge', 'b19789191': 'Enfield', 'b19789440': 'Enfield', 'b18220691': 'Hackney', 'b19971503': 'Merton', 'b19797874': 'Hillingdon', 'b19790065': 'Barnet', 'b19877286': 'Merton', 'b19957166': 'Southwark', 'b19879532': 'Waltham Forest', 'b19956575': 'City of Westminster', 'b18249620': 'Tower Hamlets', 'b19878229': 'Redbridge', 'b19883146': 'Kingston upon Thames', 'b17999406': 'Islington', 'b18238130': 'Hammersmith and Fulham', 'b1823821x': 'Hammersmith and Fulham', 'b19783772': 'Ealing', 'b19879283': 'Richmond upon Thame', 'b18247283': 'City of Westminster', 'b19957191': 'Tower Hamlets', 'b19884114': 'City of London', 'b18238683': 'Camden', 'b19883493': 'Tower Hamlets', 'b18236844': 'Tower Hamlets', 'b1995329x': 'Hammersmith and Fulham', 'b19875289': 'Kingston upon Thames', 'b19880947': 'Brent', 'b18253118': 'City of London', 'b19789221': 'Enfield', 'b19789993': 'Bexley', 'b18252576': 'London County Council', 'b19787108': 'Croydon', 'b19787455': 'Barking and Dagenham', 'b19876336': 'Waltham Forest', 'b19791823': 'Barnet', 'b19783346': 'Ealing', 'b19796298': 'Brent', 'b20274798': 'Harrow', 'b18219056': 'Southwark', 'b19975880': 'Brent', 'b20056370': 'Islington', 'b18237460': 'Islington', 'b18239626': 'Lambeth', 'b18236510': 'Wandsworth', 'b19885404': 'Hackney', 'b19882361': 'Sutton', 'b18245626': 'City of Westminster', 'b19823745': 'Greenwich', 'b18123703': 'Lewisham', 'b20056151': 'Islington', 'b19876701': 'Waltham Forest', 'b19876257': 'Redbridge', 'b18244932': 'Lewisham', 'b19953872': 'Tower Hamlets', 
'b19874911': 'Kingston upon Thames', 'b18250622': 'Wandsworth', 'b19790661': 'Barnet', 'b1996934x': 'Hounslow', 'b18121160': 'Camden', 'b19824245': 'Kensington and Chelsea', 'b18218891': 'Southwark', 'b19788812': 'Enfield', 'b19823563': 'Greenwich', 'b19823290': 'Greenwich', 'b19795014': 'Enfield', 'b18222584': 'Lambeth', 'b19957002': 'Southwark', 'b18252990': 'London County Council', 'b19794630': 'Hillingdon', 'b19822169': 'Hackney', 'b19790521': 'Barnet', 'b19884758': 'City of London', 'b18222365': 'Islington', 'b18238919': 'Camden', 'b1982158x': 'Bromley', 'b19790296': 'Barnet', 'b19953410': 'Hammersmith and Fulham', 'b19877079': 'Waltham Forest', 'b18235359': 'Hammersmith and Fulham', 'b1824791x': 'City of Westminster', 'b19784752': 'Barking and Dagenham', 'b18247702': 'City of Westminster', 'b18247878': 'City of Westminster', 'b18251870': 'Camden', 'b19875502': 'Southwark', 'b19955431': 'City of Westminster', 'b19955121': 'City of Westminster', 'b19878035': 'Ealing', 'b18246473': 'Southwark', 'b19880431': 'Hounslow', 'b1825083x': 'Wandsworth', 'b19877614': 'Hounslow', 'b19793364': 'Bromley', 'b19880121': 'Richmond upon Thame', 'b19788101': 'Ealing', 'b19884321': 'City of London', 'b18254627': 'City of Westminster', 'b18254330': 'City of Westminster', 'b19791215': 'Havering', 'b19796845': 'Brent', 'b19793509': 'Bromley', 'b18248494': 'Islington', 'b19789051': 'Enfield', 'b1823897x': 'Camden', 'b19968991': 'Hounslow', 'b19954529': 'Lewisham', 'b17998438': 'Greenwich', 'b18116449': 'Islington', 'b17998128': 'City of Westminster', 'b1979499x': 'Enfield', 'b18239432': 'Islington', 'b19956964': 'Southwark', 'b19824403': 'Harrow', 'b18237678': 'Greenwich', 'b19883808': 'Sutton', 'b19970353': 'Richmond upon Thame', 'b18253568': 'City of London', 'b19879076': 'Richmond upon Thame', 'b19882178': 'Merton', 'b19786190': 'Croydon', 'b1824872x': 'Islington', 'b18237587': 'Greenwich', 'b19797461': 'Haringey', 'b18252746': 'London County Council', 'b18039595': 'Tower 
Hamlets', 'b1823785x': 'Greenwich', 'b18250452': 'Wandsworth', 'b19786992': 'Croydon', 'b19790855': 'Barnet', 'b19790582': 'Barnet', 'b19956381': 'City of Westminster', 'b1979633x': 'Brent', 'b19883778': 'Sutton', 'b18117314': 'Kensington and Chelsea', 'b18249450': 'Haringey', 'b19875344': 'Kingston upon Thames', 'b17997835': 'Tower Hamlets', 'b18121287': 'City of London', 'b20056527': 'City of London', 'b19956034': 'Southwark', 'b18222110': 'Greenwich', 'b19884928': 'Barnet', 'b1978644x': 'Bexley', 'b19791458': 'Hillingdon', 'b18238476': 'Camden', 'b19953604': 'Hammersmith and Fulham', 'b18244725': 'Lewisham', 'b19783486': 'Ealing', 'b19877821': 'Ealing', 'b17950454': 'Tower Hamlets', 'b18248937': 'Camden', 'b19877183': 'Merton', 'b19788484': 'Barnet', 'b19884692': 'City of London', 'b19823174': 'Tower Hamlets', 'b18249048': 'Haringey', 'b1825410x': 'Camden', 'b19875757': 'Camden', 'b1978790x': 'Ealing', 'b19787716': 'Ealing', 'b19822376': 'Hackney', 'b19822807': 'Tower Hamlets', 'b18235980': 'Kensington and Chelsea', 'b18235633': 'Kensington and Chelsea', 'b18042181': 'Camden', 'b18236728': 'Tower Hamlets', 'b18251468': 'Camden', 'b19795221': 'City of Westminster', 'b18239985': 'Lambeth', 'b1995346x': 'Hammersmith and Fulham', 'b18111439': 'Kensington and Chelsea', 'b20106907': 'Wandsworth', 'b18223576': 'Kensington and Chelsea', 'b1821955x': 'Southwark', 'b19952806': 'Tower Hamlets', 'b1824855x': 'Islington', 'b18239183': 'Islington', 'b19876063': 'Redbridge', 'b18043586': 'Wandsworth', 'b18250233': 'Hackney', 'b19954955': 'Tower Hamlets', 'b18246643': 'Southwark', 'b19952739': 'Tower Hamlets', 'b19877444': 'Merton', 'b19880339': 'Hounslow', 'b19874935': 'Kingston upon Thames', 'b19968693': 'Bromley', 'b19881423': 'Newham', 'b18043768': 'Southwark', 'b19794174': 'Havering', 'b1987893x': 'Richmond upon Thame', 'b18039601': 'Tower Hamlets', 'b1982144x': 'Bromley', 'b19969922': 'Kingston upon Thames', 'b19876580': 'Waltham Forest', 'b19793777': 'Bromley', 
'b19976240': 'Kingston upon Thames', 'b19971291': 'Newham', 'b19797850': 'Hillingdon', 'b1822149x': 'Kensington and Chelsea', 'b1799942x': 'City of Westminster', 'b1988316x': 'Kingston upon Thames', 'b18221634': 'Greenwich', 'b18238270': 'Hammersmith and Fulham', 'b19790089': 'Barnet', 'b19880595': 'Hounslow', 'b17998335': 'Hammersmith and Fulham', 'b18247118': 'Southwark', 'b19879519': 'Waltham Forest', 'b19956551': 'City of Westminster', 'b19882208': 'Merton', 'b18249607': 'Tower Hamlets', 'b18238117': 'Hammersmith and Fulham', 'b19878205': 'Redbridge', 'b19954773': 'Tower Hamlets', 'b19783711': 'Ealing', 'b19793194': 'Bromley', 'b18253350': 'City of London', 'b19789427': 'Enfield', 'b19880881': 'Hounslow', 'b19883341': 'Tower Hamlets', 'b1978336x': 'Ealing', 'b19880923': 'Brent', 'b18116838': 'Camden', 'b18253684': 'City of London', 'b19786293': 'Bexley', 'b2005631x': 'Islington', 'b18253179': 'City of London', 'b19791598': 'Hillingdon', 'b19821943': 'City of Westminster', 'b18251195': 'Wandsworth', 'b19787479': 'Barking and Dagenham', 'b19791847': 'Barnet', 'b1997016x': 'Kingston upon Thames', 'b19882695': 'Merton', 'b18237113': 'Lewisham', 'b18239602': 'Lambeth', 'b18237447': 'Islington', 'b19956733': 'City of Westminster', 'b20057040': 'Wandsworth', 'b18236534': 'Wandsworth', 'b1982161x': 'Bromley', 'b19883729': 'Sutton', 'b19882439': 'Sutton', 'b19885428': 'Hackney', 'b1810700x': 'Hammersmith and Fulham', 'b19969326': 'Newham', 'b18245602': 'City of Westminster', 'b18245353': 'City of Westminster', 'b1982354x': 'Greenwich', 'b18248238': 'City of Westminster', 'b18252229': 'Camden', 'b19968334': 'Tower Hamlets', 'b18252552': 'London County Council', 'b19823769': 'Greenwich', 'b18239663': 'Lambeth', 'b19822492': 'Hackney', 'b19876270': 'Redbridge', 'b18244919': 'Lewisham', 'b19790648': 'Barnet', 'b1799892x': 'Lambeth', 'b19824269': 'Kensington and Chelsea', 'b18121147': 'Camden', 'b19788873': 'Enfield', 'b19874698': 'Newham', 'b18222560': 'Lambeth', 
'b18110861': 'Hammersmith and Fulham', 'b19795038': 'Enfield', 'b19794617': 'Hillingdon', 'b18039856': 'Hackney', 'b19794344': 'Havering', 'b18219962': 'Tower Hamlets', 'b19876397': 'Waltham Forest', 'b18244683': 'Lewisham', 'b19790508': 'Barnet', 'b1824998x': 'Hackney', 'b18247933': 'City of Westminster', 'b19884771': 'City of London', 'b1987618x': 'Redbridge', 'b18249188': 'Haringey', 'b18246187': 'Tower Hamlets', 'b19877055': 'Waltham Forest', 'b18235372': 'Hammersmith and Fulham', 'b19877584': 'Hounslow', 'b19788290': 'Ealing', 'b18247854': 'City of Westminster', 'b19955418': 'City of Westminster', 'b19955145': 'City of Westminster', 'b19784739': 'Barking and Dagenham', 'b18251857': 'Camden', 'b19875563': 'Camden', 'b19822145': 'Hackney', 'b19822637': 'City of Westminster', 'b19877638': 'Hounslow', 'b19783966': 'Barnet', 'b19880455': 'Hounslow', 'b19879763': 'Richmond upon Thame', 'b19793340': 'Bromley', 'b19788125': 'Ealing', 'b19884345': 'City of London', 'b19793698': 'Bromley', 'b1988185x': 'Haringey', 'b18239043': 'Camden', 'b18254317': 'City of Westminster', 'b18254640': 'City of Westminster', 'b19791276': 'Hillingdon', 'b18245377': 'City of Westminster', 'b19793522': 'Bromley', 'b18248214': 'City of Westminster', 'b18244208': 'Camden', 'b19785069': 'Bromley', 'b19878059': 'Ealing', 'b18246321': 'Tower Hamlets', 'b19954542': 'Lewisham', 'b19885167': 'Hackney', 'b18038372': 'Hackney', 'b19957014': 'Southwark', 'b1987909x': 'Richmond upon Thame', 'b19879891': 'Richmond upon Thame', 'b17998451': 'Kensington and Chelsea', 'b18237617': 'Greenwich', 'b18239456': 'Islington', 'b19824464': 'Harrow', 'b19956940': 'Southwark', 'b19876865': 'Waltham Forest', 'b19954177': 'Lambeth', 'b1995363x': 'Hammersmith and Fulham', 'b19797199': 'Haringey', 'b18250816': 'Wandsworth', 'b19796821': 'Brent', 'b19786608': 'Croydon', 'b19969132': 'Barnet', 'b18253544': 'City of London', 'b19792128': 'Barnet', 'b19797333': 'Haringey', 'b19794976': 'Enfield', 'b18221579': 'City of 
Westminster', 'b19790879': 'Barnet', 'b19786463': 'Bexley', 'b18253453': 'City of London', 'b19824099': 'Kensington and Chelsea', 'b19883754': 'Sutton', 'b19875368': 'Wandsworth', 'b1823849x': 'Camden', 'b19876087': 'Redbridge', 'b20056503': 'City of London', 'b19884904': 'Enfield', 'b19788459': 'Barnet', 'b19956058': 'Southwark', 'b19877808': 'Ealing', 'b18244749': 'Lewisham', 'b19796316': 'Brent', 'b19953665': 'Hammersmith and Fulham', 'b19791471': 'Hillingdon', 'b19823150': 'Tower Hamlets', 'b19788460': 'Barnet', 'b18253969': 'Tower Hamlets', 'b19787376': 'Barking and Dagenham', 'b19955625': 'Southwark', 'b18249061': 'Haringey', 'b19875770': 'Camden', 'b19953483': 'Hammersmith and Fulham', 'b18108672': 'Camden', 'b19822820': 'Tower Hamlets', 'b1824516x': 'City of Westminster', 'b18219573': 'Southwark', 'b18235967': 'Kensington and Chelsea', 'b1995282x': 'Tower Hamlets', 'b19784387': 'Barking and Dagenham', 'b19795208': 'Enfield', 'b18222791': 'City of Westminster', 'b18251444': 'Camden', 'b18236704': 'Tower Hamlets', 'b1978773x': 'Ealing', 'b1804475x': 'Southwark', 'b19791045': 'Ealing', 'b18223552': 'Kensington and Chelsea', 'b19874571': 'Newham', 'b18248913': 'Camden', 'b1988199x': 'Haringey', 'b19785653': 'Bromley', 'b19881198': 'Sutton', 'b1979440x': 'Hillingdon', 'b19876518': 'Waltham Forest', 'b19881629': 'Newham', 'b19821426': 'Bromley', 'b19878916': 'Richmond upon Thame', 'b19885751': 'Hackney', 'b19954979': 'Tower Hamlets', 'b19952752': 'Tower Hamlets', 'b19792463': 'Havering', 'b1995508x': 'City of Westminster', 'b19881447': 'Newham', 'b1987604x': 'Redbridge', 'b19885283': 'Hackney', 'b19794150': 'Havering', 'b19790314': 'Barnet', 'b19793753': 'Bromley', 'b1995475x': 'Tower Hamlets', 'b19792803': 'Waltham Forest', 'b19954281': 'Lambeth', 'b17999625': 'Hackney', 'b18238257': 'Hammersmith and Fulham', 'b19884485': 'City of London', 'b18247131': 'Southwark', 'b19955881': 'Southwark', 'b19883109': 'Kingston upon Thames', 'b18249668': 'Tower Hamlets', 
'b19954025': 'Lambeth', 'b19878266': 'Redbridge', 'b19970493': 'Kingston upon Thames', 'b19783735': 'Ealing', 'b19880716': 'Hounslow', 'b19879799': 'Richmond upon Thame', 'b19884151': 'City of London', 'b18253374': 'City of London', 'b19789403': 'Enfield', 'b19956411': 'City of Westminster', 'b19976264': 'Kingston upon Thames', 'b19970638': 'Kingston upon Thames', 'b18251201': 'Wandsworth', 'b18116814': 'Camden', 'b18253155': 'City of London', 'b20274865': 'Harrow', 'b19956538': 'City of Westminster', 'b19791860': 'Barnet', 'b19879131': 'Richmond upon Thame', 'b19957221': 'Tower Hamlets', 'b1988090x': 'Brent', 'b18237137': 'Lewisham', 'b18237423': 'Islington', 'b20056333': 'Islington', 'b19956289': 'City of Westminster', 'b20057064': 'City of Westminster', 'b1824497x': 'Islington', 'b17998906': 'Lambeth', 'b18250555': 'Wandsworth', 'b19882415': 'Sutton', 'b19969302': 'Newham', 'b19786815': 'Croydon', 'b19874364': 'City of Westminster', 'b19823708': 'Greenwich', 'b1978885x': 'Enfield', 'b19968310': 'Hounslow', 'b18252205': 'Camden', 'b19821967': 'Hackney', 'b19821633': 'Bromley', 'b18249346': 'Haringey', 'b19883031': 'Kingston upon Thames', 'b18219184': 'Southwark', 'b18245080': 'Lewisham', 'b19824282': 'Kensington and Chelsea', 'b19792256': 'Havering', 'b18121123': 'Tower Hamlets', 'b18236558': 'Wandsworth', 'b18222092': 'Greenwich', 'b18238531': 'Camden', 'b19955935': 'Southwark', 'b18219986': 'Tower Hamlets', 'b18237526': 'Greenwich', 'b18245882': 'Tower Hamlets', 'b18247957': 'City of Westminster', 'b1987554x': 'Camden', 'b19884710': 'City of London', 'b18222894': 'Tower Hamlets', 'b19875095': 'Kingston upon Thames', 'b19878138': 'Redbridge', 'b19953343': 'Hammersmith and Fulham', 'b18235396': 'Hammersmith and Fulham', 'b19877031': 'Waltham Forest', 'b19784715': 'Barking and Dagenham', 'b19784247': 'Barking and Dagenham', 'b18247830': 'City of Westminster', 'b19955169': 'City of Westminster', 'b18251833': 'Camden', 'b19794368': 'Hillingdon', 'b19822613': 
'Islington', 'b19791781': 'Barnet', 'b1987621x': 'Redbridge', 'b19783942': 'Barnet', 'b19793327': 'Bromley', 'b19880479': 'Hounslow', 'b18106080': 'Southwark', 'b18253246': 'City of London', 'b19795415': 'Barnet', 'b19884369': 'City of London', 'b19976094': 'Kingston upon Thames', 'b18239067': 'Camden', 'b19791252': 'Hillingdon', 'b18220022': 'Tower Hamlets', 'b18223059': 'Newham', 'b19821414': 'Bromley', 'b19970924': 'Richmond upon Thame', 'b18248457': 'Islington', 'b18252849': 'London County Council', 'b19785045': 'Bromley', 'b19881873': 'Haringey', 'b18123715': 'Lewisham', 'b19878072': 'Ealing', 'b19954566': 'Lewisham', 'b18018464': 'City of London', 'b19885143': 'Hackney', 'b18247301': 'City of Westminster', 'b19879301': 'Richmond upon Thame', 'b19957038': 'Southwark', 'b19971151': 'Redbridge', 'b18237630': 'Greenwich', 'b1979731x': 'Haringey', 'b19876841': 'Waltham Forest', 'b19824440': 'Harrow', 'b18250439': 'Wandsworth', 'b19821396': 'Bromley', 'b19881290': 'Sutton', 'b19970821': 'Haringey', 'b19790120': 'Barnet', 'b19796808': 'Brent', 'b18253520': 'City of London', 'b19786669': 'Croydon', 'b19789361': 'Enfield', 'b1823947x': 'Islington', 'b19971345': 'Merton', 'b19794952': 'Enfield', 'b18039558': 'Tower Hamlets', 'b19824014': 'Kensington and Chelsea', 'b19786487': 'Bexley', 'b20056291': 'Islington', 'b19875307': 'Kingston upon Thames', 'b18238701': 'Camden', 'b18249413': 'Haringey', 'b19883730': 'Sutton', 'b18118148': 'Wandsworth', 'b17997872': 'Camden', 'b18237897': 'Greenwich', 'b19884965': 'Tower Hamlets', 'b19956071': 'City of Westminster', 'b18251018': 'Wandsworth', 'b19787534': 'Barking and Dagenham', 'b19787285': 'Croydon', 'b18244762': 'Lewisham', 'b19796377': 'Brent', 'b19953136': 'Southwark', 'b19953641': 'Hammersmith and Fulham', 'b19783449': 'Ealing', 'b19975703': 'Haringey', 'b18219378': 'Southwark', 'b18218635': 'Southwark', 'b19823137': 'Tower Hamlets', 'b18253945': 'Tower Hamlets', 'b18044736': 'Southwark', 'b19823265': 'Greenwich', 
'b19787753': 'Ealing', 'b19822844': 'Tower Hamlets', 'b1996951x': 'Harrow', 'b18246941': 'Southwark', 'b19788149': 'Ealing', 'b19882063': 'Merton', 'b18235943': 'Kensington and Chelsea', 'b19790302': 'Barnet', 'b1799729x': 'Southwark', 'b18236765': 'Tower Hamlets', 'b18120660': 'Southwark', 'b19791069': 'Ealing', 'b18235499': 'Hammersmith and Fulham', 'b18250762': 'Wandsworth', 'b18254664': 'City of Westminster', 'b1811121x': 'Hackney', 'b18245146': 'Lewisham', 'b19874558': 'Newham', 'b18245419': 'City of Westminster', 'b19788447': 'Barnet', 'b18043549': 'Hackney', 'b19822261': 'Hackney', 'b19876531': 'Waltham Forest', 'b19881605': 'Newham', 'b19785677': 'Bromley', 'b19876026': 'Redbridge', 'b19954918': 'Tower Hamlets', 'b19885775': 'Hackney', 'b19821402': 'Bromley', 'b19969478': 'Camden', 'b19952776': 'Tower Hamlets', 'b19788629': 'Enfield', 'b19785458': 'Bromley', 'b19785902': 'Bromley', 'b19881460': 'Newham', 'b19794137': 'Havering', 'b19794423': 'Hillingdon', 'b18250373': 'Wandsworth', 'b19794095': 'Havering', 'b19790338': 'Barnet', 'b1979244x': 'Havering', 'b17999601': 'City of Westminster', 'b19877778': 'Hounslow', 'b19878795': 'Richmond upon Thame', 'b1988073x': 'Hounslow', 'b19784508': 'Barking and Dagenham', 'b18247155': 'City of Westminster', 'b18042235': 'Lewisham', 'b19955200': 'City of Westminster', 'b19874972': 'Kingston upon Thames', 'b19883122': 'Kingston upon Thames', 'b19878539': 'Redbridge', 'b18249644': 'Tower Hamlets', 'b18250993': 'Wandsworth', 'b19954001': 'Lambeth', 'b18253313': 'City of London', 'b19976288': 'Kingston upon Thames', 'b18254500': 'City of Westminster', 'b17998803': 'Kensington and Chelsea', 'b19789178': 'Enfield', 'b18251225': 'Wandsworth', 'b19970614': 'Kingston upon Thames', 'b19883389': 'Tower Hamlets', 'b1823964x': 'Lambeth', 'b20274841': 'Harrow', 'b18248664': 'Islington', 'b19789282': 'Enfield', 'b18239390': 'Islington', 'b19956514': 'City of Westminster', 'b19791884': 'Barnet', 'b18122103': 'Southwark', 'b19879118': 
'Richmond upon Thame', 'b19957208': 'Tower Hamlets', 'b19882658': 'Merton', 'b18237150': 'Lewisham', 'b18252047': 'Camden', 'b19876774': 'Waltham Forest', 'b19882385': 'Sutton', 'b1979227x': 'Havering', 'b19789828': 'Bexley', 'b19786839': 'Croydon', 'b19790934': 'Barnet', 'b18248275': 'City of Westminster', 'b18245316': 'City of Westminster', 'b19786347': 'Bexley', 'b19823721': 'Greenwich', 'b19824117': 'Kensington and Chelsea', 'b19821980': 'Hackney', 'b18252266': 'Camden', 'b18246308': 'Tower Hamlets', 'b19975934': 'Haringey', 'b19790685': 'Barnet', 'b18121457': 'Kensington and Chelsea', 'b19823587': 'Greenwich', 'b18218830': 'Southwark', 'b19784880': 'Bromley', 'b19969685': 'Hounslow', 'b18236571': 'Wandsworth', 'b18238518': 'Camden', 'b18120854': 'Tower Hamlets', 'b19953586': 'Hammersmith and Fulham', 'b20057003': 'Southwark', 'b1825259x': 'London County Council', 'b18248895': 'Camden', 'b19952983': 'Southwark', 'b18247970': 'City of Westminster', 'b19884734': 'City of London', 'b19876233': 'Redbridge', 'b1979390x': 'Havering', 'b19822984': 'Tower Hamlets', 'b19822455': 'Hackney', 'b19953367': 'Hammersmith and Fulham', 'b18244956': 'Lewisham', 'b19877547': 'Hounslow', 'b19877018': 'Waltham Forest', 'b19784260': 'Barking and Dagenham', 'b18247817': 'City of Westminster', 'b19878527': 'Redbridge', 'b18106316': 'Kensington and Chelsea', 'b19783589': 'Ealing', 'b19822182': 'Hackney', 'b1825388x': 'City of London', 'b19876373': 'Waltham Forest', 'b19822674': 'Tower Hamlets', 'b19794307': 'Havering', 'b18111555': 'City of London', 'b19793303': 'Bromley', 'b19793650': 'Bromley', 'b19788162': 'Ealing', 'b19795439': 'Barnet', 'b18218659': 'Southwark', 'b18239006': 'Camden', 'b18223072': 'Greenwich', 'b1988512x': 'Hackney', 'b19874893': 'Kingston upon Thames', 'b18248470': 'Islington', 'b19785021': 'Bromley', 'b19881812': 'Haringey', 'b18252862': 'London County Council', 'b18246497': 'Southwark', 'b1824614x': 'Tower Hamlets', 'b18246369': 'Tower Hamlets', 'b18247362': 
'City of Westminster', 'b19788356': 'Barnet', 'b1978613x': 'Croydon', 'b18248780': 'Islington', 'b18043069': 'Southwark', 'b19876828': 'Waltham Forest', 'b19971138': 'Redbridge', 'b19821372': 'Bromley', 'b19970845': 'Haringey', 'b19790107': 'Barnet', 'b19789610': 'Enfield', 'b19789348': 'Enfield', 'b19797485': 'Haringey', 'b19794939': 'Enfield', 'b19797370': 'Haringey', 'b18039571': 'Tower Hamlets', 'b18249784': 'Hackney', 'b18221531': 'Kensington and Chelsea', 'b19957051': 'Southwark', 'b19879325': 'Richmond upon Thame', 'b20056278': 'Islington', 'b19956897': 'Southwark', 'b19883249': 'Kingston upon Thames', 'b18236960': 'Lewisham', 'b19883717': 'Tower Hamlets', 'b19875320': 'Kingston upon Thames', 'b18238725': 'Camden', 'b19796687': 'Brent', 'b1823642x': 'Wandsworth', 'b18250683': 'Wandsworth', 'b18253052': 'London County Council', 'b18253507': 'City of London', 'b18252679': 'London County Council', 'b19884941': 'Tower Hamlets', 'b19956095': 'City of Westminster', 'b1987585x': 'Camden', 'b19787510': 'Barking and Dagenham', 'b19796353': 'Brent', 'b19796420': 'Brent', 'b19953112': 'Southwark', 'b19791434': 'Hillingdon', 'b19783462': 'Ealing', 'b18219354': 'Southwark', 'b18048432': 'Camden', 'b19975727': 'Richmond upon Thame', 'b19787777': 'Ealing', 'b18044712': 'Tower Hamlets', 'b18236297': 'Wandsworth', 'b19822868': 'Tower Hamlets', 'b18048389': 'Tower Hamlets', 'b19968292': 'Hounslow', 'b18237216': 'Lewisham', 'b18116772': 'Camden', 'b18239924': 'Lambeth', 'b18236030': 'Kensington and Chelsea', 'b18120647': 'City of London', 'b18236741': 'Tower Hamlets', 'b17997148': 'Southwark', 'b1823592x': 'Kensington and Chelsea', 'b1988204x': 'Merton', 'b18250749': 'Wandsworth', 'b18245432': 'City of Westminster', 'b19969533': 'Harrow', 'b19874534': 'Newham', 'b18245122': 'Lewisham', 'b19823447': 'Greenwich', 'b19824324': 'Kensington and Chelsea', 'b19823113': 'Tower Hamlets', 'b19822248': 'Hackney', 'b19785616': 'Kensington and Chelsea', 'b19876555': 'Waltham Forest', 
'b19876002': 'Redbridge', 'b19954931': 'Tower Hamlets', 'b19885714': 'Hackney', 'b1825035x': 'Wandsworth', 'b19969454': 'Camden', 'b19792426': 'Havering', 'b19788393': 'Barnet', 'b19785926': 'Bromley', 'b19794113': 'Havering', 'b19794447': 'Hillingdon', 'b18223485': 'Kensington and Chelsea', 'b19790351': 'Barnet', 'b19969983': 'Kingston upon Thames', 'b1995279x': 'Tower Hamlets', 'b19784697': 'Barking and Dagenham', 'b19875666': 'Camden', 'b18123697': 'Lewisham', 'b19954244': 'Lambeth', 'b18246771': 'Southwark', 'b18238294': 'Hammersmith and Fulham', 'b19877201': 'Merton', 'b19880534': 'Hounslow', 'b19880285': 'Hounslow', 'b19877754': 'Hounslow', 'b18106122': 'Hammersmith and Fulham', 'b19884448': 'City of London', 'b18247179': 'City of Westminster', 'b19784521': 'Barking and Dagenham', 'b19976112': 'Kingston upon Thames', 'b19955224': 'City of Westminster', 'b18251596': 'Camden', 'b18237095': 'Lewisham', 'b19954797': 'Tower Hamlets', 'b19970985': 'Richmond upon Thame', 'b19954062': 'Lambeth', 'b19880753': 'Hounslow', 'b1824659x': 'Southwark', 'b18253337': 'City of London', 'b1997050x': 'Kingston upon Thames', 'b19789117': 'Enfield', 'b19792840': 'Waltham Forest', 'b18254524': 'City of Westminster', 'b19970675': 'Richmond upon Thame', 'b1978658x': 'Croydon', 'b18253192': 'City of London', 'b1799875x': 'Islington', 'b18248640': 'Islington', 'b19952673': 'Tower Hamlets', 'b19968723': 'Sutton', 'b19882671': 'Merton', 'b19885957': 'Hackney', 'b19879179': 'Richmond upon Thame', 'b19957269': 'Tower Hamlets', 'b18237174': 'Lewisham', 'b19956794': 'Southwark', 'b18252060': 'Camden', 'b19970183': 'Kingston upon Thames', 'b18239651': 'Lambeth', 'b18250518': 'Wandsworth', 'b19885489': 'Hackney', 'b19882907': 'Richmond upon Thame', 'b19882452': 'Sutton', 'b18248251': 'City of Westminster', 'b19790910': 'Barnet', 'b19789804': 'Bexley', 'b19786360': 'Bexley', 'b1988350x': 'Tower Hamlets', 'b19824130': 'Kensington and Chelsea', 'b18252242': 'Camden', 'b18117090': 'Islington', 
'b19787182': 'Croydon', 'b17997446': 'Southwark', 'b19975910': 'Sutton', 'b19788964': 'Enfield', 'b19792219': 'Havering', 'b1824533x': 'City of Westminster', 'b19956137': 'City of Westminster', 'b20056485': 'City of London', 'b19953562': 'Hammersmith and Fulham', 'b20057027': 'Wandsworth', 'b18238579': 'Camden', 'b19877985': 'Ealing', 'b18245845': 'Tower Hamlets', 'b18245596': 'City of Westminster', 'b1823916x': 'Islington', 'b20056734': 'Hackney', 'b18247994': 'City of Westminster', 'b1979518x': 'Enfield', 'b1978420x': 'Barking and Dagenham', 'b19822479': 'Hackney', 'b18249127': 'Haringey', 'b18251602': 'Camden', 'b19875058': 'Kingston upon Thames', 'b19953306': 'Hammersmith and Fulham', 'b19788988': 'Enfield', 'b1995296x': 'Southwark', 'b19823307': 'Greenwich', 'b18110952': 'Southwark', 'b19795634': 'Hillingdon', 'b19875587': 'Camden', 'b18250543': 'Wandsworth', 'b19822650': 'Tower Hamlets', 'b19794320': 'Havering', 'b19955728': 'Southwark', 'b18223679': 'Kensington and Chelsea', 'b19793923': 'Havering', 'b19793674': 'Bromley', 'b19788186': 'Ealing', 'b18218672': 'Southwark', 'b19954682': 'Lewisham', 'b19795452': 'Barnet', 'b20106750': 'Wandsworth', 'b18238993': 'Camden', 'b18246163': 'Tower Hamlets', 'b18223011': 'City of Westminster', 'b19877560': 'Hounslow', 'b18247696': 'City of Westminster', 'b18250865': 'Wandsworth', 'b19785008': 'Bromley', 'b1823902x': 'Camden', 'b19968917': 'Hounslow', 'b19876610': 'Waltham Forest', 'b19881344': 'Newham', 'b19791410': 'Hillingdon', 'b18252801': 'London County Council', 'b18123752': 'Lewisham', 'b19794587': 'Hillingdon', 'b19885106': 'Hackney', 'b18246345': 'Tower Hamlets', 'b1979129x': 'Hillingdon', 'b19877699': 'Hounslow', 'b19971448': 'Merton', 'b19876804': 'Waltham Forest', 'b1799682x': 'Southwark', 'b19792980': 'Waltham Forest', 'b19824488': 'Harrow', 'b19883882': 'Sutton', 'b18238373': 'Camden', 'b1824743x': 'City of Westminster', 'b19876671': 'Waltham Forest', 'b19790168': 'Barnet', 'b1997632x': 'Kingston upon 
Thames', 'b19792189': 'Havering', 'b19968978': 'Hounslow', 'b19797357': 'Haringey', 'b19881149': 'Sutton', 'b19794915': 'Enfield', 'b19878163': 'Redbridge', 'b19874418': 'Lewisham', 'b19883080': 'Kingston upon Thames', 'b19954475': 'Lewisham', 'b19783899': 'Ealing', 'b18218702': 'Southwark', 'b18247349': 'City of Westminster', 'b19957075': 'Southwark', 'b19879349': 'Richmond upon Thame', 'b18253295': 'City of London', 'b20056254': 'Islington', 'b19956307': 'City of Westminster', 'b19883262': 'Redbridge', 'b19875873': 'Camden', 'b19970249': 'Merton', 'b18238749': 'Camden', 'b19791987': 'Barnet', 'b18253076': 'London County Council', 'b19789634': 'Bexley', 'b19786116': 'Croydon', 'b18252655': 'London County Council', 'b19787571': 'Barking and Dagenham', 'b19821827': 'Tower Hamlets', 'b1823690x': 'Tower Hamlets', 'b19796407': 'Brent', 'b19783401': 'Ealing', 'b19953173': 'Southwark', 'b18048419': 'Camden', 'b19975740': 'Richmond upon Thame', 'b18219330': 'Southwark', 'b18239560': 'Lambeth', 'b19787790': 'Ealing', 'b18111701': 'Southwark', 'b18246904': 'Southwark', 'b19882026': 'Merton', 'b18235906': 'Kensington and Chelsea', 'b19969089': 'Barnet', 'b1978840x': 'Barnet', 'b18236017': 'Kensington and Chelsea', 'b19953793': 'Tower Hamlets', 'b17997161': 'Southwark', 'b18250725': 'Wandsworth', 'b19790764': 'Barnet', 'b18245109': 'Lewisham', 'b18245456': 'City of Westminster', 'b19874510': 'Newham', 'b19824300': 'Kensington and Chelsea', 'b1982368x': 'Greenwich', 'b19968140': 'Hounslow', 'b1823723x': 'Islington', 'b19822224': 'Hackney', 'b19881137': 'Sutton', 'b19881642': 'Haringey', 'b18122255': 'Tower Hamlets', 'b19876579': 'Waltham Forest', 'b1978594x': 'Bromley', 'b19885738': 'Hackney', 'b1988302x': 'Kingston upon Thames', 'b19792402': 'Havering', 'b18121317': 'Port of London', 'b18219512': 'Southwark', 'b19790375': 'Barnet', 'b19955790': 'Southwark', 'b19955029': 'Tower Hamlets', 'b19875642': 'Camden', 'b17999133': 'City of Westminster', 'b18246758': 'Southwark', 
'b19878758': 'Redbridge', 'b17999649': 'City of London', 'b19954268': 'Lambeth', 'b19877225': 'Merton', 'b19877730': 'Hounslow', 'b19880510': 'Hounslow', 'b1979311x': 'Bromley', 'b18248755': 'Islington', 'b19784545': 'Barking and Dagenham', 'b19876890': 'Waltham Forest', 'b18116395': 'Islington', 'b19884461': 'City of London', 'b18106109': 'Southwark', 'b17997240': 'Southwark', 'b19976136': 'Kingston upon Thames', 'b18254093': 'Hammersmith and Fulham', 'b19787893': 'Ealing', 'b19791173': 'Havering', 'b19955248': 'City of Westminster', 'b19878576': 'Redbridge', 'b18248469': 'Islington', 'b19783796': 'Ealing', 'b19880777': 'Hounslow', 'b18220666': 'Southwark', 'b19789130': 'Enfield', 'b1825472x': 'City of Westminster', 'b1820983x': 'Southwark', 'b18254548': 'City of Westminster', 'b19876622': 'Waltham Forest', 'b19879489': 'Waltham Forest', 'b18248627': 'Islington', 'b1825200x': 'Camden', 'b19824543': 'Harrow', 'b2005693x': 'Southwark', 'b19954049': 'Lambeth', 'b17998773': 'Islington', 'b19957245': 'Tower Hamlets', 'b19879623': 'Waltham Forest', 'b19885933': 'Hackney', 'b19882610': 'Sutton', 'b19971308': 'Newham', 'b19786566': 'Bexley', 'b1811670x': 'Hackney', 'b18239687': 'Lambeth', 'b1996870x': 'Sutton', 'b1987828x': 'Redbridge', 'b19882920': 'Richmond upon Thame', 'b18250531': 'Wandsworth', 'b19975685': 'Bromley', 'b19882105': 'Merton', 'b19879982': 'Richmond upon Thame', 'b18253714': 'City of London', 'b19789865': 'Bexley', 'b19824154': 'Kensington and Chelsea', 'b18038311': 'City of London', 'b19875241': 'Kingston upon Thames', 'b19883699': 'Tower Hamlets', 'b18117077': 'Hammersmith and Fulham', 'b19822443': 'Hackney', 'b18249577': 'Lambeth', 'b18238087': 'Hammersmith and Fulham', 'b18251286': 'Wandsworth', 'b19975971': 'Kingston upon Thames', 'b19792232': 'Havering', 'b18237538': 'Greenwich', 'b19956113': 'City of Westminster', 'b1978630x': 'Bexley', 'b19883523': 'Tower Hamlets', 'b18238555': 'Camden', 'b18244622': 'Lewisham', 'b19953094': 'Southwark', 
'b17997392': 'Southwark', 'b17950570': 'Tower Hamlets', 'b18244944': 'Lewisham', 'b19953549': 'Hammersmith and Fulham', 'b19784041': 'Barking and Dagenham', 'b19823095': 'Tower Hamlets', 'b18245869': 'Tower Hamlets', 'b18253842': 'City of London', 'b18222389': 'Hammersmith and Fulham', 'b19875071': 'Kingston upon Thames', 'b18249103': 'Haringey', 'b20056758': 'Hackney', 'b19822418': 'Hackney', 'b19953896': 'Tower Hamlets', 'b19784958': 'Bromley', 'b19788216': 'Ealing', 'b19784223': 'Barking and Dagenham', 'b19955492': 'Southwark', 'b18251729': 'Camden', 'b18236467': 'Wandsworth', 'b18110939': 'Kensington and Chelsea', 'b1995332x': 'Hammersmith and Fulham', 'b18223655': 'Kensington and Chelsea', 'b19791768': 'Barnet', 'b19793947': 'Havering', 'b18223382': 'Kensington and Chelsea', 'b19952946': 'Southwark', 'b19876452': 'Waltham Forest', 'b19795476': 'Barnet', 'b18254391': 'City of Westminster', 'b19785793': 'Bromley', 'b19881587': 'Newham', 'b19821785': 'Bromley', 'b18041462': 'Hackney', 'b19885696': 'Hackney', 'b18246102': 'Tower Hamlets', 'b19874704': 'Newham', 'b18245717': 'City of Westminster', 'b19970912': 'Richmond upon Thame', 'b18248433': 'City of Westminster', 'b19874856': 'Newham', 'b19968930': 'Hounslow', 'b18249814': 'Hackney', 'b1995489x': 'Tower Hamlets', 'b19823241': 'Greenwich', 'b19881368': 'Newham', 'b19785574': 'Kensington and Chelsea', 'b18252825': 'London County Council', 'b18250099': 'Hackney', 'b1987750x': 'Hounslow', 'b19790491': 'Barnet', 'b18250403': 'Wandsworth', 'b19792967': 'Waltham Forest', 'b19793613': 'Bromley', 'b19787145': 'Croydon', 'b19971461': 'Merton', 'b19976306': 'Kingston upon Thames', 'b19797114': 'Brent', 'b19874455': 'Southwark', 'b1987814x': 'Redbridge', 'b18250890': 'Wandsworth', 'b18238312': 'Hammersmith and Fulham', 'b19790144': 'Barnet', 'b18248342': 'City of Westminster', 'b18039534': 'Tower Hamlets', 'b19954451': 'Lewisham', 'b18249747': 'Hackney', 'b17996806': 'Southwark', 'b17999364': 'Southwark', 'b19881289': 
'Sutton', 'b18253271': 'City of London', 'b19884254': 'City of London', 'b19879817': 'Richmond upon Thame', 'b19879362': 'Richmond upon Thame', 'b17998189': 'Camden', 'b19970225': 'Merton', 'b19791963': 'Barnet', 'b18238762': 'Camden', 'b18236923': 'Tower Hamlets', 'b1995315x': 'Southwark', 'b18253015': 'London County Council', 'b19789658': 'Bexley', 'b19786177': 'Croydon', 'b19789300': 'Enfield', 'b19821803': 'Bromley', 'b19787224': 'Croydon', 'b18252631': 'London County Council', 'b19787558': 'Barking and Dagenham', 'b19877882': 'Ealing', 'b1823950x': 'Islington', 'b19783425': 'Ealing', 'b19796390': 'Brent', 'b1979664x': 'Brent', 'b19796468': 'Brent', 'b18219317': 'Southwark', 'b19975764': 'Richmond upon Thame', 'b18237034': 'Lewisham', 'b18239547': 'Lambeth', 'b19787091': 'Croydon', 'b20056230': 'Islington', 'b17997987': 'Hammersmith and Fulham', 'b18246928': 'Southwark', 'b19882002': 'Haringey', 'b19882555': 'Sutton', 'b18248196': 'City of Westminster', 'b1824547x': 'City of Westminster', 'b19786797': 'Croydon', 'b19823666': 'Greenwich', 'b18237253': 'Islington', 'b20056588': 'Hackney', 'b1982340x': 'Greenwich', 'b18244877': 'Lewisham', 'b1823625x': 'Wandsworth', 'b18254676': 'City of Westminster', 'b18236078': 'Kensington and Chelsea', 'b19969570': 'Harrow', 'b18250701': 'Wandsworth', 'b19790703': 'Barnet', 'b18248998': 'Haringey', 'b19792396': 'Havering', 'b19968164': 'Hounslow', 'b18218994': 'Southwark', 'b19824361': 'Harrow', 'b18235785': 'Kensington and Chelsea', 'b19969417': 'Camden', 'b19884898': 'City of London', 'b18236789': 'Tower Hamlets', 'b19795282': 'Barnet', 'b18222249': 'Camden', 'b18239961': 'Lambeth', 'b18235529': 'Hammersmith and Fulham', 'b1811118x': 'Wandsworth', 'b1978868x': 'Enfield', 'b19955005': 'Tower Hamlets', 'b19822753': 'Tower Hamlets', 'b19881113': 'Sutton', 'b19875629': 'Camden', 'b18253532': 'City of London', 'b18246734': 'Southwark', 'b19877249': 'Merton', 'b19877717': 'Hounslow', 'b18218738': 'Southwark', 'b18042259': 
'Hammersmith and Fulham', 'b18219275': 'Southwark', 'b19788022': 'Ealing', 'b19884400': 'City of London', 'b19955261': 'City of Westminster', 'b18254706': 'City of Westminster', 'b18219603': 'Southwark', 'b1799911x': 'Southwark', 'b18043781': 'Greenwich', 'b19880790': 'Hounslow', 'b19793133': 'Bromley', 'b1997615x': 'Kingston upon Thames', 'b19968899': 'Hounslow', 'b18209762': 'Southwark', 'b1979115x': 'Haringey', 'b19878771': 'Redbridge', 'b19954207': 'Lambeth', 'b1987960x': 'Waltham Forest', 'b1988591x': 'Hackney', 'b18247064': 'Southwark', 'b19955911': 'Southwark', 'b18248603': 'Islington', 'b19968760': 'Sutton', 'b19824567': 'Harrow', 'b18043124': 'Greenwich', 'b19785215': 'Bexley', 'b19885313': 'Hackney', 'b19878801': 'Richmond upon Thame', 'b19878552': 'Redbridge', 'b19882634': 'Merton', 'b18253489': 'City of London', 'b17998797': 'Kensington and Chelsea', 'b19881861': 'Haringey', 'b19786542': 'Bexley', 'b19797254': 'Haringey', 'b18252023': 'Camden', 'b18039492': 'Tower Hamlets', 'b18117132': 'Southwark', 'b19789518': 'Enfield', 'b19882944': 'Camden', 'b19969387': 'Camden', 'b18253738': 'City of London', 'b19789841': 'Bexley', 'b19790958': 'Barnet', 'b19824178': 'Kensington and Chelsea', 'b18249553': 'Lambeth', 'b1988249x': 'Sutton', 'b17997409': 'Southwark', 'b1978689x': 'Croydon', 'b18121433': 'Kensington and Chelsea', 'b18218854': 'Southwark', 'b19956174': 'City of Westminster', 'b18251390': 'Camden', 'b18244609': 'Lewisham', 'b19883547': 'Tower Hamlets', 'b19953525': 'Hammersmith and Fulham', 'b1825228x': 'Camden', 'b19880893': 'Brent', 'b19884795': 'City of London', 'b1978479x': 'Bromley', 'b20056771': 'Hackney', 'b18253866': 'City of London', 'b1978823x': 'Ealing', 'b1812074x': 'Wandsworth', 'b18249164': 'Haringey', 'b19875010': 'Kingston upon Thames', 'b19822923': 'Tower Hamlets', 'b19822431': 'Hackney', 'b19874674': 'Newham', 'b1824838x': 'City of Westminster', 'b19975958': 'Haringey', 'b18219457': 'Southwark', 'b1799889x': 'Lambeth', 'b19823344': 
'Greenwich', 'b18110915': 'Kensington and Chelsea', 'b19822698': 'Tower Hamlets', 'b19878242': 'Redbridge', 'b18251705': 'Camden', 'b19791707': 'Barnet', 'b18236443': 'Wandsworth', 'b19876348': 'Waltham Forest', 'b18118343': 'Greenwich', 'b19793960': 'Havering', 'b1825164x': 'Camden', 'b18223631': 'Kensington and Chelsea', 'b18245808': 'Tower Hamlets', 'b19952697': 'Tower Hamlets', 'b19952922': 'Southwark', 'b19784363': 'Barking and Dagenham', 'b18222900': 'Lewisham', 'b20106798': 'Lewisham', 'b19881563': 'Newham', 'b19876439': 'Waltham Forest', 'b19876385': 'Waltham Forest', 'b19885672': 'Hackney', 'b18246126': 'Tower Hamlets', 'b18250117': 'Hackney', 'b1979261x': 'Waltham Forest', 'b19874728': 'Newham', 'b18245730': 'Tower Hamlets', 'b18249838': 'Hackney', 'b1979549x': 'Barnet', 'b19881307': 'Sutton', 'b1978577x': 'Bromley', 'b19876658': 'Waltham Forest', 'b19969600': 'Harrow', 'b18246382': 'Southwark', 'b18250075': 'Hackney', 'b19821840': 'Enfield', 'b18221907': 'Kensington and Chelsea', 'b19793637': 'Bromley', 'b18038694': 'Wandsworth', 'b19792943': 'Waltham Forest', 'b19971400': 'Merton', 'b18108982': 'Kensington and Chelsea', 'b19794721': 'Bexley', 'b18238841': 'Camden', 'b18238336': 'Hammersmith and Fulham', 'b19877389': 'Merton', 'b18249760': 'Hackney', 'b19883043': 'Kingston upon Thames', 'b17996867': 'Kensington and Chelsea', 'b19878126': 'Redbridge', 'b19792864': 'Waltham Forest', 'b1801897x': 'Kensington and Chelsea', 'b19954438': 'Lewisham', 'b19879830': 'Richmond upon Thame', 'b18247386': 'City of Westminster', 'b18247477': 'City of Westminster', 'b19884230': 'City of London', 'b18253258': 'City of London', 'b18249498': 'Haringey', 'b19883225': 'Kingston upon Thames', 'b19970201': 'Merton', 'b19875381': 'Southwark', 'b19796663': 'Brent', 'b18118410': 'Greenwich', 'b2005628x': 'Islington', 'b18253039': 'London County Council', 'b18047233': 'Camden', 'b19789671': 'Bexley', 'b19786153': 'Croydon', 'b19797394': 'Haringey', 'b18251092': 'Wandsworth', 
'b19821864': 'Enfield', 'b19787200': 'Croydon', 'b19791495': 'Hillingdon', 'b19796912': 'Brent', 'b19796444': 'Brent', 'b19882798': 'Merton', 'b1979194x': 'Barnet', 'b18248019': 'City of Westminster', 'b20056217': 'Islington', 'b18239523': 'Islington', 'b18237010': 'Lewisham', 'b19956344': 'City of Westminster', 'b18246874': 'Southwark', 'b19882579': 'Sutton', 'b18219597': 'Southwark', 'b18248172': 'City of Westminster', 'b19823642': 'Greenwich', 'b18237277': 'Islington', 'b19824695': 'Harrow', 'b18252618': 'London County Council', 'b18252345': 'Camden', 'b18244816': 'Lewisham', 'b1823740x': 'Islington', 'b19790727': 'Barnet', 'b19792372': 'Havering', 'b19824348': 'Kensington and Chelsea', 'b19796560': 'Brent', 'b19968188': 'Hounslow', 'b19823423': 'Greenwich', 'b18236273': 'Wandsworth', 'b19821487': 'Bromley', 'b19785884': 'Bromley', 'b19795646': 'Hillingdon', 'b18238956': 'Camden', 'b18237903': 'Greenwich', 'b18222225': 'Camden', 'b18236054': 'Kensington and Chelsea', 'b18239948': 'Lambeth', 'b19822595': 'Hackney', 'b18235505': 'Hammersmith and Fulham', 'b19953070': 'Southwark', 'b19955066': 'Tower Hamlets', 'b19875605': 'Camden', 'b19822777': 'Tower Hamlets', 'b19881174': 'Sutton', 'b19877262': 'Merton', 'b19880224': 'Hounslow', 'b19880558': 'Hounslow', 'b20056631': 'Hackney', 'b19784582': 'Barking and Dagenham', 'b19884424': 'City of London', 'b19788009': 'Ealing', 'b19976173': 'Kingston upon Thames', 'b19787856': 'Ealing', 'b19955285': 'City of Westminster', 'b18254767': 'City of Westminster', 'b19791136': 'Haringey', 'b19785987': 'Croydon', 'b18238889': 'Camden', 'b19793157': 'Bromley', 'b19793406': 'Bromley', 'b19787054': 'Croydon', 'b18254585': 'City of Westminster', 'b18220678': 'Southwark', 'b19881794': 'Haringey', 'b19785185': 'Bexley', 'b19885088': 'Tower Hamlets', 'b19954220': 'Lambeth', 'b19954888': 'Tower Hamlets', 'b18246242': 'Tower Hamlets', 'b18246710': 'Southwark', 'b19878710': 'Redbridge', 'b19879441': 'Waltham Forest', 'b19879994': 'Richmond 
upon Thame', 'b18247040': 'Southwark', 'b19882221': 'Merton', 'b19875332': 'Kingston upon Thames', 'b18239316': 'Islington', 'b19968747': 'Sutton', 'b19797096': 'Brent', 'b19824506': 'Harrow', 'b19785239': 'Bexley', 'b19954086': 'Lambeth', 'b19878825': 'Richmond upon Thame', 'b19970432': 'Ealing', 'b19885337': 'Hackney', 'b18250919': 'Wandsworth', 'b19790260': 'Barnet', 'b19786529': 'Bexley', 'b19879192': 'Richmond upon Thame', 'b1824001x': 'Greenwich', 'b18237484': 'Islington', 'b19797278': 'Haringey', 'b20056977': 'Southwark', 'b17996934': 'Islington', 'b19970699': 'Richmond upon Thame', 'b18253751': 'City of London', 'b19789531': 'Enfield', 'b19956484': 'City of Westminster', 'b1824466x': 'Islington', 'b18118082': 'Kensington and Chelsea', 'b19956629': 'City of Westminster', 'b18121184': 'City of Westminster', 'b19956150': 'City of Westminster', 'b19787698': 'Barking and Dagenham', 'b19883560': 'Tower Hamlets', 'b19953501': 'Hammersmith and Fulham', 'b18238592': 'Camden', 'b19953057': 'Southwark', 'b17997689': 'Tower Hamlets', 'b1823804x': 'Hammersmith and Fulham', 'b1824953x': 'Lambeth', 'b19882968': 'Camden', 'b19784004': 'Barking and Dagenham', 'b20056795': 'Hackney', 'b18252400': 'Camden', 'b18253805': 'City of London', 'b19875034': 'Kingston upon Thames', 'b18249140': 'Haringey', 'b19787078': 'Croydon', 'b18120581': 'Lambeth', 'b18251663': 'Camden', 'b1988221x': 'Merton', 'b18219433': 'Southwark', 'b19784910': 'Bromley', 'b19795658': 'Hillingdon', 'b18044566': 'Kensington and Chelsea', 'b1982290x': 'Tower Hamlets', 'b19791720': 'Barnet', 'b18239882': 'Lambeth', 'b17997021': 'Wandsworth', 'b20106804': 'Lewisham', 'b19882476': 'Sutton', 'b19793984': 'Havering', 'b18223345': 'Kensington and Chelsea', 'b19885532': 'Hackney', 'b18223618': 'Kensington and Chelsea', 'b18248810': 'Islington', 'b18245821': 'Tower Hamlets', 'b18245535': 'City of Westminster', 'b18245286': 'City of Westminster', 'b19952909': 'Southwark', 'b18239080': 'Islington', 'b19881095': 
'Sutton', 'b19785756': 'Bromley', 'b19876415': 'Waltham Forest', 'b19970316': 'Richmond upon Thame', 'b19885659': 'Hackney', 'b18250130': 'Hackney', 'b18245754': 'Tower Hamlets', 'b19823368': 'Greenwich', 'b19874741': 'Newham', 'b18250579': 'Wandsworth', 'b19788253': 'Ealing', 'b18251985': 'Camden', 'b18249851': 'Hackney', 'b19881897': 'Haringey', 'b19881320': 'Sutton', 'b19785537': 'Bromley', 'b19822017': 'Hackney', 'b19796730': 'Brent', 'b19794290': 'Havering', 'b19794526': 'Hillingdon', 'b18045212': 'Kensington and Chelsea', 'b18250051': 'Hackney', 'b1988154x': 'Newham', 'b19969624': 'Harrow', 'b18038359': 'Hammersmith and Fulham', 'b19790454': 'Barnet', 'b1979258x': 'Waltham Forest', 'b19792633': 'Waltham Forest', 'b20056837': 'Hackney', 'b1987845x': 'Redbridge', 'b1824970x': 'Hackney', 'b18220198': 'Lewisham', 'b19794745': 'Enfield', 'b18221828': 'Camden', 'b18238865': 'Camden', 'b18247672': 'City of Westminster', 'b1825424x': 'City of Westminster', 'b19874819': 'Newham', 'b19955546': 'Southwark', 'b19954414': 'Lewisham', 'b19878102': 'Ealing', 'b18246503': 'Southwark', 'b19954141': 'Lambeth', 'b19970596': 'Kingston upon Thames', 'b19877109': 'Waltham Forest', 'b18253477': 'City of London', 'b19880054': 'Richmond upon Thame', 'b18253234': 'City of London', 'b19879854': 'Richmond upon Thame', 'b19884217': 'City of London', 'b18247453': 'City of Westminster', 'b19971424': 'Merton', 'b19976343': 'Kingston upon Thames', 'b19883973': 'Sutton', 'b19883791': 'Sutton', 'b19970778': 'Richmond upon Thame', 'b19791926': 'Barnet', 'b19796602': 'Brent', 'b19793212': 'Bromley', 'b18253581': 'City of London', 'b18250026': 'Hackney', 'b19969636': 'Harrow', 'b18220332': 'Hammersmith and Fulham', 'b19796936': 'Brent', 'b19882774': 'Merton', 'b19879271': 'Richmond upon Thame', 'b19957142': 'Southwark', 'b1804721x': 'Camden', 'b18250877': 'Wandsworth', 'b18237071': 'Lewisham', 'b18248032': 'City of Westminster', 'b19956812': 'Southwark', 'b18252187': 'Camden', 'b19956368': 'City 
of Westminster', 'b20056898': 'Southwark', 'b18248159': 'City of Westminster', 'b19969028': 'Hounslow', 'b19882518': 'Sutton', 'b18237290': 'Islington', 'b19823629': 'Greenwich', 'b19968218': 'Hounslow', 'b20056096': 'Islington', 'b19792037': 'Barnet', 'b19797515': 'Haringey', 'b18252369': 'Camden', 'b19787261': 'Croydon', 'b19975855': 'Richmond upon Thame', 'b1982399x': 'Kensington and Chelsea', 'b18218957': 'Southwark', 'b19823198': 'Tower Hamlets', 'b19792359': 'Havering', 'b18039133': 'Camden', 'b18238671': 'Camden', 'b18236212': 'Wandsworth', 'b18235748': 'Kensington and Chelsea', 'b18237927': 'Tower Hamlets', 'b19882075': 'Merton', 'b19875198': 'Kingston upon Thames', 'b19787480': 'Barking and Dagenham', 'b1995573x': 'Southwark', 'b18249280': 'Haringey', 'b19822571': 'Hackney', 'b19783395': 'Ealing', 'b19953264': 'Hammersmith and Fulham', 'b18235566': 'Hammersmith and Fulham', 'b19784612': 'Barking and Dagenham', 'b19955042': 'Tower Hamlets', 'b19822716': 'Tower Hamlets', 'b19881150': 'Sutton', 'b19880200': 'Hillingdon', 'b18218775': 'Southwark', 'b18106614': 'Islington', 'b18254032': 'Tower Hamlets', 'b18251511': 'Camden', 'b18254743': 'City of Westminster', 'b1810888x': 'Lambeth', 'b19791112': 'Haringey', 'b18223400': 'Kensington and Chelsea', 'b18246084': 'Tower Hamlets', 'b19794198': 'Havering', 'b19793170': 'Bromley', 'b1978787x': 'Ealing', 'b1979535x': 'Barnet', 'b1978806x': 'Ealing', 'b18252965': 'London County Council', 'b19878734': 'Redbridge', 'b17999157': 'Tower Hamlets', 'b18246266': 'Tower Hamlets', 'b19879465': 'Waltham Forest', 'b18240033': 'Greenwich', 'b19797072': 'Brent', 'b19955959': 'Southwark', 'b19785252': 'Bexley', 'b19970900': 'Richmond upon Thame', 'b18250932': 'Wandsworth', 'b19885350': 'Hackney', 'b19969818': 'Redbridge', 'b19878849': 'Richmond upon Thame', 'b19786050': 'Croydon', 'b19786505': 'Bexley', 'b19789087': 'Enfield', 'b1982452x': 'Harrow', 'b1823933x': 'Islington', 'b1987859x': 'Redbridge', 'b1803973x': 'Tower Hamlets', 
'b19789555': 'Enfield', 'b18121962': 'Tower Hamlets', 'b19969156': 'Barnet', 'b18247027': 'Southwark', 'b19789889': 'Bexley', 'b19954608': 'Lewisham', 'b18249516': 'Lambeth', 'b19875976': 'Redbridge', 'b18238063': 'Hammersmith and Fulham', 'b19783620': 'Ealing', 'b19970584': 'Kingston upon Thames', 'b18120878': 'Tower Hamlets', 'b19879647': 'Waltham Forest', 'b20056400': 'Islington', 'b19956605': 'City of Westminster', 'b18245420': 'City of Westminster', 'b18252795': 'London County Council', 'b19970018': 'Kingston upon Thames', 'b19883584': 'Tower Hamlets', 'b19787674': 'Barking and Dagenham', 'b19877900': 'Ealing', 'b19880856': 'Hounslow', 'b19796031': 'Hillingdon', 'b1988400x': 'Sutton', 'b19882981': 'Kingston upon Thames', 'b19975624': 'Camden', 'b18253775': 'City of London', 'b1804458x': 'Wandsworth', 'b18252424': 'London County Council', 'b18253829': 'City of London', 'b19787017': 'Croydon', 'b18251687': 'Camden', 'b18120568': 'Lambeth', 'b19953380': 'Hammersmith and Fulham', 'b19822960': 'Tower Hamlets', 'b18108714': 'Camden', 'b1995265x': 'Tower Hamlets', 'b18246862': 'Southwark', 'b19975995': 'Richmond upon Thame', 'b19785604': 'Kensington and Chelsea', 'b19795105': 'Enfield', 'b18236406': 'Wandsworth', 'b18236157': 'Kensington and Chelsea', 'b18120702': 'City of Westminster', 'b17997008': 'Wandsworth', 'b20106865': 'Lewisham', 'b1988218x': 'Merton', 'b19882233': 'Merton', 'b18250488': 'Wandsworth', 'b19790880': 'Barnet', 'b18248834': 'Islington', 'b18245511': 'City of Westminster', 'b19824063': 'Kensington and Chelsea', 'b19822388': 'Hackney', 'b19881526': 'Newham', 'b19953926': 'Tower Hamlets', 'b18223096': 'Tower Hamlets', 'b18250154': 'Hackney', 'b18245778': 'Tower Hamlets', 'b19874765': 'Newham', 'b19823381': 'Greenwich', 'b19788903': 'Enfield', 'b19788277': 'Ealing', 'b19784934': 'Bromley', 'b19784284': 'Barking and Dagenham', 'b19822030': 'Hackney', 'b19785082': 'Bromley', 'b19876695': 'Waltham Forest', 'b19794502': 'Hillingdon', 'b18252886': 'London 
County Council', 'b19790478': 'Barnet', 'b19790247': 'Barnet', 'b19885180': 'Hackney', 'b19969648': 'Harrow', 'b18045236': 'Kensington and Chelsea', 'b19792657': 'Waltham Forest', 'b19955388': 'City of Westminster', 'b19794769': 'Enfield', 'b19954384': 'Lewisham', 'b19878692': 'Redbridge', 'b18246692': 'Southwark', 'b18238804': 'Camden', 'b19880698': 'Hounslow', 'b18248354': 'City of Westminster', 'b19784429': 'Barking and Dagenham', 'b19884588': 'City of London', 'b19789737': 'Bexley', 'b18247611': 'City of Westminster', 'b19874832': 'Newham', 'b18254482': 'City of Westminster', 'b19875411': 'Southwark', 'b19883006': 'Kingston upon Thames', 'b19970572': 'Kingston upon Thames', 'b19954165': 'Lambeth', 'b18249723': 'Hackney', 'b18246527': 'Southwark', 'b19878473': 'Redbridge', 'b19877122': 'Merton', 'b19783814': 'Ealing', 'b20106889': 'Lewisham', 'b19880078': 'Richmond upon Thame', 'b18253210': 'City of London', 'b19879878': 'Richmond upon Thame', 'b1995556x': 'Southwark', 'b19792906': 'Waltham Forest', 'b18254263': 'City of Westminster', 'b19971199': 'Newham', 'b19791902': 'Barnet', 'b19883912': 'Sutton', 'b19796626': 'Brent', 'b18118185': 'Hackney', 'b1979695x': 'Brent', 'b19793236': 'Bromley', 'b18239298': 'Islington', 'b18248585': 'Islington', 'b19785938': 'Bromley', 'b18047270': 'City of London', 'b19796481': 'Brent', 'b18122553': 'Hammersmith and Fulham', 'b18219627': 'Southwark', 'b17998074': 'Tower Hamlets', 'b19882750': 'Merton', 'b19879258': 'Richmond upon Thame', 'b19957129': 'Southwark', 'b18237721': 'Greenwich', 'b18248056': 'City of Westminster', 'b18237058': 'Lewisham', 'b19956836': 'Southwark', 'b18252163': 'Camden', 'b19876993': 'Waltham Forest', 'b18246989': 'Southwark', 'b19882531': 'Sutton', 'b19786268': 'Croydon', 'b19969004': 'Hounslow', 'b18248135': 'City of Westminster', 'b19789944': 'Bexley', 'b19823605': 'Greenwich', 'b19792013': 'Barnet', 'b19823976': 'Islington', 'b19968231': 'Hounslow', 'b19797539': 'Haringey', 'b19824658': 'Harrow', 
'b19794885': 'Enfield', 'b19787248': 'Croydon', 'b18252308': 'Camden', 'b19975879': 'Ealing', 'b19874595': 'Newham', 'b18245183': 'City of Westminster', 'b19792335': 'Havering', 'b19824385': 'Harrow', 'b18038542': 'Islington', 'b18218970': 'Southwark', 'b18039157': 'Camden', 'b18236236': 'Wandsworth', 'b18236893': 'Tower Hamlets', 'b18238658': 'Camden', 'b17997252': 'Southwark', 'b18235761': 'Kensington and Chelsea', 'b18245985': 'Tower Hamlets', 'b19792487': 'Islington', 'b18237940': 'Hammersmith and Fulham', 'b18045182': 'Lambeth', 'b20056618': 'Hackney', 'b1982273x': 'Tower Hamlets', 'b19875174': 'Kingston upon Thames', 'b18249267': 'Haringey', 'b18236091': 'Kensington and Chelsea', 'b18222778': 'City of Westminster', 'b20106774': 'Lewisham', 'b20056564': 'Hackney', 'b19953240': 'Southwark', 'b19953719': 'Hammersmith and Fulham', 'b19822558': 'Hackney', 'b19784636': 'Barking and Dagenham', 'b18106456': 'Camden', 'b18106985': 'Hammersmith and Fulham', 'b19955716': 'Southwark', 'b19788599': 'Enfield', 'b19791689': 'Barnet', 'b18111695': 'Southwark', 'b19793820': 'Barnet', 'b18111129': 'Lewisham', 'b19788733': 'Enfield', 'b19788046': 'Ealing', 'b18254019': 'Tower Hamlets', 'b19795373': 'Barnet', 'b19787819': 'Ealing', 'b18251535': 'Camden', 'b18246060': 'Tower Hamlets', 'b18223424': 'Kensington and Chelsea', 'b18235542': 'Hammersmith and Fulham', 'b18235815': 'Kensington and Chelsea', 'b18248287': 'City of Westminster', 'b18252941': 'London County Council', 'b18043495': 'Hammersmith and Fulham', 'b19821554': 'Bromley', 'b18246205': 'Tower Hamlets', 'b19969788': 'Redbridge', 'b19880261': 'Hounslow', 'b19976069': 'Kingston upon Thames', 'b19955972': 'Southwark', 'b19968784': 'Waltham Forest', 'b19785276': 'Bexley', 'b19797059': 'Brent', 'b19885374': 'Hackney', 'b18250956': 'Wandsworth', 'b19878862': 'Richmond upon Thame', 'b19970961': 'Richmond upon Thame', 'b19876750': 'Waltham Forest', 'b19790223': 'Barnet', 'b19876981': 'Waltham Forest', 'b19969879': 'Redbridge', 
'b19971382': 'Merton', 'b19789713': 'Bexley', 'b18117193': 'Kensington and Chelsea', 'b19954591': 'Lewisham', 'b19880480': 'Hounslow', 'b1987649x': 'Waltham Forest', 'b17998268': 'Hammersmith and Fulham', 'b18247003': 'Southwark', 'b19879404': 'Richmond upon Thame', 'b18240057': 'Greenwich', 'b19884394': 'City of London', 'b18247556': 'City of Westminster', 'b19956447': 'City of Westminster', 'b18247143': 'Southwark', 'b1988087x': 'Hounslow', 'b19970389': 'Ealing', 'b19883614': 'Tower Hamlets', 'b1825133x': 'Camden', 'b19875952': 'Redbridge', 'b19954621': 'Lewisham', 'b18238002': 'Hammersmith and Fulham', 'b19783607': 'Ealing', 'b19884023': 'City of London', 'b19956666': 'City of Westminster', 'b1995699x': 'Southwark', 'b19787650': 'Barking and Dagenham', 'b19956198': 'City of Westminster', 'b19970031': 'Kingston upon Thames', 'b1987831x': 'Redbridge', 'b19796018': 'Hillingdon', 'b18118379': 'Greenwich', 'b19955236': 'City of Westminster', 'b19783541': 'Ealing', 'b19877924': 'Ealing', 'b19787388': 'Barking and Dagenham', 'b19975600': 'Brent', 'b17998487': 'Hackney', 'b19789579': 'Enfield', 'b19786384': 'Bexley', 'b20274919': 'Harrow', 'b19787030': 'Croydon', 'b18252448': 'London County Council', 'b19822947': 'Tower Hamlets', 'b1978692x': 'Croydon', 'b18219160': 'Southwark', 'b18246849': 'Southwark', 'b1988557x': 'Hackney', 'b1982404x': 'Kensington and Chelsea', 'b18121494': 'City of Westminster', 'b18237551': 'Greenwich', 'b20056424': 'Islington', 'b18239717': 'Lambeth', 'b1823639x': 'Wandsworth', 'b18239845': 'Lambeth', 'b1804461x': 'Islington', 'b18120726': 'City of Westminster', 'b18244646': 'Lewisham', 'b18236170': 'Kensington and Chelsea', 'b18111592': 'Port of London', 'b20106841': 'Lewisham', 'b19823617': 'Greenwich', 'b19882257': 'Merton', 'b19969272': 'Newham', 'b18248858': 'Islington', 'b19952636': 'Tower Hamlets', 'b19783565': 'Ealing', 'b18245572': 'City of Westminster', 'b19823010': 'Tower Hamlets', 'b1979292x': 'Waltham Forest', 'b19881502': 'Newham', 
'b19876324': 'Waltham Forest', 'b19881058': 'Sutton', 'b19785719': 'Bromley', 'b19953902': 'Tower Hamlets', 'b19885611': 'Hackney', 'b18250178': 'Hackney', 'b1804525x': 'Kensington and Chelsea', 'b18245791': 'Tower Hamlets', 'b1988462x': 'Port of London', 'b19874789': 'Newham', 'b19788927': 'Enfield', 'b19795166': 'Enfield', 'b19822054': 'Hackney', 'b19794563': 'Hillingdon', 'b18250014': 'Hackney', 'b19969661': 'Harrow', 'b19790417': 'Barnet', 'b19792670': 'Waltham Forest', 'b19955364': 'City of Westminster', 'b19875782': 'Camden', 'b18249097': 'Haringey', 'b19794782': 'Enfield', 'b19954360': 'Lewisham', 'b18238828': 'Camden', 'b17999054': 'City of London', 'b19878679': 'Redbridge', 'b18238397': 'Camden', 'b19877493': 'Hounslow', 'b19880674': 'Hounslow', 'b19877328': 'Merton', 'b18247635': 'City of Westminster', 'b19784405': 'Barking and Dagenham', 'b19884564': 'City of London', 'b19875435': 'Southwark', 'b1979132x': 'Hillingdon', 'b18254469': 'City of Westminster', 'b18251948': 'Camden', 'b18251493': 'Camden', 'b19791094': 'Haringey', 'b19783838': 'Ealing', 'b19954104': 'Lambeth', 'b18246540': 'Southwark', 'b17996880': 'Kensington and Chelsea', 'b18251420': 'Camden', 'b19970559': 'Kingston upon Thames', 'b19877146': 'Merton', 'b19880017': 'Richmond upon Thame', 'b19793789': 'Bromley', 'b18247490': 'City of Westminster', 'b18112614': 'Hammersmith and Fulham', 'b18254202': 'City of Westminster', 'b19796171': 'Brent', 'b1824807x': 'City of Westminster', 'b19789385': 'Enfield', 'b18047257': 'Islington', 'b18239274': 'Islington', 'b1995685x': 'Southwark', 'b19968644': 'Hounslow', 'b19882889': 'Merton', 'b18122577': 'Hammersmith and Fulham', 'b19882737': 'Merton', 'b19957105': 'Southwark', 'b19879234': 'Richmond upon Thame', 'b1825309x': 'City of London', 'b1979325x': 'Bromley', 'b20056850': 'Hackney', 'b19823836': 'Greenwich', 'b17994482': 'Camden', 'b19883286': 'Redbridge', 'b19875897': 'Camden', 'b19883936': 'Sutton', 'b19970730': 'Richmond upon Thame', 'b19882087': 
'Merton', 'b18253635': 'City of London', 'b19786244': 'Croydon', 'b18248111': 'City of Westminster', 'b19789968': 'Bexley', 'b19824671': 'Harrow', 'b19823952': 'Islington', 'b19821888': 'Enfield', 'b19792074': 'Barnet', 'b19797552': 'Haringey', 'b18252321': 'Camden', 'b19796973': 'Brent', 'b18250786': 'Wandsworth', 'b19790788': 'Barnet', 'b19975818': 'Richmond upon Thame', 'b19792311': 'Havering', 'b18238646': 'Camden', 'b19823484': 'Greenwich', 'b19956277': 'City of Westminster', 'b18249309': 'Haringey', 'b19883420': 'Tower Hamlets', 'b18238634': 'Camden', 'b17997902': 'Camden', 'b17997227': 'Southwark', 'b18245961': 'Tower Hamlets', 'b1978465x': 'Barking and Dagenham', 'b18237964': 'Hammersmith and Fulham', 'b19884813': 'City of London', 'b20056679': 'Hackney', 'b18249243': 'Haringey', 'b19875150': 'Kingston upon Thames', 'b19822534': 'Hackney', 'b19953732': 'Hammersmith and Fulham', 'b19791525': 'Hillingdon', 'b19953227': 'Southwark', 'b19784879': 'Bromley', 'b19823228': 'City of Westminster', 'b19955777': 'Southwark', 'b19795579': 'Barnet', 'b19822285': 'Hackney', 'b19791665': 'Barnet', 'b18111105': 'Lewisham', 'b19793807': 'Bromley', 'b18235700': 'Kensington and Chelsea', 'b18106651': 'Tower Hamlets', 'b18116371': 'Islington', 'b19795312': 'Barnet', 'b19787832': 'Ealing', 'b18251559': 'Camden', 'b18236613': 'Tower Hamlets', 'b18223448': 'Kensington and Chelsea', 'b18246047': 'Tower Hamlets', 'b18235839': 'Kensington and Chelsea', 'b18018610': 'Camden', 'b19878977': 'Richmond upon Thame', 'b1978871x': 'Enfield', 'b18249930': 'Hackney', 'b19955194': 'City of Westminster', 'b18038293': 'Islington', 'b19969259': 'Newham', 'b1825407x': 'Hammersmith and Fulham', 'b19876154': 'Redbridge', 'b19881265': 'Sutton', 'b18246229': 'Tower Hamlets', 'b19954827': 'Tower Hamlets', 'b19821578': 'Bromley', 'b19885945': 'Hackney', 'b18252928': 'London County Council', 'b19877791': 'Hounslow', 'b18238890': 'Camden', 'b19885519': 'Hackney', 'b19880248': 'Hounslow', 'b19971011': 
'Waltham Forest', 'b19955996': 'Southwark', 'b19797035': 'Brent', 'b19976045': 'Kingston upon Thames', 'b18248688': 'Islington', 'b19794046': 'Havering', 'b18039777': 'Tower Hamlets', 'b18249437': 'Haringey', 'b19878886': 'Richmond upon Thame', 'b18250208': 'Hackney', 'b19885398': 'Hackney', 'b19970948': 'Richmond upon Thame', 'b18247799': 'City of Westminster', 'b1979373x': 'Bromley', 'b1825469x': 'City of Westminster', 'b17996922': 'Kensington and Chelsea', 'b1978529x': 'Bexley', 'b17996946': 'Wandsworth', 'b19878084': 'Ealing', 'b1979020x': 'Barnet', 'b1825097x': 'Wandsworth', 'b19783991': 'Barking and Dagenham', 'b19879933': 'Richmond upon Thame', 'b18240070': 'Greenwich', 'b19879428': 'Richmond upon Thame', 'b19956460': 'City of Westminster', 'b19954645': 'Lewisham', 'b19878333': 'Redbridge', 'b18238026': 'Hammersmith and Fulham', 'b19875939': 'Redbridge', 'b19883638': 'Tower Hamlets', 'b19783668': 'Ealing', 'b19884047': 'City of Westminster', 'b19879684': 'Waltham Forest', 'b18253404': 'City of London', 'b19789774': 'Bexley', 'b19786013': 'Croydon', 'b19876476': 'Waltham Forest', 'b19787637': 'Barking and Dagenham', 'b18251316': 'Wandsworth', 'b18118392': 'Greenwich', 'b19970055': 'Kingston upon Thames', 'b1979678x': 'Brent', 'b19877948': 'Ealing', 'b19880819': 'Hounslow', 'b19785732': 'Bromley', 'b18116905': 'Hammersmith and Fulham', 'b20274932': 'Harrow', 'b19788587': 'Enfield', 'b19975661': 'Haringey', 'b19789592': 'Enfield', 'b18121925': 'Tower Hamlets', 'b18239481': 'Islington', 'b18237393': 'Islington', 'b18236376': 'Wandsworth', 'b1823611x': 'Kensington and Chelsea', 'b19885635': 'Hackney', 'b19783292': 'Ealing', 'b18122747': 'Islington', 'b18122036': 'Tower Hamlets', 'b17997598': 'Tower Hamlets', 'b18246825': 'Southwark', 'b18239730': 'Lambeth', 'b18237575': 'Greenwich', 'b20056448': 'Islington', 'b19956642': 'City of Westminster', 'b18244798': 'Lewisham', 'b19953690': 'Hammersmith and Fulham', 'b18223321': 'Kensington and Chelsea', 'b18251146': 
'Wandsworth', 'b19885556': 'Hackney', 'b17998815': 'Kensington and Chelsea', 'b19882270': 'Merton', 'b19885805': 'Hackney', 'b18248871': 'Islington', 'b18245225': 'City of Westminster', 'b19952612': 'Tower Hamlets', 'b19969211': 'Newham', 'b19786906': 'Croydon', 'b18245559': 'City of Westminster', 'b19823034': 'Tower Hamlets', 'b18252461': 'London County Council', 'b19785847': 'Bromley', 'b19953963': 'Tower Hamlets', 'b19876300': 'Redbridge', 'b19821761': 'Bromley', 'b18250191': 'Hackney', 'b19970304': 'Richmond upon Thame', 'b19788940': 'Enfield', 'b18239869': 'Lambeth', 'b18222638': 'Camden', 'b18223588': 'Kensington and Chelsea', 'b18221981': 'Kensington and Chelsea', 'b19790430': 'Barnet', 'b19792694': 'Waltham Forest', 'b19884606': 'Port of London', 'b19823733': 'Greenwich', 'b19955340': 'City of Westminster', 'b19955698': 'Southwark', 'b19878655': 'Redbridge', 'b1987716x': 'Merton', 'b19880650': 'Hounslow', 'b19877304': 'Merton', 'b19788368': 'Barnet', 'b19884540': 'City of London', 'b19880388': 'Hounslow', 'b19784466': 'Barking and Dagenham', 'b18042132': 'Hammersmith and Fulham', 'b19955522': 'Southwark', 'b19787996': 'Ealing', 'b18254445': 'City of Westminster', 'b18254196': 'City of Westminster', 'b19822078': 'Hackney', 'b19785598': 'Kensington and Chelsea', 'b18251961': 'Camden', 'b19881381': 'Newham', 'b19875459': 'Southwark', 'b19880030': 'Richmond upon Thame', 'b19783851': 'Ealing', 'b19793017': 'Waltham Forest', 'b19971485': 'Merton', 'b18254226': 'City of Westminster', 'b18239134': 'Islington', 'b19791343': 'Hillingdon', 'b18108908': 'Lambeth', 'b19954347': 'Lewisham', 'b1987974x': 'Richmond upon Thame', 'b18250038': 'Hackney', 'b19793273': 'Bromley', 'b19955832': 'Southwark', 'b18239250': 'Islington', 'b19968668': 'Hounslow', 'b1982385x': 'Greenwich', 'b19881927': 'Haringey', 'b19954128': 'Lambeth', 'b17999388': 'City of Westminster', 'b19970535': 'Kingston upon Thames', 'b19878436': 'Redbridge', 'b18246564': 'Southwark', 'b19882713': 'Merton', 
'b19882865': 'Merton', 'b18247210': 'City of Westminster', 'b19879210': 'Richmond upon Thame', 'b18248093': 'City of Westminster', 'b19971229': 'Newham', 'b18237769': 'Greenwich', 'b20056874': 'Hackney', 'b19876956': 'Waltham Forest', 'b19956873': 'Southwark', 'b18252126': 'Camden', 'b19970286': 'Richmond upon Thame', 'b18118495': 'Hackney', 'b19786736': 'Croydon', 'b20274890': 'Harrow', 'b19789907': 'Bexley', 'b19786220': 'Croydon', 'b19823939': 'Islington', 'b19968279': 'Hounslow', 'b19824610': 'Harrow', 'b18252692': 'London County Council', 'b18121111': 'Tower Hamlets', 'b19796997': 'Brent', 'b20274762': 'Brent', 'b18236947': 'Tower Hamlets', 'b18237186': 'Lewisham', 'b18218933': 'Southwark', 'b19956253': 'City of Westminster', 'b20056382': 'Islington', 'b18249322': 'Haringey', 'b19875216': 'Kingston upon Thames', 'b19883407': 'Tower Hamlets', 'b19796195': 'Brent', 'b17997926': 'Hackney', 'b18246965': 'Southwark', 'b18237988': 'Hammersmith and Fulham', 'b20056655': 'Hackney', 'b20056126': 'Islington', 'b19787170': 'Croydon', 'b19875137': 'Kingston upon Thames', 'b1987568x': 'Camden', 'b19822510': 'Hackney', 'b19953756': 'Hammersmith and Fulham', 'b19796225': 'Brent', 'b18251109': 'Wandsworth', 'b18244890': 'Lewisham', 'b19791501': 'Hillingdon', 'b19953203': 'Southwark', 'b19975831': 'Richmond upon Thame', 'b19784673': 'Barking and Dagenham', 'b19788551': 'Barnet', 'b19823204': 'Hackney', 'b19788885': 'Enfield', 'b19784855': 'Bromley', 'b19795555': 'Barnet', 'b19795087': 'Enfield', 'b1824922x': 'Haringey', 'b19955753': 'Southwark', 'b19791641': 'Barnet', 'b18111166': 'Southwark', 'b18245948': 'Tower Hamlets', 'b18235724': 'Kensington and Chelsea', 'b19793868': 'Havering', 'b19788770': 'Enfield', 'b19795336': 'Barnet', 'b20056680': 'Hackney', 'b18116358': 'Islington', 'b20110686': 'City of Westminster', 'b18254056': 'Tower Hamlets', 'b1979468x': 'Hillingdon', 'b19876798': 'Waltham Forest', 'b18251572': 'Camden', 'b18236637': 'Tower Hamlets', 'b1988171x': 
'Haringey', 'b18223461': 'Kensington and Chelsea', 'b18111385': 'Greenwich', 'b18246023': 'Tower Hamlets', 'b18235852': 'Kensington and Chelsea', 'b18245055': 'Lewisham', 'b18249917': 'Hackney', 'b19881241': 'Sutton', 'b19785100': 'Bexley', 'b19876178': 'Redbridge', 'b18043458': 'Hammersmith and Fulham', 'b19821517': 'Bromley', 'b19954803': 'Tower Hamlets', 'b19790594': 'Barnet', 'b19788083': 'Ealing', 'b1823558x': 'Hammersmith and Fulham', 'b19797011': 'Brent', 'b19976021': 'Kingston upon Thames', 'b19824580': 'Harrow', 'b1799696x': 'Wandsworth', 'b18039753': 'Tower Hamlets', 'b18250221': 'Hackney', 'b18247775': 'City of Westminster', 'b1987991x': 'Richmond upon Thame', 'b17999716': 'City of Westminster', 'b1979406x': 'Havering', 'b17999248': 'Hackney', 'b18247519': 'City of Westminster', 'b19790740': 'Barnet', 'b19954669': 'Lewisham', 'b19883870': 'Sutton', 'b19883651': 'Tower Hamlets', 'b19875915': 'Camden', 'b19878357': 'Redbridge', 'b19796766': 'Brent', 'b1979650x': 'Brent', 'b19783644': 'Ealing', 'b18253428': 'City of London', 'b19884060': 'City of Westminster', 'b19786037': 'Croydon', 'b1995640x': 'City of Westminster', 'b19789026': 'Enfield', 'b1823737x': 'Islington', 'b19787340': 'Barking and Dagenham', 'b19970079': 'Kingston upon Thames', 'b19787613': 'Barking and Dagenham', 'b18251377': 'Camden', 'b19783504': 'Ealing', 'b19877961': 'Ealing', 'b19880832': 'Hounslow', 'b20274956': 'Harrow', 'b19975648': 'Kingston upon Thames', 'b19788423': 'Barnet', 'b18236352': 'Wandsworth', 'b19879003': 'Richmond upon Thame', 'b18246801': 'Southwark', 'b19969181': 'Newham', 'b19786694': 'Croydon', 'b19881666': 'Haringey', 'b18237514': 'Greenwich', 'b17997719': 'Tower Hamlets', 'b19882294': 'Merton', 'b19969235': 'Newham', 'b19786967': 'Croydon', 'b18253131': 'City of London', 'b19823058': 'Tower Hamlets', 'b19874431': 'Hammersmith and Fulham', 'b19824002': 'Kensington and Chelsea', 'b18038244': 'Tower Hamlets', 'b18248329': 'City of Westminster', 'b18252485': 'London 
County Council', 'b18044657': 'Hammersmith and Fulham', 'b1823544x': 'Hammersmith and Fulham', 'b19821748': 'Bromley', 'b18048377': 'Tower Hamlets', 'b19792505': 'Lewisham', 'b19784995': 'Bromley', 'b19884990': 'Camden', 'b1995394x': 'Tower Hamlets', 'b18239808': 'Lambeth', 'b18236133': 'Kensington and Chelsea', 'b18238403': 'Camden', 'b18045297': 'Islington', 'b19952892': 'Southwark', 'b19884667': 'City of London', 'b18253994': 'Tower Hamlets', 'b19784193': 'Barking and Dagenham', 'b19955327': 'City of Westminster', 'b19822893': 'Tower Hamlets', 'b19876361': 'Waltham Forest', 'b19881010': 'Sutton', 'b19785823': 'Bromley', 'b19880637': 'Hounslow', 'b19877365': 'Merton', 'b19784338': 'Barking and Dagenham', 'b18042119': 'Islington', 'b19884527': 'City of London', 'b19788344': 'Barnet', 'b18245249': 'City of Westminster', 'b19784442': 'Barking and Dagenham', 'b19787972': 'Ealing', 'b18254421': 'City of Westminster', 'b18251900': 'Camden', 'b19875472': 'Southwark', 'b19822091': 'Hackney', 'b18111440': 'Kensington and Chelsea', 'b19794216': 'Havering', 'b19783875': 'Ealing', 'b19793078': 'Waltham Forest', 'b19884291': 'Port of London', 'b19953355': 'Hammersmith and Fulham', 'b18239110': 'Islington', 'b18108921': 'Islington', 'b19791367': 'Hillingdon', 'b19878989': 'Richmond upon Thame', 'b17999017': 'City of London', 'b17996776': 'Southwark', 'b19878631': 'Redbridge', 'b19954323': 'Lewisham', 'b19793297': 'Bromley', 'b18247180': 'City of Westminster', 'b19879581': 'Waltham Forest', 'b1987697x': 'Waltham Forest', 'b18248524': 'Islington', 'b19955819': 'Southwark', 'b18239237': 'Islington', 'b19874984': 'Kingston upon Thames', 'b19968607': 'Hounslow', 'b19881484': 'Newham', 'b19785318': 'Bexley', 'b19881903': 'Haringey', 'b19969958': 'Kingston upon Thames', 'b19970511': 'Kingston upon Thames', 'b19878187': 'Redbridge', 'b18246588': 'Southwark', 'b19954499': 'Lewisham', 'b19885210': 'Hackney', 'b19793649': 'Bromley', 'b18247234': 'City of Westminster', 'b19882841': 
'Merton', 'b19971205': 'Newham', 'b18237745': 'Greenwich', 'b18239584': 'Lambeth', 'b19823873': 'Waltham Forest', 'b20056813': 'Hackney', 'b18252102': 'Camden', 'b18221646': 'Greenwich', 'b19790016': 'Bexley', 'b18253672': 'City of London', 'b19786207': 'Croydon', 'b19789920': 'Bexley', 'b19789257': 'Enfield', 'b19824634': 'Harrow', 'b19823915': 'Islington', 'b1995623x': 'City of Westminster', 'b18249693': 'Hackney', 'b1799794x': 'Hackney', 'b1978675x': 'Croydon', 'b1979759x': 'Hillingdon', 'b18236832': 'Tower Hamlets', 'b19875277': 'Kingston upon Thames', 'b1823818x': 'Hammersmith and Fulham', 'b19880996': 'Sutton', 'b19883468': 'Tower Hamlets', 'b19882592': 'Sutton', 'b20056102': 'Islington', 'b19884850': 'City of London', 'b19875113': 'Kingston upon Thames', 'b18249206': 'Haringey', 'b19787406': 'Barking and Dagenham', 'b19787157': 'Croydon', 'b18251122': 'Wandsworth', 'b19783310': 'Ealing', 'b19791562': 'Hillingdon', 'b19796249': 'Brent', 'b18219020': 'Southwark', 'b17996417': 'Camden', 'b18110812': 'Hammersmith and Fulham', 'b19784831': 'Bromley', 'b18106432': 'Camden', 'b19795531': 'Barnet', 'b19791628': 'Hillingdon', 'b19822790': 'Tower Hamlets', 'b17997367': 'Southwark', 'b19885465': 'Hackney', 'b18111142': 'Tower Hamlets', 'b19885453': 'Hackney', 'b1995377x': 'Hammersmith and Fulham', 'b19882336': 'Merton', 'b18245699': 'City of Westminster', 'b18245924': 'Tower Hamlets', 'b1997145x': 'Merton', 'b18222869': 'Tower Hamlets', 'b19791197': 'Havering', 'b18236650': 'Tower Hamlets', 'b18235876': 'Kensington and Chelsea', 'b19874601': 'Newham', 'b18245079': 'Lewisham', 'b19788575': 'Barnet', 'b18043471': 'Hackney', 'b19822133': 'Hackney', 'b19881733': 'Haringey', 'b19785124': 'Bexley', 'b18249978': 'Hackney', 'b18045339': 'Islington', 'b19876117': 'Redbridge', 'b19794666': 'Hillingdon', 'b19954864': 'Tower Hamlets', 'b19821530': 'Bromley', 'b19877870': 'Ealing', 'b19881228': 'Sutton', 'b19885027': 'Islington', 'b19790570': 'Barnet', 'b19969727': 'Hounslow', 
'b1824600x': 'Tower Hamlets', 'b18106699': 'Hackney', 'b19788757': 'Enfield', 'b19792712': 'Waltham Forest', 'b1799973x': 'Haringey', 'b19823812': 'Greenwich', 'b1987764x': 'Hounslow', 'b18238968': 'Camden', 'b18250245': 'Hackney', 'b19969892': 'Redbridge', 'b19794009': 'Havering', 'b18247751': 'City of Westminster', 'b17999224': 'Hackney', 'b19880170': 'Hillingdon', 'b17996909': 'Kensington and Chelsea', 'b19794599': 'Hillingdon', 'b18247532': 'City of Westminster', 'b19879970': 'Richmond upon Thame', 'b19976008': 'Brent', 'b19971059': 'Waltham Forest', 'b19784156': 'Barking and Dagenham', 'b18249590': 'Lambeth', 'b19883675': 'Tower Hamlets', 'b19883857': 'Sutton', 'b19796742': 'Brent', 'b19878370': 'Redbridge', 'b19970894': 'Richmond upon Thame', 'b18042211': 'Kensington and Chelsea', 'b19884084': 'City of London', 'b18253441': 'City of London', 'b19789002': 'Enfield', 'b19797291': 'Haringey', 'b1823768x': 'Greenwich', 'b19970092': 'Kingston upon Thames', 'b18251353': 'Camden', 'b19796523': 'Brent', 'b19879398': 'Richmond upon Thame', 'b17950594': 'Greenwich', 'b19957087': 'Southwark', 'b18237356': 'Islington', 'b19874959': 'Kingston upon Thames', 'b19956423': 'City of Westminster', 'b19956939': 'Southwark', 'b17997884': 'Camden', 'b18122073': 'Southwark', 'b18219494': 'Southwark', 'b19879027': 'Richmond upon Thame', 'b19878515': 'Redbridge', 'b18239778': 'Lambeth', 'b19797436': 'Haringey', 'b18252710': 'London County Council', 'b19787364': 'Barking and Dagenham', 'b19884412': 'City of London', 'b19885593': 'Hackney', 'b19885842': 'Hackney', 'b19790806': 'Barnet', 'b19786943': 'Croydon', 'b19786414': 'Bexley', 'b18245262': 'City of Westminster', 'b19823071': 'Tower Hamlets', 'b18038268': 'City of London', 'b18238798': 'Camden', 'b18236339': 'Wandsworth', 'b18106973': 'Hammersmith and Fulham', 'b19783474': 'Ealing', 'b1995668x': 'City of Westminster', 'b18048353': 'Camden', 'b18045108': 'Lewisham', 'b18235682': 'Kensington and Chelsea', 'b19792529': 'Lewisham', 
'b18121524': 'Greenwich', 'b18237824': 'Greenwich', 'b19793467': 'Bromley', 'b18236480': 'Wandsworth', 'b18239821': 'Lambeth', 'b18238427': 'Camden', 'b1978580x': 'Bromley', 'b18235463': 'Hammersmith and Fulham', 'b20274907': 'Harrow', 'b18245481': 'City of Westminster', 'b18248962': 'Haringey', 'b19884643': 'City of London', 'b19955303': 'City of Westminster', 'b19955650': 'Southwark', 'b19822303': 'Hackney', 'b19881034': 'Sutton', 'b18111282': 'Greenwich', 'b19953434': 'Hammersmith and Fulham', 'b19877432': 'Merton', 'b19880340': 'Hounslow', 'b19877341': 'Merton', 'b19880613': 'Hounslow', 'b19795257': 'Camden', 'b19784314': 'Barking and Dagenham', 'b19788320': 'Barnet', 'b19884503': 'City of London', 'b19795270': 'Barnet', 'b18251432': 'Camden', 'b19875496': 'Southwark', 'b18251924': 'Camden', 'b18254159': 'City of Westminster', 'b18254408': 'City of Westminster', 'b19787959': 'Ealing', 'b1801835x': 'Tower Hamlets', 'b1995430x': 'Lambeth', 'b19793054': 'Waltham Forest', 'b19793728': 'Bromley', 'b19791380': 'Hillingdon', 'b18220137': 'Lewisham', 'b18239171': 'Islington', 'b18108945': 'Islington', 'b19881691': 'Haringey', 'b19878965': 'Richmond upon Thame', 'b18246618': 'Southwark', 'b19878618': 'Redbridge', 'b1978949x': 'Enfield', 'b19879568': 'Waltham Forest', 'b18239213': 'Islington', 'b19789336': 'Enfield', 'b18248500': 'Islington', 'b1979289x': 'Waltham Forest', 'b18220393': 'Wandsworth', 'b19881964': 'Haringey', 'b19785379': 'Bexley', 'b18251626': 'Camden', 'b19885271': 'Hackney', 'b18250385': 'Wandsworth', 'b1979003x': 'Bexley', 'b19790387': 'Barnet', 'b19969971': 'Kingston upon Thames', 'b18253386': 'City of London', 'b19879702': 'Waltham Forest', 'b19882014': 'Haringey', 'b18247258': 'City of Westminster', 'b19971266': 'Newham', 'b19795385': 'Barnet', 'b19797606': 'Hillingdon', 'b19823897': 'Islington', 'b19876919': 'Waltham Forest', 'b18238221': 'Hammersmith and Fulham', 'b19883997': 'Sutton', 'b19789270': 'Enfield', 'b19786773': 'Croydon', 'b1988333x': 
'Tower Hamlets', 'b1824936x': 'Haringey', 'b19883092': 'Kingston upon Thames', 'b18039224': 'Tower Hamlets', 'b19794800': 'Enfield', 'b19883195': 'Kingston upon Thames', 'b18238166': 'Hammersmith and Fulham', 'b1825312x': 'City of London', 'b19882828': 'Merton', 'b19956216': 'City of Westminster', 'b19956745': 'City of Westminster', 'b18236819': 'Tower Hamlets', 'b19883444': 'Tower Hamlets', 'b19875253': 'Kingston upon Thames', 'b19880972': 'Sutton', 'b18253659': 'City of London', 'b19884874': 'City of London', 'b19823794': 'Greenwich', 'b20056692': 'Hackney', 'b20056163': 'Islington', 'b1979504x': 'Enfield', 'b18252527': 'London County Council', 'b19791549': 'Hillingdon', 'b18120738': 'Wandsworth', 'b19787133': 'Croydon', 'b19791811': 'Barnet', 'b19796262': 'Brent', 'b19783334': 'Ealing', 'b18219007': 'Southwark', 'b18110836': 'Islington', 'b19795518': 'Barnet', 'b18044335': 'Hackney', 'b19791604': 'Hillingdon', 'b1978742x': 'Barking and Dagenham', 'b19885477': 'Hackney', 'b19882312': 'Merton', 'b18245900': 'Tower Hamlets', 'b18245675': 'City of Westminster', 'b19884266': 'Port of London', 'b1982211x': 'Hackney', 'b18236674': 'Tower Hamlets', 'b19876282': 'Redbridge', 'b1824628x': 'Tower Hamlets', 'b18250671': 'Wandsworth', 'b19790636': 'Barnet', 'b19823538': 'Greenwich', 'b19784818': 'Bromley', 'b19874662': 'Newham', 'b19788514': 'Barnet', 'b19955315': 'City of Westminster', 'b18245018': 'Lewisham', 'b19954980': 'Tower Hamlets', 'b19785148': 'Bexley', 'b19881204': 'Sutton', 'b19881757': 'Haringey', 'b19876130': 'Redbridge', 'b18039820': 'Hackney', 'b19954840': 'Tower Hamlets', 'b19794642': 'Hillingdon', 'b19794393': 'Hillingdon', 'b18045315': 'Islington', 'b19969703': 'Hounslow', 'b19885040': 'Tower Hamlets', 'b19790557': 'Barnet', 'b1823589x': 'Kensington and Chelsea', 'b1982421x': 'Kensington and Chelsea', 'b18039790': 'Hackney', 'b19794022': 'Havering', 'b18238944': 'Camden', 'b19877080': 'Waltham Forest', 'b18247738': 'City of Westminster', 'b1988431x': 'Port 
of London', 'b19784764': 'Barking and Dagenham', 'b19875538': 'Camden', 'b19875861': 'Camden', 'b19955467': 'Southwark', 'b18249954': 'Hackney', 'b19954517': 'Lewisham', 'b18246424': 'Southwark', 'b19783930': 'Barnet', 'b19880406': 'Hounslow', 'b19880157': 'Hillingdon', 'b19877663': 'Hounslow', 'b1824788x': 'City of Westminster', 'b19879957': 'Richmond upon Thame', 'b20274804': 'Harrow', 'b18247088': 'Southwark', 'b19971072': 'Waltham Forest', 'b18254366': 'City of Westminster', 'b19880145': 'Hillingdon', 'b19883833': 'Sutton', 'b19796729': 'Brent', 'b19796894': 'Brent', 'b18250841': 'Wandsworth', 'b19793558': 'Bromley', 'b19783681': 'Ealing', 'b18253465': 'City of London', 'b19789063': 'Enfield', 'b19876464': 'Waltham Forest', 'b19796092': 'Brent', 'b17998402': 'Greenwich', 'b17998153': 'Camden', 'b18248779': 'Islington', 'b18237332': 'Islington', 'b19956915': 'Southwark', 'b19882129': 'Merton', 'b1997579x': 'Richmond upon Thame', 'b19879040': 'Richmond upon Thame', 'b19969144': 'Barnet', 'b1979082x': 'Barnet', 'b19968553': 'Hounslow', 'b18239791': 'Lambeth', 'b19792177': 'Havering', 'b19797412': 'Haringey', 'b18252734': 'London County Council', 'b19787303': 'Croydon', 'b18250464': 'Wandsworth', 'b19885866': 'Hackney', 'b18248366': 'City of Westminster', 'b19786438': 'Bexley', 'b19874479': 'Harrow', 'b18038207': 'City of Westminster', 'b18236315': 'Wandsworth', 'b19956009': 'Southwark', 'b19792542': 'Waltham Forest', 'b18121500': 'City of London', 'b18222651': 'City of Westminster', 'b19823393': 'Greenwich', 'b1982287x': 'Tower Hamlets', 'b18238440': 'Camden', 'b19953185': 'Southwark', 'b18235402': 'Hammersmith and Fulham', 'b18248949': 'Camden', 'b19952855': 'Tower Hamlets', 'b18106572': 'Lewisham', 'b18038670': 'Kensington and Chelsea', 'b18249012': 'Haringey', 'b19955674': 'Southwark', 'b19875708': 'Camden', 'b19822327': 'Hackney', 'b19953987': 'Tower Hamlets', 'b18111269': 'Greenwich', 'b18045121': 'Lewisham', 'b19953458': 'Hammersmith and Fulham', 
'b19788307': 'Ealing', 'b19788654': 'Enfield', 'b19784375': 'Barking and Dagenham', 'b19787935': 'Ealing', 'b19955583': 'Southwark', 'b18254172': 'City of Westminster', 'b18251419': 'Camden', 'b18223527': 'Kensington and Chelsea', 'b19794253': 'Havering', 'b18218751': 'Southwark', 'b1978448x': 'Barking and Dagenham', 'b19793704': 'Bromley', 'b18239158': 'Islington', 'b18254287': 'City of Westminster', 'b1979101x': 'Ealing', 'b19821475': 'Bromley', 'b19877419': 'Merton', 'b19885787': 'Hackney', 'b18246631': 'Southwark', 'b19880364': 'Hounslow', 'b18248561': 'Islington', 'b19955856': 'Southwark', 'b19881411': 'Newham', 'b19881940': 'Haringey', 'b19785355': 'Bexley', 'b19885258': 'Hackney', 'b19954189': 'Lambeth', 'b19878497': 'Redbridge', 'b19969910': 'Redbridge', 'b19880091': 'Richmond upon Thame', 'b19789476': 'Enfield', 'b19976215': 'Kingston upon Thames', 'b19876932': 'Waltham Forest', 'b18221609': 'Greenwich', 'b18238208': 'Hammersmith and Fulham', 'b19790053': 'Bexley', 'b19879544': 'Waltham Forest'} | apache-2.0 |
rysson/filmkodi | plugin.video.fanfilm/resources/lib/resolvers/okru.py | 2 | 2736 | # -*- coding: utf-8 -*-
'''
FanFilm Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64, json
from resources.lib.libraries import client
from resources.lib.libraries import control
# Desktop browser User-Agent sent with every metadata request and appended to
# the resolved stream URL.  NOTE(review): despite carrying Chrome tokens, the
# trailing "OPR/34" marks this as an Opera UA string, matching the name.
OPERA_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36 OPR/34.0.2036.50'
# Request headers reused by __get_Metadata() and urlencoded into the final
# playable URL (kodi "url|headers" convention used in resolve()).
header = {"User-Agent": OPERA_USER_AGENT}
# ok.ru quality labels -> vertical resolution, kept as strings because
# resolve() compares them via int() and returns them as dict keys.
qual_map = {'full': '1080', 'hd': '720', 'sd': '480', 'low': '360', 'lowest': '240', 'mobile': '144'}
def __replaceQuality(qual):
    """Translate an ok.ru quality label (e.g. 'hd') into a resolution string.

    Unknown labels map to '000' so callers can still compare via int().
    """
    label = qual.lower()
    if label in qual_map:
        return qual_map[label]
    return '000'
def __get_Metadata(media_id):
    """Fetch the ok.ru player metadata for *media_id*.

    Returns a dict with a single key, 'urls', holding the raw video-variant
    entries from the player's JSON (each entry has at least 'name' and 'url').
    """
    metadata_url = "http://www.ok.ru/dk?cmd=videoPlayerMetadata&mid=" + media_id
    raw = client.request(metadata_url, headers=header)
    payload = json.loads(raw)
    return {'urls': list(payload['videos'])}
def resolve(url):
    """Resolve an ok.ru / odnoklassniki.ru video (or embed) URL to a
    playable stream URL.

    Picks the highest available quality and appends the urlencoded request
    headers after a '|' (kodi's "url|headers" convention).  Returns the
    encoded URL string, or None on any failure — matching the contract of
    the other resolvers in this package.

    Fixes over the previous version:
      * removed unreachable code after the returns, which referenced an
        undefined ``result`` and an undefined ``ResolverError``;
      * an entry whose quality label is unknown (mapped to '000') no longer
        triggers a KeyError/None path — the best key is now computed from
        the collected sources themselves;
      * narrowed the bare ``except:`` to ``except Exception``.
    """
    try:
        media_id = re.compile(
            '(?://|\.)(ok.ru|odnoklassniki.ru)/(?:videoembed|video)/(.+)'
        ).findall(url)[0][1]
        vids = __get_Metadata(media_id)

        # quality string -> playable url (with headers appended)
        sources = {}
        for entry in vids['urls']:
            quality = __replaceQuality(entry['name'])
            sources[quality] = entry['url'] + '|' + urllib.urlencode(header)

        if not sources:
            return
        # Highest numeric quality wins ('1080' > '720' > ... > '000').
        best = max(sources, key=int)
        return sources[best].encode('utf-8')
    except Exception:
        return
| apache-2.0 |
HybridF5/jacket | jacket/tests/storage/unit/test_api_urlmap.py | 1 | 12850 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for storage.api.storage.urlmap.py
"""
from jacket.api.storage.storage import urlmap
from jacket.storage import test
class TestParseFunctions(test.TestCase):
    """Unit tests for the header-parsing helpers in urlmap."""

    def test_unquote_header_value_without_quotes(self):
        # an unquoted token must come back untouched
        token = 'TestString'
        self.assertEqual(token, urlmap.unquote_header_value(token))

    def test_unquote_header_value_with_quotes(self):
        # surrounding double quotes are stripped
        self.assertEqual('TestString',
                         urlmap.unquote_header_value('"TestString"'))

    def test_parse_list_header(self):
        parsed = urlmap.parse_list_header('token, "quoted value"')
        self.assertEqual(['token', 'quoted value'], parsed)

    def test_parse_options_header(self):
        header_value = 'Content-Type: text/html; mimetype=text/html'
        self.assertEqual(('Content-Type:', {'mimetype': 'text/html'}),
                         urlmap.parse_options_header(header_value))

    def test_parse_options_header_without_value(self):
        # None is treated like an empty header
        self.assertEqual(('', {}), urlmap.parse_options_header(None))
class TestAccept(test.TestCase):
    """Unit tests for urlmap.Accept content negotiation."""

    def test_best_match_ValueError(self):
        # a non-numeric q value must not blow up; no match is returned
        accept = urlmap.Accept('text/html; q=some_invalud_value')
        self.assertEqual((None, {}), accept.best_match(['text/html']))

    def test_best_match(self):
        accept = urlmap.Accept(
            '*/*; q=0.7, application/json; q=0.7, text/html; q=-0.8')
        match = accept.best_match(
            ['application/json', 'application/xml', 'text/html'])
        self.assertEqual(('application/json', {'q': '0.7'}), match)

    def test_match_mask_one_asterisk(self):
        accept = urlmap.Accept('text/*; q=0.7')
        self.assertEqual(('text/html', {'q': '0.7'}),
                         accept.best_match(['text/html']))

    def test_match_mask_two_asterisk(self):
        accept = urlmap.Accept('*/*; q=0.7')
        self.assertEqual(('text/html', {'q': '0.7'}),
                         accept.best_match(['text/html']))

    def test_match_mask_no_asterisk(self):
        # an exact media type that is not offered yields no match
        accept = urlmap.Accept('application/json; q=0.7')
        self.assertEqual((None, {}), accept.best_match(['text/html']))

    def test_content_type_params(self):
        accept = urlmap.Accept('application/xml; q=0.1, application/json;'
                               ' q=0.2, text/html; q=0.3')
        self.assertEqual({'q': '0.2'},
                         accept.content_type_params('application/json'))

    def test_content_type_params_wrong_content_type(self):
        accept = urlmap.Accept('application/xml; q=0.1, text/html; q=0.1')
        self.assertEqual({}, accept.content_type_params('application/json'))
class TestUrlMapFactory(test.TestCase):
    """Tests for urlmap.urlmap_factory not_found_app resolution.

    The paste loader is replaced by a mox mock; each test records the
    expected loader.get_app() calls before invoking the factory.
    """

    def setUp(self):
        super(TestUrlMapFactory, self).setUp()
        # global paste config: a fallback app plus one domain/port mapping
        self.global_conf = {'not_found_app': 'app_global',
                            'domain hoobar.com port 10 /': 'some_app_global'}
        self.loader = self.mox.CreateMockAnything()

    def test_not_found_app_in_local_conf(self):
        # a not_found_app given locally takes precedence over the global one
        local_conf = {'not_found_app': 'app_local',
                      'domain foobar.com port 20 /': 'some_app_local'}
        self.loader.get_app('app_local', global_conf=self.global_conf).\
            AndReturn('app_local_loader')
        self.loader.get_app('some_app_local', global_conf=self.global_conf).\
            AndReturn('some_app_loader')
        self.mox.ReplayAll()
        expected_urlmap = urlmap.URLMap(not_found_app='app_local_loader')
        expected_urlmap['http://foobar.com:20'] = 'some_app_loader'
        self.assertEqual(expected_urlmap,
                         urlmap.urlmap_factory(self.loader, self.global_conf,
                                               **local_conf))

    def test_not_found_app_not_in_local_conf(self):
        # without a local override the global not_found_app is loaded
        local_conf = {'domain foobar.com port 20 /': 'some_app_local'}
        self.loader.get_app('app_global', global_conf=self.global_conf).\
            AndReturn('app_global_loader')
        self.loader.get_app('some_app_local', global_conf=self.global_conf).\
            AndReturn('some_app_returned_by_loader')
        self.mox.ReplayAll()
        expected_urlmap = urlmap.URLMap(not_found_app='app_global_loader')
        expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\
            '_by_loader'
        self.assertEqual(expected_urlmap,
                         urlmap.urlmap_factory(self.loader, self.global_conf,
                                               **local_conf))

    def test_not_found_app_is_none(self):
        # an explicit None disables the fallback app entirely
        local_conf = {'not_found_app': None,
                      'domain foobar.com port 20 /': 'some_app_local'}
        self.loader.get_app('some_app_local', global_conf=self.global_conf).\
            AndReturn('some_app_returned_by_loader')
        self.mox.ReplayAll()
        expected_urlmap = urlmap.URLMap(not_found_app=None)
        expected_urlmap['http://foobar.com:20'] = 'some_app_returned'\
            '_by_loader'
        self.assertEqual(expected_urlmap,
                         urlmap.urlmap_factory(self.loader, self.global_conf,
                                               **local_conf))
class TestURLMap(test.TestCase):
    """Tests for the urlmap.URLMap WSGI dispatcher itself."""

    def setUp(self):
        super(TestURLMap, self).setUp()
        self.urlmap = urlmap.URLMap()
        # environ as received by the dispatcher (before SCRIPT_NAME fix-up)
        self.input_environ = {'HTTP_ACCEPT': "application/json;"
                              "version=9.0", 'REQUEST_METHOD': "GET",
                              'CONTENT_TYPE': 'application/xml',
                              'SCRIPT_NAME': '/scriptname',
                              'PATH_INFO': "/resource.xml"}
        # environ expected to be handed to the wrapped application
        self.environ = {'HTTP_ACCEPT': "application/json;"
                        "version=9.0", 'REQUEST_METHOD': "GET",
                        'CONTENT_TYPE': 'application/xml',
                        'SCRIPT_NAME': '/scriptname/app_url',
                        'PATH_INFO': "/resource.xml"}

    def test_match_with_applications(self):
        # host/port mismatch yields no application
        self.urlmap[('http://10.20.30.40:50', '/path/somepath')] = 'app'
        self.assertEqual((None, None),
                         self.urlmap._match('20.30.40.50', '20',
                                            'path/somepath'))

    def test_match_without_applications(self):
        # empty map never matches
        self.assertEqual((None, None),
                         self.urlmap._match('host', 20, 'app_url/somepath'))

    def test_match_path_info_equals_app_url(self):
        self.urlmap[('http://20.30.40.50:60', '/app_url/somepath')] = 'app'
        self.assertEqual(('app', '/app_url/somepath'),
                         self.urlmap._match('http://20.30.40.50', '60',
                                            '/app_url/somepath'))

    def test_match_path_info_equals_app_url_many_app(self):
        # the longest registered prefix wins
        self.urlmap[('http://20.30.40.50:60', '/path')] = 'app1'
        self.urlmap[('http://20.30.40.50:60', '/path/somepath')] = 'app2'
        self.urlmap[('http://20.30.40.50:60', '/path/somepath/elsepath')] = \
            'app3'
        self.assertEqual(('app3', '/path/somepath/elsepath'),
                         self.urlmap._match('http://20.30.40.50', '60',
                                            '/path/somepath/elsepath'))

    def test_set_script_name(self):
        # wrapper must rewrite SCRIPT_NAME before delegating to the app
        app = self.mox.CreateMockAnything()
        start_response = self.mox.CreateMockAnything()
        app.__call__(self.environ, start_response).AndReturn('value')
        self.mox.ReplayAll()
        wrap = self.urlmap._set_script_name(app, '/app_url')
        self.assertEqual('value', wrap(self.input_environ, start_response))

    def test_munge_path(self):
        # wrapper must move the app prefix from PATH_INFO to SCRIPT_NAME
        app = self.mox.CreateMockAnything()
        start_response = self.mox.CreateMockAnything()
        app.__call__(self.environ, start_response).AndReturn('value')
        self.mox.ReplayAll()
        wrap = self.urlmap._munge_path(app, '/app_url/resource.xml',
                                       '/app_url')
        self.assertEqual('value', wrap(self.input_environ, start_response))

    def test_content_type_strategy_without_version(self):
        # no version in CONTENT_TYPE -> strategy cannot dispatch
        self.assertIsNone(self.urlmap._content_type_strategy('host', 20,
                                                             self.environ))

    def test_content_type_strategy_with_version(self):
        # 'version=2.0' in CONTENT_TYPE routes to the /v2.0 app
        environ = {'HTTP_ACCEPT': "application/vnd.openstack.melange+xml;"
                   "version=9.0", 'REQUEST_METHOD': "GET",
                   'PATH_INFO': "/resource.xml",
                   'CONTENT_TYPE': 'application/xml; version=2.0'}
        self.urlmap[('http://10.20.30.40:50', '/v2.0')] = 'app'
        self.mox.StubOutWithMock(self.urlmap, '_set_script_name')
        self.urlmap._set_script_name('app', '/v2.0').AndReturn('value')
        self.mox.ReplayAll()
        self.assertEqual('value',
                         self.urlmap._content_type_strategy(
                             'http://10.20.30.40', '50', environ))

    def test_path_strategy_wrong_path_info(self):
        # no extension and no registered app -> nothing matches
        self.assertEqual((None, None, None),
                         self.urlmap._path_strategy('http://10.20.30.40', '50',
                                                    '/resource'))

    def test_path_strategy_mime_type_only(self):
        # extension resolves the mime type even without a registered app
        self.assertEqual(('application/xml', None, None),
                         self.urlmap._path_strategy('http://10.20.30.40', '50',
                                                    '/resource.xml'))

    def test_path_strategy(self):
        self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app'
        self.mox.StubOutWithMock(self.urlmap, '_munge_path')
        self.urlmap._munge_path('app', '/path/elsepath/resource.xml',
                                '/path/elsepath').AndReturn('value')
        self.mox.ReplayAll()
        self.assertEqual(
            ('application/xml', 'value', '/path/elsepath'),
            self.urlmap._path_strategy('http://10.20.30.40', '50',
                                       '/path/elsepath/resource.xml'))

    def test_path_strategy_wrong_mime_type(self):
        # unknown extension: app still found, mime type is None
        self.urlmap[('http://10.20.30.40:50', '/path/elsepath/')] = 'app'
        self.mox.StubOutWithMock(self.urlmap, '_munge_path')
        self.urlmap._munge_path('app', '/path/elsepath/resource.abc',
                                '/path/elsepath').AndReturn('value')
        self.mox.ReplayAll()
        self.assertEqual(
            (None, 'value', '/path/elsepath'),
            self.urlmap._path_strategy('http://10.20.30.40', '50',
                                       '/path/elsepath/resource.abc'))

    def test_accept_strategy_version_not_in_params(self):
        # Accept header without a version parameter -> mime type only
        environ = {'HTTP_ACCEPT': "application/xml; q=0.1, application/json; "
                   "q=0.2", 'REQUEST_METHOD': "GET",
                   'PATH_INFO': "/resource.xml",
                   'CONTENT_TYPE': 'application/xml; version=2.0'}
        self.assertEqual(('application/xml', None),
                         self.urlmap._accept_strategy('http://10.20.30.40',
                                                      '50',
                                                      environ,
                                                      ['application/xml']))

    def test_accept_strategy_version(self):
        # version parameter in Accept routes to the matching versioned app
        environ = {'HTTP_ACCEPT': "application/xml; q=0.1; version=1.0,"
                   "application/json; q=0.2; version=2.0",
                   'REQUEST_METHOD': "GET", 'PATH_INFO': "/resource.xml",
                   'CONTENT_TYPE': 'application/xml; version=2.0'}
        self.urlmap[('http://10.20.30.40:50', '/v1.0')] = 'app'
        self.mox.StubOutWithMock(self.urlmap, '_set_script_name')
        self.urlmap._set_script_name('app', '/v1.0').AndReturn('value')
        self.mox.ReplayAll()
        self.assertEqual(('application/xml', 'value'),
                         self.urlmap._accept_strategy('http://10.20.30.40',
                                                      '50',
                                                      environ,
                                                      ['application/xml']))
| apache-2.0 |
wwj718/murp-edx | lms/djangoapps/dashboard/management/commands/tests/test_git_add_course.py | 5 | 8686 | """
Provide tests for git_add_course management command.
"""
import logging
import os
import shutil
import StringIO
import subprocess
import unittest
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.store_utilities import delete_course
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
import dashboard.git_import as git_import
from dashboard.git_import import GitImportError
# Throw-away MongoDB settings for the modulestore log used by these tests
# (empty credentials -> unauthenticated local instance).
TEST_MONGODB_LOG = {
    'host': 'localhost',
    'user': '',
    'password': '',
    'db': 'test_xlog',
}

# Copy of the feature flags with SSL client-certificate auth switched on.
FEATURES_WITH_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITH_SSL_AUTH['AUTH_USE_CERTIFICATES'] = True
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
@override_settings(MONGODB_LOG=TEST_MONGODB_LOG)
@unittest.skipUnless(settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'),
                     "ENABLE_SYSADMIN_DASHBOARD not set")
class TestGitAddCourse(ModuleStoreTestCase):
    """
    Tests the git_add_course management command for proper functions.

    NOTE(review): several tests clone TEST_REPO from GitHub, so they
    require network access and a writable GIT_REPO_DIR.
    """
    # public course repo used for live clone tests
    TEST_REPO = 'https://github.com/mitocw/edx4edx_lite.git'
    TEST_COURSE = 'MITx/edx4edx/edx4edx'
    # long-lived branch kept in the repo specifically for these tests
    TEST_BRANCH = 'testing_do_not_delete'
    TEST_BRANCH_COURSE = SlashSeparatedCourseKey('MITx', 'edx4edx_branch', 'edx4edx')
    GIT_REPO_DIR = getattr(settings, 'GIT_REPO_DIR')

    def assertCommandFailureRegexp(self, regex, *args):
        """
        Convenience function for testing command failures
        """
        with self.assertRaises(SystemExit):
            with self.assertRaisesRegexp(CommandError, regex):
                call_command('git_add_course', *args,
                             stderr=StringIO.StringIO())

    def test_command_args(self):
        """
        Validate argument checking
        """
        self.assertCommandFailureRegexp(
            'This script requires at least one argument, the git URL')
        self.assertCommandFailureRegexp(
            'Expected no more than three arguments; recieved 4',
            'blah', 'blah', 'blah', 'blah')
        self.assertCommandFailureRegexp(
            'Repo was not added, check log output for details',
            'blah')
        # Test successful import from command
        if not os.path.isdir(self.GIT_REPO_DIR):
            os.mkdir(self.GIT_REPO_DIR)
        self.addCleanup(shutil.rmtree, self.GIT_REPO_DIR)
        # Make a course dir that will be replaced with a symlink
        # while we are at it.
        if not os.path.isdir(self.GIT_REPO_DIR / 'edx4edx'):
            os.mkdir(self.GIT_REPO_DIR / 'edx4edx')
        call_command('git_add_course', self.TEST_REPO,
                     self.GIT_REPO_DIR / 'edx4edx_lite')
        # Test with all three args (branch)
        call_command('git_add_course', self.TEST_REPO,
                     self.GIT_REPO_DIR / 'edx4edx_lite',
                     self.TEST_BRANCH)

    def test_add_repo(self):
        """
        Various exit path tests for test_add_repo
        """
        with self.assertRaisesRegexp(GitImportError, GitImportError.NO_DIR):
            git_import.add_repo(self.TEST_REPO, None, None)
        os.mkdir(self.GIT_REPO_DIR)
        self.addCleanup(shutil.rmtree, self.GIT_REPO_DIR)
        with self.assertRaisesRegexp(GitImportError, GitImportError.URL_BAD):
            git_import.add_repo('foo', None, None)
        with self.assertRaisesRegexp(GitImportError, GitImportError.CANNOT_PULL):
            git_import.add_repo('file:///foobar.git', None, None)
        # Test git repo that exists, but is "broken"
        bare_repo = os.path.abspath('{0}/{1}'.format(settings.TEST_ROOT, 'bare.git'))
        os.mkdir(bare_repo)
        self.addCleanup(shutil.rmtree, bare_repo)
        subprocess.check_output(['git', '--bare', 'init', ], stderr=subprocess.STDOUT,
                                cwd=bare_repo)
        with self.assertRaisesRegexp(GitImportError, GitImportError.BAD_REPO):
            git_import.add_repo('file://{0}'.format(bare_repo), None, None)

    def test_detached_repo(self):
        """
        Test repo that is in detached head state.
        """
        repo_dir = self.GIT_REPO_DIR
        # Test successful import from command
        try:
            os.mkdir(repo_dir)
        except OSError:
            # directory may already exist from a previous run
            pass
        self.addCleanup(shutil.rmtree, repo_dir)
        git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', None)
        # detach HEAD so the next pull must fail
        subprocess.check_output(['git', 'checkout', 'HEAD~2', ],
                                stderr=subprocess.STDOUT,
                                cwd=repo_dir / 'edx4edx_lite')
        with self.assertRaisesRegexp(GitImportError, GitImportError.CANNOT_PULL):
            git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', None)

    def test_branching(self):
        """
        Exercise branching code of import
        """
        repo_dir = self.GIT_REPO_DIR
        # Test successful import from command
        if not os.path.isdir(repo_dir):
            os.mkdir(repo_dir)
        self.addCleanup(shutil.rmtree, repo_dir)
        # Checkout non existent branch
        with self.assertRaisesRegexp(GitImportError, GitImportError.REMOTE_BRANCH_MISSING):
            git_import.add_repo(self.TEST_REPO, repo_dir / 'edx4edx_lite', 'asdfasdfasdf')
        # Checkout new branch
        git_import.add_repo(self.TEST_REPO,
                            repo_dir / 'edx4edx_lite',
                            self.TEST_BRANCH)
        def_ms = modulestore()
        # Validate that it is different than master
        self.assertIsNotNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
        # Attempt to check out the same branch again to validate branch choosing
        # works
        git_import.add_repo(self.TEST_REPO,
                            repo_dir / 'edx4edx_lite',
                            self.TEST_BRANCH)
        # Delete to test branching back to master
        delete_course(def_ms, contentstore(),
                      self.TEST_BRANCH_COURSE,
                      True)
        self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
        git_import.add_repo(self.TEST_REPO,
                            repo_dir / 'edx4edx_lite',
                            'master')
        self.assertIsNone(def_ms.get_course(self.TEST_BRANCH_COURSE))
        self.assertIsNotNone(def_ms.get_course(SlashSeparatedCourseKey.from_deprecated_string(self.TEST_COURSE)))

    def test_branch_exceptions(self):
        """
        This wil create conditions to exercise bad paths in the switch_branch function.
        """
        # create bare repo that we can mess with and attempt an import
        bare_repo = os.path.abspath('{0}/{1}'.format(settings.TEST_ROOT, 'bare.git'))
        os.mkdir(bare_repo)
        self.addCleanup(shutil.rmtree, bare_repo)
        subprocess.check_output(['git', '--bare', 'init', ], stderr=subprocess.STDOUT,
                                cwd=bare_repo)
        # Build repo dir
        repo_dir = self.GIT_REPO_DIR
        if not os.path.isdir(repo_dir):
            os.mkdir(repo_dir)
        self.addCleanup(shutil.rmtree, repo_dir)
        rdir = '{0}/bare'.format(repo_dir)
        with self.assertRaisesRegexp(GitImportError, GitImportError.BAD_REPO):
            git_import.add_repo('file://{0}'.format(bare_repo), None, None)
        # Get logger for checking strings in logs
        output = StringIO.StringIO()
        test_log_handler = logging.StreamHandler(output)
        test_log_handler.setLevel(logging.DEBUG)
        glog = git_import.log
        glog.addHandler(test_log_handler)
        # Move remote so fetch fails
        shutil.move(bare_repo, '{0}/not_bare.git'.format(settings.TEST_ROOT))
        try:
            git_import.switch_branch('master', rdir)
        except GitImportError:
            self.assertIn('Unable to fetch remote', output.getvalue())
        shutil.move('{0}/not_bare.git'.format(settings.TEST_ROOT), bare_repo)
        output.truncate(0)
        # Replace origin with a different remote
        subprocess.check_output(
            ['git', 'remote', 'rename', 'origin', 'blah', ],
            stderr=subprocess.STDOUT, cwd=rdir
        )
        with self.assertRaises(GitImportError):
            git_import.switch_branch('master', rdir)
        self.assertIn('Getting a list of remote branches failed', output.getvalue())
| agpl-3.0 |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Screens/DVD.py | 3 | 20223 | import os
from enigma import eTimer, iPlayableService, iServiceInformation, eServiceReference, iServiceKeys, getDesktop
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.HelpMenu import HelpableScreen
from Screens.InfoBarGenerics import InfoBarSeek, InfoBarPVRState, InfoBarCueSheetSupport, InfoBarShowHide, InfoBarNotifications, InfoBarAudioSelection, InfoBarSubtitleSupport
from Components.ActionMap import ActionMap, NumberActionMap, HelpableActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
from Components.config import config
from Tools.Directories import pathExists
from Components.Harddisk import harddiskmanager
# NOTE(review): module-level state; appears unused in this chunk — presumably
# remembers the last browsed path across player instances. Verify before removal.
lastpath = ""
class DVDSummary(Screen):
    """LCD summary screen for the DVD player (title/time/chapter labels)."""

    def __init__(self, session, parent):
        Screen.__init__(self, session, parent)
        self["Title"] = Label("")
        self["Time"] = Label("")
        self["Chapter"] = Label("")

    def updateChapter(self, chapter):
        # called by DVDPlayer.setChapterLabel() with a short chapter string
        self["Chapter"].setText(chapter)

    def setTitle(self, title):
        self["Title"].setText(title)
class DVDOverlay(Screen):
    """Borderless transparent full-screen surface the DVD video/menu is drawn on."""

    def __init__(self, session, args = None, height = None):
        desktop_size = getDesktop(0).size()
        w = desktop_size.width()
        h = desktop_size.height()
        if height is not None:
            h = height
        # NOTE: the skin is rebuilt on the *class* attribute right before
        # Screen.__init__ consumes it, so it always reflects the current
        # desktop size (or the caller-supplied override height).
        DVDOverlay.skin = """<screen name="DVDOverlay" position="0,0" size="%d,%d" flags="wfNoBorder" zPosition="-1" backgroundColor="transparent" />""" %(w, h)
        Screen.__init__(self, session)
class ChapterZap(Screen):
    """Numeric chapter-selection dialog.

    Collects typed digits and closes with the chosen chapter number
    (0 on cancel).  Auto-confirms after a 3 second pause or as soon as
    four digits have been entered.
    """
    skin = """
	<screen name="ChapterZap" position="235,255" size="250,60" title="Chapter" >
	<widget name="chapter" position="35,15" size="110,25" font="Regular;23" />
	<widget name="number" position="145,15" size="80,25" halign="right" font="Regular;23" />
	</screen>"""

    def __init__(self, session, number):
        Screen.__init__(self, session)
        self.field = str(number)
        self["chapter"] = Label(_("Chapter:"))
        self["number"] = Label(self.field)
        keymap = {
            "cancel": self.quit,
            "ok": self.keyOK,
        }
        # every digit key feeds the same collector
        for digit in range(10):
            keymap[str(digit)] = self.keyNumberGlobal
        self["actions"] = NumberActionMap(["SetupActions"], keymap)
        self.Timer = eTimer()
        self.Timer.callback.append(self.keyOK)
        self.Timer.start(3000, True)

    def quit(self):
        # abort: stop the auto-confirm timer and report "no chapter"
        self.Timer.stop()
        self.close(0)

    def keyOK(self):
        self.Timer.stop()
        self.close(int(self["number"].getText()))

    def keyNumberGlobal(self, number):
        # restart the 3s auto-confirm window on every keypress
        self.Timer.start(3000, True)
        self.field += str(number)
        self["number"].setText(self.field)
        if len(self.field) >= 4:
            self.keyOK()
class DVDPlayer(Screen, InfoBarBase, InfoBarNotifications, InfoBarSeek, InfoBarPVRState, InfoBarShowHide, HelpableScreen, InfoBarCueSheetSupport, InfoBarAudioSelection, InfoBarSubtitleSupport):
ALLOW_SUSPEND = Screen.SUSPEND_PAUSES
ENABLE_RESUME_SUPPORT = True
def save_infobar_seek_config(self):
    # Remember the user's global seek settings so they can be restored when
    # the DVD player closes (see restore_infobar_seek_config).
    self.saved_config_speeds_forward = config.seek.speeds_forward.getValue()
    self.saved_config_speeds_backward = config.seek.speeds_backward.getValue()
    self.saved_config_enter_forward = config.seek.enter_forward.getValue()
    self.saved_config_enter_backward = config.seek.enter_backward.getValue()
    self.saved_config_seek_on_pause = config.seek.on_pause.getValue()
    self.saved_config_seek_speeds_slowmotion = config.seek.speeds_slowmotion.getValue()
def change_infobar_seek_config(self):
    # Temporarily override the global seek configuration with values
    # suitable for DVD trickplay; originals are restored on close.
    config.seek.speeds_forward.value = [2, 4, 6, 8, 16, 32, 64]
    config.seek.speeds_backward.value = [2, 4, 6, 8, 16, 32, 64]
    config.seek.speeds_slowmotion.value = [ 2, 3, 4, 6 ]
    config.seek.enter_forward.value = "2"
    config.seek.enter_backward.value = "2"
    config.seek.on_pause.value = "play"
def restore_infobar_seek_config(self):
    # Put back the seek settings captured by save_infobar_seek_config().
    config.seek.speeds_forward.value = self.saved_config_speeds_forward
    config.seek.speeds_backward.value = self.saved_config_speeds_backward
    config.seek.speeds_slowmotion.value = self.saved_config_seek_speeds_slowmotion
    config.seek.enter_forward.value = self.saved_config_enter_forward
    config.seek.enter_backward.value = self.saved_config_enter_backward
    config.seek.on_pause.value = self.saved_config_seek_on_pause
def __init__(self, session, dvd_device=None, dvd_filelist=None, args=None):
    """Set up the DVD player screen.

    dvd_device: block device of a physical disc (triggers direct play).
    dvd_filelist: list of DVD image/structure paths (first one autoplays).
    """
    if not dvd_filelist: dvd_filelist = []
    Screen.__init__(self, session)
    InfoBarBase.__init__(self)
    InfoBarNotifications.__init__(self)
    InfoBarCueSheetSupport.__init__(self, actionmap = "MediaPlayerCueSheetActions")
    InfoBarShowHide.__init__(self)
    InfoBarAudioSelection.__init__(self)
    InfoBarSubtitleSupport.__init__(self)
    HelpableScreen.__init__(self)
    # swap the global seek config for DVD-friendly values before
    # InfoBarSeek reads it; restored in __onClose
    self.save_infobar_seek_config()
    self.change_infobar_seek_config()
    InfoBarSeek.__init__(self)
    InfoBarPVRState.__init__(self)
    # remember and stop the running service; it is resumed on close
    self.oldService = self.session.nav.getCurrentlyPlayingServiceOrGroup()
    self.session.nav.stopService()
    # OSD widgets
    self["audioLabel"] = Label("n/a")
    self["subtitleLabel"] = Label("")
    self["angleLabel"] = Label("")
    self["chapterLabel"] = Label("")
    self["anglePix"] = Pixmap()
    self["anglePix"].hide()
    # last OSD tuples, used to detect changes worth popping up the infobar
    self.last_audioTuple = None
    self.last_subtitleTuple = None
    self.last_angleTuple = None
    self.totalChapters = 0
    self.currentChapter = 0
    self.totalTitles = 0
    self.currentTitle = 0
    # map service events (evUser+N) to the OSD update handlers below
    self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
        {
            iPlayableService.evStopped: self.__serviceStopped,
            iPlayableService.evUser: self.__timeUpdated,
            iPlayableService.evUser+1: self.__statePlay,
            iPlayableService.evUser+2: self.__statePause,
            iPlayableService.evUser+3: self.__osdFFwdInfoAvail,
            iPlayableService.evUser+4: self.__osdFBwdInfoAvail,
            iPlayableService.evUser+5: self.__osdStringAvail,
            iPlayableService.evUser+6: self.__osdAudioInfoAvail,
            iPlayableService.evUser+7: self.__osdSubtitleInfoAvail,
            iPlayableService.evUser+8: self.__chapterUpdated,
            iPlayableService.evUser+9: self.__titleUpdated,
            iPlayableService.evUser+11: self.__menuOpened,
            iPlayableService.evUser+12: self.__menuClosed,
            iPlayableService.evUser+13: self.__osdAngleInfoAvail
        })
    self["DVDPlayerDirectionActions"] = ActionMap(["DirectionActions"],
        {
            #MENU KEY DOWN ACTIONS
            "left": self.keyLeft,
            "right": self.keyRight,
            "up": self.keyUp,
            "down": self.keyDown,
            #MENU KEY REPEATED ACTIONS
            "leftRepeated": self.doNothing,
            "rightRepeated": self.doNothing,
            "upRepeated": self.doNothing,
            "downRepeated": self.doNothing,
            #MENU KEY UP ACTIONS
            "leftUp": self.doNothing,
            "rightUp": self.doNothing,
            "upUp": self.doNothing,
            "downUp": self.doNothing,
        })
    self["OkCancelActions"] = ActionMap(["OkCancelActions"],
        {
            "ok": self.keyOk,
            "cancel": self.keyCancel,
        })
    self["DVDPlayerPlaybackActions"] = HelpableActionMap(self, "DVDPlayerActions",
        {
            #PLAYER ACTIONS
            "dvdMenu": (self.enterDVDMenu, _("show DVD main menu")),
            "toggleInfo": (self.toggleInfo, _("toggle time, chapter, audio, subtitle info")),
            "nextChapter": (self.nextChapter, _("forward to the next chapter")),
            "prevChapter": (self.prevChapter, _("rewind to the previous chapter")),
            "nextTitle": (self.nextTitle, _("jump forward to the next title")),
            "prevTitle": (self.prevTitle, _("jump back to the previous title")),
            "tv": (self.askLeavePlayer, _("exit DVD player or return to file browser")),
            "dvdAudioMenu": (self.enterDVDAudioMenu, _("(show optional DVD audio menu)")),
            "AudioSelection": (self.enterAudioSelection, _("Select audio track")),
            "nextAudioTrack": (self.nextAudioTrack, _("switch to the next audio track")),
            "nextSubtitleTrack": (self.nextSubtitleTrack, _("switch to the next subtitle language")),
            "nextAngle": (self.nextAngle, _("switch to the next angle")),
            "seekBeginning": self.seekBeginning,
        }, -2)
    self["NumberActions"] = NumberActionMap( [ "NumberActions"],
        {
            "1": self.keyNumberGlobal,
            "2": self.keyNumberGlobal,
            "3": self.keyNumberGlobal,
            "4": self.keyNumberGlobal,
            "5": self.keyNumberGlobal,
            "6": self.keyNumberGlobal,
            "7": self.keyNumberGlobal,
            "8": self.keyNumberGlobal,
            "9": self.keyNumberGlobal,
            "0": self.keyNumberGlobal,
        })
    self.onClose.append(self.__onClose)
    # hotplug plugin is optional; ignore if unavailable
    try:
        from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
        hotplugNotifier.append(self.hotplugCB)
    except:
        pass
    self.autoplay = dvd_device or dvd_filelist
    if dvd_device:
        self.physicalDVD = True
    else:
        self.scanHotplug()
    self.dvd_filelist = dvd_filelist
    self.onFirstExecBegin.append(self.opened)
    self.service = None
    self.in_menu = False
def keyNumberGlobal(self, number):
    # open the chapter-zap dialog pre-seeded with the pressed digit
    print "You pressed number " + str(number)
    self.session.openWithCallback(self.numberEntered, ChapterZap, number)
def numberEntered(self, retval):
    # ChapterZap returns 0 on cancel; only a positive number zaps
    if retval > 0:
        self.zapToNumber(retval)
def getServiceInterface(self, iface):
    """Return the named interface object of the running service.

    Looks up *iface* (e.g. "info", "keys", "subtitle") on the current
    service and invokes the accessor; returns None when no service is
    playing or the accessor is missing / not callable.
    """
    if not self.service:
        return None
    accessor = getattr(self.service, iface, None)
    return accessor() if callable(accessor) else None
def __serviceStopped(self):
    # playback ended: hide the video overlay and switch off DVD subtitles
    self.dvdScreen.hide()
    subs = self.getServiceInterface("subtitle")
    if subs:
        subs.disableSubtitles(self.session.current_dialog.instance)
def serviceStarted(self): #override InfoBarShowHide function
    # show the overlay as soon as the DVD service starts
    self.dvdScreen.show()
def doEofInternal(self, playing):
    # at end-of-stream while a DVD menu is open, just hide the infobar
    if self.in_menu:
        self.hide()
def __menuOpened(self):
    # DVD menu appeared: hide the infobar and disable chapter number entry
    self.hide()
    self.in_menu = True
    self["NumberActions"].setEnabled(False)
def __menuClosed(self):
    # DVD menu left: restore the infobar and chapter number entry
    self.show()
    self.in_menu = False
    self["NumberActions"].setEnabled(True)
def setChapterLabel(self):
    # Build chapter strings for OSD and LCD; a title of 0 means a menu
    # is showing, so generic "Menu" labels are used.
    chapterLCD = "Menu"
    chapterOSD = "DVD Menu"
    if self.currentTitle > 0:
        chapterLCD = "%s %d" % (_("Chap."), self.currentChapter)
        chapterOSD = "DVD %s %d/%d" % (_("Chapter"), self.currentChapter, self.totalChapters)
        chapterOSD += " (%s %d/%d)" % (_("Title"), self.currentTitle, self.totalTitles)
    self["chapterLabel"].setText(chapterOSD)
    try:
        self.session.summary.updateChapter(chapterLCD)
    except:
        # the LCD summary screen may not exist (yet); ignore
        pass
def doNothing(self):
    # sink for repeated/key-up direction events that must not re-trigger
    # menu navigation
    pass
def toggleInfo(self):
    # toggle the infobar, but never on top of a DVD menu
    if not self.in_menu:
        self.toggleShow()
        print "toggleInfo"
def __timeUpdated(self):
    # debug stub for the service's time-update event
    print "timeUpdated"
def __statePlay(self):
    # debug stub for the service's play-state event
    print "statePlay"
def __statePause(self):
    # debug stub for the service's pause-state event
    print "statePause"
def __osdFFwdInfoAvail(self):
    # fast-forward info changed: refresh chapter display
    self.setChapterLabel()
    print "FFwdInfoAvail"
def __osdFBwdInfoAvail(self):
    # rewind info changed: refresh chapter display
    self.setChapterLabel()
    print "FBwdInfoAvail"
def __osdStringAvail(self):
    # debug stub for the service's OSD string event
    print "StringAvail"
def __osdAudioInfoAvail(self):
    # The service reported a new audio track: update the OSD label and pop
    # up the infobar if the track actually changed outside a menu.
    info = self.getServiceInterface("info")
    audioTuple = info and info.getInfoObject(iServiceInformation.sUser+6)
    print "AudioInfoAvail ", repr(audioTuple)
    if audioTuple:
        # audioTuple[1]/[2] are formatted as "description (language)" —
        # exact tuple layout comes from the DVD service
        audioString = "%s (%s)" % (audioTuple[1],audioTuple[2])
        self["audioLabel"].setText(audioString)
        if audioTuple != self.last_audioTuple and not self.in_menu:
            self.doShow()
        self.last_audioTuple = audioTuple
def __osdSubtitleInfoAvail(self):
info = self.getServiceInterface("info")
subtitleTuple = info and info.getInfoObject(iServiceInformation.sUser+7)
print "SubtitleInfoAvail ", repr(subtitleTuple)
if subtitleTuple:
subtitleString = ""
if subtitleTuple[0] is not 0:
#subtitleString = "%d: %s" % (subtitleTuple[0], subtitleTuple[1])
subtitleString = "%s" % subtitleTuple[1]
self["subtitleLabel"].setText(subtitleString)
if subtitleTuple != self.last_subtitleTuple and not self.in_menu:
self.doShow()
self.last_subtitleTuple = subtitleTuple
def __osdAngleInfoAvail(self):
    # The service reported the camera-angle state: show "current / total"
    # and the angle icon only when more than one angle exists.
    info = self.getServiceInterface("info")
    angleTuple = info and info.getInfoObject(iServiceInformation.sUser+8)
    print "AngleInfoAvail ", repr(angleTuple)
    if angleTuple:
        angleString = ""
        if angleTuple[1] > 1:
            angleString = "%d / %d" % (angleTuple[0], angleTuple[1])
            self["anglePix"].show()
        else:
            self["anglePix"].hide()
        self["angleLabel"].setText(angleString)
        if angleTuple != self.last_angleTuple and not self.in_menu:
            self.doShow()
        self.last_angleTuple = angleTuple
def __chapterUpdated(self):
    # refresh current/total chapter counters from the service info
    info = self.getServiceInterface("info")
    if info:
        self.currentChapter = info.getInfo(iServiceInformation.sCurrentChapter)
        self.totalChapters = info.getInfo(iServiceInformation.sTotalChapters)
        self.setChapterLabel()
        print "__chapterUpdated: %d/%d" % (self.currentChapter, self.totalChapters)
def __titleUpdated(self):
    # refresh current/total title counters and pop up the infobar
    # (a title change is always worth showing, unless a menu is open)
    info = self.getServiceInterface("info")
    if info:
        self.currentTitle = info.getInfo(iServiceInformation.sCurrentTitle)
        self.totalTitles = info.getInfo(iServiceInformation.sTotalTitles)
        self.setChapterLabel()
        print "__titleUpdated: %d/%d" % (self.currentTitle, self.totalTitles)
        if not self.in_menu:
            self.doShow()
def askLeavePlayer(self):
    # When started via autoplay, leave immediately without asking.
    if self.autoplay:
        self.exitCB((None,"exit"))
        return
    choices = [(_("Exit"), "exit"), (_("Continue playing"), "play")]
    if self.physicalDVD:
        cur = self.session.nav.getCurrentlyPlayingServiceOrGroup()
        # offer "Play DVD" only when the physical disc is not already playing
        if cur and not cur.toString().endswith(harddiskmanager.getAutofsMountpoint(harddiskmanager.getCD())):
            choices.insert(0,(_("Play DVD"), "playPhysical" ))
    self.session.openWithCallback(self.exitCB, ChoiceBox, title=_("Leave DVD player?"), list = choices)
def sendKey(self, key):
    """Forward *key* to the DVD service's key interface.

    Returns that interface object (used by callers as a truthy "key was
    delivered" flag), or None when no service is active.
    """
    key_iface = self.getServiceInterface("keys")
    if key_iface:
        key_iface.keyPressed(key)
    return key_iface
def enterAudioSelection(self):
    # open the InfoBarAudioSelection track chooser
    self.audioSelection()
def nextAudioTrack(self):
    # keyUser: cycle to the next audio track
    self.sendKey(iServiceKeys.keyUser)
def nextSubtitleTrack(self):
    # keyUser+1: cycle to the next subtitle language
    self.sendKey(iServiceKeys.keyUser+1)
def enterDVDAudioMenu(self):
    # keyUser+2: open the disc's audio menu (if it has one)
    self.sendKey(iServiceKeys.keyUser+2)
def nextChapter(self):
    # keyUser+3: jump to the next chapter
    self.sendKey(iServiceKeys.keyUser+3)
def prevChapter(self):
    # keyUser+4: jump to the previous chapter
    self.sendKey(iServiceKeys.keyUser+4)
def nextTitle(self):
    # keyUser+5: jump to the next title
    self.sendKey(iServiceKeys.keyUser+5)
def prevTitle(self):
    # keyUser+6: jump to the previous title
    self.sendKey(iServiceKeys.keyUser+6)
def enterDVDMenu(self):
    # keyUser+7: open the disc's main menu
    self.sendKey(iServiceKeys.keyUser+7)
def nextAngle(self):
    # keyUser+8: switch to the next camera angle
    self.sendKey(iServiceKeys.keyUser+8)
def seekBeginning(self):
    # restart playback from position 0 of the current title
    if self.service:
        seekable = self.getSeek()
        if seekable:
            seekable.seekTo(0)
def zapToNumber(self, number):
    # jump directly to the chapter chosen in the ChapterZap dialog
    if self.service:
        seekable = self.getSeek()
        if seekable:
            print "seek to chapter %d" % number
            seekable.seekChapter(number)
# MENU ACTIONS
def keyRight(self):
    # forward "right" to the DVD menu navigation
    self.sendKey(iServiceKeys.keyRight)
def keyLeft(self):
    # forward "left" to the DVD menu navigation
    self.sendKey(iServiceKeys.keyLeft)
def keyUp(self):
    # forward "up" to the DVD menu navigation
    self.sendKey(iServiceKeys.keyUp)
def keyDown(self):
    # forward "down" to the DVD menu navigation
    self.sendKey(iServiceKeys.keyDown)
def keyOk(self):
    # OK goes to the service first; outside a menu it also toggles the infobar
    if self.sendKey(iServiceKeys.keyOk) and not self.in_menu:
        self.toggleInfo()
def keyCancel(self):
    # EXIT asks whether to leave the player
    self.askLeavePlayer()
def opened(self):
    # first-exec hook: decide what to play based on how we were started
    if self.autoplay and self.dvd_filelist:
        # opened via autoplay
        self.FileBrowserClosed(self.dvd_filelist[0])
    elif self.autoplay and self.physicalDVD:
        self.playPhysicalCB(True)
    elif self.physicalDVD:
        # opened from menu with dvd in drive
        self.session.openWithCallback(self.playPhysicalCB, MessageBox, text=_("Do you want to play DVD in drive?"), timeout=5 )
def playPhysicalCB(self, answer):
    # confirmed: spin the drive to normal speed and play its autofs mountpoint
    if answer:
        harddiskmanager.setDVDSpeed(harddiskmanager.getCD(), 1)
        self.FileBrowserClosed(harddiskmanager.getAutofsMountpoint(harddiskmanager.getCD()))
def FileBrowserClosed(self, val):
    """Start playing the DVD at path *val* (None = user cancelled).

    Builds a service reference (type 4369 = DVD), derives a display name,
    probes the IFO headers for PAL/NTSC and resolution to size the video
    overlay, then starts the service.
    """
    curref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
    print "FileBrowserClosed", val
    if val is None:
        self.askLeavePlayer()
    else:
        # prefer an ISO image sitting next to the chosen path
        isopathname = "/VIDEO_TS.ISO"
        if os.path.exists(val + isopathname):
            val += isopathname
        newref = eServiceReference(4369, 0, val)
        print "play", newref.toString()
        if curref is None or curref != newref:
            if newref.toString().endswith("/VIDEO_TS") or newref.toString().endswith("/"):
                # derive a human-readable name from the directory structure
                names = newref.toString().rsplit("/",3)
                if names[2].startswith("Disk ") or names[2].startswith("DVD "):
                    name = str(names[1]) + " - " + str(names[2])
                else:
                    name = names[2]
                print "setting name to: ", self.service
                newref.setName(str(name))
            # Construct a path for the IFO header assuming it exists
            ifofilename = val
            if not ifofilename.upper().endswith("/VIDEO_TS"):
                ifofilename += "/VIDEO_TS"
            # candidate IFO files with the offset of their video attributes
            files = [("/VIDEO_TS.IFO", 0x100), ("/VTS_01_0.IFO", 0x100), ("/VTS_01_0.IFO", 0x200)] # ( filename, offset )
            for name in files:
                (status, isNTSC, isLowResolution) = self.readVideoAtributes( ifofilename, name )
                if status:
                    break
            # size the overlay: scale NTSC to PAL height, double half-res
            height = getDesktop(0).size().height()
            print "[DVD] height:", height
            if isNTSC:
                height = height * 576 / 480
                print "[DVD] NTSC height:", height
            if isLowResolution:
                height *= 2
                print "[DVD] LowResolution:", height
            self.dvdScreen = self.session.instantiateDialog(DVDOverlay, height=height)
            self.session.nav.playService(newref)
            self.service = self.session.nav.getCurrentService()
            print "self.service", self.service
            print "cur_dlg", self.session.current_dialog
            subs = self.getServiceInterface("subtitle")
            if subs:
                subs.enableSubtitles(self.dvdScreen.instance, None)
 def readVideoAtributes(self, isofilename, checked_file):
  # Read one IFO header of the DVD structure to determine the TV
  # standard (PAL/NTSC) and whether the title is low resolution.
  # checked_file: (filename, byte offset of the video attribute word).
  # Returns (status, isNTSC, isLowResolution); status is True when the
  # high attribute byte could be read and was non-zero.
  (name, offset) = checked_file
  isofilename += name
  print "[DVD] file", name
  status = False
  isNTSC = False
  isLowResolution = False
  ifofile = None
  try:
   # Try to read the IFO header to determine PAL/NTSC format and the resolution
   ifofile = open(isofilename, "r")
   ifofile.seek(offset)
   video_attr_high = ord(ifofile.read(1))
   if video_attr_high != 0:
    status = True
   video_attr_low = ord(ifofile.read(1))
   print "[DVD] %s: video_attr_high = %x" % ( name, video_attr_high ), "video_attr_low = %x" % video_attr_low
   # Bit 4 clear in the high byte means NTSC; bits 3+4 both set in the
   # low byte indicate a low-resolution (half-height) title.
   isNTSC = (video_attr_high & 0x10 == 0)
   isLowResolution = (video_attr_low & 0x18 == 0x18)
  except:
   # If the service is an .iso or .img file we assume it is PAL
   # Sorry we cannot open image files here.
   print "[DVD] Cannot read file or is ISO/IMG"
  finally:
   if ifofile is not None:
    ifofile.close()
  return status, isNTSC, isLowResolution
def exitCB(self, answer):
if answer is not None:
if answer[1] == "exit":
if self.service:
self.service = None
self.close()
elif answer[1] == "playPhysical":
if self.service:
self.service = None
self.playPhysicalCB(True)
else:
pass
 def __onClose(self):
  # Screen teardown: restore the seekbar configuration, resume whatever
  # service was playing before the DVD player started, and detach the
  # hotplug callback (the Hotplug plugin may not be installed, hence
  # the broad try/except around the import).
  self.restore_infobar_seek_config()
  self.session.nav.playService(self.oldService)
  try:
   from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
   hotplugNotifier.remove(self.hotplugCB)
  except:
   pass
 def playLastCB(self, answer): # overwrite infobar cuesheet function
  # Resume handler for the "continue at last position?" query: seek to
  # the stored resume point when confirmed, then make sure playback is
  # unpaused and hide the infobar.
  print "playLastCB", answer, self.resume_point
  if self.service:
   if answer:
    seekable = self.getSeek()
    if seekable:
     seekable.seekTo(self.resume_point)
   pause = self.service.pause()
   pause.unpause()
  self.hideAfterResume()
 def showAfterCuesheetOperation(self):
  # Re-show the infobar after a cuesheet edit, unless the DVD menu is
  # currently active.
  if not self.in_menu:
   self.show()
 def createSummary(self):
  # Summary screen class used on the LCD/front display for this player.
  return DVDSummary
#override some InfoBarSeek functions
 def doEof(self):
  # InfoBarSeek override: on end-of-file simply keep the play state;
  # the DVD navigation itself decides what happens next (menu, loop).
  self.setSeekState(self.SEEK_STATE_PLAY)
 def calcRemainingTime(self):
  # InfoBarSeek override: remaining time is not meaningful for DVD
  # playback, so always report 0.
  return 0
 def hotplugCB(self, dev, media_state):
  # Hotplug notification: when a medium appears in the CD/DVD drive
  # (media_state "1") rescan it for a video DVD; on removal forget the
  # physical DVD.
  print "[hotplugCB]", dev, media_state
  if dev == harddiskmanager.getCD():
   if media_state == "1":
    self.scanHotplug()
   else:
    self.physicalDVD = False
def scanHotplug(self):
devicepath = harddiskmanager.getAutofsMountpoint(harddiskmanager.getCD())
if pathExists(devicepath):
from Components.Scanner import scanDevice
res = scanDevice(devicepath)
list = [ (r.description, r, res[r], self.session) for r in res ]
if list:
(desc, scanner, files, session) = list[0]
for file in files:
print file
if file.mimetype == "video/x-dvd":
print "physical dvd found:", devicepath
self.physicalDVD = True
return
self.physicalDVD = False
| gpl-2.0 |
siddhika1889/Pydev-Dependencies | pysrc/third_party/pep8/lib2to3/lib2to3/pygram.py | 320 | 1118 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class Symbols(object):
 """Namespace exposing each grammar nonterminal as an attribute."""

 def __init__(self, grammar):
  """Create one attribute per grammar symbol (nonterminal).

  Each attribute's value is the symbol's numeric type (an int >= 256).
  """
  symbol_map = grammar.symbol2number
  for symbol_name, symbol_number in symbol_map.iteritems():
   setattr(self, symbol_name, symbol_number)
# Load the full Python grammar and expose its nonterminals.
python_grammar = driver.load_grammar(_GRAMMAR_FILE)
python_symbols = Symbols(python_grammar)
# Variant without the `print` keyword, for parsing print-as-function code.
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]
# The pattern grammar drives lib2to3's fixer pattern language.
pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
| epl-1.0 |
PetrDlouhy/django | django/db/transaction.py | 98 | 12277 | from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
)
from django.utils.decorators import ContextDecorator
class TransactionManagementError(ProgrammingError):
 """
 This exception is thrown when transaction management is used improperly.
 """
 # Subclasses ProgrammingError so handlers catching database programming
 # errors also catch transaction misuse.
 pass
def get_connection(using=None):
 """
 Get a database connection by name, or the default database connection
 if no name is provided. This is a private API.
 """
 # Only an explicit None falls back to the default alias; an empty
 # string is passed through to the connection handler unchanged.
 alias = DEFAULT_DB_ALIAS if using is None else using
 return connections[alias]
def get_autocommit(using=None):
 """
 Get the autocommit status of the connection.
 """
 # `using` is a database alias; None selects the default database.
 return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
 """
 Set the autocommit status of the connection.
 """
 # Delegates to the backend; `using` of None means the default database.
 return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
 """
 Commits a transaction.
 """
 # `using` is a database alias; None selects the default database.
 get_connection(using).commit()
def rollback(using=None):
 """
 Rolls back a transaction.
 """
 # `using` is a database alias; None selects the default database.
 get_connection(using).rollback()
def savepoint(using=None):
 """
 Creates a savepoint (if supported and required by the backend) inside the
 current transaction. Returns an identifier for the savepoint that will be
 used for the subsequent rollback or commit.
 """
 # The backend decides whether a real savepoint is emitted.
 return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
 """
 Rolls back the most recent savepoint (if one exists). Does nothing if
 savepoints are not supported.
 """
 # `sid` is the identifier returned by savepoint().
 get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
 """
 Commits the most recent savepoint (if one exists). Does nothing if
 savepoints are not supported.
 """
 # `sid` is the identifier returned by savepoint().
 get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
 """
 Resets the counter used to generate unique savepoint ids in this thread.
 """
 # `using` is a database alias; None selects the default database.
 get_connection(using).clean_savepoints()
def get_rollback(using=None):
 """
 Gets the "needs rollback" flag -- for *advanced use* only.
 """
 # Reflects whether the innermost atomic block is marked for rollback.
 return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
 """
 Sets or unsets the "needs rollback" flag -- for *advanced use* only.
 When `rollback` is `True`, it triggers a rollback when exiting the
 innermost enclosing atomic block that has `savepoint=True` (that's the
 default). Use this to force a rollback without raising an exception.
 When `rollback` is `False`, it prevents such a rollback. Use this only
 after rolling back to a known-good state! Otherwise, you break the atomic
 block and data corruption may occur.
 """
 # `using` is a database alias; None selects the default database.
 return get_connection(using).set_rollback(rollback)
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
 """
 This class guarantees the atomic execution of a given block.
 An instance can be used either as a decorator or as a context manager.
 When it's used as a decorator, __call__ wraps the execution of the
 decorated function in the instance itself, used as a context manager.
 When it's used as a context manager, __enter__ creates a transaction or a
 savepoint, depending on whether a transaction is already in progress, and
 __exit__ commits the transaction or releases the savepoint on normal exit,
 and rolls back the transaction or to the savepoint on exceptions.
 It's possible to disable the creation of savepoints if the goal is to
 ensure that some code runs within a transaction without creating overhead.
 A stack of savepoints identifiers is maintained as an attribute of the
 connection. None denotes the absence of a savepoint.
 This allows reentrancy even if the same AtomicWrapper is reused. For
 example, it's possible to define `oa = @atomic('other')` and use `@oa` or
 `with oa:` multiple times.
 Since database connections are thread-local, this is thread-safe.
 This is a private API.
 """
 def __init__(self, using, savepoint):
  # using: database alias; savepoint: whether nested blocks may create
  # savepoints (False trades rollback granularity for less overhead).
  self.using = using
  self.savepoint = savepoint
 def __enter__(self):
  """Open a transaction, or push a savepoint if one is already active."""
  connection = get_connection(self.using)
  if not connection.in_atomic_block:
   # Reset state when entering an outermost atomic block.
   connection.commit_on_exit = True
   connection.needs_rollback = False
   if not connection.get_autocommit():
    # Some database adapters (namely sqlite3) don't handle
    # transactions and savepoints properly when autocommit is off.
    # Turning autocommit back on isn't an option; it would trigger
    # a premature commit. Give up if that happens.
    if connection.features.autocommits_when_autocommit_is_off:
     raise TransactionManagementError(
      "Your database backend doesn't behave properly when "
      "autocommit is off. Turn it on before using 'atomic'.")
    # When entering an atomic block with autocommit turned off,
    # Django should only use savepoints and shouldn't commit.
    # This requires at least a savepoint for the outermost block.
    if not self.savepoint:
     raise TransactionManagementError(
      "The outermost 'atomic' block cannot use "
      "savepoint = False when autocommit is off.")
    # Pretend we're already in an atomic block to bypass the code
    # that disables autocommit to enter a transaction, and make a
    # note to deal with this case in __exit__.
    connection.in_atomic_block = True
    connection.commit_on_exit = False
  if connection.in_atomic_block:
   # We're already in a transaction; create a savepoint, unless we
   # were told not to or we're already waiting for a rollback. The
   # second condition avoids creating useless savepoints and prevents
   # overwriting needs_rollback until the rollback is performed.
   if self.savepoint and not connection.needs_rollback:
    sid = connection.savepoint()
    connection.savepoint_ids.append(sid)
   else:
    connection.savepoint_ids.append(None)
  else:
   # We aren't in a transaction yet; create one.
   # The usual way to start a transaction is to turn autocommit off.
   # However, some database adapters (namely sqlite3) don't handle
   # transactions and savepoints properly when autocommit is off.
   # In such cases, start an explicit transaction instead, which has
   # the side-effect of disabling autocommit.
   if connection.features.autocommits_when_autocommit_is_off:
    connection._start_transaction_under_autocommit()
    connection.autocommit = False
   else:
    connection.set_autocommit(False)
   connection.in_atomic_block = True
 def __exit__(self, exc_type, exc_value, traceback):
  """Commit/release on clean exit; roll back on error or needs_rollback."""
  connection = get_connection(self.using)
  if connection.savepoint_ids:
   sid = connection.savepoint_ids.pop()
  else:
   # Prematurely unset this flag to allow using commit or rollback.
   connection.in_atomic_block = False
  try:
   if connection.closed_in_transaction:
    # The database will perform a rollback by itself.
    # Wait until we exit the outermost block.
    pass
   elif exc_type is None and not connection.needs_rollback:
    if connection.in_atomic_block:
     # Release savepoint if there is one
     if sid is not None:
      try:
       connection.savepoint_commit(sid)
      except DatabaseError:
       try:
        connection.savepoint_rollback(sid)
        # The savepoint won't be reused. Release it to
        # minimize overhead for the database server.
        connection.savepoint_commit(sid)
       except Error:
        # If rolling back to a savepoint fails, mark for
        # rollback at a higher level and avoid shadowing
        # the original exception.
        connection.needs_rollback = True
        raise
    else:
     # Commit transaction
     try:
      connection.commit()
     except DatabaseError:
      try:
       connection.rollback()
      except Error:
       # An error during rollback means that something
       # went wrong with the connection. Drop it.
       connection.close()
      raise
   else:
    # This flag will be set to True again if there isn't a savepoint
    # allowing to perform the rollback at this level.
    connection.needs_rollback = False
    if connection.in_atomic_block:
     # Roll back to savepoint if there is one, mark for rollback
     # otherwise.
     if sid is None:
      connection.needs_rollback = True
     else:
      try:
       connection.savepoint_rollback(sid)
       # The savepoint won't be reused. Release it to
       # minimize overhead for the database server.
       connection.savepoint_commit(sid)
      except Error:
       # If rolling back to a savepoint fails, mark for
       # rollback at a higher level and avoid shadowing
       # the original exception.
       connection.needs_rollback = True
    else:
     # Roll back transaction
     try:
      connection.rollback()
     except Error:
      # An error during rollback means that something
      # went wrong with the connection. Drop it.
      connection.close()
  finally:
   # Outermost block exit when autocommit was enabled.
   if not connection.in_atomic_block:
    if connection.closed_in_transaction:
     connection.connection = None
    elif connection.features.autocommits_when_autocommit_is_off:
     connection.autocommit = True
    else:
     connection.set_autocommit(True)
   # Outermost block exit when autocommit was disabled.
   elif not connection.savepoint_ids and not connection.commit_on_exit:
    if connection.closed_in_transaction:
     connection.connection = None
    else:
     connection.in_atomic_block = False
def atomic(using=None, savepoint=True):
 """Decorator/context-manager factory wrapping code in an Atomic block.

 Supports bare-decorator usage (@atomic), parameterized usage
 (@atomic(using=..., savepoint=...)) and `with atomic(...):`.
 """
 if not callable(using):
  # Decorator with arguments, or context manager: with atomic(...): ...
  return Atomic(using, savepoint)
 # Bare decorator: `using` is actually the function being decorated.
 return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
 """Mark a view as exempt from the per-request atomic transaction.

 Usable bare (@non_atomic_requests) or with a database alias
 (@non_atomic_requests('other')).
 """
 if callable(using):
  # Bare decorator: `using` is the view itself; apply to the default db.
  return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
 db_alias = DEFAULT_DB_ALIAS if using is None else using
 return lambda view: _non_atomic_requests(view, db_alias)
| bsd-3-clause |
nliolios24/textrank | share/doc/networkx-1.9.1/examples/algorithms/blockmodel.py | 32 | 3009 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
 """Creates hierarchical cluster of graph G from distance matrix.

 Nodes must be consecutively labeled integers (0..len(G)-1) because the
 shortest-path lengths are written into a dense matrix indexed by node
 label.  Returns a list of node lists, one per cluster.
 """
 # All-pairs shortest-path lengths serve as pairwise distances.
 path_length=nx.all_pairs_shortest_path_length(G)
 distances=numpy.zeros((len(G),len(G)))
 for u,p in path_length.items():
  for v,d in p.items():
   distances[u][v]=d
 # Create hierarchical cluster
 Y=distance.squareform(distances)
 Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
 # This partition selection is arbitrary, for illustrative purposes
 membership=list(hierarchy.fcluster(Z,t=1.15))
 # Create collection of lists for blockmodel
 partition=defaultdict(list)
 for n,p in zip(list(range(len(G))),membership):
  partition[p].append(n)
 return list(partition.values())
if __name__ == '__main__':
 # Load the Hartford drug users network from an edge list.
 G=nx.read_edgelist("hartford_drug.edgelist")
 # Extract largest connected component into graph H
 H=nx.connected_component_subgraphs(G)[0]
 # Makes life easier to have consecutively labeled integer nodes
 H=nx.convert_node_labels_to_integers(H)
 # Create parititions with hierarchical clustering
 partitions=create_hc(H)
 # Build blockmodel graph
 BM=nx.blockmodel(H,partitions)
 # Draw original graph
 pos=nx.spring_layout(H,iterations=100)
 fig=plt.figure(1,figsize=(6,10))
 ax=fig.add_subplot(211)
 nx.draw(H,pos,with_labels=False,node_size=10)
 plt.xlim(0,1)
 plt.ylim(0,1)
 # Draw block model with weighted edges and nodes sized by number of internal nodes
 node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
 edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
 # Set positions to mean of positions of internal nodes from original graph
 posBM={}
 for n in BM:
  xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
  posBM[n]=xy.mean(axis=0)
 ax=fig.add_subplot(212)
 nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
 plt.xlim(0,1)
 plt.ylim(0,1)
 plt.axis('off')
 # Save both plots (original on top, block model below) to one image.
 plt.savefig('hartford_drug_block_model.png')
| mit |
andriibekker/biddingsbase | django/utils/itercompat.py | 294 | 1169 | """
Providing iterator functions that are not in all version of Python we support.
Where possible, we try to use the system-native version and only fall back to
these implementations if necessary.
"""
import itertools
# Fallback for Python 2.4, Python 2.5
def product(*args, **kwds):
 """
 Taken from http://docs.python.org/library/itertools.html#itertools.product
 """
 # NOTE: relies on Python 2's map() returning a list (so `list * int`
 # repeats it).  This fallback is only installed when itertools.product
 # is missing (Python 2.4/2.5) -- see the hasattr check below the def.
 # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
 # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
 pools = map(tuple, args) * kwds.get('repeat', 1)
 result = [[]]
 for pool in pools:
  # Extend every partial result with each element of the next pool.
  result = [x+[y] for x in result for y in pool]
 for prod in result:
  yield tuple(prod)
# Prefer the C implementation when the running Python provides it (>= 2.6).
if hasattr(itertools, 'product'):
 product = itertools.product
def is_iterable(x):
 """An implementation-independent way of checking for iterables."""
 try:
  iter(x)
  return True
 except TypeError:
  # iter() raises TypeError for objects with no iteration protocol.
  return False
def all(iterable):
 """Fallback for the builtin all() (absent before Python 2.5).

 Returns False as soon as a falsy item is seen, True otherwise
 (vacuously True for an empty iterable).
 """
 result = True
 for element in iterable:
  if not element:
   result = False
   break
 return result
def any(iterable):
 """Fallback for the builtin any() (absent before Python 2.5).

 Returns True as soon as a truthy item is seen, False otherwise
 (False for an empty iterable).
 """
 found = False
 for element in iterable:
  if element:
   found = True
   break
 return found
| bsd-3-clause |
kingvuplus/xrd-alliance | lib/python/Screens/About.py | 1 | 9272 | from Screen import Screen
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.Harddisk import harddiskmanager
from Components.NimManager import nimmanager
from Components.About import about
from Components.ScrollLabel import ScrollLabel
from Components.Button import Button
from Tools.Downloader import downloadWithProgress
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigSelection, getConfigListEntry
from Components.Label import Label
import re
from Tools.StbHardware import getFPVersion
from enigma import eTimer
# Persistent setting for the "Latest Commits" screen: which repository's
# commit log to fetch.
config.CommitInfoSetup = ConfigSubsection()
config.CommitInfoSetup.commiturl = ConfigSelection(default='Enigma2', choices=[('Enigma2', _('Source-Enigma2')), ('XTA', _('Skin-XTA')), ('TechniHD', _('Skin-TechniHD'))])
class About(Screen):
 # "About" screen: collects hardware, image, kernel, tuner and HDD
 # information into widgets and one scrollable text block.
 def __init__(self, session):
  Screen.__init__(self, session)
  AboutText = _("Hardware: ") + about.getHardwareTypeString() + "\n"
  AboutText += _("Image: ") + about.getImageTypeString() + "\n"
  AboutText += _("Kernel version: ") + about.getKernelVersionString() + "\n"
  EnigmaVersion = "Enigma: " + about.getEnigmaVersionString()
  self["EnigmaVersion"] = StaticText(EnigmaVersion)
  AboutText += EnigmaVersion + "\n"
  ImageVersion = _("Last upgrade: ") + about.getImageVersionString()
  self["ImageVersion"] = StaticText(ImageVersion)
  AboutText += ImageVersion + "\n"
  fp_version = getFPVersion()
  # Frontprocessor version may be unavailable; show nothing in that case.
  if fp_version is None:
   fp_version = ""
  else:
   fp_version = _("Frontprocessor version: %d") % fp_version
  AboutText += fp_version + "\n"
  self["FPVersion"] = StaticText(fp_version)
  self["TunerHeader"] = StaticText(_("Detected NIMs:"))
  AboutText += "\n" + _("Detected NIMs:") + "\n"
  nims = nimmanager.nimList()
  # The skin only has four tuner widgets; further NIMs still go into
  # the scrollable text.
  for count in range(len(nims)):
   if count < 4:
    self["Tuner" + str(count)] = StaticText(nims[count])
   else:
    self["Tuner" + str(count)] = StaticText("")
   AboutText += nims[count] + "\n"
  self["HDDHeader"] = StaticText(_("Detected HDD:"))
  AboutText += "\n" + _("Detected HDD:") + "\n"
  hddlist = harddiskmanager.HDDList()
  hddinfo = ""
  if hddlist:
   for count in range(len(hddlist)):
    if hddinfo:
     hddinfo += "\n"
    hdd = hddlist[count][1]
    # Report free space in GB once it exceeds 1024 MB.
    if int(hdd.free()) > 1024:
     hddinfo += "%s\n(%s, %d GB %s)" % (hdd.model(), hdd.capacity(), hdd.free()/1024, _("free"))
    else:
     hddinfo += "%s\n(%s, %d MB %s)" % (hdd.model(), hdd.capacity(), hdd.free(), _("free"))
  else:
   hddinfo = _("none")
  self["hddA"] = StaticText(hddinfo)
  AboutText += hddinfo
  self["AboutScrollLabel"] = ScrollLabel(AboutText)
  self["key_green"] = Button(_("Translations"))
  self["key_red"] = Button(_("Latest Commits"))
  self["actions"] = ActionMap(["ColorActions", "SetupActions", "DirectionActions"],
   {
    "cancel": self.close,
    "ok": self.close,
    "red": self.showCommits,
    "green": self.showTranslationInfo,
    "up": self["AboutScrollLabel"].pageUp,
    "down": self["AboutScrollLabel"].pageDown
   }, -2)
 def showTranslationInfo(self):
  # Green button: open the translation credits screen.
  self.session.open(TranslationInfo)
 def showCommits(self):
  # Red button: open the commit log screen.
  self.session.open(CommitInfo)
class TranslationInfo(Screen):
 # Shows the active translation's credits, parsed from the gettext
 # catalog metadata.
 def __init__(self, session):
  Screen.__init__(self, session)
  # don't remove the string out of the _(), or it can't be "translated" anymore.
  # TRANSLATORS: Add here whatever should be shown in the "translator" about screen, up to 6 lines (use \n for newline)
  info = _("TRANSLATOR_INFO")
  if info == "TRANSLATOR_INFO":
   info = "(N/A)"
  # _("") returns the PO catalog header (gettext convention): a block of
  # "Key: value" lines such as Language-Team and Last-Translator.
  infolines = _("").split("\n")
  infomap = {}
  for x in infolines:
   l = x.split(': ')
   if len(l) != 2:
    continue
   (type, value) = l
   infomap[type] = value
  print infomap
  self["TranslationInfo"] = StaticText(info)
  # Prefer the team name; fall back to the last translator.
  translator_name = infomap.get("Language-Team", "none")
  if translator_name == "none":
   translator_name = infomap.get("Last-Translator", "")
  self["TranslatorName"] = StaticText(translator_name)
  self["actions"] = ActionMap(["SetupActions"],
   {
    "cancel": self.close,
    "ok": self.close,
   }, -2)
class CommitInfo(Screen):
 # Downloads a GitHub commits page and scrapes title/author/date of
 # each commit into a scrollable log.
 def __init__(self, session):
  Screen.__init__(self, session)
  self.skinName = ["CommitInfo", "About"]
  self["AboutScrollLabel"] = ScrollLabel(_("Please wait"))
  self["Commits"] = Label()
  self["actions"] = ActionMap(["ColorActions", "OkCancelActions", "SetupActions", "DirectionActions"],
   {
    "cancel": self.close,
    "ok": self.close,
    "menu": self.keyMenu,
    "up": self["AboutScrollLabel"].pageUp,
    "down": self["AboutScrollLabel"].pageDown
   }, -2)
  # Defer the download slightly so the screen is painted first.
  self.Timer = eTimer()
  self.Timer.callback.append(self.downloadWebSite)
  self.Timer.start(50, True)
 def downloadWebSite(self):
  # Pick the commits URL matching the configured repository.
  if config.CommitInfoSetup.commiturl.value == 'Enigma2':
   self["Commits"].setText("Enigma2")
   url = 'http://github.com/xtrend-boss/stbgui-new/commits/master'
  elif config.CommitInfoSetup.commiturl.value == 'XTA':
   self["Commits"].setText("XTA")
   url = 'http://github.com/xtrend-alliance/xta/commits/master'
  elif config.CommitInfoSetup.commiturl.value == 'TechniHD':
   self["Commits"].setText("TechniHD")
   url = 'http://github.com/xtrend-alliance/TechniHD/commits/master'
  download = downloadWithProgress(url, '/tmp/.commits')
  download.start().addCallback(self.download_finished).addErrback(self.download_failed)
 def download_failed(self, failure_instance=None, error_message=""):
  self["AboutScrollLabel"].setText(_("Currently the commit log cannot be retreived - please try later again"))
 def download_finished(self, string=""):
  # Scrape the downloaded HTML: split on GitHub's commit-item markup,
  # then pull title/author/date with regexes.
  # NOTE(review): the replace('&', '&') and replace('"', '"') calls are
  # no-ops; they look like HTML-entity replacements ('&amp;', '&quot;')
  # whose source strings were mangled at some point -- confirm upstream.
  commitlog = ""
  try:
   for x in "".join(open('/tmp/.commits', 'r').read().split('<li class="commit commit-group-item js-navigation-item js-details-container">')[1:]).split('<p class="commit-title js-pjax-commit-title">'):
    title = re.findall('class="message" data-pjax="true" title="(.*?)"', x, re.DOTALL)
    author = re.findall('rel="author">(.*?)</', x)
    date = re.findall('<time class="js-relative-date" datetime=".*?" title="(.*?)">', x)
    for t in title:
     commitlog += t.strip().replace('&', '&').replace('"', '"').replace('<', '\xc2\xab').replace('>', '\xc2\xbb') + "\n"
    for a in author:
     commitlog += "Author: " + a.strip().replace('<', '\xc2\xab').replace('>', '\xc2\xbb') + "\n"
    for d in date:
     commitlog += d.strip() + "\n"
    commitlog += 140*'-' + "\n"
  except:
   commitlog = _("Currently the commit log cannot be retrieved - please try later again")
  self["AboutScrollLabel"].setText(commitlog)
 def keyMenu(self):
  # Menu key: open the repository selection setup.
  self.session.open(CommitInfoSetup)
 def showTranslationInfo(self):
  self.session.open(TranslationInfo)
 def showAbout(self):
  self.session.open(About)
class CommitInfoSetup(Screen, ConfigListScreen):
 # Small setup dialog selecting which repository's commit log to show.
 skin = """
 <screen position="c-300,c-250" size="600,200" title="XTA CommitInfoSetup">
 <widget name="config" position="25,25" scrollbarMode="showOnDemand" size="550,400" />
 <ePixmap pixmap="skin_default/buttons/red.png" position="20,e-45" size="140,40" alphatest="on" />
 <ePixmap pixmap="skin_default/buttons/green.png" position="160,e-45" size="140,40" alphatest="on" />
 <widget source="key_red" render="Label" position="20,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
 <widget source="key_green" render="Label" position="160,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
 </screen>"""
 def __init__(self, session):
  self.skin = CommitInfoSetup.skin
  Screen.__init__(self, session)
  self['key_red'] = StaticText(_('Cancel'))
  self['key_green'] = StaticText(_('OK'))
  self['actions'] = ActionMap(['SetupActions', 'ColorActions', 'EPGSelectActions', 'NumberActions'],
   {'ok': self.keyGo,
   'left': self.keyLeft,
   'right': self.keyRight,
   'save': self.keyGo,
   'cancel': self.keyCancel,
   'green': self.keyGo,
   'red': self.keyCancel}, -2)
  self.list = []
  ConfigListScreen.__init__(self, self.list, session=self.session)
  self.list.append(getConfigListEntry(_('Select CommitInfo Log'), config.CommitInfoSetup.commiturl))
  self['config'].list = self.list
  self['config'].l.setList(self.list)
 def keyLeft(self):
  ConfigListScreen.keyLeft(self)
 def keyRight(self):
  ConfigListScreen.keyRight(self)
 def keyGo(self):
  # Green/OK: persist all edited config entries, then close.
  for x in self['config'].list:
   x[1].save()
  self.close()
 def keyCancel(self):
  # Red/cancel: revert all edited config entries, then close.
  for x in self['config'].list:
   x[1].cancel()
  self.close()
| gpl-2.0 |
winndows/cinder | cinder/keymgr/key.py | 156 | 2587 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base Key and SymmetricKey Classes
This module defines the Key and SymmetricKey classes. The Key class is the base
class to represent all encryption keys. The basis for this class was copied
from Java.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Key(object):
 """Base class to represent all keys."""
 # Abstract base (via six for Python 2/3 compatibility): concrete key
 # classes must implement all three accessors below.
 @abc.abstractmethod
 def get_algorithm(self):
  """Returns the key's algorithm.
  Returns the key's algorithm. For example, "DSA" indicates that this key
  is a DSA key and "AES" indicates that this key is an AES key.
  """
  pass
 @abc.abstractmethod
 def get_format(self):
  """Returns the encoding format.
  Returns the key's encoding format or None if this key is not encoded.
  """
  pass
 @abc.abstractmethod
 def get_encoded(self):
  """Returns the key in the format specified by its encoding."""
  pass
class SymmetricKey(Key):
 """Represents a symmetric key (the same key encrypts and decrypts)."""

 def __init__(self, alg, key):
  """Store the symmetric algorithm name `alg` and the key bytes `key`."""
  self.alg = alg
  self.key = key

 def get_algorithm(self):
  """Return the symmetric algorithm name (e.g. "AES")."""
  return self.alg

 def get_format(self):
  """Return "RAW": the key bytes carry no additional encoding."""
  return "RAW"

 def get_encoded(self):
  """Return the raw key bytes."""
  return self.key

 def __eq__(self, other):
  # Only comparable against other SymmetricKey instances; otherwise
  # defer to the other operand via NotImplemented.
  if not isinstance(other, SymmetricKey):
   return NotImplemented
  return self.alg == other.alg and self.key == other.key

 def __ne__(self, other):
  equal = self.__eq__(other)
  if equal is NotImplemented:
   return equal
  return not equal
| apache-2.0 |
maftieu/CouchPotatoServer | libs/rsa/__init__.py | 111 | 1568 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RSA module
Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.
WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.
If you want to have a more secure implementation, use the functions from the
``rsa.pkcs1`` module.
"""
__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2012-06-17"
__version__ = '3.1.1'
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
VerificationError
# Do doctest if we're run directly
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
'PrivateKey', 'DecryptionError', 'VerificationError']
| gpl-3.0 |
wrdsb/canvas-lms | vendor/bundle/ruby/1.9.1/gems/pygments.rb-0.5.2/vendor/pygments-main/pygments/styles/tango.py | 363 | 7096 | # -*- coding: utf-8 -*-
"""
pygments.styles.tango
~~~~~~~~~~~~~~~~~~~~~
The Crunchy default Style inspired from the color palette from
the Tango Icon Theme Guidelines.
http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines
Butter: #fce94f #edd400 #c4a000
Orange: #fcaf3e #f57900 #ce5c00
Chocolate: #e9b96e #c17d11 #8f5902
Chameleon: #8ae234 #73d216 #4e9a06
Sky Blue: #729fcf #3465a4 #204a87
Plum: #ad7fa8 #75507b #5c35cc
Scarlet Red:#ef2929 #cc0000 #a40000
Aluminium: #eeeeec #d3d7cf #babdb6
#888a85 #555753 #2e3436
Not all of the above colors are used; other colors added:
very light grey: #f8f8f8 (for background)
This style can be used as a template as it includes all the known
Token types, unlike most (if not all) of the styles included in the
Pygments distribution.
However, since Crunchy is intended to be used by beginners, we have strived
to create a style that gloss over subtle distinctions between different
categories.
Taking Python for example, comments (Comment.*) and docstrings (String.Doc)
have been chosen to have the same style. Similarly, keywords (Keyword.*),
and Operator.Word (and, or, in) have been assigned the same style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class TangoStyle(Style):
    """Crunchy's default style, built on the Tango Icon Theme palette.

    http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines

    Every known token type is assigned a style, so this class can double
    as a template for new styles. Aimed at beginners, it deliberately
    glosses over subtle distinctions (e.g. comments and docstrings share
    one look; keywords and Operator.Word share another).
    """

    # Work in progress...

    # Very light grey background (not part of the Tango palette).
    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "",  # class: ''

        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        # Comments and docstrings share one look (see class docstring).
        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Multiline:         "italic #8f5902",         # class: 'cm'
        Comment.Preproc:           "italic #8f5902",         # class: 'cp'
        Comment.Single:            "italic #8f5902",         # class: 'c1'
        Comment.Special:           "italic #8f5902",         # class: 'cs'

        Keyword:                   "bold #204a87",           # class: 'k'
        Keyword.Constant:          "bold #204a87",           # class: 'kc'
        Keyword.Declaration:       "bold #204a87",           # class: 'kd'
        Keyword.Namespace:         "bold #204a87",           # class: 'kn'
        Keyword.Pseudo:            "bold #204a87",           # class: 'kp'
        Keyword.Reserved:          "bold #204a87",           # class: 'kr'
        Keyword.Type:              "bold #204a87",           # class: 'kt'

        Operator:                  "bold #ce5c00",           # class: 'o'
        Operator.Word:             "bold #204a87",           # class: 'ow' - like keywords

        Punctuation:               "bold #000000",           # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#204a87",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "bold #5c35cc",           # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #204a87",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised

        # since the tango light blue does not show up well in text, we choose
        # a pure blue instead.
        Number:                    "bold #0000cf",           # class: 'm'
        Number.Float:              "bold #0000cf",           # class: 'mf'
        Number.Hex:                "bold #0000cf",           # class: 'mh'
        Number.Integer:            "bold #0000cf",           # class: 'mi'
        Number.Integer.Long:       "bold #0000cf",           # class: 'il'
        Number.Oct:                "bold #0000cf",           # class: 'mo'

        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'

        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'

        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "italic #000000",         # class: 'go'
        Generic.Prompt:            "#8f5902",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
| agpl-3.0 |
ggirelli/gpseq-img-py | pygpseq/anim/series.py | 1 | 12252 | # -*- coding: utf-8 -*-
'''
@author: Gabriele Girelli
@contact: gigi.ga90@gmail.com
@description: contains Series wrapper, which in turn contains Nucleus.
'''
# DEPENDENCIES =================================================================
import math
import os
import matplotlib.pyplot as plt
import numpy as np
from skimage.measure import label
from pygpseq import const
from pygpseq.tools.binarize import Binarize
from pygpseq.tools import io as iot
from pygpseq.tools import image as imt
from pygpseq.tools import plot
from pygpseq.tools import stat as stt
from pygpseq.tools import string as st
from pygpseq.tools import vector as vt
from pygpseq.anim.nucleus import Nucleus
# CLASSES ======================================================================
class Series(iot.IOinterface):
    """Series (Field of View, i.e., two-channel image) wrapper.

    Attributes:
      __version__ (string): package version.
      n (int): series id (1-indexed).
      name (string): series name.
      nuclei (list[pygpseq.wraps.Nuclei]): nuclei list.
      basedir (string): series folder path.
      dna_bg (float): estimated dna channel background.
      sig_bg (float): estimated signal channel background.
      filist (list): series file info.
    """

    __version__ = const.VERSION
    n = 0
    name = ''
    # NOTE(review): these class-level mutable defaults are shared across
    # instances until shadowed; __init__ and find_nuclei reassign them.
    nuclei = []
    basedir = '.'
    dna_bg = None
    sig_bg = None
    filist = []

    def __init__(self, ds, condition = None, **kwargs):
        """Run IOinterface __init__ method.

        Args:
          ds (dict): series information list (name, file info, id).
          condition (pyGPSeq.wraps.Condition): condition wrapper (opt).
        """
        # If required, inherit from `condition` wrap
        if None != condition:
            logpath = condition.logpath
            super(Series, self).__init__(path = logpath, append = True)
            self.basedir = condition.path
        else:
            super(Series, self).__init__()

        # Save input parameters
        self.name = ds[0]
        self.filist = ds[1]
        self.n = ds[2]

    def __getitem__(self, key):
        """Allow get item; returns None for unknown attributes."""
        if key in dir(self):
            return(getattr(self, key))
        else:
            return(None)

    def __setitem__(self, key, value):
        """Allow set item; silently ignores unknown attributes."""
        if key in dir(self):
            self.__setattr__(key, value)

    def adjust_options(self, read_only_dna = None, log = None, **kwargs):
        """Adjust options to be passed to the Nucleus class.

        Args:
          read_only_dna (bool): when True, skip reading the signal channel.
          log (string): log string to append to.
          **kwargs
            dna_names (tuple[string]): dna channel names.
            sig_names (tuple[string]): signal channel names.
            an_type (pyGPSeq.const): analysis type.

        Returns:
          dict: adds the following kwargs:
            series_name (string): series wrap name.
            basedir (string): series wrap base directory.
            dna_ch (numpy.array): image (dimensionality based on an_type).
            sig_ch (numpy.array): image (dimensionality based on an_type).
        """
        # Start log
        if None == log: log = ''

        # Only work on dna channel
        if None == read_only_dna:
            read_only_dna = False

        # Add necessary options
        kwargs['series_name'] = self.name
        kwargs['basedir'] = self.basedir

        # Read DNA channel
        kwargs['dna_ch'], log = self.get_channel(kwargs['dna_names'],
            log, **kwargs)

        if not read_only_dna:
            # Read signal channel as well
            kwargs['sig_ch'], log = self.get_channel(kwargs['sig_names'],
                log, **kwargs)

        # Output
        return((kwargs, log))

    def export_nuclei(self, **kwargs):
        """Export current series nuclei.

        Returns:
          numpy.ndarray: structured array with one summary row per nucleus.
        """
        # Set output suffix
        if not 'suffix' in kwargs.keys():
            suffix = ''
        else:
            suffix = st.add_leading_dot(kwargs['suffix'])

        # Add necessary options
        self.printout('Current series: "' + self.name + '"...', 1)
        kwargs, log = self.adjust_options(**kwargs)

        # Export nuclei
        [n.export(**kwargs) for n in self.nuclei]

        # Produce log
        log = np.zeros(len(self.nuclei), dtype = const.DTYPE_NUCLEAR_SUMMARY)
        # FIX: the row index `i` was previously undefined (NameError) and
        # `log[i, :]` is invalid on a 1-D structured array; enumerate
        # provides the index and rows are assigned as tuples.
        for i, l in enumerate([n.get_summary(**kwargs) for n in self.nuclei]):
            # Prepend the series id to the nuclear data row
            summary = [self.n]
            summary.extend(l)
            log[i] = tuple(summary)

        # Export series log
        np.savetxt(kwargs['out_dir'] + self.name + '.summary' + suffix + '.csv',
            log, delimiter = ',', comments = '',
            header = ",".join([h for h in log.dtype.names]))

        return(log)

    def find_channel(self, channel_names):
        """Return the first channel to correspond to channel_names."""
        # Fix the param type
        if type(str()) == type(channel_names):
            channel_names = [channel_names]

        # Cycle through the available channels
        for cname in channel_names:
            # Identify the requested channel
            idx = self.find_channel_id(cname)

            # Return the channel
            if -1 != idx:
                return([i for i in self.filist.items()][idx])

        # Return empty dictionary if no matching channel is found
        return({})

    def find_channel_id(self, channel_name):
        """Return the id of the channel file with the specified name."""
        # Retrieve available channel names
        names = self.get_channel_names()

        if 0 != names.count(channel_name):
            # Return matching channel id
            return(names.index(channel_name))
        else:
            # Return -1 if no matching channel is found
            return(-1)

    def find_nuclei(self, **kwargs):
        """Segment current series.

        Args:
          **kwargs
            dna_names (tuple[string]): dna channel names.
            cond_name (string): condition wrapper name.
            seg_type (pyGPSeq.const): segmentation type.
            rm_z_tips (bool): remove nuclei touching the tips of the stack.
            radius_interval (tuple[float]): allowed nuclear radius interval.
            offset (tuple[int]): dimensions box/square offset.
            aspect (tuple[float]): pixel/voxel dimension proportion.

        Returns:
          tuple: series current instance and log string.
        """
        # Set output suffix
        if not 'suffix' in kwargs.keys():
            suffix = ''
        else:
            suffix = st.add_leading_dot(kwargs['suffix'])

        # Check plotting
        if not 'plotting' in kwargs.keys():
            kwargs['plotting'] = True

        log = ""
        log += self.printout('Current series: "' + self.name + '"...', 1)

        # Read images
        kwargs, alog = self.adjust_options(read_only_dna = False, **kwargs)
        log += alog

        # Extract from kwargs
        seg_type = kwargs['seg_type']
        dna_ch = kwargs['dna_ch']
        sig_ch = kwargs['sig_ch']

        # Make new channel copy
        i = dna_ch.copy()

        # Produce a mask
        bi = Binarize(path = kwargs['logpath'], append = True, **kwargs)
        bi.verbose = self.verbose
        mask, thr, tmp_log = bi.run(i)
        log += tmp_log

        # Estimate background (only once per series; cached on the instance)
        if None == self.dna_bg:
            self.dna_bg = imt.estimate_background(dna_ch, mask, seg_type)
        kwargs['dna_bg'] = self.dna_bg
        if None == self.sig_bg:
            self.sig_bg = imt.estimate_background(sig_ch, mask, seg_type)
        kwargs['sig_bg'] = self.sig_bg
        log += self.printout('Estimating background:', 2)
        log += self.printout('DNA channel: ' + str(kwargs['dna_bg']), 3)
        log += self.printout('Signal channel: ' + str(kwargs['sig_bg']), 3)

        # Filter object size
        mask, tmp_log = bi.filter_obj_XY_size(mask)
        log += tmp_log
        mask, tmp_log = bi.filter_obj_Z_size(mask)
        log += tmp_log

        # Save mask
        log += self.printout('Saving series object mask...', 2)
        L = label(mask)

        # Plot (max-project along Z when the mask is a 3D stack)
        fig = plt.figure()
        if 3 == len(mask.shape):
            plt.imshow(L.max(0).astype('u4'))
        else:
            plt.imshow(L.astype('u4'))
        plt.gca().get_xaxis().set_visible(False)
        plt.gca().get_yaxis().set_visible(False)
        plot.set_font_size(kwargs['font_size'])
        title = 'Nuclei in "' + kwargs['cond_name'] + '", ' + str(self.name)
        title += ' [' + str(L.max()) + ' objects]'
        plt.title(title)

        # Export as png
        fname = kwargs['out_dir'] + const.OUTDIR_MASK + kwargs['cond_name']
        fname += '.' + self.name + '.mask' + suffix + '.png'
        if kwargs['plotting']: plot.export(fname, 'png')

        # Close plot figure
        plt.close(fig)

        # Initialize nuclei
        log += self.printout('Bounding ' + str(L.max()) + ' nuclei...', 2)
        kwargs['logpath'] = self.logpath
        kwargs['i'] = i
        kwargs['thr'] = thr
        kwargs['series_id'] = self.n
        seq = range(1, L.max() + 1)
        self.nuclei = [Nucleus(n = n, mask = L == n, **kwargs) for n in seq]

        return((self, log))

    def get_c(self):
        """Return number of channels in the series."""
        return(len(self.filist))

    def get_channel(self, ch_name, log = None, **kwargs):
        """Read the series specified channel.

        Args:
          ch_name (string): channel name.
          log (string): log string.
          **kwargs

        Returns:
          tuple: channel image and log string.
        """
        # Start log (used when verbosity is off)
        if None == log: log = ""

        log += self.printout('Reading channel "' + str(ch_name) + '"...', 2)

        # Read channel
        f = self.find_channel(ch_name)
        imch = imt.read_tiff(os.path.join(self.basedir, f[0]))
        imch = imt.slice_k_d_img(imch, 3)

        # Deconvolved images correction
        if 'rescale_deconvolved' in kwargs.keys():
            if kwargs['rescale_deconvolved']:
                # Get DNA scaling factor and rescale
                sf = imt.get_rescaling_factor(f, **kwargs)
                imch = (imch / sf).astype('float')
                msg = 'Rescaling "' + f[0] + '" [' + str(sf) + ']...'
                log += self.printout(msg, 3)

        # Make Z-projection
        if kwargs['an_type'] in [const.AN_SUM_PROJ, const.AN_MAX_PROJ]:
            msg = 'Generating Z-projection [' + str(kwargs['an_type']) + ']...'
            log += self.printout(msg, 3)
            if 2 != len(imch.shape):
                imch = imt.mk_z_projection(imch, kwargs['an_type'])

        # Prepare output
        return((imch, log))

    def get_channel_names(self, channel_field = None):
        """Return the names of the channels in the series."""
        if None == channel_field:
            channel_field = const.REG_CHANNEL_NAME
        return([c[channel_field] for c in self.filist.values()])

    def get_nuclei_data(self, nuclei_ids, **kwargs):
        """Retrieve a single nucleus from the current series.

        Args:
          nuclei_ids (iterable[int]): 1-indexed nucleus ids.

        Returns:
          tuple: list of nuclear data and log string.
        """
        # Read channel images
        kwargs, log = self.adjust_options(**kwargs)

        # Re-build mask
        bi = Binarize(path = self.logpath, append = True, **kwargs)
        bi.verbose = self.verbose
        mask, thr, tmp_log = bi.run(kwargs['dna_ch'].copy())
        log += tmp_log

        # Empty nuclear data array
        data = []

        for nucleus_id in nuclei_ids:
            # Select nucleus (ids are 1-indexed, the list is 0-indexed)
            n = self.nuclei[nucleus_id -1]

            # Setup nucleus instance verbosity
            if not self.verbose:
                n.verbose = False

            # Retrieve nuclear data
            ndata, nlog = n.get_data(mask = mask, **kwargs)

            # Update log and save nuclear data
            log += nlog
            data.append(ndata)

        return((data, log))

    def propagate_attr(self, key):
        """Propagate attribute current value to every nucleus."""
        for i in range(len(self.nuclei)):
            self.nuclei[i][key] = self[key]
# END ==========================================================================
################################################################################
| mit |
SlimRemix/android_external_chromium_org | third_party/protobuf/python/google/protobuf/internal/enum_type_wrapper.py | 292 | 3541 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple wrapper around enum types to expose utility functions.
Instances are created as properties with the same name as the enum they wrap
on proto classes. For usage, see:
reflection_test.py
"""
__author__ = 'rabsatt@google.com (Kevin Rabsatt)'
class EnumTypeWrapper(object):
    """A utility for finding the names of enum values."""

    DESCRIPTOR = None

    def __init__(self, enum_type):
        """Inits EnumTypeWrapper with an EnumDescriptor."""
        self._enum_type = enum_type
        self.DESCRIPTOR = enum_type

    def Name(self, number):
        """Returns a string containing the name of an enum value."""
        try:
            return self._enum_type.values_by_number[number].name
        except KeyError:
            raise ValueError('Enum %s has no name defined for value %d' % (
                self._enum_type.name, number))

    def Value(self, name):
        """Returns the value coresponding to the given enum name."""
        try:
            return self._enum_type.values_by_name[name].number
        except KeyError:
            raise ValueError('Enum %s has no value defined for name %s' % (
                self._enum_type.name, name))

    def keys(self):
        """Return a list of the string names in the enum.

        These are returned in the order they were defined in the .proto file.
        """
        return [descriptor.name for descriptor in self._enum_type.values]

    def values(self):
        """Return a list of the integer values in the enum.

        These are returned in the order they were defined in the .proto file.
        """
        return [descriptor.number for descriptor in self._enum_type.values]

    def items(self):
        """Return a list of the (name, value) pairs of the enum.

        These are returned in the order they were defined in the .proto file.
        """
        return [(descriptor.name, descriptor.number)
                for descriptor in self._enum_type.values]
| bsd-3-clause |
caphrim007/ansible | lib/ansible/modules/cloud/amazon/rds_instance.py | 12 | 50632 | #!/usr/bin/python
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: maturity level and support channel.
ANSIBLE_METADATA = dict(
    metadata_version='1.1',
    status=['preview'],
    supported_by='community',
)
DOCUMENTATION = '''
---
module: rds_instance
version_added: "2.7"
short_description: Manage RDS instances
description:
- Create, modify, and delete RDS instances.
requirements:
- botocore
- boto3 >= 1.5.0
extends_documentation_fragment:
- aws
- ec2
author:
- Sloane Hertel (@s-hertel)
options:
# General module options
state:
description:
- Whether the snapshot should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state
and start it prior to rebooting if it was stopped. I(present) will leave the DB instance in the current running/stopped state,
(running if creating the DB instance).
- I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). Note - rebooting the instance
is not idempotent.
choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted']
default: 'present'
creation_source:
description: Which source to use if restoring from a template (an existing instance, S3 bucket, or snapshot).
choices: ['snapshot', 's3', 'instance']
force_update_password:
description:
- Set to True to update your cluster password with I(master_user_password). Since comparing passwords to determine
if it needs to be updated is not possible this is set to False by default to allow idempotence.
type: bool
default: False
purge_cloudwatch_logs_exports:
description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance.
type: bool
default: True
purge_tags:
description: Set to False to retain any tags that aren't specified in task and are associated with the instance.
type: bool
default: True
read_replica:
description:
- Set to False to promote a read replica cluster or true to create one. When creating a read replica C(creation_source) should
be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option.
type: bool
wait:
description:
- Whether to wait for the cluster to be available, stopped, or deleted. At a later time a wait_timeout option may be added.
Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches
the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the
instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting).
If setting this to False task retries and delays may make your playbook execution better handle timeouts for major modifications.
type: bool
default: True
# Options that have a corresponding boto3 parameter
allocated_storage:
description:
- The amount of storage (in gibibytes) to allocate for the DB instance.
allow_major_version_upgrade:
description:
- Whether to allow major version upgrades.
type: bool
apply_immediately:
description:
- A value that specifies whether modifying a cluster with I(new_db_instance_identifier) and I(master_user_password)
should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes
are applied during the next maintenance window.
type: bool
default: False
auto_minor_version_upgrade:
description:
- Whether minor version upgrades are applied automatically to the DB instance during the maintenance window.
type: bool
availability_zone:
description:
- A list of EC2 Availability Zones that instances in the DB cluster can be created in.
May be used when creating a cluster or when restoring from S3 or a snapshot. Mutually exclusive with I(multi_az).
aliases:
- az
- zone
backup_retention_period:
description:
- The number of days for which automated backups are retained (must be greater or equal to 1).
May be used when creating a new cluster, when restoring from S3, or when modifying a cluster.
ca_certificate_identifier:
description:
- The identifier of the CA certificate for the DB instance.
character_set_name:
description:
- The character set to associate with the DB cluster.
copy_tags_to_snapshot:
description:
- Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating
a DB instance the RDS API defaults this to false if unspecified.
type: bool
db_cluster_identifier:
description:
- The DB cluster (lowercase) identifier to add the aurora DB instance to. The identifier must contain from 1 to
63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or
contain consecutive hyphens.
aliases:
- cluster_id
db_instance_class:
description:
- The compute and memory capacity of the DB instance, for example db.t2.micro.
aliases:
- class
- instance_type
db_instance_identifier:
description:
- The DB instance (lowercase) identifier. The identifier must contain from 1 to 63 letters, numbers, or
hyphens and the first character must be a letter and may not end in a hyphen or contain consecutive hyphens.
aliases:
- instance_id
- id
required: True
db_name:
description:
- The name for your database. If a name is not provided Amazon RDS will not create a database.
db_parameter_group_name:
description:
- The name of the DB parameter group to associate with this DB instance. When creating the DB instance if this
argument is omitted the default DBParameterGroup for the specified engine is used.
db_security_groups:
description:
- (EC2-Classic platform) A list of DB security groups to associate with this DB instance.
type: list
db_snapshot_identifier:
description:
- The identifier for the DB snapshot to restore from if using I(creation_source=snapshot).
db_subnet_group_name:
description:
- The DB subnet group name to use for the DB instance.
aliases:
- subnet_group
domain:
description:
- The Active Directory Domain to restore the instance in.
domain_iam_role_name:
description:
- The name of the IAM role to be used when making API calls to the Directory Service.
enable_cloudwatch_logs_exports:
description:
- A list of log types that need to be enabled for exporting to CloudWatch Logs.
aliases:
- cloudwatch_log_exports
type: list
enable_iam_database_authentication:
description:
- Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts.
If this option is omitted when creating the cluster, Amazon RDS sets this to False.
type: bool
enable_performance_insights:
description:
- Whether to enable Performance Insights for the DB instance.
type: bool
engine:
description:
- The name of the database engine to be used for this DB instance. This is required to create an instance.
Valid choices are aurora | aurora-mysql | aurora-postgresql | mariadb | mysql | oracle-ee | oracle-se |
oracle-se1 | oracle-se2 | postgres | sqlserver-ee | sqlserver-ex | sqlserver-se | sqlserver-web
engine_version:
description:
- The version number of the database engine to use. For Aurora MySQL that could be 5.6.10a , 5.7.12.
Aurora PostgreSQL example, 9.6.3
final_db_snapshot_identifier:
description:
- The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false.
aliases:
- final_snapshot_identifier
force_failover:
description:
- Set to true to conduct the reboot through a MultiAZ failover.
type: bool
iops:
description:
- The Provisioned IOPS (I/O operations per second) value.
kms_key_id:
description:
- The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the
same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key
alias instead of the ARN for the KM encryption key.
- If I(storage_encrypted) is true and and this option is not provided, the default encryption key is used.
license_model:
description:
- The license model for the DB instance.
choices:
- license-included
- bring-your-own-license
- general-public-license
master_user_password:
description:
- An 8-41 character password for the master database user. The password can contain any printable ASCII character
except "/", """, or "@". To modify the password use I(force_password_update). Use I(apply immediately) to change
the password immediately, otherwise it is updated during the next maintenance window.
aliases:
- password
master_username:
description:
- The name of the master user for the DB cluster. Must be 1-16 letters or numbers and begin with a letter.
aliases:
- username
monitoring_interval:
description:
- The interval, in seconds, when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting
metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance.
monitoring_role_arn:
description:
- The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs.
multi_az:
description:
- Specifies if the DB instance is a Multi-AZ deployment. Mutually exclusive with I(availability_zone).
type: bool
new_db_instance_identifier:
description:
- The new DB cluster (lowercase) identifier for the DB cluster when renaming a DB instance. The identifier must contain
from 1 to 63 letters, numbers, or hyphens and the first character must be a letter and may not end in a hyphen or
contain consecutive hyphens. Use I(apply_immediately) to rename immediately, otherwise it is updated during the
next maintenance window.
aliases:
- new_instance_id
- new_id
option_group_name:
description:
- The option group to associate with the DB instance.
performance_insights_kms_key_id:
description:
- The AWS KMS key identifier (ARN, name, or alias) for encryption of Performance Insights data.
performance_insights_retention_period:
description:
- The amount of time, in days, to retain Performance Insights data. Valid values are 7 or 731.
port:
description:
- The port number on which the instances accept connections.
preferred_backup_window:
description:
- The daily time range (in UTC) of at least 30 minutes, during which automated backups are created if automated backups are
enabled using I(backup_retention_period). The option must be in the format of "hh24:mi-hh24:mi" and not conflict with
I(preferred_maintenance_window).
aliases:
- backup_window
preferred_maintenance_window:
description:
- The weekly time range (in UTC) of at least 30 minutes, during which system maintenance can occur. The option must
be in the format "ddd:hh24:mi-ddd:hh24:mi" where ddd is one of Mon, Tue, Wed, Thu, Fri, Sat, Sun.
aliases:
- maintenance_window
processor_features:
description:
- A dictionary of Name, Value pairs to indicate the number of CPU cores and the number of threads per core for the
DB instance class of the DB instance. Names are threadsPerCore and coreCount.
Set this option to an empty dictionary to use the default processor features.
suboptions:
threadsPerCore:
description: The number of threads per core
coreCount:
description: The number of CPU cores
promotion_tier:
description:
- An integer that specifies the order in which an Aurora Replica is promoted to the primary instance after a failure of
the existing primary instance.
publicly_accessible:
description:
- Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with
a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal
instance with a DNS name that resolves to a private IP address.
type: bool
restore_time:
description:
- If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance.
      For example, "2009-09-07T23:45:00Z". May alternatively set I(use_latest_restorable_time) to True.
s3_bucket_name:
description:
- The name of the Amazon S3 bucket that contains the data used to create the Amazon DB instance.
s3_ingestion_role_arn:
description:
- The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that authorizes Amazon RDS to access
the Amazon S3 bucket on your behalf.
s3_prefix:
description:
- The prefix for all of the file names that contain the data used to create the Amazon DB instance. If you do not
specify a SourceS3Prefix value, then the Amazon DB instance is created by using all of the files in the Amazon S3 bucket.
skip_final_snapshot:
description:
    - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false I(final_db_snapshot_identifier)
must be provided.
type: bool
default: false
snapshot_identifier:
description:
- The ARN of the DB snapshot to restore from when using I(creation_source=snapshot).
source_db_instance_identifier:
description:
- The identifier or ARN of the source DB instance from which to restore when creating a read replica or spinning up a point-in-time
DB instance using I(creation_source=instance). If the source DB is not in the same region this should be an ARN.
source_engine:
description:
- The identifier for the database engine that was backed up to create the files stored in the Amazon S3 bucket.
choices:
- mysql
source_engine_version:
description:
- The version of the database that the backup files were created from.
source_region:
description:
- The region of the DB instance from which the replica is created.
storage_encrypted:
description:
- Whether the DB instance is encrypted.
type: bool
storage_type:
description:
- The storage type to be associated with the DB instance. I(storage_type) does not apply to Aurora DB instances.
choices:
- standard
- gp2
- io1
tags:
description:
- A dictionary of key value pairs to assign the DB cluster.
tde_credential_arn:
description:
- The ARN from the key store with which to associate the instance for Transparent Data Encryption. This is
supported by Oracle or SQL Server DB instances and may be used in conjunction with C(storage_encrypted)
though it might slightly affect the performance of your database.
aliases:
- transparent_data_encryption_arn
tde_credential_password:
description:
- The password for the given ARN from the key store in order to access the device.
aliases:
- transparent_data_encryption_password
timezone:
description:
- The time zone of the DB instance.
use_latest_restorable_time:
description:
- Whether to restore the DB instance to the latest restorable backup time. Only one of I(use_latest_restorable_time)
      and I(restore_time) may be provided.
type: bool
aliases:
- restore_from_latest
vpc_security_group_ids:
description:
- A list of EC2 VPC security groups to associate with the DB cluster.
type: list
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: create minimal aurora instance in default VPC and default subnet group
rds_instance:
engine: aurora
db_instance_identifier: ansible-test-aurora-db-instance
instance_type: db.t2.small
password: "{{ password }}"
username: "{{ username }}"
cluster_id: ansible-test-cluster # This cluster must exist - see rds_cluster to manage it
- name: Create a DB instance using the default AWS KMS encryption key
rds_instance:
id: test-encrypted-db
state: present
engine: mariadb
storage_encrypted: True
db_instance_class: db.t2.medium
username: "{{ username }}"
password: "{{ password }}"
allocated_storage: "{{ allocated_storage }}"
- name: remove the DB instance without a final snapshot
rds_instance:
id: "{{ instance_id }}"
state: absent
skip_final_snapshot: True
- name: remove the DB instance with a final snapshot
rds_instance:
id: "{{ instance_id }}"
state: absent
final_snapshot_identifier: "{{ snapshot_id }}"
'''
RETURN = '''
allocated_storage:
description: The allocated storage size in gibibytes. This is always 1 for aurora database engines.
returned: always
type: int
sample: 20
auto_minor_version_upgrade:
description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window.
returned: always
type: bool
sample: true
availability_zone:
description: The availability zone for the DB instance.
returned: always
type: string
sample: us-east-1f
backup_retention_period:
description: The number of days for which automated backups are retained.
returned: always
type: int
sample: 1
ca_certificate_identifier:
description: The identifier of the CA certificate for the DB instance.
returned: always
type: string
sample: rds-ca-2015
copy_tags_to_snapshot:
description: Whether tags are copied from the DB instance to snapshots of the DB instance.
returned: always
type: bool
sample: false
db_instance_arn:
description: The Amazon Resource Name (ARN) for the DB instance.
returned: always
type: string
sample: arn:aws:rds:us-east-1:123456789012:db:ansible-test
db_instance_class:
description: The name of the compute and memory capacity class of the DB instance.
returned: always
type: string
sample: db.m4.large
db_instance_identifier:
description: The identifier of the DB instance
returned: always
type: string
sample: ansible-test
db_instance_port:
description: The port that the DB instance listens on.
returned: always
type: int
sample: 0
db_instance_status:
description: The current state of this database.
returned: always
type: string
sample: stopped
db_parameter_groups:
description: The list of DB parameter groups applied to this DB instance.
returned: always
type: complex
contains:
db_parameter_group_name:
description: The name of the DP parameter group.
returned: always
type: string
sample: default.mariadb10.0
parameter_apply_status:
description: The status of parameter updates.
returned: always
type: string
sample: in-sync
db_security_groups:
description: A list of DB security groups associated with this DB instance.
returned: always
type: list
sample: []
db_subnet_group:
description: The subnet group associated with the DB instance.
returned: always
type: complex
contains:
db_subnet_group_description:
description: The description of the DB subnet group.
returned: always
type: string
sample: default
db_subnet_group_name:
description: The name of the DB subnet group.
returned: always
type: string
sample: default
subnet_group_status:
description: The status of the DB subnet group.
returned: always
type: string
sample: Complete
subnets:
description: A list of Subnet elements.
returned: always
type: complex
contains:
subnet_availability_zone:
description: The availability zone of the subnet.
returned: always
type: complex
contains:
name:
description: The name of the Availability Zone.
returned: always
type: string
sample: us-east-1c
subnet_identifier:
description: The ID of the subnet.
returned: always
type: string
sample: subnet-12345678
subnet_status:
description: The status of the subnet.
returned: always
type: string
sample: Active
vpc_id:
description: The VpcId of the DB subnet group.
returned: always
type: string
sample: vpc-12345678
dbi_resource_id:
description: The AWS Region-unique, immutable identifier for the DB instance.
returned: always
type: string
sample: db-UHV3QRNWX4KB6GALCIGRML6QFA
domain_memberships:
description: The Active Directory Domain membership records associated with the DB instance.
returned: always
type: list
sample: []
endpoint:
description: The connection endpoint.
returned: always
type: complex
contains:
address:
description: The DNS address of the DB instance.
returned: always
type: string
sample: ansible-test.cvlrtwiennww.us-east-1.rds.amazonaws.com
hosted_zone_id:
description: The ID that Amazon Route 53 assigns when you create a hosted zone.
returned: always
type: string
sample: ZTR2ITUGPA61AM
port:
description: The port that the database engine is listening on.
returned: always
type: int
sample: 3306
engine:
  description: The database engine.
returned: always
type: string
sample: mariadb
engine_version:
description: The database engine version.
returned: always
type: string
sample: 10.0.35
iam_database_authentication_enabled:
description: Whether mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled.
returned: always
type: bool
sample: false
instance_create_time:
description: The date and time the DB instance was created.
returned: always
type: string
sample: '2018-07-04T16:48:35.332000+00:00'
kms_key_id:
description: The AWS KMS key identifier for the encrypted DB instance when storage_encrypted is true.
returned: When storage_encrypted is true
type: string
sample: arn:aws:kms:us-east-1:123456789012:key/70c45553-ad2e-4a85-9f14-cfeb47555c33
latest_restorable_time:
description: The latest time to which a database can be restored with point-in-time restore.
returned: always
type: string
sample: '2018-07-04T16:50:50.642000+00:00'
license_model:
description: The License model information for this DB instance.
returned: always
type: string
sample: general-public-license
master_username:
description: The master username for the DB instance.
returned: always
type: string
sample: test
monitoring_interval:
description:
- The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
0 means collecting Enhanced Monitoring metrics is disabled.
returned: always
type: int
sample: 0
multi_az:
description: Whether the DB instance is a Multi-AZ deployment.
returned: always
type: bool
sample: false
option_group_memberships:
description: The list of option group memberships for this DB instance.
returned: always
type: complex
contains:
option_group_name:
description: The name of the option group that the instance belongs to.
returned: always
type: string
sample: default:mariadb-10-0
status:
description: The status of the DB instance's option group membership.
returned: always
type: string
sample: in-sync
pending_modified_values:
description: The changes to the DB instance that are pending.
returned: always
type: complex
contains: {}
performance_insights_enabled:
description: True if Performance Insights is enabled for the DB instance, and otherwise false.
returned: always
type: bool
sample: false
preferred_backup_window:
description: The daily time range during which automated backups are created if automated backups are enabled.
returned: always
type: string
sample: 07:01-07:31
preferred_maintenance_window:
description: The weekly time range (in UTC) during which system maintenance can occur.
returned: always
type: string
sample: sun:09:31-sun:10:01
publicly_accessible:
description:
- True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an
internal instance with a DNS name that resolves to a private IP address.
returned: always
type: bool
sample: true
read_replica_db_instance_identifiers:
description: Identifiers of the Read Replicas associated with this DB instance.
returned: always
type: list
sample: []
storage_encrypted:
description: Whether the DB instance is encrypted.
returned: always
type: bool
sample: false
storage_type:
description: The storage type to be associated with the DB instance.
returned: always
type: string
sample: standard
tags:
description: A dictionary of tags associated with the DB instance.
returned: always
type: complex
contains: {}
vpc_security_groups:
description: A list of VPC security group elements that the DB instance belongs to.
returned: always
type: complex
contains:
status:
description: The status of the VPC security group.
returned: always
type: string
sample: active
vpc_security_group_id:
description: The name of the VPC security group.
returned: always
type: string
sample: sg-12345678
'''
from ansible.module_utils._text import to_text
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code, get_boto3_client_method_parameters
from ansible.module_utils.aws.rds import ensure_tags, arg_spec_to_rds_params, call_method, get_rds_method_attribute, get_tags, get_final_identifier
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry
from ansible.module_utils.six import string_types
from time import sleep
try:
from botocore.exceptions import ClientError, BotoCoreError, WaiterError
except ImportError:
pass # caught by AnsibleAWSModule
def get_rds_method_attribute_name(instance, state, creation_source, read_replica):
    """Select the boto3 RDS client method matching the desired state.

    Returns the method name as a string, or None when no API call is needed
    (deleting an instance that is already gone or already being deleted).
    """
    if state in ('absent', 'terminated'):
        # Only issue a delete when the instance exists and is not already going away.
        if instance and instance['DBInstanceStatus'] not in ('deleting', 'deleted'):
            return 'delete_db_instance'
        return None
    if instance:
        # An existing instance is always updated in place.
        return 'modify_db_instance'
    if read_replica is True:
        return 'create_db_instance_read_replica'
    if creation_source == 'snapshot':
        return 'restore_db_instance_from_db_snapshot'
    if creation_source == 's3':
        return 'restore_db_instance_from_s3'
    if creation_source == 'instance':
        return 'restore_db_instance_to_point_in_time'
    return 'create_db_instance'
def get_instance(client, module, db_instance_id):
    """Describe a single DB instance, retrying briefly while it becomes visible.

    Returns the describe_db_instances result dict augmented with a 'Tags' key
    and with any ProcessorFeatures lists flattened into {Name: Value} mappings,
    or {} if the instance is still not found after three attempts. Any other
    AWS error fails the module.
    """
    try:
        # A just-created instance may not be visible yet; retry a few times
        # before concluding that it does not exist.
        for i in range(3):
            try:
                instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0]
                instance['Tags'] = get_tags(client, module, instance['DBInstanceArn'])
                if instance.get('ProcessorFeatures'):
                    # Flatten [{'Name': n, 'Value': v}, ...] into {n: v} for easier comparison.
                    instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures'])
                if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
                    instance['PendingModifiedValues']['ProcessorFeatures'] = dict(
                        (feature['Name'], feature['Value'])
                        for feature in instance['PendingModifiedValues']['ProcessorFeatures']
                    )
                break
            except is_boto3_error_code('DBInstanceNotFound'):
                sleep(3)
        else:
            # Every attempt raised DBInstanceNotFound: report "no such instance".
            instance = {}
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Failed to describe DB instances')
    return instance
def get_final_snapshot(client, module, snapshot_identifier):
    """Look up the final snapshot by identifier.

    Returns the snapshot description dict when exactly one match exists,
    otherwise {} (including when the snapshot does not exist yet).
    """
    try:
        response = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier)
        matches = response.get('DBSnapshots', [])
        return matches[0] if len(matches) == 1 else {}
    except is_boto3_error_code('DBSnapshotNotFound') as e:  # May not be using wait: True
        return {}
    except (BotoCoreError, ClientError) as e:  # pylint: disable=duplicate-except
        module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot')
def get_parameters(client, module, parameters, method_name):
    """Filter and reshape module parameters for the chosen RDS client method.

    Drops parameters the method does not accept, fails if a required one is
    missing, converts ProcessorFeatures and Tags to the boto3 wire format, and
    delegates to get_options_with_changing_values() for modify_db_instance.
    """
    required_options = get_boto3_client_method_parameters(client, method_name, required=True)
    missing_required = [k for k in required_options if parameters.get(k) is None]
    if missing_required:
        module.fail_json(msg='To {0} requires the parameters: {1}'.format(
            get_rds_method_attribute(method_name, module).operation_description, required_options))

    accepted_options = get_boto3_client_method_parameters(client, method_name)
    parameters = {k: v for k, v in parameters.items() if k in accepted_options and v is not None}

    if parameters.get('ProcessorFeatures') is not None:
        # The API expects a list of {'Name': ..., 'Value': <string>} pairs.
        parameters['ProcessorFeatures'] = [
            {'Name': feature_name, 'Value': to_text(feature_value)}
            for feature_name, feature_value in parameters['ProcessorFeatures'].items()
        ]
    # If this parameter is an empty list it can only be used with modify_db_instance
    # (as the parameter UseDefaultProcessorFeatures).
    if parameters.get('ProcessorFeatures') == [] and method_name != 'modify_db_instance':
        parameters.pop('ProcessorFeatures')

    if method_name == 'create_db_instance' and parameters.get('Tags'):
        parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags'])
    if method_name == 'modify_db_instance':
        parameters = get_options_with_changing_values(client, module, parameters)
    if method_name == 'restore_db_instance_to_point_in_time':
        parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier']

    return parameters
def get_options_with_changing_values(client, module, parameters):
    """Reduce modify_db_instance parameters to only those that actually change
    the instance, handling password updates, the port key rename, log export
    configuration, and modifications already pending on the instance.
    """
    instance_id = module.params['db_instance_identifier']
    purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports']
    force_update_password = module.params['force_update_password']
    port = module.params['port']
    apply_immediately = parameters.pop('ApplyImmediately', None)
    cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports']

    if port:
        # modify_db_instance names this option DBPortNumber rather than Port.
        parameters['DBPortNumber'] = port
    if not force_update_password:
        # Only send the password when the user explicitly asked for an update.
        parameters.pop('MasterUserPassword', None)
    if cloudwatch_logs_enabled:
        parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled

    instance = get_instance(client, module, instance_id)
    changing = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs)
    changing.update(get_changing_options_with_consistent_keys(parameters, instance))
    parameters = changing

    desired_new_id = parameters.get('NewDBInstanceIdentifier')
    pending_new_id = instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier')
    if desired_new_id and pending_new_id:
        if desired_new_id == pending_new_id and not apply_immediately:
            # The rename is already queued for the next maintenance window.
            parameters.pop('NewDBInstanceIdentifier')

    if parameters:
        parameters['DBInstanceIdentifier'] = instance_id
        if apply_immediately is not None:
            parameters['ApplyImmediately'] = apply_immediately

    return parameters
def get_current_attributes_with_inconsistent_keys(instance):
    """Map a describe_db_instances result onto the modify_db_instance option
    names whose keys differ between the two APIs, preferring any pending
    (not-yet-applied) value over the currently active one.
    """
    pending = instance.get('PendingModifiedValues', {})
    options = {}

    pending_log_exports = pending.get('PendingCloudwatchLogsExports', {})
    if pending_log_exports.get('LogTypesToEnable', []):
        options['CloudwatchLogsExportConfiguration'] = {
            'LogTypesToEnable': pending_log_exports['LogTypesToEnable'],
            'LogTypesToDisable': pending_log_exports['LogTypesToDisable'],
        }
    else:
        options['CloudwatchLogsExportConfiguration'] = {
            'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []),
            'LogTypesToDisable': [],
        }

    if pending.get('Port'):
        options['DBPortNumber'] = pending['Port']
    else:
        options['DBPortNumber'] = instance['Endpoint']['Port']

    if pending.get('DBSubnetGroupName'):
        options['DBSubnetGroupName'] = pending['DBSubnetGroupName']
    else:
        options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName']

    if pending.get('ProcessorFeatures'):
        options['ProcessorFeatures'] = pending['ProcessorFeatures']
    else:
        options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {})

    options['OptionGroupName'] = [membership['OptionGroupName'] for membership in instance['OptionGroupMemberships']]
    # Only groups that are (or are becoming) attached count as current.
    options['DBSecurityGroups'] = [
        sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ('adding', 'active')
    ]
    options['VpcSecurityGroupIds'] = [
        sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ('adding', 'active')
    ]
    options['DBParameterGroupName'] = [
        parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']
    ]
    # These have no retrievable current value; None means "never equal to a desired value".
    options['AllowMajorVersionUpgrade'] = None
    options['MasterUserPassword'] = None
    options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled']
    options['EnablePerformanceInsights'] = instance['PerformanceInsightsEnabled']
    options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier']

    return options
def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_cloudwatch_logs):
    """Determine which modify_db_instance options (whose names differ from the
    describe_db_instances keys) actually need to change.

    :param modify_params: desired options keyed by modify_db_instance names;
        keys handled here are popped from this dict as they are consumed
    :param instance: current describe_db_instances result for the instance
    :param purge_cloudwatch_logs: whether enabled log exports absent from the
        desired configuration should be disabled
    :return: dict of options to pass to modify_db_instance
    """
    changing_params = {}
    current_options = get_current_attributes_with_inconsistent_keys(instance)
    for option in current_options:
        current_option = current_options[option]
        desired_option = modify_params.pop(option, None)
        if desired_option is None:
            continue

        # TODO: allow other purge_option module parameters rather than just checking for things to add
        if isinstance(current_option, list):
            if isinstance(desired_option, list):
                if set(desired_option) <= set(current_option):
                    continue
            elif isinstance(desired_option, string_types):
                if desired_option in current_option:
                    continue
        if current_option == desired_option:
            continue

        if option == 'ProcessorFeatures' and desired_option == []:
            # An empty dict from the user means "revert to default processor features".
            changing_params['UseDefaultProcessorFeatures'] = True
        elif option == 'CloudwatchLogsExportConfiguration':
            # BUGFIX: desired_option is a plain list of log types while current_option
            # is a {'LogTypesToEnable': [...], 'LogTypesToDisable': [...]} dict; the
            # previous code called .difference() on the raw list, which raised
            # AttributeError. Convert both sides to sets before diffing.
            desired_types = set(desired_option)
            current_types = set(current_option.get('LogTypesToEnable', []))
            format_option = {'EnableLogTypes': [], 'DisableLogTypes': []}
            format_option['EnableLogTypes'] = list(desired_types.difference(current_types))
            if purge_cloudwatch_logs:
                format_option['DisableLogTypes'] = list(current_types.difference(desired_types))
            if format_option['EnableLogTypes'] or format_option['DisableLogTypes']:
                changing_params[option] = format_option
        else:
            changing_params[option] = desired_option

    return changing_params
def get_changing_options_with_consistent_keys(modify_params, instance):
    """Return the subset of modify_params whose desired values differ from the
    instance's current (or already-pending) values.

    These option names are identical between modify_db_instance and the
    describe_db_instances output, so a direct comparison works; a pending
    modification takes precedence over the active value when deciding whether
    anything still needs to change.

    (The unused local ``inconsistent_parameters`` from the original has been
    removed; it was dead code.)
    """
    changing_params = {}
    for param in modify_params:
        current_option = instance.get('PendingModifiedValues', {}).get(param)
        if current_option is None:
            current_option = instance[param]
        if modify_params[param] != current_option:
            changing_params[param] = modify_params[param]
    return changing_params
def validate_options(client, module, instance):
    """Fail the module early on parameter combinations the RDS APIs would reject.

    Checks rename conflicts, the final-snapshot requirement on deletion, TDE
    engine support, and read-replica creation constraints.
    """
    state = module.params['state']
    skip_final_snapshot = module.params['skip_final_snapshot']
    snapshot_id = module.params['final_db_snapshot_identifier']
    modified_id = module.params['new_db_instance_identifier']
    engine = module.params['engine']
    tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn'])
    read_replica = module.params['read_replica']
    creation_source = module.params['creation_source']
    source_instance = module.params['source_db_instance_identifier']
    # NOTE(review): the original computed a same_region flag from source_region
    # here but never used it (dead code, removed). If a cross-region replica
    # check was intended it still needs to be written.

    if modified_id:
        modified_instance = get_instance(client, module, modified_id)
    else:
        modified_instance = {}

    if modified_id and instance and modified_instance:
        module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id))
    if modified_id and not instance and modified_instance:
        module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not exist'.format(modified_id))
    if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None:
        module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier')
    if engine is not None and not (engine.startswith('mysql') or engine.startswith('oracle')) and tde_options:
        module.fail_json(msg='TDE is available for MySQL and Oracle DB instances')
    if read_replica is True and not instance and creation_source not in [None, 'instance']:
        module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source))
    if read_replica is True and not instance and not source_instance:
        module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier')
def update_instance(client, module, instance, instance_id):
    """Apply post-create/modify updates: tags, replica promotion, and run state.

    Returns True if any of those steps changed the instance.
    """
    if not instance:
        # Fetch the description of an instance that was just created.
        instance = get_instance(client, module, instance_id)

    # Check tagging/promoting/rebooting/starting/stopping instance
    changed = ensure_tags(
        client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags']
    )
    changed = promote_replication_instance(client, module, instance, module.params['read_replica']) or changed
    changed = update_instance_state(client, module, instance, module.params['state']) or changed
    return changed
def promote_replication_instance(client, module, instance, read_replica):
    """Promote the instance out of read-replica status when read_replica is False.

    Returns True when a promotion was needed (or was already not applicable
    according to the API), False otherwise.
    """
    if read_replica is not False:
        # read_replica of None/True means "leave replication configuration alone".
        return False
    # Either attribute indicates the instance is acting as a replica.
    needs_promotion = bool(instance.get('ReadReplicaSourceDBInstanceIdentifier') or instance.get('StatusInfos'))
    if not needs_promotion:
        return False
    try:
        call_method(client, module, method_name='promote_read_replica',
                    parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']})
    except is_boto3_error_code('InvalidDBInstanceState') as e:
        if 'DB Instance is not a read replica' not in e.response['Error']['Message']:
            raise e
    return True
def update_instance_state(client, module, instance, state):
    """Reboot, start, or stop the instance as required by the desired state.

    Returns True if a state-changing API call was made.
    """
    if state in ('rebooted', 'restarted'):
        return bool(reboot_running_db_instance(client, module, instance))
    if state in ('started', 'running', 'stopped'):
        return bool(start_or_stop_instance(client, module, instance, state))
    return False
def reboot_running_db_instance(client, module, instance):
    """Reboot the DB instance, first starting it if it is stopped.

    Returns True if the reboot call changed the instance.
    """
    parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']}
    if instance['DBInstanceStatus'] in ('stopped', 'stopping'):
        # A stopped instance cannot be rebooted directly; bring it up first.
        call_method(client, module, 'start_db_instance', parameters)
    force_failover = module.params.get('force_failover')
    if force_failover is not None:
        parameters['ForceFailover'] = force_failover
    results, changed = call_method(client, module, 'reboot_db_instance', parameters)
    return changed
def start_or_stop_instance(client, module, instance, state):
    """Start or stop the DB instance unless it is already in (or moving toward)
    the desired state.

    Returns True if a start/stop API call changed the instance.
    """
    changed = False
    parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']}
    status = instance['DBInstanceStatus']
    if state == 'stopped' and status not in ('stopping', 'stopped'):
        snapshot_id = module.params['db_snapshot_identifier']
        if snapshot_id:
            # Optionally snapshot the instance as it is stopped.
            parameters['DBSnapshotIdentifier'] = snapshot_id
        result, changed = call_method(client, module, 'stop_db_instance', parameters)
    elif state == 'started' and status not in ('available', 'starting', 'restarting'):
        result, changed = call_method(client, module, 'start_db_instance', parameters)
    return changed
def main():
arg_spec = dict(
state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'),
creation_source=dict(choices=['snapshot', 's3', 'instance']),
force_update_password=dict(type='bool', default=False),
purge_cloudwatch_logs_exports=dict(type='bool', default=True),
purge_tags=dict(type='bool', default=True),
read_replica=dict(type='bool'),
wait=dict(type='bool', default=True),
)
parameter_options = dict(
allocated_storage=dict(type='int'),
allow_major_version_upgrade=dict(type='bool'),
apply_immediately=dict(type='bool', default=False),
auto_minor_version_upgrade=dict(type='bool'),
availability_zone=dict(aliases=['az', 'zone']),
backup_retention_period=dict(type='int'),
ca_certificate_identifier=dict(),
character_set_name=dict(),
copy_tags_to_snapshot=dict(type='bool'),
db_cluster_identifier=dict(aliases=['cluster_id']),
db_instance_class=dict(aliases=['class', 'instance_type']),
db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']),
db_name=dict(),
db_parameter_group_name=dict(),
db_security_groups=dict(type='list'),
db_snapshot_identifier=dict(),
db_subnet_group_name=dict(aliases=['subnet_group']),
domain=dict(),
domain_iam_role_name=dict(),
enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports']),
enable_iam_database_authentication=dict(type='bool'),
enable_performance_insights=dict(type='bool'),
engine=dict(),
engine_version=dict(),
final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']),
force_failover=dict(type='bool'),
iops=dict(type='int'),
kms_key_id=dict(),
license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license']),
master_user_password=dict(aliases=['password'], no_log=True),
master_username=dict(aliases=['username']),
monitoring_interval=dict(type='int'),
monitoring_role_arn=dict(),
multi_az=dict(type='bool'),
new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']),
option_group_name=dict(),
performance_insights_kms_key_id=dict(),
performance_insights_retention_period=dict(),
port=dict(type='int'),
preferred_backup_window=dict(aliases=['backup_window']),
preferred_maintenance_window=dict(aliases=['maintenance_window']),
processor_features=dict(type='dict'),
promotion_tier=dict(),
publicly_accessible=dict(type='bool'),
restore_time=dict(),
s3_bucket_name=dict(),
s3_ingestion_role_arn=dict(),
s3_prefix=dict(),
skip_final_snapshot=dict(type='bool', default=False),
snapshot_identifier=dict(),
source_db_instance_identifier=dict(),
source_engine=dict(choices=['mysql']),
source_engine_version=dict(),
source_region=dict(),
storage_encrypted=dict(type='bool'),
storage_type=dict(choices=['standard', 'gp2', 'io1']),
tags=dict(type='dict'),
tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']),
tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']),
timezone=dict(),
use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']),
vpc_security_group_ids=dict(type='list')
)
arg_spec.update(parameter_options)
required_if = [
('engine', 'aurora', ('cluster_id',)),
('engine', 'aurora-mysql', ('cluster_id',)),
('engine', 'aurora-postresql', ('cluster_id',)),
('creation_source', 'snapshot', ('snapshot_identifier', 'engine')),
('creation_source', 's3', (
's3_bucket_name', 'engine', 'master_username', 'master_user_password',
'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
]
mutually_exclusive = [
('s3_bucket_name', 'source_db_instance_identifier', 'snapshot_identifier'),
('use_latest_restorable_time', 'restore_to_time'),
('availability_zone', 'multi_az'),
]
module = AnsibleAWSModule(
argument_spec=arg_spec,
required_if=required_if,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True
)
if not module.boto3_at_least('1.5.0'):
module.fail_json(msg="rds_instance requires boto3 > 1.5.0")
# Sanitize instance identifiers
module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower()
if module.params['new_db_instance_identifier']:
module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower()
# Sanitize processor features
if module.params['processor_features'] is not None:
module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items())
client = module.client('rds')
changed = False
state = module.params['state']
instance_id = module.params['db_instance_identifier']
instance = get_instance(client, module, instance_id)
validate_options(client, module, instance)
method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica'])
if method_name:
raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options))
parameters = get_parameters(client, module, raw_parameters, method_name)
if parameters:
result, changed = call_method(client, module, method_name, parameters)
instance_id = get_final_identifier(method_name, module)
# Check tagging/promoting/rebooting/starting/stopping instance
if state != 'absent' and (not module.check_mode or instance):
changed |= update_instance(client, module, instance, instance_id)
if changed:
instance = get_instance(client, module, instance_id)
if state != 'absent' and (instance or not module.check_mode):
for attempt_to_wait in range(0, 10):
instance = get_instance(client, module, instance_id)
if instance:
break
else:
sleep(5)
if state == 'absent' and changed and not module.params['skip_final_snapshot']:
instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier']))
pending_processor_features = None
if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures')
instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])
if pending_processor_features is not None:
instance['pending_modified_values']['processor_features'] = pending_processor_features
module.exit_json(changed=changed, **instance)
# Standard Ansible module entry point: run main() only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
anshumang/lammps-analytics | tools/amber2lmp/amber2lammps.py | 8 | 36873 | #! /usr/bin/python
#
# This is amber2lammps, a program written by Keir E. Novik to convert
# Amber files to Lammps files.
#
# Copyright 1999, 2000 Keir E. Novik; all rights reserved.
#
# Modified by Vikas Varshney, U Akron, 5 July 2005, as described in README
# Bug Fixed :Third argument in Dihedral Coeffs section is an integer - Ketan S Khare September 26, 2011
# Modified by Vikas Varshney, Oct 8, 2013 to include additional flags (Atomic_Number, Coulombic and van der Waals 1-4 factors which are included in newer vesions of .top and .crd files in amber12.
#============================================================
def Pop(S, I=-1):
    'Remove and return element I (default: the last) of sequence S'
    # list.pop() performs the same lookup-then-delete as the original
    # X = S[I]; del S[I]; return X, including raising IndexError on a
    # bad index before anything is removed.
    return S.pop(I)
#============================================================
class Lammps:
    # Container for everything that goes into a LAMMPS 'data.' file.
    # Attributes (atoms, bonds, Masses, Atoms, Nonbond_Coeffs, ...) are
    # filled in externally by Amber.Coerce_to_Lammps() before
    # Write_Lammps() is called; there is no __init__ of its own.

    #--------------------------------------------------------

    def Dump(self):
        'Write out contents of self (intended for debugging)'
        Name_list = self.__dict__.keys()
        Name_list.sort()
        for Name in Name_list:
            print Name + ':', self.__dict__[Name]

    #--------------------------------------------------------

    def Write_data(self, Basename, Item_list):
        'Write the Lammps data to file (used by Write_Lammps)'
        import os, sys

        # Never overwrite existing output: try data.<Basename>, then
        # data1.<Basename>, data2.<Basename>, ... until the name is unused.
        Filename = 'data.' + Basename
        Dir_list = os.listdir('.')
        i = 1
        while Filename in Dir_list:
            Filename = 'data' + `i` + '.' + Basename
            i = i +1
        del i

        print 'Writing', Filename + '...',
        sys.stdout.flush()

        try:
            F = open(Filename, 'w')
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            return

        try:
            F.writelines(Item_list)
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            F.close()
            return

        F.close()
        print 'done.'

    #--------------------------------------------------------

    def Write_Lammps(self, Basename):
        'Write the Lammps data file, ignoring blank sections'
        import string  # NOTE(review): unused in this method; kept as-is

        # L accumulates the file as a list of strings; backquotes (`x`)
        # are Python 2 shorthand for repr(x).
        L = []

        # --- Header: entity and type counts ---
        L.append('LAMMPS data file for ' + self.name + '\n\n')

        L.append(`self.atoms` + ' atoms\n')
        L.append(`self.bonds` + ' bonds\n')
        L.append(`self.angles` + ' angles\n')
        L.append(`self.dihedrals` + ' dihedrals\n')
        L.append(`self.impropers` + ' impropers\n\n')

        L.append(`self.atom_types` + ' atom types\n')
        if self.bonds > 0:
            L.append(`self.bond_types` + ' bond types\n')
        if self.angles > 0:
            L.append(`self.angle_types` + ' angle types\n')
        if self.dihedrals > 0:
            L.append(`self.dihedral_types` + ' dihedral types\n')
        L.append('\n')

        # --- Simulation box bounds ---
        L.append(`self.xlo` + ' ' + `self.xhi` + ' xlo xhi\n')
        L.append(`self.ylo` + ' ' + `self.yhi` + ' ylo yhi\n')
        L.append(`self.zlo` + ' ' + `self.zhi` + ' zlo zhi\n\n')

        # --- Coefficient sections (skipped entirely when empty) ---
        if self.atom_types != 0:
            L.append('Masses\n\n')
            for i in range(self.atom_types):
                L.append(`i+1` + ' ' + `self.Masses[i]` + '\n')
            L.append('\n')

            L.append('Pair Coeffs\n\n')
            for i in range(self.atom_types):
                L.append(`i+1`)
                for j in range(len(self.Nonbond_Coeffs[0])):
                    L.append(' ' + `self.Nonbond_Coeffs[i][j]`)
                L.append('\n')
            L.append('\n')

        if self.bonds != 0 and self.bond_types != 0:
            L.append('Bond Coeffs\n\n')
            for i in range(self.bond_types):
                L.append(`i+1`)
                for j in range(len(self.Bond_Coeffs[0])):
                    L.append(' ' + `self.Bond_Coeffs[i][j]`)
                L.append('\n')
            L.append('\n')

        if self.angles != 0 and self.angle_types != 0:
            L.append('Angle Coeffs\n\n')
            for i in range(self.angle_types):
                L.append(`i+1`)
                for j in range(len(self.Angle_Coeffs[0])):
                    L.append(' ' + `self.Angle_Coeffs[i][j]`)
                L.append('\n')
            L.append('\n')

        if self.dihedrals != 0 and self.dihedral_types != 0:
            L.append('Dihedral Coeffs\n\n')
            for i in range(self.dihedral_types):
                L.append(`i+1`)
                for j in range(len(self.Dihedral_Coeffs[0])):
                    L.append(' ' + `self.Dihedral_Coeffs[i][j]`)
                L.append('\n')
            L.append('\n')

        # --- Topology sections ---
        if self.atoms != 0:
            L.append('Atoms\n\n')
            for i in range(self.atoms):
                L.append(`i+1`)
                for j in range(len(self.Atoms[0])):
                    L.append(' ' + `self.Atoms[i][j]`)
                L.append('\n')
            L.append('\n')

        if self.bonds != 0 and self.bond_types != 0:
            L.append('Bonds\n\n')
            for i in range(self.bonds):
                L.append(`i+1`)
                for j in range(len(self.Bonds[0])):
                    L.append(' ' + `self.Bonds[i][j]`)
                L.append('\n')
            L.append('\n')

        if self.angles != 0 and self.angle_types != 0:
            L.append('Angles\n\n')
            for i in range(self.angles):
                L.append(`i+1`)
                for j in range(len(self.Angles[0])):
                    L.append(' ' + `self.Angles[i][j]`)
                L.append('\n')
            L.append('\n')

        if self.dihedrals != 0 and self.dihedral_types != 0:
            L.append('Dihedrals\n\n')
            for i in range(self.dihedrals):
                L.append(`i+1`)
                for j in range(len(self.Dihedrals[0])):
                    L.append(' ' + `self.Dihedrals[i][j]`)
                L.append('\n')
            L.append('\n')

        self.Write_data(Basename, L)
#============================================================
class Amber:
    # Reader for Amber coordinate (.crd) and parameter/topology (.top)
    # files, plus a converter (Coerce_to_Lammps) into the Lammps container
    # class.  Parsing is strictly sequential: Read_data() flattens the file
    # into one token list and each field is consumed in order via Pop().
    # NOTE(review): numeric tokens are converted with eval(); on a
    # malicious input file this executes arbitrary code.  Acceptable for a
    # trusted local tool, but worth flagging.

    def __init__(self):
        'Initialise the Amber class'
        # Flags flipped to 1 once the respective file has been parsed.
        self.CRD_is_read = 0
        self.TOP_is_read = 0

    #--------------------------------------------------------

    def Dump(self):
        'Write out contents of self (intended for debugging)'
        Name_list = self.__dict__.keys()
        Name_list.sort()
        for Name in Name_list:
            print Name + ':', self.__dict__[Name]

    #--------------------------------------------------------

    def Coerce_to_Lammps(self):
        'Return the Amber data converted to Lammps format'
        import math

        if self.CRD_is_read and self.TOP_is_read:
            l = Lammps()
            print 'Converting...',

            # Entity counts: bonds/angles/dihedrals combine the
            # with-hydrogen and without-hydrogen Amber counts.
            l.name = self.ITITL
            l.atoms = self.NATOM
            l.bonds = self.NBONH + self.MBONA
            l.angles = self.NTHETH + self.MTHETA
            l.dihedrals = self.NPHIH + self.MPHIA
            l.impropers = 0
            l.atom_types = self.NTYPES
            l.bond_types = self.NUMBND
            l.angle_types = self.NUMANG
            l.dihedral_types = self.NPTRA

            Shift = 0
            if self.__dict__.has_key('BOX'):
                # Box known from the .crd (or .top) file: origin at 0.
                l.xlo = 0.0
                l.xhi = self.BOX[0]
                l.ylo = 0.0
                l.yhi = self.BOX[1]
                l.zlo = 0.0
                l.zhi = self.BOX[2]
                if (l.xlo > min(self.X)) or (l.xhi < max(self.X)) or \
                   (l.ylo > min(self.Y)) or (l.yhi < max(self.Y)) or \
                   (l.zlo > min(self.Z)) or (l.zhi < max(self.Z)):
                    # Vikas Modification: Disabling Shifting. This means I am intend to send exact coordinates of each atom and let LAMMPS
                    # take care of imaging into periodic image cells. If one wants to shift all atoms in the periodic box,
                    # please uncomment the below 2 lines.
                    print '(warning: Currently not shifting the atoms to the periodic box)'
                    #Shift = 1
            else:
                # No box in the input: use the coordinate extrema.
                print '(warning: Guessing at periodic box!)',
                l.xlo = min(self.X)
                l.xhi = max(self.X)
                l.ylo = min(self.Y)
                l.yhi = max(self.Y)
                l.zlo = min(self.Z)
                l.zhi = max(self.Z)

            # Per-type masses, taken from the first atom of each type.
            # This doesn't check duplicate values
            l.Masses = []
            for i in range(l.atom_types):
                l.Masses.append(0)
            for i in range(self.NATOM):
                l.Masses[self.IAC[i] - 1] = self.AMASS[i]

            # Convert Amber A/B (CN1/CN2) Lennard-Jones coefficients to
            # LAMMPS epsilon/sigma: eps = B^2/(4A), sigma = (A/B)^(1/6).
            l.Nonbond_Coeffs = []
            for i in range(self.NTYPES):
                l.Nonbond_Coeffs.append([0,0])
            for i in range(self.NTYPES):
                # Diagonal entry of the nonbonded parameter index table.
                j = self.ICO[i * (self.NTYPES + 1)] - 1
                if self.CN1[j] == 0.0:
                    l.Nonbond_Coeffs[i][0] = 0.0
                else:
                    l.Nonbond_Coeffs[i][0] = \
                        0.25 * (self.CN2[j])**2 / self.CN1[j]
                if self.CN2[j] == 0.0:
                    l.Nonbond_Coeffs[i][1] = 0.0
                else:
                    l.Nonbond_Coeffs[i][1] = \
                        (self.CN1[j] / self.CN2[j])**(1.0/6.0)

            l.Bond_Coeffs = []
            for i in range(self.NUMBND):
                l.Bond_Coeffs.append([0,0])
            for i in range(self.NUMBND):
                l.Bond_Coeffs[i][0] = self.RK[i]
                l.Bond_Coeffs[i][1] = self.REQ[i]

            l.Angle_Coeffs = []
            for i in range(self.NUMANG):
                l.Angle_Coeffs.append([0,0])
            for i in range(self.NUMANG):
                l.Angle_Coeffs[i][0] = self.TK[i]
                # Amber stores the equilibrium angle in radians; LAMMPS
                # wants degrees.
                l.Angle_Coeffs[i][1] = (180/math.pi) * self.TEQ[i]

            l.Dihedral_Coeffs = []
            for i in range(self.NPTRA):
                l.Dihedral_Coeffs.append([0,0,0])
            for i in range(self.NPTRA):
                l.Dihedral_Coeffs[i][0] = self.PK[i]
                # Phase 0 maps to d=+1, anything else to d=-1
                # (harmonic dihedral convention).
                if self.PHASE[i] == 0:
                    l.Dihedral_Coeffs[i][1] = 1
                else:
                    l.Dihedral_Coeffs[i][1] = -1
                l.Dihedral_Coeffs[i][2] = int(self.PN[i])

            # Atom lines: [molecule-id(0), type, charge, x, y, z].
            # Charges are divided by 18.2223 to convert Amber's internal
            # units to electron charges.
            l.Atoms = []
            for i in range(self.NATOM):
                x = self.X[i]
                y = self.Y[i]
                z = self.Z[i]
                if Shift:
                    # Wrap coordinates into the periodic box (disabled above).
                    while x < l.xlo:
                        x = x + self.BOX[0]
                    while x > l.xhi:
                        x = x - self.BOX[0]
                    while y < l.ylo:
                        y = y + self.BOX[1]
                    while y > l.yhi:
                        y = y - self.BOX[1]
                    while z < l.zlo:
                        z = z + self.BOX[2]
                    while z > l.zhi:
                        z = z - self.BOX[2]
                l.Atoms.append([0, self.IAC[i], self.CHRG[i]/18.2223, \
                                x, y, z])

            # Amber stores atom references as coordinate-array offsets
            # (3*(n-1)); abs(...)/3 + 1 recovers the 1-based atom index.
            l.Bonds = []
            for i in range(l.bonds):
                l.Bonds.append([0,0,0])
            for i in range(self.NBONH):
                l.Bonds[i][0] = self.ICBH[i]
                l.Bonds[i][1] = abs(self.IBH[i])/3 + 1
                l.Bonds[i][2] = abs(self.JBH[i])/3 + 1
            for i in range(self.NBONA):
                l.Bonds[self.NBONH + i][0] = self.ICB[i]
                l.Bonds[self.NBONH + i][1] = abs(self.IB[i])/3 + 1
                l.Bonds[self.NBONH + i][2] = abs(self.JB[i])/3 + 1

            l.Angles = []
            for i in range(l.angles):
                l.Angles.append([0,0,0,0])
            for i in range(self.NTHETH):
                l.Angles[i][0] = self.ICTH[i]
                l.Angles[i][1] = abs(self.ITH[i])/3 + 1
                l.Angles[i][2] = abs(self.JTH[i])/3 + 1
                l.Angles[i][3] = abs(self.KTH[i])/3 + 1
            for i in range(self.NTHETA):
                l.Angles[self.NTHETH + i][0] = self.ICT[i]
                l.Angles[self.NTHETH + i][1] = abs(self.IT[i])/3 + 1
                l.Angles[self.NTHETH + i][2] = abs(self.JT[i])/3 + 1
                l.Angles[self.NTHETH + i][3] = abs(self.KT[i])/3 + 1

            l.Dihedrals = []
            for i in range(l.dihedrals):
                l.Dihedrals.append([0,0,0,0,0])
            for i in range(self.NPHIH):
                l.Dihedrals[i][0] = self.ICPH[i]
                l.Dihedrals[i][1] = abs(self.IPH[i])/3 + 1
                l.Dihedrals[i][2] = abs(self.JPH[i])/3 + 1
                l.Dihedrals[i][3] = abs(self.KPH[i])/3 + 1
                l.Dihedrals[i][4] = abs(self.LPH[i])/3 + 1
            for i in range(self.NPHIA):
                l.Dihedrals[self.NPHIH + i][0] = self.ICP[i]
                l.Dihedrals[self.NPHIH + i][1] = abs(self.IP[i])/3 + 1
                l.Dihedrals[self.NPHIH + i][2] = abs(self.JP[i])/3 + 1
                l.Dihedrals[self.NPHIH + i][3] = abs(self.KP[i])/3 + 1
                l.Dihedrals[self.NPHIH + i][4] = abs(self.LP[i])/3 + 1

            print 'done.'
            return l
        else:
            print '(Error: Not all the Amber data has been read!)'

    #--------------------------------------------------------

    def Read_data(self, Filename):
        'Read the filename, returning a list of strings'
        import string, sys

        print 'Reading', Filename + '...',
        sys.stdout.flush()

        try:
            F = open(Filename)
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            return

        try:
            Lines = F.readlines()
        except IOError, Detail:
            print '(error:', Detail[1] + '!)'
            F.close()
            return

        F.close()

        # If the first line is empty, use the Basename
        if Filename[-4:] == '.crd':
            if string.split(Lines[0]) == []: # This line corresponds to TITLE name in CRD file
                Basename = Filename[:string.find(Filename, '.')]
                Item_list = [Basename]
                print 'Warning: Title not present... Assigning Basename as Title'
            else:
                Item_list = []
        else:
            if string.split(Lines[3]) == []: # This line corresponds to TITLE name in TOPOLOGY file
                Basename = Filename[:string.find(Filename, '.')]
                Item_list = [Basename]
                print 'Warning: Title not present... Assigning Basename as Title'
            else:
                Item_list = []

        # Flatten the whole file into one whitespace-separated token list.
        for Line in Lines:
            if Line[0]!='%': #Vikas' Modification: This condition ignores all the lines starting with % in the topology file.
                Item_list.extend(string.split(Line))

        return Item_list

    #--------------------------------------------------------

    def Read_CRD(self, Basename):
        'Read the Amber coordinate/restart (.crd) file'

        # The optional velocities and periodic box size are not yet parsed.

        Item_list = self.Read_data(Basename + '.crd')

        if Item_list == None:
            return
        elif len(Item_list) < 2:
            print '(error: File too short!)'
            return

        # Parse the data: title first, cross-checked if already known
        # from the topology file.
        if self.__dict__.has_key('ITITL'):
            if Pop(Item_list,0) != self.ITITL:
                print '(warning: ITITL differs!)',
        else:
            self.ITITL = Pop(Item_list,0)

        print self.ITITL # Vikas Modification: printing the title

        if self.__dict__.has_key('NATOM'):
            if eval(Pop(Item_list,0)) != self.NATOM:
                print '(error: NATOM differs!)'
                return
        else:
            self.NATOM = eval(Pop(Item_list,0))

        print self.NATOM # Vikas' Modification: printing number of atoms as a sanity check

        #if len(Item_list) == 1 + 3 * self.NATOM:
        # Vikas' Modification: I changed the condition.
        # A leftover token (count not divisible by 3) is taken to be the
        # optional simulation time.
        if (len(Item_list)%3) != 0:
            self.TIME = eval(Pop(Item_list,0))
        else:
            self.TIME = 0
        print self.TIME # Vikas' Modification: printing simulation time as a sanity check

        if len(Item_list) < 3 * self.NATOM:
            print '(error: File too short!)'
            return

        # Coordinates, interleaved x/y/z per atom.
        self.X = []
        self.Y = []
        self.Z = []
        for i in range(self.NATOM):
            self.X.append(eval(Pop(Item_list,0)))
            self.Y.append(eval(Pop(Item_list,0)))
            self.Z.append(eval(Pop(Item_list,0)))

        # With one atom, 3 leftover tokens could be either velocities or
        # the box -- ambiguous.
        if (self.NATOM == 1) and len(Item_list):
            print '(warning: Ambiguity!)',

        # Optional velocities block.
        if len(Item_list) >= 3 * self.NATOM:
            self.VX = []
            self.VY = []
            self.VZ = []
            for i in range(self.NATOM):
                self.VX.append(eval(Pop(Item_list,0)))
                self.VY.append(eval(Pop(Item_list,0)))
                self.VZ.append(eval(Pop(Item_list,0)))

        # Optional periodic box dimensions.
        if len(Item_list) >= 3:
            self.BOX = []
            for i in range(3):
                self.BOX.append(eval(Pop(Item_list,0)))

        if len(Item_list):
            print '(warning: File too large!)',

        print 'done.'
        self.CRD_is_read = 1

    #--------------------------------------------------------

    def Read_TOP(self, Basename):
        'Read the Amber parameter/topology (.top) file'
        Item_list = self.Read_data(Basename + '.top')

        if Item_list == None:
            return
        elif len(Item_list) < 31:
            print '(error: File too short!)'
            return

        # Parse the data: title, cross-checked against the .crd if read.
        if self.__dict__.has_key('ITITL'):
            if Pop(Item_list,0) != self.ITITL:
                print '(warning: ITITL differs!)'
        else:
            self.ITITL = Pop(Item_list,0)

        print self.ITITL # Printing Self Title

        if self.__dict__.has_key('NATOM'):
            if eval(Pop(Item_list,0)) != self.NATOM:
                print '(error: NATOM differs!)'
                return
        else:
            self.NATOM = eval(Pop(Item_list,0))

        print self.NATOM # Printing total number of atoms just to make sure that things are going right

        # The 30 scalar control values of the prmtop POINTERS section,
        # in file order.
        self.NTYPES = eval(Pop(Item_list,0))
        self.NBONH = eval(Pop(Item_list,0))
        self.MBONA = eval(Pop(Item_list,0))
        self.NTHETH = eval(Pop(Item_list,0))
        self.MTHETA = eval(Pop(Item_list,0))
        self.NPHIH = eval(Pop(Item_list,0))
        self.MPHIA = eval(Pop(Item_list,0))
        self.NHPARM = eval(Pop(Item_list,0))
        self.NPARM = eval(Pop(Item_list,0))
        self.NEXT = eval(Pop(Item_list,0))
        self.NRES = eval(Pop(Item_list,0))
        self.NBONA = eval(Pop(Item_list,0))
        self.NTHETA = eval(Pop(Item_list,0))
        self.NPHIA = eval(Pop(Item_list,0))
        self.NUMBND = eval(Pop(Item_list,0))
        self.NUMANG = eval(Pop(Item_list,0))
        self.NPTRA = eval(Pop(Item_list,0))
        self.NATYP = eval(Pop(Item_list,0))
        self.NPHB = eval(Pop(Item_list,0))
        self.IFPERT = eval(Pop(Item_list,0))
        self.NBPER = eval(Pop(Item_list,0))
        self.NGPER = eval(Pop(Item_list,0))
        self.NDPER = eval(Pop(Item_list,0))
        self.MBPER = eval(Pop(Item_list,0))
        self.MGPER = eval(Pop(Item_list,0))
        self.MDPER = eval(Pop(Item_list,0))
        self.IFBOX = eval(Pop(Item_list,0))
        self.NMXRS = eval(Pop(Item_list,0))
        self.IFCAP = eval(Pop(Item_list,0))

        #....................................................

        if len(Item_list) < 5 * self.NATOM + self.NTYPES**2 + \
           2*(self.NRES + self.NUMBND + self.NUMANG) + \
           3*self.NPTRA + self.NATYP:
            print '(error: File too short!)'
            return -1

        # Atom names.  NOTE(review): the leading Pop() discards one token
        # before the name table -- presumably a format artifact of the
        # flattened file; confirm against a sample .top before changing.
        self.IGRAPH = []
        Pop(Item_list,0)
        # A little kludge is needed here, since the IGRAPH strings are
        # not separated by spaces if 4 characters in length.
        for i in range(self.NATOM):
            if len(Item_list[0]) > 4:
                Item_list.insert(1, Item_list[0][4:])
                Item_list.insert(1, Item_list[0][0:4])
                del Item_list[0]
            self.IGRAPH.append(Pop(Item_list,0))

        # Vikas' Modification: In the following section, each quantity read
        # from the topology file is announced on stdout.
        print 'Reading Charges...'
        self.CHRG = []
        for i in range(self.NATOM):
            self.CHRG.append(eval(Pop(Item_list,0)))

        print 'Reading Atomic Number...'
        self.ANUMBER = []
        for i in range(self.NATOM):
            self.ANUMBER.append(eval(Pop(Item_list,0)))

        print 'Reading Atomic Masses...'
        self.AMASS = []
        for i in range(self.NATOM):
            self.AMASS.append(eval(Pop(Item_list,0)))

        print 'Reading Atom Types...'
        self.IAC = []
        for i in range(self.NATOM):
            self.IAC.append(eval(Pop(Item_list,0)))

        print 'Reading Excluded Atoms...'
        self.NUMEX = []
        for i in range(self.NATOM):
            self.NUMEX.append(eval(Pop(Item_list,0)))

        print 'Reading Non-bonded Parameter Index...'
        self.ICO = []
        for i in range(self.NTYPES**2):
            self.ICO.append(eval(Pop(Item_list,0)))

        print 'Reading Residue Labels...'
        self.LABRES = []
        for i in range(self.NRES):
            self.LABRES.append(Pop(Item_list,0))

        print 'Reading Residues Starting Pointers...'
        self.IPRES = []
        for i in range(self.NRES):
            self.IPRES.append(eval(Pop(Item_list,0)))

        print 'Reading Bond Force Constants...'
        self.RK = []
        for i in range(self.NUMBND):
            self.RK.append(eval(Pop(Item_list,0)))

        print 'Reading Equilibrium Bond Values...'
        self.REQ = []
        for i in range(self.NUMBND):
            self.REQ.append(eval(Pop(Item_list,0)))

        print 'Reading Angle Force Constants...'
        self.TK = []
        for i in range(self.NUMANG):
            self.TK.append(eval(Pop(Item_list,0)))

        print 'Reading Equilibrium Angle Values...'
        self.TEQ = []
        for i in range(self.NUMANG):
            self.TEQ.append(eval(Pop(Item_list,0)))

        print 'Reading Dihedral Force Constants...'
        self.PK = []
        for i in range(self.NPTRA):
            self.PK.append(eval(Pop(Item_list,0)))

        print 'Reading Dihedral Periodicity...'
        self.PN = []
        for i in range(self.NPTRA):
            self.PN.append(eval(Pop(Item_list,0)))

        print 'Reading Dihedral Phase...'
        self.PHASE = []
        for i in range(self.NPTRA):
            self.PHASE.append(eval(Pop(Item_list,0)))

        print 'Reading 1-4 Electrostatic Scaling Factor...'
        self.SCEEFAC = []
        for i in range(self.NPTRA):
            self.SCEEFAC.append(eval(Pop(Item_list,0)))

        print 'Reading 1-4 Van der Waals Scaling Factor...'
        self.SCNBFAC = []
        for i in range(self.NPTRA):
            self.SCNBFAC.append(eval(Pop(Item_list,0)))

        print 'Reading Solty...' # I think this is currently not used in AMBER. Check it out, though
        self.SOLTY = []
        for i in range(self.NATYP):
            self.SOLTY.append(eval(Pop(Item_list,0)))

        #....................................................

        if len(Item_list) < 2 * self.NTYPES * (self.NTYPES + 1) / 2:
            print '(error: File too short!)'
            return -1

        print 'Reading LJ A Coefficient...'
        self.CN1 = []
        for i in range(self.NTYPES * (self.NTYPES + 1) / 2):
            self.CN1.append(eval(Pop(Item_list,0)))

        print 'Reading LJ B Coefficient...'
        self.CN2 = []
        for i in range(self.NTYPES * (self.NTYPES + 1) / 2):
            self.CN2.append(eval(Pop(Item_list,0)))

        #....................................................

        if len(Item_list) < 3 * (self.NBONH + self.NBONA) + \
           4 * (self.NTHETH + self.NTHETA) + 5 * (self.NPHIH + self.NPHIA):
            print '(error: File too short!)'
            return -1

        print 'Reading Bonds which include hydrogen...'
        self.IBH = []
        self.JBH = []
        self.ICBH = []
        for i in range(self.NBONH):
            self.IBH.append(eval(Pop(Item_list,0)))
            self.JBH.append(eval(Pop(Item_list,0)))
            self.ICBH.append(eval(Pop(Item_list,0)))

        print 'Reading Bonds which dont include hydrogen...'
        self.IB = []
        self.JB = []
        self.ICB = []
        for i in range(self.NBONA):
            self.IB.append(eval(Pop(Item_list,0)))
            self.JB.append(eval(Pop(Item_list,0)))
            self.ICB.append(eval(Pop(Item_list,0)))

        print 'Reading Angles which include hydrogen...'
        self.ITH = []
        self.JTH = []
        self.KTH = []
        self.ICTH = []
        for i in range(self.NTHETH):
            self.ITH.append(eval(Pop(Item_list,0)))
            self.JTH.append(eval(Pop(Item_list,0)))
            self.KTH.append(eval(Pop(Item_list,0)))
            self.ICTH.append(eval(Pop(Item_list,0)))

        print 'Reading Angles which dont include hydrogen...'
        self.IT = []
        self.JT = []
        self.KT = []
        self.ICT = []
        for i in range(self.NTHETA):
            self.IT.append(eval(Pop(Item_list,0)))
            self.JT.append(eval(Pop(Item_list,0)))
            self.KT.append(eval(Pop(Item_list,0)))
            self.ICT.append(eval(Pop(Item_list,0)))

        print 'Reading Dihedrals which include hydrogen...'
        self.IPH = []
        self.JPH = []
        self.KPH = []
        self.LPH = []
        self.ICPH = []
        for i in range(self.NPHIH):
            self.IPH.append(eval(Pop(Item_list,0)))
            self.JPH.append(eval(Pop(Item_list,0)))
            self.KPH.append(eval(Pop(Item_list,0)))
            self.LPH.append(eval(Pop(Item_list,0)))
            self.ICPH.append(eval(Pop(Item_list,0)))

        print 'Reading Dihedrals which dont include hydrogen...'
        self.IP = []
        self.JP = []
        self.KP = []
        self.LP = []
        self.ICP = []
        for i in range(self.NPHIA):
            self.IP.append(eval(Pop(Item_list,0)))
            self.JP.append(eval(Pop(Item_list,0)))
            self.KP.append(eval(Pop(Item_list,0)))
            self.LP.append(eval(Pop(Item_list,0)))
            self.ICP.append(eval(Pop(Item_list,0)))

        #....................................................

        if len(Item_list) < self.NEXT + 3 * self.NPHB + 4 * self.NATOM:
            print '(error: File too short!)'
            return -1

        print 'Reading Excluded Atom List...'
        self.NATEX = []
        for i in range(self.NEXT):
            self.NATEX.append(eval(Pop(Item_list,0)))

        print 'Reading H-Bond A Coefficient, corresponding to r**12 term for all possible types...'
        self.ASOL = []
        for i in range(self.NPHB):
            self.ASOL.append(eval(Pop(Item_list,0)))

        print 'Reading H-Bond B Coefficient, corresponding to r**10 term for all possible types...'
        self.BSOL = []
        for i in range(self.NPHB):
            self.BSOL.append(eval(Pop(Item_list,0)))

        print 'Reading H-Bond Cut...' # I think it is not being used nowadays
        self.HBCUT = []
        for i in range(self.NPHB):
            self.HBCUT.append(eval(Pop(Item_list,0)))

        print 'Reading Amber Atom Types for each atom...'
        self.ISYMBL = []
        for i in range(self.NATOM):
            self.ISYMBL.append(Pop(Item_list,0))

        print 'Reading Tree Chain Classification...'
        self.ITREE = []
        for i in range(self.NATOM):
            self.ITREE.append(Pop(Item_list,0))

        print 'Reading Join Array: Tree joining information' # Currently unused in Sander, an AMBER module
        self.JOIN = []
        for i in range(self.NATOM):
            self.JOIN.append(eval(Pop(Item_list,0)))

        print 'Reading IRotate...' # Currently unused in Sander and Gibbs
        self.IROTAT = []
        for i in range(self.NATOM):
            self.IROTAT.append(eval(Pop(Item_list,0)))

        #....................................................
        # Periodic-box section, present only when IFBOX > 0.

        if self.IFBOX > 0:
            if len(Item_list) < 3:
                print '(error: File too short!)'
                return -1

            print 'Reading final residue which is part of solute...'
            self.IPTRES = eval(Pop(Item_list,0))
            print 'Reading total number of molecules...'
            self.NSPM = eval(Pop(Item_list,0))
            print 'Reading first solvent moleule index...'
            self.NSPSOL = eval(Pop(Item_list,0))

            if len(Item_list) < self.NSPM + 4:
                print '(error: File too short!)'
                return -1

            print 'Reading atom per molecule...'
            self.NSP = []
            for i in range(self.NSPM):
                self.NSP.append(eval(Pop(Item_list,0)))

            self.BETA = eval(Pop(Item_list,0))

            print 'Reading Box Dimensions...'
            if self.__dict__.has_key('BOX'):
                # Box already known from the .crd file: read and compare.
                BOX = []
                for i in range(3):
                    BOX.append(eval(Pop(Item_list,0)))
                for i in range(3):
                    if BOX[i] != self.BOX[i]:
                        print '(warning: BOX differs!)',
                        break
                del BOX
            else:
                self.BOX = []
                for i in range(3):
                    self.BOX.append(eval(Pop(Item_list,0)))

        #....................................................
        # Solvent-cap section, present only when IFCAP > 0.

        if self.IFCAP > 0:
            if len(Item_list) < 5:
                print '(error: File too short!)'
                return -1
            print 'Reading ICAP variables::: For details, refer to online AMBER format manual'
            self.NATCAP = eval(Pop(Item_list,0))
            self.CUTCAP = eval(Pop(Item_list,0))
            self.XCAP = eval(Pop(Item_list,0))
            self.YCAP = eval(Pop(Item_list,0))
            self.ZCAP = eval(Pop(Item_list,0))

        #....................................................
        # Perturbation section, present only when IFPERT > 0.

        if self.IFPERT > 0:
            if len(Item_list) < 4 * self.NBPER + 5 * self.NGPER + \
               6 * self.NDPER + self.NRES + 6 * self.NATOM:
                print '(error: File too short!)'
                return -1

            print 'Reading perturb variables, 1. Bond, 2. Angles, 3. Dihedrals, etc etc.::: For details, refer to online AMBER format manual'
            self.IBPER = []
            self.JBPER = []
            for i in range(self.NBPER):
                self.IBPER.append(eval(Pop(Item_list,0)))
                self.JBPER.append(eval(Pop(Item_list,0)))

            self.ICBPER = []
            for i in range(2 * self.NBPER):
                self.ICBPER.append(eval(Pop(Item_list,0)))

            self.ITPER = []
            self.JTPER = []
            self.KTPER = []
            for i in range(self.NGPER):
                self.ITPER.append(eval(Pop(Item_list,0)))
                self.JTPER.append(eval(Pop(Item_list,0)))
                self.KTPER.append(eval(Pop(Item_list,0)))

            self.ICTPER = []
            for i in range(2 * self.NGPER):
                self.ICTPER.append(eval(Pop(Item_list,0)))

            self.IPPER = []
            self.JPPER = []
            self.KPPER = []
            self.LPPER = []
            for i in range(self.NDPER):
                self.IPPER.append(eval(Pop(Item_list,0)))
                self.JPPER.append(eval(Pop(Item_list,0)))
                self.KPPER.append(eval(Pop(Item_list,0)))
                self.LPPER.append(eval(Pop(Item_list,0)))

            self.ICPPER = []
            for i in range(2 * self.NDPER):
                self.ICPPER.append(eval(Pop(Item_list,0)))

            LABRES = []
            for i in range(self.NRES):
                LABRES.append(Pop(Item_list,0))
            for i in range(self.NRES):
                if LABRES[i] != self.LABRES[i]:
                    # NOTE(review): this compares residue labels, but the
                    # message says BOX -- looks like a copy/paste slip.
                    print '(warning: BOX differs!)',
                    break

            self.IGRPER = []
            for i in range(self.NATOM):
                self.IGRPER.append(eval(Pop(Item_list,0)))

            self.ISMPER = []
            for i in range(self.NATOM):
                self.ISMPER.append(eval(Pop(Item_list,0)))

            self.ALMPER = []
            for i in range(self.NATOM):
                self.ALMPER.append(eval(Pop(Item_list,0)))

            self.IAPER = []
            for i in range(self.NATOM):
                self.IAPER.append(eval(Pop(Item_list,0)))

            self.IACPER = []
            for i in range(self.NATOM):
                self.IACPER.append(eval(Pop(Item_list,0)))

            self.CGPER = []
            for i in range(self.NATOM):
                self.CGPER.append(eval(Pop(Item_list,0)))

        #....................................................
        # Polarizability section.  IPOL is hard-coded to 0, so this
        # branch never executes as written.

        self.IPOL = 0
        if self.IPOL == 1:
            if len(Item_list) < self.NATOM:
                print '(error: File too short!)'
                return -1
            print 'Reading Polarizability Data. For details, refer to online AMBER format manual'
            self.ATPOL = []
            for i in range(self.NATOM):
                self.ATPOL.append(eval(Pop(Item_list,0)))

            if self.IFPERT == 1:
                if len(Item_list) < self.NATOM:
                    print '(error: File too short!)'
                    return -1
                self.ATPOL1 = []
                for i in range(self.NATOM):
                    self.ATPOL1.append(eval(Pop(Item_list,0)))

        #....................................................

        if len(Item_list):
            print '(warning: File too large!)',

        print 'done.'
        self.TOP_is_read = 1
#============================================================
def Find_Amber_files():
    'Look for sets of Amber files to process'
    '''If not passed anything on the command line, look for pairs of
    Amber files (.crd and .top) in the current directory. For
    each set if there is no corresponding Lammps file (data.), or it is
    older than any of the Amber files, add its basename to a list of
    strings. This list is returned by the function'''

    # Date and existence checks not yet implemented
    import os, sys

    Basename_list = []

    # Extract basenames from command line, stripping a .crd or .top suffix.
    for Name in sys.argv[1:]:
        if Name[-4:] == '.crd':
            Basename_list.append(Name[:-4])
        else:
            if Name[-4:] == '.top':
                Basename_list.append(Name[:-4])
            else:
                Basename_list.append(Name)

    # Remove duplicate basenames (preserving first occurrence).
    for Basename in Basename_list[:]:
        while Basename_list.count(Basename) > 1:
            Basename_list.remove(Basename)

    # No arguments given: scan the current directory for .top/.crd pairs.
    if Basename_list == []:
        print 'Looking for Amber files...',
        Dir_list = os.listdir('.')
        Dir_list.sort()
        for File in Dir_list:
            if File[-4:] == '.top':
                Basename = File[:-4]
                if (Basename + '.crd') in Dir_list:
                    Basename_list.append(Basename)
        if Basename_list != []:
            print 'found',
            for i in range(len(Basename_list)-1):
                print Basename_list[i] + ',',
            print Basename_list[-1] + '\n'

    if Basename_list == []:
        print 'none.\n'

    return Basename_list
#============================================================
def Convert_Amber_files():
    'Handle the whole conversion process'
    print
    print 'Welcome to amber2lammps, a program to convert Amber files to Lammps format!'
    print
    Basename_list = Find_Amber_files()
    for Basename in Basename_list:
        a = Amber()
        a.Read_CRD(Basename)
        if a.CRD_is_read:
            a.Read_TOP(Basename)
            if a.TOP_is_read:
                l = a.Coerce_to_Lammps()
                l.Write_Lammps(Basename)
                del l
        del a
        print

#============================================================

# Script entry point: runs unconditionally on import/execution.
Convert_Amber_files()
| gpl-2.0 |
lawishere/googletest | test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# True iff we are running on Windows.  The original line was
# "IS_WINDOWS = os.name = 'nt'" -- a chained *assignment* that rebound
# os.name to 'nt' on every platform (and made IS_WINDOWS the truthy
# string 'nt'); the comparison operator is what is intended.
IS_WINDOWS = os.name == 'nt'

# Names of the environment variable and command-line flag that control
# Google Test's colored output.
COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is None:
    # pop() with a default is a no-op when the variable is already absent,
    # matching the original membership-test-then-delete behavior.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""

  # Configure the child's environment, then build the command line: a
  # --gtest_color flag is passed only when color_flag is given.
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  args = [] if color_flag is None else ['--%s=%s' % (COLOR_FLAG, color_flag)]
  p = gtest_test_utils.Subprocess([COMMAND] + args)
  return not p.exited or p.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  """Verifies gtest's color decision for TERM/GTEST_COLOR/--gtest_color.

  Uses assertTrue/assertFalse instead of the deprecated ``assert_`` alias
  (``assert_`` is deprecated since Python 2.7 and removed in 3.12).
  """

  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      # Dumb / monochrome terminals and unknown terminals get no color.
      self.assertFalse(UsesColor('dumb', None, None))
      self.assertFalse(UsesColor('emacs', None, None))
      self.assertFalse(UsesColor('xterm-mono', None, None))
      self.assertFalse(UsesColor('unknown', None, None))
      self.assertFalse(UsesColor(None, None, None))
    # Color-capable terminals get color by default.
    self.assertTrue(UsesColor('linux', None, None))
    self.assertTrue(UsesColor('cygwin', None, None))
    self.assertTrue(UsesColor('xterm', None, None))
    self.assertTrue(UsesColor('xterm-color', None, None))
    self.assertTrue(UsesColor('xterm-256color', None, None))

  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    # An explicit 'no'/'yes' overrides the terminal type entirely.
    self.assertFalse(UsesColor('dumb', None, 'no'))
    self.assertFalse(UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      # 'auto' defers to the terminal type.
      self.assertFalse(UsesColor('emacs', None, 'auto'))
      self.assertTrue(UsesColor('xterm', None, 'auto'))
    self.assertTrue(UsesColor('dumb', None, 'yes'))
    self.assertTrue(UsesColor('xterm', None, 'yes'))

  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assertFalse(UsesColor('dumb', 'no', None))
    self.assertFalse(UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assertFalse(UsesColor('dumb', 'auto', None))
      self.assertTrue(UsesColor('xterm-color', 'auto', None))
    self.assertTrue(UsesColor('dumb', 'yes', None))
    self.assertTrue(UsesColor('xterm-color', 'yes', None))

  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The flag takes precedence over the environment variable.
    self.assertFalse(UsesColor('xterm-color', 'no', 'no'))
    self.assertTrue(UsesColor('dumb', 'no', 'yes'))
    self.assertTrue(UsesColor('xterm-color', 'no', 'auto'))

  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assertTrue(UsesColor('dumb', None, 'true'))
    self.assertTrue(UsesColor('dumb', None, 'YES'))
    self.assertTrue(UsesColor('dumb', None, 'T'))
    self.assertTrue(UsesColor('dumb', None, '1'))
    # Anything not recognized as an affirmative means "no color".
    self.assertFalse(UsesColor('xterm', None, 'f'))
    self.assertFalse(UsesColor('xterm', None, 'false'))
    self.assertFalse(UsesColor('xterm', None, '0'))
    self.assertFalse(UsesColor('xterm', None, 'unknown'))
# Run the suite via gtest's test-runner wrapper when executed as a script.
if __name__ == '__main__':
  gtest_test_utils.Main()
| bsd-3-clause |
thawatchai/mrkimontour | appengine-django/lib/django/db/migrations/loader.py | 75 | 16471 | from __future__ import unicode_literals
import os
import sys
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from django.utils import six
from .exceptions import AmbiguityError, BadMigrationError, NodeNotFoundError
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader(object):
    """
    Loads migration files from disk, and their status from the database.
    Migration files are expected to live in the "migrations" directory of
    an app. Their names are entirely unimportant from a code perspective,
    but will probably follow the 1234_name.py convention.
    On initialization, this class will scan those directories, and open and
    read the python files, looking for a class called Migration, which should
    inherit from django.db.migrations.Migration. See
    django.db.migrations.migration for what that looks like.
    Some migrations will be marked as "replacing" another set of migrations.
    These are loaded into a separate set of migrations away from the main ones.
    If all the migrations they replace are either unapplied or missing from
    disk, then they are injected into the main set, replacing the named migrations.
    Any dependency pointers to the replaced migrations are re-pointed to the
    new migration.
    This does mean that this class MUST also talk to the database as well as
    to disk, but this is probably fine. We're already not just operating
    in memory.
    """
    def __init__(self, connection, load=True, ignore_no_migrations=False):
        # connection may be None, in which case no applied-state is read
        # (see build_graph).
        self.connection = connection
        self.disk_migrations = None
        self.applied_migrations = None
        self.ignore_no_migrations = ignore_no_migrations
        if load:
            self.build_graph()
    @classmethod
    def migrations_module(cls, app_label):
        """Return the dotted path of the migrations module for app_label.

        settings.MIGRATION_MODULES takes precedence over the conventional
        '<app package>.migrations' location.
        """
        if app_label in settings.MIGRATION_MODULES:
            return settings.MIGRATION_MODULES[app_label]
        else:
            app_package_name = apps.get_app_config(app_label).name
            return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME)
    def load_disk(self):
        """
        Loads the migrations from all INSTALLED_APPS from disk.

        Populates self.disk_migrations (keyed by (app_label, name)) and the
        self.migrated_apps / self.unmigrated_apps label sets.
        """
        self.disk_migrations = {}
        self.unmigrated_apps = set()
        self.migrated_apps = set()
        for app_config in apps.get_app_configs():
            # Get the migrations module directory
            module_name = self.migrations_module(app_config.label)
            if module_name is None:
                self.unmigrated_apps.add(app_config.label)
                continue
            was_loaded = module_name in sys.modules
            try:
                module = import_module(module_name)
            except ImportError as e:
                # I hate doing this, but I don't want to squash other import errors.
                # Might be better to try a directory check directly.
                if "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                raise
            else:
                # PY3 will happily import empty dirs as namespaces.
                if not hasattr(module, '__file__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Module is not a package (e.g. migrations.py).
                if not hasattr(module, '__path__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Force a reload if it's already loaded (tests need this)
                if was_loaded:
                    six.moves.reload_module(module)
            self.migrated_apps.add(app_config.label)
            directory = os.path.dirname(module.__file__)
            # Scan for .py files
            migration_names = set()
            for name in os.listdir(directory):
                if name.endswith(".py"):
                    import_name = name.rsplit(".", 1)[0]
                    if import_name[0] not in "_.~":
                        migration_names.add(import_name)
            # Load them
            for migration_name in migration_names:
                migration_module = import_module("%s.%s" % (module_name, migration_name))
                if not hasattr(migration_module, "Migration"):
                    raise BadMigrationError(
                        "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                    )
                self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                    migration_name,
                    app_config.label,
                )
    def get_migration(self, app_label, name_prefix):
        "Gets the migration exactly named, or raises `graph.NodeNotFoundError`"
        return self.graph.nodes[app_label, name_prefix]
    def get_migration_by_prefix(self, app_label, name_prefix):
        "Returns the migration(s) which match the given app label and name _prefix_"
        # Do the search
        results = []
        for l, n in self.disk_migrations:
            if l == app_label and n.startswith(name_prefix):
                results.append((l, n))
        if len(results) > 1:
            raise AmbiguityError(
                "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
            )
        elif len(results) == 0:
            raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
        else:
            return self.disk_migrations[results[0]]
    def check_key(self, key, current_app):
        """Resolve a dependency key's __first__/__latest__ placeholders.

        Returns the concrete (app_label, name) node, or None when the
        dependency should be ignored (same-app __first__, or a dependency on
        an unmigrated app).
        """
        if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
            return key
        # Special-case __first__, which means "the first migration" for
        # migrated apps, and is ignored for unmigrated apps. It allows
        # makemigrations to declare dependencies on apps before they even have
        # migrations.
        if key[0] == current_app:
            # Ignore __first__ references to the same app (#22325)
            return
        if key[0] in self.unmigrated_apps:
            # This app isn't migrated, but something depends on it.
            # The models will get auto-added into the state, though
            # so we're fine.
            return
        if key[0] in self.migrated_apps:
            try:
                if key[1] == "__first__":
                    return list(self.graph.root_nodes(key[0]))[0]
                else:  # "__latest__"
                    return list(self.graph.leaf_nodes(key[0]))[0]
            except IndexError:
                if self.ignore_no_migrations:
                    return None
                else:
                    raise ValueError("Dependency on app with no migrations: %s" % key[0])
        raise ValueError("Dependency on unknown app: %s" % key[0])
    def build_graph(self):
        """
        Builds a migration dependency graph using both the disk and database.
        You'll need to rebuild the graph if you apply migrations. This isn't
        usually a problem as generally migration stuff runs in a one-shot process.
        """
        # Load disk data
        self.load_disk()
        # Load database data
        if self.connection is None:
            self.applied_migrations = set()
        else:
            recorder = MigrationRecorder(self.connection)
            self.applied_migrations = recorder.applied_migrations()
        # Do a first pass to separate out replacing and non-replacing migrations
        normal = {}
        replacing = {}
        for key, migration in self.disk_migrations.items():
            if migration.replaces:
                replacing[key] = migration
            else:
                normal[key] = migration
        # Calculate reverse dependencies - i.e., for each migration, what depends on it?
        # This is just for dependency re-pointing when applying replacements,
        # so we ignore run_before here.
        reverse_dependencies = {}
        for key, migration in normal.items():
            for parent in migration.dependencies:
                reverse_dependencies.setdefault(parent, set()).add(key)
        # Remember the possible replacements to generate more meaningful error
        # messages
        reverse_replacements = {}
        for key, migration in replacing.items():
            for replaced in migration.replaces:
                reverse_replacements.setdefault(replaced, set()).add(key)
        # Carry out replacements if we can - that is, if all replaced migrations
        # are either unapplied or missing.
        for key, migration in replacing.items():
            # Ensure this replacement migration is not in applied_migrations
            self.applied_migrations.discard(key)
            # Do the check. We can replace if all our replace targets are
            # applied, or if all of them are unapplied.
            applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
            can_replace = all(applied_statuses) or (not any(applied_statuses))
            if not can_replace:
                continue
            # Alright, time to replace. Step through the replaced migrations
            # and remove, repointing dependencies if needs be.
            for replaced in migration.replaces:
                if replaced in normal:
                    # We don't care if the replaced migration doesn't exist;
                    # the usage pattern here is to delete things after a while.
                    del normal[replaced]
                for child_key in reverse_dependencies.get(replaced, set()):
                    if child_key in migration.replaces:
                        continue
                    # List of migrations whose dependency on `replaced` needs
                    # to be updated to a dependency on `key`.
                    to_update = []
                    # Child key may itself be replaced, in which case it might
                    # not be in `normal` anymore (depending on whether we've
                    # processed its replacement yet). If it's present, we go
                    # ahead and update it; it may be deleted later on if it is
                    # replaced, but there's no harm in updating it regardless.
                    if child_key in normal:
                        to_update.append(normal[child_key])
                    # If the child key is replaced, we update its replacement's
                    # dependencies too, if necessary. (We don't know if this
                    # replacement will actually take effect or not, but either
                    # way it's OK to update the replacing migration).
                    if child_key in reverse_replacements:
                        for replaces_child_key in reverse_replacements[child_key]:
                            if replaced in replacing[replaces_child_key].dependencies:
                                to_update.append(replacing[replaces_child_key])
                    # Actually perform the dependency update on all migrations
                    # that require it.
                    for migration_needing_update in to_update:
                        migration_needing_update.dependencies.remove(replaced)
                        migration_needing_update.dependencies.append(key)
            normal[key] = migration
            # Mark the replacement as applied if all its replaced ones are
            if all(applied_statuses):
                self.applied_migrations.add(key)
        # Store the replacement migrations for later checks
        self.replacements = replacing
        # Finally, make a graph and load everything into it
        self.graph = MigrationGraph()
        for key, migration in normal.items():
            self.graph.add_node(key, migration)
        def _reraise_missing_dependency(migration, missing, exc):
            """
            Checks if ``missing`` could have been replaced by any squash
            migration but wasn't because the squash migration was partially
            applied before. In that case raise a more understandable exception.
            #23556
            """
            if missing in reverse_replacements:
                candidates = reverse_replacements.get(missing, set())
                is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
                if not is_replaced:
                    tries = ', '.join('%s.%s' % c for c in candidates)
                    exc_value = NodeNotFoundError(
                        "Migration {0} depends on nonexistent node ('{1}', '{2}'). "
                        "Django tried to replace migration {1}.{2} with any of [{3}] "
                        "but wasn't able to because some of the replaced migrations "
                        "are already applied.".format(
                            migration, missing[0], missing[1], tries
                        ),
                        missing)
                    exc_value.__cause__ = exc
                    six.reraise(NodeNotFoundError, exc_value, sys.exc_info()[2])
            raise exc
        # Add all internal dependencies first to ensure __first__ dependencies
        # find the correct root node.
        for key, migration in normal.items():
            for parent in migration.dependencies:
                if parent[0] != key[0] or parent[1] == '__first__':
                    # Ignore __first__ references to the same app (#22325)
                    continue
                try:
                    self.graph.add_dependency(migration, key, parent)
                except NodeNotFoundError as e:
                    # Since we added "key" to the nodes before this implies
                    # "parent" is not in there. To make the raised exception
                    # more understandable we check if parent could have been
                    # replaced but hasn't (eg partially applied squashed
                    # migration)
                    _reraise_missing_dependency(migration, parent, e)
        for key, migration in normal.items():
            for parent in migration.dependencies:
                if parent[0] == key[0]:
                    # Internal dependencies already added.
                    continue
                parent = self.check_key(parent, key[0])
                if parent is not None:
                    try:
                        self.graph.add_dependency(migration, key, parent)
                    except NodeNotFoundError as e:
                        # Since we added "key" to the nodes before this implies
                        # "parent" is not in there.
                        _reraise_missing_dependency(migration, parent, e)
            for child in migration.run_before:
                child = self.check_key(child, key[0])
                if child is not None:
                    try:
                        self.graph.add_dependency(migration, child, key)
                    except NodeNotFoundError as e:
                        # Since we added "key" to the nodes before this implies
                        # "child" is not in there.
                        _reraise_missing_dependency(migration, child, e)
    def detect_conflicts(self):
        """
        Looks through the loaded graph and detects any conflicts - apps
        with more than one leaf migration. Returns a dict of the app labels
        that conflict with the migration names that conflict.
        """
        seen_apps = {}
        conflicting_apps = set()
        for app_label, migration_name in self.graph.leaf_nodes():
            if app_label in seen_apps:
                conflicting_apps.add(app_label)
            seen_apps.setdefault(app_label, set()).add(migration_name)
        return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
    def project_state(self, nodes=None, at_end=True):
        """
        Returns a ProjectState object representing the most recent state
        that the migrations we loaded represent.
        See graph.make_state for the meaning of "nodes" and "at_end"
        """
        return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
| gpl-2.0 |
Sarah-Alsinan/muypicky | lib/python3.6/site-packages/django/db/backends/oracle/base.py | 18 | 24987 | """
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import datetime
import decimal
import os
import platform
import sys
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
# Configure the Oracle client environment before cx_Oracle is imported.
_setup_environment([
    # Oracle takes client-side character set encoding from the environment.
    ('NLS_LANG', '.AL32UTF8'),
    # This prevents unicode from getting mangled by getting encoded into the
    # potentially non-unicode database character set.
    ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
    import cx_Oracle as Database
except ImportError as e:
    raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient  # NOQA isort:skip
from .creation import DatabaseCreation  # NOQA isort:skip
from .features import DatabaseFeatures  # NOQA isort:skip
from .introspection import DatabaseIntrospection  # NOQA isort:skip
from .operations import DatabaseOperations  # NOQA isort:skip
from .schema import DatabaseSchemaEditor  # NOQA isort:skip
from .utils import Oracle_datetime  # NOQA isort:skip
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, cls=None):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
    """Oracle (cx_Oracle) implementation of Django's database wrapper."""
    vendor = 'oracle'
    # This dictionary maps Field objects to their associated Oracle column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed.
    data_types = {
        'AutoField': 'NUMBER(11)',
        'BigAutoField': 'NUMBER(19)',
        'BinaryField': 'BLOB',
        'BooleanField': 'NUMBER(1)',
        'CharField': 'NVARCHAR2(%(max_length)s)',
        'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
        'DateField': 'DATE',
        'DateTimeField': 'TIMESTAMP',
        'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
        'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
        'FileField': 'NVARCHAR2(%(max_length)s)',
        'FilePathField': 'NVARCHAR2(%(max_length)s)',
        'FloatField': 'DOUBLE PRECISION',
        'IntegerField': 'NUMBER(11)',
        'BigIntegerField': 'NUMBER(19)',
        'IPAddressField': 'VARCHAR2(15)',
        'GenericIPAddressField': 'VARCHAR2(39)',
        'NullBooleanField': 'NUMBER(1)',
        'OneToOneField': 'NUMBER(11)',
        'PositiveIntegerField': 'NUMBER(11)',
        'PositiveSmallIntegerField': 'NUMBER(11)',
        'SlugField': 'NVARCHAR2(%(max_length)s)',
        'SmallIntegerField': 'NUMBER(11)',
        'TextField': 'NCLOB',
        'TimeField': 'TIMESTAMP',
        'URLField': 'VARCHAR2(%(max_length)s)',
        'UUIDField': 'VARCHAR2(32)',
    }
    # Oracle has no boolean type; enforce 0/1 (and non-negativity) via CHECKs.
    data_type_check_constraints = {
        'BooleanField': '%(qn_column)s IN (0,1)',
        'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
        'PositiveIntegerField': '%(qn_column)s >= 0',
        'PositiveSmallIntegerField': '%(qn_column)s >= 0',
    }
    # Resolved to _standard_operators or _likec_operators on first connect
    # (see init_connection_state and _UninitializedOperatorsDescriptor).
    operators = _UninitializedOperatorsDescriptor()
    _standard_operators = {
        'exact': '= %s',
        'iexact': '= UPPER(%s)',
        'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
        'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
    }
    # Fallback operator set for servers where the standard LIKE TRANSLATE
    # probe fails (see the #14149 check in init_connection_state).
    _likec_operators = _standard_operators.copy()
    _likec_operators.update({
        'contains': "LIKEC %s ESCAPE '\\'",
        'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
        'startswith': "LIKEC %s ESCAPE '\\'",
        'endswith': "LIKEC %s ESCAPE '\\'",
        'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
        'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
    })
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    _pattern_ops = {
        'contains': "'%%' || {} || '%%'",
        'icontains': "'%%' || UPPER({}) || '%%'",
        'startswith': "{} || '%%'",
        'istartswith': "UPPER({}) || '%%'",
        'endswith': "'%%' || {}",
        'iendswith': "'%%' || UPPER({})",
    }
    _standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
                                " ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
                             for k, v in _pattern_ops.items()}
    _likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
                          for k, v in _pattern_ops.items()}
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    # Classes instantiated in __init__().
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # RETURNING INTO is used to fetch inserted ids; it can be disabled
        # per-database via OPTIONS['use_returning_into'].
        use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
        self.features.can_return_id_from_insert = use_returning_into
    def _connect_string(self):
        """Build the 'user/password@dsn' connect string from settings."""
        settings_dict = self.settings_dict
        if not settings_dict['HOST'].strip():
            settings_dict['HOST'] = 'localhost'
        if settings_dict['PORT']:
            dsn = Database.makedsn(settings_dict['HOST'],
                                   int(settings_dict['PORT']),
                                   settings_dict['NAME'])
        else:
            dsn = settings_dict['NAME']
        return "%s/%s@%s" % (settings_dict['USER'],
                             settings_dict['PASSWORD'], dsn)
    def get_connection_params(self):
        """Return OPTIONS suitable for Database.connect (minus our own keys)."""
        conn_params = self.settings_dict['OPTIONS'].copy()
        if 'use_returning_into' in conn_params:
            del conn_params['use_returning_into']
        return conn_params
    def get_new_connection(self, conn_params):
        return Database.connect(self._connect_string(), **conn_params)
    def init_connection_state(self):
        cursor = self.create_cursor()
        # Set the territory first. The territory overrides NLS_DATE_FORMAT
        # and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in single statement it isn't clear what is supposed
        # to happen.
        cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
        # Set Oracle date to ANSI date format. This only needs to execute
        # once when we create a new connection. We also set the Territory
        # to 'AMERICA' which forces Sunday to evaluate to a '1' in
        # TO_CHAR().
        cursor.execute(
            "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
            " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" +
            (" TIME_ZONE = 'UTC'" if settings.USE_TZ else '')
        )
        cursor.close()
        if 'operators' not in self.__dict__:
            # Ticket #14149: Check whether our LIKE implementation will
            # work for this connection or we need to fall back on LIKEC.
            # This check is performed only once per DatabaseWrapper
            # instance per thread, since subsequent connections will use
            # the same settings.
            cursor = self.create_cursor()
            try:
                cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
                               % self._standard_operators['contains'],
                               ['X'])
            except Database.DatabaseError:
                self.operators = self._likec_operators
                self.pattern_ops = self._likec_pattern_ops
            else:
                self.operators = self._standard_operators
                self.pattern_ops = self._standard_pattern_ops
            cursor.close()
        self.connection.stmtcachesize = 20
        # Ensure all changes are preserved even when AUTOCOMMIT is False.
        if not self.get_autocommit():
            self.commit()
    def create_cursor(self, name=None):
        # `name` is accepted for API compatibility; Oracle has no named cursors.
        return FormatStylePlaceholderCursor(self.connection)
    def _commit(self):
        if self.connection is not None:
            try:
                return self.connection.commit()
            except Database.DatabaseError as e:
                # cx_Oracle raises a cx_Oracle.DatabaseError exception
                # with the following attributes and values:
                #  code = 2091
                #  message = 'ORA-02091: transaction rolled back
                #            'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
                #               _C00102056) violated - parent key not found'
                # We convert that particular case to our IntegrityError exception
                x = e.args[0]
                if hasattr(x, 'code') and hasattr(x, 'message') \
                        and x.code == 2091 and 'ORA-02291' in x.message:
                    six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
                raise
    # Oracle doesn't support releasing savepoints. But we fake them when query
    # logging is enabled to keep query counts consistent with other backends.
    def _savepoint_commit(self, sid):
        if self.queries_logged:
            self.queries_log.append({
                'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
                'time': '0.000',
            })
    def _set_autocommit(self, autocommit):
        with self.wrap_database_errors:
            self.connection.autocommit = autocommit
    def check_constraints(self, table_names=None):
        """
        To check constraints, we set constraints to immediate. Then, when we're
        done, we must ensure they are returned to deferred.
        """
        self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
        self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
    def is_usable(self):
        """Return True if the underlying connection still answers a ping."""
        try:
            self.connection.ping()
        except Database.Error:
            return False
        else:
            return True
    @cached_property
    def oracle_full_version(self):
        # e.g. '12.1.0.2.0'; cached for the wrapper's lifetime.
        with self.temporary_connection():
            return self.connection.version
    @cached_property
    def oracle_version(self):
        # Major version as an int, or None if the version string is unparsable.
        try:
            return int(self.oracle_full_version.split('.')[0])
        except ValueError:
            return None
class OracleParam(object):
    """
    Wrapper object for formatting parameters for Oracle. If the string
    representation of the value is large enough (greater than 4000 characters)
    the input size needs to be set as CLOB. Alternatively, if the parameter
    has an `input_size` attribute, then the value of the `input_size` attribute
    will be used instead. Otherwise, no input size will be set for the
    parameter when executing the query.
    """
    def __init__(self, param, cursor, strings_only=False):
        # With raw SQL queries, datetimes can reach this function
        # without being converted by DateTimeField.get_db_prep_value.
        if settings.USE_TZ and (isinstance(param, datetime.datetime) and
                                not isinstance(param, Oracle_datetime)):
            if timezone.is_aware(param):
                # NOTE(review): the "%s" in this message is never interpolated —
                # the second argument to warn() is the warning category, not a
                # format argument.
                warnings.warn(
                    "The Oracle database adapter received an aware datetime (%s), "
                    "probably from cursor.execute(). Update your code to pass a "
                    "naive datetime in the database connection's time zone (UTC by "
                    "default).", RemovedInDjango20Warning)
                param = param.astimezone(timezone.utc).replace(tzinfo=None)
            param = Oracle_datetime.from_datetime(param)
        string_size = 0
        # Oracle doesn't recognize True and False correctly in Python 3.
        # The conversion done below works both in 2 and 3.
        if param is True:
            param = 1
        elif param is False:
            param = 0
        if hasattr(param, 'bind_parameter'):
            # e.g. VariableWrapper: let the object provide its own bind value.
            self.force_bytes = param.bind_parameter(cursor)
        elif isinstance(param, (Database.Binary, datetime.timedelta)):
            self.force_bytes = param
        else:
            # To transmit to the database, we need Unicode if supported
            # To get size right, we must consider bytes.
            self.force_bytes = force_text(param, cursor.charset, strings_only)
            if isinstance(self.force_bytes, six.string_types):
                # We could optimize by only converting up to 4000 bytes here
                string_size = len(force_bytes(param, cursor.charset, strings_only))
        if hasattr(param, 'input_size'):
            # If parameter has `input_size` attribute, use that.
            self.input_size = param.input_size
        elif string_size > 4000:
            # Mark any string param greater than 4000 characters as a CLOB.
            self.input_size = Database.CLOB
        else:
            self.input_size = None
class VariableWrapper(object):
    """
    An adapter class for cursor variables that prevents the wrapped object
    from being converted into a string when used to instantiate an OracleParam.
    This can be used generally for any other object that should be passed into
    Cursor.execute as-is.
    """
    def __init__(self, var):
        self.var = var

    def bind_parameter(self, cursor):
        # Hand the wrapped variable to cx_Oracle untouched.
        return self.var

    def __getattr__(self, key):
        # Everything not found on the wrapper is read from the wrapped object.
        return getattr(self.var, key)

    def __setattr__(self, key, value):
        # Only 'var' itself lives on the wrapper; all other writes are
        # forwarded to the wrapped object.
        if key != 'var':
            setattr(self.var, key, value)
        else:
            self.__dict__[key] = value
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
self.cursor.outputtypehandler = self._output_type_handler
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
@staticmethod
def _output_type_handler(cursor, name, defaultType, length, precision, scale):
"""
Called for each db column fetched from cursors. Return numbers as
strings so that decimal values don't have rounding error.
"""
if defaultType == Database.NUMBER:
return cursor.var(
Database.STRING,
size=255,
arraysize=cursor.arraysize,
outconverter=str,
)
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params, unify_by_values=False):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
# it does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = query
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params.keys()}
query = query % args
elif unify_by_values and len(params) > 0:
# Handle params as a dict with unified query parameters by their
# values. It can be used only in single query execute() because
# executemany() shares the formatted query with each of the params
# list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75]
# params_dict = {
# (2, <type 'int'>): ':arg2',
# (0.75, <type 'float'>): ':arg1',
# ('sth', <type 'str'>): ':arg0',
# }
# args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0']
# params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'}
params = [(param, type(param)) for param in params]
params_dict = {param: ':arg%d' % i for i, param in enumerate(set(params))}
args = [params_dict[param] for param in params]
params = {value: key[0] for key, value in params_dict.items()}
query = query % tuple(args)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = query % tuple(args)
return force_text(query, self.charset), self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchmany(size))
def fetchall(self):
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchall())
    def close(self):
        """Close the underlying cursor, tolerating a double close."""
        try:
            self.cursor.close()
        except Database.InterfaceError:
            # already closed
            pass
    def var(self, *args):
        # Wrap the raw driver variable so its setvalue/getvalue can be
        # intercepted by VariableWrapper.
        return VariableWrapper(self.cursor.var(*args))
    def arrayvar(self, *args):
        # Same wrapping as var(), for array-valued bind variables.
        return VariableWrapper(self.cursor.arrayvar(*args))
    def __getattr__(self, attr):
        """Delegate unknown attribute access to the wrapped cursor.

        NOTE(review): __getattr__ only fires after normal instance lookup
        fails, so the __dict__ branch looks defensive/unreachable — confirm
        before removing.
        """
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)
    def __iter__(self):
        # Iterate rows through the custom row factory (type conversions).
        return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
    """Iterate a raw cursor, passing every row through _rowfactory so the
    usual Oracle type conversions are applied."""

    def __init__(self, cursor):
        self.cursor = cursor
        self.iter = iter(cursor)

    def __iter__(self):
        return self

    def __next__(self):
        raw_row = next(self.iter)
        return _rowfactory(raw_row, self.cursor)
def _rowfactory(row, cursor):
    """Convert one raw driver row to native Python types.

    Numeric columns are cast to int/float/Decimal according to the
    precision (desc[4]) and scale (desc[5]) in cursor.description;
    character columns are decoded to unicode; everything else passes
    through unchanged.

    NOTE(review): the ``'.' in value`` tests imply NUMBER values arrive
    here as strings (presumably via an output type handler) — confirm.
    """
    # Cast numeric values as the appropriate Python type based upon the
    # cursor description, and convert strings to unicode.
    casted = []
    for value, desc in zip(row, cursor.description):
        if value is not None and desc[1] is Database.NUMBER:
            precision = desc[4] or 0
            scale = desc[5] or 0
            # scale == -127 is how Oracle reports a floating NUMBER/FLOAT.
            if scale == -127:
                if precision == 0:
                    # NUMBER column: decimal-precision floating point
                    # This will normally be an integer from a sequence,
                    # but it could be a decimal value.
                    if '.' in value:
                        value = decimal.Decimal(value)
                    else:
                        value = int(value)
                else:
                    # FLOAT column: binary-precision floating point.
                    # This comes from FloatField columns.
                    value = float(value)
            elif precision > 0:
                # NUMBER(p,s) column: decimal-precision fixed point.
                # This comes from IntField and DecimalField columns.
                if scale == 0:
                    value = int(value)
                else:
                    value = decimal.Decimal(value)
            elif '.' in value:
                # No type information. This normally comes from a
                # mathematical expression in the SELECT list. Guess int
                # or Decimal based on whether it has a decimal point.
                value = decimal.Decimal(value)
            else:
                value = int(value)
        elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
                         Database.LONG_STRING):
            value = to_unicode(value)
        casted.append(value)
    return tuple(casted)
def to_unicode(s):
    """Return *s* decoded to unicode when it is a string; otherwise return
    it unchanged."""
    return force_text(s) if isinstance(s, six.string_types) else s
| mit |
HaydenFaulkner/phd | processing/image/img2vid.py | 1 | 1058 | from os import listdir, remove
from os import listdir, remove
from os.path import isfile, join
import numpy as np
import cv2
from cv2 import __version__

# OpenCV 3.x changed the FOURCC API; detect which one is available.
cv_ver = __version__.split('.')
NEW_OCV = True
if int(cv_ver[0]) < 3:
    NEW_OCV = False
    import cv2.cv as cv

mypath = '/media/hayden/UStorage/DATASETS/VIDEO/TENNIS/BBS/imgtest/'

# Remove any previous output so the writer starts from a clean file.
try:
    remove(mypath + 'video.avi')
except OSError:
    pass

files = [f for f in listdir(mypath) if f.endswith('.png') and isfile(join(mypath, f))]

# Probe the first frame for the video dimensions.
# BUG FIX: numpy's shape is (rows, cols) == (height, width), while
# cv2.VideoWriter expects frameSize=(width, height).  The original code
# had the two swapped, which breaks for non-square frames.
check = 0
frame_names = []
for filename in files:
    frame_names.append(filename)
    if check == 0:
        image = cv2.imread(mypath + filename)
        height = np.shape(image)[0]
        width = np.shape(image)[1]
        check = 1
frame_names.sort()

if NEW_OCV:
    video = cv2.VideoWriter(mypath + 'video.avi',
                            cv2.VideoWriter_fourcc('F', 'M', 'P', '4'),
                            25, (width, height))
else:
    video = cv2.VideoWriter(mypath + 'video.avi',
                            cv2.cv.CV_FOURCC('F', 'M', 'P', '4'),
                            25, (width, height))

for s in frame_names:
    # print() form works on both Python 2 and 3 (original `print s` is
    # Python-2 only).
    print(s)
    image = cv2.imread(mypath + s)
    video.write(image)
video.release()
| mit |
voiperr/ACE3 | tools/stringtablemerger.py | 52 | 4379 | #!/usr/bin/env python3
import os
import sys
import re
from xml.dom import minidom
# STRINGTABLE MERGER TOOL
# Author: KoffeinFlummi
# --------------------------
# Automatically merges all stringtable entries
# in the given language from the given dir.
def get_modules(projectpath):
    """ Get all the modules of the project. """
    modules = []
    for entry in os.listdir(projectpath):
        # Hidden entries (".git", ".vscode", ...) are never modules.
        if entry.startswith("."):
            continue
        if os.path.isdir(os.path.join(projectpath, entry)):
            modules.append(entry)
    return modules
def contains_language(key, language):
    """Return True if *key* has a child element tagged *language*.

    Text/comment child nodes have no ``tagName`` attribute, hence the
    getattr() default.  (Replaces the original try/assert/bare-except
    pattern: ``assert`` is stripped under ``-O`` and the bare except
    swallowed every error.)
    """
    return any(getattr(child, "tagName", None) == language
               for child in key.childNodes)
def get_entry_by_id(keys, keyid):
    """ Returns the first child of keys with ID='keyid'. """
    # Falls back to False (not None) — callers test truthiness.
    return next((key for key in keys if key.getAttribute("ID") == keyid),
                False)
def replace_entries(oldpath, newpath, language, breakdown):
    """ Replaces all new entries of the given language in the given module.

    Merges every <Key> from *newpath* that carries a *language* entry into
    the matching key (by ID) in *oldpath*, rewriting *oldpath* in place.
    Returns the number of keys in the new file that carried the language.
    """
    oldfile = minidom.parse(oldpath)
    newfile = minidom.parse(newpath)
    oldkeys = oldfile.getElementsByTagName("Key")
    newkeys = newfile.getElementsByTagName("Key")
    newkeys = list(filter(lambda x: contains_language(x, language), newkeys))
    for newkey in newkeys:
        keyid = newkey.getAttribute("ID")
        oldkey = get_entry_by_id(oldkeys, keyid)
        if not oldkey:
            # Key no longer exists upstream; nothing to merge into.
            continue
        if breakdown:
            print(" Merging %s translation for %s" % (language, keyid))
        newentry = newkey.getElementsByTagName(language)[0].firstChild
        existing = oldkey.getElementsByTagName(language)
        if existing and existing[0].firstChild is not None:
            # An entry for this language already exists, overwrite it.
            # (Explicit check replaces the original broad try/except.)
            existing[0].firstChild.replaceWholeText(newentry.wholeText)
        else:
            # There is no entry for this language yet, make one
            oldentry = oldfile.createElement(language)
            oldentry.appendChild(oldfile.createTextNode(newentry.wholeText))
            # Some whitespace tetris to maintain file structure
            oldkey.insertBefore(oldfile.createTextNode("\n "), oldkey.lastChild)
            oldkey.insertBefore(oldentry, oldkey.lastChild)
    # Make a nice string
    xmlstring = oldfile.toxml()
    xmlstring = xmlstring.replace('" ?>', '" encoding="utf-8"?>')
    # Replace the newlines that minidom swallows
    xmlstring = xmlstring.replace("><", ">\n<")
    xmlstring += "\n"
    # BUG FIX: write with an explicit utf-8 encoding so the declared
    # encoding matches the file bytes regardless of platform locale,
    # and use a context manager so the handle is always closed.
    with open(oldpath, "w", encoding="utf-8") as fhandle:
        fhandle.write(xmlstring)
    return len(newkeys)
def main(sourcepath, language, breakdown):
    """Merge *language* stringtable entries from *sourcepath* into the
    project's addons/ directory, printing a per-module summary."""
    script_path = os.path.realpath(__file__)
    repo_root = os.path.dirname(os.path.dirname(script_path))
    addons_dir = os.path.join(repo_root, "addons")
    merged_modules = 0
    merged_keys = 0
    for module in get_modules(addons_dir):
        oldpath = os.path.join(addons_dir, module, "stringtable.xml")
        # Some translators extract the lowercase PBOs, so the module name
        # might be lowercase (obviously only matters on Linux).
        candidates = (
            os.path.join(sourcepath, module, "stringtable.xml"),
            os.path.join(sourcepath, module.lower(), "stringtable.xml"),
        )
        newpath = next((p for p in candidates if os.path.exists(p)), None)
        if newpath is None:
            # Translator didn't include this module, skip.
            continue
        keynum = replace_entries(oldpath, newpath, language, breakdown)
        merged_modules += 1
        merged_keys += keynum
        print("# Merged %i entry/entries in %s" % (keynum, module))
        if breakdown:
            print("")
    print("")
    print("# Merged %i entry/entries in %i modules" % (merged_keys, merged_modules))
if __name__ == "__main__":
    # Validate CLI arguments explicitly: the original used `assert`
    # (stripped under -O) inside a bare except (which also swallowed
    # KeyboardInterrupt/SystemExit).
    try:
        sourcepath = os.path.normpath(os.path.join(os.getcwd(), sys.argv[1]))
        language = sys.argv[2]
        if not os.path.exists(sourcepath):
            raise ValueError("path does not exist: %s" % sourcepath)
    except (IndexError, ValueError):
        # Typo fix: "of invalid path" -> "or invalid path".
        print("ERROR: Missing arguments or invalid path.")
        print("\nUsage:")
        print("[script] [path to new project] [language]")
        sys.exit(1)
    main(sourcepath, language, "--breakdown" in sys.argv)
| gpl-2.0 |
analyseuc3m/ANALYSE-v1 | common/lib/xmodule/xmodule/videoannotation_module.py | 19 | 6569 | """
Module for Video annotations using annotator.
"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions, get_extension
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
# (the i18n scraper only needs the literals to appear inside _() calls).
_ = lambda text: text
class AnnotatableFields(object):
    """ Fields for `VideoModule` and `VideoDescriptor`. """
    # Raw XML payload (the <annotatable> markup) shown to students.
    data = String(
        help=_("XML data for the annotation"),
        scope=Scope.content,
        default=textwrap.dedent("""\
        <annotatable>
        <instructions>
        <p>
        Add the instructions to the assignment here.
        </p>
        </instructions>
        </annotatable>
        """))
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Video Annotation'),
    )
    # External URL of the video file to annotate.
    sourceurl = String(
        help=_("The external source URL for the video."),
        display_name=_("Source URL"),
        scope=Scope.settings, default="http://video-js.zencoder.com/oceans-clip.mp4"
    )
    poster_url = String(
        help=_("Poster Image URL"),
        display_name=_("Poster URL"),
        scope=Scope.settings,
        default=""
    )
    # Backend that stores the annotations themselves (separate service).
    annotation_storage_url = String(
        help=_("Location of Annotation backend"),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("Url for Annotation Storage")
    )
    # Shared secret used to sign tokens for the annotation storage backend.
    annotation_token_secret = String(
        help=_("Secret string for annotation storage"),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    default_tab = String(
        display_name=_("Default Annotations Tab"),
        help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
        scope=Scope.settings,
        default="myNotes",
    )
    # currently only supports one instructor, will build functionality for multiple later
    instructor_email = String(
        display_name=_("Email for 'Instructor' Annotations"),
        help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
        scope=Scope.settings,
        default="",
    )
    annotation_mode = String(
        display_name=_("Mode for Annotation Tool"),
        help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
        scope=Scope.settings,
        default="everyone",
    )
class VideoAnnotationModule(AnnotatableFields, XModule):
    '''Video Annotation Module: renders an annotatable video player whose
    annotations are stored in an external annotation backend.'''
    js = {
        'coffee': [
            resource_string(__name__, 'js/src/javascript_loader.coffee'),
            resource_string(__name__, 'js/src/html/display.coffee'),
            resource_string(__name__, 'js/src/annotatable/display.coffee'),
        ],
        'js': [
            resource_string(__name__, 'js/src/collapsible.js'),
        ]
    }
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'videoannotation'

    def __init__(self, *args, **kwargs):
        """Parse the stored XML and resolve the requesting user's email and
        staff status from the runtime."""
        super(VideoAnnotationModule, self).__init__(*args, **kwargs)
        xmltree = etree.fromstring(self.data)
        self.instructions = self._extract_instructions(xmltree)
        self.content = etree.tostring(xmltree, encoding='unicode')
        self.user_email = ""
        self.is_course_staff = False
        if self.runtime.get_user_role() in ['instructor', 'staff']:
            self.is_course_staff = True
        # get_real_user is None in Studio runtimes (no anonymous ids there).
        if self.runtime.get_real_user is not None:
            try:
                self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
            except Exception:  # pylint: disable=broad-except
                self.user_email = _("No email address found.")

    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        return get_instructions(xmltree)

    def _get_extension(self, src_url):
        ''' get the extension of a given url '''
        return get_extension(src_url)

    def student_view(self, context):
        """ Renders parameters to template. """
        extension = self._get_extension(self.sourceurl)
        context = {
            'course_key': self.runtime.course_id,
            'display_name': self.display_name_with_default_escaped,
            'instructions_html': self.instructions,
            'sourceUrl': self.sourceurl,
            'typeSource': extension,
            'poster': self.poster_url,
            'content_html': self.content,
            # Signed token that authorizes this user against the storage
            # backend.
            'token': retrieve_token(self.user_email, self.annotation_token_secret),
            'annotation_storage': self.annotation_storage_url,
            'default_tab': self.default_tab,
            'instructor_email': self.instructor_email,
            'annotation_mode': self.annotation_mode,
            'is_course_staff': self.is_course_staff,
        }
        fragment = Fragment(self.system.render_template('videoannotation.html', context))
        # TinyMCE already exists in Studio so we should not load the files again
        # get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
        if self.runtime.get_real_user is not None:
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
        return fragment
class VideoAnnotationDescriptor(AnnotatableFields, RawDescriptor):
    ''' Video annotation descriptor: Studio-side companion of
    VideoAnnotationModule, edited as raw XML. '''
    module_class = VideoAnnotationModule
    mako_template = "widgets/raw-edit.html"

    @property
    def non_editable_metadata_fields(self):
        # Storage URL and token secret are deployment configuration, not
        # per-course settings, so hide them from the Studio settings editor.
        non_editable_fields = super(VideoAnnotationDescriptor, self).non_editable_metadata_fields
        non_editable_fields.extend([
            VideoAnnotationDescriptor.annotation_storage_url,
            VideoAnnotationDescriptor.annotation_token_secret,
        ])
        return non_editable_fields
| agpl-3.0 |
zaina/nova | tools/db/schema_diff.py | 36 | 8433 | #!/usr/bin/env python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility for diff'ing two versions of the DB schema.
Each release cycle the plan is to compact all of the migrations from that
release into a single file. This is a manual and, unfortunately, error-prone
process. To ensure that the schema doesn't change, this tool can be used to
diff the compacted DB schema to the original, uncompacted form.
The database is specified by providing a SQLAlchemy connection URL WITHOUT the
database-name portion (that will be filled in automatically with a temporary
database name).
The schema versions are specified by providing a git ref (a branch name or
commit hash) and a SQLAlchemy-Migrate version number:
Run like:
MYSQL:
./tools/db/schema_diff.py mysql+pymysql://root@localhost \
master:latest my_branch:82
POSTGRESQL:
./tools/db/schema_diff.py postgresql://localhost \
master:latest my_branch:82
DB2:
./tools/db/schema_diff.py ibm_db_sa://localhost \
master:latest my_branch:82
"""
from __future__ import print_function
import datetime
import glob
import os
import subprocess
import sys
from nova.i18n import _
# Dump
def dump_db(db_driver, db_name, db_url, migration_version, dump_filename):
    """Create *db_name*, migrate it to *migration_version* and dump its
    schema to *dump_filename*.  The temporary database is always dropped,
    even when migration or dumping fails."""
    separator = '' if db_url.endswith('/') else '/'
    full_url = db_url + separator + db_name
    db_driver.create(db_name)
    try:
        _migrate(full_url, migration_version)
        db_driver.dump(db_name, dump_filename)
    finally:
        db_driver.drop(db_name)
# Diff
def diff_files(filename1, filename2):
    """Page a unified diff of the two schema dumps, colorized when
    colordiff is installed."""
    stages = ['diff -U 3 %(filename1)s %(filename2)s'
              % {'filename1': filename1, 'filename2': filename2}]
    # Use colordiff if available
    if subprocess.call(['which', 'colordiff']) == 0:
        stages.append('colordiff')
    stages.append('less -R')
    subprocess.check_call(' | '.join(stages), shell=True)
# Database
class Mysql(object):
    """Driver for creating/dropping/dumping throwaway MySQL databases
    (assumes a passwordless local root account)."""
    def create(self, name):
        subprocess.check_call(['mysqladmin', '-u', 'root', 'create', name])

    def drop(self, name):
        subprocess.check_call(['mysqladmin', '-f', '-u', 'root', 'drop', name])

    def dump(self, name, dump_filename):
        # shell=True for the output redirection.
        subprocess.check_call(
            'mysqldump -u root %(name)s > %(dump_filename)s'
            % {'name': name, 'dump_filename': dump_filename},
            shell=True)
class Postgresql(object):
    """Driver for creating/dropping/dumping throwaway PostgreSQL databases
    (relies on the current user's local access)."""
    def create(self, name):
        subprocess.check_call(['createdb', name])

    def drop(self, name):
        subprocess.check_call(['dropdb', name])

    def dump(self, name, dump_filename):
        # shell=True for the output redirection.
        subprocess.check_call(
            'pg_dump %(name)s > %(dump_filename)s'
            % {'name': name, 'dump_filename': dump_filename},
            shell=True)
class Ibm_db_sa(object):
    """Driver for throwaway DB2 databases; all commands run as the DB2
    instance user via ``su``."""
    @classmethod
    def db2cmd(cls, cmd):
        """Wraps a command to be run under the DB2 instance user."""
        subprocess.check_call('su - $(db2ilist) -c "%s"' % cmd, shell=True)

    def create(self, name):
        self.db2cmd('db2 \'create database %s\'' % name)

    def drop(self, name):
        self.db2cmd('db2 \'drop database %s\'' % name)

    def dump(self, name, dump_filename):
        self.db2cmd('db2look -d %(name)s -e -o %(dump_filename)s' %
                    {'name': name, 'dump_filename': dump_filename})
        # The output file gets dumped to the db2 instance user's home directory
        # so we have to copy it back to our current working directory.
        subprocess.check_call('cp /home/$(db2ilist)/%s ./' % dump_filename,
                              shell=True)
def _get_db_driver_class(db_url):
    """Map a SQLAlchemy URL scheme (mysql://, postgresql://, ibm_db_sa://)
    to the matching driver class defined in this module."""
    scheme = db_url.split('://')[0]
    try:
        return globals()[scheme.capitalize()]
    except KeyError:
        raise Exception(_("database %s not supported") % db_url)
# Migrate
# sqlalchemy-migrate repository; assumes the script is run from the root
# of a nova checkout.
MIGRATE_REPO = os.path.join(os.getcwd(), "nova/db/sqlalchemy/migrate_repo")
def _migrate(db_url, migration_version):
    """Put *db_url* under version control and upgrade it to
    *migration_version* ('latest' upgrades all the way)."""
    earliest = _migrate_get_earliest_version()
    # NOTE(sirp): sqlalchemy-migrate currently cannot handle the skipping of
    # migration numbers.
    _migrate_cmd(db_url, 'version_control', str(earliest - 1))
    if migration_version == 'latest':
        _migrate_cmd(db_url, 'upgrade')
    else:
        _migrate_cmd(db_url, 'upgrade', str(migration_version))
def _migrate_cmd(db_url, *cmd):
    """Run sqlalchemy-migrate's manage.py with *cmd* against *db_url*."""
    manage_py = os.path.join(MIGRATE_REPO, 'manage.py')
    argv = ['python', manage_py]
    argv.extend(cmd)
    argv.extend(['--repository=%s' % MIGRATE_REPO,
                 '--url=%s' % db_url])
    subprocess.check_call(argv)
def _migrate_get_earliest_version():
    """Return the lowest migration number found in MIGRATE_REPO/versions.

    Version files are named like ``216_havana.py``; files whose prefix is
    not numeric are skipped.
    """
    versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')
    versions = []
    for path in glob.iglob(versions_glob):
        filename = os.path.basename(path)
        prefix = filename.split('_', 1)[0]
        try:
            version = int(prefix)
        except ValueError:
            # BUG FIX: the append used to sit outside this try, so on a
            # non-numeric prefix the *previous* iteration's version was
            # appended again (or a NameError occurred on the first file).
            continue
        versions.append(version)
    versions.sort()
    return versions[0]
# Git
def git_current_branch_name():
    """Return the name of the branch HEAD currently points at."""
    head_ref = git_symbolic_ref('HEAD', quiet=True)
    return head_ref.replace('refs/heads/', '')
def git_symbolic_ref(ref, quiet=False):
    """Resolve *ref* via `git symbolic-ref` and return the stripped output."""
    argv = ['git', 'symbolic-ref', ref]
    if quiet:
        argv.append('-q')
    proc = subprocess.Popen(argv, stdout=subprocess.PIPE)
    out, _err = proc.communicate()
    return out.strip()
def git_checkout(branch_name):
    # Raises CalledProcessError when the checkout fails (e.g. dirty tree).
    subprocess.check_call(['git', 'checkout', branch_name])
def git_has_uncommited_changes():
    # `git diff --quiet --exit-code` exits 1 when the working tree is dirty.
    return subprocess.call(['git', 'diff', '--quiet', '--exit-code']) == 1
# Command
def die(msg):
    """Write an error message to stderr and abort with exit status 1."""
    sys.stderr.write("ERROR: %s\n" % msg)
    sys.exit(1)
def usage(msg=None):
    """Print an optional error plus the usage line to stderr, then exit 1."""
    if msg:
        print("ERROR: %s" % msg, file=sys.stderr)
    arg_specs = ' '.join(["<db-url>", "<orig-branch:orig-version>",
                          "<new-branch:new-version>"])
    print("usage: %s %s" % ("schema_diff.py", arg_specs), file=sys.stderr)
    sys.exit(1)
def parse_options():
    """Parse sys.argv into (db_url, orig_branch, orig_version, new_branch,
    new_version), exiting via usage() on malformed input."""
    try:
        db_url = sys.argv[1]
    except IndexError:
        usage("must specify DB connection url")
    # BUG FIX: an argument without a ':' makes the tuple-unpack of
    # str.split raise ValueError, not IndexError, so catch both.
    try:
        orig_branch, orig_version = sys.argv[2].split(':')
    except (IndexError, ValueError):
        usage('original branch and version required (e.g. master:82)')
    try:
        new_branch, new_version = sys.argv[3].split(':')
    except (IndexError, ValueError):
        usage('new branch and version required (e.g. master:82)')
    return db_url, orig_branch, orig_version, new_branch, new_version
def main():
    """Dump the schema at two (branch, migration-version) points and show
    their diff, restoring the user's branch afterwards."""
    # Timestamped names keep the temporary databases/dump files unique.
    timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
    ORIG_DB = 'orig_db_%s' % timestamp
    NEW_DB = 'new_db_%s' % timestamp
    ORIG_DUMP = ORIG_DB + ".dump"
    NEW_DUMP = NEW_DB + ".dump"
    options = parse_options()
    db_url, orig_branch, orig_version, new_branch, new_version = options
    # Since we're going to be switching branches, ensure user doesn't have any
    # uncommited changes
    if git_has_uncommited_changes():
        die("You have uncommited changes. Please commit them before running "
            "this command.")
    db_driver = _get_db_driver_class(db_url)()
    users_branch = git_current_branch_name()
    git_checkout(orig_branch)
    try:
        # Dump Original Schema
        dump_db(db_driver, ORIG_DB, db_url, orig_version, ORIG_DUMP)
        # Dump New Schema
        git_checkout(new_branch)
        dump_db(db_driver, NEW_DB, db_url, new_version, NEW_DUMP)
        diff_files(ORIG_DUMP, NEW_DUMP)
    finally:
        # Always restore the user's branch and remove the dump files.
        git_checkout(users_branch)
        if os.path.exists(ORIG_DUMP):
            os.unlink(ORIG_DUMP)
        if os.path.exists(NEW_DUMP):
            os.unlink(NEW_DUMP)
# Script entry point.
if __name__ == "__main__":
    main()
| apache-2.0 |
saurabh6790/OFF-RISLIB | core/doctype/profile/test_profile.py | 34 | 3620 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import webnotes, unittest
from webnotes.model.utils import delete_doc, LinkExistsError
class TestProfile(unittest.TestCase):
    """Integration tests for the Profile doctype (run against a live
    webnotes database)."""

    def test_delete(self):
        """A role still linked to users cannot be deleted; deleting a
        profile must cascade to documents it owns."""
        self.assertRaises(LinkExistsError, delete_doc, "Role", "_Test Role 2")
        # Unlink the role, after which deletion succeeds.
        webnotes.conn.sql("""delete from tabUserRole where role='_Test Role 2'""")
        delete_doc("Role","_Test Role 2")
        profile = webnotes.bean(copy=test_records[1])
        profile.doc.email = "_test@example.com"
        profile.insert()
        webnotes.bean({"doctype": "ToDo", "description": "_Test"}).insert()
        delete_doc("Profile", "_test@example.com")
        # ToDos owned by the deleted profile must be gone as well.
        self.assertTrue(not webnotes.conn.sql("""select * from `tabToDo` where owner=%s""",
            "_test@example.com"))
        # Restore the role for subsequent test runs.
        webnotes.bean({"doctype": "Role", "role_name": "_Test Role 2"}).insert()

    def test_get_value(self):
        """webnotes.conn.get_value supports name, filter-dict, field lists,
        '*' with as_dict, and singleton (Control Panel) access."""
        self.assertEquals(webnotes.conn.get_value("Profile", "test@example.com"), "test@example.com")
        self.assertEquals(webnotes.conn.get_value("Profile", {"email":"test@example.com"}), "test@example.com")
        self.assertEquals(webnotes.conn.get_value("Profile", {"email":"test@example.com"}, "email"), "test@example.com")
        self.assertEquals(webnotes.conn.get_value("Profile", {"email":"test@example.com"}, ["first_name", "email"]),
            ("_Test", "test@example.com"))
        self.assertEquals(webnotes.conn.get_value("Profile",
            {"email":"test@example.com", "first_name": "_Test"},
            ["first_name", "email"]),
            ("_Test", "test@example.com"))
        test_profile = webnotes.conn.sql("select * from tabProfile where name='test@example.com'",
            as_dict=True)[0]
        self.assertEquals(webnotes.conn.get_value("Profile", {"email":"test@example.com"}, "*", as_dict=True),
            test_profile)
        # Unknown names return None rather than raising.
        self.assertEquals(webnotes.conn.get_value("Profile", "xxxtest@example.com"), None)
        webnotes.conn.set_value("Control Panel", "Control Panel", "_test", "_test_val")
        self.assertEquals(webnotes.conn.get_value("Control Panel", None, "_test"), "_test_val")
        self.assertEquals(webnotes.conn.get_value("Control Panel", "Control Panel", "_test"), "_test_val")

    def test_doclist(self):
        """DocList.get filter syntax: exact, ^prefix, !=, in, not in."""
        p_meta = webnotes.get_doctype("Profile")
        self.assertEquals(len(p_meta.get({"doctype": "DocField", "parent": "Profile", "fieldname": "first_name"})), 1)
        self.assertEquals(len(p_meta.get({"doctype": "DocField", "parent": "Profile", "fieldname": "^first"})), 1)
        self.assertEquals(len(p_meta.get({"fieldname": ["!=", "first_name"]})), len(p_meta) - 1)
        self.assertEquals(len(p_meta.get({"fieldname": ["in", ["first_name", "last_name"]]})), 2)
        self.assertEquals(len(p_meta.get({"fieldname": ["not in", ["first_name", "last_name"]]})), len(p_meta) - 2)
# Fixture data consumed by the webnotes test runner: each inner list is one
# Profile document followed by its child UserRole records.
test_records = [[{
    "doctype":"Profile",
    "email": "test@example.com",
    "first_name": "_Test",
    "new_password": "testpassword",
    "enabled": 1
}, {
    "doctype":"UserRole",
    "parentfield":"user_roles",
    "role": "_Test Role"
}, {
    "doctype":"UserRole",
    "parentfield":"user_roles",
    "role": "System Manager"
}],
[{
    "doctype":"Profile",
    "email": "test1@example.com",
    "first_name": "_Test1",
    "new_password": "testpassword"
}],
[{
    "doctype":"Profile",
    "email": "test2@example.com",
    "first_name": "_Test2",
    "new_password": "testpassword"
}],
[{
    "doctype":"Profile",
    "email": "testdelete@example.com",
    "first_name": "_Test",
    "new_password": "testpassword",
    "enabled": 1
}, {
    "doctype":"UserRole",
    "parentfield":"user_roles",
    "role": "_Test Role 2"
}, {
    "doctype":"UserRole",
    "parentfield":"user_roles",
    "role": "System Manager"
}],
] | mit |
pplatek/odoo | openerp/report/render/odt2odt/odt2odt.py | 443 | 2265 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render.rml2pdf import utils
import copy
class odt2odt(object):
    """Renders an ODT etree template by substituting embedded expressions
    in text/tail nodes against *localcontext* (odoo report engine)."""
    def __init__(self, odt, localcontext):
        self.localcontext = localcontext
        self.etree = odt
        self._node = None

    def render(self):
        """Return a deep copy of the template tree with every text/tail
        run through utils._process_text()."""
        def process_text(node, new_node):
            # Copy each child of *node* under *new_node*, substituting
            # placeholders as we descend.
            for child in utils._child_get(node, self):
                new_child = copy.deepcopy(child)
                new_node.append(new_child)
                if len(child):
                    # Strip the deep-copied grandchildren: they get rebuilt
                    # by the recursive call with substitutions applied.
                    # NOTE(review): removing from new_child while iterating
                    # it may skip alternate children — confirm intended.
                    for n in new_child:
                        new_child.text = utils._process_text(self, child.text)
                        new_child.tail = utils._process_text(self, child.tail)
                        new_child.remove(n)
                    process_text(child, new_child)
                else:
                    new_child.text = utils._process_text(self, child.text)
                    new_child.tail = utils._process_text(self, child.tail)
        # Start from a copy of the root with its children removed; they are
        # re-added with substitutions by process_text().
        self._node = copy.deepcopy(self.etree)
        for n in self._node:
            self._node.remove(n)
        process_text(self.etree, self._node)
        return self._node
def parseNode(node, localcontext=None):
    """Render the ODT etree *node* with the given template context.

    BUG FIX: the default used to be a mutable ``{}`` shared across every
    call; use a fresh dict per call instead (same observable behavior for
    all callers).
    """
    if localcontext is None:
        localcontext = {}
    renderer = odt2odt(node, localcontext)
    return renderer.render()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ZerpaTechnology/AsenZor | static/js/brython/Lib/unittest/case.py | 9 | 50087 | """Test case implementation"""
import sys
import functools
import difflib
import pprint
import re
import warnings
import collections
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable)
# Flag checked by unittest's traceback pruning to hide frames from this file.
__unittest = True

# Message substituted for a diff that exceeds TestCase.maxDiff.
DIFF_OMITTED = ('\nDiff is %s characters long. '
                'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
    """
    Raise this exception in a test to skip it.

    Usually you can use TestCase.skipTest() or one of the skipping
    decorators instead of raising this directly.
    """
class _ExpectedFailure(Exception):
    """
    Raise this when a test is expected to fail.

    This is an implementation detail.
    """
    def __init__(self, exc_info):
        super(_ExpectedFailure, self).__init__()
        # exc_info: the (type, value, traceback) triple from sys.exc_info()
        # describing the anticipated failure.
        self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
    """
    The test was supposed to fail, but it didn't!
    (Raised by the expectedFailure wrapper; implementation detail.)
    """
class _Outcome(object):
    """Mutable record of a single test run's result state."""
    def __init__(self):
        self.success = True              # flips to False on error/failure
        self.skipped = None              # reason string when skipped
        self.unexpectedSuccess = None
        self.expectedFailure = None
        self.errors = []                 # (test, exc_info) pairs
        self.failures = []               # (test, exc_info) pairs
def _id(obj):
    # Identity decorator, returned by skipIf/skipUnless when the condition
    # does not apply.
    return obj
def skip(reason):
    """Decorator that unconditionally skips the decorated test."""
    def decorator(test_item):
        # Classes are marked in place; plain callables are replaced by a
        # wrapper that raises SkipTest when invoked.
        if isinstance(test_item, type):
            marked = test_item
        else:
            @functools.wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise SkipTest(reason)
            marked = skip_wrapper
        marked.__unittest_skip__ = True
        marked.__unittest_skip_why__ = reason
        return marked
    return decorator
def skipIf(condition, reason):
    """Skip the decorated test when *condition* is true."""
    return skip(reason) if condition else _id
def skipUnless(condition, reason):
    """Skip the decorated test unless *condition* is true."""
    return _id if condition else skip(reason)
def expectedFailure(func):
    """Decorator marking a test that is supposed to fail.

    Any exception from the wrapped test is converted to _ExpectedFailure
    (carrying the original exc_info); a clean run raises
    _UnexpectedSuccess so the result class can report it.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception:
            raise _ExpectedFailure(sys.exc_info())
        raise _UnexpectedSuccess
    return wrapper
class _AssertRaisesBaseContext(object):
    """Shared machinery for assertRaises*/assertWarns* context managers."""
    def __init__(self, expected, test_case, callable_obj=None,
                 expected_regex=None):
        self.expected = expected
        self.test_case = test_case
        # Remember a printable name for the callable (used in failure
        # messages) when one was passed directly.
        if callable_obj is not None:
            try:
                self.obj_name = callable_obj.__name__
            except AttributeError:
                self.obj_name = str(callable_obj)
        else:
            self.obj_name = None
        # Accept either a pre-compiled pattern or a plain str/bytes pattern.
        if isinstance(expected_regex, (bytes, str)):
            expected_regex = re.compile(expected_regex)
        self.expected_regex = expected_regex
        self.msg = None

    def _raiseFailure(self, standardMsg):
        # Combine the user-supplied msg (if any) with the standard message
        # and raise the test case's configured failure exception.
        msg = self.test_case._formatMessage(self.msg, standardMsg)
        raise self.test_case.failureException(msg)

    def handle(self, name, callable_obj, args, kwargs):
        """
        If callable_obj is None, assertRaises/Warns is being used as a
        context manager, so check for a 'msg' kwarg and return self.
        If callable_obj is not None, call it passing args and kwargs.
        """
        if callable_obj is None:
            self.msg = kwargs.pop('msg', None)
            return self
        with self:
            callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
    """A context manager used to implement TestCase.assertRaises* methods."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is None:
            # Nothing was raised at all: fail with the expected exception's
            # name (and the callable's name when known).
            try:
                exc_name = self.expected.__name__
            except AttributeError:
                exc_name = str(self.expected)
            if self.obj_name:
                self._raiseFailure("{} not raised by {}".format(exc_name,
                    self.obj_name))
            else:
                self._raiseFailure("{} not raised".format(exc_name))
        if not issubclass(exc_type, self.expected):
            # let unexpected exceptions pass through
            return False
        # store exception, without traceback, for later retrieval
        self.exception = exc_value.with_traceback(None)
        if self.expected_regex is None:
            # Returning True suppresses the (expected) exception.
            return True
        expected_regex = self.expected_regex
        if not expected_regex.search(str(exc_value)):
            self._raiseFailure('"{}" does not match "{}"'.format(
                expected_regex.pattern, str(exc_value)))
        return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
    """A context manager used to implement TestCase.assertWarns* methods."""

    def __enter__(self):
        # The __warningregistry__'s need to be in a pristine state for tests
        # to work properly.
        for v in sys.modules.values():
            if getattr(v, '__warningregistry__', None):
                v.__warningregistry__ = {}
        # Record all warnings of the expected category while the block runs.
        self.warnings_manager = warnings.catch_warnings(record=True)
        self.warnings = self.warnings_manager.__enter__()
        warnings.simplefilter("always", self.expected)
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.warnings_manager.__exit__(exc_type, exc_value, tb)
        if exc_type is not None:
            # let unexpected exceptions pass through
            return
        try:
            exc_name = self.expected.__name__
        except AttributeError:
            exc_name = str(self.expected)
        first_matching = None
        for m in self.warnings:
            w = m.message
            if not isinstance(w, self.expected):
                continue
            if first_matching is None:
                # Remembered only for the regex-mismatch failure message.
                first_matching = w
            if (self.expected_regex is not None and
                not self.expected_regex.search(str(w))):
                continue
            # store warning for later retrieval
            self.warning = w
            self.filename = m.filename
            self.lineno = m.lineno
            return
        # Now we simply try to choose a helpful failure message
        if first_matching is not None:
            self._raiseFailure('"{}" does not match "{}"'.format(
                self.expected_regex.pattern, str(first_matching)))
        if self.obj_name:
            self._raiseFailure("{} not triggered by {}".format(exc_name,
                self.obj_name))
        else:
            self._raiseFailure("{} not triggered".format(exc_name))
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
    def __init__(self, methodName='runTest'):
        """Create an instance of the class that will use the named test
        method when executed. Raises a ValueError if the instance does
        not have a method with the specified name.
        """
        self._testMethodName = methodName
        # Set by run(); doCleanups() reuses the same _Outcome object.
        self._outcomeForDoCleanups = None
        self._testMethodDoc = 'No test'
        try:
            testMethod = getattr(self, methodName)
        except AttributeError:
            if methodName != 'runTest':
                # we allow instantiation with no explicit method name
                # but not an *incorrect* or missing method name
                raise ValueError("no such test method in %s: %s" %
                                 (self.__class__, methodName))
        else:
            self._testMethodDoc = testMethod.__doc__
        self._cleanups = []
        # Map types to custom assertEqual functions that will compare
        # instances of said type in more detail to generate a more useful
        # error message.
        self._type_equality_funcs = {}
        self.addTypeEqualityFunc(dict, 'assertDictEqual')
        self.addTypeEqualityFunc(list, 'assertListEqual')
        self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
        self.addTypeEqualityFunc(set, 'assertSetEqual')
        self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
        self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
    def addTypeEqualityFunc(self, typeobj, function):
        """Add a type specific assertEqual style function to compare a type.
        This method is for use by TestCase subclasses that need to register
        their own type equality functions to provide nicer error messages.
        Args:
            typeobj: The data type to call this function on when both values
                    are of the same type in assertEqual().
            function: The callable taking two arguments and an optional
                    msg= argument that raises self.failureException with a
                    useful error message when the two arguments are not equal.
        """
        # 'function' may also be the *name* of a method (a str);
        # _getAssertEqualityFunc() resolves names via getattr().
        self._type_equality_funcs[typeobj] = function
    def addCleanup(self, function, *args, **kwargs):
        """Add a function, with arguments, to be called when the test is
        completed. Functions added are called on a LIFO basis and are
        called after tearDown on test failure or success.
        Cleanup items are called even if setUp fails (unlike tearDown)."""
        self._cleanups.append((function, args, kwargs))
    def setUp(self):
        "Hook method for setting up the test fixture before exercising it."
        pass
    def tearDown(self):
        "Hook method for deconstructing the test fixture after testing it."
        pass
    @classmethod
    def setUpClass(cls):
        "Hook method for setting up class fixture before running tests in the class."
    @classmethod
    def tearDownClass(cls):
        "Hook method for deconstructing the class fixture after running all tests in the class."
    def countTestCases(self):
        # A TestCase instance always represents exactly one test.
        return 1
    def defaultTestResult(self):
        # Used by run() when the caller does not supply a result object.
        return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
    def id(self):
        """Return a unique identifier: '<module.Class>.<methodName>'."""
        return "%s.%s" % (strclass(self.__class__), self._testMethodName)
    def __eq__(self, other):
        # Two test cases are equal when they are the exact same class
        # running the same test method.
        if type(self) is not type(other):
            return NotImplemented
        return self._testMethodName == other._testMethodName
    def __hash__(self):
        # Consistent with __eq__: keyed on exact type and method name.
        return hash((type(self), self._testMethodName))
    def __str__(self):
        return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
    def __repr__(self):
        return "<%s testMethod=%s>" % \
               (strclass(self.__class__), self._testMethodName)
    def _addSkip(self, result, reason):
        """Record a skip on *result*, tolerating legacy TestResult objects
        that predate addSkip() (reported as a success, with a warning)."""
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None:
            addSkip(self, reason)
        else:
            warnings.warn("TestResult has no addSkip method, skips not reported",
                          RuntimeWarning, 2)
            result.addSuccess(self)
def _executeTestPart(self, function, outcome, isTest=False):
try:
function()
except KeyboardInterrupt:
raise
except SkipTest as e:
outcome.success = False
outcome.skipped = str(e)
except _UnexpectedSuccess:
exc_info = sys.exc_info()
outcome.success = False
if isTest:
outcome.unexpectedSuccess = exc_info
else:
outcome.errors.append(exc_info)
except _ExpectedFailure:
outcome.success = False
exc_info = sys.exc_info()
if isTest:
outcome.expectedFailure = exc_info
else:
outcome.errors.append(exc_info)
except self.failureException:
outcome.success = False
outcome.failures.append(sys.exc_info())
exc_info = sys.exc_info()
except:
outcome.success = False
outcome.errors.append(sys.exc_info())
    def run(self, result=None):
        """Run the test, collecting the outcome into *result*.

        If *result* is None a default result is created and
        startTestRun()/stopTestRun() are invoked around the test.
        Returns *result*, except when the test is skipped at the class or
        method level, in which case None is returned.
        """
        orig_result = result
        if result is None:
            result = self.defaultTestResult()
            # We own this result, so we also drive start/stopTestRun.
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
        result.startTest(self)
        testMethod = getattr(self, self._testMethodName)
        if (getattr(self.__class__, "__unittest_skip__", False) or
            getattr(testMethod, "__unittest_skip__", False)):
            # If the class or method was skipped.
            try:
                skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                            or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, skip_why)
            finally:
                result.stopTest(self)
            return
        try:
            outcome = _Outcome()
            self._outcomeForDoCleanups = outcome
            # setUp -> test method -> tearDown; the later parts are
            # skipped once an earlier part has failed.  Cleanups always run.
            self._executeTestPart(self.setUp, outcome)
            if outcome.success:
                self._executeTestPart(testMethod, outcome, isTest=True)
                self._executeTestPart(self.tearDown, outcome)
            self.doCleanups()
            if outcome.success:
                result.addSuccess(self)
            else:
                # Report every recorded condition, tolerating legacy
                # TestResult objects that lack the newer add* methods.
                if outcome.skipped is not None:
                    self._addSkip(result, outcome.skipped)
                for exc_info in outcome.errors:
                    result.addError(self, exc_info)
                for exc_info in outcome.failures:
                    result.addFailure(self, exc_info)
                if outcome.unexpectedSuccess is not None:
                    addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
                    if addUnexpectedSuccess is not None:
                        addUnexpectedSuccess(self)
                    else:
                        warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
                                      RuntimeWarning)
                        result.addFailure(self, outcome.unexpectedSuccess)
                if outcome.expectedFailure is not None:
                    addExpectedFailure = getattr(result, 'addExpectedFailure', None)
                    if addExpectedFailure is not None:
                        addExpectedFailure(self, outcome.expectedFailure)
                    else:
                        warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
                                      RuntimeWarning)
                        result.addSuccess(self)
            return result
        finally:
            result.stopTest(self)
            if orig_result is None:
                # We created the result above, so we must also finish it.
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcomeForDoCleanups or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
part = lambda: function(*args, **kwargs)
self._executeTestPart(part, outcome)
# return this for backwards compatibility
# even though we no longer us it internally
return outcome.success
    def __call__(self, *args, **kwds):
        # Calling a TestCase instance runs the test.
        return self.run(*args, **kwds)
    def debug(self):
        """Run the test without collecting errors in a TestResult"""
        self.setUp()
        getattr(self, self._testMethodName)()
        self.tearDown()
        # Run cleanups inline so any exception propagates to the caller
        # (unlike doCleanups(), which records them on an outcome).
        while self._cleanups:
            function, args, kwargs = self._cleanups.pop(-1)
            function(*args, **kwargs)
    def skipTest(self, reason):
        """Skip this test."""
        raise SkipTest(reason)
    def fail(self, msg=None):
        """Fail immediately, with the given message."""
        raise self.failureException(msg)
    def assertFalse(self, expr, msg=None):
        """Check that the expression is false."""
        # Truth-value testing, not comparison with False.
        if expr:
            msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
            raise self.failureException(msg)
    def assertTrue(self, expr, msg=None):
        """Check that the expression is true."""
        # Truth-value testing, not comparison with True.
        if not expr:
            msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
            raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
* If an explicit message is provided, plus ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
    def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
        """Fail unless an exception of class excClass is raised
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        raised, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.
        If called with callableObj omitted or None, will return a
        context object used like this::
             with self.assertRaises(SomeException):
                 do_something()
        An optional keyword argument 'msg' can be provided when assertRaises
        is used as a context object.
        The context manager keeps a reference to the exception as
        the 'exception' attribute. This allows you to inspect the
        exception after the assertion::
            with self.assertRaises(SomeException) as cm:
                do_something()
            the_exception = cm.exception
            self.assertEqual(the_exception.error_code, 3)
        """
        # handle() either invokes callableObj immediately or returns the
        # context manager for 'with' usage.
        context = _AssertRaisesContext(excClass, self, callableObj)
        return context.handle('assertRaises', callableObj, args, kwargs)
    def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
        """Fail unless a warning of class warnClass is triggered
        by callable_obj when invoked with arguments args and keyword
        arguments kwargs.  If a different type of warning is
        triggered, it will not be handled: depending on the other
        warning filtering rules in effect, it might be silenced, printed
        out, or raised as an exception.
        If called with callable_obj omitted or None, will return a
        context object used like this::
             with self.assertWarns(SomeWarning):
                 do_something()
        An optional keyword argument 'msg' can be provided when assertWarns
        is used as a context object.
        The context manager keeps a reference to the first matching
        warning as the 'warning' attribute; similarly, the 'filename'
        and 'lineno' attributes give you information about the line
        of Python code from which the warning was triggered.
        This allows you to inspect the warning after the assertion::
            with self.assertWarns(SomeWarning) as cm:
                do_something()
            the_warning = cm.warning
            self.assertEqual(the_warning.some_attribute, 147)
        """
        # Same call-or-context-manager protocol as assertRaises above.
        context = _AssertWarnsContext(expected_warning, self, callable_obj)
        return context.handle('assertWarns', callable_obj, args, kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
if isinstance(asserter, str):
asserter = getattr(self, asserter)
return asserter
return self._baseAssertEqual
    def _baseAssertEqual(self, first, second, msg=None):
        """The default assertEqual implementation, not type specific."""
        # 'not first == second' (rather than 'first != second') so only
        # __eq__ needs to be defined on custom types.
        if not first == second:
            standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
            msg = self._formatMessage(msg, standardMsg)
            raise self.failureException(msg)
    def assertEqual(self, first, second, msg=None):
        """Fail if the two objects are unequal as determined by the '=='
        operator.
        """
        # Dispatch to a type-specific comparator when one is registered
        # for the (identical) type of both arguments.
        assertion_func = self._getAssertEqualityFunc(first, second)
        assertion_func(first, second, msg=msg)
    def assertNotEqual(self, first, second, msg=None):
        """Fail if the two objects are equal as determined by the '!='
        operator.
        """
        if not first != second:
            msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
                                                         safe_repr(second)))
            raise self.failureException(msg)
    def assertAlmostEqual(self, first, second, places=None, msg=None,
                          delta=None):
        """Fail if the two objects are unequal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is more than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
        If the two objects compare equal then they will automatically
        compare almost equal.
        """
        if first == second:
            # shortcut
            return
        # 'places' and 'delta' are mutually exclusive comparison modes.
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        if delta is not None:
            if abs(first - second) <= delta:
                return
            standardMsg = '%s != %s within %s delta' % (safe_repr(first),
                                                        safe_repr(second),
                                                        safe_repr(delta))
        else:
            if places is None:
                places = 7
            if round(abs(second-first), places) == 0:
                return
            standardMsg = '%s != %s within %r places' % (safe_repr(first),
                                                         safe_repr(second),
                                                         places)
        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)
    def assertNotAlmostEqual(self, first, second, places=None, msg=None,
                             delta=None):
        """Fail if the two objects are equal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        difference between the two objects is less than the given delta.
        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
        Objects that are equal automatically fail.
        """
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        if delta is not None:
            if not (first == second) and abs(first - second) > delta:
                return
            standardMsg = '%s == %s within %s delta' % (safe_repr(first),
                                                        safe_repr(second),
                                                        safe_repr(delta))
        else:
            if places is None:
                places = 7
            if not (first == second) and round(abs(second-first), places) != 0:
                return
            standardMsg = '%s == %s within %r places' % (safe_repr(first),
                                                         safe_repr(second),
                                                         places)
        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)
    def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
        """An equality assertion for ordered sequences (like lists and tuples).
        For the purposes of this function, a valid ordered sequence type is one
        which can be indexed, has a length, and has an equality operator.
        Args:
            seq1: The first sequence to compare.
            seq2: The second sequence to compare.
            seq_type: The expected datatype of the sequences, or None if no
                    datatype should be enforced.
            msg: Optional message to use on failure instead of a list of
                    differences.
        """
        if seq_type is not None:
            seq_type_name = seq_type.__name__
            if not isinstance(seq1, seq_type):
                raise self.failureException('First sequence is not a %s: %s'
                                        % (seq_type_name, safe_repr(seq1)))
            if not isinstance(seq2, seq_type):
                raise self.failureException('Second sequence is not a %s: %s'
                                        % (seq_type_name, safe_repr(seq2)))
        else:
            seq_type_name = "sequence"
        # 'differing' is built up incrementally; it stays None while the
        # sequences still look equal.
        differing = None
        try:
            len1 = len(seq1)
        except (TypeError, NotImplementedError):
            differing = 'First %s has no length.    Non-sequence?' % (
                    seq_type_name)
        if differing is None:
            try:
                len2 = len(seq2)
            except (TypeError, NotImplementedError):
                differing = 'Second %s has no length.    Non-sequence?' % (
                        seq_type_name)
        if differing is None:
            if seq1 == seq2:
                return
            seq1_repr = safe_repr(seq1)
            seq2_repr = safe_repr(seq2)
            # Truncate long reprs in the summary header.
            if len(seq1_repr) > 30:
                seq1_repr = seq1_repr[:30] + '...'
            if len(seq2_repr) > 30:
                seq2_repr = seq2_repr[:30] + '...'
            elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
            differing = '%ss differ: %s != %s\n' % elements
            # Find the first index where the sequences disagree (or where
            # one of them cannot be indexed).
            for i in range(min(len1, len2)):
                try:
                    item1 = seq1[i]
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('\nUnable to index element %d of first %s\n' %
                                 (i, seq_type_name))
                    break
                try:
                    item2 = seq2[i]
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('\nUnable to index element %d of second %s\n' %
                                 (i, seq_type_name))
                    break
                if item1 != item2:
                    differing += ('\nFirst differing element %d:\n%s\n%s\n' %
                                 (i, item1, item2))
                    break
            else:
                if (len1 == len2 and seq_type is None and
                    type(seq1) != type(seq2)):
                    # The sequences are the same, but have differing types.
                    return
            # Report extra trailing elements on whichever side is longer.
            if len1 > len2:
                differing += ('\nFirst %s contains %d additional '
                             'elements.\n' % (seq_type_name, len1 - len2))
                try:
                    differing += ('First extra element %d:\n%s\n' %
                                  (len2, seq1[len2]))
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('Unable to index element %d '
                                  'of first %s\n' % (len2, seq_type_name))
            elif len1 < len2:
                differing += ('\nSecond %s contains %d additional '
                             'elements.\n' % (seq_type_name, len2 - len1))
                try:
                    differing += ('First extra element %d:\n%s\n' %
                                  (len1, seq2[len1]))
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('Unable to index element %d '
                                  'of second %s\n' % (len1, seq_type_name))
        standardMsg = differing
        # Append a full ndiff of the pretty-printed sequences (possibly
        # truncated per maxDiff).
        diffMsg = '\n' + '\n'.join(
            difflib.ndiff(pprint.pformat(seq1).splitlines(),
                          pprint.pformat(seq2).splitlines()))
        standardMsg = self._truncateMessage(standardMsg, diffMsg)
        msg = self._formatMessage(msg, standardMsg)
        self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses ducktyping to support different types of sets, and
is optimized for sets specifically (parameters must support a
difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
    def assertIn(self, member, container, msg=None):
        """Just like self.assertTrue(a in b), but with a nicer default message."""
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))
    def assertNotIn(self, member, container, msg=None):
        """Just like self.assertTrue(a not in b), but with a nicer default message."""
        if member in container:
            standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
                                                           safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))
    def assertIs(self, expr1, expr2, msg=None):
        """Just like self.assertTrue(a is b), but with a nicer default message."""
        if expr1 is not expr2:
            standardMsg = '%s is not %s' % (safe_repr(expr1),
                                            safe_repr(expr2))
            self.fail(self._formatMessage(msg, standardMsg))
    def assertIsNot(self, expr1, expr2, msg=None):
        """Just like self.assertTrue(a is not b), but with a nicer default message."""
        if expr1 is expr2:
            # Only one repr needed: both operands are the same object.
            standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
            self.fail(self._formatMessage(msg, standardMsg))
    def assertDictEqual(self, d1, d2, msg=None):
        """A dict-specific equality assertion.  On failure the message
        includes an ndiff of the pretty-printed dicts."""
        self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
        self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
        if d1 != d2:
            standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
            diff = ('\n' + '\n'.join(difflib.ndiff(
                           pprint.pformat(d1).splitlines(),
                           pprint.pformat(d2).splitlines())))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
warnings.warn('assertDictContainsSubset is deprecated',
DeprecationWarning)
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
    def assertCountEqual(self, first, second, msg=None):
        """An unordered sequence comparison asserting that the same elements,
        regardless of order.  If the same element occurs more than once,
        it verifies that the elements occur the same number of times.
            self.assertEqual(Counter(list(first)),
                             Counter(list(second)))
         Example:
            - [0, 1, 1] and [1, 0, 1] compare equal.
            - [0, 0, 1] and [0, 1] compare unequal.
        """
        # Materialise both iterables once so they can be consumed twice.
        first_seq, second_seq = list(first), list(second)
        try:
            first = collections.Counter(first_seq)
            second = collections.Counter(second_seq)
        except TypeError:
            # Handle case with unhashable elements
            differences = _count_diff_all_purpose(first_seq, second_seq)
        else:
            if first == second:
                return
            differences = _count_diff_hashable(first_seq, second_seq)
        if differences:
            standardMsg = 'Element counts were not equal:\n'
            lines = ['First has %d, Second has %d:  %r' % diff for diff in differences]
            diffMsg = '\n'.join(lines)
            standardMsg = self._truncateMessage(standardMsg, diffMsg)
            msg = self._formatMessage(msg, standardMsg)
            self.fail(msg)
    def assertMultiLineEqual(self, first, second, msg=None):
        """Assert that two multi-line strings are equal."""
        self.assertIsInstance(first, str, 'First argument is not a string')
        self.assertIsInstance(second, str, 'Second argument is not a string')
        if first != second:
            # don't use difflib if the strings are too long
            if (len(first) > self._diffThreshold or
                len(second) > self._diffThreshold):
                # first != second here, so this always raises; no diff is
                # computed for very large inputs.
                self._baseAssertEqual(first, second, msg)
            firstlines = first.splitlines(keepends=True)
            secondlines = second.splitlines(keepends=True)
            if len(firstlines) == 1 and first.strip('\r\n') == first:
                # Single-line inputs without a trailing newline: append one
                # so ndiff produces a sensible two-line diff.
                firstlines = [first + '\n']
                secondlines = [second + '\n']
            standardMsg = '%s != %s' % (safe_repr(first, True),
                                        safe_repr(second, True))
            diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))
    # The four ordering asserts below use 'not a < b' (etc.) rather than
    # the inverse operator so only the stated comparison method is needed.
    def assertLess(self, a, b, msg=None):
        """Just like self.assertTrue(a < b), but with a nicer default message."""
        if not a < b:
            standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))
    def assertLessEqual(self, a, b, msg=None):
        """Just like self.assertTrue(a <= b), but with a nicer default message."""
        if not a <= b:
            standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))
    def assertGreater(self, a, b, msg=None):
        """Just like self.assertTrue(a > b), but with a nicer default message."""
        if not a > b:
            standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))
    def assertGreaterEqual(self, a, b, msg=None):
        """Just like self.assertTrue(a >= b), but with a nicer default message."""
        if not a >= b:
            standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))
    def assertIsNone(self, obj, msg=None):
        """Same as self.assertTrue(obj is None), with a nicer default message."""
        if obj is not None:
            standardMsg = '%s is not None' % (safe_repr(obj),)
            self.fail(self._formatMessage(msg, standardMsg))
    def assertIsNotNone(self, obj, msg=None):
        """Included for symmetry with assertIsNone."""
        if obj is None:
            standardMsg = 'unexpectedly None'
            self.fail(self._formatMessage(msg, standardMsg))
    def assertIsInstance(self, obj, cls, msg=None):
        """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
        default message."""
        # 'cls' may also be a tuple of classes, as with isinstance().
        if not isinstance(obj, cls):
            standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
            self.fail(self._formatMessage(msg, standardMsg))
    def assertNotIsInstance(self, obj, cls, msg=None):
        """Included for symmetry with assertIsInstance."""
        if isinstance(obj, cls):
            standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
            self.fail(self._formatMessage(msg, standardMsg))
    def assertRaisesRegex(self, expected_exception, expected_regex,
                          callable_obj=None, *args, **kwargs):
        """Asserts that the message in a raised exception matches a regex.
        Args:
            expected_exception: Exception class expected to be raised.
            expected_regex: Regex (re pattern object or string) expected
                    to be found in error message.
            callable_obj: Function to be called.
            msg: Optional message used in case of failure. Can only be used
                    when assertRaisesRegex is used as a context manager.
            args: Extra args.
            kwargs: Extra kwargs.
        """
        # Same call-or-context-manager protocol as assertRaises, with the
        # additional regex constraint on str(exception).
        context = _AssertRaisesContext(expected_exception, self, callable_obj,
                                       expected_regex)
        return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
    def assertWarnsRegex(self, expected_warning, expected_regex,
                         callable_obj=None, *args, **kwargs):
        """Asserts that the message in a triggered warning matches a regexp.
        Basic functioning is similar to assertWarns() with the addition
        that only warnings whose messages also match the regular expression
        are considered successful matches.
        Args:
            expected_warning: Warning class expected to be triggered.
            expected_regex: Regex (re pattern object or string) expected
                    to be found in error message.
            callable_obj: Function to be called.
            msg: Optional message used in case of failure. Can only be used
                    when assertWarnsRegex is used as a context manager.
            args: Extra args.
            kwargs: Extra kwargs.
        """
        context = _AssertWarnsContext(expected_warning, self, callable_obj,
                                      expected_regex)
        return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, bytes)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def assertNotRegex(self, text, unexpected_regex, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, bytes)):
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
msg = msg or "Regex matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regex.pattern,
text)
raise self.failureException(msg)
    def _deprecate(original_func):
        """Wrap *original_func* so every call emits a DeprecationWarning
        pointing at the preferred modern name."""
        def deprecated_func(*args, **kwargs):
            warnings.warn(
                'Please use {0} instead.'.format(original_func.__name__),
                DeprecationWarning, 2)
            return original_func(*args, **kwargs)
        return deprecated_func
    # see #9424
    # Legacy camelCase / failUnless-style aliases kept for backwards
    # compatibility; each delegates to the modern assert* method above.
    failUnlessEqual = assertEquals = _deprecate(assertEqual)
    failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
    failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
    failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
    failUnless = assert_ = _deprecate(assertTrue)
    failUnlessRaises = _deprecate(assertRaises)
    failIf = _deprecate(assertFalse)
    assertRaisesRegexp = _deprecate(assertRaisesRegex)
    assertRegexpMatches = _deprecate(assertRegex)
class FunctionTestCase(TestCase):
    """A test case that wraps a test function.

    Useful for slipping pre-existing test functions into the unittest
    framework.  Optional set-up and tidy-up callables may be supplied; as
    with TestCase, the tidy-up ('tearDown') callable always runs if the
    set-up ('setUp') callable completed successfully.
    """
    def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
        super(FunctionTestCase, self).__init__()
        self._testFunc = testFunc
        self._setUpFunc = setUp
        self._tearDownFunc = tearDown
        self._description = description
    def setUp(self):
        # Delegate to the wrapped set-up callable, if one was supplied.
        if self._setUpFunc is None:
            return
        self._setUpFunc()
    def tearDown(self):
        if self._tearDownFunc is None:
            return
        self._tearDownFunc()
    def runTest(self):
        self._testFunc()
    def id(self):
        return self._testFunc.__name__
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return ((self._setUpFunc, self._tearDownFunc,
                 self._testFunc, self._description) ==
                (other._setUpFunc, other._tearDownFunc,
                 other._testFunc, other._description))
    def __ne__(self, other):
        return not self == other
    def __hash__(self):
        return hash((type(self), self._setUpFunc, self._tearDownFunc,
                     self._testFunc, self._description))
    def __str__(self):
        return "%s (%s)" % (strclass(self.__class__),
                            self._testFunc.__name__)
    def __repr__(self):
        return "<%s tec=%s>" % (strclass(self.__class__),
                                self._testFunc)
    def shortDescription(self):
        if self._description is not None:
            return self._description
        docstring = self._testFunc.__doc__
        if not docstring:
            return None
        return docstring.split("\n")[0].strip() or None
| lgpl-3.0 |
lino-framework/extjs6 | lino_extjs6/setup_info.py | 1 | 1673 | # -*- coding: UTF-8 -*-
# Copyright 2015-2021 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
# Distribution metadata for the ``lino_extjs6`` package, consumed by the
# project's setup.py.
SETUP_INFO = dict(
    name='lino_extjs6',
    version='17.10.0',
    install_requires=['lino', 'lino_noi'],
    tests_require=[],
    test_suite='tests',
    description="The Sencha ExtJS 6 user interface for Lino",
    license_files=['COPYING'],
    include_package_data=False,
    zip_safe=False,
    author='Rumma & Ko Ltd',
    author_email='info@lino-framework.org',
    url="https://github.com/lino-framework/ext6",
    # Trove classifiers, one per line of this string.
    classifiers="""\
Programming Language :: Python
Programming Language :: Python :: 3
Development Status :: 1 - Planning
Environment :: Web Environment
Framework :: Django
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: GNU Affero General Public License v3
Natural Language :: English
Natural Language :: French
Natural Language :: German
Operating System :: OS Independent
Topic :: Database :: Front-Ends
Topic :: Home Automation
Topic :: Office/Business
Topic :: Software Development :: Libraries :: Application Frameworks""".splitlines())
# Long description shown on the PyPI project page.
SETUP_INFO.update(long_description="""\
The Sencha ExtJS 6 user interface for Lino
The central project homepage is http://extjs6.lino-framework.org/
""")
# Explicit package list derived from the multi-line string below; the
# str() call presumably guards against unicode package names on
# Python 2 -- TODO confirm whether it is still needed.
SETUP_INFO.update(packages=[str(n) for n in """
lino_extjs6
lino_extjs6.extjs
lino_extjs6.projects
lino_extjs6.projects.team6
lino_extjs6.projects.team6.settings
lino_extjs6.projects.team6.tests
lino_extjs6.projects.lydia6
lino_extjs6.projects.lydia6.settings
lino_extjs6.projects.lydia6.tests
""".splitlines() if n])
| bsd-2-clause |
VladimirTyrin/letsencrypt | letsencrypt/proof_of_possession.py | 37 | 3643 | """Proof of Possession Identifier Validation Challenge."""
import logging
import os
from cryptography import x509
from cryptography.hazmat.backends import default_backend
import zope.component
from acme import challenges
from acme import jose
from acme import other
from letsencrypt import interfaces
from letsencrypt.display import util as display_util
logger = logging.getLogger(__name__)
class ProofOfPossession(object):  # pylint: disable=too-few-public-methods
    """Proof of Possession Identifier Validation Challenge.

    Based on draft-barnes-acme, section 6.5.

    :ivar installer: Installer object
    :type installer: :class:`~letsencrypt.interfaces.IInstaller`

    """
    def __init__(self, installer):
        self.installer = installer

    def perform(self, achall):
        """Perform the Proof of Possession Challenge.

        :param achall: Proof of Possession Challenge
        :type achall: :class:`letsencrypt.achallenges.ProofOfPossession`

        :returns: Response or None/False if the challenge cannot be completed
        :rtype: :class:`acme.challenges.ProofOfPossessionResponse`
            or False

        """
        # MAC algorithms carry no public key to match against; also bail out
        # when the hinted JWK type does not match the challenge algorithm.
        if (achall.alg in [jose.HS256, jose.HS384, jose.HS512] or
                not isinstance(achall.hints.jwk, achall.alg.kty)):
            return None

        for cert, key, _ in self.installer.get_all_certs_keys():
            with open(cert) as cert_file:
                cert_data = cert_file.read()
            try:
                cert_obj = x509.load_pem_x509_certificate(
                    cert_data, default_backend())
            except ValueError:
                try:
                    cert_obj = x509.load_der_x509_certificate(
                        cert_data, default_backend())
                except ValueError:
                    # BUGFIX: previously this fell through with `cert_obj`
                    # unbound, raising NameError on the next line.  Skip
                    # certificates we cannot parse.  Also fixed the
                    # "PER" -> "PEM" typo and the deprecated logger.warn().
                    logger.warning(
                        "Certificate is neither PEM nor DER: %s", cert)
                    continue
            cert_key = achall.alg.kty(key=cert_obj.public_key())
            if cert_key == achall.hints.jwk:
                return self._gen_response(achall, key)

        # Is there are different prompt we should give the user?
        code, key = zope.component.getUtility(
            interfaces.IDisplay).input(
                "Path to private key for identifier: %s " % achall.domain)
        if code != display_util.CANCEL:
            return self._gen_response(achall, key)

        # If we get here, the key wasn't found
        return False

    def _gen_response(self, achall, key_path):  # pylint: disable=no-self-use
        """Create the response to the Proof of Possession Challenge.

        :param achall: Proof of Possession Challenge
        :type achall: :class:`letsencrypt.achallenges.ProofOfPossession`

        :param str key_path: Path to the key corresponding to the hinted to
            public key.

        :returns: Response or False if the challenge cannot be completed
        :rtype: :class:`acme.challenges.ProofOfPossessionResponse`
            or False

        """
        if os.path.isfile(key_path):
            with open(key_path, 'rb') as key:
                try:
                    # Needs to be changed if JWKES doesn't have a key attribute
                    jwk = achall.alg.kty.load(key.read())
                    sig = other.Signature.from_msg(achall.nonce, jwk.key,
                                                   alg=achall.alg)
                except (IndexError, ValueError, TypeError, jose.errors.Error):
                    return False
            return challenges.ProofOfPossessionResponse(nonce=achall.nonce,
                                                        signature=sig)
        return False
fujicoin/electrum-fjc | electrum/transaction.py | 1 | 47135 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
import struct
import traceback
import sys
from typing import (Sequence, Union, NamedTuple, Tuple, Optional, Iterable,
Callable, List, Dict)
from . import ecc, bitcoin, constants, segwit_addr
from .util import profiler, to_bytes, bh2u, bfh
from .bitcoin import (TYPE_ADDRESS, TYPE_PUBKEY, TYPE_SCRIPT, hash_160,
hash160_to_p2sh, hash160_to_p2pkh, hash_to_segwit_addr,
hash_encode, var_int, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC, COIN,
push_script, int_to_hex, push_script, b58_address_to_hash160,
opcodes, add_number_to_script, base_decode, is_segwit_script_type)
from .crypto import sha256d
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
from .logging import get_logger
_logger = get_logger(__name__)
NO_SIGNATURE = 'ff'
PARTIAL_TXN_HEADER_MAGIC = b'EPTF\xff'
class SerializationError(Exception):
    """ Thrown when there's a problem deserializing or serializing """


class UnknownTxinType(Exception):
    """Raised internally when a txin's script type cannot be determined."""
    pass


class NotRecognizedRedeemScript(Exception):
    """Raised when a redeem/witness script is not a recognized template."""
    pass


class MalformedBitcoinScript(Exception):
    """Raised when a script cannot be tokenized (e.g. truncated push)."""
    pass
class TxOutput(NamedTuple):
    # Script category: TYPE_ADDRESS / TYPE_PUBKEY / TYPE_SCRIPT.
    type: int
    address: str
    value: Union[int, str]  # str when the output is set to max: '!'


class TxOutputForUI(NamedTuple):
    # Simplified (address, amount) pair for display purposes.
    address: str
    value: int


class TxOutputHwInfo(NamedTuple):
    # Metadata a hardware wallet needs to recognize a change output.
    address_index: Tuple
    sorted_xpubs: Iterable[str]
    num_sig: Optional[int]
    script_type: str


class BIP143SharedTxDigestFields(NamedTuple):
    # Midstate hashes shared by all inputs' BIP-143 sighash preimages (hex).
    hashPrevouts: str
    hashSequence: str
    hashOutputs: str
class BCDataStream(object):
    """Workalike python implementation of Bitcoin's CDataStream class.

    A growable bytearray with a read cursor; numbers are read/written
    little-endian via `struct`.
    """

    def __init__(self):
        self.input = None       # bytearray buffer being (de)serialized
        self.read_cursor = 0    # current read offset into self.input

    def clear(self):
        self.input = None
        self.read_cursor = 0

    def write(self, _bytes):  # Initialize with string of _bytes
        if self.input is None:
            self.input = bytearray(_bytes)
        else:
            self.input += bytearray(_bytes)

    def read_string(self, encoding='ascii'):
        # Strings are encoded depending on length:
        # 0 to 252 : 1-byte-length followed by bytes (if any)
        # 253 to 65,535 : byte'253' 2-byte-length followed by bytes
        # 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
        # ... and the Bitcoin client is coded to understand:
        # greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
        # ... but I don't think it actually handles any strings that big.
        if self.input is None:
            raise SerializationError("call write(bytes) before trying to deserialize")
        length = self.read_compact_size()
        return self.read_bytes(length).decode(encoding)

    def write_string(self, string, encoding='ascii'):
        string = to_bytes(string, encoding)
        # Length-encoded as with read-string
        self.write_compact_size(len(string))
        self.write(string)

    def read_bytes(self, length):
        # BUGFIX: a bytearray slice never raises IndexError, so the old
        # `except IndexError` was dead code and a truncated read silently
        # returned fewer bytes than requested.  Check bounds explicitly.
        end = self.read_cursor + length
        if end > len(self.input):
            raise SerializationError("attempt to read past end of buffer")
        result = self.input[self.read_cursor:end]
        self.read_cursor = end
        return result

    def can_read_more(self) -> bool:
        if not self.input:
            return False
        return self.read_cursor < len(self.input)

    def read_boolean(self):
        # BUGFIX: indexing a bytearray yields an int, so the old comparison
        # against chr(0) (a str) was always True.  Compare with the int 0.
        return self.read_bytes(1)[0] != 0

    def read_int16(self): return self._read_num('<h')
    def read_uint16(self): return self._read_num('<H')
    def read_int32(self): return self._read_num('<i')
    def read_uint32(self): return self._read_num('<I')
    def read_int64(self): return self._read_num('<q')
    def read_uint64(self): return self._read_num('<Q')

    def write_boolean(self, val):
        # BUGFIX: write() needs bytes; chr() produced a str, which made
        # bytearray() raise TypeError.  Write a single byte instead.
        return self.write(b'\x01' if val else b'\x00')

    def write_int16(self, val): return self._write_num('<h', val)
    def write_uint16(self, val): return self._write_num('<H', val)
    def write_int32(self, val): return self._write_num('<i', val)
    def write_uint32(self, val): return self._write_num('<I', val)
    def write_int64(self, val): return self._write_num('<q', val)
    def write_uint64(self, val): return self._write_num('<Q', val)

    def read_compact_size(self):
        # Bitcoin's variable-length integer ("var_int") encoding.
        try:
            size = self.input[self.read_cursor]
            self.read_cursor += 1
            if size == 253:
                size = self._read_num('<H')
            elif size == 254:
                size = self._read_num('<I')
            elif size == 255:
                size = self._read_num('<Q')
            return size
        except IndexError as e:
            raise SerializationError("attempt to read past end of buffer") from e

    def write_compact_size(self, size):
        if size < 0:
            raise SerializationError("attempt to write size < 0")
        elif size < 253:
            self.write(bytes([size]))
        elif size < 2**16:
            self.write(b'\xfd')
            self._write_num('<H', size)
        elif size < 2**32:
            self.write(b'\xfe')
            self._write_num('<I', size)
        elif size < 2**64:
            self.write(b'\xff')
            self._write_num('<Q', size)

    def _read_num(self, format):
        try:
            (i,) = struct.unpack_from(format, self.input, self.read_cursor)
            self.read_cursor += struct.calcsize(format)
        except Exception as e:
            raise SerializationError(e) from e
        return i

    def _write_num(self, format, num):
        s = struct.pack(format, num)
        self.write(s)
def script_GetOp(_bytes: bytes):
    """Tokenize a bitcoin script.

    Yields (opcode, pushed_data_or_None, offset_after_token) for each
    operation.  Raises MalformedBitcoinScript when an explicit push
    length field is truncated.
    """
    pos = 0
    total = len(_bytes)
    while pos < total:
        opcode = _bytes[pos]
        pos += 1
        data = None
        if opcode <= opcodes.OP_PUSHDATA4:
            # Opcodes <= 75 encode the push length themselves;
            # OP_PUSHDATA1/2/4 carry an explicit little-endian length.
            if opcode == opcodes.OP_PUSHDATA1:
                try:
                    push_len = _bytes[pos]
                except IndexError:
                    raise MalformedBitcoinScript()
                pos += 1
            elif opcode == opcodes.OP_PUSHDATA2:
                try:
                    (push_len,) = struct.unpack_from('<H', _bytes, pos)
                except struct.error:
                    raise MalformedBitcoinScript()
                pos += 2
            elif opcode == opcodes.OP_PUSHDATA4:
                try:
                    (push_len,) = struct.unpack_from('<I', _bytes, pos)
                except struct.error:
                    raise MalformedBitcoinScript()
                pos += 4
            else:
                push_len = opcode
            # NOTE: like the reference parser, a truncated *payload* is not
            # an error here -- the slice is simply short.
            data = _bytes[pos:pos + push_len]
            pos += push_len
        yield opcode, data, pos
class OPPushDataGeneric:
    """Template element for match_decoded(): matches any data-push opcode,
    optionally constrained by a predicate on the pushed length."""

    def __init__(self, pushlen: Callable = None):
        # A per-instance predicate shadows the generic classmethod below.
        if pushlen is not None:
            self.check_data_len = pushlen

    @classmethod
    def check_data_len(cls, datalen: int) -> bool:
        # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
        return 0 <= datalen <= opcodes.OP_PUSHDATA4

    @classmethod
    def is_instance(cls, item):
        """True for instances of this class (or subclasses), and also for
        the class objects themselves, so bare subclasses match too."""
        if isinstance(item, cls):
            return True
        return isinstance(item, type) and issubclass(item, cls)
# Matches a push of a raw serialized pubkey (33-byte compressed or
# 65-byte uncompressed).
# note that this does not include x_pubkeys !
OPPushDataPubkey = OPPushDataGeneric(lambda x: x in (33, 65))
def match_decoded(decoded, to_match):
    """Return True iff the tokenized script `decoded` matches the template
    `to_match` (a list of opcode ints and/or OPPushDataGeneric matchers)."""
    if decoded is None or len(decoded) != len(to_match):
        return False
    for expected, token in zip(to_match, decoded):
        # A push matcher is satisfied by any push opcode whose implied
        # length passes its predicate; otherwise opcodes must be equal.
        if OPPushDataGeneric.is_instance(expected) and expected.check_data_len(token[0]):
            continue
        if expected != token[0]:
            return False
    return True
def parse_sig(x_sig):
    """Map the NO_SIGNATURE placeholder to None, keeping real sigs as-is."""
    return [sig if sig != NO_SIGNATURE else None for sig in x_sig]
def safe_parse_pubkey(x):
    """Convert an extended x_pubkey to a plain pubkey, returning the input
    unchanged if conversion fails.

    BUGFIX: the bare ``except:`` also swallowed KeyboardInterrupt and
    SystemExit; narrowed to ``except Exception``.
    """
    try:
        return xpubkey_to_pubkey(x)
    except Exception:
        return x
def parse_scriptSig(d, _bytes):
    """Decode a scriptSig into the txin dict `d` (type, address, pubkeys,
    signatures, ...).  Unrecognized scripts are logged and left undecoded;
    `d` is mutated in place and nothing is returned."""
    try:
        decoded = [ x for x in script_GetOp(_bytes) ]
    except Exception as e:
        # coinbase transactions raise an exception
        _logger.info(f"parse_scriptSig: cannot find address in input script (coinbase?) {bh2u(_bytes)}")
        return

    # single push: either a p2sh-wrapped segwit program or pay-to-pubkey
    match = [OPPushDataGeneric]
    if match_decoded(decoded, match):
        item = decoded[0][1]
        if item[0] == 0:
            # segwit embedded into p2sh
            # witness version 0
            d['address'] = bitcoin.hash160_to_p2sh(hash_160(item))
            if len(item) == 22:
                d['type'] = 'p2wpkh-p2sh'
            elif len(item) == 34:
                d['type'] = 'p2wsh-p2sh'
            else:
                _logger.info(f"unrecognized txin type {bh2u(item)}")
        elif opcodes.OP_1 <= item[0] <= opcodes.OP_16:
            # segwit embedded into p2sh
            # witness version 1-16
            pass
        else:
            # assert item[0] == 0x30
            # pay-to-pubkey
            d['type'] = 'p2pk'
            d['address'] = "(pubkey)"
            d['signatures'] = [bh2u(item)]
            d['num_sig'] = 1
            d['x_pubkeys'] = ["(pubkey)"]
            d['pubkeys'] = ["(pubkey)"]
        return

    # p2pkh TxIn transactions push a signature
    # (71-73 bytes) and then their public key
    # (33 or 65 bytes) onto the stack:
    match = [OPPushDataGeneric, OPPushDataGeneric]
    if match_decoded(decoded, match):
        sig = bh2u(decoded[0][1])
        x_pubkey = bh2u(decoded[1][1])
        try:
            signatures = parse_sig([sig])
            pubkey, address = xpubkey_to_address(x_pubkey)
        except:
            _logger.info(f"parse_scriptSig: cannot find address in input script (p2pkh?) {bh2u(_bytes)}")
            return
        d['type'] = 'p2pkh'
        d['signatures'] = signatures
        d['x_pubkeys'] = [x_pubkey]
        d['num_sig'] = 1
        d['pubkeys'] = [pubkey]
        d['address'] = address
        return

    # p2sh transaction, m of n
    match = [opcodes.OP_0] + [OPPushDataGeneric] * (len(decoded) - 1)
    if match_decoded(decoded, match):
        x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
        redeem_script_unsanitized = decoded[-1][1]  # for partial multisig txn, this has x_pubkeys
        try:
            m, n, x_pubkeys, pubkeys, redeem_script = parse_redeemScript_multisig(redeem_script_unsanitized)
        except NotRecognizedRedeemScript:
            _logger.info(f"parse_scriptSig: cannot find address in input script (p2sh?) {bh2u(_bytes)}")
            # we could still guess:
            # d['address'] = hash160_to_p2sh(hash_160(decoded[-1][1]))
            return
        # write result in d
        d['type'] = 'p2sh'
        d['num_sig'] = m
        d['signatures'] = parse_sig(x_sig)
        d['x_pubkeys'] = x_pubkeys
        d['pubkeys'] = pubkeys
        d['redeem_script'] = redeem_script
        d['address'] = hash160_to_p2sh(hash_160(bfh(redeem_script)))
        return

    # custom partial format for imported addresses
    match = [opcodes.OP_INVALIDOPCODE, opcodes.OP_0, OPPushDataGeneric]
    if match_decoded(decoded, match):
        x_pubkey = bh2u(decoded[2][1])
        pubkey, address = xpubkey_to_address(x_pubkey)
        d['type'] = 'address'
        d['address'] = address
        d['num_sig'] = 1
        d['x_pubkeys'] = [x_pubkey]
        d['pubkeys'] = None  # get_sorted_pubkeys will populate this
        d['signatures'] = [None]
        return

    _logger.info(f"parse_scriptSig: cannot find address in input script (unknown) {bh2u(_bytes)}")
def parse_redeemScript_multisig(redeem_script: bytes):
    """Parse an m-of-n OP_CHECKMULTISIG redeem/witness script.

    Returns (m, n, x_pubkeys, pubkeys, sanitized_script_hex).
    Raises NotRecognizedRedeemScript when the script does not match the
    canonical multisig template.
    """
    try:
        dec2 = [ x for x in script_GetOp(redeem_script) ]
    except MalformedBitcoinScript:
        raise NotRecognizedRedeemScript()
    try:
        # m and n are encoded as OP_1..OP_16 at the ends of the script
        m = dec2[0][0] - opcodes.OP_1 + 1
        n = dec2[-2][0] - opcodes.OP_1 + 1
    except IndexError:
        raise NotRecognizedRedeemScript()
    op_m = opcodes.OP_1 + m - 1
    op_n = opcodes.OP_1 + n - 1
    match_multisig = [op_m] + [OPPushDataGeneric] * n + [op_n, opcodes.OP_CHECKMULTISIG]
    if not match_decoded(dec2, match_multisig):
        raise NotRecognizedRedeemScript()
    x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
    pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
    # reject scripts that do not round-trip to our canonical serialization
    redeem_script2 = bfh(multisig_script(x_pubkeys, m))
    if redeem_script2 != redeem_script:
        raise NotRecognizedRedeemScript()
    redeem_script_sanitized = multisig_script(pubkeys, m)
    return m, n, x_pubkeys, pubkeys, redeem_script_sanitized
def get_address_from_output_script(_bytes: bytes, *, net=None) -> Tuple[int, str]:
    """Classify a scriptPubKey.

    Returns (TYPE_PUBKEY, pubkey_hex), (TYPE_ADDRESS, address) or, for
    unrecognized scripts, (TYPE_SCRIPT, raw_script_hex).
    """
    try:
        decoded = [x for x in script_GetOp(_bytes)]
    except MalformedBitcoinScript:
        decoded = None

    # p2pk
    match = [OPPushDataPubkey, opcodes.OP_CHECKSIG]
    if match_decoded(decoded, match) and ecc.ECPubkey.is_pubkey_bytes(decoded[0][1]):
        return TYPE_PUBKEY, bh2u(decoded[0][1])

    # p2pkh
    match = [opcodes.OP_DUP, opcodes.OP_HASH160, OPPushDataGeneric(lambda x: x == 20), opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG]
    if match_decoded(decoded, match):
        return TYPE_ADDRESS, hash160_to_p2pkh(decoded[2][1], net=net)

    # p2sh
    match = [opcodes.OP_HASH160, OPPushDataGeneric(lambda x: x == 20), opcodes.OP_EQUAL]
    if match_decoded(decoded, match):
        return TYPE_ADDRESS, hash160_to_p2sh(decoded[1][1], net=net)

    # segwit address (version 0)
    match = [opcodes.OP_0, OPPushDataGeneric(lambda x: x in (20, 32))]
    if match_decoded(decoded, match):
        return TYPE_ADDRESS, hash_to_segwit_addr(decoded[1][1], witver=0, net=net)

    # segwit address (version 1-16)
    future_witness_versions = list(range(opcodes.OP_1, opcodes.OP_16 + 1))
    for witver, opcode in enumerate(future_witness_versions, start=1):
        match = [opcode, OPPushDataGeneric(lambda x: 2 <= x <= 40)]
        if match_decoded(decoded, match):
            return TYPE_ADDRESS, hash_to_segwit_addr(decoded[1][1], witver=witver, net=net)

    return TYPE_SCRIPT, bh2u(_bytes)
def parse_input(vds, full_parse: bool):
    """Read one txin from the stream (read order is the wire format).

    With full_parse, additionally decode the scriptSig into
    address/pubkeys/signatures via parse_scriptSig().
    """
    d = {}
    prevout_hash = hash_encode(vds.read_bytes(32))
    prevout_n = vds.read_uint32()
    scriptSig = vds.read_bytes(vds.read_compact_size())
    sequence = vds.read_uint32()
    d['prevout_hash'] = prevout_hash
    d['prevout_n'] = prevout_n
    d['scriptSig'] = bh2u(scriptSig)
    d['sequence'] = sequence
    # an all-zero prevout hash marks the coinbase input
    d['type'] = 'unknown' if prevout_hash != '00'*32 else 'coinbase'
    d['address'] = None
    d['num_sig'] = 0
    if not full_parse:
        return d
    d['x_pubkeys'] = []
    d['pubkeys'] = []
    d['signatures'] = {}
    if d['type'] != 'coinbase' and scriptSig:
        try:
            parse_scriptSig(d, scriptSig)
        except BaseException:
            _logger.exception(f'failed to parse scriptSig {bh2u(scriptSig)}')
    return d
def construct_witness(items: Sequence[Union[str, int, bytes]]) -> str:
    """Constructs a witness (hex) from the given stack items: a var_int
    item count followed by each length-prefixed item."""
    parts = [var_int(len(items))]
    for item in items:
        # NOTE: exact type checks (not isinstance) so that e.g. bool
        # values are not converted like ints -- matching prior behavior.
        if type(item) is int:
            item = bitcoin.script_num_to_hex(item)
        elif type(item) is bytes:
            item = bh2u(item)
        parts.append(bitcoin.witness_push(item))
    return ''.join(parts)
def parse_witness(vds, txin, full_parse: bool):
    """Read this input's witness from the stream into `txin`.

    With full_parse, also decode the witness items into
    pubkeys/signatures and refine txin['type'].
    """
    n = vds.read_compact_size()
    if n == 0:
        txin['witness'] = '00'
        return
    if n == 0xffffffff:
        # electrum partial-txn extension: input value and witness version
        # precede the real witness item count
        txin['value'] = vds.read_uint64()
        txin['witness_version'] = vds.read_uint16()
        n = vds.read_compact_size()
    # now 'n' is the number of items in the witness
    w = list(bh2u(vds.read_bytes(vds.read_compact_size())) for i in range(n))
    txin['witness'] = construct_witness(w)
    if not full_parse:
        return

    try:
        if txin.get('witness_version', 0) != 0:
            raise UnknownTxinType()
        if txin['type'] == 'coinbase':
            pass
        elif txin['type'] == 'address':
            pass
        elif txin['type'] == 'p2wsh-p2sh' or n > 2:
            witness_script_unsanitized = w[-1]  # for partial multisig txn, this has x_pubkeys
            try:
                m, n, x_pubkeys, pubkeys, witness_script = parse_redeemScript_multisig(bfh(witness_script_unsanitized))
            except NotRecognizedRedeemScript:
                raise UnknownTxinType()
            txin['signatures'] = parse_sig(w[1:-1])
            txin['num_sig'] = m
            txin['x_pubkeys'] = x_pubkeys
            txin['pubkeys'] = pubkeys
            txin['witness_script'] = witness_script
            if not txin.get('scriptSig'):  # native segwit script
                txin['type'] = 'p2wsh'
                txin['address'] = bitcoin.script_to_p2wsh(witness_script)
        elif txin['type'] == 'p2wpkh-p2sh' or n == 2:
            txin['num_sig'] = 1
            txin['x_pubkeys'] = [w[1]]
            txin['pubkeys'] = [safe_parse_pubkey(w[1])]
            txin['signatures'] = parse_sig([w[0]])
            if not txin.get('scriptSig'):  # native segwit script
                txin['type'] = 'p2wpkh'
                txin['address'] = bitcoin.public_key_to_p2wpkh(bfh(txin['pubkeys'][0]))
        else:
            raise UnknownTxinType()
    except UnknownTxinType:
        txin['type'] = 'unknown'
    except BaseException:
        txin['type'] = 'unknown'
        _logger.exception(f"failed to parse witness {txin.get('witness')}")
def parse_output(vds, i):
    """Read one txout (index `i`) from the stream; reject amounts outside
    the valid [0, total-supply] range."""
    d = {}
    d['value'] = vds.read_int64()
    if d['value'] > TOTAL_COIN_SUPPLY_LIMIT_IN_BTC * COIN:
        raise SerializationError('invalid output amount (too large)')
    if d['value'] < 0:
        raise SerializationError('invalid output amount (negative)')
    scriptPubKey = vds.read_bytes(vds.read_compact_size())
    d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
    d['scriptPubKey'] = bh2u(scriptPubKey)
    d['prevout_n'] = i
    return d
def deserialize(raw: str, force_full_parse=False) -> dict:
    """Deserialize raw tx hex (standard, segwit, or electrum's partial
    format) into a dict with version/inputs/outputs/lockTime/segwit_ser."""
    raw_bytes = bfh(raw)
    d = {}
    if raw_bytes[:5] == PARTIAL_TXN_HEADER_MAGIC:
        # electrum partial txn: magic, 1-byte format version, then payload
        d['partial'] = is_partial = True
        partial_format_version = raw_bytes[5]
        if partial_format_version != 0:
            raise SerializationError('unknown tx partial serialization format version: {}'
                                     .format(partial_format_version))
        raw_bytes = raw_bytes[6:]
    else:
        d['partial'] = is_partial = False
    full_parse = force_full_parse or is_partial
    vds = BCDataStream()
    vds.write(raw_bytes)
    d['version'] = vds.read_int32()
    n_vin = vds.read_compact_size()
    # BIP-144: a zero input count is the segwit marker byte
    is_segwit = (n_vin == 0)
    if is_segwit:
        marker = vds.read_bytes(1)
        if marker != b'\x01':
            raise ValueError('invalid txn marker byte: {}'.format(marker))
        n_vin = vds.read_compact_size()
    d['segwit_ser'] = is_segwit
    d['inputs'] = [parse_input(vds, full_parse=full_parse) for i in range(n_vin)]
    n_vout = vds.read_compact_size()
    d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
    if is_segwit:
        for i in range(n_vin):
            txin = d['inputs'][i]
            parse_witness(vds, txin, full_parse=full_parse)
    d['lockTime'] = vds.read_uint32()
    if vds.can_read_more():
        raise SerializationError('extra junk at the end')
    return d
# pay & redeem scripts
def multisig_script(public_keys: Sequence[str], m: int) -> str:
    """Serialize an m-of-n OP_CHECKMULTISIG script over hex pubkeys."""
    n = len(public_keys)
    assert 1 <= m <= n <= 15, f'm {m}, n {n}'
    op_m = bh2u(add_number_to_script(m))
    op_n = bh2u(add_number_to_script(n))
    keylist = [push_script(k) for k in public_keys]
    return op_m + ''.join(keylist) + op_n + opcodes.OP_CHECKMULTISIG.hex()
class Transaction:
    def __str__(self):
        # Lazily serialize and cache the raw hex.
        if self.raw is None:
            self.raw = self.serialize()
        return self.raw

    def __init__(self, raw):
        """Wrap a raw tx (hex str, dict with a 'hex' key, or None for an
        empty, to-be-populated transaction)."""
        if raw is None:
            self.raw = None
        elif isinstance(raw, str):
            self.raw = raw.strip() if raw else None
        elif isinstance(raw, dict):
            self.raw = raw['hex']
        else:
            raise Exception("cannot initialize transaction", raw)
        self._inputs = None
        self._outputs = None  # type: List[TxOutput]
        self.locktime = 0
        self.version = 2
        # by default we assume this is a partial txn;
        # this value will get properly set when deserializing
        self.is_partial_originally = True
        self._segwit_ser = None  # None means "don't know"
        self.output_info = None  # type: Optional[Dict[str, TxOutputHwInfo]]

    def update(self, raw):
        # Replace the raw hex and re-deserialize immediately.
        self.raw = raw
        self._inputs = None
        self.deserialize()

    def inputs(self):
        # Deserialize lazily on first access.
        if self._inputs is None:
            self.deserialize()
        return self._inputs

    def outputs(self) -> List[TxOutput]:
        # Deserialize lazily on first access.
        if self._outputs is None:
            self.deserialize()
        return self._outputs
    @classmethod
    def get_sorted_pubkeys(self, txin):
        # sort pubkeys and x_pubkeys, using the order of pubkeys
        # NOTE: declared @classmethod although the first parameter is
        # conventionally named `self`.
        if txin['type'] == 'coinbase':
            return [], []
        x_pubkeys = txin['x_pubkeys']
        pubkeys = txin.get('pubkeys')
        if pubkeys is None:
            # derive pubkeys lazily and cache the sorted result in txin
            pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
            pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
            txin['pubkeys'] = pubkeys = list(pubkeys)
            txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
        return pubkeys, x_pubkeys
    def update_signatures(self, signatures: Sequence[str]):
        """Add new signatures to a transaction

        `signatures` is expected to be a list of sigs with signatures[i]
        intended for self._inputs[i].
        This is used by the Trezor, KeepKey and Safe-T plugins.
        """
        if self.is_complete():
            return
        if len(self.inputs()) != len(signatures):
            raise Exception('expected {} signatures; got {}'.format(len(self.inputs()), len(signatures)))
        for i, txin in enumerate(self.inputs()):
            pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
            sig = signatures[i]
            if sig in txin.get('signatures'):
                continue
            pre_hash = sha256d(bfh(self.serialize_preimage(i)))
            sig_string = ecc.sig_string_from_der_sig(bfh(sig[:-2]))
            # recover which pubkey produced the sig by trying all recids
            for recid in range(4):
                try:
                    public_key = ecc.ECPubkey.from_sig_string(sig_string, recid, pre_hash)
                except ecc.InvalidECPointException:
                    # the point might not be on the curve for some recid values
                    continue
                pubkey_hex = public_key.get_public_key_hex(compressed=True)
                if pubkey_hex in pubkeys:
                    try:
                        public_key.verify_message_hash(sig_string, pre_hash)
                    except Exception:
                        _logger.exception('')
                        continue
                    j = pubkeys.index(pubkey_hex)
                    _logger.info(f"adding sig {i} {j} {pubkey_hex} {sig}")
                    self.add_signature_to_txin(i, j, sig)
                    break
        # redo raw
        self.raw = self.serialize()
    def add_signature_to_txin(self, i, signingPos, sig):
        """Store signature `sig` at position `signingPos` of input `i` and
        invalidate cached serializations."""
        txin = self._inputs[i]
        txin['signatures'][signingPos] = sig
        txin['scriptSig'] = None  # force re-serialization
        txin['witness'] = None  # force re-serialization
        self.raw = None

    def add_inputs_info(self, wallet):
        # Complete txns are immutable; nothing to annotate.
        if self.is_complete():
            return
        for txin in self.inputs():
            wallet.add_input_info(txin)

    def remove_signatures(self):
        # Strip all signatures, turning this back into an unsigned txn.
        for txin in self.inputs():
            txin['signatures'] = [None] * len(txin['signatures'])
            txin['scriptSig'] = None
            txin['witness'] = None
        assert not self.is_complete()
        self.raw = None
    def deserialize(self, force_full_parse=False):
        """Parse self.raw (if any) into inputs/outputs/locktime/version.
        No-op when already deserialized.  Returns the parsed dict."""
        if self.raw is None:
            return
            #self.raw = self.serialize()
        if self._inputs is not None:
            return
        d = deserialize(self.raw, force_full_parse)
        self._inputs = d['inputs']
        self._outputs = [TxOutput(x['type'], x['address'], x['value']) for x in d['outputs']]
        self.locktime = d['lockTime']
        self.version = d['version']
        self.is_partial_originally = d['partial']
        self._segwit_ser = d['segwit_ser']
        return d

    @classmethod
    def from_io(klass, inputs, outputs, locktime=0, version=None):
        """Build a transaction from input/output lists (BIP-69 sorted)."""
        self = klass(None)
        self._inputs = inputs
        self._outputs = outputs
        self.locktime = locktime
        if version is not None:
            self.version = version
        self.BIP69_sort()
        return self
    @classmethod
    def pay_script(self, output_type, addr: str) -> str:
        """Returns scriptPubKey in hex form."""
        if output_type == TYPE_SCRIPT:
            return addr
        elif output_type == TYPE_ADDRESS:
            return bitcoin.address_to_script(addr)
        elif output_type == TYPE_PUBKEY:
            return bitcoin.public_key_to_p2pk_script(addr)
        else:
            raise TypeError('Unknown output type')

    @classmethod
    def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
        # Estimated serialized pubkey size (bytes), inferred from hex prefix.
        try:
            if x_pubkey[0:2] in ['02', '03']:  # compressed pubkey
                return 0x21
            elif x_pubkey[0:2] == '04':  # uncompressed pubkey
                return 0x41
            elif x_pubkey[0:2] == 'ff':  # bip32 extended pubkey
                return 0x21
            elif x_pubkey[0:2] == 'fe':  # old electrum extended pubkey
                return 0x41
        except Exception as e:
            pass
        return 0x21  # just guess it is compressed

    @classmethod
    def estimate_pubkey_size_for_txin(cls, txin):
        # Prefer concrete pubkeys; fall back to x_pubkeys, then a guess.
        pubkeys = txin.get('pubkeys', [])
        x_pubkeys = txin.get('x_pubkeys', [])
        if pubkeys and len(pubkeys) > 0:
            return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
        elif x_pubkeys and len(x_pubkeys) > 0:
            return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
        else:
            return 0x21  # just guess it is compressed
    @classmethod
    def get_siglist(self, txin, estimate_size=False):
        """Return (pubkey_list, sig_list) for serializing this input's
        scriptSig/witness; with estimate_size, use fixed-size dummies."""
        # if we have enough signatures, we use the actual pubkeys
        # otherwise, use extended pubkeys (with bip32 derivation)
        if txin['type'] == 'coinbase':
            return [], []
        num_sig = txin.get('num_sig', 1)
        if estimate_size:
            pubkey_size = self.estimate_pubkey_size_for_txin(txin)
            pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
            # we assume that signature will be 0x48 bytes long
            sig_list = [ "00" * 0x48 ] * num_sig
        else:
            pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
            x_signatures = txin['signatures']
            signatures = list(filter(None, x_signatures))
            is_complete = len(signatures) == num_sig
            if is_complete:
                pk_list = pubkeys
                sig_list = signatures
            else:
                # partial txn: keep placeholders so positions stay aligned
                pk_list = x_pubkeys
                sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
        return pk_list, sig_list
    @classmethod
    def serialize_witness(self, txin, estimate_size=False):
        """Serialize this input's witness (hex); '00' for non-segwit inputs.
        Incomplete inputs are prefixed with electrum's partial format
        (0xffffffff marker + value + witness version)."""
        _type = txin['type']
        if not self.is_segwit_input(txin) and not txin['type'] == 'address':
            return '00'
        if _type == 'coinbase':
            return txin['witness']

        witness = txin.get('witness', None)
        if witness is None or estimate_size:
            if _type == 'address' and estimate_size:
                _type = self.guess_txintype_from_address(txin['address'])
            pubkeys, sig_list = self.get_siglist(txin, estimate_size)
            if _type in ['p2wpkh', 'p2wpkh-p2sh']:
                witness = construct_witness([sig_list[0], pubkeys[0]])
            elif _type in ['p2wsh', 'p2wsh-p2sh']:
                witness_script = multisig_script(pubkeys, txin['num_sig'])
                witness = construct_witness([0] + sig_list + [witness_script])
            else:
                witness = txin.get('witness', '00')

        if self.is_txin_complete(txin) or estimate_size:
            partial_format_witness_prefix = ''
        else:
            input_value = int_to_hex(txin['value'], 8)
            witness_version = int_to_hex(txin.get('witness_version', 0), 2)
            partial_format_witness_prefix = var_int(0xffffffff) + input_value + witness_version
        return partial_format_witness_prefix + witness
    @classmethod
    def is_segwit_input(cls, txin, guess_for_address=False):
        # Segwit if the script type says so, or if a non-empty witness
        # is already attached.
        _type = txin['type']
        if _type == 'address' and guess_for_address:
            _type = cls.guess_txintype_from_address(txin['address'])
        has_nonzero_witness = txin.get('witness', '00') not in ('00', None)
        return is_segwit_script_type(_type) or has_nonzero_witness

    @classmethod
    def guess_txintype_from_address(cls, addr):
        # It's not possible to tell the script type in general
        # just from an address.
        # - "1" addresses are of course p2pkh
        # - "3" addresses are p2sh but we don't know the redeem script..
        # - "bc1" addresses (if they are 42-long) are p2wpkh
        # - "bc1" addresses that are 62-long are p2wsh but we don't know the script..
        # If we don't know the script, we _guess_ it is pubkeyhash.
        # As this method is used e.g. for tx size estimation,
        # the estimation will not be precise.
        witver, witprog = segwit_addr.decode(constants.net.SEGWIT_HRP, addr)
        if witprog is not None:
            return 'p2wpkh'
        addrtype, hash_160_ = b58_address_to_hash160(addr)
        if addrtype == constants.net.ADDRTYPE_P2PKH:
            return 'p2pkh'
        elif addrtype == constants.net.ADDRTYPE_P2SH:
            return 'p2wpkh-p2sh'
    @classmethod
    def input_script(self, txin, estimate_size=False):
        """Serialize this input's scriptSig (hex) according to its type."""
        _type = txin['type']
        if _type == 'coinbase':
            return txin['scriptSig']

        # If there is already a saved scriptSig, just return that.
        # This allows manual creation of txins of any custom type.
        # However, if the txin is not complete, we might have some garbage
        # saved from our partial txn ser format, so we re-serialize then.
        script_sig = txin.get('scriptSig', None)
        if script_sig is not None and self.is_txin_complete(txin):
            return script_sig

        pubkeys, sig_list = self.get_siglist(txin, estimate_size)
        script = ''.join(push_script(x) for x in sig_list)
        if _type == 'address' and estimate_size:
            _type = self.guess_txintype_from_address(txin['address'])
        if _type == 'p2pk':
            pass
        elif _type == 'p2sh':
            # put op_0 before script
            script = '00' + script
            redeem_script = multisig_script(pubkeys, txin['num_sig'])
            script += push_script(redeem_script)
        elif _type == 'p2pkh':
            script += push_script(pubkeys[0])
        elif _type in ['p2wpkh', 'p2wsh']:
            # native segwit: scriptSig is empty, data lives in the witness
            return ''
        elif _type == 'p2wpkh-p2sh':
            pubkey = safe_parse_pubkey(pubkeys[0])
            scriptSig = bitcoin.p2wpkh_nested_script(pubkey)
            return push_script(scriptSig)
        elif _type == 'p2wsh-p2sh':
            if estimate_size:
                witness_script = ''
            else:
                witness_script = self.get_preimage_script(txin)
            scriptSig = bitcoin.p2wsh_nested_script(witness_script)
            return push_script(scriptSig)
        elif _type == 'address':
            # custom partial format for imported addresses
            return bytes([opcodes.OP_INVALIDOPCODE, opcodes.OP_0]).hex() + push_script(pubkeys[0])
        elif _type == 'unknown':
            return txin['scriptSig']
        return script
    @classmethod
    def is_txin_complete(cls, txin):
        # An input is complete once it carries all `num_sig` signatures.
        if txin['type'] == 'coinbase':
            return True
        num_sig = txin.get('num_sig', 1)
        if num_sig == 0:
            return True
        x_signatures = txin['signatures']
        signatures = list(filter(None, x_signatures))
        return len(signatures) == num_sig

    @classmethod
    def get_preimage_script(self, txin):
        """Script that gets signed for this input (the BIP-143 scriptCode
        for segwit types)."""
        preimage_script = txin.get('preimage_script', None)
        if preimage_script is not None:
            return preimage_script

        pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
        if txin['type'] == 'p2pkh':
            return bitcoin.address_to_script(txin['address'])
        elif txin['type'] in ['p2sh', 'p2wsh', 'p2wsh-p2sh']:
            return multisig_script(pubkeys, txin['num_sig'])
        elif txin['type'] in ['p2wpkh', 'p2wpkh-p2sh']:
            # per BIP-143, p2wpkh signs the corresponding p2pkh script
            pubkey = pubkeys[0]
            pkh = bh2u(hash_160(bfh(pubkey)))
            return '76a9' + push_script(pkh) + '88ac'
        elif txin['type'] == 'p2pk':
            pubkey = pubkeys[0]
            return bitcoin.public_key_to_p2pk_script(pubkey)
        else:
            raise TypeError('Unknown txin type', txin['type'])
    @classmethod
    def serialize_outpoint(self, txin):
        # txid is displayed big-endian but serialized little-endian
        return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)

    @classmethod
    def get_outpoint_from_txin(cls, txin):
        # Human-readable "txid:n" form; coinbase inputs have no outpoint.
        if txin['type'] == 'coinbase':
            return None
        prevout_hash = txin['prevout_hash']
        prevout_n = txin['prevout_n']
        return prevout_hash + ':%d' % prevout_n

    @classmethod
    def serialize_input(self, txin, script):
        # Prev hash and index
        s = self.serialize_outpoint(txin)
        # Script length, script, sequence
        s += var_int(len(script)//2)
        s += script
        s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
        return s

    def set_rbf(self, rbf):
        # BIP-125: sequence < 0xfffffffe signals opt-in replace-by-fee
        nSequence = 0xffffffff - (2 if rbf else 1)
        for txin in self.inputs():
            txin['sequence'] = nSequence

    def BIP69_sort(self, inputs=True, outputs=True):
        # Deterministic lexicographic ordering of inputs/outputs (BIP-69).
        if inputs:
            self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
        if outputs:
            self._outputs.sort(key = lambda o: (o.value, self.pay_script(o.type, o.address)))
@classmethod
def serialize_output(cls, output: TxOutput) -> str:
s = int_to_hex(output.value, 8)
script = cls.pay_script(output.type, output.address)
s += var_int(len(script)//2)
s += script
return s
def _calc_bip143_shared_txdigest_fields(self) -> BIP143SharedTxDigestFields:
    """Precompute the BIP-143 digest fields shared by all inputs.

    Each field is the double-SHA256 (hex) of the concatenated per-input
    outpoints / sequences, or per-output serializations, so they can be
    computed once and reused when signing multiple segwit inputs.
    """
    inputs = self.inputs()
    outputs = self.outputs()
    hashPrevouts = bh2u(sha256d(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs))))
    hashSequence = bh2u(sha256d(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs))))
    hashOutputs = bh2u(sha256d(bfh(''.join(self.serialize_output(o) for o in outputs))))
    return BIP143SharedTxDigestFields(hashPrevouts=hashPrevouts,
                                      hashSequence=hashSequence,
                                      hashOutputs=hashOutputs)
def serialize_preimage(self, txin_index: int, *,
                       bip143_shared_txdigest_fields: BIP143SharedTxDigestFields = None) -> str:
    """Return the hex sighash preimage for input *txin_index* (SIGHASH_ALL only).

    Segwit inputs use the BIP-143 digest layout (with the precomputed shared
    fields when supplied); legacy inputs use the original scheme in which
    every input except the one being signed carries an empty script.
    """
    nVersion = int_to_hex(self.version, 4)
    nHashType = int_to_hex(1, 4)  # SIGHASH_ALL
    nLocktime = int_to_hex(self.locktime, 4)
    inputs = self.inputs()
    outputs = self.outputs()
    txin = inputs[txin_index]
    if self.is_segwit_input(txin):
        if bip143_shared_txdigest_fields is None:
            bip143_shared_txdigest_fields = self._calc_bip143_shared_txdigest_fields()
        hashPrevouts = bip143_shared_txdigest_fields.hashPrevouts
        hashSequence = bip143_shared_txdigest_fields.hashSequence
        hashOutputs = bip143_shared_txdigest_fields.hashOutputs
        outpoint = self.serialize_outpoint(txin)
        preimage_script = self.get_preimage_script(txin)
        scriptCode = var_int(len(preimage_script) // 2) + preimage_script
        amount = int_to_hex(txin['value'], 8)
        nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
        preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + hashOutputs + nLocktime + nHashType
    else:
        # Legacy sighash: only input txin_index keeps its (preimage) script.
        txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.get_preimage_script(txin) if txin_index==k else '')
                                               for k, txin in enumerate(inputs))
        txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
        preimage = nVersion + txins + txouts + nLocktime + nHashType
    return preimage
def is_segwit(self, guess_for_address=False):
    """Return whether this transaction uses the segwit serialization format."""
    if not self.is_partial_originally:
        return self._segwit_ser
    return any(self.is_segwit_input(txin, guess_for_address=guess_for_address)
               for txin in self.inputs())
def serialize(self, estimate_size=False, witness=True):
    """Serialize to hex; an incomplete partial txn gets the partial-format header."""
    network_ser = self.serialize_to_network(estimate_size, witness)
    if estimate_size:
        return network_ser
    needs_partial_header = self.is_partial_originally and not self.is_complete()
    if not needs_partial_header:
        return network_ser
    partial_format_version = '00'
    return bh2u(PARTIAL_TXN_HEADER_MAGIC) + partial_format_version + network_ser
def serialize_to_network(self, estimate_size=False, witness=True):
    """Serialize the transaction to raw network-format hex.

    estimate_size -- use placeholder scripts/witnesses for fee estimation.
    witness -- include the segwit marker/flag/witness section when applicable.
    """
    self.deserialize()
    nVersion = int_to_hex(self.version, 4)
    nLocktime = int_to_hex(self.locktime, 4)
    inputs = self.inputs()
    outputs = self.outputs()
    txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size)) for txin in inputs)
    txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
    use_segwit_ser_for_estimate_size = estimate_size and self.is_segwit(guess_for_address=True)
    use_segwit_ser_for_actual_use = not estimate_size and \
        (self.is_segwit() or any(txin['type'] == 'address' for txin in inputs))
    use_segwit_ser = use_segwit_ser_for_estimate_size or use_segwit_ser_for_actual_use
    if witness and use_segwit_ser:
        marker = '00'
        flag = '01'
        # renamed from 'witness' to avoid shadowing the boolean parameter
        witness_ser = ''.join(self.serialize_witness(x, estimate_size) for x in inputs)
        return nVersion + marker + flag + txins + txouts + witness_ser + nLocktime
    else:
        return nVersion + txins + txouts + nLocktime
def txid(self):
    """Return the txid (hash of the non-witness serialization), or None if
    the txid cannot be computed yet (incomplete, not fully-segwit tx)."""
    self.deserialize()
    all_segwit = all(self.is_segwit_input(txin) for txin in self.inputs())
    if not (all_segwit or self.is_complete()):
        return None
    ser = self.serialize_to_network(witness=False)
    return bh2u(sha256d(bfh(ser))[::-1])
def wtxid(self):
    """Return the wtxid (hash including witness data), or None if incomplete."""
    self.deserialize()
    if not self.is_complete():
        return None
    full_ser = self.serialize_to_network(witness=True)
    return bh2u(sha256d(bfh(full_ser))[::-1])
def add_inputs(self, inputs):
    """Append inputs, drop any cached serialization, and re-sort per BIP-69."""
    self.raw = None
    self._inputs.extend(inputs)
    self.BIP69_sort(outputs=False)
def add_outputs(self, outputs):
    """Append outputs, drop any cached serialization, and re-sort per BIP-69."""
    self.raw = None
    self._outputs.extend(outputs)
    self.BIP69_sort(inputs=False)
def input_value(self) -> int:
    """Total value of all inputs."""
    return sum(txin['value'] for txin in self.inputs())
def output_value(self) -> int:
    """Total value of all outputs."""
    return sum(txout.value for txout in self.outputs())
def get_fee(self) -> int:
    """Fee is what goes in minus what comes out."""
    total_in = self.input_value()
    total_out = self.output_value()
    return total_in - total_out
def is_final(self):
    """Return True unless any input signals BIP-125 replaceability.

    Uses a generator instead of a materialized list so any() can
    short-circuit on the first replaceable input.
    """
    return not any(txin.get('sequence', 0xffffffff - 1) < 0xffffffff - 1
                   for txin in self.inputs())
def estimated_size(self):
    """Return an estimated virtual tx size in vbytes.

    BIP-0141 defines 'Virtual transaction size' to be weight/4 rounded up.
    This definition is only for humans, and has little meaning otherwise.
    If we wanted sub-byte precision, fee calculation should use transaction
    weights, but for simplicity we approximate that with (virtual_size)x4
    """
    return self.virtual_size_from_weight(self.estimated_weight())
@classmethod
def estimated_input_weight(cls, txin, is_segwit_tx):
    '''Return an estimate of serialized input weight in weight units.'''
    script = cls.input_script(txin, True)
    base_size = len(cls.serialize_input(txin, script)) // 2
    if cls.is_segwit_input(txin, guess_for_address=True):
        wit_size = len(cls.serialize_witness(txin, True)) // 2
    else:
        # in a segwit tx even legacy inputs cost one extra weight unit
        # (presumably the empty witness count byte -- confirm)
        wit_size = 1 if is_segwit_tx else 0
    return 4 * base_size + wit_size
@classmethod
def estimated_output_size(cls, address):
    """Return an estimate of serialized output size in bytes."""
    spk = bitcoin.address_to_script(address)
    # 8-byte value + 1-byte script length + the script itself
    return 9 + len(spk) // 2
@classmethod
def virtual_size_from_weight(cls, weight):
    """Convert weight units to vbytes: ceil(weight / 4)."""
    return (weight + 3) // 4
def estimated_total_size(self):
    """Return an estimated total transaction size in bytes."""
    if self.is_complete() and self.raw is not None:
        return len(self.raw) // 2  # raw is an ASCII hex string
    return len(self.serialize(True)) // 2
def estimated_witness_size(self):
    """Return an estimate of witness size in bytes (0 for non-segwit txns)."""
    estimate = not self.is_complete()
    if not self.is_segwit(guess_for_address=estimate):
        return 0
    witness_hex = ''.join(self.serialize_witness(txin, estimate)
                          for txin in self.inputs())
    # +2 bytes to account for the segwit marker and flag
    return len(witness_hex) // 2 + 2
def estimated_base_size(self):
    """Return an estimated base transaction size in bytes (total minus witness)."""
    total = self.estimated_total_size()
    return total - self.estimated_witness_size()
def estimated_weight(self):
    """Return an estimate of transaction weight (3 * base_size + total_size)."""
    total = self.estimated_total_size()
    base = self.estimated_base_size()
    return 3 * base + total
def signature_count(self):
    """Return (signatures_present, signatures_required) over all inputs."""
    present = 0
    required = 0
    for txin in self.inputs():
        if txin['type'] == 'coinbase':
            continue
        present += len([sig for sig in txin.get('signatures', []) if sig])
        required += txin.get('num_sig', -1)
    return present, required
def is_complete(self):
    """True when every required signature is present."""
    present, required = self.signature_count()
    return present == required
def sign(self, keypairs) -> None:
    """Add signatures for every input slot whose (x_)pubkey is in *keypairs*,
    then cache the resulting serialization in self.raw."""
    # keypairs: (x_)pubkey -> secret_bytes
    bip143_shared_txdigest_fields = self._calc_bip143_shared_txdigest_fields()
    for i, txin in enumerate(self.inputs()):
        pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
        for j, (pubkey, x_pubkey) in enumerate(zip(pubkeys, x_pubkeys)):
            if self.is_txin_complete(txin):
                break  # this input already has all required signatures
            # prefer the plain pubkey; fall back to the extended pubkey
            if pubkey in keypairs:
                _pubkey = pubkey
            elif x_pubkey in keypairs:
                _pubkey = x_pubkey
            else:
                continue  # we hold no key for this slot
            _logger.info(f"adding signature for {_pubkey}")
            sec, compressed = keypairs.get(_pubkey)
            sig = self.sign_txin(i, sec, bip143_shared_txdigest_fields=bip143_shared_txdigest_fields)
            self.add_signature_to_txin(i, j, sig)
    _logger.info(f"is_complete {self.is_complete()}")
    self.raw = self.serialize()
def sign_txin(self, txin_index, privkey_bytes, *, bip143_shared_txdigest_fields=None) -> str:
    """Return the hex signature (plus SIGHASH_ALL byte) for one input."""
    preimage = self.serialize_preimage(txin_index,
                                       bip143_shared_txdigest_fields=bip143_shared_txdigest_fields)
    pre_hash = sha256d(bfh(preimage))
    privkey = ecc.ECPrivkey(privkey_bytes)
    # the trailing '01' is the SIGHASH_ALL flag
    return bh2u(privkey.sign_transaction(pre_hash)) + '01'
def get_outputs_for_UI(self) -> Sequence[TxOutputForUI]:
    """Return display-friendly (label, value) pairs for every output."""
    prefix_by_type = {TYPE_ADDRESS: '', TYPE_PUBKEY: 'PUBKEY '}
    result = []
    for out in self.outputs():
        prefix = prefix_by_type.get(out.type, 'SCRIPT ')
        result.append(TxOutputForUI(prefix + out.address, out.value))  # consider using yield
    return result
def has_address(self, addr: str) -> bool:
    """True if *addr* is used by any output or any input of this transaction."""
    if any(out.address == addr for out in self.outputs()):
        return True
    return any(txin.get("address") == addr for txin in self.inputs())
def as_dict(self):
    """Return a JSON-friendly summary: raw hex plus completeness/finality flags."""
    if self.raw is None:
        self.raw = self.serialize()
    self.deserialize()
    return {
        'hex': self.raw,
        'complete': self.is_complete(),
        'final': self.is_final(),
    }
def tx_from_str(txt: str) -> str:
    """Sanitizes tx-describing input (json or raw hex or base43) into
    raw hex transaction.

    Tries, in order: raw hex, base43, then JSON with a "hex" key.
    """
    assert isinstance(txt, str), f"txt must be str, not {type(txt)}"
    txt = txt.strip()
    if not txt:
        raise ValueError("empty string")
    # try hex
    try:
        bfh(txt)
        return txt
    # narrowed from bare 'except:' so KeyboardInterrupt/SystemExit propagate
    except Exception:
        pass
    # try base43
    try:
        return base_decode(txt, length=None, base=43).hex()
    except Exception:
        pass
    # try json
    import json
    tx_dict = json.loads(str(txt))
    assert "hex" in tx_dict.keys()
    return tx_dict["hex"]
| mit |
tudyzhb/yichui | django/utils/dateparse.py | 96 | 2896 | """Functions to parse datetime objects."""
# We're using regular expressions rather than time.strptime because:
# - They provide both validation and parsing.
# - They're more flexible for datetimes.
# - The date/datetime/time constructors produce friendlier error messages.
import datetime
import re
from django.utils.timezone import utc
from django.utils.tzinfo import FixedOffset
date_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)

datetime_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r'(?P<tzinfo>Z|[+-]\d{1,2}:\d{1,2})?$'
)

time_re = re.compile(
    r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)


def parse_date(value):
    """Parses a string and return a datetime.date.

    Raises ValueError if the input is well formatted but not a valid date.
    Returns None if the input isn't well formatted.
    """
    match = date_re.match(value)
    if match:
        # .items() rather than .iteritems(): works on both Python 2 and 3.
        kw = dict((k, int(v)) for k, v in match.groupdict().items())
        return datetime.date(**kw)


def parse_time(value):
    """Parses a string and return a datetime.time.

    This function doesn't support time zone offsets.
    Sub-microsecond precision is accepted, but ignored.
    Raises ValueError if the input is well formatted but not a valid time.
    Returns None if the input isn't well formatted, in particular if it
    contains an offset.
    """
    match = time_re.match(value)
    if match:
        kw = match.groupdict()
        if kw['microsecond']:
            # right-pad so e.g. '.5' means 500000 microseconds
            kw['microsecond'] = kw['microsecond'].ljust(6, '0')
        # .items() rather than .iteritems(): works on both Python 2 and 3.
        kw = dict((k, int(v)) for k, v in kw.items() if v is not None)
        return datetime.time(**kw)


def parse_datetime(value):
    """Parses a string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses an instance of FixedOffset as tzinfo.
    Sub-microsecond precision is accepted, but ignored.
    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    match = datetime_re.match(value)
    if match:
        kw = match.groupdict()
        if kw['microsecond']:
            kw['microsecond'] = kw['microsecond'].ljust(6, '0')
        tzinfo = kw.pop('tzinfo')
        if tzinfo == 'Z':
            tzinfo = utc
        elif tzinfo is not None:
            # offset string looks like '+HH:MM' / '-HH:MM'
            offset = 60 * int(tzinfo[1:3]) + int(tzinfo[4:6])
            if tzinfo[0] == '-':
                offset = -offset
            tzinfo = FixedOffset(offset)
        # .items() rather than .iteritems(): works on both Python 2 and 3.
        kw = dict((k, int(v)) for k, v in kw.items() if v is not None)
        kw['tzinfo'] = tzinfo
        return datetime.datetime(**kw)
| bsd-3-clause |
DelazJ/QGIS | tests/src/python/test_qgsfieldmodel.py | 31 | 19729 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsFieldModel
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '14/11/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsField,
QgsFields,
QgsVectorLayer,
QgsFieldModel,
QgsFieldProxyModel,
QgsEditorWidgetSetup,
QgsProject,
QgsVectorLayerJoinInfo,
QgsFieldConstraints)
from qgis.PyQt.QtCore import QVariant, Qt, QModelIndex
from qgis.testing import start_app, unittest
start_app()
def create_layer():
    """Build an in-memory point layer with a string and an integer field,
    wiring a 'Hidden' widget to field 0 and a 'ValueMap' widget to field 1."""
    layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
                           "addfeat", "memory")
    layer.setEditorWidgetSetup(0, QgsEditorWidgetSetup('Hidden', {}))
    layer.setEditorWidgetSetup(1, QgsEditorWidgetSetup('ValueMap', {}))
    assert layer.isValid()
    return layer
def create_model():
    """Return (layer, QgsFieldModel) with the model bound to the test layer."""
    l = create_layer()
    m = QgsFieldModel()
    m.setLayer(l)
    return l, m
class TestQgsFieldModel(unittest.TestCase):
    """Unit tests for QgsFieldModel roles, lookup and proxy filtering."""

    def testGettersSetters(self):
        """ test model getters/setters """
        l = create_layer()
        m = QgsFieldModel()
        self.assertFalse(m.layer())
        m.setLayer(l)
        self.assertEqual(m.layer(), l)
        m.setAllowExpression(True)
        self.assertTrue(m.allowExpression())
        m.setAllowExpression(False)
        self.assertFalse(m.allowExpression())
        m.setAllowEmptyFieldName(True)
        self.assertTrue(m.allowEmptyFieldName())
        m.setAllowEmptyFieldName(False)
        self.assertFalse(m.allowEmptyFieldName())
        fields = QgsFields()
        fields.append(QgsField('test1', QVariant.String))
        fields.append(QgsField('test2', QVariant.String))
        m.setFields(fields)
        self.assertIsNone(m.layer())
        self.assertEqual(m.fields(), fields)

    def testIndexFromName(self):
        l, m = create_model()
        i = m.indexFromName('fldtxt')
        self.assertTrue(i.isValid())
        self.assertEqual(i.row(), 0)
        i = m.indexFromName('fldint')
        self.assertTrue(i.isValid())
        self.assertEqual(i.row(), 1)
        i = m.indexFromName('not a field')
        self.assertFalse(i.isValid())
        # test with alias
        i = m.indexFromName('text field')
        self.assertFalse(i.isValid())
        l.setFieldAlias(0, 'text field')
        i = m.indexFromName('text field')
        self.assertTrue(i.isValid())
        self.assertEqual(i.row(), 0)
        i = m.indexFromName('int field')
        self.assertFalse(i.isValid())
        l.setFieldAlias(1, 'int field')
        i = m.indexFromName('int field')
        self.assertTrue(i.isValid())
        self.assertEqual(i.row(), 1)
        # should be case insensitive
        i = m.indexFromName('FLDTXT')
        self.assertTrue(i.isValid())
        self.assertEqual(i.row(), 0)
        i = m.indexFromName('FLDINT')
        self.assertTrue(i.isValid())
        self.assertEqual(i.row(), 1)
        # try with expression
        m.setAllowExpression(True)
        i = m.indexFromName('not a field')
        # still not valid - needs expression set first
        self.assertFalse(i.isValid())
        m.setExpression('not a field')
        i = m.indexFromName('not a field')
        self.assertTrue(i.isValid())
        self.assertEqual(i.row(), 2)
        # try with null
        i = m.indexFromName(None)
        self.assertFalse(i.isValid())
        m.setAllowEmptyFieldName(True)
        i = m.indexFromName(None)
        self.assertTrue(i.isValid())
        self.assertEqual(i.row(), 0)
        # when null is shown, all other rows should be offset
        self.assertEqual(m.indexFromName('fldtxt').row(), 1)
        self.assertEqual(m.indexFromName('fldint').row(), 2)
        self.assertEqual(m.indexFromName('not a field').row(), 3)
        self.assertEqual(m.indexFromName('FLDTXT').row(), 1)
        self.assertEqual(m.indexFromName('FLDINT').row(), 2)

    def testIsField(self):
        l, m = create_model()
        self.assertTrue(m.isField('fldtxt'))
        self.assertTrue(m.isField('fldint'))
        self.assertFalse(m.isField(None))
        self.assertFalse(m.isField('an expression'))

    def testRowCount(self):
        l, m = create_model()
        self.assertEqual(m.rowCount(), 2)
        m.setAllowEmptyFieldName(True)
        self.assertEqual(m.rowCount(), 3)
        m.setAllowExpression(True)
        m.setExpression('not a field')
        self.assertEqual(m.rowCount(), 4)
        m.setExpression('not a field')
        self.assertEqual(m.rowCount(), 4)
        m.setExpression('not a field 2')
        self.assertEqual(m.rowCount(), 4)
        m.removeExpression()
        self.assertEqual(m.rowCount(), 3)

    def testFieldNameRole(self):
        l, m = create_model()
        self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldNameRole), 'fldtxt')
        self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldNameRole), 'fldint')
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldNameRole))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldNameRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldNameRole))
        m.setAllowEmptyFieldName(True)
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldNameRole))

    def testExpressionRole(self):
        l, m = create_model()
        self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.ExpressionRole), 'fldtxt')
        self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.ExpressionRole), 'fldint')
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.ExpressionRole))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.ExpressionRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertEqual(m.data(m.indexFromName('an expression'), QgsFieldModel.ExpressionRole), 'an expression')
        m.setAllowEmptyFieldName(True)
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.ExpressionRole))

    def testFieldIndexRole(self):
        l, m = create_model()
        self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldIndexRole), 0)
        self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldIndexRole), 1)
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldIndexRole))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldIndexRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldIndexRole))
        m.setAllowEmptyFieldName(True)
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldIndexRole))

    def testIsExpressionRole(self):
        l, m = create_model()
        self.assertFalse(m.data(m.indexFromName('fldtxt'), QgsFieldModel.IsExpressionRole))
        self.assertFalse(m.data(m.indexFromName('fldint'), QgsFieldModel.IsExpressionRole))
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.IsExpressionRole))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.IsExpressionRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertTrue(m.data(m.indexFromName('an expression'), QgsFieldModel.IsExpressionRole))
        m.setAllowEmptyFieldName(True)
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.IsExpressionRole))

    def testExpressionValidityRole(self):
        l, m = create_model()
        self.assertTrue(m.data(m.indexFromName('fldtxt'), QgsFieldModel.ExpressionValidityRole))
        self.assertTrue(m.data(m.indexFromName('fldint'), QgsFieldModel.ExpressionValidityRole))
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.ExpressionValidityRole))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.ExpressionValidityRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.ExpressionValidityRole))
        m.setAllowEmptyFieldName(True)
        self.assertTrue(m.data(m.indexFromName(None), QgsFieldModel.ExpressionValidityRole))

    def testFieldTypeRole(self):
        l, m = create_model()
        self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldTypeRole), QVariant.String)
        self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldTypeRole), QVariant.Int)
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldTypeRole))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldTypeRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldTypeRole))
        m.setAllowEmptyFieldName(True)
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldTypeRole))

    def testFieldOriginRole(self):
        l, m = create_model()
        self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldOriginRole), QgsFields.OriginProvider)
        self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldOriginRole), QgsFields.OriginProvider)
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldOriginRole))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldOriginRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldOriginRole))
        m.setAllowEmptyFieldName(True)
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldOriginRole))

    def testIsEmptyRole(self):
        l, m = create_model()
        # Fixed: the originals passed QgsFields.OriginProvider as the second
        # (msg) argument to assertFalse -- a copy-paste remnant from
        # testFieldOriginRole with no effect on the assertion itself.
        self.assertFalse(m.data(m.indexFromName('fldtxt'), QgsFieldModel.IsEmptyRole))
        self.assertFalse(m.data(m.indexFromName('fldint'), QgsFieldModel.IsEmptyRole))
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.IsEmptyRole))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.IsEmptyRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.IsEmptyRole))
        m.setAllowEmptyFieldName(True)
        self.assertTrue(m.data(m.indexFromName(None), QgsFieldModel.IsEmptyRole))

    def testDisplayRole(self):
        l, m = create_model()
        self.assertEqual(m.data(m.indexFromName('fldtxt'), Qt.DisplayRole), 'fldtxt')
        self.assertEqual(m.data(m.indexFromName('fldint'), Qt.DisplayRole), 'fldint')
        self.assertFalse(m.data(m.indexFromName('an expression'), Qt.DisplayRole))
        self.assertFalse(m.data(m.indexFromName(None), Qt.DisplayRole))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertEqual(m.data(m.indexFromName('an expression'), Qt.DisplayRole), 'an expression')
        m.setAllowEmptyFieldName(True)
        self.assertFalse(m.data(m.indexFromName(None), Qt.DisplayRole))

    def testManualFields(self):
        _, m = create_model()
        fields = QgsFields()
        fields.append(QgsField('f1', QVariant.String))
        fields.append(QgsField('f2', QVariant.String))
        m.setFields(fields)
        self.assertEqual(m.rowCount(), 2)
        self.assertEqual(m.data(m.index(0, 0, QModelIndex()), Qt.DisplayRole), 'f1')
        self.assertEqual(m.data(m.index(1, 0, QModelIndex()), Qt.DisplayRole), 'f2')

    def testEditorWidgetTypeRole(self):
        l, m = create_model()
        self.assertEqual(m.data(m.indexFromName('fldtxt'), QgsFieldModel.EditorWidgetType), 'Hidden')
        self.assertEqual(m.data(m.indexFromName('fldint'), QgsFieldModel.EditorWidgetType), 'ValueMap')
        self.assertIsNone(m.data(m.indexFromName('an expression'), QgsFieldModel.EditorWidgetType))
        self.assertIsNone(m.data(m.indexFromName(None), QgsFieldModel.EditorWidgetType))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertIsNone(m.data(m.indexFromName('an expression'), QgsFieldModel.EditorWidgetType))
        m.setAllowEmptyFieldName(True)
        self.assertIsNone(m.data(m.indexFromName(None), QgsFieldModel.EditorWidgetType))

    def testJoinedFieldIsEditableRole(self):
        layer = QgsVectorLayer("Point?field=id_a:integer",
                               "addfeat", "memory")
        layer2 = QgsVectorLayer("Point?field=id_b:integer&field=value_b",
                                "addfeat", "memory")
        QgsProject.instance().addMapLayers([layer, layer2])
        # editable join
        join_info = QgsVectorLayerJoinInfo()
        join_info.setTargetFieldName("id_a")
        join_info.setJoinLayer(layer2)
        join_info.setJoinFieldName("id_b")
        join_info.setPrefix("B_")
        join_info.setEditable(True)
        join_info.setUpsertOnEdit(True)
        layer.addJoin(join_info)
        m = QgsFieldModel()
        m.setLayer(layer)
        self.assertIsNone(m.data(m.indexFromName('id_a'), QgsFieldModel.JoinedFieldIsEditable))
        self.assertTrue(m.data(m.indexFromName('B_value_b'), QgsFieldModel.JoinedFieldIsEditable))
        self.assertIsNone(m.data(m.indexFromName('an expression'), QgsFieldModel.JoinedFieldIsEditable))
        self.assertIsNone(m.data(m.indexFromName(None), QgsFieldModel.JoinedFieldIsEditable))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertIsNone(m.data(m.indexFromName('an expression'), QgsFieldModel.JoinedFieldIsEditable))
        m.setAllowEmptyFieldName(True)
        self.assertIsNone(m.data(m.indexFromName(None), QgsFieldModel.JoinedFieldIsEditable))
        proxy_m = QgsFieldProxyModel()
        proxy_m.setFilters(QgsFieldProxyModel.AllTypes | QgsFieldProxyModel.HideReadOnly)
        proxy_m.sourceFieldModel().setLayer(layer)
        self.assertEqual(proxy_m.rowCount(), 2)
        self.assertEqual(proxy_m.data(proxy_m.index(0, 0)), 'id_a')
        self.assertEqual(proxy_m.data(proxy_m.index(1, 0)), 'B_value_b')
        # not editable join
        layer3 = QgsVectorLayer("Point?field=id_a:integer",
                                "addfeat", "memory")
        QgsProject.instance().addMapLayers([layer3])
        join_info = QgsVectorLayerJoinInfo()
        join_info.setTargetFieldName("id_a")
        join_info.setJoinLayer(layer2)
        join_info.setJoinFieldName("id_b")
        join_info.setPrefix("B_")
        join_info.setEditable(False)
        layer3.addJoin(join_info)
        m = QgsFieldModel()
        m.setLayer(layer3)
        self.assertIsNone(m.data(m.indexFromName('id_a'), QgsFieldModel.JoinedFieldIsEditable))
        self.assertFalse(m.data(m.indexFromName('B_value_b'), QgsFieldModel.JoinedFieldIsEditable))
        self.assertIsNone(m.data(m.indexFromName('an expression'), QgsFieldModel.JoinedFieldIsEditable))
        self.assertIsNone(m.data(m.indexFromName(None), QgsFieldModel.JoinedFieldIsEditable))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertIsNone(m.data(m.indexFromName('an expression'), QgsFieldModel.JoinedFieldIsEditable))
        m.setAllowEmptyFieldName(True)
        self.assertIsNone(m.data(m.indexFromName(None), QgsFieldModel.JoinedFieldIsEditable))
        proxy_m = QgsFieldProxyModel()
        proxy_m.sourceFieldModel().setLayer(layer3)
        proxy_m.setFilters(QgsFieldProxyModel.AllTypes | QgsFieldProxyModel.HideReadOnly)
        self.assertEqual(proxy_m.rowCount(), 1)
        self.assertEqual(proxy_m.data(proxy_m.index(0, 0)), 'id_a')

    def testFieldIsWidgetEditableRole(self):
        l, m = create_model()
        self.assertTrue(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldIsWidgetEditable))
        self.assertTrue(m.data(m.indexFromName('fldint'), QgsFieldModel.FieldIsWidgetEditable))
        self.assertFalse(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldIsWidgetEditable))
        self.assertFalse(m.data(m.indexFromName(None), QgsFieldModel.FieldIsWidgetEditable))
        m.setAllowExpression(True)
        m.setExpression('an expression')
        self.assertTrue(m.data(m.indexFromName('an expression'), QgsFieldModel.FieldIsWidgetEditable))
        m.setAllowEmptyFieldName(True)
        self.assertTrue(m.data(m.indexFromName(None), QgsFieldModel.FieldIsWidgetEditable))
        editFormConfig = l.editFormConfig()
        idx = l.fields().indexOf('fldtxt')
        # Make fldtxt readOnly
        editFormConfig.setReadOnly(idx, True)
        l.setEditFormConfig(editFormConfig)
        # It's read only, so the widget is NOT editable
        self.assertFalse(m.data(m.indexFromName('fldtxt'), QgsFieldModel.FieldIsWidgetEditable))

    def testFieldTooltip(self):
        f = QgsField('my_string', QVariant.String, 'string')
        self.assertEqual(QgsFieldModel.fieldToolTip(f), "<b>my_string</b><br><font style='font-family:monospace; white-space: nowrap;'>string NULL</font>")
        f.setAlias('my alias')
        self.assertEqual(QgsFieldModel.fieldToolTip(f), "<b>my alias</b> (my_string)<br><font style='font-family:monospace; white-space: nowrap;'>string NULL</font>")
        f.setLength(20)
        self.assertEqual(QgsFieldModel.fieldToolTip(f), "<b>my alias</b> (my_string)<br><font style='font-family:monospace; white-space: nowrap;'>string(20) NULL</font>")
        f = QgsField('my_real', QVariant.Double, 'real', 8, 3)
        self.assertEqual(QgsFieldModel.fieldToolTip(f), "<b>my_real</b><br><font style='font-family:monospace; white-space: nowrap;'>real(8, 3) NULL</font>")
        f.setComment('Comment text')
        self.assertEqual(QgsFieldModel.fieldToolTip(f), "<b>my_real</b><br><font style='font-family:monospace; white-space: nowrap;'>real(8, 3) NULL</font><br><em>Comment text</em>")

    def testFieldTooltipExtended(self):
        layer = QgsVectorLayer("Point?", "tooltip", "memory")
        f = QgsField('my_real', QVariant.Double, 'real', 8, 3, 'Comment text')
        layer.addExpressionField('1+1', f)
        layer.updateFields()
        self.assertEqual(QgsFieldModel.fieldToolTipExtended(QgsField('my_string', QVariant.String, 'string'), layer), '')
        self.assertEqual(QgsFieldModel.fieldToolTipExtended(f, layer), "<b>my_real</b><br><font style='font-family:monospace; white-space: nowrap;'>real(8, 3) NULL</font><br><em>Comment text</em><br><font style='font-family:monospace;'>1+1</font>")
        f.setAlias('my alias')
        constraints = f.constraints()
        constraints.setConstraint(QgsFieldConstraints.ConstraintUnique)
        f.setConstraints(constraints)
        self.assertEqual(QgsFieldModel.fieldToolTipExtended(f, layer), "<b>my alias</b> (my_real)<br><font style='font-family:monospace; white-space: nowrap;'>real(8, 3) NULL UNIQUE</font><br><em>Comment text</em><br><font style='font-family:monospace;'>1+1</font>")
# Allow running this test module directly (outside the QGIS test harness).
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
jscott413/maidsinharlem | flask/lib/python2.7/site-packages/pip/_vendor/ipaddress.py | 339 | 80176 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.17'
# Compatibility functions
# Accepted integer types: on Python 2 both int and long, on Python 3 just int.
_compat_int_types = (int,)
try:
    _compat_int_types = (int, long)
except NameError:
    pass
# Text type: Python 2's unicode, or Python 3's str (where bytes != str).
try:
    _compat_str = unicode
except NameError:
    _compat_str = str
    assert bytes != str
if b'\0'[0] == 0:  # Python 3 semantics
    # Indexing bytes already yields ints; pass through unchanged.
    def _compat_bytes_to_byte_vals(byt):
        return byt
else:
    # Python 2: indexing bytes yields 1-char strings; unpack each to an int.
    def _compat_bytes_to_byte_vals(byt):
        return [struct.unpack(b'!B', b)[0] for b in byt]
try:
    # Python 3 fast path: int.from_bytes accepts a sequence of byte values.
    _compat_int_from_byte_vals = int.from_bytes
except AttributeError:
    # Python 2 fallback: fold big-endian byte values into a single integer.
    def _compat_int_from_byte_vals(bytvals, endianess):
        assert endianess == 'big'
        res = 0
        for bv in bytvals:
            assert isinstance(bv, _compat_int_types)
            res = (res << 8) + bv
        return res
def _compat_to_bytes(intval, length, endianess):
    """Pack *intval* into *length* big-endian bytes (length must be 4 or 16)."""
    assert isinstance(intval, _compat_int_types)
    assert endianess == 'big'
    if length == 4:
        if not 0 <= intval < 2 ** 32:
            raise struct.error("integer out of range for 'I' format code")
        return struct.pack(b'!I', intval)
    if length == 16:
        if not 0 <= intval < 2 ** 128:
            raise struct.error("integer out of range for 'QQ' format code")
        return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
    raise NotImplementedError()
if hasattr(int, 'bit_length'):
    # Not int.bit_length , since that won't work in 2.7 where long exists
    def _compat_bit_length(i):
        return i.bit_length()
else:
    # Fallback for very old Pythons: count right-shifts until zero.
    def _compat_bit_length(i):
        for res in itertools.count():
            if i >> res == 0:
                return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
    """Derive !=, <=, >, >= from the __eq__ and __lt__ a subclass provides.

    Every derived method propagates NotImplemented so that Python can still
    try the reflected comparison on the other operand.
    """
    __slots__ = ()

    # Helper that derives the other comparison operations from
    # __lt__ and __eq__
    # We avoid functools.total_ordering because it doesn't handle
    # NotImplemented correctly yet (http://bugs.python.org/issue10042)
    def __eq__(self, other):
        # Subclasses must override.
        raise NotImplementedError

    def __ne__(self, other):
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return NotImplemented
        return not equal

    def __lt__(self, other):
        # Subclasses must override.
        raise NotImplementedError

    def __le__(self, other):
        less = self.__lt__(other)
        if less is NotImplemented or not less:
            return self.__eq__(other)
        return less

    def __gt__(self, other):
        less = self.__lt__(other)
        if less is NotImplemented:
            return NotImplemented
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return NotImplemented
        return not (less or equal)

    def __ge__(self, other):
        less = self.__lt__(other)
        if less is NotImplemented:
            return NotImplemented
        return not less
# Address widths in bits.
IPV4LENGTH = 32
IPV6LENGTH = 128
# Narrow ValueError subclasses raised by the parsing helpers below, so the
# factory functions can distinguish "wrong family" from other ValueErrors.
class AddressValueError(ValueError):
    """A Value Error related to the address."""


class NetmaskValueError(ValueError):
    """A Value Error related to the netmask."""
def ip_address(address):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.

    Returns:
        An IPv4Address or IPv6Address object.

    Raises:
        ValueError: if the *address* passed isn't either a v4 or a v6
          address
    """
    # Try each address family in turn; the constructors validate input.
    for address_class in (IPv4Address, IPv6Address):
        try:
            return address_class(address)
        except (AddressValueError, NetmaskValueError):
            pass
    if isinstance(address, bytes):
        raise AddressValueError(
            '%r does not appear to be an IPv4 or IPv6 address. '
            'Did you pass in a bytes (str in Python 2) instead of'
            ' a unicode object?' % address)
    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
                     address)
def ip_network(address, strict=True):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP network.  Either IPv4 or
          IPv6 networks may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.
        strict: When True (default), host bits set in *address* raise
          ValueError inside the network constructor.

    Returns:
        An IPv4Network or IPv6Network object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address.  Or if the network has host bits set.
    """
    # Try each network family in turn; the constructors validate input.
    for network_class in (IPv4Network, IPv6Network):
        try:
            return network_class(address, strict)
        except (AddressValueError, NetmaskValueError):
            pass
    if isinstance(address, bytes):
        raise AddressValueError(
            '%r does not appear to be an IPv4 or IPv6 network. '
            'Did you pass in a bytes (str in Python 2) instead of'
            ' a unicode object?' % address)
    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
                     address)
def ip_interface(address):
    """Take an IP string/int and return an object of the correct type.

    Args:
        address: A string or integer, the IP address.  Either IPv4 or
          IPv6 addresses may be supplied; integers less than 2**32 will
          be considered to be IPv4 by default.

    Returns:
        An IPv4Interface or IPv6Interface object.

    Raises:
        ValueError: if the string passed isn't either a v4 or a v6
          address.

    Notes:
        The IPv?Interface classes describe an Address on a particular
        Network, so they're basically a combination of both the Address
        and Network classes.
    """
    # Try each interface family in turn; the constructors validate input.
    for interface_class in (IPv4Interface, IPv6Interface):
        try:
            return interface_class(address)
        except (AddressValueError, NetmaskValueError):
            pass
    raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
                     address)
def v4_int_to_packed(address):
    """Represent an address as 4 packed bytes in network (big-endian) order.

    Args:
        address: An integer representation of an IPv4 IP address.

    Returns:
        The integer address packed as 4 bytes in network (big-endian) order.

    Raises:
        ValueError: If the integer is negative or too large to be an
          IPv4 IP address.
    """
    try:
        packed = _compat_to_bytes(address, 4, 'big')
    except (struct.error, OverflowError):
        # Normalize low-level packing failures into a ValueError.
        raise ValueError("Address negative or too large for IPv4")
    return packed
def v6_int_to_packed(address):
    """Represent an address as 16 packed bytes in network (big-endian) order.

    Args:
        address: An integer representation of an IPv6 IP address.

    Returns:
        The integer address packed as 16 bytes in network (big-endian) order.

    Raises:
        ValueError: If the integer is negative or too large to be an
          IPv6 IP address.
    """
    try:
        packed = _compat_to_bytes(address, 16, 'big')
    except (struct.error, OverflowError):
        # Normalize low-level packing failures into a ValueError.
        raise ValueError("Address negative or too large for IPv6")
    return packed
def _split_optional_netmask(address):
    """Split "addr[/mask]" into parts, raising AddressValueError on extra '/'.

    Returns a one- or two-element list: [address] or [address, netmask].
    """
    parts = _compat_str(address).split('/')
    if len(parts) > 2:
        raise AddressValueError("Only one '/' permitted in %r" % address)
    return parts
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
    """Count the number of zero bits on the right hand side.

    Args:
        number: an integer.
        bits: maximum number of bits to count.

    Returns:
        The number of zero bits on the right hand side of the number,
        capped at *bits* (an all-zero number counts as *bits* zeros).
    """
    if number == 0:
        return bits
    # ~number & (number - 1) isolates the trailing zeros as trailing ones.
    trailing = _compat_bit_length(~number & (number - 1))
    return trailing if trailing < bits else bits
def summarize_address_range(first, last):
    """Yield the minimal set of networks covering an inclusive address range.

    Example:
        summarize_address_range(IPv4Address('192.0.2.0'),
                                IPv4Address('192.0.2.130'))
        yields 192.0.2.0/25, 192.0.2.128/31 and 192.0.2.130/32.

    Args:
        first: the first IPv4Address or IPv6Address in the range.
        last: the last IPv4Address or IPv6Address in the range.

    Returns:
        An iterator of the summarized IPv(4|6) network objects.

    Raise:
        TypeError:
            If the first and last objects are not IP addresses.
            If the first and last objects are not the same version.
        ValueError:
            If the last object is not greater than the first.
            If the version of the first address is not 4 or 6.
    """
    if not (isinstance(first, _BaseAddress) and
            isinstance(last, _BaseAddress)):
        raise TypeError('first and last must be IP addresses, not networks')
    if first.version != last.version:
        raise TypeError("%s and %s are not of the same version" % (
            first, last))
    if first > last:
        raise ValueError('last IP address must be greater than first')
    if first.version == 4:
        net_cls = IPv4Network
    elif first.version == 6:
        net_cls = IPv6Network
    else:
        raise ValueError('unknown IP version')
    max_bits = first._max_prefixlen
    current = first._ip
    upper = last._ip
    while current <= upper:
        # The block size is limited both by alignment of the current
        # address and by how many addresses remain in the range.
        align_bits = _count_righthand_zero_bits(current, max_bits)
        span_bits = _compat_bit_length(upper - current + 1) - 1
        nbits = align_bits if align_bits < span_bits else span_bits
        yield net_cls((current, max_bits - nbits))
        current += 1 << nbits
        # Guard against wrap-around at the top of the address space.
        if current - 1 == net_cls._ALL_ONES:
            break
def _collapse_addresses_internal(addresses):
    """Collapse adjacent sibling netblocks by repeated supernet merging.

    Example:
        ip1 = IPv4Network('192.0.2.0/26')
        ip2 = IPv4Network('192.0.2.64/26')
        ip3 = IPv4Network('192.0.2.128/26')
        ip4 = IPv4Network('192.0.2.192/26')
        _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
          [IPv4Network('192.0.2.0/24')]

    This shouldn't be called directly; it is called via
      collapse_addresses([]).

    Args:
        addresses: A list of IPv4Network's or IPv6Network's

    Returns:
        A list of IPv4Network's or IPv6Network's depending on what we were
        passed.
    """
    # Phase 1: merge pairs of sibling subnets into their supernet, and
    # keep merging until no candidate can be combined further.
    merged = {}
    pending = list(addresses)
    while pending:
        candidate = pending.pop()
        parent = candidate.supernet()
        sibling = merged.get(parent)
        if sibling is None:
            merged[parent] = candidate
        elif sibling != candidate:
            # Both halves of the supernet are present: merge them.
            del merged[parent]
            pending.append(parent)
    # Phase 2: yield in sorted order, skipping any network already
    # covered by the previously yielded (larger) one.
    previous = None
    for candidate in sorted(merged.values()):
        if previous is not None:
            # Since they are sorted,
            # previous.network_address <= candidate.network_address holds.
            if previous.broadcast_address >= candidate.broadcast_address:
                continue
        yield candidate
        previous = candidate
def collapse_addresses(addresses):
    """Collapse a list of IP objects.

    Example:
        collapse_addresses([IPv4Network('192.0.2.0/25'),
                            IPv4Network('192.0.2.128/25')]) ->
          [IPv4Network('192.0.2.0/24')]

    Args:
        addresses: An iterator of IPv4Network or IPv6Network objects.

    Returns:
        An iterator of the collapsed IPv(4|6)Network objects.

    Raises:
        TypeError: If passed a list of mixed version objects.
    """
    ips = []
    nets = []

    def _require_same_version(seq, element):
        # All items routed to one bucket must share an IP version.
        if seq and seq[-1]._version != element._version:
            raise TypeError("%s and %s are not of the same version" % (
                element, seq[-1]))

    # Partition the input into single addresses and real networks.
    for obj in addresses:
        if isinstance(obj, _BaseAddress):
            _require_same_version(ips, obj)
            ips.append(obj)
        elif obj._prefixlen == obj._max_prefixlen:
            # A /32 (or /128) network is treated as a single address.
            _require_same_version(ips, obj)
            try:
                ips.append(obj.ip)
            except AttributeError:
                ips.append(obj.network_address)
        else:
            _require_same_version(nets, obj)
            nets.append(obj)

    # Sort and dedup the addresses, then turn each consecutive run into
    # the smallest covering set of networks.
    summarized = []
    unique_ips = sorted(set(ips))
    if unique_ips:
        for first, last in _find_address_range(unique_ips):
            summarized.extend(summarize_address_range(first, last))
    return _collapse_addresses_internal(summarized + nets)
def get_mixed_type_key(obj):
    """Return a key suitable for sorting between networks and addresses.

    Address and Network objects are not sortable by default; they're
    fundamentally different, so the expression

        IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')

    doesn't make any sense.  There are some times however, where you may
    wish to have ipaddress sort these for you anyway. If you need to do
    this, you can use this function as the key= argument to sorted().

    Args:
        obj: either a Network or Address object.

    Returns:
        appropriate key, or NotImplemented for unrecognized types.
    """
    if isinstance(obj, _BaseNetwork):
        return obj._get_networks_key()
    if isinstance(obj, _BaseAddress):
        return obj._get_address_key()
    return NotImplemented
class _IPAddressBase(_TotalOrderingMixin):
    """The mother class."""
    # Shared root for both single addresses and networks; provides the
    # prefix/netmask parsing helpers used by every concrete subclass.
    __slots__ = ()
    @property
    def exploded(self):
        """Return the longhand version of the IP address as a string."""
        return self._explode_shorthand_ip_string()
    @property
    def compressed(self):
        """Return the shorthand version of the IP address as a string."""
        return _compat_str(self)
    @property
    def reverse_pointer(self):
        """The name of the reverse DNS pointer for the IP address, e.g.:
            >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
            '1.0.0.127.in-addr.arpa'
            >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
            '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
        """
        return self._reverse_pointer()
    @property
    def version(self):
        # Concrete subclasses override this (see _BaseV4.version).
        msg = '%200s has no version specified' % (type(self),)
        raise NotImplementedError(msg)
    def _check_int_address(self, address):
        # Validate that an integer fits in this version's address space.
        if address < 0:
            msg = "%d (< 0) is not permitted as an IPv%d address"
            raise AddressValueError(msg % (address, self._version))
        if address > self._ALL_ONES:
            msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
            raise AddressValueError(msg % (address, self._max_prefixlen,
                                           self._version))
    def _check_packed_address(self, address, expected_len):
        # Validate that a bytes object has exactly the packed length.
        address_len = len(address)
        if address_len != expected_len:
            msg = (
                '%r (len %d != %d) is not permitted as an IPv%d address. '
                'Did you pass in a bytes (str in Python 2) instead of'
                ' a unicode object?'
            )
            raise AddressValueError(msg % (address, address_len,
                                           expected_len, self._version))
    @classmethod
    def _ip_int_from_prefix(cls, prefixlen):
        """Turn the prefix length into a bitwise netmask
        Args:
            prefixlen: An integer, the prefix length.
        Returns:
            An integer.
        """
        return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
    @classmethod
    def _prefix_from_ip_int(cls, ip_int):
        """Return prefix length from the bitwise netmask.
        Args:
            ip_int: An integer, the netmask in expanded bitwise format
        Returns:
            An integer, the prefix length.
        Raises:
            ValueError: If the input intermingles zeroes & ones
        """
        trailing_zeroes = _count_righthand_zero_bits(ip_int,
                                                     cls._max_prefixlen)
        prefixlen = cls._max_prefixlen - trailing_zeroes
        # A valid netmask is a run of ones followed by a run of zeroes;
        # verify the leading part is all ones.
        leading_ones = ip_int >> trailing_zeroes
        all_ones = (1 << prefixlen) - 1
        if leading_ones != all_ones:
            byteslen = cls._max_prefixlen // 8
            details = _compat_to_bytes(ip_int, byteslen, 'big')
            msg = 'Netmask pattern %r mixes zeroes & ones'
            raise ValueError(msg % details)
        return prefixlen
    @classmethod
    def _report_invalid_netmask(cls, netmask_str):
        # Single choke point so every bad mask raises the same error type.
        msg = '%r is not a valid netmask' % netmask_str
        raise NetmaskValueError(msg)
    @classmethod
    def _prefix_from_prefix_string(cls, prefixlen_str):
        """Return prefix length from a numeric string
        Args:
            prefixlen_str: The string to be converted
        Returns:
            An integer, the prefix length.
        Raises:
            NetmaskValueError: If the input is not a valid netmask
        """
        # int allows a leading +/- as well as surrounding whitespace,
        # so we ensure that isn't the case
        if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
            cls._report_invalid_netmask(prefixlen_str)
        try:
            prefixlen = int(prefixlen_str)
        except ValueError:
            cls._report_invalid_netmask(prefixlen_str)
        if not (0 <= prefixlen <= cls._max_prefixlen):
            cls._report_invalid_netmask(prefixlen_str)
        return prefixlen
    @classmethod
    def _prefix_from_ip_string(cls, ip_str):
        """Turn a netmask/hostmask string into a prefix length
        Args:
            ip_str: The netmask/hostmask to be converted
        Returns:
            An integer, the prefix length.
        Raises:
            NetmaskValueError: If the input is not a valid netmask/hostmask
        """
        # Parse the netmask/hostmask like an IP address.
        try:
            ip_int = cls._ip_int_from_string(ip_str)
        except AddressValueError:
            cls._report_invalid_netmask(ip_str)
        # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
        # Note that the two ambiguous cases (all-ones and all-zeroes) are
        # treated as netmasks.
        try:
            return cls._prefix_from_ip_int(ip_int)
        except ValueError:
            pass
        # Invert the bits, and try matching a /0+1+/ hostmask instead.
        ip_int ^= cls._ALL_ONES
        try:
            return cls._prefix_from_ip_int(ip_int)
        except ValueError:
            cls._report_invalid_netmask(ip_str)
    def __reduce__(self):
        # Pickle via the string form, which every subclass can parse.
        return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
    """A generic IP object.

    Version-independent behaviour shared by single IP addresses
    (comparison, hashing, integer arithmetic, string conversion).
    """
    __slots__ = ()

    def __int__(self):
        return self._ip

    def __eq__(self, other):
        try:
            return self._ip == other._ip and self._version == other._version
        except AttributeError:
            # Not address-like: let Python try the reflected comparison.
            return NotImplemented

    def __lt__(self, other):
        if not isinstance(other, _IPAddressBase):
            return NotImplemented
        if not isinstance(other, _BaseAddress):
            raise TypeError('%s and %s are not of the same type' % (
                self, other))
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                self, other))
        # Same-version addresses order by their integer value; an equal
        # pair is simply "not less than".
        return self._ip < other._ip

    # Shorthand for Integer addition and subtraction. This is not
    # meant to ever support addition/subtraction of addresses.
    def __add__(self, other):
        if isinstance(other, _compat_int_types):
            return self.__class__(int(self) + other)
        return NotImplemented

    def __sub__(self, other):
        if isinstance(other, _compat_int_types):
            return self.__class__(int(self) - other)
        return NotImplemented

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, _compat_str(self))

    def __str__(self):
        return _compat_str(self._string_from_ip_int(self._ip))

    def __hash__(self):
        # Hash the hex string so int and long hash identically on py2.
        return hash(hex(int(self._ip)))

    def _get_address_key(self):
        return (self._version, self)

    def __reduce__(self):
        # Pickle via the (fast) integer constructor path.
        return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
    """A generic IP network object.
    This IP class contains the version independent methods which are
    used by networks.
    """
    def __init__(self, address):
        # Derived values (broadcast_address, hostmask) are memoized here;
        # subclass constructors set network_address/netmask/_prefixlen.
        self._cache = {}
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
    def __str__(self):
        return '%s/%d' % (self.network_address, self.prefixlen)
    def hosts(self):
        """Generate Iterator over usable hosts in a network.
        This is like __iter__ except it doesn't return the network
        or broadcast addresses.
        """
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        for x in _compat_range(network + 1, broadcast):
            yield self._address_class(x)
    def __iter__(self):
        # Unlike hosts(), includes the network and broadcast addresses.
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        for x in _compat_range(network, broadcast + 1):
            yield self._address_class(x)
    def __getitem__(self, n):
        # Index addresses within the network; negative indices count
        # back from the broadcast address, like sequence indexing.
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        if n >= 0:
            if network + n > broadcast:
                raise IndexError('address out of range')
            return self._address_class(network + n)
        else:
            n += 1
            if broadcast + n < network:
                raise IndexError('address out of range')
            return self._address_class(broadcast + n)
    def __lt__(self, other):
        if not isinstance(other, _IPAddressBase):
            return NotImplemented
        if not isinstance(other, _BaseNetwork):
            raise TypeError('%s and %s are not of the same type' % (
                self, other))
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (
                self, other))
        # Order by network address first, then by netmask.
        if self.network_address != other.network_address:
            return self.network_address < other.network_address
        if self.netmask != other.netmask:
            return self.netmask < other.netmask
        return False
    def __eq__(self, other):
        try:
            return (self._version == other._version and
                    self.network_address == other.network_address and
                    int(self.netmask) == int(other.netmask))
        except AttributeError:
            return NotImplemented
    def __hash__(self):
        return hash(int(self.network_address) ^ int(self.netmask))
    def __contains__(self, other):
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if isinstance(other, _BaseNetwork):
            return False
        # dealing with another address
        else:
            # address
            return (int(self.network_address) <= int(other._ip) <=
                    int(self.broadcast_address))
    def overlaps(self, other):
        """Tell if self is partly contained in other."""
        return self.network_address in other or (
            self.broadcast_address in other or (
                other.network_address in self or (
                    other.broadcast_address in self)))
    @property
    def broadcast_address(self):
        # Computed lazily and memoized in self._cache.
        x = self._cache.get('broadcast_address')
        if x is None:
            x = self._address_class(int(self.network_address) |
                                    int(self.hostmask))
            self._cache['broadcast_address'] = x
        return x
    @property
    def hostmask(self):
        # Bitwise complement of the netmask; memoized in self._cache.
        x = self._cache.get('hostmask')
        if x is None:
            x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
            self._cache['hostmask'] = x
        return x
    @property
    def with_prefixlen(self):
        return '%s/%d' % (self.network_address, self._prefixlen)
    @property
    def with_netmask(self):
        return '%s/%s' % (self.network_address, self.netmask)
    @property
    def with_hostmask(self):
        return '%s/%s' % (self.network_address, self.hostmask)
    @property
    def num_addresses(self):
        """Number of hosts in the current subnet."""
        return int(self.broadcast_address) - int(self.network_address) + 1
    @property
    def _address_class(self):
        # Returning bare address objects (rather than interfaces) allows for
        # more consistent behaviour across the network address, broadcast
        # address and individual host addresses.
        msg = '%200s has no associated address class' % (type(self),)
        raise NotImplementedError(msg)
    @property
    def prefixlen(self):
        return self._prefixlen
    def address_exclude(self, other):
        """Remove an address from a larger block.
        For example:
            addr1 = ip_network('192.0.2.0/28')
            addr2 = ip_network('192.0.2.1/32')
            list(addr1.address_exclude(addr2)) =
                [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
                 IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
        or IPv6:
            addr1 = ip_network('2001:db8::1/32')
            addr2 = ip_network('2001:db8::1/128')
            list(addr1.address_exclude(addr2)) =
                [ip_network('2001:db8::1/128'),
                 ip_network('2001:db8::2/127'),
                 ip_network('2001:db8::4/126'),
                 ip_network('2001:db8::8/125'),
                 ...
                 ip_network('2001:db8:8000::/33')]
        Args:
            other: An IPv4Network or IPv6Network object of the same type.
        Returns:
            An iterator of the IPv(4|6)Network objects which is self
            minus other.
        Raises:
            TypeError: If self and other are of differing address
              versions, or if other is not a network object.
            ValueError: If other is not completely contained by self.
        """
        if not self._version == other._version:
            raise TypeError("%s and %s are not of the same version" % (
                self, other))
        if not isinstance(other, _BaseNetwork):
            raise TypeError("%s is not a network object" % other)
        if not other.subnet_of(self):
            raise ValueError('%s not contained in %s' % (other, self))
        if other == self:
            return
        # Make sure we're comparing the network of other.
        other = other.__class__('%s/%s' % (other.network_address,
                                           other.prefixlen))
        # Repeatedly bisect: yield the half that doesn't contain `other`,
        # recurse into the half that does, until we reach `other` itself.
        s1, s2 = self.subnets()
        while s1 != other and s2 != other:
            if other.subnet_of(s1):
                yield s2
                s1, s2 = s1.subnets()
            elif other.subnet_of(s2):
                yield s1
                s1, s2 = s2.subnets()
            else:
                # If we got here, there's a bug somewhere.
                raise AssertionError('Error performing exclusion: '
                                     's1: %s s2: %s other: %s' %
                                     (s1, s2, other))
        if s1 == other:
            yield s2
        elif s2 == other:
            yield s1
        else:
            # If we got here, there's a bug somewhere.
            raise AssertionError('Error performing exclusion: '
                                 's1: %s s2: %s other: %s' %
                                 (s1, s2, other))
    def compare_networks(self, other):
        """Compare two IP objects.
        This is only concerned about the comparison of the integer
        representation of the network addresses.  This means that the
        host bits aren't considered at all in this method.  If you want
        to compare host bits, you can easily enough do a
        'HostA._ip < HostB._ip'
        Args:
            other: An IP object.
        Returns:
            If the IP versions of self and other are the same, returns:
            -1 if self < other:
              eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
              IPv6Network('2001:db8::1000/124') <
              IPv6Network('2001:db8::2000/124')
            0 if self == other
              eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
              IPv6Network('2001:db8::1000/124') ==
              IPv6Network('2001:db8::1000/124')
            1 if self > other
              eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
              IPv6Network('2001:db8::2000/124') >
              IPv6Network('2001:db8::1000/124')
        Raises:
            TypeError if the IP versions are different.
        """
        # does this need to raise a ValueError?
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same type' % (
                self, other))
        # self._version == other._version below here:
        if self.network_address < other.network_address:
            return -1
        if self.network_address > other.network_address:
            return 1
        # self.network_address == other.network_address below here:
        if self.netmask < other.netmask:
            return -1
        if self.netmask > other.netmask:
            return 1
        return 0
    def _get_networks_key(self):
        """Network-only key function.
        Returns an object that identifies this address' network and
        netmask. This function is a suitable "key" argument for sorted()
        and list.sort().
        """
        return (self._version, self.network_address, self.netmask)
    def subnets(self, prefixlen_diff=1, new_prefix=None):
        """The subnets which join to make the current subnet.
        In the case that self contains only one IP
        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
        for IPv6), yield an iterator with just ourself.
        Args:
            prefixlen_diff: An integer, the amount the prefix length
              should be increased by. This should not be set if
              new_prefix is also set.
            new_prefix: The desired new prefix length. This must be a
              larger number (smaller prefix) than the existing prefix.
              This should not be set if prefixlen_diff is also set.
        Returns:
            An iterator of IPv(4|6) objects.
        Raises:
            ValueError: The prefixlen_diff is too small or too large.
                OR
            prefixlen_diff and new_prefix are both set or new_prefix
              is a smaller number than the current prefix (smaller
              number means a larger network)
        """
        if self._prefixlen == self._max_prefixlen:
            yield self
            return
        if new_prefix is not None:
            if new_prefix < self._prefixlen:
                raise ValueError('new prefix must be longer')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = new_prefix - self._prefixlen
        if prefixlen_diff < 0:
            raise ValueError('prefix length diff must be > 0')
        new_prefixlen = self._prefixlen + prefixlen_diff
        if new_prefixlen > self._max_prefixlen:
            raise ValueError(
                'prefix length diff %d is invalid for netblock %s' % (
                    new_prefixlen, self))
        start = int(self.network_address)
        end = int(self.broadcast_address) + 1
        # Each subnet covers (hostmask + 1) >> prefixlen_diff addresses.
        step = (int(self.hostmask) + 1) >> prefixlen_diff
        for new_addr in _compat_range(start, end, step):
            current = self.__class__((new_addr, new_prefixlen))
            yield current
    def supernet(self, prefixlen_diff=1, new_prefix=None):
        """The supernet containing the current network.
        Args:
            prefixlen_diff: An integer, the amount the prefix length of
              the network should be decreased by.  For example, given a
              /24 network and a prefixlen_diff of 3, a supernet with a
              /21 netmask is returned.
        Returns:
            An IPv4 network object.
        Raises:
            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
              a negative prefix length.
                OR
            If prefixlen_diff and new_prefix are both set or new_prefix is a
              larger number than the current prefix (larger number means a
              smaller network)
        """
        if self._prefixlen == 0:
            return self
        if new_prefix is not None:
            if new_prefix > self._prefixlen:
                raise ValueError('new prefix must be shorter')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = self._prefixlen - new_prefix
        new_prefixlen = self.prefixlen - prefixlen_diff
        if new_prefixlen < 0:
            raise ValueError(
                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
                (self.prefixlen, prefixlen_diff))
        # Shift the netmask left to clear the host bits exposed by the
        # shorter prefix.
        return self.__class__((
            int(self.network_address) & (int(self.netmask) << prefixlen_diff),
            new_prefixlen
        ))
    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.
        Returns:
            A boolean, True if the address is a multicast address.
            See RFC 2373 2.7 for details.
        """
        return (self.network_address.is_multicast and
                self.broadcast_address.is_multicast)
    def subnet_of(self, other):
        # True when this network is fully contained in `other`.
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if (hasattr(other, 'network_address') and
                hasattr(other, 'broadcast_address')):
            return (other.network_address <= self.network_address and
                    other.broadcast_address >= self.broadcast_address)
        # dealing with another address
        else:
            raise TypeError('Unable to test subnet containment with element '
                            'of type %s' % type(other))
    def supernet_of(self, other):
        # True when this network fully contains `other`.
        # always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # dealing with another network.
        if (hasattr(other, 'network_address') and
                hasattr(other, 'broadcast_address')):
            return (other.network_address >= self.network_address and
                    other.broadcast_address <= self.broadcast_address)
        # dealing with another address
        else:
            raise TypeError('Unable to test subnet containment with element '
                            'of type %s' % type(other))
    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.
        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.
        """
        return (self.network_address.is_reserved and
                self.broadcast_address.is_reserved)
    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.
        Returns:
            A boolean, True if the address is reserved per RFC 4291.
        """
        return (self.network_address.is_link_local and
                self.broadcast_address.is_link_local)
    @property
    def is_private(self):
        """Test if this address is allocated for private networks.
        Returns:
            A boolean, True if the address is reserved per
            iana-ipv4-special-registry or iana-ipv6-special-registry.
        """
        return (self.network_address.is_private and
                self.broadcast_address.is_private)
    @property
    def is_global(self):
        """Test if this address is allocated for public networks.
        Returns:
            A boolean, True if the address is not reserved per
            iana-ipv4-special-registry or iana-ipv6-special-registry.
        """
        return not self.is_private
    @property
    def is_unspecified(self):
        """Test if the address is unspecified.
        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.
        """
        return (self.network_address.is_unspecified and
                self.broadcast_address.is_unspecified)
    @property
    def is_loopback(self):
        """Test if the address is a loopback address.
        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.
        """
        return (self.network_address.is_loopback and
                self.broadcast_address.is_loopback)
class _BaseV4(object):
    """Base IPv4 object.
    The following methods are used by IPv4 objects in both single IP
    addresses and networks.
    """
    __slots__ = ()
    _version = 4
    # Equivalent to 255.255.255.255 or 32 bits of 1's.
    _ALL_ONES = (2 ** IPV4LENGTH) - 1
    _DECIMAL_DIGITS = frozenset('0123456789')
    # the valid octets for host and netmasks. only useful for IPv4.
    _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
    _max_prefixlen = IPV4LENGTH
    # There are only a handful of valid v4 netmasks, so we cache them all
    # when constructed (see _make_netmask()).
    _netmask_cache = {}
    def _explode_shorthand_ip_string(self):
        # IPv4 has no shorthand form, so the exploded form is just str().
        return _compat_str(self)
    @classmethod
    def _make_netmask(cls, arg):
        """Make a (netmask, prefix_len) tuple from the given argument.
        Argument can be:
        - an integer (the prefix length)
        - a string representing the prefix length (e.g. "24")
        - a string representing the prefix netmask (e.g. "255.255.255.0")
        """
        if arg not in cls._netmask_cache:
            if isinstance(arg, _compat_int_types):
                prefixlen = arg
            else:
                try:
                    # Check for a netmask in prefix length form
                    prefixlen = cls._prefix_from_prefix_string(arg)
                except NetmaskValueError:
                    # Check for a netmask or hostmask in dotted-quad form.
                    # This may raise NetmaskValueError.
                    prefixlen = cls._prefix_from_ip_string(arg)
            netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
            cls._netmask_cache[arg] = netmask, prefixlen
        return cls._netmask_cache[arg]
    @classmethod
    def _ip_int_from_string(cls, ip_str):
        """Turn the given IP string into an integer for comparison.
        Args:
            ip_str: A string, the IP ip_str.
        Returns:
            The IP ip_str as an integer.
        Raises:
            AddressValueError: if ip_str isn't a valid IPv4 Address.
        """
        if not ip_str:
            raise AddressValueError('Address cannot be empty')
        octets = ip_str.split('.')
        if len(octets) != 4:
            raise AddressValueError("Expected 4 octets in %r" % ip_str)
        try:
            return _compat_int_from_byte_vals(
                map(cls._parse_octet, octets), 'big')
        except ValueError as exc:
            # Re-raise octet parsing errors with the full address string.
            raise AddressValueError("%s in %r" % (exc, ip_str))
    @classmethod
    def _parse_octet(cls, octet_str):
        """Convert a decimal octet into an integer.
        Args:
            octet_str: A string, the number to parse.
        Returns:
            The octet as an integer.
        Raises:
            ValueError: if the octet isn't strictly a decimal from [0..255].
        """
        if not octet_str:
            raise ValueError("Empty octet not permitted")
        # Whitelist the characters, since int() allows a lot of bizarre stuff.
        if not cls._DECIMAL_DIGITS.issuperset(octet_str):
            msg = "Only decimal digits permitted in %r"
            raise ValueError(msg % octet_str)
        # We do the length check second, since the invalid character error
        # is likely to be more informative for the user
        if len(octet_str) > 3:
            msg = "At most 3 characters permitted in %r"
            raise ValueError(msg % octet_str)
        # Convert to integer (we know digits are legal)
        octet_int = int(octet_str, 10)
        # Any octets that look like they *might* be written in octal,
        # and which don't look exactly the same in both octal and
        # decimal are rejected as ambiguous
        if octet_int > 7 and octet_str[0] == '0':
            msg = "Ambiguous (octal/decimal) value in %r not permitted"
            raise ValueError(msg % octet_str)
        if octet_int > 255:
            raise ValueError("Octet %d (> 255) not permitted" % octet_int)
        return octet_int
    @classmethod
    def _string_from_ip_int(cls, ip_int):
        """Turns a 32-bit integer into dotted decimal notation.
        Args:
            ip_int: An integer, the IP address.
        Returns:
            The IP address as a string in dotted decimal notation.
        """
        # On py2 iterating bytes yields 1-byte strings, on py3 ints;
        # handle both when formatting each octet.
        return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
                                    if isinstance(b, bytes)
                                    else b)
                        for b in _compat_to_bytes(ip_int, 4, 'big'))
    def _is_hostmask(self, ip_str):
        """Test if the IP string is a hostmask (rather than a netmask).
        Args:
            ip_str: A string, the potential hostmask.
        Returns:
            A boolean, True if the IP string is a hostmask.
        """
        bits = ip_str.split('.')
        try:
            parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
        except ValueError:
            return False
        if len(parts) != len(bits):
            return False
        # Hostmask octets ascend (e.g. 0.0.0.255); netmask octets descend.
        if parts[0] < parts[-1]:
            return True
        return False
    def _reverse_pointer(self):
        """Return the reverse DNS pointer name for the IPv4 address.
        This implements the method described in RFC1035 3.5.
        """
        reverse_octets = _compat_str(self).split('.')[::-1]
        return '.'.join(reverse_octets) + '.in-addr.arpa'
    @property
    def max_prefixlen(self):
        return self._max_prefixlen
    @property
    def version(self):
        return self._version
class IPv4Address(_BaseV4, _BaseAddress):

    """Represent and manipulate single IPv4 Addresses."""

    # Slots keep instances small; '__weakref__' preserves weak-reference
    # support that __slots__ would otherwise remove.
    __slots__ = ('_ip', '__weakref__')

    def __init__(self, address):
        """
        Args:
            address: A string or integer representing the IP

              Additionally, an integer can be passed, so
              IPv4Address('192.0.2.1') == IPv4Address(3221225985).
              or, more generally
              IPv4Address(int(IPv4Address('192.0.2.1'))) ==
                IPv4Address('192.0.2.1')

        Raises:
            AddressValueError: If ipaddress isn't a valid IPv4 address.
        """
        # Efficient constructor from integer.
        if isinstance(address, _compat_int_types):
            self._check_int_address(address)
            self._ip = address
            return

        # Constructing from a packed address (4 raw bytes, big-endian).
        if isinstance(address, bytes):
            self._check_packed_address(address, 4)
            bvs = _compat_bytes_to_byte_vals(address)
            self._ip = _compat_int_from_byte_vals(bvs, 'big')
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP string.
        addr_str = _compat_str(address)
        if '/' in addr_str:
            # A '/' suggests the caller wanted IPv4Interface or IPv4Network.
            raise AddressValueError("Unexpected '/' in %r" % address)
        self._ip = self._ip_int_from_string(addr_str)

    @property
    def packed(self):
        """The binary representation of this address."""
        return v4_int_to_packed(self._ip)

    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within the
            reserved IPv4 Network range.
        """
        return self in self._constants._reserved_network

    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per
            iana-ipv4-special-registry.
        """
        return any(self in net for net in self._constants._private_networks)

    @property
    def is_global(self):
        """Test if this address is allocated for public networks.

        Returns:
            A boolean, True if the address is neither in the shared
            address block (100.64.0.0/10) nor private.
        """
        return (
            self not in self._constants._public_network and
            not self.is_private)

    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is multicast.
            See RFC 3171 for details.
        """
        return self in self._constants._multicast_network

    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 5735 3.
        """
        return self == self._constants._unspecified_address

    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback per RFC 3330.
        """
        return self in self._constants._loopback_network

    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is link-local per RFC 3927.
        """
        return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):

    """An IPv4 address together with the network it belongs to."""

    def __init__(self, address):
        # Integer or packed-bytes input: a bare host address; it gets the
        # all-ones mask (/32).
        if isinstance(address, (bytes, _compat_int_types)):
            IPv4Address.__init__(self, address)
            self.network = IPv4Network(self._ip)
            self._prefixlen = self._max_prefixlen
            return

        # Tuple input: (address,) or (address, prefixlen).
        if isinstance(address, tuple):
            IPv4Address.__init__(self, address[0])
            if len(address) > 1:
                self._prefixlen = int(address[1])
            else:
                self._prefixlen = self._max_prefixlen

            self.network = IPv4Network(address, strict=False)
            self.netmask = self.network.netmask
            self.hostmask = self.network.hostmask
            return

        # String input, e.g. '192.0.2.1/24' or '192.0.2.1/255.255.255.0'.
        addr = _split_optional_netmask(address)
        IPv4Address.__init__(self, addr[0])

        self.network = IPv4Network(address, strict=False)
        self._prefixlen = self.network._prefixlen

        self.netmask = self.network.netmask
        self.hostmask = self.network.hostmask

    def __str__(self):
        return '%s/%d' % (self._string_from_ip_int(self._ip),
                          self.network.prefixlen)

    def __eq__(self, other):
        address_equal = IPv4Address.__eq__(self, other)
        if not address_equal or address_equal is NotImplemented:
            return address_equal
        try:
            return self.network == other.network
        except AttributeError:
            # An interface with an associated network is NOT the
            # same as an unassociated address. That's why the hash
            # takes the extra info into account.
            return False

    def __lt__(self, other):
        address_less = IPv4Address.__lt__(self, other)
        if address_less is NotImplemented:
            return NotImplemented
        try:
            return self.network < other.network
        except AttributeError:
            # We *do* allow addresses and interfaces to be sorted. The
            # unassociated address is considered less than all interfaces.
            return False

    def __hash__(self):
        # Mix in prefix length and network so an interface does not hash
        # equal to the bare address it wraps.
        return self._ip ^ self._prefixlen ^ int(self.network.network_address)

    __reduce__ = _IPAddressBase.__reduce__

    @property
    def ip(self):
        """The bare IPv4Address, without network information."""
        return IPv4Address(self._ip)

    @property
    def with_prefixlen(self):
        """String in 'address/prefixlen' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self._prefixlen)

    @property
    def with_netmask(self):
        """String in 'address/netmask' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.netmask)

    @property
    def with_hostmask(self):
        """String in 'address/hostmask' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):

    """This class represents and manipulates 32-bit IPv4 network + addresses..

    Attributes: [examples for IPv4Network('192.0.2.0/27')]
        .network_address: IPv4Address('192.0.2.0')
        .hostmask: IPv4Address('0.0.0.31')
        .broadcast_address: IPv4Address('192.0.2.32')
        .netmask: IPv4Address('255.255.255.224')
        .prefixlen: 27
    """

    # Class to use when creating address objects
    _address_class = IPv4Address

    def __init__(self, address, strict=True):
        """Instantiate a new IPv4 network object.

        Args:
            address: A string or integer representing the IP [& network].
              '192.0.2.0/24'
              '192.0.2.0/255.255.255.0'
              '192.0.0.2/0.0.0.255'
              are all functionally the same in IPv4. Similarly,
              '192.0.2.1'
              '192.0.2.1/255.255.255.255'
              '192.0.2.1/32'
              are also functionally equivalent. That is to say, failing to
              provide a subnetmask will create an object with a mask of /32.

              If the mask (portion after the / in the argument) is given in
              dotted quad form, it is treated as a netmask if it starts with a
              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
              starts with a zero field (e.g. 0.255.255.255 == /8), with the
              single exception of an all-zero mask which is treated as a
              netmask == /0. If no mask is given, a default of /32 is used.

              Additionally, an integer can be passed, so
              IPv4Network('192.0.2.1') == IPv4Network(3221225985)
              or, more generally
              IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
                IPv4Interface('192.0.2.1')

        Raises:
            AddressValueError: If ipaddress isn't a valid IPv4 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv4 address.
            ValueError: If strict is True and a network address is not
              supplied.
        """
        _BaseNetwork.__init__(self, address)

        # Constructing from a packed address or integer
        if isinstance(address, (_compat_int_types, bytes)):
            self.network_address = IPv4Address(address)
            self.netmask, self._prefixlen = self._make_netmask(
                self._max_prefixlen)
            # fixme: address/network test here.
            return

        # Constructing from a (address, mask) tuple.
        if isinstance(address, tuple):
            if len(address) > 1:
                arg = address[1]
            else:
                # We weren't given an address[1]
                arg = self._max_prefixlen
            self.network_address = IPv4Address(address[0])
            self.netmask, self._prefixlen = self._make_netmask(arg)
            packed = int(self.network_address)
            if packed & int(self.netmask) != packed:
                # Host bits are set beyond the mask.
                if strict:
                    raise ValueError('%s has host bits set' % self)
                else:
                    # Clear the host bits to recover the network address.
                    self.network_address = IPv4Address(packed &
                                                       int(self.netmask))
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = _split_optional_netmask(address)
        self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))

        if len(addr) == 2:
            arg = addr[1]
        else:
            arg = self._max_prefixlen
        self.netmask, self._prefixlen = self._make_netmask(arg)

        if strict:
            if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
                    self.network_address):
                raise ValueError('%s has host bits set' % self)
        self.network_address = IPv4Address(int(self.network_address) &
                                           int(self.netmask))

        # For a prefix one short of the maximum (/31), expose __iter__ as
        # hosts -- presumably so both addresses count as usable hosts;
        # confirm against _BaseNetwork.hosts (defined outside this chunk).
        if self._prefixlen == (self._max_prefixlen - 1):
            self.hosts = self.__iter__

    @property
    def is_global(self):
        """Test if this address is allocated for public networks.

        Returns:
            A boolean, True if the address is not reserved per
            iana-ipv4-special-registry.
        """
        return (not (self.network_address in IPv4Network('100.64.0.0/10') and
                     self.broadcast_address in IPv4Network('100.64.0.0/10')) and
                not self.is_private)
class _IPv4Constants(object):
    # Well-known IPv4 ranges consumed by IPv4Address's is_* properties
    # via the `_constants` hook assigned below.
    _linklocal_network = IPv4Network('169.254.0.0/16')

    _loopback_network = IPv4Network('127.0.0.0/8')

    _multicast_network = IPv4Network('224.0.0.0/4')

    # Addresses in this block are treated as neither private nor global
    # (see IPv4Address.is_global).
    _public_network = IPv4Network('100.64.0.0/10')

    _private_networks = [
        IPv4Network('0.0.0.0/8'),
        IPv4Network('10.0.0.0/8'),
        IPv4Network('127.0.0.0/8'),
        IPv4Network('169.254.0.0/16'),
        IPv4Network('172.16.0.0/12'),
        IPv4Network('192.0.0.0/29'),
        IPv4Network('192.0.0.170/31'),
        IPv4Network('192.0.2.0/24'),
        IPv4Network('192.168.0.0/16'),
        IPv4Network('198.18.0.0/15'),
        IPv4Network('198.51.100.0/24'),
        IPv4Network('203.0.113.0/24'),
        IPv4Network('240.0.0.0/4'),
        IPv4Network('255.255.255.255/32'),
    ]

    _reserved_network = IPv4Network('240.0.0.0/4')

    _unspecified_address = IPv4Address('0.0.0.0')


# Attach the constants to the address class so its is_* properties can
# reach them (done after the class body since the networks above need
# IPv4Network/IPv4Address to exist).
IPv4Address._constants = _IPv4Constants
class _BaseV6(object):

    """Base IPv6 object.

    The following methods are used by IPv6 objects in both single IP
    addresses and networks.
    """

    __slots__ = ()
    _version = 6
    # All-ones value for a full 128-bit address.
    _ALL_ONES = (2 ** IPV6LENGTH) - 1
    _HEXTET_COUNT = 8
    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
    _max_prefixlen = IPV6LENGTH

    # There are only a bunch of valid v6 netmasks, so we cache them all
    # when constructed (see _make_netmask()).
    _netmask_cache = {}

    @classmethod
    def _make_netmask(cls, arg):
        """Make a (netmask, prefix_len) tuple from the given argument.

        Argument can be:
        - an integer (the prefix length)
        - a string representing the prefix length (e.g. "24")
        - a string representing the prefix netmask (e.g. "255.255.255.0")
        """
        if arg not in cls._netmask_cache:
            if isinstance(arg, _compat_int_types):
                prefixlen = arg
            else:
                prefixlen = cls._prefix_from_prefix_string(arg)
            netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
            cls._netmask_cache[arg] = netmask, prefixlen
        return cls._netmask_cache[arg]

    @classmethod
    def _ip_int_from_string(cls, ip_str):
        """Turn an IPv6 ip_str into an integer.

        Args:
            ip_str: A string, the IPv6 ip_str.

        Returns:
            An int, the IPv6 address

        Raises:
            AddressValueError: if ip_str isn't a valid IPv6 Address.
        """
        if not ip_str:
            raise AddressValueError('Address cannot be empty')

        parts = ip_str.split(':')

        # An IPv6 address needs at least 2 colons (3 parts).
        _min_parts = 3
        if len(parts) < _min_parts:
            msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
            raise AddressValueError(msg)

        # If the address has an IPv4-style suffix, convert it to hexadecimal.
        if '.' in parts[-1]:
            try:
                ipv4_int = IPv4Address(parts.pop())._ip
            except AddressValueError as exc:
                raise AddressValueError("%s in %r" % (exc, ip_str))
            # Split the 32-bit IPv4 value into two 16-bit hextets.
            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
            parts.append('%x' % (ipv4_int & 0xFFFF))

        # An IPv6 address can't have more than 8 colons (9 parts).
        # The extra colon comes from using the "::" notation for a single
        # leading or trailing zero part.
        _max_parts = cls._HEXTET_COUNT + 1
        if len(parts) > _max_parts:
            msg = "At most %d colons permitted in %r" % (
                _max_parts - 1, ip_str)
            raise AddressValueError(msg)

        # Disregarding the endpoints, find '::' with nothing in between.
        # This indicates that a run of zeroes has been skipped.
        skip_index = None
        for i in _compat_range(1, len(parts) - 1):
            if not parts[i]:
                if skip_index is not None:
                    # Can't have more than one '::'
                    msg = "At most one '::' permitted in %r" % ip_str
                    raise AddressValueError(msg)
                skip_index = i

        # parts_hi is the number of parts to copy from above/before the '::'
        # parts_lo is the number of parts to copy from below/after the '::'
        if skip_index is not None:
            # If we found a '::', then check if it also covers the endpoints.
            parts_hi = skip_index
            parts_lo = len(parts) - skip_index - 1
            if not parts[0]:
                parts_hi -= 1
                if parts_hi:
                    msg = "Leading ':' only permitted as part of '::' in %r"
                    raise AddressValueError(msg % ip_str)  # ^: requires ^::
            if not parts[-1]:
                parts_lo -= 1
                if parts_lo:
                    msg = "Trailing ':' only permitted as part of '::' in %r"
                    raise AddressValueError(msg % ip_str)  # :$ requires ::$
            parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
            if parts_skipped < 1:
                msg = "Expected at most %d other parts with '::' in %r"
                raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
        else:
            # Otherwise, allocate the entire address to parts_hi. The
            # endpoints could still be empty, but _parse_hextet() will check
            # for that.
            if len(parts) != cls._HEXTET_COUNT:
                msg = "Exactly %d parts expected without '::' in %r"
                raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
            if not parts[0]:
                msg = "Leading ':' only permitted as part of '::' in %r"
                raise AddressValueError(msg % ip_str)  # ^: requires ^::
            if not parts[-1]:
                msg = "Trailing ':' only permitted as part of '::' in %r"
                raise AddressValueError(msg % ip_str)  # :$ requires ::$
            parts_hi = len(parts)
            parts_lo = 0
            parts_skipped = 0

        try:
            # Now, parse the hextets into a 128-bit integer.
            ip_int = 0
            for i in range(parts_hi):
                ip_int <<= 16
                ip_int |= cls._parse_hextet(parts[i])
            ip_int <<= 16 * parts_skipped
            for i in range(-parts_lo, 0):
                ip_int <<= 16
                ip_int |= cls._parse_hextet(parts[i])
            return ip_int
        except ValueError as exc:
            raise AddressValueError("%s in %r" % (exc, ip_str))

    @classmethod
    def _parse_hextet(cls, hextet_str):
        """Convert an IPv6 hextet string into an integer.

        Args:
            hextet_str: A string, the number to parse.

        Returns:
            The hextet as an integer.

        Raises:
            ValueError: if the input isn't strictly a hex number from
              [0..FFFF].
        """
        # Whitelist the characters, since int() allows a lot of bizarre stuff.
        if not cls._HEX_DIGITS.issuperset(hextet_str):
            raise ValueError("Only hex digits permitted in %r" % hextet_str)
        # We do the length check second, since the invalid character error
        # is likely to be more informative for the user
        if len(hextet_str) > 4:
            msg = "At most 4 characters permitted in %r"
            raise ValueError(msg % hextet_str)
        # Length check means we can skip checking the integer value
        return int(hextet_str, 16)

    @classmethod
    def _compress_hextets(cls, hextets):
        """Compresses a list of hextets.

        Compresses a list of strings, replacing the longest continuous
        sequence of "0" in the list with "" and adding empty strings at
        the beginning or at the end of the string such that subsequently
        calling ":".join(hextets) will produce the compressed version of
        the IPv6 address.

        Args:
            hextets: A list of strings, the hextets to compress.

        Returns:
            A list of strings.
        """
        best_doublecolon_start = -1
        best_doublecolon_len = 0
        doublecolon_start = -1
        doublecolon_len = 0
        for index, hextet in enumerate(hextets):
            if hextet == '0':
                doublecolon_len += 1
                if doublecolon_start == -1:
                    # Start of a sequence of zeros.
                    doublecolon_start = index
                if doublecolon_len > best_doublecolon_len:
                    # This is the longest sequence of zeros so far.
                    best_doublecolon_len = doublecolon_len
                    best_doublecolon_start = doublecolon_start
            else:
                doublecolon_len = 0
                doublecolon_start = -1

        # Only compress runs of two or more zero hextets.
        if best_doublecolon_len > 1:
            best_doublecolon_end = (best_doublecolon_start +
                                    best_doublecolon_len)
            # For zeros at the end of the address.
            if best_doublecolon_end == len(hextets):
                hextets += ['']
            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
            # For zeros at the beginning of the address.
            if best_doublecolon_start == 0:
                hextets = [''] + hextets

        return hextets

    @classmethod
    def _string_from_ip_int(cls, ip_int=None):
        """Turns a 128-bit integer into hexadecimal notation.

        Args:
            ip_int: An integer, the IP address.

        Returns:
            A string, the hexadecimal representation of the address.

        Raises:
            ValueError: The address is bigger than 128 bits of all ones.
        """
        if ip_int is None:
            # NOTE(review): as a classmethod, cls has no _ip attribute, so
            # this default branch looks like dead legacy code from an
            # instance-method version -- confirm callers always pass ip_int.
            ip_int = int(cls._ip)

        if ip_int > cls._ALL_ONES:
            raise ValueError('IPv6 address is too large')

        hex_str = '%032x' % ip_int
        # Eight 4-digit groups; int()/'%x' round-trip strips leading zeros.
        hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]

        hextets = cls._compress_hextets(hextets)
        return ':'.join(hextets)

    def _explode_shorthand_ip_string(self):
        """Expand a shortened IPv6 address.

        Args:
            ip_str: A string, the IPv6 address.

        Returns:
            A string, the expanded IPv6 address.
        """
        # Pick the address portion appropriate to the concrete type.
        if isinstance(self, IPv6Network):
            ip_str = _compat_str(self.network_address)
        elif isinstance(self, IPv6Interface):
            ip_str = _compat_str(self.ip)
        else:
            ip_str = _compat_str(self)

        ip_int = self._ip_int_from_string(ip_str)
        hex_str = '%032x' % ip_int
        parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
        if isinstance(self, (_BaseNetwork, IPv6Interface)):
            # Networks and interfaces carry their prefix length too.
            return '%s/%d' % (':'.join(parts), self._prefixlen)
        return ':'.join(parts)

    def _reverse_pointer(self):
        """Return the reverse DNS pointer name for the IPv6 address.

        This implements the method described in RFC3596 2.5.
        """
        # One label per hex nibble of the exploded form, in reverse order.
        reverse_chars = self.exploded[::-1].replace(':', '')
        return '.'.join(reverse_chars) + '.ip6.arpa'

    @property
    def max_prefixlen(self):
        # Maximum prefix length for this IP version (128 for IPv6).
        return self._max_prefixlen

    @property
    def version(self):
        # IP protocol version number (6 for IPv6).
        return self._version
class IPv6Address(_BaseV6, _BaseAddress):

    """Represent and manipulate single IPv6 Addresses."""

    # Slots keep instances small; '__weakref__' preserves weak-reference
    # support that __slots__ would otherwise remove.
    __slots__ = ('_ip', '__weakref__')

    def __init__(self, address):
        """Instantiate a new IPv6 address object.

        Args:
            address: A string or integer representing the IP

              Additionally, an integer can be passed, so
              IPv6Address('2001:db8::') ==
                IPv6Address(42540766411282592856903984951653826560)
              or, more generally
              IPv6Address(int(IPv6Address('2001:db8::'))) ==
                IPv6Address('2001:db8::')

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
        """
        # Efficient constructor from integer.
        if isinstance(address, _compat_int_types):
            self._check_int_address(address)
            self._ip = address
            return

        # Constructing from a packed address (16 raw bytes, big-endian).
        if isinstance(address, bytes):
            self._check_packed_address(address, 16)
            bvs = _compat_bytes_to_byte_vals(address)
            self._ip = _compat_int_from_byte_vals(bvs, 'big')
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP string.
        addr_str = _compat_str(address)
        if '/' in addr_str:
            # A '/' suggests the caller wanted IPv6Interface or IPv6Network.
            raise AddressValueError("Unexpected '/' in %r" % address)
        self._ip = self._ip_int_from_string(addr_str)

    @property
    def packed(self):
        """The binary representation of this address."""
        return v6_int_to_packed(self._ip)

    @property
    def is_multicast(self):
        """Test if the address is reserved for multicast use.

        Returns:
            A boolean, True if the address is a multicast address.
            See RFC 2373 2.7 for details.
        """
        return self in self._constants._multicast_network

    @property
    def is_reserved(self):
        """Test if the address is otherwise IETF reserved.

        Returns:
            A boolean, True if the address is within one of the
            reserved IPv6 Network ranges.
        """
        return any(self in x for x in self._constants._reserved_networks)

    @property
    def is_link_local(self):
        """Test if the address is reserved for link-local.

        Returns:
            A boolean, True if the address is reserved per RFC 4291.
        """
        return self in self._constants._linklocal_network

    @property
    def is_site_local(self):
        """Test if the address is reserved for site-local.

        Note that the site-local address space has been deprecated by RFC 3879.
        Use is_private to test if this address is in the space of unique local
        addresses as defined by RFC 4193.

        Returns:
            A boolean, True if the address is reserved per RFC 3513 2.5.6.
        """
        return self in self._constants._sitelocal_network

    @property
    def is_private(self):
        """Test if this address is allocated for private networks.

        Returns:
            A boolean, True if the address is reserved per
            iana-ipv6-special-registry.
        """
        return any(self in net for net in self._constants._private_networks)

    @property
    def is_global(self):
        """Test if this address is allocated for public networks.

        Returns:
            A boolean, true if the address is not reserved per
            iana-ipv6-special-registry.
        """
        return not self.is_private

    @property
    def is_unspecified(self):
        """Test if the address is unspecified.

        Returns:
            A boolean, True if this is the unspecified address as defined in
            RFC 2373 2.5.2.
        """
        return self._ip == 0

    @property
    def is_loopback(self):
        """Test if the address is a loopback address.

        Returns:
            A boolean, True if the address is a loopback address as defined in
            RFC 2373 2.5.3.
        """
        return self._ip == 1

    @property
    def ipv4_mapped(self):
        """Return the IPv4 mapped address.

        Returns:
            If the IPv6 address is a v4 mapped address, return the
            IPv4 mapped address. Return None otherwise.
        """
        # v4-mapped addresses have 0xFFFF in bits 47..32 and zeros above.
        if (self._ip >> 32) != 0xFFFF:
            return None
        return IPv4Address(self._ip & 0xFFFFFFFF)

    @property
    def teredo(self):
        """Tuple of embedded teredo IPs.

        Returns:
            Tuple of the (server, client) IPs or None if the address
            doesn't appear to be a teredo address (doesn't start with
            2001::/32)
        """
        if (self._ip >> 96) != 0x20010000:
            return None
        # Server IPv4 sits in bits 95..64; the client IPv4 is stored
        # bit-inverted in the low 32 bits.
        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
                IPv4Address(~self._ip & 0xFFFFFFFF))

    @property
    def sixtofour(self):
        """Return the IPv4 6to4 embedded address.

        Returns:
            The IPv4 6to4-embedded address if present or None if the
            address doesn't appear to contain a 6to4 embedded address.
        """
        # 6to4 addresses start with the 0x2002 prefix; the embedded IPv4
        # occupies bits 111..80.
        if (self._ip >> 112) != 0x2002:
            return None
        return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
class IPv6Interface(IPv6Address):

    """An IPv6 address together with the network it belongs to."""

    def __init__(self, address):
        # Integer or packed-bytes input: a bare host address; it gets the
        # all-ones mask (/128).
        if isinstance(address, (bytes, _compat_int_types)):
            IPv6Address.__init__(self, address)
            self.network = IPv6Network(self._ip)
            self._prefixlen = self._max_prefixlen
            return

        # Tuple input: (address,) or (address, prefixlen).
        if isinstance(address, tuple):
            IPv6Address.__init__(self, address[0])
            if len(address) > 1:
                self._prefixlen = int(address[1])
            else:
                self._prefixlen = self._max_prefixlen
            self.network = IPv6Network(address, strict=False)
            self.netmask = self.network.netmask
            self.hostmask = self.network.hostmask
            return

        # String input, e.g. '2001:db8::1/64'.
        addr = _split_optional_netmask(address)
        IPv6Address.__init__(self, addr[0])
        self.network = IPv6Network(address, strict=False)
        self.netmask = self.network.netmask
        self._prefixlen = self.network._prefixlen
        self.hostmask = self.network.hostmask

    def __str__(self):
        return '%s/%d' % (self._string_from_ip_int(self._ip),
                          self.network.prefixlen)

    def __eq__(self, other):
        address_equal = IPv6Address.__eq__(self, other)
        if not address_equal or address_equal is NotImplemented:
            return address_equal
        try:
            return self.network == other.network
        except AttributeError:
            # An interface with an associated network is NOT the
            # same as an unassociated address. That's why the hash
            # takes the extra info into account.
            return False

    def __lt__(self, other):
        address_less = IPv6Address.__lt__(self, other)
        if address_less is NotImplemented:
            return NotImplemented
        try:
            return self.network < other.network
        except AttributeError:
            # We *do* allow addresses and interfaces to be sorted. The
            # unassociated address is considered less than all interfaces.
            return False

    def __hash__(self):
        # Mix in prefix length and network so an interface does not hash
        # equal to the bare address it wraps.
        return self._ip ^ self._prefixlen ^ int(self.network.network_address)

    __reduce__ = _IPAddressBase.__reduce__

    @property
    def ip(self):
        """The bare IPv6Address, without network information."""
        return IPv6Address(self._ip)

    @property
    def with_prefixlen(self):
        """String in 'address/prefixlen' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self._prefixlen)

    @property
    def with_netmask(self):
        """String in 'address/netmask' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.netmask)

    @property
    def with_hostmask(self):
        """String in 'address/hostmask' form."""
        return '%s/%s' % (self._string_from_ip_int(self._ip),
                          self.hostmask)

    @property
    def is_unspecified(self):
        # Both the address and its network must be unspecified.
        return self._ip == 0 and self.network.is_unspecified

    @property
    def is_loopback(self):
        # Both the address and its network must be loopback.
        return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):

    """This class represents and manipulates 128-bit IPv6 networks.

    Attributes: [examples for IPv6('2001:db8::1000/124')]
        .network_address: IPv6Address('2001:db8::1000')
        .hostmask: IPv6Address('::f')
        .broadcast_address: IPv6Address('2001:db8::100f')
        .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
        .prefixlen: 124
    """

    # Class to use when creating address objects
    _address_class = IPv6Address

    def __init__(self, address, strict=True):
        """Instantiate a new IPv6 Network object.

        Args:
            address: A string or integer representing the IPv6 network or the
              IP and prefix/netmask.
              '2001:db8::/128'
              '2001:db8:0000:0000:0000:0000:0000:0000/128'
              '2001:db8::'
              are all functionally the same in IPv6.  That is to say,
              failing to provide a subnetmask will create an object with
              a mask of /128.

              Additionally, an integer can be passed, so
              IPv6Network('2001:db8::') ==
                IPv6Network(42540766411282592856903984951653826560)
              or, more generally
              IPv6Network(int(IPv6Network('2001:db8::'))) ==
                IPv6Network('2001:db8::')

            strict: A boolean. If true, ensure that we have been passed
              A true network address, eg, 2001:db8::1000/124 and not an
              IP address on a network, eg, 2001:db8::1/124.

        Raises:
            AddressValueError: If address isn't a valid IPv6 address.
            NetmaskValueError: If the netmask isn't valid for
              an IPv6 address.
            ValueError: If strict was True and a network address was not
              supplied.
        """
        _BaseNetwork.__init__(self, address)

        # Efficient constructor from integer or packed address
        if isinstance(address, (bytes, _compat_int_types)):
            self.network_address = IPv6Address(address)
            self.netmask, self._prefixlen = self._make_netmask(
                self._max_prefixlen)
            return

        # Constructing from a (address, mask) tuple.
        if isinstance(address, tuple):
            if len(address) > 1:
                arg = address[1]
            else:
                arg = self._max_prefixlen
            self.netmask, self._prefixlen = self._make_netmask(arg)
            self.network_address = IPv6Address(address[0])
            packed = int(self.network_address)
            if packed & int(self.netmask) != packed:
                # Host bits are set beyond the mask.
                if strict:
                    raise ValueError('%s has host bits set' % self)
                else:
                    # Clear the host bits to recover the network address.
                    self.network_address = IPv6Address(packed &
                                                       int(self.netmask))
            return

        # Assume input argument to be string or any object representation
        # which converts into a formatted IP prefix string.
        addr = _split_optional_netmask(address)

        self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))

        if len(addr) == 2:
            arg = addr[1]
        else:
            arg = self._max_prefixlen
        self.netmask, self._prefixlen = self._make_netmask(arg)

        if strict:
            if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
                    self.network_address):
                raise ValueError('%s has host bits set' % self)
        self.network_address = IPv6Address(int(self.network_address) &
                                           int(self.netmask))

        # For a prefix one short of the maximum (/127), expose __iter__ as
        # hosts so both addresses count as usable hosts (there is no
        # Subnet-Router anycast to exclude in a 2-address network).
        if self._prefixlen == (self._max_prefixlen - 1):
            self.hosts = self.__iter__

    def hosts(self):
        """Generate Iterator over usable hosts in a network.

        This is like __iter__ except it doesn't return the
        Subnet-Router anycast address.
        """
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        # Skip the network address itself; include the top address --
        # IPv6 has no broadcast address.
        for x in _compat_range(network + 1, broadcast + 1):
            yield self._address_class(x)

    @property
    def is_site_local(self):
        """Test if the address is reserved for site-local.

        Note that the site-local address space has been deprecated by RFC 3879.
        Use is_private to test if this address is in the space of unique local
        addresses as defined by RFC 4193.

        Returns:
            A boolean, True if the address is reserved per RFC 3513 2.5.6.
        """
        return (self.network_address.is_site_local and
                self.broadcast_address.is_site_local)
class _IPv6Constants(object):
    # Well-known IPv6 ranges consumed by IPv6Address's is_* properties
    # via the `_constants` hook assigned below.
    _linklocal_network = IPv6Network('fe80::/10')

    _multicast_network = IPv6Network('ff00::/8')

    _private_networks = [
        IPv6Network('::1/128'),
        IPv6Network('::/128'),
        IPv6Network('::ffff:0:0/96'),
        IPv6Network('100::/64'),
        IPv6Network('2001::/23'),
        IPv6Network('2001:2::/48'),
        IPv6Network('2001:db8::/32'),
        IPv6Network('2001:10::/28'),
        IPv6Network('fc00::/7'),
        IPv6Network('fe80::/10'),
    ]

    _reserved_networks = [
        IPv6Network('::/8'), IPv6Network('100::/8'),
        IPv6Network('200::/7'), IPv6Network('400::/6'),
        IPv6Network('800::/5'), IPv6Network('1000::/4'),
        IPv6Network('4000::/3'), IPv6Network('6000::/3'),
        IPv6Network('8000::/3'), IPv6Network('A000::/3'),
        IPv6Network('C000::/3'), IPv6Network('E000::/4'),
        IPv6Network('F000::/5'), IPv6Network('F800::/6'),
        IPv6Network('FE00::/9'),
    ]

    _sitelocal_network = IPv6Network('fec0::/10')


# Attach the constants to the address class so its is_* properties can
# reach them (done after the class body since the networks above need
# IPv6Network/IPv6Address to exist).
IPv6Address._constants = _IPv6Constants
| bsd-3-clause |
mikeycattell/benefit-housing-rebuild | node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
  """Visual Studio tool."""

  def __init__(self, name, attrs=None):
    """Initializes the tool.

    Args:
      name: Tool name.
      attrs: Dict of tool attributes; may be None.
    """
    # Keep the caller's dict (when truthy) so later attribute additions
    # are shared, matching the original aliasing behavior.
    self._attrs = attrs if attrs else {}
    self._attrs['Name'] = name

  def _GetSpecification(self):
    """Creates an element for the tool.

    Returns:
      A new xml.dom.Element for the tool.
    """
    spec = ['Tool']
    spec.append(self._attrs)
    return spec
class Filter(object):
  """Visual Studio filter - that is, a virtual folder."""

  def __init__(self, name, contents=None):
    """Initializes the folder.

    Args:
      name: Filter (folder) name.
      contents: List of filenames and/or Filter objects contained.
    """
    self.name = name
    # Copy the sequence so later mutation of the caller's list is not
    # reflected here.
    self.contents = [] if contents is None else list(contents)
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of string, the supported platforms. If null, ['Win32']
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
  """Adds a configuration to a file.

  Args:
    path: Relative path to the file.
    config: Name of configuration to add.
    attrs: Dict of configuration attributes; may be None.
    tools: List of tools (strings or Tool objects); may be None.

  Raises:
    ValueError: Relative path does not match any file added via AddFiles().
  """
  # Look up the File node that AddFiles() registered for this path.
  try:
    file_node = self.files_dict[path]
  except KeyError:
    raise ValueError('AddFileConfig: file "%s" not in project.' % path)
  # Attach the per-file configuration spec to that node.
  file_node.append(self._GetSpecForConfiguration('FileConfiguration', config,
                                                 attrs, tools))
def WriteIfChanged(self):
  """Writes the project file."""
  # Top-level project attributes.
  project_attrs = {
      'ProjectType': 'Visual C++',
      'Version': self.version.ProjectVersion(),
      'Name': self.name,
      'ProjectGUID': self.guid,
      'RootNamespace': self.name,
      'Keyword': 'Win32Proj',
  }
  # Assemble the XML content definition in the order Visual Studio expects.
  content = [
      'VisualStudioProject',
      project_attrs,
      self.platform_section,
      self.tool_files_section,
      self.configurations_section,
      ['References'],  # empty section
      self.files_section,
      ['Globals'],  # empty section
  ]
  easy_xml.WriteXmlIfChanged(content, self.project_path,
                             encoding="Windows-1252")
| mit |
mazvv/travelcrm | travelcrm/views/transfers.py | 1 | 7321 | # -*-coding: utf-8-*-
import logging
from pyramid.view import view_config, view_defaults
from pyramid.httpexceptions import HTTPFound
from . import BaseView
from ..models import DBSession
from ..models.transfer import Transfer
from ..lib.bl.subscriptions import subscribe_resource
from ..lib.utils.common_utils import translate as _
from ..forms.transfers import (
TransferForm,
TransferSearchForm,
TransferAssignForm,
)
from ..lib.events.resources import (
ResourceCreated,
ResourceChanged,
ResourceDeleted,
)
log = logging.getLogger(__name__)
@view_defaults(
    context='..resources.transfers.TransfersResource',
)
class TransfersView(BaseView):
    """CRUD, assignment and subscription views for Transfer resources.

    GET views render mako templates; the matching underscore-prefixed POST
    views process the submitted forms and answer JSON.
    """

    @view_config(
        request_method='GET',
        renderer='travelcrm:templates/transfers/index.mako',
        permission='view'
    )
    def index(self):
        """Render the transfers grid page."""
        return {
            'title': self._get_title(),
        }

    @view_config(
        name='list',
        xhr='True',
        request_method='POST',
        renderer='json',
        permission='view'
    )
    def list(self):
        """Grid data source: return paged rows and the total row count."""
        form = TransferSearchForm(self.request, self.context)
        form.validate()
        qb = form.submit()
        return {
            'total': qb.get_count(),
            'rows': qb.get_serialized()
        }

    @view_config(
        name='view',
        request_method='GET',
        renderer='travelcrm:templates/transfers/form.mako',
        permission='view'
    )
    def view(self):
        """Show a transfer read-only; 'rid' lookups redirect to the id URL."""
        if self.request.params.get('rid'):
            resource_id = self.request.params.get('rid')
            transfer = Transfer.by_resource_id(resource_id)
            return HTTPFound(
                location=self.request.resource_url(
                    self.context, 'view', query={'id': transfer.id}
                )
            )
        result = self.edit()
        result.update({
            'title': self._get_title(_(u'View')),
            'readonly': True,
        })
        return result

    @view_config(
        name='add',
        request_method='GET',
        renderer='travelcrm:templates/transfers/form.mako',
        permission='add'
    )
    def add(self):
        """Render an empty form for a new transfer."""
        return {
            'title': self._get_title(_(u'Add')),
        }

    @view_config(
        name='add',
        request_method='POST',
        renderer='json',
        permission='add'
    )
    def _add(self):
        """Create a transfer from the submitted form and fire the event."""
        form = TransferForm(self.request)
        if form.validate():
            transfer = form.submit()
            DBSession.add(transfer)
            # flush so transfer.id is populated for the response
            DBSession.flush()
            event = ResourceCreated(self.request, transfer)
            event.registry()
            return {
                'success_message': _(u'Saved'),
                'response': transfer.id
            }
        else:
            return {
                'error_message': _(u'Please, check errors'),
                'errors': form.errors
            }

    @view_config(
        name='edit',
        request_method='GET',
        renderer='travelcrm:templates/transfers/form.mako',
        permission='edit'
    )
    def edit(self):
        """Render the form pre-filled with an existing transfer."""
        transfer = Transfer.get(self.request.params.get('id'))
        return {
            'item': transfer,
            'title': self._get_title(_(u'Edit')),
        }

    @view_config(
        name='edit',
        request_method='POST',
        renderer='json',
        permission='edit'
    )
    def _edit(self):
        """Apply the submitted form to an existing transfer."""
        transfer = Transfer.get(self.request.params.get('id'))
        form = TransferForm(self.request)
        if form.validate():
            form.submit(transfer)
            event = ResourceChanged(self.request, transfer)
            event.registry()
            return {
                'success_message': _(u'Saved'),
                'response': transfer.id
            }
        else:
            return {
                'error_message': _(u'Please, check errors'),
                'errors': form.errors
            }

    @view_config(
        name='copy',
        request_method='GET',
        renderer='travelcrm:templates/transfers/form.mako',
        permission='add'
    )
    def copy(self):
        """Render the add form pre-filled from a copy of a transfer."""
        transfer = Transfer.get_copy(self.request.params.get('id'))
        return {
            'action': self.request.path_url,
            'item': transfer,
            'title': self._get_title(_(u'Copy')),
        }

    @view_config(
        name='copy',
        request_method='POST',
        renderer='json',
        permission='add'
    )
    def _copy(self):
        # A copy is submitted exactly like a brand new transfer.
        return self._add()

    @view_config(
        name='delete',
        request_method='GET',
        renderer='travelcrm:templates/transfers/delete.mako',
        permission='delete'
    )
    def delete(self):
        """Render the delete-confirmation dialog."""
        return {
            'title': self._get_title(_(u'Delete')),
            'rid': self.request.params.get('rid')
        }

    @view_config(
        name='delete',
        request_method='POST',
        renderer='json',
        permission='delete'
    )
    def _delete(self):
        """Delete the selected transfers; roll back on any failure."""
        errors = False
        ids = self.request.params.getall('id')
        if ids:
            try:
                items = DBSession.query(Transfer).filter(
                    Transfer.id.in_(ids)
                )
                for item in items:
                    DBSession.delete(item)
                    event = ResourceDeleted(self.request, item)
                    event.registry()
                DBSession.flush()
            # was a bare "except:"; keep the best-effort semantics but do
            # not swallow SystemExit/KeyboardInterrupt
            except Exception:
                errors = True
                DBSession.rollback()
        if errors:
            return {
                'error_message': _(
                    u'Some objects could not be delete'
                ),
            }
        return {'success_message': _(u'Deleted')}

    @view_config(
        name='assign',
        request_method='GET',
        renderer='travelcrm:templates/transfers/assign.mako',
        permission='assign'
    )
    def assign(self):
        """Render the maintainer-assignment dialog."""
        return {
            'id': self.request.params.get('id'),
            'title': self._get_title(_(u'Assign Maintainer')),
        }

    @view_config(
        name='assign',
        request_method='POST',
        renderer='json',
        permission='assign'
    )
    def _assign(self):
        """Assign a maintainer to the selected transfers."""
        form = TransferAssignForm(self.request)
        if form.validate():
            form.submit(self.request.params.getall('id'))
            return {
                'success_message': _(u'Assigned'),
            }
        else:
            return {
                'error_message': _(u'Please, check errors'),
                'errors': form.errors
            }

    @view_config(
        name='subscribe',
        request_method='GET',
        # fixed: template lives under templates/transfers/ like every other
        # renderer in this class (was the non-existent "transfer/")
        renderer='travelcrm:templates/transfers/subscribe.mako',
        permission='view'
    )
    def subscribe(self):
        """Render the subscription dialog."""
        return {
            'id': self.request.params.get('id'),
            'title': self._get_title(_(u'Subscribe')),
        }

    @view_config(
        name='subscribe',
        request_method='POST',
        renderer='json',
        permission='view'
    )
    def _subscribe(self):
        """Subscribe the current user to the selected transfer resources."""
        ids = self.request.params.getall('id')
        for id in ids:
            transfer = Transfer.get(id)
            subscribe_resource(self.request, transfer.resource)
        return {
            'success_message': _(u'Subscribed'),
        }
| gpl-3.0 |
petrutlucian94/nova | nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py | 8 | 12071 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_config import cfg
import webob
from nova.api.openstack.compute.contrib import evacuate as evacuate_v2
from nova.api.openstack.compute.plugins.v3 import evacuate as evacuate_v21
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import vm_states
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
def fake_compute_api(*args, **kwargs):
    """Stub used in place of compute API calls; always reports success."""
    return True
def fake_compute_api_get(self, context, instance_id, want_objects=False,
                         **kwargs):
    """Stub for compute_api.API.get returning a canned active instance."""
    # BAD_UUID is something that does not exist
    if instance_id == 'BAD_UUID':
        raise exception.InstanceNotFound(instance_id=instance_id)
    return fake_instance.fake_instance_obj(context, id=1, uuid=instance_id,
                                           task_state=None, host='host1',
                                           vm_state=vm_states.ACTIVE)
def fake_service_get_by_compute_host(self, context, host):
    """Stub for HostAPI.service_get_by_compute_host.

    Raises ComputeHostNotFound for the sentinel 'bad-host', otherwise
    returns a minimal service record for the requested host.
    """
    if host == 'bad-host':
        raise exception.ComputeHostNotFound(host=host)
    return {
        'host_name': host,
        'service': 'compute',
        'zone': 'nova',
    }
class EvacuateTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-evacuate API extension."""

    validation_error = exception.ValidationError
    # compute API methods replaced with a no-op stub for every test
    _methods = ('resize', 'evacuate')

    def setUp(self):
        super(EvacuateTestV21, self).setUp()
        self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
        self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
                       fake_service_get_by_compute_host)
        self.UUID = uuid.uuid4()
        for _method in self._methods:
            self.stubs.Set(compute_api.API, _method, fake_compute_api)
        self._set_up_controller()
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
        self.req = fakes.HTTPRequest.blank('')

    def _set_up_controller(self):
        # Overridden by EvacuateTestV2 to build the legacy controller(s).
        self.controller = evacuate_v21.EvacuateController()
        self.controller_no_ext = self.controller

    def _get_evacuate_response(self, json_load, uuid=None):
        """POST an evacuate action body and return the controller response."""
        base_json_load = {'evacuate': json_load}
        response = self.controller._evacuate(self.admin_req, uuid or self.UUID,
                                             body=base_json_load)
        return response

    def _check_evacuate_failure(self, expected_exception, body, uuid=None,
                                controller=None):
        """Assert that evacuating with *body* raises *expected_exception*.

        (Renamed from ``exception`` to avoid shadowing the imported
        nova.exception module.)
        """
        controller = controller or self.controller
        body = {'evacuate': body}
        self.assertRaises(expected_exception,
                          controller._evacuate,
                          self.admin_req, uuid or self.UUID, body=body)

    def test_evacuate_with_valid_instance(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_with_invalid_instance(self):
        self._check_evacuate_failure(webob.exc.HTTPNotFound,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     uuid='BAD_UUID')

    def test_evacuate_with_active_service(self):
        def fake_evacuate(*args, **kwargs):
            raise exception.ComputeServiceInUse("Service still in use")
        self.stubs.Set(compute_api.API, 'evacuate', fake_evacuate)
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_no_target(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_instance_without_on_shared_storage(self):
        self._check_evacuate_failure(self.validation_error,
                                     {'host': 'my-host',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_invalid_characters_host(self):
        host = 'abc!#'
        self._check_evacuate_failure(self.validation_error,
                                     {'host': host,
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_too_long_host(self):
        host = 'a' * 256
        self._check_evacuate_failure(self.validation_error,
                                     {'host': host,
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        self._check_evacuate_failure(self.validation_error,
                                     {'host': 'my-host',
                                      'onSharedStorage': 'foo',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_bad_target(self):
        self._check_evacuate_failure(webob.exc.HTTPNotFound,
                                     {'host': 'bad-host',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_target(self):
        admin_pass = 'MyNewPass'
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    @mock.patch('nova.objects.Instance.save')
    def test_evacuate_shared_and_pass(self, mock_save):
        # adminPass may not be supplied together with shared storage
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'bad-host',
                                      'onSharedStorage': 'True',
                                      'adminPass': 'MyNewPass'})

    @mock.patch('nova.objects.Instance.save')
    def test_evacuate_not_shared_pass_generated(self, mock_save):
        res = self._get_evacuate_response({'host': 'my-host',
                                           'onSharedStorage': 'False'})
        self.assertEqual(CONF.password_length, len(res['adminPass']))

    @mock.patch('nova.objects.Instance.save')
    def test_evacuate_shared(self, mock_save):
        self._get_evacuate_response({'host': 'my-host',
                                     'onSharedStorage': 'True'})

    def test_not_admin(self):
        body = {'evacuate': {'host': 'my-host',
                             'onSharedStorage': 'False'}}
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller._evacuate,
                          self.req, self.UUID, body=body)

    def test_evacuate_to_same_host(self):
        # fake_compute_api_get reports the instance on 'host1'
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'host': 'host1',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'})

    def test_evacuate_instance_with_empty_host(self):
        self._check_evacuate_failure(self.validation_error,
                                     {'host': '',
                                      'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     controller=self.controller_no_ext)

    @mock.patch('nova.objects.Instance.save')
    def test_evacuate_instance_with_underscore_in_hostname(self, mock_save):
        admin_pass = 'MyNewPass'
        # NOTE: The hostname grammar in RFC952 does not allow for
        # underscores in hostnames. However, we should test that it
        # is supported because it sometimes occurs in real systems.
        res = self._get_evacuate_response({'host': 'underscore_hostname',
                                           'onSharedStorage': 'False',
                                           'adminPass': admin_pass})
        self.assertEqual(admin_pass, res['adminPass'])

    def test_evacuate_disable_password_return(self):
        self._test_evacuate_enable_instance_password_conf(False)

    def test_evacuate_enable_password_return(self):
        self._test_evacuate_enable_instance_password_conf(True)

    @mock.patch('nova.objects.Instance.save')
    def _test_evacuate_enable_instance_password_conf(self, enable_pass,
                                                     mock_save):
        """Common body for the enable_instance_password flag tests.

        NOTE(fix): mock.patch appends the patched mock *after* the
        caller-supplied arguments, so ``enable_pass`` must precede
        ``mock_save`` in the signature.  The previous ordering bound the
        caller's boolean to ``mock_save`` and the (always truthy) mock to
        ``enable_pass``, so the disabled-password branch was never tested.
        """
        self.flags(enable_instance_password=enable_pass)
        res = self._get_evacuate_response({'host': 'underscore_hostname',
                                           'onSharedStorage': 'False'})
        if enable_pass:
            self.assertIn('adminPass', res)
        else:
            self.assertIsNone(res.get('adminPass'))
class EvacuateTestV2(EvacuateTestV21):
    """Runs the evacuate test suite against the legacy v2 extension.

    The v2 controller has no JSON-schema validation layer, so parameter
    problems surface as plain HTTPBadRequest, and the schema-specific
    tests inherited from v2.1 are overridden below as no-ops.
    """

    # v2 reports invalid input as HTTPBadRequest (no schema validation)
    validation_error = webob.exc.HTTPBadRequest

    def _set_up_controller(self):
        # v2 behaviour depends on which optional extensions are loaded:
        # build one controller with os-extended-evacuate-find-host enabled
        # and one without it, so tests can exercise both code paths.
        ext_mgr = extensions.ExtensionManager()
        ext_mgr.extensions = {'os-extended-evacuate-find-host': 'fake'}
        self.controller = evacuate_v2.Controller(ext_mgr)
        ext_mgr_no_ext = extensions.ExtensionManager()
        ext_mgr_no_ext.extensions = {}
        self.controller_no_ext = evacuate_v2.Controller(ext_mgr_no_ext)

    def test_no_target_fails_if_extension_not_loaded(self):
        # Without the find-host extension, omitting 'host' must be rejected.
        self._check_evacuate_failure(webob.exc.HTTPBadRequest,
                                     {'onSharedStorage': 'False',
                                      'adminPass': 'MyNewPass'},
                                     controller=self.controller_no_ext)

    def test_evacuate_instance_with_too_long_host(self):
        # Not enforced by v2 (schema-only check); intentionally skipped.
        pass

    def test_evacuate_instance_with_invalid_characters_host(self):
        # Not enforced by v2 (schema-only check); intentionally skipped.
        pass

    def test_evacuate_instance_with_invalid_on_shared_storage(self):
        # Not enforced by v2 (schema-only check); intentionally skipped.
        pass

    def test_evacuate_disable_password_return(self):
        # enable_instance_password flag only applies to v2.1; skipped.
        pass

    def test_evacuate_enable_password_return(self):
        # enable_instance_password flag only applies to v2.1; skipped.
        pass
class EvacuatePolicyEnforcementv21(test.NoDBTestCase):
    """Checks that the v2.1 evacuate API enforces its policy rule."""

    def setUp(self):
        super(EvacuatePolicyEnforcementv21, self).setUp()
        self.controller = evacuate_v21.EvacuateController()

    def test_evacuate_policy_failed(self):
        rule = "os_compute_api:os-evacuate"
        # Restrict the rule so the fake request's project cannot satisfy it.
        self.policy.set_rules({rule: "project:non_fake"})
        request = fakes.HTTPRequest.blank('')
        body = {'evacuate': {'host': 'my-host',
                             'onSharedStorage': 'False',
                             'adminPass': 'MyNewPass'
                             }}
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller._evacuate, request, fakes.FAKE_UUID,
            body=body)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule,
            exc.format_message())
| apache-2.0 |
charleswhchan/ansible | lib/ansible/playbook/role/requirement.py | 28 | 7970 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.six import string_types
import os
import shutil
import subprocess
import tempfile
from ansible.errors import AnsibleError
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleRequirement']
VALID_SPEC_KEYS = [
'name',
'role',
'scm',
'src',
'version',
]
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class RoleRequirement(RoleDefinition):
    """
    Helper class for Galaxy, which is used to parse both dependencies
    specified in meta/main.yml and requirements.yml files.
    """

    def __init__(self):
        pass

    @staticmethod
    def repo_url_to_role_name(repo_url):
        """Derive a role name from a repo URL.

        e.g. "http://git.example.com/repos/repo.git" => "repo"
        """
        # Anything that doesn't look like a URL is already a role name.
        if '://' not in repo_url and '@' not in repo_url:
            return repo_url
        trailing_path = repo_url.split('/')[-1]
        if trailing_path.endswith('.git'):
            trailing_path = trailing_path[:-4]
        if trailing_path.endswith('.tar.gz'):
            trailing_path = trailing_path[:-7]
        if ',' in trailing_path:
            trailing_path = trailing_path.split(',')[0]
        return trailing_path

    @staticmethod
    def role_spec_parse(role_spec):
        """Parse the deprecated comma separated role spec format.

        Takes a repo and a version like
            git+http://git.example.com/repos/repo.git,v1.0
        and returns a dict of properties such as:
            {
                'scm': 'git',
                'src': 'http://git.example.com/repos/repo.git',
                'version': 'v1.0',
                'name': 'repo'
            }
        """
        display.deprecated("The comma separated role spec format, use the yaml/explicit format instead.")

        default_role_versions = dict(git='master', hg='tip')

        role_spec = role_spec.strip()
        role_version = ''
        # NOTE: empty/comment lines return a 4-tuple rather than a dict;
        # callers depend on this historical behaviour.
        if role_spec == "" or role_spec.startswith("#"):
            return (None, None, None, None)

        tokens = [s.strip() for s in role_spec.split(',')]

        # assume https://github.com URLs are git+https:// URLs and not
        # tarballs unless they end in '.zip'
        if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
            tokens[0] = 'git+' + tokens[0]

        if '+' in tokens[0]:
            (scm, role_url) = tokens[0].split('+')
        else:
            scm = None
            role_url = tokens[0]

        if len(tokens) >= 2:
            role_version = tokens[1]

        if len(tokens) == 3:
            role_name = tokens[2]
        else:
            role_name = RoleRequirement.repo_url_to_role_name(tokens[0])

        if scm and not role_version:
            role_version = default_role_versions.get(scm, '')

        return dict(scm=scm, src=role_url, version=role_version, name=role_name)

    @staticmethod
    def role_yaml_parse(role):
        """Normalize a requirements.yml entry (string or dict) to a spec dict
        containing only the VALID_SPEC_KEYS.
        """
        if isinstance(role, string_types):
            name = None
            scm = None
            src = None
            version = None
            if ',' in role:
                if role.count(',') == 1:
                    (src, version) = role.strip().split(',', 1)
                elif role.count(',') == 2:
                    (src, version, name) = role.strip().split(',', 2)
                else:
                    raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
            else:
                src = role

            if name is None:
                name = RoleRequirement.repo_url_to_role_name(src)
            if '+' in src:
                (scm, src) = src.split('+', 1)

            return dict(name=name, src=src, scm=scm, version=version)

        if 'role' in role:
            # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
            role = RoleRequirement.role_spec_parse(role['role'])
        else:
            role = role.copy()

            if 'src' in role:
                # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
                if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
                    role["src"] = "git+" + role["src"]

                if '+' in role["src"]:
                    (scm, src) = role["src"].split('+')
                    role["scm"] = scm
                    role["src"] = src

                if 'name' not in role:
                    role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])

            if 'version' not in role:
                role['version'] = ''

            if 'scm' not in role:
                role['scm'] = None

        # iterate over a snapshot of the keys: popping while iterating the
        # live dict view raises RuntimeError on Python 3
        for key in list(role.keys()):
            if key not in VALID_SPEC_KEYS:
                role.pop(key)

        return role

    @staticmethod
    def scm_archive_role(src, scm='git', name=None, version='HEAD'):
        """Clone *src* with git/hg, check out *version* and return the path
        of a tar archive of the role.

        Raises AnsibleError on unsupported scm or any failing command.
        """
        if scm not in ['hg', 'git']:
            raise AnsibleError("- scm %s is not currently supported" % scm)
        tempdir = tempfile.mkdtemp()
        clone_cmd = [scm, 'clone', src, name]
        with open('/dev/null', 'w') as devnull:
            # was a bare "except:"; narrowed to the errors Popen actually
            # raises, matching the checkout invocation below
            try:
                popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
            except (IOError, OSError):
                raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
            rc = popen.wait()
        if rc != 0:
            raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(clone_cmd), tempdir, rc))

        if scm == 'git' and version:
            checkout_cmd = [scm, 'checkout', version]
            with open('/dev/null', 'w') as devnull:
                try:
                    popen = subprocess.Popen(checkout_cmd, cwd=os.path.join(tempdir, name), stdout=devnull, stderr=devnull)
                except (IOError, OSError):
                    raise AnsibleError("error executing: %s" % " ".join(checkout_cmd))
                rc = popen.wait()
            if rc != 0:
                raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(checkout_cmd), tempdir, rc))

        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
        if scm == 'hg':
            archive_cmd = ['hg', 'archive', '--prefix', "%s/" % name]
            if version:
                archive_cmd.extend(['-r', version])
            archive_cmd.append(temp_file.name)
        if scm == 'git':
            archive_cmd = ['git', 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
            if version:
                archive_cmd.append(version)
            else:
                archive_cmd.append('HEAD')

        with open('/dev/null', 'w') as devnull:
            popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, name),
                                     stderr=devnull, stdout=devnull)
            rc = popen.wait()
        if rc != 0:
            raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(archive_cmd), tempdir, rc))

        shutil.rmtree(tempdir, ignore_errors=True)
        return temp_file.name
| gpl-3.0 |
CS-SI/QGIS | python/plugins/processing/preconfigured/PreconfiguredAlgorithmDialog.py | 17 | 4528 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PreconfiguredAlgorithmDialog.py
---------------------
Date : April 2016
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2016'
__copyright__ = '(C) 2016, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import json
import os

from qgis.PyQt.QtWidgets import QMessageBox, QVBoxLayout, QLabel, QLineEdit, QWidget
from qgis.PyQt.QtGui import QPalette, QColor
from qgis.core import Qgis
from qgis.core import QgsApplication
from qgis.gui import QgsMessageBar

from processing.preconfigured.PreconfiguredUtils import algAsDict
from processing.preconfigured.PreconfiguredUtils import preconfiguredAlgorithmsFolder
from processing.gui.AlgorithmDialogBase import AlgorithmDialogBase
from processing.gui.AlgorithmDialog import AlgorithmDialog
from processing.tools import dataobjects
class PreconfiguredAlgorithmDialog(AlgorithmDialog):
    """Dialog for saving an algorithm plus its parameter values as a
    preconfigured algorithm (stored as a JSON file).
    """

    def __init__(self, alg, toolbox):
        AlgorithmDialog.__init__(self, alg)
        self.toolbox = toolbox
        self.runButton().setText(self.tr("OK"))
        # replace the log tab with the name/group description panel
        self.tabWidget().removeTab(1)
        self.settingsPanel = SettingsPanel()
        self.tabWidget().addTab(self.settingsPanel, "Description")

    def accept(self):
        """Validate the parameters and write the preconfigured algorithm.

        On success the description is dumped to the preconfigured
        algorithms folder and the provider is refreshed; invalid
        parameters keep the dialog open with the offending widget
        highlighted.
        """
        context = dataobjects.createContext()

        try:
            parameters = self.getParameterValues()
            self.setOutputValues()
            ok, msg = self.algorithm().checkParameterValues(parameters, context)
            if not ok:
                QMessageBox.warning(
                    self, self.tr('Unable to execute algorithm'), msg)
                return
            description = algAsDict(self.algorithm())
            description["name"] = self.settingsPanel.txtName.text().strip()
            description["group"] = self.settingsPanel.txtGroup.text().strip()
            if not (description["name"] and description["group"]):
                # Missing name/group: jump to the Description tab and stay open.
                self.tabWidget().setCurrentIndex(self.tabWidget().count() - 1)
                return
            validChars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:'
            filename = ''.join(c for c in description["name"] if c in validChars).lower() + ".json"
            filepath = os.path.join(preconfiguredAlgorithmsFolder(), filename)
            with open(filepath, "w") as f:
                json.dump(description, f)
            QgsApplication.processingRegistry().providerById('preconfigured').refreshAlgorithms()
        except AlgorithmDialogBase.InvalidParameterValue as e:
            try:
                # Highlight the offending widget until the dialog is accepted.
                self.buttonBox().accepted.connect(lambda: e.widget.setPalette(QPalette()))
                palette = e.widget.palette()
                palette.setColor(QPalette.Base, QColor(255, 255, 0))
                e.widget.setPalette(palette)
                # NOTE(fix): Qgis is now imported from qgis.core — this line
                # previously raised NameError whenever it was reached.
                self.messageBar().pushMessage("", self.tr('Missing parameter value: {0}').format(
                                              e.parameter.description()),
                                              level=Qgis.Warning, duration=5)
                return
            except Exception:
                # was a bare "except:"; fall back to a generic error dialog
                QMessageBox.critical(self,
                                     self.tr('Unable to execute algorithm'),
                                     self.tr('Wrong or missing parameter values'))
        self.close()
class SettingsPanel(QWidget):
    """Simple form holding the Name and Group fields for the
    preconfigured algorithm description tab."""

    def __init__(self):
        QWidget.__init__(self)
        self.txtName = QLineEdit()
        self.txtGroup = QLineEdit()
        layout = QVBoxLayout()
        # label/field pairs, added top to bottom
        for widget in (QLabel("Name"), self.txtName,
                       QLabel("Group"), self.txtGroup):
            layout.addWidget(widget)
        layout.addStretch()
        self.setLayout(layout)
| gpl-2.0 |
1185/starwels | test/functional/wallet_hd.py | 2 | 5199 | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Starwels developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import StarwelsTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
)
import shutil
import os
class WalletHDTest(StarwelsTestFramework):
    """Exercise HD wallet derivation, backup/restore and rescan behaviour."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        # node1 runs with an empty keypool so every key is derived on demand
        self.extra_args = [[], ['-keypool=0']]

    def run_test(self):
        tmpdir = self.options.tmpdir

        # Make sure can't switch off usehd after wallet creation
        self.stop_node(1)
        self.assert_start_raises_init_error(1, ['-usehd=0'], 'already existing HD wallet')
        self.start_node(1)
        connect_nodes_bi(self.nodes, 0, 1)

        # Make sure we use hd, keep masterkeyid
        masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
        assert_equal(len(masterkeyid), 40)

        # create an internal (change) key and check its derivation path
        change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/0'/1'/0'")  # first internal child key

        # Import a non-HD private key in the HD wallet
        non_hd_add = self.nodes[0].getnewaddress()
        self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))

        # This should be enough to keep the master key and the non-HD key
        self.nodes[1].backupwallet(tmpdir + "/hd.bak")
        # self.nodes[1].dumpwallet(tmpdir + "/hd.dump")

        # Derive some HD addresses and remember the last
        # Also send funds to each add
        self.nodes[0].generate(101)
        hd_add = None
        num_hd_adds = 300
        for i in range(num_hd_adds):
            hd_add = self.nodes[1].getnewaddress()
            hd_info = self.nodes[1].validateaddress(hd_add)
            # external chain keys are derived at m/0'/0'/i'
            assert_equal(hd_info["hdkeypath"], "m/0'/0'/"+str(i)+"'")
            assert_equal(hd_info["hdmasterkeyid"], masterkeyid)
            self.nodes[0].sendtoaddress(hd_add, 1)
            self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(non_hd_add, 1)
        self.nodes[0].generate(1)

        # create an internal key (again)
        change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
        assert_equal(change_addrV["hdkeypath"], "m/0'/1'/1'")  # second internal child key

        self.sync_all()
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        self.log.info("Restore backup ...")
        self.stop_node(1)
        # we need to delete the complete regtest directory
        # otherwise node1 would auto-recover all funds and flag the keypool keys as used
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
        shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallets/wallet.dat"))
        self.start_node(1)

        # Assert that derivation is deterministic
        hd_add_2 = None
        for _ in range(num_hd_adds):
            hd_add_2 = self.nodes[1].getnewaddress()
            hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
            assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/"+str(_)+"'")
            assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid)
        assert_equal(hd_add, hd_add_2)
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()

        # Needs rescan to pick up the funds sent to the re-derived keys
        self.stop_node(1)
        self.start_node(1, extra_args=self.extra_args[1] + ['-rescan'])
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        # Try a RPC based rescan
        self.stop_node(1)
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/blocks"))
        shutil.rmtree(os.path.join(tmpdir, "node1/regtest/chainstate"))
        shutil.copyfile(os.path.join(tmpdir, "hd.bak"), os.path.join(tmpdir, "node1/regtest/wallet.dat"))
        self.start_node(1, extra_args=self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
        out = self.nodes[1].rescanblockchain(0, 1)
        assert_equal(out['start_height'], 0)
        assert_equal(out['stop_height'], 1)
        out = self.nodes[1].rescanblockchain()
        assert_equal(out['start_height'], 0)
        assert_equal(out['stop_height'], self.nodes[1].getblockcount())
        assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)

        # send a tx and make sure its using the internal chain for the changeoutput
        txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout']
        keypath = ""
        for out in outs:
            if out['value'] != 1:
                # the non-1 output is the change; it must come from m/0'/1'
                keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']

        assert_equal(keypath[0:7], "m/0'/1'")
if __name__ == '__main__':
WalletHDTest().main ()
| mit |
cinaljess/btools | type2_endcheck.py | 1 | 3097 | """
Module to check whether TypeIIs end-sites are compatible. Looks for
3bp homology then 2 basepair edge-homology for each input sequence given.
Comparisons are made between each element in the list and to the reverse
complement of each element. Repeat elements are also validated along with
noncanonical basepairs.
Example:
python2.7 type2_endcheck.py "GAGG, GAGG, TACT, GACT, PAPP"
"""
import sys
from string import maketrans
from collections import Counter
def main(ends_list):
ends_list = ends_list.split(",")
ends_list = [x.strip(" ") for x in ends_list]
ncs = 'ATGC'
silly_list = []
for end in ends_list:
for c in end:
if c not in ncs:
ter = ends_list.index(end)
silly_list = ends_list.pop(ter)
break
notgood = False
sim_list = set([])
rc_list = [revcomplement(x) for x in ends_list]
counts = Counter(ends_list)
self_list = set([])
# Check list for repeats
for c, n in counts.items():
if n >= 2:
notgood = True
self_list.add((c))
for x in ends_list:
# Validate no ends share homology to each other
for g in ends_list:
if g != x:
score = align(x, g)
if score >= 3:
notgood = True
sim_list.add((x, g))
# Validate no reverse complements are equivalent to entry list
if x in rc_list:
notgood = True
idx = rc_list.index(x)
sim_list.add((x, rc_list[idx]))
# Validate no ends share 3 max homology & 2bp edge homology of revers complement list
for h in rc_list:
revscore = align(x, h)
if revscore >= 3:
rrevset = [h, reverse_region(h)]
for p in rrevset:
rpositionscore = align(x[:2], p[:2])
if rpositionscore == 2:
notgood = True
sim_list.add((x, p))
if not notgood:
print('Good to go!!!')
if silly_list:
print 'Bad entry: ', silly_list
else:
print('Not good!')
if silly_list:
print 'Bad entry: ', silly_list
for x in sim_list:
print 'Entry: ' + str(x[0]) + ' > (' + revcomplement(x[0]) + ') : ' + revcomplement(x[1]) + ' > (' + \
reverse_region(x[1]) + ')'
for x in self_list:
print 'Entry: ' + x + ' appeared more than once'
def revcomplement(seq):
    """
    Return the reverse complement of *seq*.

    Understands the IUPAC ambiguity codes used by this module (W, S, K,
    M, Y, R, N) and preserves case for the canonical bases; note the
    lowercase ambiguity codes other than 'n' are not mapped.
    """
    try:
        from string import maketrans  # Python 2
    except ImportError:
        maketrans = str.maketrans     # Python 3 equivalent
    # Bug fix: the original table mapped Y->T and left R->Y, but the IUPAC
    # complement pairs are Y (C/T) <-> R (A/G).
    revcompTBL = maketrans('AGCTagctWSKMYRnN', 'TCGAtcgaWSMKRYnN')
    return seq.translate(revcompTBL)[::-1]
def reverse_region(region):
    """Return the sequence string reversed (no complementing)."""
    return ''.join(reversed(region))
def align(end, rcend):
    """Count the positions at which the two sequences carry the same base.

    Comparison stops at the end of the shorter sequence.
    """
    return sum(1 for a, b in zip(end, rcend) if a == b)
# Entry point: expects a single comma-separated string of end sequences,
# e.g.  python2.7 type2_endcheck.py "GAGG, GAGG, TACT"
if __name__ == "__main__":
    main(sys.argv[1])
| mit |
lmprice/ansible | lib/ansible/module_utils/network/junos/junos.py | 19 | 13610 | #
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import collections
import json
from contextlib import contextmanager
from copy import deepcopy
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.netconf import NetconfConnection
from ansible.module_utils._text import to_text
try:
from lxml.etree import Element, SubElement, fromstring, tostring
HAS_LXML = True
except ImportError:
from xml.etree.ElementTree import Element, SubElement, fromstring, tostring
HAS_LXML = False
# Actions accepted by <load-configuration>; JSON payloads support a subset.
ACTIONS = frozenset(['merge', 'override', 'replace', 'update', 'set'])
JSON_ACTIONS = frozenset(['merge', 'override', 'update'])
# Formats accepted when loading / retrieving device configuration.
FORMATS = frozenset(['xml', 'text', 'json'])
CONFIG_FORMATS = frozenset(['xml', 'text', 'json', 'set'])
# Connection parameters accepted under the (deprecated) 'provider' dict.
junos_provider_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'timeout': dict(type='int'),
    'transport': dict(default='netconf', choices=['cli', 'netconf'])
}
# Argument-spec fragment shared by all junos_* modules.
junos_argument_spec = {
    'provider': dict(type='dict', options=junos_provider_spec),
}
# Top-level duplicates of the provider options; deprecated, slated for
# removal in Ansible 2.9.
junos_top_spec = {
    'host': dict(removed_in_version=2.9),
    'port': dict(removed_in_version=2.9, type='int'),
    'username': dict(removed_in_version=2.9),
    'password': dict(removed_in_version=2.9, no_log=True),
    'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
    'timeout': dict(removed_in_version=2.9, type='int'),
    'transport': dict(removed_in_version=2.9)
}
junos_argument_spec.update(junos_top_spec)
def get_provider_argspec():
    """Return the argument spec for the deprecated 'provider' dict."""
    return junos_provider_spec
def get_connection(module):
    """Return the connection for *module*, creating and caching it on first use.

    Picks a CLI or NETCONF transport based on the device capabilities and
    stores the connection on the module object so later calls reuse it.
    """
    if not hasattr(module, '_junos_connection'):
        network_api = get_capabilities(module).get('network_api')
        if network_api == 'cliconf':
            module._junos_connection = Connection(module._socket_path)
        elif network_api == 'netconf':
            module._junos_connection = NetconfConnection(module._socket_path)
        else:
            module.fail_json(msg='Invalid connection type %s' % network_api)
    return module._junos_connection
def get_capabilities(module):
    """Return the device capabilities as a dict, cached on the module object."""
    if not hasattr(module, '_junos_capabilities'):
        raw = Connection(module._socket_path).get_capabilities()
        module._junos_capabilities = json.loads(raw)
    return module._junos_capabilities
def _validate_rollback_id(module, value):
try:
if not 0 <= int(value) <= 49:
raise ValueError
except ValueError:
module.fail_json(msg='rollback must be between 0 and 49')
def load_configuration(module, candidate=None, action='merge', rollback=None, format='xml'):
    """Load *candidate* configuration onto the device, or roll back.

    Exactly one of candidate/rollback must be given.  The action/format
    combination is validated first: JSON supports only a subset of
    actions, and 'set' requires text format.  Returns the connection's
    load reply for the candidate path; the rollback path issues the RPC
    directly and returns None.
    """
    if all((candidate is None, rollback is None)):
        module.fail_json(msg='one of candidate or rollback must be specified')
    elif all((candidate is not None, rollback is not None)):
        module.fail_json(msg='candidate and rollback are mutually exclusive')
    if format not in FORMATS:
        module.fail_json(msg='invalid format specified')
    if format == 'json' and action not in JSON_ACTIONS:
        module.fail_json(msg='invalid action for format json')
    elif format in ('text', 'xml') and action not in ACTIONS:
        module.fail_json(msg='invalid action format %s' % format)
    if action == 'set' and not format == 'text':
        module.fail_json(msg='format must be text when action is set')
    conn = get_connection(module)
    if rollback is not None:
        _validate_rollback_id(module, rollback)
        obj = Element('load-configuration', {'rollback': str(rollback)})
        conn.execute_rpc(tostring(obj))
    else:
        return conn.load_configuration(config=candidate, action=action, format=format)
def get_configuration(module, compare=False, format='xml', rollback='0', filter=None):
    """Fetch the device configuration.

    With compare=True, returns the device's comparison against the given
    *rollback* id instead of the plain configuration.
    """
    if format not in CONFIG_FORMATS:
        module.fail_json(msg='invalid config format specified')
    conn = get_connection(module)
    if compare:
        xattrs = {'format': format}
        _validate_rollback_id(module, rollback)
        xattrs['compare'] = 'rollback'
        xattrs['rollback'] = str(rollback)
        reply = conn.execute_rpc(tostring(Element('get-configuration', xattrs)))
    else:
        reply = conn.get_configuration(format=format, filter=filter)
    return reply
def commit_configuration(module, confirm=False, check=False, comment=None, confirm_timeout=None, synchronize=False,
                         at_time=None, exit=False):
    """Validate (check=True) or commit the candidate configuration.

    NOTE(review): the 'exit' parameter is accepted but never used here --
    confirm whether callers rely on it.
    """
    conn = get_connection(module)
    if check:
        return conn.validate()
    return conn.commit(confirmed=confirm, timeout=confirm_timeout,
                       comment=comment, synchronize=synchronize,
                       at_time=at_time)
def command(module, cmd, format='text', rpc_only=False):
    """Run an operational command on the device.

    With rpc_only=True the command is suffixed with '| display xml rpc'
    so the device returns the equivalent RPC instead of executing it.
    """
    if rpc_only:
        cmd = cmd + ' | display xml rpc'
    return get_connection(module).command(command=cmd, format=format)
def lock_configuration(x):
    """Take the exclusive configuration lock (x is an AnsibleModule)."""
    conn = get_connection(x)
    return conn.lock()
def unlock_configuration(x):
    """Release the exclusive configuration lock (x is an AnsibleModule)."""
    conn = get_connection(x)
    return conn.unlock()
@contextmanager
def locked_config(module):
    """Context manager holding the exclusive configuration lock for the block.

    The unlock runs in a finally clause, so it happens even if lock
    acquisition partially succeeded or the body raised.
    """
    try:
        lock_configuration(module)
        yield
    finally:
        unlock_configuration(module)
def discard_changes(module):
    """Discard the uncommitted candidate configuration on the device."""
    conn = get_connection(module)
    return conn.discard_changes()
def get_diff(module, rollback='0'):
    """Return the textual configuration diff against *rollback*, or None.

    Returns None when the device replied with warnings only (empty diff)
    or when no configuration-output element is present.
    """
    reply = get_configuration(module, compare=True, format='text', rollback=rollback)
    # if warning is received from device diff is empty.
    if isinstance(reply, list):
        return None
    output = reply.find('.//configuration-output')
    if output is not None:
        return to_text(output.text, encoding='latin-1').strip()
def load_config(module, candidate, warnings, action='merge', format='xml'):
    """Load *candidate* (string or list of lines), validate it, return the diff.

    Device warnings (a list reply) are appended to the *warnings* list.
    Returns None when candidate is empty.
    """
    get_connection(module)
    if not candidate:
        return
    if isinstance(candidate, list):
        candidate = '\n'.join(candidate)
    reply = load_configuration(module, candidate, action=action, format=format)
    if isinstance(reply, list):
        warnings.extend(reply)
    module._junos_connection.validate()
    return get_diff(module)
def get_param(module, key):
    """Return module.params[key], falling back to the provider dict.

    A falsy top-level value falls through to the provider lookup; returns
    None when the key is present in neither place.
    """
    if module.params.get(key):
        return module.params[key]
    provider = module.params.get('provider')
    if provider:
        return provider.get(key)
    return None
def map_params_to_obj(module, param_to_xpath_map, param=None):
    """
    Creates a new dictionary with key as xpath corresponding
    to param and value is a list of dict with metadata and values for
    the xpath.
    Acceptable metadata keys:
        'value': Value of param.
        'tag_only': Value is indicated by tag only in xml hierarchy.
        'leaf_only': If operation is to be added at leaf node only.
        'value_req': If value(text) is requried for leaf node.
        'is_key': If the field is key or not.
    eg: Output
    {
        'name': [{'value': 'ge-0/0/1'}]
        'disable': [{'value': True, tag_only': True}]
    }
    :param module: AnsibleModule whose params are used when *param* is None
    :param param_to_xpath_map: Modules params to xpath map
    :return: obj
    """
    if not param:
        param = module.params
    result = collections.OrderedDict()
    for key, attribute in param_to_xpath_map.items():
        if key not in param:
            continue
        values = param[key]
        if not isinstance(values, (list, tuple)):
            values = [values]
        # An attribute may be a bare xpath string or a metadata dict
        # carrying the xpath plus extra flags.
        if isinstance(attribute, dict):
            xpath = attribute.get('xpath')
            metadata = attribute
        else:
            xpath = attribute
            metadata = None
        result.setdefault(xpath, [])
        for val in values:
            if metadata is not None:
                entry = deepcopy(metadata)
                del entry['xpath']
                entry['value'] = val
            else:
                entry = {'value': val}
            result[xpath].append(entry)
    return result
def map_obj_to_ele(module, want, top, value_map=None, param=None):
    """Build an lxml element tree for a netconf edit-config request.

    :param want: dict mapping xpaths to lists of attribute dicts, as
                 produced by map_params_to_obj
    :param top: xpath of the top container, e.g. 'system/syslog/file'
    :param value_map: optional per-xpath translation of param values to
                      device-specific values
    :param param: params dict to use instead of module.params
    :return: the root Element of the generated subtree
    """
    if not HAS_LXML:
        module.fail_json(msg='lxml is not installed.')
    if not param:
        param = module.params
    root = Element('root')
    # Create the container chain for every path component but the last.
    top_ele = top.split('/')
    ele = SubElement(root, top_ele[0])
    if len(top_ele) > 1:
        for item in top_ele[1:-1]:
            ele = SubElement(ele, item)
    container = ele
    state = param.get('state')
    active = param.get('active')
    if active:
        oper = 'active'
    else:
        oper = 'inactive'
    # build xml subtree
    if container.tag != top_ele[-1]:
        node = SubElement(container, top_ele[-1])
    else:
        node = container
    for fxpath, attributes in want.items():
        for attr in attributes:
            tag_only = attr.get('tag_only', False)
            leaf_only = attr.get('leaf_only', False)
            value_req = attr.get('value_req', False)
            is_key = attr.get('is_key', False)
            parent_attrib = attr.get('parent_attrib', True)
            value = attr.get('value')
            field_top = attr.get('top')
            # operation 'delete' is added as element attribute
            # only if it is key or leaf only node
            if state == 'absent' and not (is_key or leaf_only):
                continue
            # convert param value to device specific value
            if value_map and fxpath in value_map:
                value = value_map[fxpath].get(value)
            if (value is not None) or tag_only or leaf_only:
                ele = node
                if field_top:
                    # eg: top = 'system/syslog/file'
                    #     field_top = 'system/syslog/file/contents'
                    # <file>
                    #   <name>test</name>
                    #   <contents>
                    #   </contents>
                    # </file>
                    ele_list = root.xpath(top + '/' + field_top)
                    if not len(ele_list):
                        # Create any missing intermediate elements one by one.
                        fields = field_top.split('/')
                        ele = node
                        for item in fields:
                            inner_ele = root.xpath(top + '/' + item)
                            if len(inner_ele):
                                ele = inner_ele[0]
                            else:
                                ele = SubElement(ele, item)
                    else:
                        ele = ele_list[0]
                if value is not None and not isinstance(value, bool):
                    value = to_text(value, errors='surrogate_then_replace')
                if fxpath:
                    tags = fxpath.split('/')
                    for item in tags:
                        ele = SubElement(ele, item)
                if tag_only:
                    if state == 'present':
                        if not value:
                            # if value of tag_only node is false, delete the node
                            ele.set('delete', 'delete')
                elif leaf_only:
                    if state == 'present':
                        ele.set(oper, oper)
                        ele.text = value
                    else:
                        ele.set('delete', 'delete')
                        # Add value of leaf node if required while deleting.
                        # in some cases if value is present while deleting, it
                        # can result in error, hence the check
                        if value_req:
                            ele.text = value
                        if is_key:
                            par = ele.getparent()
                            par.set('delete', 'delete')
                else:
                    ele.text = value
                    par = ele.getparent()
                    if parent_attrib:
                        if state == 'present':
                            # set replace attribute at parent node
                            if not par.attrib.get('replace'):
                                par.set('replace', 'replace')
                            # set active/inactive at parent node
                            if not par.attrib.get(oper):
                                par.set(oper, oper)
                        else:
                            par.set('delete', 'delete')
    return root.getchildren()[0]
def to_param_list(module):
    """Normalize module params to a list of param dicts.

    Returns the 'aggregate' value as-is when it is already a list, wraps
    a single aggregate dict, and falls back to [module.params] when no
    aggregate was supplied.
    """
    aggregate = module.params.get('aggregate')
    if not aggregate:
        return [module.params]
    if isinstance(aggregate, dict):
        return [aggregate]
    return aggregate
| gpl-3.0 |
ketjow4/NOV | Lib/anydbm.py | 253 | 2663 | """Generic interface to all dbm clones.
Instead of
import dbm
d = dbm.open(file, 'w', 0666)
use
import anydbm
d = anydbm.open(file, 'w')
The returned object is a dbhash, gdbm, dbm or dumbdbm object,
dependent on the type of database being opened (determined by whichdb
module) in the case of an existing dbm. If the dbm does not exist and
the create or new flag ('c' or 'n') was specified, the dbm type will
be determined by the availability of the modules (tested in the above
order).
It has the following interface (key and data are strings):
d[key] = data # store data at key (may override data at
# existing key)
data = d[key] # retrieve data at key (raise KeyError if no
# such key)
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # return a list of all existing keys (slow!)
Future versions may change the order in which implementations are
tested for existence, and add interfaces to other dbm-like
implementations.
"""
class error(Exception):
    """anydbm-level failure; rebound below to a tuple of backend errors."""
    pass
# Probe for available dbm implementations, in preference order.  The first
# importable module becomes the default backend; each backend's error class
# is collected so `error` can become a catch-all tuple below.
_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
_errors = [error]
_defaultmod = None
for _name in _names:
    try:
        _mod = __import__(_name)
    except ImportError:
        continue
    if not _defaultmod:
        _defaultmod = _mod
    _errors.append(_mod.error)
if not _defaultmod:
    raise ImportError, "no dbm clone found; tried %s" % _names
# Rebind `error` to a tuple so callers can catch any backend's error type.
error = tuple(_errors)
def open(file, flag='r', mode=0666):
    """Open or create database at path given by *file*.

    Optional argument *flag* can be 'r' (default) for read-only access, 'w'
    for read-write access of an existing database, 'c' for read-write access
    to a new or existing database, and 'n' for read-write access to a new
    database.

    Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
    only if it doesn't exist; and 'n' always creates a new database.
    """
    # guess the type of an existing database
    from whichdb import whichdb
    result=whichdb(file)
    if result is None:
        # db doesn't exist
        if 'c' in flag or 'n' in flag:
            # file doesn't exist and the new
            # flag was used so use default type
            mod = _defaultmod
        else:
            raise error, "need 'c' or 'n' flag to open new db"
    elif result == "":
        # db type cannot be determined
        raise error, "db type could not be determined"
    else:
        # delegate to whichever clone whichdb identified
        mod = __import__(result)
    return mod.open(file, flag, mode)
| gpl-3.0 |
scvalencia/ROBOCOL_desastres | Galileo/Python/venv/lib/python2.7/site-packages/pip/cmdoptions.py | 361 | 9507 | """
shared options and groups
The principle here is to define options once, but *not* instantiate them globally.
One reason being that options with action='append' can carry state between parses.
pip parse's general options twice internally, and shouldn't pass on state.
To be consistent, all options will follow this design.
"""
import copy
from optparse import OptionGroup, SUPPRESS_HELP, Option
from pip.locations import build_prefix, default_log_file
def make_option_group(group, parser):
    """
    Return an OptionGroup object

    group  -- assumed to be dict with 'name' and 'options' keys
    parser -- an optparse Parser
    """
    option_group = OptionGroup(parser, group['name'])
    for maker in group['options']:
        option_group.add_option(maker.make())
    return option_group
class OptionMaker(object):
    """Deferred Option factory.

    Stores the args/kwargs an Option would be built with and constructs a
    fresh Option on each make() call, deep-copying the stored state so
    options with action='append' cannot leak state between parses.
    """

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

    def make(self):
        return Option(*copy.deepcopy(self.args), **copy.deepcopy(self.kwargs))
###########
# options #
###########
# Each OptionMaker below is a deferred optparse Option definition shared by
# multiple pip commands; instantiation is delayed (see OptionMaker) so that
# append-style options do not carry state between parses.
help_ = OptionMaker(
    '-h', '--help',
    dest='help',
    action='help',
    help='Show help.')
require_virtualenv = OptionMaker(
    # Run only if inside a virtualenv, bail if not.
    '--require-virtualenv', '--require-venv',
    dest='require_venv',
    action='store_true',
    default=False,
    help=SUPPRESS_HELP)
verbose = OptionMaker(
    '-v', '--verbose',
    dest='verbose',
    action='count',
    default=0,
    help='Give more output. Option is additive, and can be used up to 3 times.')
version = OptionMaker(
    '-V', '--version',
    dest='version',
    action='store_true',
    help='Show version and exit.')
quiet = OptionMaker(
    '-q', '--quiet',
    dest='quiet',
    action='count',
    default=0,
    help='Give less output.')
log = OptionMaker(
    '--log',
    dest='log',
    metavar='path',
    help='Path to a verbose appending log. This log is inactive by default.')
log_explicit_levels = OptionMaker(
    # Writes the log levels explicitely to the log'
    '--log-explicit-levels',
    dest='log_explicit_levels',
    action='store_true',
    default=False,
    help=SUPPRESS_HELP)
log_file = OptionMaker(
    # The default log file
    '--log-file', '--local-log',
    dest='log_file',
    metavar='path',
    default=default_log_file,
    help='Path to a verbose non-appending log, that only logs failures. This log is active by default at %default.')
no_input = OptionMaker(
    # Don't ask for input
    '--no-input',
    dest='no_input',
    action='store_true',
    default=False,
    help=SUPPRESS_HELP)
proxy = OptionMaker(
    '--proxy',
    dest='proxy',
    type='str',
    default='',
    help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
timeout = OptionMaker(
    '--timeout', '--default-timeout',
    metavar='sec',
    dest='timeout',
    type='float',
    default=15,
    help='Set the socket timeout (default %default seconds).')
default_vcs = OptionMaker(
    # The default version control system for editables, e.g. 'svn'
    '--default-vcs',
    dest='default_vcs',
    type='str',
    default='',
    help=SUPPRESS_HELP)
skip_requirements_regex = OptionMaker(
    # A regex to be used to skip requirements
    '--skip-requirements-regex',
    dest='skip_requirements_regex',
    type='str',
    default='',
    help=SUPPRESS_HELP)
exists_action = OptionMaker(
    # Option when path already exist
    '--exists-action',
    dest='exists_action',
    type='choice',
    choices=['s', 'i', 'w', 'b'],
    default=[],
    action='append',
    metavar='action',
    help="Default action when a path already exists: "
         "(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = OptionMaker(
    '--cert',
    dest='cert',
    type='str',
    default='',
    metavar='path',
    help = "Path to alternate CA bundle.")
# Package-index related options below.
index_url = OptionMaker(
    '-i', '--index-url', '--pypi-url',
    dest='index_url',
    metavar='URL',
    default='https://pypi.python.org/simple/',
    help='Base URL of Python Package Index (default %default).')
extra_index_url = OptionMaker(
    '--extra-index-url',
    dest='extra_index_urls',
    metavar='URL',
    action='append',
    default=[],
    help='Extra URLs of package indexes to use in addition to --index-url.')
no_index = OptionMaker(
    '--no-index',
    dest='no_index',
    action='store_true',
    default=False,
    help='Ignore package index (only looking at --find-links URLs instead).')
find_links = OptionMaker(
    '-f', '--find-links',
    dest='find_links',
    action='append',
    default=[],
    metavar='url',
    help="If a url or path to an html file, then parse for links to archives. If a local path or file:// url that's a directory, then look for archives in the directory listing.")
# TODO: Remove after 1.6
use_mirrors = OptionMaker(
    '-M', '--use-mirrors',
    dest='use_mirrors',
    action='store_true',
    default=False,
    help=SUPPRESS_HELP)
# TODO: Remove after 1.6
mirrors = OptionMaker(
    '--mirrors',
    dest='mirrors',
    metavar='URL',
    action='append',
    default=[],
    help=SUPPRESS_HELP)
allow_external = OptionMaker(
    "--allow-external",
    dest="allow_external",
    action="append",
    default=[],
    metavar="PACKAGE",
    help="Allow the installation of externally hosted files",
)
# Options controlling externally-hosted / unverified files, plus the
# install-related options and the shared option groups.
allow_all_external = OptionMaker(
    "--allow-all-external",
    dest="allow_all_external",
    action="store_true",
    default=False,
    help="Allow the installation of all externally hosted files",
)
# Remove after 1.7
no_allow_external = OptionMaker(
    "--no-allow-external",
    dest="allow_all_external",
    action="store_false",
    default=False,
    help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 1.7
allow_unsafe = OptionMaker(
    "--allow-unverified", "--allow-insecure",
    dest="allow_unverified",
    action="append",
    default=[],
    metavar="PACKAGE",
    help="Allow the installation of insecure and unverifiable files",
)
# Remove after 1.7
no_allow_unsafe = OptionMaker(
    "--no-allow-insecure",
    dest="allow_all_insecure",
    action="store_false",
    default=False,
    help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = OptionMaker(
    "--process-dependency-links",
    dest="process_dependency_links",
    action="store_true",
    default=False,
    help="Enable the processing of dependency links.",
)
requirements = OptionMaker(
    '-r', '--requirement',
    dest='requirements',
    action='append',
    default=[],
    metavar='file',
    help='Install from the given requirements file. '
         'This option can be used multiple times.')
use_wheel = OptionMaker(
    '--use-wheel',
    dest='use_wheel',
    action='store_true',
    help=SUPPRESS_HELP,
)
no_use_wheel = OptionMaker(
    '--no-use-wheel',
    dest='use_wheel',
    action='store_false',
    default=True,
    help=('Do not Find and prefer wheel archives when searching indexes and '
          'find-links locations.'),
)
download_cache = OptionMaker(
    '--download-cache',
    dest='download_cache',
    metavar='dir',
    default=None,
    help='Cache downloaded packages in <dir>.')
no_deps = OptionMaker(
    '--no-deps', '--no-dependencies',
    dest='ignore_dependencies',
    action='store_true',
    default=False,
    help="Don't install package dependencies.")
build_dir = OptionMaker(
    '-b', '--build', '--build-dir', '--build-directory',
    dest='build_dir',
    metavar='dir',
    default=build_prefix,
    help='Directory to unpack packages into and build in. '
         'The default in a virtualenv is "<venv path>/build". '
         'The default for global installs is "<OS temp dir>/pip_build_<username>".')
install_options = OptionMaker(
    '--install-option',
    dest='install_options',
    action='append',
    metavar='options',
    help="Extra arguments to be supplied to the setup.py install "
         "command (use like --install-option=\"--install-scripts=/usr/local/bin\"). "
         "Use multiple --install-option options to pass multiple options to setup.py install. "
         "If you are using an option with a directory path, be sure to use absolute path.")
global_options = OptionMaker(
    '--global-option',
    dest='global_options',
    action='append',
    metavar='options',
    help="Extra global options to be supplied to the setup.py "
         "call before the install command.")
no_clean = OptionMaker(
    '--no-clean',
    action='store_true',
    default=False,
    help="Don't clean up build directories.")
##########
# groups #
##########
# Option groups consumed by make_option_group() above.
general_group = {
    'name': 'General Options',
    'options': [
        help_,
        require_virtualenv,
        verbose,
        version,
        quiet,
        log_file,
        log,
        log_explicit_levels,
        no_input,
        proxy,
        timeout,
        default_vcs,
        skip_requirements_regex,
        exists_action,
        cert,
        ]
    }
index_group = {
    'name': 'Package Index Options',
    'options': [
        index_url,
        extra_index_url,
        no_index,
        find_links,
        use_mirrors,
        mirrors,
        allow_external,
        allow_all_external,
        no_allow_external,
        allow_unsafe,
        no_allow_unsafe,
        process_dependency_links,
        ]
    }
| mit |
Tao-Ma/gpdb | gpMgmt/test/behave/mgmt_utils/steps/scripts/part_data.py | 35 | 1093 | #!/usr/bin/env python
import sys
import datetime
# Usage: part_data.py YEAR COPYYEAR NUMDAYS
# Emits SQL INSERT statements that copy COPYYEAR's Jan 1st rows into
# DAY_COUNT consecutive dates of YEAR, starting at Jan 2nd.
if len(sys.argv) != 4:
    print "%s YEAR COPYYEAR NUMDAYS" % sys.argv[0]
    sys.exit(0)
YEAR = int(sys.argv[1])
COPYYEAR = int(sys.argv[2])
DAY_COUNT = int(sys.argv[3])
# NOTE(review): 'description' is never referenced below -- confirm it is
# dead code (the INSERTs select the table's own description column).
description = """abcdefghijklmnopqrstuvwqyz.?{}-abcdefghijklmnopqrstuvwqyz.?{}abcdefghijklmnopqrstuvwqyz.?{}
abcdefghijklmnopqrstuvwqyz.?{}abcdefghijklmnopqrstuvwqyz.?{}abcdefghijklmnopqrstuvwqyz.?{}abcdefghijklmnopqrstuvwqyz.?{}"""
# Start at Jan 2nd of YEAR and emit one INSERT per table per day.
date_iterator = datetime.date(YEAR, 01, 02)
delta = datetime.timedelta(days=1)
for i in range(DAY_COUNT):
    print "INSERT INTO ao_part_table select '%s', data, description from ao_part_table where eventdate = '%s-01-01';" \
        % (str(date_iterator), COPYYEAR)
    print "INSERT INTO co_part_table select '%s', data, description from ao_part_table where eventdate = '%s-01-01';" \
        % (str(date_iterator), COPYYEAR)
    print "INSERT INTO part_table select '%s', data, description from part_table where eventdate = '%s-01-01';" \
        % (str(date_iterator), COPYYEAR)
    date_iterator = date_iterator + delta
| apache-2.0 |
Rogentos/legacy-anaconda | installclasses/awesome.py | 1 | 2620 | #
# awesome.py
#
# Copyright (C) 2010 Fabio Erculiani
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from installclass import BaseInstallClass
from constants import *
from product import *
from flags import flags
import os, types
import iutil
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import installmethod
from sabayon import Entropy
from sabayon.livecd import LiveCDCopyBackend
class InstallClass(BaseInstallClass):
    """Install class for the Awesome desktop environment variant."""
    # Unique id and UI display name of this install class.
    id = "sabayon_awesome"
    name = N_("Rogentos Awesome")
    # Search the pixmap path for the class icon; note there is no break,
    # so the last matching directory wins.
    _pixmap_dirs = os.getenv("PIXMAPPATH", "/usr/share/pixmaps").split(":")
    for _pix_dir in _pixmap_dirs:
        _pix_path = os.path.join(_pix_dir, "awesome.png")
        if os.path.isfile(_pix_path):
            pixmap = _pix_path
    # Desktop session written to the user's .dmrc.
    dmrc = "awesome"
    _description = N_("Select this installation type for a default installation "
                      "with the Awesome desktop environment. "
                      "A small lightweight and functional working environment at your service.")
    _descriptionFields = (productName,)
    sortPriority = 10000
    # Hide this install class entirely when the awesome WM is not installed
    # (evaluated once, at class definition time).
    if not Entropy().is_installed("x11-wm/awesome"):
        hidden = 1
    def configure(self, anaconda):
        """Apply base configuration plus default partitioning."""
        BaseInstallClass.configure(self, anaconda)
        BaseInstallClass.setDefaultPartitioning(self,
            anaconda.storage, anaconda.platform)
    def setSteps(self, anaconda):
        """Customize the installer step sequence (skip the welcome step)."""
        BaseInstallClass.setSteps(self, anaconda)
        anaconda.dispatch.skipStep("welcome", skip = 1)
        #anaconda.dispatch.skipStep("network", skip = 1)
    def getBackend(self):
        """Return the backend class used to copy the live image."""
        return LiveCDCopyBackend
    def productMatches(self, oldprod):
        """Return True if *oldprod* is an earlier install of this product."""
        if oldprod is None:
            return False
        if oldprod.startswith(productName):
            return True
        return False
    def versionMatches(self, oldver):
        """Return True when upgrading from *oldver* is allowed.

        Unparseable versions are treated as upgradable.
        """
        try:
            oldVer = float(oldver)
            newVer = float(productVersion)
        except ValueError:
            return True
        return newVer >= oldVer
    def __init__(self):
        BaseInstallClass.__init__(self)
tomkralidis/GeoHealthCheck | GeoHealthCheck/views.py | 1 | 7335 | # =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import models
import util
from sqlalchemy import text
from plugin import Plugin
from factory import Factory
from init import App
APP = App.get_app()
LOGGER = logging.getLogger(__name__)
def list_resources(resource_type=None, query=None, tag=None):
    """Return a summary dict of all resources matching the given filters.

    :param resource_type: optional resource type, e.g. 'OGC:WMS'
    :param query: optional free-text query ('facet:term' syntax supported)
    :param tag: optional tag name
    :return: dict with resources, totals, success/fail counts and
             percentages, first/last run and overall reliability
    """
    reliability_values = []
    first_run = None
    last_run = None
    response = {
        'total': 0,
        'success': {
            'number': 0,
            'percentage': 0
        },
        'fail': {
            'number': 0,
            'percentage': 0
        },
        'first_run': None,
        'last_run': None,
        'reliability': 0
    }
    filters = ()
    if resource_type is not None:
        # NOTE(review): raw string interpolation into SQL; resource_type is
        # expected to come from internal code, not user input -- confirm.
        filters = filters + (text("resource_type = '%s'" % resource_type),)
    if query is not None:
        field, term = get_query_field_term(query)
        filters = filters + (field.ilike(term),)
    if tag is not None:
        filters = filters + (
            models.Resource.tags.any(models.Tag.name.in_([tag])),)
    response['resources'] = models.Resource.query.filter(*filters).all()
    response['total'] = len(response['resources'])
    for resource in response['resources']:
        # Resources without any Run yet only contribute to 'total'.
        if resource.runs.count() == 0:
            continue
        # Bug fix: test for None before comparing; the original compared
        # against None first, which raises TypeError on Python 3 for the
        # initial resource and only worked on Python 2 by accident.
        if first_run is None or resource.first_run < first_run:
            first_run = resource.first_run
        # Bug fix: the overall last run is the most recent one, so compare
        # with '>' (the original '<' kept the oldest of the per-resource
        # last runs).  Assumes Run ordering reflects run time -- confirm.
        if last_run is None or resource.last_run > last_run:
            last_run = resource.last_run
        if resource.last_run.success:
            response['success']['number'] += 1
        else:
            response['fail']['number'] += 1
        reliability_values.append(resource.reliability)
    response['first_run'] = first_run
    response['last_run'] = last_run
    response['success']['percentage'] = int(round(util.percentage(
        response['success']['number'], response['total'])))
    response['fail']['percentage'] = 100 - response['success']['percentage']
    response['reliability'] = round(util.average(reliability_values), 1)
    return response
def get_resource_by_id(identifier):
    """Return the Resource with this identifier, or abort with a 404."""
    return models.Resource.query.filter_by(
        identifier=identifier).first_or_404()
def get_run_by_id(identifier):
    """Return the Run with this identifier, or abort with a 404."""
    return models.Run.query.filter_by(
        identifier=identifier).first_or_404()
def get_run_by_resource_id(identifier):
    """Return the query of Runs belonging to the given resource identifier."""
    return models.Run.query.filter_by(
        resource_identifier=identifier)
def get_resource_types_counts():
    """Return frequency counts of registered resource types.

    :return: dict with 'counts' (per-type frequencies) and 'total'
    """
    result = models.get_resource_types_counts()
    return {'counts': result[0], 'total': result[1]}
def get_health_summary():
    """Return an overall health summary across all resources and runs.

    Reliability is computed over every run ever recorded; the current
    success/fail split is derived from each resource's most recent run.
    """
    # For overall reliability
    total_runs = models.get_runs_count()
    failed_runs = models.get_runs_status_count(False)
    success_runs = total_runs - failed_runs
    # Resources status derived from last N runs
    total_resources = models.get_resources_count()
    last_runs = models.get_last_run_per_resource()
    failed = 0
    failed_resources = []
    for run in last_runs:
        if not run.success:
            failed_resources.append(
                get_resource_by_id(run.resource_identifier))
            failed += 1
    success = total_resources - failed
    failed_percentage = int(round(
        util.percentage(failed, total_resources)))
    success_percentage = 100 - failed_percentage
    response = {
        'site_url': APP.config['GHC_SITE_URL'],
        'total': total_resources,
        'success': {
            'number': success,
            'percentage': success_percentage
        },
        'fail': {
            'number': failed,
            'percentage': failed_percentage
        },
        'first_run': models.get_first_run(),
        'last_run': models.get_last_run(),
        'reliability': round(util.percentage(success_runs, total_runs), 1),
        'failed_resources': failed_resources
    }
    return response
def get_tag_counts():
    """Return the frequency count of every tag."""
    return models.get_tag_counts()
def get_query_field_term(query):
    """Derive a (model field, SQL LIKE term) pair from a 'facet:term' query.

    Supported facets: url, title, site (URL prefix match), owner.  Queries
    without a single ':' fall back to a substring search on the title.
    """
    field = models.Resource.title  # default
    try:
        facet, term = query.split(':')
        term2 = '%%%s%%' % term  # default like
        if facet == 'url':
            field = models.Resource.url
        elif facet == 'title':
            field = models.Resource.title
        elif facet == 'site':
            field = models.Resource.url
            # match the site root, i.e. the URL followed by a '/'
            term2 = '%%%s/%%' % term
        elif facet == 'owner':
            field = models.Resource.owner_identifier
        term = term2
    except ValueError:  # default search (no or multiple ':' in query)
        term = '%%%s%%' % query
    return [field, term]
def get_probes_avail(resource_type=None, resource=None):
    """
    Get all available Probes with their attributes.

    :param resource_type: optional resource type e.g. OGC:WMS
    :param resource: optional Resource instance used to expand plugin vars
    :return: dict mapping probe class name to its plugin vars
    """
    # Assume no resource type
    filters = None
    if resource_type:
        # Match probes for this resource type plus wildcard probes.
        filters = [('RESOURCE_TYPE', resource_type),
                   ('RESOURCE_TYPE', '*:*')]
    probe_classes = Plugin.get_plugins('GeoHealthCheck.probe.Probe', filters)
    result = dict()
    for probe_class in probe_classes:
        probe = Factory.create_obj(probe_class)
        if probe:
            if resource:
                # Expansion failures are logged but do not drop the probe.
                try:
                    probe.expand_params(resource)
                except Exception as err:
                    msg = 'Cannot expand plugin vars for %s err=%s' \
                          % (probe_class, str(err))
                    LOGGER.warning(msg)
            result[probe_class] = probe.get_plugin_vars()
    return result
| mit |
wonwon0/StrategyIA | RULEngine/Command/command.py | 2 | 2515 | # Under MIT License, see LICENSE.txt
"""
Ce module permet de créer des commandes pour faire agir les robots.
Des fonctions utilitaire permettent de transformer une commande de
Position (Pose) en une commande de vitesse.
L'embarqué et le simulateur utilise un vecteur de vitesse (Pose) pour
contrôler les robots.
"""
from abc import abstractmethod
import time
from ..Game.Player import Player
from ..Util.area import *
import RULEngine.Communication.util.serial_protocol as protocol
class _Command(object):
    """Base class for robot commands.

    Holds the target player plus default command state: an identity Pose
    and a zero kick speed. Subclasses serialize themselves through
    package_command().
    """

    def __init__(self, player):
        assert isinstance(player, Player)
        self.player = player
        self.pose = Pose()
        self.kick_speed = 0

    @abstractmethod
    def package_command(self):
        """Serialize this command into a protocol packet."""
        pass
class Move(_Command):
    """Command a robot to move toward a destination Pose."""

    def __init__(self, player, destination):
        assert isinstance(destination, Pose)
        super().__init__(player)
        self.pose = destination

    def package_command(self):
        # Flatten the destination Pose into the speed-command packet.
        destination = self.pose
        return protocol.create_speed_command(destination.position.x,
                                             destination.position.y,
                                             destination.orientation,
                                             self.player.id)
class Kick(_Command):
    """Command a robot to kick the ball."""

    def __init__(self, player, kick_strength):
        """
        :param player: target Player
        :param kick_strength: requested kick strength, an int in [0, 4]
            (clamped to that range before being sent)
        """
        super().__init__(player)
        # Bug fix: kick_strength used to be ignored and hard-coded to 4
        # (old "TODO FIXME KICK SPEED OR STRENGTH" note). Honor the caller's
        # value, clamped to the protocol's documented 0..4 range.
        self.kick_speed = max(0, min(4, kick_strength))

    def package_command(self):
        return protocol.create_kick_command(self.player.id, self.kick_speed)
class Stop(_Command):
    """Command a robot to halt: a zero velocity vector."""

    def __init__(self, player):
        super().__init__(player)

    def package_command(self):
        # A speed command of (0, 0, 0) stops the robot in place.
        return protocol.create_speed_command(0, 0, 0, self.player.id)
class ChargeKick(_Command):
    """Command a robot to charge its kicker capacitor."""

    def __init__(self, player):
        super().__init__(player)

    def package_command(self):
        # Debug trace kept for behavioral parity with the original code.
        print("Kick charge!")
        return protocol.create_charge_command(self.player.id)
class Dribbler(_Command):
    """Command toggling a robot's dribbler on or off."""

    def __init__(self, player, activate):
        super().__init__(player)
        # Truthy 'activate' enables the dribbler, anything else disables it.
        self.dribbler_status = (protocol.DribblerStatus.ENABLED if activate
                                else protocol.DribblerStatus.DISABLED)

    def package_command(self):
        # Debug trace kept for behavioral parity with the original code.
        print("Dribbler")
        # Wire encoding: DISABLED -> 0, otherwise -> 3.
        status = 0 if self.dribbler_status == protocol.DribblerStatus.DISABLED else 3
        return protocol.create_dribbler_command(self.player.id, status)
| mit |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_3/tests/regressiontests/i18n/tests.py | 47 | 48435 | # -*- encoding: utf-8 -*-
import datetime
import decimal
import os
import sys
import pickle
from threading import local
from django.conf import settings
from django.template import Template, Context
from django.utils.formats import (get_format, date_format, time_format,
localize, localize_input, iter_format_modules, get_format_modules)
from django.utils.importlib import import_module
from django.utils.numberformat import format as nformat
from django.utils.safestring import mark_safe, SafeString, SafeUnicode
from django.utils.translation import (ugettext, ugettext_lazy, activate,
deactivate, gettext_lazy, pgettext, npgettext, to_locale,
get_language_info, get_language)
from django.utils.unittest import TestCase
from forms import I18nForm, SelectDateForm, SelectDateWidget, CompanyForm
from models import Company, TestModel
from commands.tests import *
from test_warnings import DeprecationWarningTests
class TranslationTests(TestCase):
    """Tests for lazy translation objects, pgettext/npgettext, safe-string
    preservation, catalog EOL normalization and locale-name conversion."""

    def test_lazy_objects(self):
        """
        Format string interpolation should work with *_lazy objects.
        """
        s = ugettext_lazy('Add %(name)s')
        d = {'name': 'Ringo'}
        self.assertEqual(u'Add Ringo', s % d)
        activate('de')
        try:
            self.assertEqual(u'Ringo hinzuf\xfcgen', s % d)
            activate('pl')
            self.assertEqual(u'Dodaj Ringo', s % d)
        finally:
            deactivate()

        # It should be possible to compare *_lazy objects.
        s1 = ugettext_lazy('Add %(name)s')
        self.assertEqual(True, s == s1)
        s2 = gettext_lazy('Add %(name)s')
        s3 = gettext_lazy('Add %(name)s')
        self.assertEqual(True, s2 == s3)
        self.assertEqual(True, s == s2)
        s4 = ugettext_lazy('Some other string')
        self.assertEqual(False, s == s4)

    def test_lazy_pickle(self):
        # A lazy translation must survive a pickle round-trip unchanged.
        s1 = ugettext_lazy("test")
        self.assertEqual(unicode(s1), "test")
        s2 = pickle.loads(pickle.dumps(s1))
        self.assertEqual(unicode(s2), "test")

    def test_pgettext(self):
        # Reset translation catalog to include other/locale/de
        self.old_locale_paths = settings.LOCALE_PATHS
        settings.LOCALE_PATHS += (os.path.join(os.path.dirname(os.path.abspath(__file__)), 'other', 'locale'),)
        from django.utils.translation import trans_real
        trans_real._active = local()
        trans_real._translations = {}
        activate('de')
        # An unknown context falls back to the untranslated message id.
        self.assertEqual(pgettext("unexisting", "May"), u"May")
        self.assertEqual(pgettext("month name", "May"), u"Mai")
        self.assertEqual(pgettext("verb", "May"), u"Kann")
        self.assertEqual(npgettext("search", "%d result", "%d results", 4) % 4, u"4 Resultate")
        settings.LOCALE_PATHS = self.old_locale_paths

    def test_string_concat(self):
        """
        unicode(string_concat(...)) should not raise a TypeError - #4796
        """
        import django.utils.translation
        self.assertEqual(u'django', unicode(django.utils.translation.string_concat("dja", "ngo")))

    def test_safe_status(self):
        """
        Translating a string requiring no auto-escaping shouldn't change the "safe" status.
        """
        s = mark_safe('Password')
        self.assertEqual(SafeString, type(s))
        activate('de')
        try:
            self.assertEqual(SafeUnicode, type(ugettext(s)))
        finally:
            deactivate()
        # Concatenation with other safe strings keeps the expected value.
        self.assertEqual('aPassword', SafeString('a') + s)
        self.assertEqual('Passworda', s + SafeString('a'))
        self.assertEqual('Passworda', s + mark_safe('a'))
        self.assertEqual('aPassword', mark_safe('a') + s)
        self.assertEqual('as', mark_safe('a') + mark_safe('s'))

    def test_maclines(self):
        """
        Translations on files with mac or dos end of lines will be converted
        to unix eof in .po catalogs, and they have to match when retrieved
        """
        from django.utils.translation.trans_real import translation
        ca_translation = translation('ca')
        ca_translation._catalog[u'Mac\nEOF\n'] = u'Catalan Mac\nEOF\n'
        ca_translation._catalog[u'Win\nEOF\n'] = u'Catalan Win\nEOF\n'
        activate('ca')
        try:
            # Lookups with \r and \r\n line endings hit the \n catalog keys.
            self.assertEqual(u'Catalan Mac\nEOF\n', ugettext(u'Mac\rEOF\r'))
            self.assertEqual(u'Catalan Win\nEOF\n', ugettext(u'Win\r\nEOF\r\n'))
        finally:
            deactivate()

    def test_to_locale(self):
        """
        Tests the to_locale function and the special case of Serbian Latin
        (refs #12230 and r11299)
        """
        self.assertEqual(to_locale('en-us'), 'en_US')
        self.assertEqual(to_locale('sr-lat'), 'sr_Lat')

    def test_to_language(self):
        """
        Test the to_language function
        """
        from django.utils.translation.trans_real import to_language
        self.assertEqual(to_language('en_US'), 'en-us')
        self.assertEqual(to_language('sr_Lat'), 'sr-lat')
class FormattingTests(TestCase):
    """Exercise locale-aware formatting and input parsing (USE_L10N) across
    Catalan, Russian and English locales and sublocales, covering numbers,
    dates, template filters, forms and the SelectDateWidget."""

    def setUp(self):
        # Snapshot the format-related settings so individual tests may mutate
        # them and tearDown can restore the originals.
        self.use_i18n = settings.USE_I18N
        self.use_l10n = settings.USE_L10N
        self.use_thousand_separator = settings.USE_THOUSAND_SEPARATOR
        self.thousand_separator = settings.THOUSAND_SEPARATOR
        self.number_grouping = settings.NUMBER_GROUPING
        # Sample values: Decimal, float, date, datetime, time and (Py2) long.
        self.n = decimal.Decimal('66666.666')
        self.f = 99999.999
        self.d = datetime.date(2009, 12, 31)
        self.dt = datetime.datetime(2009, 12, 31, 20, 50)
        self.t = datetime.time(10, 15, 48)
        self.l = 10000L
        self.ctxt = Context({
            'n': self.n,
            't': self.t,
            'd': self.d,
            'dt': self.dt,
            'f': self.f,
            'l': self.l,
        })

    def tearDown(self):
        # Restore defaults
        settings.USE_I18N = self.use_i18n
        settings.USE_L10N = self.use_l10n
        settings.USE_THOUSAND_SEPARATOR = self.use_thousand_separator
        settings.THOUSAND_SEPARATOR = self.thousand_separator
        settings.NUMBER_GROUPING = self.number_grouping

    def test_locale_independent(self):
        """
        Localization of numbers
        """
        settings.USE_L10N = True
        settings.USE_THOUSAND_SEPARATOR = False
        self.assertEqual(u'66666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
        self.assertEqual(u'66666A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
        # With the separator enabled, explicit grouping/thousand_sep apply.
        settings.USE_THOUSAND_SEPARATOR = True
        self.assertEqual(u'66,666.66', nformat(self.n, decimal_sep='.', decimal_pos=2, grouping=3, thousand_sep=','))
        self.assertEqual(u'6B6B6B6B6A6', nformat(self.n, decimal_sep='A', decimal_pos=1, grouping=1, thousand_sep='B'))
        self.assertEqual(u'-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1))
        self.assertEqual(u'-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1))
        self.assertEqual(u'10000.0', nformat(self.l, decimal_sep='.', decimal_pos=1))
        # date filter
        self.assertEqual(u'31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt))
        self.assertEqual(u'⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt))

    def test_l10n_disabled(self):
        """
        Catalan locale with format i18n disabled translations will be used,
        but not formats
        """
        settings.USE_L10N = False
        activate('ca')
        try:
            # Global (non-localized) formats remain in effect.
            self.assertEqual(u'N j, Y', get_format('DATE_FORMAT'))
            self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
            self.assertEqual(u'.', get_format('DECIMAL_SEPARATOR'))
            self.assertEqual(u'10:15 a.m.', time_format(self.t))
            # Month names are still translated (Catalan), only formats are not.
            self.assertEqual(u'des. 31, 2009', date_format(self.d))
            self.assertEqual(u'desembre 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
            self.assertEqual(u'12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
            self.assertEqual(u'No localizable', localize('No localizable'))
            self.assertEqual(u'66666.666', localize(self.n))
            self.assertEqual(u'99999.999', localize(self.f))
            self.assertEqual(u'10000', localize(self.l))
            self.assertEqual(u'des. 31, 2009', localize(self.d))
            self.assertEqual(u'des. 31, 2009, 8:50 p.m.', localize(self.dt))
            self.assertEqual(u'66666.666', Template('{{ n }}').render(self.ctxt))
            self.assertEqual(u'99999.999', Template('{{ f }}').render(self.ctxt))
            self.assertEqual(u'des. 31, 2009', Template('{{ d }}').render(self.ctxt))
            self.assertEqual(u'des. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
            self.assertEqual(u'66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
            self.assertEqual(u'100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
            self.assertEqual(u'10:15 a.m.', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
            self.assertEqual(u'12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
            self.assertEqual(u'12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))

            # Catalan-formatted input is rejected because localized parsing
            # is off: every field yields a (translated) validation error.
            form = I18nForm({
                'decimal_field': u'66666,666',
                'float_field': u'99999,999',
                'date_field': u'31/12/2009',
                'datetime_field': u'31/12/2009 20:50',
                'time_field': u'20:50',
                'integer_field': u'1.234',
            })
            self.assertEqual(False, form.is_valid())
            self.assertEqual([u'Introdu\xefu un n\xfamero.'], form.errors['float_field'])
            self.assertEqual([u'Introdu\xefu un n\xfamero.'], form.errors['decimal_field'])
            self.assertEqual([u'Introdu\xefu una data v\xe0lida.'], form.errors['date_field'])
            self.assertEqual([u'Introdu\xefu una data/hora v\xe0lides.'], form.errors['datetime_field'])
            self.assertEqual([u'Introdu\xefu un n\xfamero sencer.'], form.errors['integer_field'])

            form2 = SelectDateForm({
                'date_field_month': u'12',
                'date_field_day': u'31',
                'date_field_year': u'2009'
            })
            self.assertEqual(True, form2.is_valid())
            self.assertEqual(datetime.date(2009, 12, 31), form2.cleaned_data['date_field'])
            # Widget order is month/day/year (the non-localized default).
            self.assertEqual(
                u'<select name="mydate_month" id="id_mydate_month">\n<option value="1">gener</option>\n<option value="2">febrer</option>\n<option value="3">mar\xe7</option>\n<option value="4">abril</option>\n<option value="5">maig</option>\n<option value="6">juny</option>\n<option value="7">juliol</option>\n<option value="8">agost</option>\n<option value="9">setembre</option>\n<option value="10">octubre</option>\n<option value="11">novembre</option>\n<option value="12" selected="selected">desembre</option>\n</select>\n<select name="mydate_day" id="id_mydate_day">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
                SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
            )

            # We shouldn't change the behavior of the floatformat filter re:
            # thousand separator and grouping when USE_L10N is False even
            # if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and
            # THOUSAND_SEPARATOR settings are specified
            settings.USE_THOUSAND_SEPARATOR = True
            settings.NUMBER_GROUPING = 1
            settings.THOUSAND_SEPARATOR = '!'
            self.assertEqual(u'66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
            self.assertEqual(u'100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
        finally:
            deactivate()

    def test_l10n_enabled(self):
        settings.USE_L10N = True
        # Catalan locale
        activate('ca')
        try:
            # Locale-specific formats now take effect.
            self.assertEqual('j \de F \de Y', get_format('DATE_FORMAT'))
            self.assertEqual(1, get_format('FIRST_DAY_OF_WEEK'))
            self.assertEqual(',', get_format('DECIMAL_SEPARATOR'))
            self.assertEqual(u'10:15:48', time_format(self.t))
            self.assertEqual(u'31 de desembre de 2009', date_format(self.d))
            self.assertEqual(u'desembre del 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
            self.assertEqual(u'31/12/2009 20:50', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
            self.assertEqual('No localizable', localize('No localizable'))

            settings.USE_THOUSAND_SEPARATOR = True
            self.assertEqual(u'66.666,666', localize(self.n))
            self.assertEqual(u'99.999,999', localize(self.f))
            self.assertEqual(u'10.000', localize(self.l))
            # Booleans pass through unlocalized.
            self.assertEqual(u'True', localize(True))

            settings.USE_THOUSAND_SEPARATOR = False
            self.assertEqual(u'66666,666', localize(self.n))
            self.assertEqual(u'99999,999', localize(self.f))
            self.assertEqual(u'10000', localize(self.l))
            self.assertEqual(u'31 de desembre de 2009', localize(self.d))
            self.assertEqual(u'31 de desembre de 2009 a les 20:50', localize(self.dt))

            settings.USE_THOUSAND_SEPARATOR = True
            self.assertEqual(u'66.666,666', Template('{{ n }}').render(self.ctxt))
            self.assertEqual(u'99.999,999', Template('{{ f }}').render(self.ctxt))
            self.assertEqual(u'10.000', Template('{{ l }}').render(self.ctxt))

            # Localized (grouped) Catalan input parses correctly.
            form3 = I18nForm({
                'decimal_field': u'66.666,666',
                'float_field': u'99.999,999',
                'date_field': u'31/12/2009',
                'datetime_field': u'31/12/2009 20:50',
                'time_field': u'20:50',
                'integer_field': u'1.234',
            })
            self.assertEqual(True, form3.is_valid())
            self.assertEqual(decimal.Decimal('66666.666'), form3.cleaned_data['decimal_field'])
            self.assertEqual(99999.999, form3.cleaned_data['float_field'])
            self.assertEqual(datetime.date(2009, 12, 31), form3.cleaned_data['date_field'])
            self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form3.cleaned_data['datetime_field'])
            self.assertEqual(datetime.time(20, 50), form3.cleaned_data['time_field'])
            self.assertEqual(1234, form3.cleaned_data['integer_field'])

            settings.USE_THOUSAND_SEPARATOR = False
            self.assertEqual(u'66666,666', Template('{{ n }}').render(self.ctxt))
            self.assertEqual(u'99999,999', Template('{{ f }}').render(self.ctxt))
            self.assertEqual(u'31 de desembre de 2009', Template('{{ d }}').render(self.ctxt))
            self.assertEqual(u'31 de desembre de 2009 a les 20:50', Template('{{ dt }}').render(self.ctxt))
            self.assertEqual(u'66666,67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
            self.assertEqual(u'100000,0', Template('{{ f|floatformat }}').render(self.ctxt))
            self.assertEqual(u'10:15:48', Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt))
            self.assertEqual(u'31/12/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
            self.assertEqual(u'31/12/2009 20:50', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))

            # Ungrouped localized input parses as well.
            form4 = I18nForm({
                'decimal_field': u'66666,666',
                'float_field': u'99999,999',
                'date_field': u'31/12/2009',
                'datetime_field': u'31/12/2009 20:50',
                'time_field': u'20:50',
                'integer_field': u'1234',
            })
            self.assertEqual(True, form4.is_valid())
            self.assertEqual(decimal.Decimal('66666.666'), form4.cleaned_data['decimal_field'])
            self.assertEqual(99999.999, form4.cleaned_data['float_field'])
            self.assertEqual(datetime.date(2009, 12, 31), form4.cleaned_data['date_field'])
            self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form4.cleaned_data['datetime_field'])
            self.assertEqual(datetime.time(20, 50), form4.cleaned_data['time_field'])
            self.assertEqual(1234, form4.cleaned_data['integer_field'])

            form5 = SelectDateForm({
                'date_field_month': u'12',
                'date_field_day': u'31',
                'date_field_year': u'2009'
            })
            self.assertEqual(True, form5.is_valid())
            self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
            # Localized widget order is day/month/year for Catalan.
            self.assertEqual(
                u'<select name="mydate_day" id="id_mydate_day">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_month" id="id_mydate_month">\n<option value="1">gener</option>\n<option value="2">febrer</option>\n<option value="3">mar\xe7</option>\n<option value="4">abril</option>\n<option value="5">maig</option>\n<option value="6">juny</option>\n<option value="7">juliol</option>\n<option value="8">agost</option>\n<option value="9">setembre</option>\n<option value="10">octubre</option>\n<option value="11">novembre</option>\n<option value="12" selected="selected">desembre</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
                SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
            )
        finally:
            deactivate()

        # Russian locale (with E as month)
        activate('ru')
        try:
            self.assertEqual(
                u'<select name="mydate_day" id="id_mydate_day">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_month" id="id_mydate_month">\n<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>\n<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>\n<option value="3">\u041c\u0430\u0440\u0442</option>\n<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>\n<option value="5">\u041c\u0430\u0439</option>\n<option value="6">\u0418\u044e\u043d\u044c</option>\n<option value="7">\u0418\u044e\u043b\u044c</option>\n<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>\n<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c</option>\n<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>\n<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>\n<option value="12" selected="selected">\u0414\u0435\u043a\u0430\u0431\u0440\u044c</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
                SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
            )
        finally:
            deactivate()

        # English locale
        activate('en')
        try:
            self.assertEqual('N j, Y', get_format('DATE_FORMAT'))
            self.assertEqual(0, get_format('FIRST_DAY_OF_WEEK'))
            self.assertEqual('.', get_format('DECIMAL_SEPARATOR'))
            self.assertEqual(u'Dec. 31, 2009', date_format(self.d))
            self.assertEqual(u'December 2009', date_format(self.d, 'YEAR_MONTH_FORMAT'))
            self.assertEqual(u'12/31/2009 8:50 p.m.', date_format(self.dt, 'SHORT_DATETIME_FORMAT'))
            self.assertEqual(u'No localizable', localize('No localizable'))

            settings.USE_THOUSAND_SEPARATOR = True
            self.assertEqual(u'66,666.666', localize(self.n))
            self.assertEqual(u'99,999.999', localize(self.f))
            self.assertEqual(u'10,000', localize(self.l))

            settings.USE_THOUSAND_SEPARATOR = False
            self.assertEqual(u'66666.666', localize(self.n))
            self.assertEqual(u'99999.999', localize(self.f))
            self.assertEqual(u'10000', localize(self.l))
            self.assertEqual(u'Dec. 31, 2009', localize(self.d))
            self.assertEqual(u'Dec. 31, 2009, 8:50 p.m.', localize(self.dt))

            settings.USE_THOUSAND_SEPARATOR = True
            self.assertEqual(u'66,666.666', Template('{{ n }}').render(self.ctxt))
            self.assertEqual(u'99,999.999', Template('{{ f }}').render(self.ctxt))
            self.assertEqual(u'10,000', Template('{{ l }}').render(self.ctxt))

            settings.USE_THOUSAND_SEPARATOR = False
            self.assertEqual(u'66666.666', Template('{{ n }}').render(self.ctxt))
            self.assertEqual(u'99999.999', Template('{{ f }}').render(self.ctxt))
            self.assertEqual(u'Dec. 31, 2009', Template('{{ d }}').render(self.ctxt))
            self.assertEqual(u'Dec. 31, 2009, 8:50 p.m.', Template('{{ dt }}').render(self.ctxt))
            self.assertEqual(u'66666.67', Template('{{ n|floatformat:2 }}').render(self.ctxt))
            self.assertEqual(u'100000.0', Template('{{ f|floatformat }}').render(self.ctxt))
            self.assertEqual(u'12/31/2009', Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt))
            self.assertEqual(u'12/31/2009 8:50 p.m.', Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt))

            form5 = I18nForm({
                'decimal_field': u'66666.666',
                'float_field': u'99999.999',
                'date_field': u'12/31/2009',
                'datetime_field': u'12/31/2009 20:50',
                'time_field': u'20:50',
                'integer_field': u'1234',
            })
            self.assertEqual(True, form5.is_valid())
            self.assertEqual(decimal.Decimal('66666.666'), form5.cleaned_data['decimal_field'])
            self.assertEqual(99999.999, form5.cleaned_data['float_field'])
            self.assertEqual(datetime.date(2009, 12, 31), form5.cleaned_data['date_field'])
            self.assertEqual(datetime.datetime(2009, 12, 31, 20, 50), form5.cleaned_data['datetime_field'])
            self.assertEqual(datetime.time(20, 50), form5.cleaned_data['time_field'])
            self.assertEqual(1234, form5.cleaned_data['integer_field'])

            form6 = SelectDateForm({
                'date_field_month': u'12',
                'date_field_day': u'31',
                'date_field_year': u'2009'
            })
            self.assertEqual(True, form6.is_valid())
            self.assertEqual(datetime.date(2009, 12, 31), form6.cleaned_data['date_field'])
            # English widget order is month/day/year.
            self.assertEqual(
                u'<select name="mydate_month" id="id_mydate_month">\n<option value="1">January</option>\n<option value="2">February</option>\n<option value="3">March</option>\n<option value="4">April</option>\n<option value="5">May</option>\n<option value="6">June</option>\n<option value="7">July</option>\n<option value="8">August</option>\n<option value="9">September</option>\n<option value="10">October</option>\n<option value="11">November</option>\n<option value="12" selected="selected">December</option>\n</select>\n<select name="mydate_day" id="id_mydate_day">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="4">4</option>\n<option value="5">5</option>\n<option value="6">6</option>\n<option value="7">7</option>\n<option value="8">8</option>\n<option value="9">9</option>\n<option value="10">10</option>\n<option value="11">11</option>\n<option value="12">12</option>\n<option value="13">13</option>\n<option value="14">14</option>\n<option value="15">15</option>\n<option value="16">16</option>\n<option value="17">17</option>\n<option value="18">18</option>\n<option value="19">19</option>\n<option value="20">20</option>\n<option value="21">21</option>\n<option value="22">22</option>\n<option value="23">23</option>\n<option value="24">24</option>\n<option value="25">25</option>\n<option value="26">26</option>\n<option value="27">27</option>\n<option value="28">28</option>\n<option value="29">29</option>\n<option value="30">30</option>\n<option value="31" selected="selected">31</option>\n</select>\n<select name="mydate_year" id="id_mydate_year">\n<option value="2009" selected="selected">2009</option>\n<option value="2010">2010</option>\n<option value="2011">2011</option>\n<option value="2012">2012</option>\n<option value="2013">2013</option>\n<option value="2014">2014</option>\n<option value="2015">2015</option>\n<option value="2016">2016</option>\n<option value="2017">2017</option>\n<option value="2018">2018</option>\n</select>',
                SelectDateWidget(years=range(2009, 2019)).render('mydate', datetime.date(2009, 12, 31))
            )
        finally:
            deactivate()

    def test_sub_locales(self):
        """
        Check if sublocales fall back to the main locale
        """
        settings.USE_L10N = True
        activate('de-at')
        settings.USE_THOUSAND_SEPARATOR = True
        try:
            # 'de-at' has no own formats; 'de' grouping applies.
            self.assertEqual(u'66.666,666', Template('{{ n }}').render(self.ctxt))
        finally:
            deactivate()
        activate('es-us')
        try:
            self.assertEqual(u'31 de diciembre de 2009', date_format(self.d))
        finally:
            deactivate()

    def test_localized_input(self):
        """
        Tests if form input is correctly localized
        """
        settings.USE_L10N = True
        activate('de-at')
        try:
            form6 = CompanyForm({
                'name': u'acme',
                'date_added': datetime.datetime(2009, 12, 31, 6, 0, 0),
                'cents_payed': decimal.Decimal('59.47'),
                'products_delivered': 12000,
            })
            self.assertEqual(True, form6.is_valid())
            self.assertEqual(
                form6.as_ul(),
                u'<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" value="acme" maxlength="50" /></li>\n<li><label for="id_date_added">Date added:</label> <input type="text" name="date_added" value="31.12.2009 06:00:00" id="id_date_added" /></li>\n<li><label for="id_cents_payed">Cents payed:</label> <input type="text" name="cents_payed" value="59,47" id="id_cents_payed" /></li>\n<li><label for="id_products_delivered">Products delivered:</label> <input type="text" name="products_delivered" value="12000" id="id_products_delivered" /></li>'
            )
            self.assertEqual(localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)), '31.12.2009 06:00:00')
            self.assertEqual(datetime.datetime(2009, 12, 31, 6, 0, 0), form6.cleaned_data['date_added'])
            settings.USE_THOUSAND_SEPARATOR = True
            # Checking for the localized "products_delivered" field
            self.assertTrue(u'<input type="text" name="products_delivered" value="12.000" id="id_products_delivered" />' in form6.as_ul())
        finally:
            deactivate()

    def test_iter_format_modules(self):
        """
        Tests the iter_format_modules function.
        """
        activate('de-at')
        old_format_module_path = settings.FORMAT_MODULE_PATH
        try:
            settings.USE_L10N = True
            de_format_mod = import_module('django.conf.locale.de.formats')
            self.assertEqual(list(iter_format_modules('de')), [de_format_mod])
            # A custom FORMAT_MODULE_PATH is consulted before the built-ins.
            settings.FORMAT_MODULE_PATH = 'regressiontests.i18n.other.locale'
            test_de_format_mod = import_module('regressiontests.i18n.other.locale.de.formats')
            self.assertEqual(list(iter_format_modules('de')), [test_de_format_mod, de_format_mod])
        finally:
            settings.FORMAT_MODULE_PATH = old_format_module_path
            deactivate()

    def test_iter_format_modules_stability(self):
        """
        Tests the iter_format_modules function always yields format modules in
        a stable and correct order in presence of both base ll and ll_CC formats.
        """
        settings.USE_L10N = True
        en_format_mod = import_module('django.conf.locale.en.formats')
        en_gb_format_mod = import_module('django.conf.locale.en_GB.formats')
        self.assertEqual(list(iter_format_modules('en-gb')), [en_gb_format_mod, en_format_mod])

    def test_get_format_modules_stability(self):
        activate('de')
        old_format_module_path = settings.FORMAT_MODULE_PATH
        settings.FORMAT_MODULE_PATH = 'regressiontests.i18n.other.locale'
        try:
            settings.USE_L10N = True
            # Two consecutive calls must return the same module list.
            old = "%r" % get_format_modules(reverse=True)
            new = "%r" % get_format_modules(reverse=True) # second try
            self.assertEqual(new, old, 'Value returned by get_formats_modules() must be preserved between calls.')
        finally:
            settings.FORMAT_MODULE_PATH = old_format_module_path
            deactivate()

    def test_localize_templatetag_and_filter(self):
        """
        Tests the {% localize %} templatetag
        """
        context = Context({'value': 3.14 })
        template1 = Template("{% load l10n %}{% localize %}{{ value }}{% endlocalize %};{% localize on %}{{ value }}{% endlocalize %}")
        template2 = Template("{% load l10n %}{{ value }};{% localize off %}{{ value }};{% endlocalize %}{{ value }}")
        template3 = Template('{% load l10n %}{{ value }};{{ value|unlocalize }}')
        template4 = Template('{% load l10n %}{{ value }};{{ value|localize }}')
        # Expected outputs under the German locale (',' decimal separator).
        output1 = '3,14;3,14'
        output2 = '3,14;3.14;3,14'
        output3 = '3,14;3.14'
        output4 = '3.14;3,14'
        old_localize = settings.USE_L10N
        try:
            activate('de')
            settings.USE_L10N = False
            self.assertEqual(template1.render(context), output1)
            self.assertEqual(template4.render(context), output4)
            settings.USE_L10N = True
            self.assertEqual(template1.render(context), output1)
            self.assertEqual(template2.render(context), output2)
            self.assertEqual(template3.render(context), output3)
        finally:
            deactivate()
            settings.USE_L10N = old_localize
class MiscTests(TestCase):
    """Tests for Accept-Language header parsing and language selection
    from request headers and cookies."""

    def test_parse_spec_http_header(self):
        """
        Testing HTTP header parsing. First, we test that we can parse the
        values according to the spec (and that we extract all the pieces in
        the right order).
        """
        from django.utils.translation.trans_real import parse_accept_lang_header
        p = parse_accept_lang_header
        # Good headers.
        self.assertEqual([('de', 1.0)], p('de'))
        self.assertEqual([('en-AU', 1.0)], p('en-AU'))
        self.assertEqual([('*', 1.0)], p('*;q=1.00'))
        self.assertEqual([('en-AU', 0.123)], p('en-AU;q=0.123'))
        self.assertEqual([('en-au', 0.5)], p('en-au;q=0.5'))
        self.assertEqual([('en-au', 1.0)], p('en-au;q=1.0'))
        # Entries come back sorted by descending quality value.
        self.assertEqual([('da', 1.0), ('en', 0.5), ('en-gb', 0.25)], p('da, en-gb;q=0.25, en;q=0.5'))
        self.assertEqual([('en-au-xx', 1.0)], p('en-au-xx'))
        self.assertEqual([('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)], p('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125'))
        self.assertEqual([('*', 1.0)], p('*'))
        # 'q=0.' (no digits after the dot) falls back to the default quality.
        self.assertEqual([('de', 1.0)], p('de;q=0.'))
        self.assertEqual([], p(''))
        # Bad headers; should always return [].
        self.assertEqual([], p('en-gb;q=1.0000'))
        self.assertEqual([], p('en;q=0.1234'))
        self.assertEqual([], p('en;q=.2'))
        self.assertEqual([], p('abcdefghi-au'))
        self.assertEqual([], p('**'))
        self.assertEqual([], p('en,,gb'))
        self.assertEqual([], p('en-au;q=0.1.0'))
        self.assertEqual([], p('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZ,en'))
        self.assertEqual([], p('da, en-gb;q=0.8, en;q=0.7,#'))
        self.assertEqual([], p('de;q=2.0'))
        self.assertEqual([], p('de;q=0.a'))
        self.assertEqual([], p(''))

    def test_parse_literal_http_header(self):
        """
        Now test that we parse a literal HTTP header correctly.
        """
        from django.utils.translation.trans_real import get_language_from_request
        g = get_language_from_request
        from django.http import HttpRequest
        # NOTE(review): HttpRequest is used as a class here — attributes are
        # set on the class itself, not on an instance; presumably intentional
        # test shorthand, confirm before changing.
        r = HttpRequest
        r.COOKIES = {}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
        self.assertEqual('pt-br', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'}
        self.assertEqual('pt', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'}
        self.assertEqual('es', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'}
        self.assertEqual('es-ar', g(r))
        # Python 2.3 and 2.4 return slightly different results for completely
        # bogus locales, so the check below only runs on 2.5+ (this also
        # skips Jython). It's relatively harmless in any case (GIGO); on
        # those platforms, passing in a truly bogus locale will get you the
        # default locale back.
        if sys.version_info >= (2, 5):
            # This test assumes there won't be a Django translation to a US
            # variation of the Spanish language, a safe assumption. When the
            # user sets it as the preferred language, the main 'es'
            # translation should be selected instead.
            r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'}
            self.assertEqual(g(r), 'es')
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django but there is a translation to variation (zh_CN)
        # the user sets zh-cn as the preferred language, it should be selected
        # by Django without falling back nor ignoring it.
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,de'}
        self.assertEqual(g(r), 'zh-cn')

    def test_parse_language_cookie(self):
        """
        Now test that we parse language preferences stored in a cookie correctly.
        """
        from django.utils.translation.trans_real import get_language_from_request
        g = get_language_from_request
        from django.http import HttpRequest
        # NOTE(review): same class-as-instance shorthand as above.
        r = HttpRequest
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}
        r.META = {}
        self.assertEqual('pt-br', g(r))
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}
        r.META = {}
        self.assertEqual('pt', g(r))
        # The cookie wins over the Accept-Language header.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
        self.assertEqual('es', g(r))
        # Python 2.3 and 2.4 return slightly different results for completely
        # bogus locales, so the check below only runs on 2.5+ (this also
        # skips Jython). It's relatively harmless in any case (GIGO); on
        # those platforms, passing in a truly bogus locale will get you the
        # default locale back.
        if sys.version_info >= (2, 5):
            # This test assumes there won't be a Django translation to a US
            # variation of the Spanish language, a safe assumption. When the
            # user sets it as the preferred language, the main 'es'
            # translation should be selected instead.
            r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}
            r.META = {}
            self.assertEqual(g(r), 'es')
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django but there is a translation to variation (zh_CN)
        # the user sets zh-cn as the preferred language, it should be selected
        # by Django without falling back nor ignoring it.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-cn'}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
        self.assertEqual(g(r), 'zh-cn')
class ResolutionOrderI18NTests(TestCase):
    """Base class for tests checking which translation catalog wins."""

    def setUp(self):
        from django.utils.translation import trans_real
        # Brutal, but it's the only way to fully reset the translation
        # framework: drop both the catalog cache and the active-language
        # thread local.
        trans_real._translations = {}
        trans_real._active = local()
        activate('de')

    def tearDown(self):
        deactivate()

    def assertUgettext(self, msgid, msgstr):
        """Assert that the translation of *msgid* contains *msgstr*."""
        translated = ugettext(msgid)
        message = ("The string '%s' isn't in the "
            "translation of '%s'; the actual result is '%s'." % (msgstr, msgid, translated))
        self.assertTrue(msgstr in translated, message)
class AppResolutionOrderI18NTests(ResolutionOrderI18NTests):
    """Catalogs shipped inside installed applications are consulted."""

    def setUp(self):
        self._saved_installed_apps = settings.INSTALLED_APPS
        # Prepend the resolution test app so its catalog is found.
        settings.INSTALLED_APPS = ['regressiontests.i18n.resolution'] + list(settings.INSTALLED_APPS)
        super(AppResolutionOrderI18NTests, self).setUp()

    def tearDown(self):
        settings.INSTALLED_APPS = self._saved_installed_apps
        super(AppResolutionOrderI18NTests, self).tearDown()

    def test_app_translation(self):
        self.assertUgettext('Date/time', 'APP')
class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests):
    """Catalogs found via LOCALE_PATHS take precedence over app and
    project catalogs."""

    def setUp(self):
        self._saved_locale_paths = settings.LOCALE_PATHS
        extra_locale_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'other', 'locale')
        settings.LOCALE_PATHS += (extra_locale_dir,)
        super(LocalePathsResolutionOrderI18NTests, self).setUp()

    def tearDown(self):
        settings.LOCALE_PATHS = self._saved_locale_paths
        super(LocalePathsResolutionOrderI18NTests, self).tearDown()

    def test_locale_paths_translation(self):
        self.assertUgettext('Time', 'LOCALE_PATHS')

    def test_locale_paths_override_app_translation(self):
        saved_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) + ['regressiontests.i18n.resolution']
        try:
            self.assertUgettext('Time', 'LOCALE_PATHS')
        finally:
            settings.INSTALLED_APPS = saved_installed_apps

    def test_locale_paths_override_project_translation(self):
        saved_settings_module = settings.SETTINGS_MODULE
        settings.SETTINGS_MODULE = 'regressiontests'
        try:
            self.assertUgettext('Date/time', 'LOCALE_PATHS')
        finally:
            settings.SETTINGS_MODULE = saved_settings_module
class ProjectResolutionOrderI18NTests(ResolutionOrderI18NTests):
    """Project-level catalogs win over application catalogs."""

    def setUp(self):
        self._saved_settings_module = settings.SETTINGS_MODULE
        settings.SETTINGS_MODULE = 'regressiontests'
        super(ProjectResolutionOrderI18NTests, self).setUp()

    def tearDown(self):
        settings.SETTINGS_MODULE = self._saved_settings_module
        super(ProjectResolutionOrderI18NTests, self).tearDown()

    def test_project_translation(self):
        self.assertUgettext('Date/time', 'PROJECT')

    def test_project_override_app_translation(self):
        saved_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS = list(settings.INSTALLED_APPS) + ['regressiontests.i18n.resolution']
        try:
            self.assertUgettext('Date/time', 'PROJECT')
        finally:
            settings.INSTALLED_APPS = saved_installed_apps
class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests):
    """When no project/app catalog provides a string, Django's own does."""

    def test_django_fallback(self):
        translated = ugettext('Date/time')
        self.assertEqual(translated, 'Datum/Zeit')
class TestModels(TestCase):
    def test_lazy(self):
        # Saving must work for a model that carries lazily translated values.
        tm = TestModel()
        tm.save()

    def test_safestr(self):
        # Safe (pre-escaped) unicode and byte strings must survive a save.
        c = Company(cents_payed=12, products_delivered=1)
        c.name = SafeUnicode(u'Iñtërnâtiônàlizætiøn1')
        c.save()
        c.name = SafeString(u'Iñtërnâtiônàlizætiøn1'.encode('utf-8'))
        c.save()
class TestLanguageInfo(TestCase):
    def test_localized_language_info(self):
        # get_language_info() exposes code, English/local names and text
        # direction for a language code.
        li = get_language_info('de')
        self.assertEqual(li['code'], 'de')
        self.assertEqual(li['name_local'], u'Deutsch')
        self.assertEqual(li['name'], 'German')
        # German is written left-to-right.
        self.assertEqual(li['bidi'], False)
class MultipleLocaleActivationTests(TestCase):
    """
    Tests for template rendering behavior when multiple locales are activated
    during the lifetime of the same process.
    """

    def setUp(self):
        self._old_language = get_language()

    def tearDown(self):
        activate(self._old_language)

    def _render_after_switch(self, template_source, reset=None):
        """Compile *template_source* while 'de' is active, optionally reset
        the active language ('restore' or 'deactivate'), then activate 'nl'
        and return the rendered output."""
        activate('de')
        t = Template(template_source)
        if reset == 'restore':
            activate(self._old_language)
        elif reset == 'deactivate':
            deactivate()
        activate('nl')
        return t.render(Context({}))

    def test_single_locale_activation(self):
        """
        Simple baseline behavior with one locale for all the supported i18n constructs.
        """
        activate('fr')
        self.assertEqual(Template("{{ _('Yes') }}").render(Context({})), 'Oui')
        self.assertEqual(Template("{% load i18n %}{% trans 'Yes' %}").render(Context({})), 'Oui')
        self.assertEqual(Template("{% load i18n %}{% blocktrans %}Yes{% endblocktrans %}").render(Context({})), 'Oui')

    # Literal marked up with _() in a filter expression

    def test_multiple_locale_filter(self):
        result = self._render_after_switch(
            "{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}", reset='restore')
        self.assertEqual(result, 'nee')

    def test_multiple_locale_filter_deactivate(self):
        result = self._render_after_switch(
            "{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}", reset='deactivate')
        self.assertEqual(result, 'nee')

    def test_multiple_locale_filter_direct_switch(self):
        result = self._render_after_switch(
            "{% load i18n %}{{ 0|yesno:_('yes,no,maybe') }}")
        self.assertEqual(result, 'nee')

    # Literal marked up with _()

    def test_multiple_locale(self):
        self.assertEqual(
            self._render_after_switch("{{ _('No') }}", reset='restore'), 'Nee')

    def test_multiple_locale_deactivate(self):
        self.assertEqual(
            self._render_after_switch("{{ _('No') }}", reset='deactivate'), 'Nee')

    def test_multiple_locale_direct_switch(self):
        self.assertEqual(self._render_after_switch("{{ _('No') }}"), 'Nee')

    # Literal marked up with _(), loading the i18n template tag library

    def test_multiple_locale_loadi18n(self):
        self.assertEqual(
            self._render_after_switch("{% load i18n %}{{ _('No') }}",
                                      reset='restore'), 'Nee')

    def test_multiple_locale_loadi18n_deactivate(self):
        self.assertEqual(
            self._render_after_switch("{% load i18n %}{{ _('No') }}",
                                      reset='deactivate'), 'Nee')

    def test_multiple_locale_loadi18n_direct_switch(self):
        self.assertEqual(
            self._render_after_switch("{% load i18n %}{{ _('No') }}"), 'Nee')

    # trans i18n tag

    def test_multiple_locale_trans(self):
        self.assertEqual(
            self._render_after_switch("{% load i18n %}{% trans 'No' %}",
                                      reset='restore'), 'Nee')

    def test_multiple_locale_deactivate_trans(self):
        self.assertEqual(
            self._render_after_switch("{% load i18n %}{% trans 'No' %}",
                                      reset='deactivate'), 'Nee')

    def test_multiple_locale_direct_switch_trans(self):
        self.assertEqual(
            self._render_after_switch("{% load i18n %}{% trans 'No' %}"), 'Nee')

    # blocktrans i18n tag

    def test_multiple_locale_btrans(self):
        self.assertEqual(
            self._render_after_switch(
                "{% load i18n %}{% blocktrans %}No{% endblocktrans %}",
                reset='restore'), 'Nee')

    def test_multiple_locale_deactivate_btrans(self):
        self.assertEqual(
            self._render_after_switch(
                "{% load i18n %}{% blocktrans %}No{% endblocktrans %}",
                reset='deactivate'), 'Nee')

    def test_multiple_locale_direct_switch_btrans(self):
        self.assertEqual(
            self._render_after_switch(
                "{% load i18n %}{% blocktrans %}No{% endblocktrans %}"), 'Nee')
| mit |
mzdaniel/oh-mainline | vendor/packages/celery/celery/utils/mail.py | 18 | 4826 | # -*- coding: utf-8 -*-
"""
celery.utils.mail
~~~~~~~~~~~~~~~~~
How task error emails are formatted and sent.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
import smtplib
try:
from email.mime.text import MIMEText
except ImportError:
from email.MIMEText import MIMEText # noqa
from celery.utils import get_symbol_by_name
# smtplib.SMTP() only grew a ``timeout`` argument in Python 2.6.
supports_timeout = sys.version_info >= (2, 6)


class SendmailWarning(UserWarning):
    """Problem happened while sending the email message."""
class Message(object):
    """A plain-text email message.

    ``to`` may be a single address or a list/tuple of addresses; it is
    normalized to a list so the recipients can always be joined.
    """

    def __init__(self, to=None, sender=None, subject=None, body=None,
                 charset="us-ascii"):
        if not isinstance(to, (list, tuple)):
            to = [to]
        self.to = to
        self.sender = sender
        self.subject = subject
        self.body = body
        self.charset = charset

    def __repr__(self):
        return "<Email: To:%r Subject:%r>" % (self.to, self.subject)

    def __str__(self):
        # Render as an RFC 2822 message string.
        mime = MIMEText(self.body, "plain", self.charset)
        mime["Subject"] = self.subject
        mime["From"] = self.sender
        mime["To"] = ", ".join(self.to)
        return mime.as_string()
class Mailer(object):
    """Thin SMTP client used to deliver :class:`Message` instances."""

    def __init__(self, host="localhost", port=0, user=None, password=None,
                 timeout=2, use_ssl=False, use_tls=False):
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.timeout = timeout
        self.use_ssl = use_ssl
        self.use_tls = use_tls

    def send(self, message):
        """Deliver *message*, applying ``self.timeout`` to the connection."""
        if not supports_timeout:
            # Older Pythons: SMTP() has no timeout argument, so temporarily
            # lower the process-wide socket default instead.
            import socket
            saved_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(self.timeout)
            try:
                self._send(message)
            finally:
                socket.setdefaulttimeout(saved_timeout)
        else:
            self._send(message, timeout=self.timeout)

    def _send(self, message, **kwargs):
        make_client = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
        client = make_client(self.host, self.port, **kwargs)
        if self.use_tls:
            client.ehlo()
            client.starttls()
            client.ehlo()
        if self.user and self.password:
            client.login(self.user, self.password)
        client.sendmail(message.sender, message.to, str(message))
        client.quit()
class ErrorMail(object):
    """Defines how and when task error e-mails should be sent.

    :param task: The task instance that raised the error.

    Keyword arguments ``subject`` and ``body`` override the class-level
    :attr:`subject` and :attr:`body` format strings, which are passed a
    context containing the following keys:

    * name      -- Name of the task.
    * id        -- UUID of the task.
    * exc       -- String representation of the exception.
    * args      -- Positional arguments.
    * kwargs    -- Keyword arguments.
    * traceback -- String representation of the traceback.
    * hostname  -- Worker hostname.
    """

    # pep8.py borks on a inline signature separator and
    # says "trailing whitespace" ;)
    EMAIL_SIGNATURE_SEP = "-- "

    #: Format string used to generate error email subjects.
    subject = """\
[celery@%(hostname)s] Error: Task %(name)s (%(id)s): %(exc)s
"""

    #: Format string used to generate error email content.
    body = """
Task %%(name)s with id %%(id)s raised exception:\n%%(exc)r
Task was called with args: %%(args)s kwargs: %%(kwargs)s.
The contents of the full traceback was:
%%(traceback)s
%(EMAIL_SIGNATURE_SEP)s
Just to let you know,
celeryd at %%(hostname)s.
""" % {"EMAIL_SIGNATURE_SEP": EMAIL_SIGNATURE_SEP}

    #: Optional list of exception names for which error mail is sent;
    #: ``None`` means mail is sent for every error.
    error_whitelist = None

    def __init__(self, task, **kwargs):
        self.task = task
        self.email_subject = kwargs.get("subject", self.subject)
        self.email_body = kwargs.get("body", self.body)
        # Use a default so tasks without the attribute don't raise
        # AttributeError (previously ``getattr`` had no default).
        self.error_whitelist = getattr(task, "error_whitelist", None)

    def should_send(self, context, exc):
        """Returns true or false depending on if a task error mail
        should be sent for this type of error."""
        if not self.error_whitelist:
            # No whitelist configured: always send. (Checking first also
            # avoids mapping over a ``None`` whitelist, which raised
            # TypeError before.)
            return True
        allow_classes = tuple(map(get_symbol_by_name, self.error_whitelist))
        return isinstance(exc, allow_classes)

    def format_subject(self, context):
        # Use the per-instance subject so the ``subject`` kwarg passed to
        # __init__ is honored (it was previously stored but ignored).
        return self.email_subject.strip() % context

    def format_body(self, context):
        # Likewise honor the ``body`` kwarg passed to __init__.
        return self.email_body.strip() % context

    def send(self, context, exc, fail_silently=True):
        """Send the error email for *exc* if :meth:`should_send` allows it."""
        if self.should_send(context, exc):
            self.task.app.mail_admins(self.format_subject(context),
                                      self.format_body(context),
                                      fail_silently=fail_silently)
| agpl-3.0 |
eayunstack/nova | nova/tests/api/openstack/compute/contrib/test_server_password.py | 12 | 3178 | # Copyright 2012 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.config import cfg
import webob
from nova.api.metadata import password
from nova import compute
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
CONF = cfg.CONF
CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
class ServerPasswordTest(test.TestCase):
    # Response serialization exercised by this class; the XML subclass
    # below overrides it to re-run the suite with XML.
    content_type = 'application/json'

    def setUp(self):
        super(ServerPasswordTest, self).setUp()
        fakes.stub_out_nw_api(self.stubs)
        # Stub compute.api.API.get so any instance lookup returns a fake
        # instance object with empty system metadata.
        self.stubs.Set(
            compute.api.API, 'get',
            lambda self, ctxt, *a, **kw:
                fake_instance.fake_instance_obj(
                    ctxt,
                    system_metadata={},
                    expected_attrs=['system_metadata']))
        self.password = 'fakepass'

        def fake_extract_password(instance):
            # Always report the password currently stored on the test.
            return self.password

        def fake_convert_password(context, password):
            # Capture the password the API sets instead of persisting it.
            self.password = password
            return {}

        self.stubs.Set(password, 'extract_password', fake_extract_password)
        self.stubs.Set(password, 'convert_password', fake_convert_password)
        # Load only the server-password extension.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Server_password'])

    def _make_request(self, url, method='GET'):
        # Issue a WSGI request against a minimal app exposing only the
        # servers and os-server-password resources.
        req = webob.Request.blank(url)
        req.headers['Accept'] = self.content_type
        req.method = method
        res = req.get_response(
            fakes.wsgi_app(init_only=('servers', 'os-server-password')))
        return res

    def _get_pass(self, body):
        # Extract the password from a JSON response body.
        return jsonutils.loads(body).get('password')

    def test_get_password(self):
        url = '/v2/fake/servers/fake/os-server-password'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        self.assertEqual(self._get_pass(res.body), 'fakepass')

    def test_reset_password(self):
        url = '/v2/fake/servers/fake/os-server-password'
        # DELETE clears the stored password ...
        res = self._make_request(url, 'DELETE')
        self.assertEqual(res.status_int, 204)
        # ... so a subsequent GET returns an empty password.
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        self.assertEqual(self._get_pass(res.body), '')
class ServerPasswordXmlTest(ServerPasswordTest):
    # Re-run the whole ServerPasswordTest suite with XML serialization.
    content_type = 'application/xml'

    def _get_pass(self, body):
        # NOTE(vish): first element is password
        return etree.XML(body).text or ''
| apache-2.0 |
openstack-infra/shade | shade/tests/functional/test_qos_dscp_marking_rule.py | 1 | 2765 | # Copyright 2017 OVH SAS
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_qos_dscp_marking_rule
----------------------------------
Functional tests for `shade` QoS DSCP marking rule methods.
"""
from shade.exc import OpenStackCloudException
from shade.tests.functional import base
class TestQosDscpMarkingRule(base.BaseFunctionalTestCase):
    """Functional tests for QoS DSCP marking rules against a live cloud."""

    def setUp(self):
        super(TestQosDscpMarkingRule, self).setUp()
        # These tests need Neutron with the 'qos' extension enabled.
        if not self.operator_cloud.has_service('network'):
            self.skipTest('Network service not supported by cloud')
        if not self.operator_cloud._has_neutron_extension('qos'):
            self.skipTest('QoS network extension not supported by cloud')
        # Every rule lives inside a policy; create one per test.
        policy_name = self.getUniqueString('qos_policy')
        self.policy = self.operator_cloud.create_qos_policy(name=policy_name)
        self.addCleanup(self._cleanup_qos_policy)

    def _cleanup_qos_policy(self):
        # Wrap any cleanup failure in the project exception type so it is
        # reported consistently.
        try:
            self.operator_cloud.delete_qos_policy(self.policy['id'])
        except Exception as e:
            raise OpenStackCloudException(e)

    def test_qos_dscp_marking_rule_lifecycle(self):
        """Exercise create, update, list and delete of a DSCP marking rule."""
        dscp_mark = 16
        updated_dscp_mark = 32
        # Create DSCP marking rule
        rule = self.operator_cloud.create_qos_dscp_marking_rule(
            self.policy['id'],
            dscp_mark=dscp_mark)
        self.assertIn('id', rule)
        self.assertEqual(dscp_mark, rule['dscp_mark'])

        # Now try to update rule
        updated_rule = self.operator_cloud.update_qos_dscp_marking_rule(
            self.policy['id'],
            rule['id'],
            dscp_mark=updated_dscp_mark)
        self.assertIn('id', updated_rule)
        self.assertEqual(updated_dscp_mark, updated_rule['dscp_mark'])

        # List rules from policy
        policy_rules = self.operator_cloud.list_qos_dscp_marking_rules(
            self.policy['id'])
        self.assertEqual([updated_rule], policy_rules)

        # Delete rule
        self.operator_cloud.delete_qos_dscp_marking_rule(
            self.policy['id'], updated_rule['id'])

        # Check if there is no rules in policy
        policy_rules = self.operator_cloud.list_qos_dscp_marking_rules(
            self.policy['id'])
        self.assertEqual([], policy_rules)
| apache-2.0 |
oihane/server-tools | base_field_serialized/tests/test_serialized.py | 31 | 2214 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
from openerp.tests.common import TransactionCase
class BaseFieldSerializedTestModel(models.Model):
    # Throwaway model used only by the test below; it is built against the
    # live registry at test time rather than installed as a module.
    _name = 'base.field.serialized.test.model'

    # The field under test: stores arbitrary JSON-serializable values.
    serialized = fields.Serialized('Serialized')
class TestBaseFieldSerialized(TransactionCase):
    def test_ReadWrite(self):
        # The test model is not part of any installed module, so build and
        # register it manually; the calls below mirror the ORM's normal
        # model-loading sequence and must run in this order.
        BaseFieldSerializedTestModel._build_model(self.registry, self.cr)
        testmodel = self.env['base.field.serialized.test.model']
        testmodel._prepare_setup()
        testmodel._setup_base(False)
        testmodel._setup_fields()
        testmodel._setup_complete()
        testmodel._auto_init()
        # Serialized values survive create/write round trips ...
        record = testmodel.create(
            {'serialized': ['hello world']})
        self.assertEqual(record.serialized, ['hello world'])
        # ... including after the record cache has been flushed.
        self.env.invalidate_all()
        self.assertEqual(record.serialized, ['hello world'])
        record.write({'serialized': {'hello': 'world'}})
        self.env.invalidate_all()
        self.assertEqual(record.serialized, {'hello': 'world'})
        # Writing None resets the field to an empty dict.
        record.write({'serialized': None})
        self.assertEqual(
            self.registry['base.field.serialized.test.model'].browse(
                self.cr, self.uid, record.id).serialized,
            {})
| agpl-3.0 |
markflyhigh/incubator-beam | sdks/python/apache_beam/testing/benchmarks/chicago_taxi/process_tfma.py | 1 | 6321 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a batch job for performing Tensorflow Model Analysis."""
from __future__ import absolute_import, division, print_function
import argparse
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.evaluators import evaluator
import apache_beam as beam
from apache_beam.metrics.metric import MetricsFilter
from apache_beam.testing.load_tests.load_test_metrics_utils import MeasureTime
from apache_beam.testing.load_tests.load_test_metrics_utils import MetricsReader
from trainer import taxi
def process_tfma(schema_file,
                 big_query_table=None,
                 eval_model_dir=None,
                 max_eval_rows=None,
                 pipeline_args=None,
                 publish_to_bq=False,
                 project=None,
                 metrics_table=None,
                 metrics_dataset=None):
  """Runs a batch job to evaluate the eval_model against the given input.

  Args:
    schema_file: A file containing a text-serialized Schema that describes the
      eval data.
    big_query_table: A BigQuery table name specified as DATASET.TABLE which
      should be the input for evaluation. Required.
    eval_model_dir: A directory where the eval model is located.
    max_eval_rows: Number of rows to query from BigQuery.
    pipeline_args: additional DataflowRunner or DirectRunner args passed to
      the beam pipeline.
    publish_to_bq: If True, publish benchmark metrics to BigQuery.
    project: GCP project that owns the benchmark-metrics dataset.
    metrics_dataset: BigQuery dataset for the benchmark metrics.
    metrics_table: BigQuery table for the benchmark metrics; also used as
      the Beam metrics namespace.

  Raises:
    ValueError: if big_query_table is not specified.
  """
  if big_query_table is None:
    raise ValueError(
        '--big_query_table should be provided.')

  # Compute metrics both globally and sliced by trip start hour.
  slice_spec = [
      tfma.slicer.SingleSliceSpec(),
      tfma.slicer.SingleSliceSpec(columns=['trip_start_hour'])
  ]

  metrics_namespace = metrics_table

  schema = taxi.read_schema(schema_file)

  eval_shared_model = tfma.default_eval_shared_model(
      eval_saved_model_path=eval_model_dir,
      add_metrics_callbacks=[
          tfma.post_export_metrics.calibration_plot_and_prediction_histogram(),
          tfma.post_export_metrics.auc_plots()
      ])

  # Only wire up the benchmark-metrics publisher when requested.
  metrics_monitor = None
  if publish_to_bq:
    metrics_monitor = MetricsReader(
        project_name=project,
        bq_table=metrics_table,
        bq_dataset=metrics_dataset,
        filters=MetricsFilter().with_namespace(metrics_namespace)
    )

  pipeline = beam.Pipeline(argv=pipeline_args)

  query = taxi.make_sql(big_query_table, max_eval_rows, for_eval=True)
  raw_feature_spec = taxi.get_raw_feature_spec(schema)
  raw_data = (
      pipeline
      | 'ReadBigQuery' >> beam.io.Read(
          beam.io.BigQuerySource(query=query, use_standard_sql=True))
      | 'Measure time: Start' >> beam.ParDo(MeasureTime(metrics_namespace))
      | 'CleanData' >> beam.Map(lambda x: (
          taxi.clean_raw_data_dict(x, raw_feature_spec))))

  # Examples must be in clean tf-example format.
  coder = taxi.make_proto_coder(schema)

  # Prepare arguments for Extract, Evaluate and Write steps
  extractors = tfma.default_extractors(
      eval_shared_model=eval_shared_model,
      slice_spec=slice_spec,
      desired_batch_size=None,
      materialize=False)

  evaluators = tfma.default_evaluators(
      eval_shared_model=eval_shared_model,
      desired_batch_size=None,
      num_bootstrap_samples=1)

  _ = (
      raw_data
      | 'ToSerializedTFExample' >> beam.Map(coder.encode)
      | 'Extract Results' >> tfma.InputsToExtracts()
      | 'Extract and evaluate' >> tfma.ExtractAndEvaluate(
          extractors=extractors,
          evaluators=evaluators)
      | 'Map Evaluations to PCollection' >> MapEvalToPCollection()
      | 'Measure time: End' >> beam.ParDo(
          MeasureTime(metrics_namespace))
  )

  result = pipeline.run()
  result.wait_until_finish()
  # Publish the collected timing metrics once the pipeline has finished.
  if metrics_monitor:
    metrics_monitor.publish_metrics(result)
@beam.ptransform_fn
@beam.typehints.with_input_types(evaluator.Evaluation)
@beam.typehints.with_output_types(beam.typehints.Any)
def MapEvalToPCollection(  # pylint: disable=invalid-name
    evaluation):
  """Extracts the 'metrics' PCollection from a TFMA Evaluation dict."""
  return evaluation['metrics']
def main():
  """Parses command-line flags and runs the TFMA evaluation pipeline."""
  tf.logging.set_verbosity(tf.logging.INFO)

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--eval_model_dir',
      help='Input path to the model which will be evaluated.')
  parser.add_argument(
      '--big_query_table',
      help='BigQuery path to input examples which will be evaluated.')
  parser.add_argument(
      '--max_eval_rows',
      help='Maximum number of rows to evaluate on.',
      default=None,
      type=int)
  parser.add_argument(
      '--schema_file', help='File holding the schema for the input data')
  parser.add_argument(
      '--publish_to_big_query',
      help='Whether to publish to BQ',
      default=None,
      type=bool)
  parser.add_argument(
      '--metrics_dataset',
      help='BQ dataset',
      default=None,
      type=str)
  parser.add_argument(
      '--metrics_table',
      help='BQ table for storing metrics',
      default=None,
      type=str)
  parser.add_argument(
      '--metric_reporting_project',
      help='BQ table project',
      default=None,
      type=str)

  # Unknown flags are forwarded to the Beam pipeline as runner options.
  known_args, pipeline_args = parser.parse_known_args()
  process_tfma(
      big_query_table=known_args.big_query_table,
      eval_model_dir=known_args.eval_model_dir,
      max_eval_rows=known_args.max_eval_rows,
      schema_file=known_args.schema_file,
      pipeline_args=pipeline_args,
      publish_to_bq=known_args.publish_to_big_query,
      metrics_table=known_args.metrics_table,
      metrics_dataset=known_args.metrics_dataset,
      project=known_args.metric_reporting_project)
if __name__ == '__main__':
main()
| apache-2.0 |
OpenSeizureDetector/OpenSeizureDetector | fitbit_version/galileo/tests/testUtils.py | 3 | 3496 | import unittest
from galileo.utils import a2x, a2s, a2lsbi, a2msbi, i2lsba, s2a, x2a
class testa2x(unittest.TestCase):
    """a2x: array of ints to a delimiter-separated hex string."""

    def testSimple(self):
        self.assertEqual(a2x(range(10)), '00 01 02 03 04 05 06 07 08 09')

    def testNotShorten(self):
        # Runs of zeroes must not be collapsed.
        self.assertEqual(a2x([0] * 5), '00 00 00 00 00')

    def testDelim(self):
        # A custom delimiter replaces the default space.
        self.assertEqual(a2x(range(190, 196), '|'), 'BE|BF|C0|C1|C2|C3')
class testx2a(unittest.TestCase):
    """x2a: hex string to array of ints (inverse of a2x)."""

    def testSimple(self):
        self.assertEqual(x2a('2'), [2])
        # Leading zeroes are accepted.
        self.assertEqual(x2a('02'), [2])
        self.assertEqual(x2a('2 3'), [2, 3])
class testa2s(unittest.TestCase):
    """a2s: array of ints to string."""

    def testSimple(self):
        self.assertEqual(a2s(range(ord('a'), ord('d') + 1)), 'abcd')

    def testWithNUL(self):
        # By default, conversion stops at the first NUL byte.
        self.assertEqual(
            a2s(list(range(ord('a'), ord('d')+1)) + [0]*3 + list(range(ord('e'), ord('i')+1))),
            'abcd')

    def testWithNULNotPrint(self):
        # With the second argument False, NULs are kept verbatim.
        self.assertEqual(
            a2s(list(range(ord('a'), ord('d')+1)) + [0]*3 + list(range(ord('e'), ord('i')+1)), False),
            'abcd\0\0\0efghi')
class testa2lsbi(unittest.TestCase):
    """a2lsbi: byte array, least-significant byte first, to integer."""

    def test0(self):
        for data in ([0], [0] * 3, [0] * 10):
            self.assertEqual(a2lsbi(data), 0)

    def test1byte(self):
        for data, expected in (([8], 8), ([0xff], 0xff), ([0x80], 0x80)):
            self.assertEqual(a2lsbi(data), expected)

    def test2bytes(self):
        cases = (([1, 0], 1), ([0xff, 0], 0xff), ([0x80, 0], 0x80),
                 ([0, 1], 0x100), ([0, 0xff], 0xff00), ([0, 0x80], 0x8000))
        for data, expected in cases:
            self.assertEqual(a2lsbi(data), expected)
class testa2msbi(unittest.TestCase):
    """a2msbi: byte array, most-significant byte first, to integer."""

    def test0(self):
        for data in ([0], [0] * 3, [0] * 10):
            self.assertEqual(a2msbi(data), 0)

    def test1byte(self):
        for data, expected in (([8], 8), ([0xff], 0xff), ([0x80], 0x80)):
            self.assertEqual(a2msbi(data), expected)

    def test2bytes(self):
        cases = (([1, 0], 0x100), ([0xff, 0], 0xff00), ([0x80, 0], 0x8000),
                 ([0, 1], 0x1), ([0, 0xff], 0xff), ([0, 0x80], 0x80))
        for data, expected in cases:
            self.assertEqual(a2msbi(data), expected)
class testi2lsba(unittest.TestCase):
    """i2lsba: integer to byte array of given width, least-significant first."""

    def test0(self):
        for width in (1, 3, 5):
            self.assertEqual(i2lsba(0, width), [0] * width)

    def test1byte(self):
        for value in (1, 0xff, 0x80):
            self.assertEqual(i2lsba(value, 1), [value])

    def test2bytes(self):
        cases = ((1, [1, 0]), (0xff, [0xff, 0]), (0x80, [0x80, 0]),
                 (0x100, [0, 1]), (0xff00, [0, 0xff]), (0x8000, [0, 0x80]))
        for value, expected in cases:
            self.assertEqual(i2lsba(value, 2), expected)
class tests2a(unittest.TestCase):
    """s2a: string to array of ints (inverse of a2s)."""

    def testSimple(self):
        self.assertEqual(s2a('abcd'), list(range(ord('a'), ord('d')+1)))

    def testWithNUL(self):
        # NUL bytes are preserved in the resulting array.
        self.assertEqual(s2a('abcd\0\0\0efghi'),
                         list(range(ord('a'), ord('d')+1)) +
                         [0] * 3 + list(range(ord('e'), ord('i') + 1)))
| gpl-3.0 |
ibrahimsharaf/crashsimilarity | tests/test_model.py | 1 | 11586 | import unittest
import multiprocessing
import numpy as np
from crashsimilarity.stacktrace import StackTraceProcessor
from crashsimilarity.models import doc2vec, word2vec
class CrashSimilarityTest(unittest.TestCase):
    """Behavioral tests run against both the Doc2Vec and Word2Vec models.

    The four wmdistance_* scenarios previously duplicated the same
    preprocessing and distance-matrix code; that logic now lives in the
    private helpers below.
    """

    # Two realistic, unrelated crash stack traces shared by the wmdistance
    # tests below.
    _TRACE_A = "KiFastSystemCallRet | NtWaitForMultipleObjects | WaitForMultipleObjectsEx | RealMsgWaitForMultipleObjectsEx | CCliModalLoop::BlockFn | CoWaitForMultipleHandles | mozilla::ipc::MessageChannel::WaitForSyncNotifyWithA11yReentry | mozilla::ipc::MessageChannel::WaitForSyncNotify | mozilla::ipc::MessageChannel::Send | mozilla::dom::PScreenManagerChild::SendScreenRefresh | mozilla::widget::ScreenProxy::EnsureCacheIsValid | mozilla::widget::ScreenProxy::GetColorDepth | gfxPlatform::PopulateScreenInfo | gfxPlatform::Init | mozilla::dom::ContentProcess::Init | XRE_InitChildProcess | content_process_main | wmain | remainder | remainder | WinSqmStartSession | _SEH_epilog4 | WinSqmStartSession | _RtlUserThreadStart"
    _TRACE_B = "Assertion::~Assertion | Assertion::Destroy | InMemoryDataSource::DeleteForwardArcsEntry | PL_DHashTableEnumerate | InMemoryDataSource::~InMemoryDataSource | InMemoryDataSource::`vector deleting destructor' | InMemoryDataSource::Internal::Release | InMemoryDataSource::Release | nsCOMPtr_base::~nsCOMPtr_base | RDFXMLDataSourceImpl::`vector deleting destructor' | RDFXMLDataSourceImpl::Release | DoDeferredRelease<T> | XPCJSRuntime::GCCallback | Collect | js::GC | js::GCForReason | nsXPConnect::Collect | nsCycleCollector::GCIfNeeded | nsCycleCollector::Collect | nsCycleCollector::Shutdown | nsCycleCollector_shutdown | mozilla::ShutdownXPCOM | ScopedXPCOMStartup::~ScopedXPCOMStartup | XREMain::XRE_main | XRE_main | wmain | __tmainCRTStartup | BaseThreadInitThunk | __RtlUserThreadStart | _RtlUserThreadStart"
    # A degenerate trace: after preprocessing and de-duplication it collapses
    # to (at most) a single token.
    _TRACE_SINGLE = "A | A | A"

    # Train both models once and share them across all tests.
    @classmethod
    def setUpClass(cls):
        cls.paths = ['tests/test.json']
        cls.doc2vec_model = doc2vec.Doc2Vec(cls.paths)
        cls.doc2vec_trained_model = cls.doc2vec_model.get_model()
        cls.doc2vec_trained_model.init_sims(replace=True)
        cls.word2vec_model = word2vec.Word2Vec(cls.paths)
        cls.word2vec_trained_model = cls.word2vec_model.get_model()
        cls.word2vec_trained_model.init_sims(replace=True)

    @staticmethod
    def _in_vocab_words(trained_model, doc):
        """Preprocesses a raw stack trace and keeps only in-vocabulary tokens."""
        tokens = StackTraceProcessor.preprocess(doc)
        return [w for w in np.unique(tokens).tolist() if w in trained_model.wv.vocab]

    @staticmethod
    def _distance_matrix(model, trained_model, words):
        """Cosine distances from every vocabulary vector to each word in `words`."""
        # Word2Vec keeps its L2-normalized vectors in a separate attribute.
        if model.get_model_name() == 'Word2Vec':
            vectors = trained_model.wv.vectors_norm
        else:
            vectors = trained_model.wv.vectors
        columns = vectors[[trained_model.wv.vocab[w].index for w in words]]
        return np.array(1.0 - np.dot(vectors, columns.transpose()), dtype=np.double)

    def _wmdistance(self, model, trained_model, doc1, doc2, **kwargs):
        """Runs the full preprocess -> distance-matrix -> wmdistance pipeline."""
        words1 = self._in_vocab_words(trained_model, doc1)
        words2 = self._in_vocab_words(trained_model, doc2)
        all_distances = self._distance_matrix(model, trained_model, words1)
        return model.wmdistance(words1, words2, all_distances, **kwargs)

    # Test if equal reports have distance 0 and different reports have a
    # distance greater than 0.
    def zero_dist_coherence(self, model):
        signature = 'mozilla::testZeroCoherence'
        similarities = model.signature_similarity(self.paths, signature, signature)
        errors = []
        for doc1, doc2, dist in similarities:
            if doc1 != doc2:
                try:
                    self.assertTrue(dist > 0)
                except AssertionError:
                    errors.append((doc1, doc2, dist))
            else:
                try:
                    self.assertEqual(dist, 0)
                except AssertionError:
                    errors.append((doc1, doc2, dist))
        self.assertEqual(len(errors), 0)

    def test_zero_dist_coherence(self):
        self.zero_dist_coherence(self.doc2vec_model)
        self.zero_dist_coherence(self.word2vec_model)

    # Test if reports with the same words in different order have a
    # distance different than zero.
    def order_similarity(self, model):
        signature1 = 'mozilla::testOrdem1'
        signature2 = 'mozilla::testOrdem2'
        signature3 = 'mozilla::testOrdem3'
        similarity_mid = model.signature_similarity(self.paths, signature1, signature2)
        similarity_end = model.signature_similarity(self.paths, signature1, signature3)
        doc_mid1, doc_mid2, dist_mid = similarity_mid[0]
        doc_end1, doc_end2, dist_end = similarity_end[0]
        self.assertTrue(dist_mid < dist_end)

    @unittest.expectedFailure
    def test_order_similarity(self):
        self.order_similarity(self.doc2vec_model)
        self.order_similarity(self.word2vec_model)

    def wmdistance_cosine_non_zero_distance(self, model, trained_model):
        """Two unrelated traces must yield a finite distance."""
        distance = self._wmdistance(model, trained_model, self._TRACE_A, self._TRACE_B)
        self.assertNotEqual(float('inf'), distance)

    def wmdistance_cosine_zero_distance(self, model, trained_model):
        """Identical single-token documents: the implementation reports inf;
        this test pins that behavior."""
        distance = self._wmdistance(model, trained_model,
                                    self._TRACE_SINGLE, self._TRACE_SINGLE)
        self.assertEqual(float('inf'), distance)

    def wmdistance_euclidean_non_zero_distance(self, model, trained_model):
        """Same as the cosine case but with the euclidean metric flag."""
        distance = self._wmdistance(model, trained_model, self._TRACE_A, self._TRACE_B,
                                    distance_metric='euclidean')
        self.assertNotEqual(float('inf'), distance)

    def wmdistance_euclidean_zero_distance(self, model, trained_model):
        """Identical single-token documents with the euclidean metric flag."""
        distance = self._wmdistance(model, trained_model,
                                    self._TRACE_SINGLE, self._TRACE_SINGLE,
                                    distance_metric='euclidean')
        self.assertEqual(float('inf'), distance)

    def test_wmdistance(self):
        self.wmdistance_cosine_non_zero_distance(self.doc2vec_model, self.doc2vec_trained_model)
        self.wmdistance_cosine_non_zero_distance(self.word2vec_model, self.word2vec_trained_model)
        self.wmdistance_cosine_zero_distance(self.doc2vec_model, self.doc2vec_trained_model)
        self.wmdistance_cosine_zero_distance(self.word2vec_model, self.word2vec_trained_model)
        self.wmdistance_euclidean_non_zero_distance(self.doc2vec_model, self.doc2vec_trained_model)
        self.wmdistance_euclidean_non_zero_distance(self.word2vec_model, self.word2vec_trained_model)
        self.wmdistance_euclidean_zero_distance(self.doc2vec_model, self.doc2vec_trained_model)
        self.wmdistance_euclidean_zero_distance(self.word2vec_model, self.word2vec_trained_model)

    def read_corpus(self, model):
        """The corpus loader must return a list of the expected size."""
        resp = model._read_corpus()
        self.assertEqual(type(resp), list)
        self.assertEqual(len(resp), 378)

    def test_read_corpus(self):
        self.read_corpus(self.doc2vec_model)
        self.read_corpus(self.word2vec_model)

    def train_model(self, model):
        """Training must honor the configured hyper-parameters."""
        resp = model._train_model()
        try:
            workers = multiprocessing.cpu_count()
        except NotImplementedError:
            workers = 2
        self.assertEqual(workers, resp.workers)
        self.assertEqual(8, resp.window)
        self.assertEqual(20, resp.epochs)
        self.assertEqual(101, len(resp.wv.vocab))

    def test_train_model(self):
        self.train_model(self.doc2vec_model)
        self.train_model(self.word2vec_model)
| mpl-2.0 |
yuxans/badgirl | src/hi.py | 1 | 1145 | #!/usr/bin/env python
# Copyright (c) 2002 Brad Stewart
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""hi.py - dummy function that says Hi """
handler_list=["hi"]
from moobot_module import MooBotModule
class hi(MooBotModule):
    """Replies "hi" whenever a message is exactly "hi"; kept as a smoke test."""

    def __init__(self):
        # Fire only on a message that is the bare word "hi".
        self.regex = "^hi$"

    def handler(self, **args):
        """A dummy handler we used for testing -- this is the first handler
        we wrote."""
        from irclib import Event
        destination = self.return_to_sender(args)
        return Event("privmsg", "", destination, ["hi"])
| gpl-2.0 |
ESRF-BCU/emotion | tests/TestStates.py | 1 | 2381 | import unittest
import sys
import os
sys.path.insert(
0,
os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"..")))
from emotion.axis import AxisState
class TestStates(unittest.TestCase):
    """Unit tests for emotion.axis.AxisState.

    Uses assertEqual / assertIsInstance instead of the deprecated
    assertEquals alias.
    """

    def setUp(self):
        pass

    def test_states(self):
        """Covers the default state, transitions, and custom states."""
        # empty state
        s = AxisState()
        self.assertEqual(s, "UNKNOWN")
        # moving
        s.set("MOVING")
        self.assertEqual(s, "MOVING")
        # moving => not ready
        self.assertFalse(s.READY)
        # now ready but no more moving
        s.set("READY")
        self.assertTrue(s.READY)
        self.assertFalse(s.MOVING)
        # custom state
        s.create_state("PARKED", "c'est ma place !!")
        s.set("PARKED")
        self.assertTrue(s.PARKED)
        # still ready
        self.assertTrue(s.READY)
        self.assertEqual(s, "PARKED")
        # current_states() renders the state set as a string.
        self.assertIsInstance(s.current_states(), str)
        # bad name for a state
        self.assertRaises(ValueError, s.create_state, "A bad state")

    def test_init_state(self):
        """A freshly constructed AxisState is UNKNOWN."""
        self.assertEqual(AxisState(), "UNKNOWN")

    def test_desc(self):
        """Custom descriptions are kept; built-in ones are provided."""
        s = AxisState(("KAPUT", "auff"), "LIMNEG", "READY")
        self.assertTrue(s.READY)
        self.assertEqual(s._state_desc["KAPUT"], "auff")
        self.assertEqual(s._state_desc["LIMNEG"], "Hardware low limit active")

    def test_from_current_states_str(self):
        """An AxisState can be reconstructed from its current_states() string."""
        s = AxisState(("KAPUT", "auff"), "LIMNEG", "READY")
        states_str = s.current_states()
        t = AxisState(states_str)
        self.assertTrue(t.READY)
        self.assertEqual(t._state_desc["KAPUT"], "auff")
        self.assertEqual(t._state_desc["LIMNEG"], "Hardware low limit active")
        self.assertEqual(s.current_states(), t.current_states())
        u = AxisState()
        v = AxisState(u.current_states())
        self.assertEqual(u.current_states(), v.current_states())

    def test_state_from_state(self):
        """Copy-constructing from another AxisState preserves the states."""
        s = AxisState("READY")
        t = AxisState(s)
        self.assertEqual(s.current_states(), t.current_states())

    def test_clear_state(self):
        """clear() resets to UNKNOWN and the state remains usable."""
        s = AxisState("READY")
        s.clear()
        self.assertEqual(s, "UNKNOWN")
        s.set("MOVING")
        self.assertEqual(s, "MOVING")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
andris210296/andris-projeto | backend/venv/test/lib/python2.7/site-packages/unidecode/x0b7.py | 253 | 4833 | data = (
'ddwim', # 0x00
'ddwib', # 0x01
'ddwibs', # 0x02
'ddwis', # 0x03
'ddwiss', # 0x04
'ddwing', # 0x05
'ddwij', # 0x06
'ddwic', # 0x07
'ddwik', # 0x08
'ddwit', # 0x09
'ddwip', # 0x0a
'ddwih', # 0x0b
'ddyu', # 0x0c
'ddyug', # 0x0d
'ddyugg', # 0x0e
'ddyugs', # 0x0f
'ddyun', # 0x10
'ddyunj', # 0x11
'ddyunh', # 0x12
'ddyud', # 0x13
'ddyul', # 0x14
'ddyulg', # 0x15
'ddyulm', # 0x16
'ddyulb', # 0x17
'ddyuls', # 0x18
'ddyult', # 0x19
'ddyulp', # 0x1a
'ddyulh', # 0x1b
'ddyum', # 0x1c
'ddyub', # 0x1d
'ddyubs', # 0x1e
'ddyus', # 0x1f
'ddyuss', # 0x20
'ddyung', # 0x21
'ddyuj', # 0x22
'ddyuc', # 0x23
'ddyuk', # 0x24
'ddyut', # 0x25
'ddyup', # 0x26
'ddyuh', # 0x27
'ddeu', # 0x28
'ddeug', # 0x29
'ddeugg', # 0x2a
'ddeugs', # 0x2b
'ddeun', # 0x2c
'ddeunj', # 0x2d
'ddeunh', # 0x2e
'ddeud', # 0x2f
'ddeul', # 0x30
'ddeulg', # 0x31
'ddeulm', # 0x32
'ddeulb', # 0x33
'ddeuls', # 0x34
'ddeult', # 0x35
'ddeulp', # 0x36
'ddeulh', # 0x37
'ddeum', # 0x38
'ddeub', # 0x39
'ddeubs', # 0x3a
'ddeus', # 0x3b
'ddeuss', # 0x3c
'ddeung', # 0x3d
'ddeuj', # 0x3e
'ddeuc', # 0x3f
'ddeuk', # 0x40
'ddeut', # 0x41
'ddeup', # 0x42
'ddeuh', # 0x43
'ddyi', # 0x44
'ddyig', # 0x45
'ddyigg', # 0x46
'ddyigs', # 0x47
'ddyin', # 0x48
'ddyinj', # 0x49
'ddyinh', # 0x4a
'ddyid', # 0x4b
'ddyil', # 0x4c
'ddyilg', # 0x4d
'ddyilm', # 0x4e
'ddyilb', # 0x4f
'ddyils', # 0x50
'ddyilt', # 0x51
'ddyilp', # 0x52
'ddyilh', # 0x53
'ddyim', # 0x54
'ddyib', # 0x55
'ddyibs', # 0x56
'ddyis', # 0x57
'ddyiss', # 0x58
'ddying', # 0x59
'ddyij', # 0x5a
'ddyic', # 0x5b
'ddyik', # 0x5c
'ddyit', # 0x5d
'ddyip', # 0x5e
'ddyih', # 0x5f
'ddi', # 0x60
'ddig', # 0x61
'ddigg', # 0x62
'ddigs', # 0x63
'ddin', # 0x64
'ddinj', # 0x65
'ddinh', # 0x66
'ddid', # 0x67
'ddil', # 0x68
'ddilg', # 0x69
'ddilm', # 0x6a
'ddilb', # 0x6b
'ddils', # 0x6c
'ddilt', # 0x6d
'ddilp', # 0x6e
'ddilh', # 0x6f
'ddim', # 0x70
'ddib', # 0x71
'ddibs', # 0x72
'ddis', # 0x73
'ddiss', # 0x74
'dding', # 0x75
'ddij', # 0x76
'ddic', # 0x77
'ddik', # 0x78
'ddit', # 0x79
'ddip', # 0x7a
'ddih', # 0x7b
'ra', # 0x7c
'rag', # 0x7d
'ragg', # 0x7e
'rags', # 0x7f
'ran', # 0x80
'ranj', # 0x81
'ranh', # 0x82
'rad', # 0x83
'ral', # 0x84
'ralg', # 0x85
'ralm', # 0x86
'ralb', # 0x87
'rals', # 0x88
'ralt', # 0x89
'ralp', # 0x8a
'ralh', # 0x8b
'ram', # 0x8c
'rab', # 0x8d
'rabs', # 0x8e
'ras', # 0x8f
'rass', # 0x90
'rang', # 0x91
'raj', # 0x92
'rac', # 0x93
'rak', # 0x94
'rat', # 0x95
'rap', # 0x96
'rah', # 0x97
'rae', # 0x98
'raeg', # 0x99
'raegg', # 0x9a
'raegs', # 0x9b
'raen', # 0x9c
'raenj', # 0x9d
'raenh', # 0x9e
'raed', # 0x9f
'rael', # 0xa0
'raelg', # 0xa1
'raelm', # 0xa2
'raelb', # 0xa3
'raels', # 0xa4
'raelt', # 0xa5
'raelp', # 0xa6
'raelh', # 0xa7
'raem', # 0xa8
'raeb', # 0xa9
'raebs', # 0xaa
'raes', # 0xab
'raess', # 0xac
'raeng', # 0xad
'raej', # 0xae
'raec', # 0xaf
'raek', # 0xb0
'raet', # 0xb1
'raep', # 0xb2
'raeh', # 0xb3
'rya', # 0xb4
'ryag', # 0xb5
'ryagg', # 0xb6
'ryags', # 0xb7
'ryan', # 0xb8
'ryanj', # 0xb9
'ryanh', # 0xba
'ryad', # 0xbb
'ryal', # 0xbc
'ryalg', # 0xbd
'ryalm', # 0xbe
'ryalb', # 0xbf
'ryals', # 0xc0
'ryalt', # 0xc1
'ryalp', # 0xc2
'ryalh', # 0xc3
'ryam', # 0xc4
'ryab', # 0xc5
'ryabs', # 0xc6
'ryas', # 0xc7
'ryass', # 0xc8
'ryang', # 0xc9
'ryaj', # 0xca
'ryac', # 0xcb
'ryak', # 0xcc
'ryat', # 0xcd
'ryap', # 0xce
'ryah', # 0xcf
'ryae', # 0xd0
'ryaeg', # 0xd1
'ryaegg', # 0xd2
'ryaegs', # 0xd3
'ryaen', # 0xd4
'ryaenj', # 0xd5
'ryaenh', # 0xd6
'ryaed', # 0xd7
'ryael', # 0xd8
'ryaelg', # 0xd9
'ryaelm', # 0xda
'ryaelb', # 0xdb
'ryaels', # 0xdc
'ryaelt', # 0xdd
'ryaelp', # 0xde
'ryaelh', # 0xdf
'ryaem', # 0xe0
'ryaeb', # 0xe1
'ryaebs', # 0xe2
'ryaes', # 0xe3
'ryaess', # 0xe4
'ryaeng', # 0xe5
'ryaej', # 0xe6
'ryaec', # 0xe7
'ryaek', # 0xe8
'ryaet', # 0xe9
'ryaep', # 0xea
'ryaeh', # 0xeb
'reo', # 0xec
'reog', # 0xed
'reogg', # 0xee
'reogs', # 0xef
'reon', # 0xf0
'reonj', # 0xf1
'reonh', # 0xf2
'reod', # 0xf3
'reol', # 0xf4
'reolg', # 0xf5
'reolm', # 0xf6
'reolb', # 0xf7
'reols', # 0xf8
'reolt', # 0xf9
'reolp', # 0xfa
'reolh', # 0xfb
'reom', # 0xfc
'reob', # 0xfd
'reobs', # 0xfe
'reos', # 0xff
)
| mit |
Hacker-YHJ/betwixt | dt/cm/PRESUBMIT.py | 148 | 2368 | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _CheckCodeMirrorChanges(input_api, output_api):
errorText = ("ERROR: Attempt to modify CodeMirror. The only allowed changes are "
"rolls from the upstream (http://codemirror.net). If this is a roll, "
"make sure you mention 'roll CodeMirror' (no quotes) in the change description.\n"
"CodeMirror rolling instructions:\n"
" https://sites.google.com/a/chromium.org/devtools-codemirror-rolling")
changeDescription = input_api.change.DescriptionText()
errors = []
if not "roll codemirror" in changeDescription.lower():
errors.append(output_api.PresubmitError(errorText))
return errors
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run on upload; delegates to the CodeMirror roll check."""
  return list(_CheckCodeMirrorChanges(input_api, output_api))
| mit |
AndroidOpenDevelopment/android_external_chromium_org | tools/telemetry/telemetry/timeline/slice.py | 8 | 2266 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.event as timeline_event
class Slice(timeline_event.TimelineEvent):
  """A Slice represents an interval of time plus parameters associated
  with that interval.

  NOTE: The Sample class implements the same interface as
  Slice. These must be kept in sync.

  All time units are stored in milliseconds.
  """
  def __init__(self, parent_thread, category, name, timestamp, duration=0,
               thread_timestamp=None, thread_duration=None, args=None):
    super(Slice, self).__init__(
        category, name, timestamp, duration, thread_timestamp, thread_duration,
        args)
    self.parent_thread = parent_thread
    self.parent_slice = None
    self.sub_slices = []
    self.did_not_finish = False

  def AddSubSlice(self, sub_slice):
    """Appends sub_slice, which must already claim self as its parent."""
    assert sub_slice.parent_slice == self
    self.sub_slices.append(sub_slice)

  def IterEventsInThisContainerRecrusively(self):
    # Misspelling ("Recrusively") is preserved: callers use this name.
    for sub_slice in self.sub_slices:
      yield sub_slice
      for sub_sub in sub_slice.IterEventsInThisContainerRecrusively():
        yield sub_sub

  @property
  def self_time(self):
    """Time spent in this function less any time spent in child events."""
    child_total = sum(
        [e.duration for e in self.sub_slices])
    return self.duration - child_total

  @property
  def self_thread_time(self):
    """Thread (scheduled) time spent in this function less any thread time spent
    in child events. Returns None if the slice or any of its children does not
    have a thread_duration value.
    """
    # Explicit None checks: a thread_duration of 0 is a legitimate value
    # and must not be treated as "missing" (a bare truthiness test would).
    if self.thread_duration is None:
      return None
    child_total = 0
    for e in self.sub_slices:
      if e.thread_duration is None:
        return None
      child_total += e.thread_duration
    return self.thread_duration - child_total

  def _GetSubSlicesRecursive(self):
    # Depth-first: each child's descendants are yielded before the child.
    for sub_slice in self.sub_slices:
      for s in sub_slice.GetAllSubSlices():
        yield s
      yield sub_slice

  def GetAllSubSlices(self):
    """Returns a flat list of every descendant slice."""
    return list(self._GetSubSlicesRecursive())

  def GetAllSubSlicesOfName(self, name):
    """Returns every descendant slice whose name matches exactly."""
    return [e for e in self.GetAllSubSlices() if e.name == name]
| bsd-3-clause |
luistorresm/odoo | addons/account_chart/__openerp__.py | 313 | 1451 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: a single dict expression read by the server.
{
    'name': 'Template of Charts of Accounts',
    'version': '1.1',
    # Hidden: this module exists only to satisfy dependencies.
    'category': 'Hidden/Dependency',
    'description': """
Remove minimal account chart.
=============================
Deactivates minimal chart of accounts.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/accounting',
    'depends': ['account'],
    'data': [],
    'demo': [],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dd00/commandergenius | project/jni/python/src/Lib/test/test_userlist.py | 56 | 1767 | # Check every path through every method of UserList
from UserList import UserList
from test import test_support, list_tests
class UserListTest(list_tests.CommonTest):
    """Checks every path through every method of UserList.

    NOTE: Python 2 code -- relies on xrange, iterator .next(), and the
    Python 2 UserList module.
    """
    # The shared CommonTest machinery instantiates this type under test.
    type2test = UserList

    def test_getslice(self):
        super(UserListTest, self).test_getslice()
        l = [0, 1, 2, 3, 4]
        u = self.type2test(l)
        # Compare every slice boundary combination against the plain list.
        for i in range(-3, 6):
            self.assertEqual(u[:i], l[:i])
            self.assertEqual(u[i:], l[i:])
            for j in xrange(-3, 6):
                self.assertEqual(u[i:j], l[i:j])

    def test_add_specials(self):
        # UserList + str concatenates element-wise (str is iterable).
        u = UserList("spam")
        u2 = u + "eggs"
        self.assertEqual(u2, list("spameggs"))

    def test_radd_specials(self):
        # Reflected add: str + UserList.
        u = UserList("eggs")
        u2 = "spam" + u
        self.assertEqual(u2, list("spameggs"))
        u2 = u.__radd__(UserList("spam"))
        self.assertEqual(u2, list("spameggs"))

    def test_iadd(self):
        super(UserListTest, self).test_iadd()
        u = [0, 1]
        u += UserList([0, 1])
        self.assertEqual(u, [0, 1, 0, 1])

    def test_mixedcmp(self):
        # UserList compares equal to a plain list with the same contents.
        u = self.type2test([0, 1])
        self.assertEqual(u, [0, 1])
        self.assertNotEqual(u, [0])
        self.assertNotEqual(u, [0, 2])

    def test_mixedadd(self):
        u = self.type2test([0, 1])
        self.assertEqual(u + [], u)
        self.assertEqual(u + [2], [0, 1, 2])

    def test_getitemoverwriteiter(self):
        # Verify that __getitem__ overrides *are* recognized by __iter__
        class T(self.type2test):
            def __getitem__(self, key):
                return str(key) + '!!!'
        self.assertEqual(iter(T((1,2))).next(), "0!!!")
def test_main():
    """Entry point used by Python's regression-test framework."""
    test_support.run_unittest(UserListTest)


# Allow running this test module directly.
if __name__ == "__main__":
    test_main()
| lgpl-2.1 |
65apps/omim | 3party/protobuf/python/google/protobuf/internal/unknown_fields_test.py | 73 | 9102 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for preservation of unknown fields in the pure Python implementation."""
__author__ = 'bohdank@google.com (Bohdan Koval)'
from google.apputils import basetest
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf.internal import encoder
from google.protobuf.internal import missing_enum_values_pb2
from google.protobuf.internal import test_util
from google.protobuf.internal import type_checkers
class UnknownFieldsTest(basetest.TestCase):
  """Checks that fields unknown to a message type survive parse/serialize.

  NOTE(review): keep this class name unique within the module -- a later
  class with the same name would shadow this one at module scope and
  silently disable these tests.
  """

  def setUp(self):
    self.descriptor = unittest_pb2.TestAllTypes.DESCRIPTOR
    self.all_fields = unittest_pb2.TestAllTypes()
    test_util.SetAllFields(self.all_fields)
    self.all_fields_data = self.all_fields.SerializeToString()
    # Parsing a fully-populated message into an empty type routes every
    # field into _unknown_fields.
    self.empty_message = unittest_pb2.TestEmptyMessage()
    self.empty_message.ParseFromString(self.all_fields_data)
    self.unknown_fields = self.empty_message._unknown_fields

  def GetField(self, name):
    """Decodes field `name` out of the raw unknown-field entries."""
    field_descriptor = self.descriptor.fields_by_name[name]
    wire_type = type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type]
    field_tag = encoder.TagBytes(field_descriptor.number, wire_type)
    result_dict = {}
    for tag_bytes, value in self.unknown_fields:
      if tag_bytes == field_tag:
        decoder = unittest_pb2.TestAllTypes._decoders_by_tag[tag_bytes][0]
        decoder(value, 0, len(value), self.all_fields, result_dict)
    return result_dict[field_descriptor]

  def testEnum(self):
    value = self.GetField('optional_nested_enum')
    self.assertEqual(self.all_fields.optional_nested_enum, value)

  def testRepeatedEnum(self):
    value = self.GetField('repeated_nested_enum')
    self.assertEqual(self.all_fields.repeated_nested_enum, value)

  def testVarint(self):
    value = self.GetField('optional_int32')
    self.assertEqual(self.all_fields.optional_int32, value)

  def testFixed32(self):
    value = self.GetField('optional_fixed32')
    self.assertEqual(self.all_fields.optional_fixed32, value)

  def testFixed64(self):
    value = self.GetField('optional_fixed64')
    self.assertEqual(self.all_fields.optional_fixed64, value)

  def testLengthDelimited(self):
    value = self.GetField('optional_string')
    self.assertEqual(self.all_fields.optional_string, value)

  def testGroup(self):
    value = self.GetField('optionalgroup')
    self.assertEqual(self.all_fields.optionalgroup, value)

  def testSerialize(self):
    data = self.empty_message.SerializeToString()

    # Don't use assertEqual because we don't want to dump raw binary data to
    # stdout.
    self.assertTrue(data == self.all_fields_data)

  def testCopyFrom(self):
    message = unittest_pb2.TestEmptyMessage()
    message.CopyFrom(self.empty_message)
    self.assertEqual(self.unknown_fields, message._unknown_fields)

  def testMergeFrom(self):
    # Unknown fields from the source are appended after the destination's.
    message = unittest_pb2.TestAllTypes()
    message.optional_int32 = 1
    message.optional_uint32 = 2
    source = unittest_pb2.TestEmptyMessage()
    source.ParseFromString(message.SerializeToString())

    message.ClearField('optional_int32')
    message.optional_int64 = 3
    message.optional_uint32 = 4
    destination = unittest_pb2.TestEmptyMessage()
    destination.ParseFromString(message.SerializeToString())
    unknown_fields = destination._unknown_fields[:]

    destination.MergeFrom(source)
    self.assertEqual(unknown_fields + source._unknown_fields,
                     destination._unknown_fields)

  def testClear(self):
    self.empty_message.Clear()
    self.assertEqual(0, len(self.empty_message._unknown_fields))

  def testByteSize(self):
    self.assertEqual(self.all_fields.ByteSize(), self.empty_message.ByteSize())

  def testUnknownExtensions(self):
    message = unittest_pb2.TestEmptyMessageWithExtensions()
    message.ParseFromString(self.all_fields_data)
    self.assertEqual(self.empty_message._unknown_fields,
                     message._unknown_fields)

  def testListFields(self):
    # Make sure ListFields doesn't return unknown fields.
    self.assertEqual(0, len(self.empty_message.ListFields()))

  def testSerializeMessageSetWireFormatUnknownExtension(self):
    # Create a message using the message set wire format with an unknown
    # message.
    raw = unittest_mset_pb2.RawMessageSet()

    # Add an unknown extension.
    item = raw.item.add()
    item.type_id = 1545009
    message1 = unittest_mset_pb2.TestMessageSetExtension1()
    message1.i = 12345
    item.message = message1.SerializeToString()

    serialized = raw.SerializeToString()

    # Parse message using the message set wire format.
    proto = unittest_mset_pb2.TestMessageSet()
    proto.MergeFromString(serialized)

    # Verify that the unknown extension is serialized unchanged
    reserialized = proto.SerializeToString()
    new_raw = unittest_mset_pb2.RawMessageSet()
    new_raw.MergeFromString(reserialized)
    self.assertEqual(raw, new_raw)

  def testEquals(self):
    message = unittest_pb2.TestEmptyMessage()
    message.ParseFromString(self.all_fields_data)
    self.assertEqual(self.empty_message, message)

    self.all_fields.ClearField('optional_string')
    message.ParseFromString(self.all_fields.SerializeToString())
    self.assertNotEqual(self.empty_message, message)
class UnknownEnumValuesTest(basetest.TestCase):
  """Checks that unrecognized enum values round-trip via _unknown_fields.

  Renamed from UnknownFieldsTest: this module previously declared two
  classes with that name, so this definition shadowed the earlier one at
  module scope and silently disabled its tests.
  """

  def setUp(self):
    self.descriptor = missing_enum_values_pb2.TestEnumValues.DESCRIPTOR

    self.message = missing_enum_values_pb2.TestEnumValues()
    self.message.optional_nested_enum = (
        missing_enum_values_pb2.TestEnumValues.ZERO)
    self.message.repeated_nested_enum.extend([
        missing_enum_values_pb2.TestEnumValues.ZERO,
        missing_enum_values_pb2.TestEnumValues.ONE,
        ])
    self.message.packed_nested_enum.extend([
        missing_enum_values_pb2.TestEnumValues.ZERO,
        missing_enum_values_pb2.TestEnumValues.ONE,
        ])
    self.message_data = self.message.SerializeToString()
    # Parsing into a type whose enum lacks these values routes them into
    # _unknown_fields.
    self.missing_message = missing_enum_values_pb2.TestMissingEnumValues()
    self.missing_message.ParseFromString(self.message_data)
    self.unknown_fields = self.missing_message._unknown_fields

  def GetField(self, name):
    """Decodes field `name` out of the raw unknown-field entries."""
    field_descriptor = self.descriptor.fields_by_name[name]
    wire_type = type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type]
    field_tag = encoder.TagBytes(field_descriptor.number, wire_type)
    result_dict = {}
    for tag_bytes, value in self.unknown_fields:
      if tag_bytes == field_tag:
        decoder = missing_enum_values_pb2.TestEnumValues._decoders_by_tag[
            tag_bytes][0]
        decoder(value, 0, len(value), self.message, result_dict)
    return result_dict[field_descriptor]

  def testUnknownEnumValue(self):
    self.assertFalse(self.missing_message.HasField('optional_nested_enum'))
    value = self.GetField('optional_nested_enum')
    self.assertEqual(self.message.optional_nested_enum, value)

  def testUnknownRepeatedEnumValue(self):
    value = self.GetField('repeated_nested_enum')
    self.assertEqual(self.message.repeated_nested_enum, value)

  def testUnknownPackedEnumValue(self):
    value = self.GetField('packed_nested_enum')
    self.assertEqual(self.message.packed_nested_enum, value)

  def testRoundTrip(self):
    # Serializing the lossy message must reproduce the original bytes.
    new_message = missing_enum_values_pb2.TestEnumValues()
    new_message.ParseFromString(self.missing_message.SerializeToString())
    self.assertEqual(self.message, new_message)
# Run all test cases in this module via Google's basetest runner.
if __name__ == '__main__':
  basetest.main()
| apache-2.0 |
emanlove/robotframework-selenium2library | src/SeleniumLibrary/keywords/__init__.py | 3 | 1444 | # Copyright 2008-2011 Nokia Networks
# Copyright 2011-2016 Ryan Tomac, Ed Manlove and contributors
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .alert import AlertKeywords # noqa
from .browsermanagement import BrowserManagementKeywords # noqa
from .cookie import CookieKeywords # noqa
from .element import ElementKeywords # noqa
from .formelement import FormElementKeywords # noqa
from .frames import FrameKeywords # noqa
from .javascript import JavaScriptKeywords # noqa
from .runonfailure import RunOnFailureKeywords # noqa
from .screenshot import ScreenshotKeywords # noqa
from .selectelement import SelectElementKeywords # noqa
from .tableelement import TableElementKeywords # noqa
from .waiting import WaitingKeywords # noqa
from .webdrivertools import WebDriverCache # noqa
from .webdrivertools import WebDriverCreator # noqa
from .window import WindowKeywords # noqa
| apache-2.0 |
cschnei3/forseti-security | google/cloud/security/common/gcp_api/cloud_billing.py | 1 | 1114 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for Billing API client."""
from google.cloud.security.common.gcp_api import _base_client
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc
# pylint: disable=missing-param-doc
class CloudBillingClient(_base_client.BaseClient):
    """Client for the Google Cloud Billing API, built on BaseClient."""

    # Discovery-service name of the API this client talks to.
    API_NAME = 'cloudbilling'

    def __init__(self, global_configs, credentials=None):
        """Initialize the billing client.

        Args:
            global_configs: Global configuration map passed through to the
                base client.
            credentials: Credentials to authenticate with, or None to let
                the base client build defaults.
                (assumed from BaseClient's signature — TODO confirm)
        """
        super(CloudBillingClient, self).__init__(
            global_configs, credentials=credentials, api_name=self.API_NAME)
| apache-2.0 |
anthrotype/robofab | Lib/robofab/interface/all/dialogs_fontlab_legacy1.py | 9 | 1782 | """
Dialogs for FontLab < 5.1.
This one should be loaded for various platforms, using dialogKit
http://www.robofab.org/tools/dialogs.html
"""
from FL import *
from dialogKit import ModalDialog, Button, TextBox, EditText
# Public API of this back-end. Entries that FontLab < 5.1 cannot provide
# are left commented out; PutFile is implemented below and therefore
# exported (it was previously missing from this list by mistake).
__all__ = [
	#"AskString",
	#"AskYesNoCancel",
	#"FindGlyph",
	"GetFile",
	"GetFolder",
	#"Message",
	#"OneList",
	"PutFile",
	#"SearchList",
	#"SelectFont",
	#"SelectGlyph",
	#"TwoChecks",
	#"TwoFields",
	"ProgressBar",
]
def GetFile(message=None, title=None, directory=None, fileName=None, allowsMultipleSelection=False, fileTypes=None):
	"""Ask the user to pick an existing file via FontLab's internal dialog.

	Only *message* is honoured by FontLab's native dialog; title,
	directory, fileName, allowsMultipleSelection and fileTypes are
	accepted for API compatibility with the other dialog back-ends
	but are ignored here.
	"""
	strFilter = "All Files (*.*)|*.*|"
	defaultExt = ""
	if message is None:
		# fl.GetFileName expects a string prompt; normalize None the same
		# way GetFolder and PutFile already do.
		message = ""
	# using fontlab's internal file dialogs
	return fl.GetFileName(1, defaultExt, message, strFilter)
def GetFolder(message=None, title=None, directory=None, allowsMultipleSelection=False):
	"""Ask the user to pick a folder via FontLab's internal dialog.

	title, directory and allowsMultipleSelection are accepted for API
	compatibility with the other dialog back-ends but are ignored here.
	"""
	prompt = message
	if prompt is None:
		prompt = ""
	return fl.GetPathName(prompt)
def PutFile(message=None, fileName=None):
	"""Ask the user for a save location via FontLab's internal dialog.

	message is accepted for API compatibility but FontLab's save dialog
	has no place to show it.
	"""
	if message is None:
		message = ""
	if fileName is None:
		fileName = ""
	defaultExt = ""
	# 0 selects the "save" variant of fl.GetFileName.
	return fl.GetFileName(0, defaultExt, fileName, '')
class ProgressBar(object):
	"""Progress indicator backed by FontLab's built-in progress window."""

	def __init__(self, title="RoboFab...", ticks=0, label=""):
		# label is accepted for API compatibility; FontLab's progress
		# window has no separate label field (see label() below).
		self._tickValue = 1
		fl.BeginProgress(title, ticks)

	def getCurrentTick(self):
		"""Return the tick value that the next tick() call will use."""
		return self._tickValue

	def tick(self, tickValue=None):
		"""Advance the bar to *tickValue*, or to the next internal tick.

		An explicit tickValue of 0 is honoured; previously it was tested
		for truthiness and silently replaced by the internal counter.
		"""
		if tickValue is None:
			tickValue = self._tickValue
		fl.TickProgress(tickValue)
		self._tickValue = tickValue + 1

	def label(self, label):
		"""No-op: FontLab's progress window cannot display a label."""
		pass

	def close(self):
		"""Dismiss the progress window."""
		fl.EndProgress()
| bsd-3-clause |
outastracoin-project/outastracoin | qa/rpc-tests/txn_doublespend.py | 152 | 4968 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with malleable transactions
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
from util import *
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
    """Check wallet accounting when a transaction is double-spent.

    On one half of a split network two spends are made; on the other
    half a conflicting "mutated" transaction is mined. After the halves
    reconnect, the original transactions must become conflicted and
    account balances must reflect only the mined double-spend.
    """

    def add_options(self, parser):
        # Optionally mine the first pair of transactions into a block
        # before introducing the double-spend.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        # All nodes should start with 1,250 BTC:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")  # bug workaround, coins generated assigned to first getnewaddress!

        # Assign coins to foo and bar accounts:
        self.nodes[0].move("", "foo", 1220)
        self.nodes[0].move("", "bar", 30)
        assert_equal(self.nodes[0].getbalance(""), 0)

        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")

        # First: use raw transaction API to send 1210 BTC to node1_address,
        # but don't broadcast:
        (total_in, inputs) = gather_inputs(self.nodes[0], 1210)
        change_address = self.nodes[0].getnewaddress("foo")
        outputs = {}
        outputs[change_address] = 40
        outputs[node1_address] = 1210
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)

        # Create two transaction from node[0] to node[1]; the
        # second must spend change from the first because the first
        # spends all mature inputs:
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 1210, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)

        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].setgenerate(True, 1)
            sync_blocks(self.nodes[0:2])

        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Node0's balance should be starting balance, plus 50BTC for another
        # matured block, minus 1210, minus 20, and minus transaction fees:
        expected = starting_balance
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)

        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo"), 1220+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar"), 30+tx2["amount"]+tx2["fee"])

        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)

        # Now give doublespend to miner:
        mutated_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].setgenerate(True, 1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].setgenerate(True, 1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -1)
        assert_equal(tx2["confirmations"], -1)

        # Node0's total balance should be starting balance, plus 100BTC for
        # two more matured blocks, minus 1210 for the double-spend:
        expected = starting_balance + 100 - 1210
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)

        # foo account should be debited, but bar account should not:
        assert_equal(self.nodes[0].getbalance("foo"), 1220-1210)
        assert_equal(self.nodes[0].getbalance("bar"), 30)

        # Node1's "from" account balance should be just the mutated send:
        assert_equal(self.nodes[1].getbalance("from0"), 1210)
# Run the double-spend scenario when executed as a script.
if __name__ == '__main__':
    TxnMallTest().main()
| mit |
CameronLonsdale/sec-tools | python2/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Assembled language-model description for windows-1255 (Hebrew):
# the byte-to-frequency-order map, the bigram precedence matrix, and
# the detection tuning constants derived above. Presumably consumed by
# chardet's single-byte charset prober — the consumer is not visible in
# this file.
Win1255HebrewModel = {
  'charToOrderMap': win1255_CharToOrderMap,
  'precedenceMatrix': HebrewLangModel,
  'mTypicalPositiveRatio': 0.984004,
  'keepEnglishLetter': False,
  'charsetName': "windows-1255"
}
# flake8: noqa
| mit |
tonybaloney/st2 | st2common/st2common/services/keyvalues.py | 1 | 6723 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common import log as logging
from st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE
from st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE
from st2common.constants.keyvalue import ALLOWED_SCOPES
from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR
from st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException
from st2common.models.system.keyvalue import UserKeyReference
from st2common.persistence.keyvalue import KeyValuePair
__all__ = [
'get_kvp_for_name',
'get_values_for_names',
'KeyValueLookup',
'UserKeyValueLookup'
]
LOG = logging.getLogger(__name__)
def get_kvp_for_name(name):
    """Return the KeyValuePair DB object for *name*, or None if missing."""
    try:
        return KeyValuePair.get_by_name(name)
    except ValueError:
        # get_by_name raises ValueError for a non-existent key.
        return None
def get_values_for_names(names, default_value=None):
    """
    Retrieve values for the provided key names (multi get).

    If a KeyValuePair object for a particular name doesn't exist, the
    returned dictionary will contain default_value for that name.

    :rtype: ``dict``
    """
    kvp_dbs = KeyValuePair.get_by_names(names=names)
    values_by_name = dict((kvp_db.name, kvp_db.value) for kvp_db in kvp_dbs)
    return dict((name, values_by_name.get(name, default_value))
                for name in names)
class KeyValueLookup(object):
    """Lazy, dotted-path lookup of system-scoped datastore values.

    Supports Jinja-style chained access: every attribute or item access
    fetches the accumulated dotted key from the datastore, caches the
    value, and returns another lookup object so deeper segments can
    keep chaining.
    """

    def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE):
        if not scope:
            scope = FULL_SYSTEM_SCOPE

        if scope == SYSTEM_SCOPE:
            # Normalize the short scope name to its fully qualified form.
            scope = FULL_SYSTEM_SCOPE

        self._prefix = prefix
        self._key_prefix = key_prefix or ''
        self._value_cache = cache or {}
        self._scope = scope

    def __str__(self):
        # Return the value previously fetched and cached for this key path.
        return self._value_cache[self._key_prefix]

    def __int__(self):
        return int(float(self))

    def __float__(self):
        return float(str(self))

    def __getitem__(self, key):
        return self._get(key)

    def __getattr__(self, name):
        return self._get(name)

    def _get(self, name):
        # get the value for this key and save in value_cache
        if self._key_prefix:
            key = '%s.%s' % (self._key_prefix, name)
        else:
            key = name

        if self._prefix:
            kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])
        else:
            kvp_key = key

        value = self._get_kv(kvp_key)

        self._value_cache[key] = value

        # return a KeyValueLookup as response since the lookup may not be complete e.g. if
        # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,
        # will expect to do a dictionary style lookup for key_base and key_value as subsequent
        # calls. Saving the value in cache avoids extra DB calls.
        return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache,
                              scope=self._scope)

    def _get_kv(self, key):
        # Fetch a single value from the datastore; empty string if missing.
        scope = self._scope
        LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key)
        kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)
        if kvp:
            LOG.debug('Got value %s from datastore.', kvp.value)
        return kvp.value if kvp else ''
class UserKeyValueLookup(object):
    """Lazy, dotted-path lookup of user-scoped datastore values.

    Mirrors ``KeyValueLookup`` but addresses keys through a
    ``UserKeyReference`` so every key is resolved in the context of
    ``user``.
    """

    def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE):
        if not scope:
            scope = FULL_USER_SCOPE

        if scope == USER_SCOPE:
            # Normalize the short scope name to its fully qualified form.
            scope = FULL_USER_SCOPE

        self._prefix = prefix
        self._key_prefix = key_prefix or ''
        self._value_cache = cache or {}
        self._user = user
        self._scope = scope

    def __str__(self):
        # Return the value previously fetched and cached for this key path.
        return self._value_cache[self._key_prefix]

    def __int__(self):
        # Added for consistency with KeyValueLookup so numeric casts work.
        return int(float(self))

    def __float__(self):
        return float(str(self))

    def __getitem__(self, key):
        return self._get(key)

    def __getattr__(self, name):
        return self._get(name)

    def _get(self, name):
        # get the value for this key and save in value_cache
        if self._key_prefix:
            key = '%s.%s' % (self._key_prefix, name)
        else:
            # The first segment is rewritten into a per-user reference.
            key = UserKeyReference(name=name, user=self._user).ref

        if self._prefix:
            kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])
        else:
            kvp_key = key

        value = self._get_kv(kvp_key)

        self._value_cache[key] = value

        # return a UserKeyValueLookup as response since the lookup may not be complete e.g. if
        # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,
        # will expect to do a dictionary style lookup for key_base and key_value as subsequent
        # calls. Saving the value in cache avoids extra DB calls.
        return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key,
                                  cache=self._value_cache, scope=self._scope)

    def _get_kv(self, key):
        # Fetch a single value from the datastore; empty string if missing.
        scope = self._scope
        kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)
        return kvp.value if kvp else ''
def get_key_reference(scope, name, user=None):
    """
    Given a key name and user this method returns a new name (string ref)
    to address the key value pair in the context of that user.

    :param scope: Scope of the key (system or user, short or full form).
    :type scope: ``str``

    :param name: Original name of the key.
    :type name: ``str``

    :param user: User to whom the key belongs (required for user scopes).
    :type user: ``str``

    :rtype: ``str``
    """
    if scope in (SYSTEM_SCOPE, FULL_SYSTEM_SCOPE):
        # System keys are addressed by their plain name.
        return name
    if scope in (USER_SCOPE, FULL_USER_SCOPE):
        if not user:
            raise InvalidUserException('A valid user must be specified for user key ref.')
        return UserKeyReference(name=name, user=user).ref
    raise InvalidScopeException('Scope "%s" is not valid. Allowed scopes are %s.' %
                                (scope, ALLOWED_SCOPES))
| apache-2.0 |
xbmc/xbmc-antiquated | xbmc/lib/libPython/Python/Tools/framer/framer/bases.py | 48 | 6967 | """Provides the Module and Type base classes that user code inherits from."""
__all__ = ["Module", "Type", "member"]
from framer import struct, template
from framer.function import Function, Method
from framer.member import member
from framer.slots import *
from framer.util import cstring, unindent
from types import FunctionType
def sortitems(dict):
    """Return the (key, value) pairs of *dict* as a list sorted by key.

    Uses sorted() rather than list.sort() on dict.items(): the result is
    the same under Python 2 but, unlike the old form, it also works when
    items() returns a view (Python 3) instead of a list.
    (The parameter name shadows the builtin; kept for call compatibility.)
    """
    return sorted(dict.items())
# The Module and Type classes are implemented using metaclasses,
# because most of the methods are class methods. It is easier to use
# metaclasses than the cumbersome classmethod() builtin. They have
# class methods because they are exposed to user code as base classes.
class BaseMetaclass(type):
    """Shared infrastructure for generating modules and types."""

    # just methoddef so far

    def dump_methoddef(self, f, functions, vars):
        """Write the PyMethodDef table for *functions* to file *f*.

        *vars* supplies the template substitution variables; entries with
        a docstring use the doc-carrying template variant.
        """
        def p(templ, vars=vars): # helper function to generate output
            print >> f, templ % vars

        if not functions:
            return
        p(template.methoddef_start)
        for name, func in sortitems(functions):
            if func.__doc__:
                p(template.methoddef_def_doc, func.vars)
            else:
                p(template.methoddef_def, func.vars)
        p(template.methoddef_end)
class ModuleMetaclass(BaseMetaclass):
    """Provides methods for Module class."""

    def gen(self):
        # Top-level entry point: analyze the spec and write the C file.
        self.analyze()
        self.initvars()
        f = open(self.__filename, "w")
        self.dump(f)
        f.close()

    def analyze(self):
        # Collect this module's functions and nested type specs from the
        # class dictionary, noting whether any type declares members.
        self.name = getattr(self, "abbrev", self.__name__)
        self.__functions = {}
        self.__types = {}
        self.__members = False

        for name, obj in self.__dict__.iteritems():
            if isinstance(obj, FunctionType):
                self.__functions[name] = Function(obj, self)
            elif isinstance(obj, TypeMetaclass):
                # Record the owning module name on the type (name-mangled
                # attribute of TypeMetaclass), then analyze it.
                obj._TypeMetaclass__module = self.name
                obj.analyze()
                self.__types[name] = obj
                if obj.has_members():
                    self.__members = True

    def initvars(self):
        # Prepare the template substitution variables for output.
        v = self.__vars = {}
        filename = getattr(self, "__file__", None)
        if filename is None:
            filename = self.__name__ + "module.c"
        self.__filename = v["FileName"] = filename
        name = v["ModuleName"] = self.__name__
        v["MethodDefName"] = "%s_methods" % name
        v["ModuleDocstring"] = cstring(unindent(self.__doc__))

    def dump(self, f):
        # Write the complete C source for the module to *f*.
        def p(templ, vars=self.__vars): # helper function to generate output
            print >> f, templ % vars

        p(template.module_start)
        if self.__members:
            p(template.member_include)
        print >> f

        if self.__doc__:
            p(template.module_doc)

        for name, type in sortitems(self.__types):
            type.dump(f)

        for name, func in sortitems(self.__functions):
            func.dump(f)

        self.dump_methoddef(f, self.__functions, self.__vars)

        p(template.module_init_start)
        for name, type in sortitems(self.__types):
            type.dump_init(f)
        p("}")
class Module:
    """Base class for user-defined module specs (driven by ModuleMetaclass)."""
    __metaclass__ = ModuleMetaclass
class TypeMetaclass(BaseMetaclass):
    """Provides methods for Type class: C extension-type generation."""

    def dump(self, f):
        # Write the complete C source for this type to *f*.
        self.initvars()
        # defined after initvars() so that __vars is defined
        def p(templ, vars=self.__vars):
            print >> f, templ % vars

        if self.struct is not None:
            print >> f, unindent(self.struct, False)

        if self.__doc__:
            p(template.docstring)

        for name, func in sortitems(self.__methods):
            func.dump(f)

        self.dump_methoddef(f, self.__methods, self.__vars)
        self.dump_memberdef(f)
        self.dump_slots(f)

    def has_members(self):
        # True if any member() descriptors were declared on this type.
        if self.__members:
            return True
        else:
            return False

    def analyze(self):
        # called by ModuleMetaclass analyze()
        self.name = getattr(self, "abbrev", self.__name__)

        src = getattr(self, "struct", None)
        if src is not None:
            self.__struct = struct.parse(src)
        else:
            self.__struct = None

        # Collect methods and member descriptors from the whole MRO so
        # inherited definitions are included.
        self.__methods = {}
        self.__members = {}
        for cls in self.__mro__:
            for k, v in cls.__dict__.iteritems():
                if isinstance(v, FunctionType):
                    self.__methods[k] = Method(v, self)
                if isinstance(v, member):
                    self.__members[k] = v
                    # Members require a C struct to register against.
                    assert self.__struct is not None
                    v.register(k, self.__struct)

        self.analyze_slots()

    def analyze_slots(self):
        # Map PyTypeObject slots to the methods/values that fill them.
        self.__slots = {}
        for s in Slots:
            if s.special is not None:
                meth = self.__methods.get(s.special)
                if meth is not None:
                    self.__slots[s] = meth
        self.__slots[TP_NAME] = '"%s.%s"' % (self.__module, self.__name__)
        if self.__doc__:
            self.__slots[TP_DOC] = "%s_doc" % self.name
        if self.__struct is not None:
            self.__slots[TP_BASICSIZE] = "sizeof(%s)" % self.__struct.name
            self.__slots[TP_DEALLOC] = "%s_dealloc" % self.name
        if self.__methods:
            self.__slots[TP_METHODS] = "%s_methods" % self.name
        if self.__members:
            self.__slots[TP_MEMBERS] = "%s_members" % self.name

    def initvars(self):
        # Prepare the template substitution variables for output.
        v = self.__vars = {}
        v["TypeName"] = self.__name__
        v["CTypeName"] = "Py%s_Type" % self.__name__
        v["MethodDefName"] = self.__slots[TP_METHODS]
        if self.__doc__:
            v["DocstringVar"] = self.__slots[TP_DOC]
            v["Docstring"] = cstring(unindent(self.__doc__))
        if self.__struct is not None:
            v["StructName"] = self.__struct.name
        if self.__members:
            v["MemberDefName"] = self.__slots[TP_MEMBERS]

    def dump_memberdef(self, f):
        # Write the PyMemberDef table, if any members were declared.
        def p(templ, vars=self.__vars):
            print >> f, templ % vars
        if not self.__members:
            return
        p(template.memberdef_start)
        for name, slot in sortitems(self.__members):
            slot.dump(f)
        p(template.memberdef_end)

    def dump_slots(self, f):
        # Write the dealloc function (if needed) and the PyTypeObject
        # initializer, one slot per line with an aligned name comment.
        def p(templ, vars=self.__vars):
            print >> f, templ % vars

        if self.struct:
            p(template.dealloc_func, {"name" : self.__slots[TP_DEALLOC]})

        p(template.type_struct_start)
        for s in Slots[:-5]: # XXX
            val = self.__slots.get(s, s.default)
            # Pad with tabs so the trailing /* slot name */ comments line up.
            ntabs = 4 - (4 + len(val)) / 8
            line = " %s,%s/* %s */" % (val, "\t" * ntabs, s.name)
            print >> f, line
        p(template.type_struct_end)

    def dump_init(self, f):
        # Write the module-init code that readies and registers the type.
        def p(templ):
            print >> f, templ % self.__vars

        p(template.type_init_type)
        p(template.module_add_type)
# Base class for user-defined extension-type specifications; TypeMetaclass
# (Python 2 ``__metaclass__`` hook) generates the C code from subclass bodies.
class Type:
__metaclass__ = TypeMetaclass
| gpl-2.0 |
xingwu1/autorest | AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureResource/autorestresourceflatteningtestservice/models/error.py | 211 | 1286 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
# Auto-generated AutoRest model; edits may be lost on regeneration.
class Error(Model):
"""Error model returned by the service.

:param status: Status code reported by the service.
:type status: int
:param message: Error description text.
:type message: str
"""
# Maps Python attribute names to wire-format keys/types for the msrest
# serializer.
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, status=None, message=None):
self.status = status
self.message = message
# Auto-generated AutoRest exception wrapper for the Error model above.
class ErrorException(HttpOperationError):
"""Server responded with exception of type: 'Error'.

:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
# Passes the model name so HttpOperationError deserializes the body as Error.
super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
| mit |
YuMatsuzawa/HadoopEclipseProject | hadoop-0.20.2-cdh3u5/contrib/hod/hodlib/Schedulers/torque.py | 182 | 5568 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import os, pprint, re, time
from hodlib.Common.threads import simpleCommand
from hodlib.Common.util import args_to_string
from hodlib.Common.logger import hodDummyLogger
# Matches one "key = value" line of `qstat -f` output.  Raw string added:
# the original plain string relied on Python passing unknown escapes like
# \s through verbatim, which raises DeprecationWarning/SyntaxWarning on
# modern interpreters and is an error in future versions.
reQstatLine = re.compile(r"^\s*(\w+)\s*=\s*(.*)\s*$")
# Thin wrapper around the Torque/PBS command-line tools (qsub, qstat, qdel,
# pbsdsh, qalter), running each via hodlib's simpleCommand.  Python 2 code
# (``except IOError, i`` and ``print >>`` syntax).
class torqueInterface:
def __init__(self, torqueDir, environment, log=None):
# Resolve absolute paths to the Torque binaries under torqueDir/bin.
self.__qsub = os.path.join(torqueDir, 'bin', 'qsub')
self.__qdel = os.path.join(torqueDir, 'bin', 'qdel')
self.__qstat = os.path.join(torqueDir, 'bin', 'qstat')
self.__pbsNodes = os.path.join(torqueDir, 'bin', 'pbsnodes')
self.__pbsdsh = os.path.join(torqueDir, 'bin', 'pbsdsh')
self.__qalter = os.path.join(torqueDir, 'bin', 'qalter')
self.__env = environment
self.__log = log
if not self.__log:
# Fall back to a no-op logger so logging calls are always safe.
self.__log = hodDummyLogger()
def qsub(self, argList, stdinList):
# Submit a job; stdinList lines are fed to qsub's stdin.
# Returns (jobID, exitCode); jobID is None on failure.
jobID = False
exitCode = 0
qsubCommand = "%s %s" % (self.__qsub, args_to_string(argList))
self.__log.debug("qsub -> %s" % qsubCommand)
qsubProcess = simpleCommand('qsub', qsubCommand, env=self.__env)
qsubProcess.start()
# Busy-wait until the child's stdin pipe is available.
while qsubProcess.stdin == None:
time.sleep(.2)
try:
for line in stdinList:
self.__log.debug("qsub stdin: %s" % line)
print >>qsubProcess.stdin, line
qsubProcess.stdin.close()
except IOError, i:
# If torque's qsub is given invalid params, it fails & returns immediately
# Check for such errors here
# Wait for command execution to finish
qsubProcess.wait()
qsubProcess.join()
output = qsubProcess.output()
if output!=[]:
self.__log.critical("qsub Failure : %s " % output[0].strip())
self.__log.critical("qsub Command : %s" % qsubCommand)
return None, qsubProcess.exit_code()
qsubProcess.wait()
qsubProcess.join()
exitCode = qsubProcess.exit_code()
if exitCode == 0:
# First output line is the job id (``buffer`` shadows the builtin).
buffer = qsubProcess.output()
jobID = buffer[0].rstrip('\n')
self.__log.debug("qsub jobid: %s" % jobID)
else:
self.__log.critical("qsub error: %s" % qsubProcess.exit_status_string())
return jobID, exitCode
def qstat(self, jobID):
# Query a job and parse ``qstat -f -1`` output into a dict.
# Returns (qstatInfo, exitCode); qstatInfo is None on qstat failure.
qstatInfo = None
qstatCommand = "%s -f -1 %s" % (self.__qstat, jobID)
self.__log.debug(qstatCommand)
qstatProcess = simpleCommand('qstat', qstatCommand, env=self.__env)
qstatProcess.start()
qstatProcess.wait()
qstatProcess.join()
exitCode = qstatProcess.exit_code()
if exitCode > 0:
self.__log.warn('qstat error: %s' % qstatProcess.exit_status_string())
else:
qstatInfo = {}
for line in qstatProcess.output():
line = line.rstrip()
if line.find('=') != -1:
qstatMatch = reQstatLine.match(line)
if qstatMatch:
key = qstatMatch.group(1)
value = qstatMatch.group(2)
qstatInfo[key] = value
if 'exec_host' in qstatInfo:
# exec_host looks like "node1/0+node2/1"; keep just the host names
# (``list`` shadows the builtin here).
list = qstatInfo['exec_host'].split('+')
addrList = []
for item in list:
[head, end] = item.split('/', 1)
addrList.append(head)
qstatInfo['exec_host'] = addrList
return qstatInfo, exitCode
def pbs_nodes(self, argString):
# Not implemented; placeholder for a pbsnodes wrapper.
pass
def qdel(self, jobId, force=False):
# Delete a job; force adds ``-p`` (purge).  Returns qdel's exit code.
exitCode = 0
qdel = self.__qdel
if force:
qdel = "%s -p %s" % (qdel, jobId)
else:
qdel = "%s %s" % (qdel, jobId)
self.__log.debug(qdel)
qdelProcess = simpleCommand('qdel', qdel, env=self.__env)
qdelProcess.start()
qdelProcess.wait()
qdelProcess.join()
exitCode = qdelProcess.exit_code()
return exitCode
def pbsdsh(self, arguments):
# Run a command on allocated nodes via pbsdsh; returns 0 on success.
# NOTE(review): this loop polls exit_code() up to 30 times with no
# sleep between iterations -- looks like a missing delay; confirm
# against simpleCommand's semantics.
status = None
pbsdshCommand = "%s %s" % (self.__pbsdsh, args_to_string(arguments))
self.__log.debug("pbsdsh command: %s" % pbsdshCommand)
pbsdsh = simpleCommand('pbsdsh', pbsdshCommand, env=self.__env)
pbsdsh.start()
for i in range(0, 30):
status = pbsdsh.exit_code()
if status:
self.__log.error("pbsdsh failed: %s" % pbsdsh.exit_status_string())
break
if not status: status = 0
return status
def qalter(self, fieldName, fieldValue, jobId):
"""Update the job field with fieldName with the fieldValue.
The fieldValue must be modifiable after the job is submitted."""
# E.g. to alter comment: qalter -W notes='value` jobId
qalterCmd = '%s -W %s=\"%s\" %s' % (self.__qalter, fieldName, fieldValue, jobId)
self.__log.debug("qalter command: %s" % qalterCmd)
qalterProcess = simpleCommand('qalter', qalterCmd, env=self.__env)
qalterProcess.start()
qalterProcess.wait()
qalterProcess.join()
exitCode = qalterProcess.exit_code()
return exitCode
| apache-2.0 |
dol-sen/portage | pym/portage/cvstree.py | 5 | 9852 | # cvstree.py -- cvs tree utilities
# Copyright 1998-2017 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import io
import re
import stat
import sys
import time
from portage import os
from portage import _encodings
from portage import _unicode_encode
# Python 3 removed the ``long`` builtin; alias it to ``int`` so the rest of
# the module can reference ``long`` on either interpreter.
if sys.hexversion >= 0x3000000:
# pylint: disable=W0622
long = int
# CVS/Entries line layout: [D]/Name/Version/Date/Flags/Tags
def pathdata(entries, path):
    """Return the data dict for the file or directory at *path*, or None.

    *entries* is the nested {"dirs": {...}, "files": {...}} structure built
    by getentries(); *path* is "/"-separated and relative to its root.
    """
    parts = path.split("/")
    target = parts[-1]
    node = entries
    # Walk every intermediate component through the "dirs" level.
    for component in parts[:-1]:
        subdirs = node["dirs"]
        if component not in subdirs:
            return None
        node = subdirs[component]
    # The leaf may be either a directory or a file; directories win.
    for section in ("dirs", "files"):
        if target in node[section]:
            return node[section][target]
    return None
def fileat(entries, path):
# Backwards-compatible alias for pathdata().
return pathdata(entries, path)
def isadded(entries, path):
"""Returns True if the path exists and is added to the cvs tree."""
# Fast path: the in-memory entries tree already knows the answer.
mytarget = pathdata(entries, path)
if mytarget:
if "cvs" in mytarget["status"]:
return 1
# Slow path: consult the on-disk CVS/Entries file next to the target.
basedir = os.path.dirname(path)
filename = os.path.basename(path)
try:
myfile = io.open(
_unicode_encode(os.path.join(basedir, 'CVS', 'Entries'),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='strict')
except IOError:
# No Entries file (or unreadable) -> not under cvs control.
return 0
mylines = myfile.readlines()
myfile.close()
# Entries lines start with "/<name>/"; escape the filename for the regex.
rep = re.compile(r"^\/%s\/" % re.escape(filename))
for x in mylines:
if rep.search(x):
return 1
return 0
def findnew(entries, recursive=0, basedir=""):
    """Return paths of cvs entries that are added but not yet committed.

    An uncommitted addition is a cvs-tracked file whose revision is "0".
    Paths are optionally prefixed with *basedir*; pass a true *recursive*
    to descend into subdirectories.
    """
    if basedir and not basedir.endswith("/"):
        basedir += "/"
    found = [
        basedir + name
        for name, data in entries["files"].items()
        if "cvs" in data["status"] and data["revision"] == "0"
    ]
    if recursive:
        for subdir, subentries in entries["dirs"].items():
            found.extend(findnew(subentries, recursive, basedir + subdir))
    return found
def findoption(entries, pattern, recursive=0, basedir=""):
"""Iterate over paths of cvs entries for which the pattern.search() method
finds a match. Returns a list of paths, optionally prepended with a
basedir.
"""
# NOTE(review): unlike the other find*() helpers this normalizes an
# *empty* basedir to "/" as well (no ``if basedir`` guard), so results
# with the default basedir are prefixed with "/".  Confirm whether
# callers rely on that before unifying.
if not basedir.endswith("/"):
basedir += "/"
# Yield cvs-tracked files whose flags field matches the compiled pattern.
for myfile, mydata in entries["files"].items():
if "cvs" in mydata["status"]:
if pattern.search(mydata["flags"]):
yield basedir + myfile
if recursive:
for mydir, mydata in entries["dirs"].items():
for x in findoption(mydata, pattern,
recursive, basedir + mydir):
yield x
def findchanged(entries, recursive=0, basedir=""):
    """Return paths of cvs entries that exist on disk but differ from the
    committed revision.

    A changed entry is cvs-tracked, present on the filesystem, not marked
    "current", and has a non-zero (i.e. committed) revision.
    """
    if basedir and not basedir.endswith("/"):
        basedir += "/"
    changed = []
    for name, data in entries["files"].items():
        status = data["status"]
        if ("cvs" in status and "current" not in status
                and "exists" in status and data["revision"] != "0"):
            changed.append(basedir + name)
    if recursive:
        for subdir, subentries in entries["dirs"].items():
            changed.extend(findchanged(subentries, recursive, basedir + subdir))
    return changed
def findmissing(entries, recursive=0, basedir=""):
    """Return paths listed in the cvs tree but absent from the filesystem.

    Entries already flagged for removal are not reported as missing.
    """
    if basedir and not basedir.endswith("/"):
        basedir += "/"
    missing = []
    for name, data in entries["files"].items():
        status = data["status"]
        if "cvs" in status and "exists" not in status and "removed" not in status:
            missing.append(basedir + name)
    if recursive:
        for subdir, subentries in entries["dirs"].items():
            missing.extend(findmissing(subentries, recursive, basedir + subdir))
    return missing
def findunadded(entries, recursive=0, basedir=""):
    """Return paths that live in cvs-managed directories but are not
    themselves tracked by cvs.
    """
    if basedir and not basedir.endswith("/"):
        basedir += "/"
    # Anything without the "cvs" status flag is untracked.
    unadded = [
        basedir + name
        for name, data in entries["files"].items()
        if "cvs" not in data["status"]
    ]
    if recursive:
        for subdir, subentries in entries["dirs"].items():
            unadded.extend(findunadded(subentries, recursive, basedir + subdir))
    return unadded
def findremoved(entries, recursive=0, basedir=""):
    """Return paths of cvs entries flagged for deletion ("removed" status)."""
    if basedir and not basedir.endswith("/"):
        basedir += "/"
    removed = [
        basedir + name
        for name, data in entries["files"].items()
        if "removed" in data["status"]
    ]
    if recursive:
        for subdir, subentries in entries["dirs"].items():
            removed.extend(findremoved(subentries, recursive, basedir + subdir))
    return removed
def findall(entries, recursive=0, basedir=""):
    """Run every find*() helper over the tree and collect their results.

    Returns a five-element list in the fixed order:
    [new, changed, missing, unadded, removed].
    """
    if basedir and not basedir.endswith("/"):
        basedir += "/"
    return [finder(entries, recursive, basedir)
            for finder in (findnew, findchanged, findmissing,
                           findunadded, findremoved)]
# Default cvs ignore patterns (matched against a path's basename).
ignore_list = re.compile(r"(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
def apply_cvsignore_filter(list):
	"""Remove paths whose basename matches cvs's default ignore patterns.

	Filtering happens in place (callers may rely on the argument being
	mutated) and the same list object is returned for convenience.
	NOTE: the parameter name shadows the ``list`` builtin; kept for
	backward compatibility with keyword callers.
	"""
	# Rebuild in a single pass via slice assignment.  The original popped
	# elements inside a while-loop, which is O(n**2) because every pop()
	# shifts the remaining tail of the list.
	list[:] = [path for path in list
		if not ignore_list.match(path.split("/")[-1])]
	return list
def getentries(mydir, recursive=0):
"""Scans the given directory and returns a datadict of all the entries in
the directory separated as a dirs dict and a files dict.
"""
myfn = mydir + "/CVS/Entries"
# entries=[dirs, files]
entries = {"dirs":{}, "files":{}}
if not os.path.exists(mydir):
return entries
# Phase 1: parse CVS/Entries for everything cvs knows about.
try:
myfile = io.open(_unicode_encode(myfn,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='strict')
mylines = myfile.readlines()
myfile.close()
except SystemExit as e:
raise
# NOTE(review): bare except silently treats any read/decode error as
# "no Entries file"; consider narrowing to (IOError, OSError).
except:
mylines = []
for line in mylines:
if line and line[-1] == "\n":
line = line[:-1]
if not line:
continue
if line == "D": # End of entries file
break
mysplit = line.split("/")
if len(mysplit) != 6:
print("Confused:", mysplit)
continue
if mysplit[0] == "D":
# Directory entry: record status/flags and optionally recurse.
entries["dirs"][mysplit[1]] = {"dirs":{}, "files":{}, "status":[]}
entries["dirs"][mysplit[1]]["status"] = ["cvs"]
if os.path.isdir(mydir+"/"+mysplit[1]):
entries["dirs"][mysplit[1]]["status"] += ["exists"]
entries["dirs"][mysplit[1]]["flags"] = mysplit[2:]
if recursive:
rentries = getentries(mydir + "/" + mysplit[1], recursive)
entries["dirs"][mysplit[1]]["dirs"] = rentries["dirs"]
entries["dirs"][mysplit[1]]["files"] = rentries["files"]
else:
# [D]/Name/revision/Date/Flags/Tags
entries["files"][mysplit[1]] = {}
entries["files"][mysplit[1]]["revision"] = mysplit[2]
entries["files"][mysplit[1]]["date"] = mysplit[3]
entries["files"][mysplit[1]]["flags"] = mysplit[4]
entries["files"][mysplit[1]]["tags"] = mysplit[5]
entries["files"][mysplit[1]]["status"] = ["cvs"]
# A leading "-" on the revision marks the file as scheduled for removal.
if entries["files"][mysplit[1]]["revision"][0] == "-":
entries["files"][mysplit[1]]["status"] += ["removed"]
# Phase 2: walk the real filesystem and merge in anything cvs missed.
for file in os.listdir(mydir):
if file == "CVS":
continue
if os.path.isdir(mydir + "/" + file):
if file not in entries["dirs"]:
if ignore_list.match(file) is not None:
continue
entries["dirs"][file] = {"dirs":{}, "files":{}}
# It's normal for a directory to be unlisted in Entries
# when checked out without -P (see bug #257660).
rentries = getentries(mydir + "/" + file, recursive)
entries["dirs"][file]["dirs"] = rentries["dirs"]
entries["dirs"][file]["files"] = rentries["files"]
if "status" in entries["dirs"][file]:
if "exists" not in entries["dirs"][file]["status"]:
entries["dirs"][file]["status"] += ["exists"]
else:
entries["dirs"][file]["status"] = ["exists"]
elif os.path.isfile(mydir + "/" + file):
if file not in entries["files"]:
if ignore_list.match(file) is not None:
continue
entries["files"][file] = {"revision":"", "date":"", "flags":"", "tags":""}
if "status" in entries["files"][file]:
if "exists" not in entries["files"][file]["status"]:
entries["files"][file]["status"] += ["exists"]
else:
entries["files"][file]["status"] = ["exists"]
try:
# Compare mtime (as asctime in UTC) with the Entries date to decide
# whether the working copy is unmodified ("current").
mystat = os.stat(mydir + "/" + file)
mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
if "status" not in entries["files"][file]:
entries["files"][file]["status"] = []
if mytime == entries["files"][file]["date"]:
entries["files"][file]["status"] += ["current"]
except SystemExit as e:
raise
except Exception as e:
print("failed to stat", file)
print(e)
# NOTE(review): this aborts the whole scan and returns None on a
# single stat failure; ``continue`` looks like the intended behavior.
return
elif ignore_list.match(file) is not None:
pass
else:
print()
print("File of unknown type:", mydir + "/" + file)
print()
return entries
| gpl-2.0 |
lseyesl/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py | 124 | 3269 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
from webkitpy.common.system.executive import ScriptError
class CleanWorkingDirectoryTest(unittest.TestCase):
    """Tests for the CleanWorkingDirectory step, driven by a mocked SCM."""

    @staticmethod
    def _make_tool_and_step(**options):
        # Shared fixture: a MockTool whose SCM is a bare Mock, plus a step
        # configured from the given option values.
        tool = MockTool()
        tool._scm = Mock()
        step = CleanWorkingDirectory(tool, MockOptions(**options))
        return tool, step

    def test_run_working_directory_changes_no_force(self):
        tool, step = self._make_tool_and_step(clean=True, force_clean=False)
        tool._scm.has_working_directory_changes = lambda: True
        # Without force_clean, local changes must abort instead of being discarded.
        self.assertRaises(ScriptError, step.run, {})
        self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 0)

    def test_run_working_directory_changes_force(self):
        tool, step = self._make_tool_and_step(clean=True, force_clean=True)
        tool._scm.has_working_directory_changes = lambda: True
        step.run({})
        self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 1)

    def test_run_no_local_changes(self):
        tool, step = self._make_tool_and_step(clean=True, force_clean=False)
        tool._scm.has_working_directory_changes = lambda: False
        tool._scm.has_local_commits = lambda: False
        step.run({})
        self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 1)

    def test_no_clean(self):
        tool, step = self._make_tool_and_step(clean=False)
        step.run({})
        self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 0)
| bsd-3-clause |
ashaycool/syncform | server.py | 2 | 1351 | # Import your application as:
# from wsgi import application
# Example:
# If you are using the wsgi.py (standard Pyramid)
#from wsgi import app
# If using application.py (single page example):
# from application import app
from wsgi import application
# Import CherryPy
import cherrypy
if __name__ == '__main__':
    # Mount the WSGI application at the site root.  BUG FIX: the module
    # imports ``application`` (from wsgi), but the original code grafted the
    # undefined name ``app``, which raised NameError at startup.
    cherrypy.tree.graft(application, "/")
    # Unsubscribe the default server so only the one configured below runs.
    cherrypy.server.unsubscribe()
    # Instantiate and configure a new server object: listen on all
    # interfaces, port 5000, with a 30-thread pool.
    server = cherrypy._cpserver.Server()
    server.socket_host = "0.0.0.0"
    server.socket_port = 5000
    server.thread_pool = 30
    # For SSL Support
    # server.ssl_module = 'pyopenssl'
    # server.ssl_certificate = 'ssl/certificate.crt'
    # server.ssl_private_key = 'ssl/private.key'
    # server.ssl_certificate_chain = 'ssl/bundle.crt'
    # Subscribe this server
    server.subscribe()
    # Example for a 2nd server (same steps as above):
    # Remember to use a different port
    # server2 = cherrypy._cpserver.Server()
    # server2.socket_host = "0.0.0.0"
    # server2.socket_port = 8080
    # server2.thread_pool = 30
    # server2.subscribe()
    # Start the server engine (engine.block() follows on the next line)
    cherrypy.engine.start()
cherrypy.engine.block() | gpl-3.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/distutils/command/clean.py | 251 | 2814 | """distutils.command.clean
Implements the Distutils 'clean' command."""
# contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils import log
class clean(Command):
# Distutils command that deletes the temporary (and optionally all) build
# output directories produced by the 'build' and 'bdist' commands.
description = "clean up temporary files from 'build' command"
# (long option, short option, help text) triples understood by distutils.
user_options = [
('build-base=', 'b',
"base build directory (default: 'build.build-base')"),
('build-lib=', None,
"build directory for all modules (default: 'build.build-lib')"),
('build-temp=', 't',
"temporary build directory (default: 'build.build-temp')"),
('build-scripts=', None,
"build directory for scripts (default: 'build.build-scripts')"),
('bdist-base=', None,
"temporary directory for built distributions"),
('all', 'a',
"remove all build output, not just temporary by-products")
]
boolean_options = ['all']
def initialize_options(self):
# All options default to None; real values come from 'build'/'bdist'.
self.build_base = None
self.build_lib = None
self.build_temp = None
self.build_scripts = None
self.bdist_base = None
self.all = None
def finalize_options(self):
# Inherit any unset directory options from the build/bdist commands.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'),
('build_scripts', 'build_scripts'),
('build_temp', 'build_temp'))
self.set_undefined_options('bdist',
('bdist_base', 'bdist_base'))
def run(self):
# remove the build/temp.<plat> directory (unless it's already
# gone)
if os.path.exists(self.build_temp):
remove_tree(self.build_temp, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it",
self.build_temp)
if self.all:
# remove build directories
for directory in (self.build_lib,
self.bdist_base,
self.build_scripts):
if os.path.exists(directory):
remove_tree(directory, dry_run=self.dry_run)
else:
log.warn("'%s' does not exist -- can't clean it",
directory)
# just for the heck of it, try to remove the base build directory:
# we might have emptied it right now, but if not we don't care
if not self.dry_run:
try:
os.rmdir(self.build_base)
log.info("removing '%s'", self.build_base)
except OSError:
pass
# class clean
| gpl-2.0 |
txemi/ansible | lib/ansible/modules/network/nxos/nxos_rollback.py | 21 | 5721 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_rollback
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Set a checkpoint or rollback to a checkpoint.
description:
- This module offers the ability to set a configuration checkpoint
file or rollback to a configuration checkpoint file on Cisco NXOS
switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Sometimes C(transport=nxapi) may cause a timeout error.
options:
checkpoint_file:
description:
- Name of checkpoint file to create. Mutually exclusive
with rollback_to.
required: false
default: null
rollback_to:
description:
- Name of checkpoint file to rollback to. Mutually exclusive
with checkpoint_file.
required: false
default: null
'''
EXAMPLES = '''
- nxos_rollback:
checkpoint_file: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
- nxos_rollback:
rollback_to: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
filename:
description: The filename of the checkpoint/rollback file.
returned: success
type: string
sample: 'backup.cfg'
status:
description: Which operation took place and whether it was successful.
returned: success
type: string
sample: 'rollback executed'
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
def execute_commands(cmds, module, command_type=None):
# Send commands to the device via module.execute(), falling back to the
# module.cli interface when execute() is unavailable (AttributeError).
# NOTE(review): ``ShellError`` and ``get_exception`` are referenced below
# but never imported in this module -- if either except clause is ever
# reached it will raise NameError instead of reporting the real failure.
# Maps nxapi command types to the 'output' format expected by module.cli.
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
# Fallback path for connections that expose .cli instead of .execute.
try:
if command_type:
command_type = command_type_map.get(command_type)
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
else:
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def prepare_show_command(command, module):
    """Run *command* through the execution path matching the configured
    transport ('cli' or 'nxapi')."""
    transport = module.params['transport']
    if transport == 'cli':
        execute_commands(command, module)
    elif transport == 'nxapi':
        execute_commands(command, module, command_type='cli_show_ascii')
def checkpoint(filename, module):
    """Create a named configuration checkpoint file on the device,
    suppressing interactive confirmation prompts first."""
    prepare_show_command(
        ['terminal dont-ask', 'checkpoint file %s' % filename], module)
def rollback(filename, module):
# Roll the running-config back to a previously created checkpoint file.
commands = ['rollback running-config file %s' % filename]
try:
module.configure(commands)
except AttributeError:
# Fallback for connections exposing .cli instead of .configure.
try:
module.cli.add_commands(commands, output='config')
module.cli.run_commands()
except ShellError:
# NOTE(review): ShellError/get_exception are never imported in this
# module; this handler would raise NameError if triggered.
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
def main():
# Ansible entry point: create a checkpoint file or roll back to one.
# checkpoint_file and rollback_to are mutually exclusive.
argument_spec = dict(
checkpoint_file=dict(required=False),
rollback_to=dict(required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['checkpoint_file',
'rollback_to']],
supports_check_mode=False)
checkpoint_file = module.params['checkpoint_file']
rollback_to = module.params['rollback_to']
status = None
filename = None
changed = False
try:
if checkpoint_file:
checkpoint(checkpoint_file, module)
status = 'checkpoint file created'
elif rollback_to:
rollback(rollback_to, module)
status = 'rollback executed'
changed = True
filename = rollback_to or checkpoint_file
except ShellError:
# NOTE(review): ShellError/get_exception are never imported here;
# this handler would raise NameError if an error actually occurred.
clie = get_exception()
module.fail_json(msg=str(clie))
module.exit_json(changed=changed, status=status, filename=filename)
| gpl-3.0 |
kerzhner/airflow | airflow/utils/asciiart.py | 67 | 2298 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ASCII-art banner: a dung beetle ("Pillendreher", Scarabaeus sacer).
bug = r"""\
=, .=
=.| ,---. |.=
=.| "-(:::::)-" |.=
\\__/`-.|.-'\__//
`-| .::| .::|-' Pillendreher
_|`-._|_.-'|_ (Scarabaeus sacer)
/.-| | .::|-.\
// ,| .::|::::|. \\
|| //\::::|::' /\\ ||
/'\|| `.__|__.' ||/'\
^ \\ // ^
/'\ /'\
^ ^
"""
# ASCII-art banner: a mushroom cloud.
nukular = r"""
____/ ( ( ) ) \___
/( ( ( ) _ )) ) )\
(( ( )( ) ) ( ) )
((/ ( _( ) ( _) ) ( () ) )
( ( ( (_) (( ( ) .((_ ) . )_
( ( ) ( ( ) ) ) . ) ( )
( ( ( ( ) ( _ ( _) ). ) . ) ) ( )
( ( ( ) ( ) ( )) ) _)( ) ) )
( ( ( \ ) ( (_ ( ) ( ) ) ) ) )) ( )
( ( ( ( (_ ( ) ( _ ) ) ( ) ) )
( ( ( ( ( ) (_ ) ) ) _) ) _( ( )
(( ( )( ( _ ) _) _(_ ( (_ )
(_((__(_(__(( ( ( | ) ) ) )_))__))_)___)
((__) \\||lll|l||/// \_))
( /(/ ( ) ) )\ )
( ( ( ( | | ) ) )\ )
( /(| / ( )) ) ) )) )
( ( ((((_(|)_))))) )
( ||\(|(|)|/|| )
( |(||(||)|||| )
( //|/l|||)|\\ \ )
(/ / // /|//||||\\ \ \ \ _)
-------------------------------------------------------------------------------
"""
| apache-2.0 |
scs/uclinux | user/python/python-2.4.4/Tools/scripts/diff.py | 62 | 2002 | """ Command line interface to difflib.py providing diffs in four formats:
* ndiff: lists every line and highlights interline changes.
* context: highlights clusters of changes in a before/after format.
* unified: highlights clusters of changes in an inline format.
* html: generates side by side comparison with change highlights.
"""
import sys, os, time, difflib, optparse
def main():
    """Command-line entry point: parse options, diff the two named files,
    and write the selected diff format to stdout.

    Fixes vs. the original: the input files were opened in mode 'U' (an
    alias removed in Python 3.11, making the script fail there) and the
    file handles were never closed.  Files are now opened with the default
    text mode -- which gives universal-newline handling on Python 3 -- and
    closed explicitly.
    """
    usage = "usage: %prog [options] fromfile tofile"
    parser = optparse.OptionParser(usage)
    parser.add_option("-c", action="store_true", default=False, help='Produce a context format diff (default)')
    parser.add_option("-u", action="store_true", default=False, help='Produce a unified format diff')
    parser.add_option("-m", action="store_true", default=False, help='Produce HTML side by side diff (can use -c and -l in conjunction)')
    parser.add_option("-n", action="store_true", default=False, help='Produce a ndiff format diff')
    parser.add_option("-l", "--lines", type="int", default=3, help='Set number of context lines (default 3)')
    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.print_help()
        sys.exit(1)
    if len(args) != 2:
        parser.error("need to specify both a fromfile and tofile")

    n = options.lines
    fromfile, tofile = args

    # Modification times are shown in the context/unified headers.
    fromdate = time.ctime(os.stat(fromfile).st_mtime)
    todate = time.ctime(os.stat(tofile).st_mtime)

    f = open(fromfile)
    try:
        fromlines = f.readlines()
    finally:
        f.close()
    f = open(tofile)
    try:
        tolines = f.readlines()
    finally:
        f.close()

    if options.u:
        diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
    elif options.n:
        diff = difflib.ndiff(fromlines, tolines)
    elif options.m:
        diff = difflib.HtmlDiff().make_file(fromlines,tolines,fromfile,tofile,context=options.c,numlines=n)
    else:
        # Context diff is the default format.
        diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)

    sys.stdout.writelines(diff)
if __name__ == '__main__':
main()
| gpl-2.0 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.8/ssd-tpuv2-8/code/ssd/model/tpu/models/official/amoeba_net/network_utils_test.py | 5 | 2161 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.nasnet.nasnet_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import network_utils
class NetworkUtilsTest(tf.test.TestCase):
  """Unit tests for the helper routines in network_utils."""

  def testCalcReductionLayers(self):
    """Two reduction cells land evenly through an 18-cell stack."""
    layers = network_utils.calc_reduction_layers(18, 2)
    self.assertEqual(len(layers), 2)
    self.assertEqual(layers[0], 6)
    self.assertEqual(layers[1], 12)

  def testGetChannelIndex(self):
    """NHWC puts channels on axis 3; NCHW puts them on axis 1."""
    expected = {'NHWC': 3, 'NCHW': 1}
    for data_format, correct_index in expected.items():
      self.assertEqual(network_utils.get_channel_index(data_format),
                       correct_index)

  def testGetChannelDim(self):
    """The channel dimension is read from the axis the format dictates."""
    shape = [10, 20, 30, 40]
    expected = {'NHWC': shape[3], 'NCHW': shape[1]}
    for data_format, correct_dim in expected.items():
      self.assertEqual(network_utils.get_channel_dim(shape, data_format),
                       correct_dim)

  def testGlobalAvgPool(self):
    """Global average pooling collapses both spatial axes in either layout."""
    inputs = tf.placeholder(tf.float32, (5, 10, 20, 10))
    for data_format in ('NHWC', 'NCHW'):
      output = network_utils.global_avg_pool(inputs, data_format)
      self.assertEqual(output.shape, [5, 10])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
tedlaz/pyted | misthodosia/payroll/calc_example.py | 1 | 1345 | # -*- coding: utf-8 -*-
"""Module calc_example.py"""
if __name__ == '__main__':
    # Demo driver for the payroll module: builds two sample employee
    # records, then prints Greek income-tax and social-security figures.
    import calc as c
    # Sample employee records.
    # NOTE(review): the positional args to c.Erg are undocumented here
    # (hours? hourly rates? flag?) -- confirm against calc.Erg.
    B105 = c.Erg(50, 45.66, 18.95, False)
    B101 = c.Erg(40, 40.06, 15.5, False)
    # -----------------------------------------------
    # New IKA (social security) contribution rates
    #
    # Description        employee  employer  total
    # ===============================================
    # Pension                6.67     13.33  20.00
    # Health care            2.15      4.30   6.45
    # Benefits in kind       0.40      0.25   0.65
    # ===============================================
    # Totals                 9.22     17.88  27.10
    # Supplementary          3.50      3.50   7.00
    # ===============================================
    # Total incl. suppl.    12.72     21.38  34.10
    # -----------------------------------------------
    # bnew = Erg(40, 27.1, 9.22, False)
    # print(doro_pasxa(20, 38, False))
    # par = Parousies(10)
    # prncalc(par.calc(bnew))
    # print('%s|' % fmt('ted laza', 30, 't'))
    # print('%s|' % fmt('120.836,23.15', 10, 'n'))
    # Print an income-tax table for incomes 8000..71000 in steps of 1000.
    c.printfor(8000, 71000, 1000, True)
    # print(foros_ak(55000))
    # print(foros_ea(23000))
    # print(oaee_etisio(24000))
    c.ek_ee(200000, 0, 0, 0, True)
    print(c.foros_eis(15052.8, 0, True))
    print(c.foros_eispar(15052.8, 0, True))
| gpl-3.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.5/tests/modeltests/model_package/tests.py | 150 | 2612 | from __future__ import absolute_import
from django.contrib.sites.models import Site
from django.db import models
from django.test import TestCase
from .models.publication import Publication
from .models.article import Article
class Advertisment(models.Model):
    # NOTE(review): the class name is misspelled ("Advertisment") but is
    # kept as-is: renaming a Django model changes its database table name.
    customer = models.CharField(max_length=100)
    # The M2M target is a dotted "app_label.Model" string to exercise lazy
    # references to a model defined inside a models sub-package.
    publications = models.ManyToManyField(
        "model_package.Publication", null=True, blank=True
    )
    class Meta:
        # Required because this model is defined outside the app's
        # models module.
        app_label = 'model_package'
class ModelPackageTests(TestCase):
    """Regression tests for models that live in a ``models`` sub-package."""

    def test_model_packages(self):
        """M2M tables, fields and test-package models resolve correctly."""
        p = Publication.objects.create(title="FooBar")
        # Sanity check that the sites framework is wired up in the test DB.
        current_site = Site.objects.get_current()
        self.assertEqual(current_site.domain, "example.com")
        # Regression for #12168: models split into subpackages still get M2M
        # tables
        a = Article.objects.create(headline="a foo headline")
        a.publications.add(p)
        a.sites.add(current_site)
        # Re-fetch to confirm the relations round-trip through the DB.
        a = Article.objects.get(id=a.pk)
        self.assertEqual(a.id, a.pk)
        self.assertEqual(a.sites.count(), 1)
        # Regression for #12245 - Models can exist in the test package, too
        ad = Advertisment.objects.create(customer="Lawrence Journal-World")
        ad.publications.add(p)
        ad = Advertisment.objects.get(id=ad.pk)
        self.assertEqual(ad.publications.count(), 1)
        # Regression for #12386 - field names on the autogenerated intermediate
        # class that are specified as dotted strings don't retain any path
        # component for the field or column name
        self.assertEqual(
            Article.publications.through._meta.fields[1].name, 'article'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[1].get_attname_column(),
            ('article_id', 'article_id')
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].name, 'publication'
        )
        self.assertEqual(
            Article.publications.through._meta.fields[2].get_attname_column(),
            ('publication_id', 'publication_id')
        )
        # The oracle backend truncates the name to 'model_package_article_publ233f'.
        self.assertTrue(
            Article._meta.get_field('publications').m2m_db_table() in ('model_package_article_publications', 'model_package_article_publ233f')
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_column_name(), 'article_id'
        )
        self.assertEqual(
            Article._meta.get_field('publications').m2m_reverse_name(),
            'publication_id'
        )
| mit |
brianlsharp/MissionPlanner | Lib/site-packages/scipy/ndimage/tests/test_filters.py | 55 | 1118 | ''' Some tests for filters '''
import numpy as np
from numpy.testing import assert_equal, assert_raises
import scipy.ndimage as sndi
def test_ticket_701():
    """Regression test for ticket #701: scalar and tuple ``size`` agree."""
    data = np.arange(4).reshape((2, 2))

    def window_min(values):
        return np.min(values)

    tuple_size = sndi.generic_filter(data, window_min, size=(1, 1))
    # Before the ticket-701 fix, passing a scalar size raised an error.
    scalar_size = sndi.generic_filter(data, window_min, size=1)
    assert_equal(tuple_size, scalar_size)
def test_orders_gauss():
    """Yield nose-style checks for the ``order`` argument of the Gaussians.

    Valid orders (0 and 3) must give a zero response on zero input;
    out-of-range orders (-1 and 4) must raise ValueError.
    """
    arr = np.zeros((1,))
    for good_order in (0, 3):
        yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=good_order)
    for bad_order in (-1, 4):
        yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, bad_order
    for good_order in (0, 3):
        yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=good_order)
    for bad_order in (-1, 4):
        yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, bad_order
| gpl-3.0 |
bobthekingofegypt/bobswitch-python | bobswitch/bobswitch.py | 1 | 9377 | # -*- coding: utf-8 -*-
"""
BobSwitch
~~~~~~~~~~~~~~
Online HTML5 version of the backpackers card game switch, also known as
crazy eights and a lot of different names. This version follows the rules
that I know.
:copyright: (c) Copyright 2013 by Bob
:license: BSD, see LICENSE for more details.
"""
import sys
import argparse
import tornado.ioloop
import sockjs.tornado
from json_convert import convert_hand, convert_state_start, convert_play_response, \
convert_state_watch
from engine import create_deck, Game, MoveType, GameMove, GameState
from models import Player, Card, Suit, Rank
from sockjs_ext import EventSocketConnection, event
import logging
log = logging.getLogger()
def wrap_chat_message(name, message):
    """Package a chat line as the JSON-serialisable dict the client expects."""
    return {"name": name, "text": message}
def create_game(players):
    """Start a new Game for *players*: fresh shuffled deck, 7 cards each."""
    shuffled_deck = create_deck()
    shuffled_deck.shuffle()
    return Game(players, 7, shuffled_deck)
def convert_move_type(move_type):
    """Map a wire-format move string onto a MoveType; anything else -> wait."""
    known_moves = {"pick": MoveType.pick, "play": MoveType.play}
    return known_moves.get(move_type, MoveType.wait)
def convert_card(rankId, suitId):
    """Rebuild a Card from the integer rank/suit ids sent over the wire.

    Raises StopIteration if either id matches no Rank/Suit member.
    """
    matching_rank = next(r for r in Rank if int(r) == rankId)
    matching_suit = next(s for s in Suit if int(s) == suitId)
    return Card(matching_suit, matching_rank)
def convert_suit(suitId):
    """Look up the Suit member whose integer value equals *suitId*."""
    return next(candidate for candidate in Suit if int(candidate) == suitId)
class Room(object):
    """Mutable per-room state: known players, subscribed sockets, live game."""

    def __init__(self):
        # No game runs until every registered player readies up.
        self.active_game = None
        # Snapshot of self.players taken when a game starts.
        self.active_players = {}
        # name -> RoomPlayer for everyone who has logged in to this room.
        self.players = {}
        # Sockets currently subscribed to this room (players and watchers).
        self.participants = set()
class RoomPlayer(object):
    """Ties a player's name to its socket and engine-level Player object."""

    def __init__(self, name, socket, player):
        self.name = name
        self.socket = socket    # None while the player is disconnected
        self.player = player    # engine-level Player
        self.ready = False      # flipped True by the game:player:ready event
class SocketConnection(EventSocketConnection):
    """One sockjs connection; routes chat, login and game events.

    Class-level attributes are shared by every connection in the process:
    ``participants`` holds all open sockets, ``rooms`` maps room name to
    Room.  (``room`` looks like a vestigial single-room default -- nothing
    below reads it; confirm before removing.)
    """
    participants = set()
    room = Room()
    rooms = {}

    def on_open(self, info):
        """sockjs callback: transport connected."""
        #user remains annonymous until they register, so they get no name
        self.name = None
        self.participants.add(self)

    def on_close(self):
        """sockjs callback: mark the player disconnected but keep his seat."""
        log.debug("Player disconnected: %s", self.name)
        if hasattr(self, "active_room"):
            if self.name in self.active_room.players:
                self.broadcast_event(self.active_room.participants, "players:disconnected", self.name)
                # Keep the RoomPlayer so a running game survives; a null
                # socket marks the seat as reconnectable.
                self.active_room.players[self.name].socket = None
            self.active_room.participants.remove(self)
        self.participants.remove(self)

    ########
    # chat functions
    ########

    @event("chat:message")
    def chat_message(self, room_name, message):
        """Relay a chat line to everyone in the sender's active room."""
        log.debug("Chat message recieved for room '%s': %s: %s", room_name, self.name, message)
        # NOTE(review): room_name is only logged; the broadcast targets the
        # room this socket joined via account:listing.
        room = self.active_room
        wrapped_message = wrap_chat_message(self.name, message)
        self.broadcast_event(room.participants, "chat:message", wrapped_message)

    def check_room(self, room):
        """Lazily create the named room on first reference."""
        if room not in self.rooms:
            log.debug("creating room - '%s'", room)
            self.rooms[room] = Room()

    ########
    # account functions
    ########

    @event("account:login")
    def login(self, room_name, name):
        """Register *name* in the active room, or reconnect a known player."""
        log.debug("user '%s' logged in to room '%s'", name, room_name)
        room = self.active_room
        self.name = name
        if name in room.players:
            # Reconnect: re-bind the socket and replay game state, if any.
            room.players[name].socket = self
            self.broadcast_event(room.participants, "players:reconnected", self.name)
            game = room.active_game
            if game:
                hand = game.player_hand(name)
                self.send_event("game:state:start",
                        convert_state_start(game.state, game.players, game.player_hands,
                            game.current_player, game.played_cards.top_card, hand))
        else:
            # First login: create the engine Player and announce him.
            player = Player(name)
            room_player = RoomPlayer(name, self, player)
            room.players[name] = room_player
            self.broadcast_event(self.active_room.participants, "players:added", name)

    @event("account:listing")
    def listing(self, room_name, message):
        """Join *room_name* (creating it if needed) and send the roster."""
        log.debug("request for registered users")
        self.check_room(room_name)
        room = self.rooms[room_name]
        room.participants.add(self)
        self.active_room = room
        names = [{"name":s.name, "ready":s.ready, "disconnected": s.socket==None}
                    for s in room.players.values()]
        self.send_event("players:listing", names)

    #######
    # game functions
    #######

    @event("game:player:ready")
    def player_ready(self, room_name, message):
        """Mark this player ready; start the game once 2-4 players all are."""
        log.debug("%s ready to start game", self.name)
        room = self.active_room
        player = room.players[self.name]
        player.ready = True
        self.broadcast_event(room.participants, "game:player:ready", self.name)
        all_ready = all([s.ready
            for s in room.players.values()])
        player_count = len(room.players)
        enough_players = player_count > 1 and player_count < 5
        if not all_ready or not enough_players:
            return
        # Freeze the roster: late joiners become watchers, not players.
        room.active_players = room.players.copy()
        #start the game
        #send the game initial state to all players
        players = [s.player for s in room.players.values()]
        game = create_game(players)
        for participant in room.active_players.values():
            socket = participant.socket
            if socket == None:
                #player is currently disconnected, just ignore him he will
                #pick up state on reconnection
                continue
            hand = game.player_hand(socket.name)
            socket.send_event("game:state:start",
                    convert_state_start(game.state, game.players, game.player_hands,
                        game.current_player, game.played_cards.top_card, hand))
        # Non-players in the room get a spectator view (no hand).
        for participant in room.participants:
            if participant.name not in room.active_players:
                participant.send_event("game:state:watch",
                        convert_state_watch(game.state, game.players, game.player_hands,
                            game.current_player, game.played_cards.top_card))
        room.active_game = game
        for participant in room.active_players.values():
            participant.ready = False

    @event("game:player:move")
    def player_move(self, room_name, message):
        """Apply one move (wait/pick/play) and fan out the new game state."""
        log.debug("%s plays move", self.name)
        #parse message (wait, pick, play)
        #format
        """
            "type": "play",
            "card": { rank: 4, suit: 4 }
        """
        room = self.active_room
        move_type = convert_move_type(message["type"])
        move = None
        if move_type == MoveType.play:
            card = message["card"]
            move = GameMove(MoveType.play,
                    convert_card(card["rank"], card["suit"]))
            # An accompanying "suit" means a suit was nominated (e.g. after
            # an ace) -- presumably; confirm against the engine rules.
            if "suit" in message:
                move.suit = convert_suit(message["suit"])
        else:
            move = GameMove(move_type)
        game = room.active_game
        play_response = game.play(self.name, move)
        # The mover always gets a direct accept/reject response first.
        self.send_event("game:player:response",
                convert_play_response(play_response))
        if play_response.success:
            for participant in room.active_players.values():
                socket = participant.socket
                if socket == None:
                    continue
                hand = game.player_hand(socket.name)
                socket.send_event("game:state:update",
                        convert_state_start(game.state, game.players,
                            game.player_hands,
                            game.current_player,
                            game.played_cards.top_card, hand))
            for participant in room.participants:
                if participant.name not in room.active_players:
                    participant.send_event("game:state:watch",
                            convert_state_watch(game.state, game.players, game.player_hands,
                                game.current_player, game.played_cards.top_card))
            if game.state == GameState.FINISHED:
                # Reset the room so a fresh game can be started.
                room.active_game = None
                room.active_players = None
                for participant in room.players.values():
                    participant.ready = False
class ClearHandler(tornado.web.RequestHandler):
    """Debug-only endpoint: GET /clear wipes all connection and room state."""
    def get(self):
        # Drop every tracked socket and room so tests start from a clean slate.
        SocketConnection.participants.clear()
        SocketConnection.rooms = {}
def parse_args(argv=None):
    """Parse command-line arguments.

    argv: list of argument strings; defaults to sys.argv[1:] read at call
        time.  (The original default ``argv=sys.argv[1:]`` was evaluated
        once at import time, so later changes to sys.argv were ignored.)

    Returns the populated argparse.Namespace (attribute: ``debug``).
    """
    description = """
    Python server for playing bobswitch
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-d", "--debug", help="enable debug mode",
            action="store_true")
    return parser.parse_args(argv)
if __name__ == "__main__":
    import logging
    logging.getLogger().setLevel(logging.DEBUG)
    arguments = parse_args()
    # All sockjs traffic is multiplexed through the /bobswitch endpoint.
    BobSwitchRouter = sockjs.tornado.SockJSRouter(SocketConnection,
            '/bobswitch')
    app = tornado.web.Application(
        BobSwitchRouter.urls,
        debug=arguments.debug
    )
    app.listen(4500)
    if arguments.debug:
        # Side-channel server exposing /clear so tests can reset state.
        debug_application = tornado.web.Application([
            (r"/clear", ClearHandler),
        ])
        debug_application.listen(9433)
    tornado.ioloop.IOLoop.instance().start()
| bsd-2-clause |
raincoatrun/ThinkStats2 | code/thinkplot.py | 75 | 18140 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import pandas
import warnings
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class _Brewer(object):
    """Encapsulates a nice sequence of colors.

    Shades of blue that look good in color and can be distinguished
    in grayscale (up to a point).

    Borrowed from http://colorbrewer2.org/
    """
    # shared iterator handed out by GetIter; None means "not initialized"
    color_iter = None

    colors = ['#081D58',
              '#253494',
              '#225EA8',
              '#1D91C0',
              '#41B6C4',
              '#7FCDBB',
              '#C7E9B4',
              '#EDF8B1',
              '#FFFFD9']

    # lists that indicate which colors to use depending on how many are used
    which_colors = [[],
                    [1],
                    [1, 3],
                    [0, 2, 4],
                    [0, 2, 4, 6],
                    [0, 2, 3, 5, 6],
                    [0, 2, 3, 4, 5, 6],
                    [0, 1, 2, 3, 4, 5, 6],
                    ]

    @classmethod
    def Colors(cls):
        """Returns the list of colors.
        """
        return cls.colors

    @classmethod
    def ColorGenerator(cls, n):
        """Returns an iterator of color strings.

        n: how many colors will be used
        """
        for i in cls.which_colors[n]:
            yield cls.colors[i]
        # BUG FIX: the original ended with `raise StopIteration(...)`, which
        # PEP 479 turns into a RuntimeError inside a generator on Python
        # 3.7+.  Returning normally lets next() raise a regular
        # StopIteration, which _UnderrideColor already catches.

    @classmethod
    def InitializeIter(cls, num):
        """Initializes the color iterator with the given number of colors."""
        cls.color_iter = cls.ColorGenerator(num)

    @classmethod
    def ClearIter(cls):
        """Sets the color iterator to None."""
        cls.color_iter = None

    @classmethod
    def GetIter(cls):
        """Gets the color iterator, creating a default 7-color one if unset."""
        if cls.color_iter is None:
            cls.InitializeIter(7)

        return cls.color_iter
def PrePlot(num=None, rows=None, cols=None):
    """Takes hints about what's coming.

    num: number of lines that will be plotted
    rows: number of rows of subplots
    cols: number of columns of subplots
    """
    if num:
        _Brewer.InitializeIter(num)
    # With no grid hints there is nothing more to configure.
    if rows is None and cols is None:
        return
    # If only one grid dimension is given, the other defaults to 1.
    if rows is not None and cols is None:
        cols = 1
    if cols is not None and rows is None:
        rows = 1
    # resize the image, depending on the number of rows and cols
    size_map = {(1, 1): (8, 6),
                (1, 2): (14, 6),
                (1, 3): (14, 6),
                (2, 2): (10, 10),
                (2, 3): (16, 10),
                (3, 1): (8, 10),
                }
    if (rows, cols) in size_map:
        fig = pyplot.gcf()
        fig.set_size_inches(*size_map[rows, cols])
    # create the first subplot
    if rows > 1 or cols > 1:
        pyplot.subplot(rows, cols, 1)
        # Remember the grid shape so SubPlot can default to it later.
        # NOTE: only set for multi-panel figures.
        global SUBPLOT_ROWS, SUBPLOT_COLS
        SUBPLOT_ROWS = rows
        SUBPLOT_COLS = cols
def SubPlot(plot_number, rows=None, cols=None):
    """Configures the number of subplots and changes the current plot.

    rows: int
    cols: int
    plot_number: int (1-based index into the subplot grid)
    """
    # Fall back to the grid shape remembered by PrePlot.
    # NOTE(review): raises NameError if called before PrePlot has set the
    # SUBPLOT_* globals -- confirm callers always call PrePlot first.
    rows = rows or SUBPLOT_ROWS
    cols = cols or SUBPLOT_COLS
    pyplot.subplot(rows, cols, plot_number)
def _Underride(d, **options):
    """Add key-value pairs to d only if key is not in d.

    If d is None, create a new dictionary.

    d: dictionary or None
    options: keyword args to add to d

    Returns the (possibly newly created) dictionary.
    """
    result = {} if d is None else d
    for key, default in options.items():
        result.setdefault(key, default)
    return result
def Clf():
    """Clears the figure and any hints that have been set."""
    global LOC
    LOC = None                 # forget any remembered legend location
    _Brewer.ClearIter()        # reset the color rotation
    pyplot.clf()
    fig = pyplot.gcf()
    fig.set_size_inches(8, 6)  # restore the default figure size

def Figure(**options):
    """Sets options for the current figure (default figsize 6x8)."""
    _Underride(options, figsize=(6, 8))
    pyplot.figure(**options)
def _UnderrideColor(options):
    """Fill in options['color'] from the Brewer rotation if not already set."""
    if 'color' in options:
        return options
    # GetIter lazily creates a default 7-color iterator if none was set up.
    color_iter = _Brewer.GetIter()
    if color_iter:
        try:
            options['color'] = next(color_iter)
        except StopIteration:
            # Ran out of scheduled colors: clear the iterator and let
            # matplotlib pick defaults from here on.
            # TODO: reconsider whether this should warn
            # warnings.warn('Warning: Brewer ran out of colors.')
            _Brewer.ClearIter()
    return options
def Plot(obj, ys=None, style='', **options):
"""Plots a line.
Args:
obj: sequence of x values, or Series, or anything with Render()
ys: sequence of y values
style: style string passed along to pyplot.plot
options: keyword args passed to pyplot.plot
"""
options = _UnderrideColor(options)
label = getattr(obj, 'label', '_nolegend_')
options = _Underride(options, linewidth=3, alpha=0.8, label=label)
xs = obj
if ys is None:
if hasattr(obj, 'Render'):
xs, ys = obj.Render()
if isinstance(obj, pandas.Series):
ys = obj.values
xs = obj.index
if ys is None:
pyplot.plot(xs, style, **options)
else:
pyplot.plot(xs, ys, style, **options)
def FillBetween(xs, y1, y2=None, where=None, **options):
"""Plots a line.
Args:
xs: sequence of x values
y1: sequence of y values
y2: sequence of y values
where: sequence of boolean
options: keyword args passed to pyplot.fill_between
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.5)
pyplot.fill_between(xs, y1, y2, where, **options)
def Bar(xs, ys, **options):
"""Plots a line.
Args:
xs: sequence of x values
ys: sequence of y values
options: keyword args passed to pyplot.bar
"""
options = _UnderrideColor(options)
options = _Underride(options, linewidth=0, alpha=0.6)
pyplot.bar(xs, ys, **options)
def Scatter(xs, ys=None, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, color='blue', alpha=0.2,
s=30, edgecolors='none')
if ys is None and isinstance(xs, pandas.Series):
ys = xs.values
xs = xs.index
pyplot.scatter(xs, ys, **options)
def HexBin(xs, ys, **options):
"""Makes a scatter plot.
xs: x values
ys: y values
options: options passed to pyplot.scatter
"""
options = _Underride(options, cmap=matplotlib.cm.Blues)
pyplot.hexbin(xs, ys, **options)
def Pdf(pdf, **options):
"""Plots a Pdf, Pmf, or Hist as a line.
Args:
pdf: Pdf, Pmf, or Hist object
options: keyword args passed to pyplot.plot
"""
low, high = options.pop('low', None), options.pop('high', None)
n = options.pop('n', 101)
xs, ps = pdf.Render(low=low, high=high, n=n)
options = _Underride(options, label=pdf.label)
Plot(xs, ps, **options)
def Pdfs(pdfs, **options):
"""Plots a sequence of PDFs.
Options are passed along for all PDFs. If you want different
options for each pdf, make multiple calls to Pdf.
Args:
pdfs: sequence of PDF objects
options: keyword args passed to pyplot.plot
"""
for pdf in pdfs:
Pdf(pdf, **options)
def Hist(hist, **options):
"""Plots a Pmf or Hist with a bar plot.
The default width of the bars is based on the minimum difference
between values in the Hist. If that's too small, you can override
it by providing a width keyword argument, in the same units
as the values.
Args:
hist: Hist or Pmf object
options: keyword args passed to pyplot.bar
"""
# find the minimum distance between adjacent values
xs, ys = hist.Render()
if 'width' not in options:
try:
options['width'] = 0.9 * np.diff(xs).min()
except TypeError:
warnings.warn("Hist: Can't compute bar width automatically."
"Check for non-numeric types in Hist."
"Or try providing width option."
)
options = _Underride(options, label=hist.label)
options = _Underride(options, align='center')
if options['align'] == 'left':
options['align'] = 'edge'
elif options['align'] == 'right':
options['align'] = 'edge'
options['width'] *= -1
Bar(xs, ys, **options)
def Hists(hists, **options):
"""Plots two histograms as interleaved bar plots.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
hists: list of two Hist or Pmf objects
options: keyword args passed to pyplot.plot
"""
for hist in hists:
Hist(hist, **options)
def Pmf(pmf, **options):
"""Plots a Pmf or Hist as a line.
Args:
pmf: Hist or Pmf object
options: keyword args passed to pyplot.plot
"""
xs, ys = pmf.Render()
low, high = min(xs), max(xs)
width = options.pop('width', None)
if width is None:
try:
width = np.diff(xs).min()
except TypeError:
warnings.warn("Pmf: Can't compute bar width automatically."
"Check for non-numeric types in Pmf."
"Or try providing width option.")
points = []
lastx = np.nan
lasty = 0
for x, y in zip(xs, ys):
if (x - lastx) > 1e-5:
points.append((lastx, 0))
points.append((x, 0))
points.append((x, lasty))
points.append((x, y))
points.append((x+width, y))
lastx = x + width
lasty = y
points.append((lastx, 0))
pxs, pys = zip(*points)
align = options.pop('align', 'center')
if align == 'center':
pxs = np.array(pxs) - width/2.0
if align == 'right':
pxs = np.array(pxs) - width
options = _Underride(options, label=pmf.label)
Plot(pxs, pys, **options)
def Pmfs(pmfs, **options):
"""Plots a sequence of PMFs.
Options are passed along for all PMFs. If you want different
options for each pmf, make multiple calls to Pmf.
Args:
pmfs: sequence of PMF objects
options: keyword args passed to pyplot.plot
"""
for pmf in pmfs:
Pmf(pmf, **options)
def Diff(t):
    """Compute the differences between adjacent elements in a sequence.

    t: sequence of numbers

    Returns:
        list of consecutive differences (length one less than t)
    """
    return [after - before for before, after in zip(t, t[1:])]
def Cdf(cdf, complement=False, transform=None, **options):
    """Plots a CDF as a line.

    Args:
      cdf: Cdf object (must provide Render() and a label attribute)
      complement: boolean, whether to plot the complementary CDF
      transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
      options: keyword args passed to pyplot.plot

    Returns:
      dictionary with the scale options that should be passed to
      Config, Show or Save.
    """
    xs, ps = cdf.Render()
    xs = np.asarray(xs)
    ps = np.asarray(ps)

    scale = dict(xscale='linear', yscale='linear')

    # let the caller override either axis scale explicitly
    for s in ['xscale', 'yscale']:
        if s in options:
            scale[s] = options.pop(s)

    if transform == 'exponential':
        complement = True
        scale['yscale'] = 'log'

    if transform == 'pareto':
        complement = True
        scale['yscale'] = 'log'
        scale['xscale'] = 'log'

    if complement:
        ps = [1.0-p for p in ps]

    if transform == 'weibull':
        # drop the last point, where log(1-p) is undefined
        xs = np.delete(xs, -1)
        ps = np.delete(ps, -1)
        ps = [-math.log(1.0-p) for p in ps]
        scale['xscale'] = 'log'
        scale['yscale'] = 'log'

    if transform == 'gumbel':
        # drop the first point, where log(p) is undefined
        # BUG FIX: was `xp.delete(xs, 0)` -- a typo (`xp` is undefined)
        # that raised NameError whenever the gumbel transform was used.
        xs = np.delete(xs, 0)
        ps = np.delete(ps, 0)
        ps = [-math.log(p) for p in ps]
        scale['yscale'] = 'log'

    options = _Underride(options, label=cdf.label)
    Plot(xs, ps, **options)
    return scale
def Cdfs(cdfs, complement=False, transform=None, **options):
"""Plots a sequence of CDFs.
cdfs: sequence of CDF objects
complement: boolean, whether to plot the complementary CDF
transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
options: keyword args passed to pyplot.plot
"""
for cdf in cdfs:
Cdf(cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
"""Makes a contour plot.
d: map from (x, y) to z, or object that provides GetDict
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
imshow: boolean, whether to use pyplot.imshow
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
try:
d = obj.GetDict()
except AttributeError:
d = obj
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
xs, ys = zip(*d.keys())
xs = sorted(set(xs))
ys = sorted(set(ys))
X, Y = np.meshgrid(xs, ys)
func = lambda x, y: d.get((x, y), 0)
func = np.vectorize(func)
Z = func(X, Y)
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
if imshow:
extent = xs[0], xs[-1], ys[0], ys[-1]
pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
"""Makes a pseudocolor plot.
xs:
ys:
zs:
pcolor: boolean, whether to make a pseudocolor plot
contour: boolean, whether to make a contour plot
options: keyword args passed to pyplot.pcolor and/or pyplot.contour
"""
_Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
X, Y = np.meshgrid(xs, ys)
Z = zs
x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
axes = pyplot.gca()
axes.xaxis.set_major_formatter(x_formatter)
if pcolor:
pyplot.pcolormesh(X, Y, Z, **options)
if contour:
cs = pyplot.contour(X, Y, Z, **options)
pyplot.clabel(cs, inline=1, fontsize=10)
def Text(x, y, s, **options):
"""Puts text in a figure.
x: number
y: number
s: string
options: keyword args passed to pyplot.text
"""
options = _Underride(options,
fontsize=16,
verticalalignment='top',
horizontalalignment='left')
pyplot.text(x, y, s, **options)
LEGEND = True
LOC = None
def Config(**options):
"""Configures the plot.
Pulls options out of the option dictionary and passes them to
the corresponding pyplot functions.
"""
names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
'xticks', 'yticks', 'axis', 'xlim', 'ylim']
for name in names:
if name in options:
getattr(pyplot, name)(options[name])
# looks like this is not necessary: matplotlib understands text loc specs
loc_dict = {'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
global LEGEND
LEGEND = options.get('legend', LEGEND)
if LEGEND:
global LOC
LOC = options.get('loc', LOC)
pyplot.legend(loc=LOC)
def Show(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
pyplot.show()
if clf:
Clf()
def Plotly(**options):
"""Shows the plot.
For options, see Config.
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
import plotly.plotly as plotly
url = plotly.plot_mpl(pyplot.gcf())
if clf:
Clf()
return url
def Save(root=None, formats=None, **options):
"""Saves the plot in the given formats and clears the figure.
For options, see Config.
Args:
root: string filename root
formats: list of string formats
options: keyword args used to invoke various pyplot functions
"""
clf = options.pop('clf', True)
Config(**options)
if formats is None:
formats = ['pdf', 'eps']
try:
formats.remove('plotly')
Plotly(clf=False)
except ValueError:
pass
if root:
for fmt in formats:
SaveFormat(root, fmt)
if clf:
Clf()
def SaveFormat(root, fmt='eps'):
"""Writes the current figure to a file in the given format.
Args:
root: string filename root
fmt: string format
"""
filename = '%s.%s' % (root, fmt)
print('Writing', filename)
pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functons with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
text = Text
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
    """Smoke test: print each color of a seven-color Brewer rotation."""
    for color in _Brewer.ColorGenerator(7):
        print(color)

if __name__ == '__main__':
    main()
| gpl-3.0 |
twobob/buildroot-kindle | output/build/host-libglib2-2.30.3/gio/gdbus-2.0/codegen/parser.py | 23 | 12599 | # -*- Mode: Python -*-
# GDBus - GLib D-Bus Library
#
# Copyright (C) 2008-2011 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
#
# Author: David Zeuthen <davidz@redhat.com>
import sys
import xml.parsers.expat
from . import dbustypes
class DBusXMLParser:
STATE_TOP = 'top'
STATE_NODE = 'node'
STATE_INTERFACE = 'interface'
STATE_METHOD = 'method'
STATE_SIGNAL = 'signal'
STATE_PROPERTY = 'property'
STATE_ARG = 'arg'
STATE_ANNOTATION = 'annotation'
STATE_IGNORED = 'ignored'
def __init__(self, xml_data):
self._parser = xml.parsers.expat.ParserCreate()
self._parser.CommentHandler = self.handle_comment
self._parser.CharacterDataHandler = self.handle_char_data
self._parser.StartElementHandler = self.handle_start_element
self._parser.EndElementHandler = self.handle_end_element
self.parsed_interfaces = []
self._cur_object = None
self.state = DBusXMLParser.STATE_TOP
self.state_stack = []
self._cur_object = None
self._cur_object_stack = []
self.doc_comment_last_symbol = ''
self._parser.Parse(xml_data)
COMMENT_STATE_BEGIN = 'begin'
COMMENT_STATE_PARAMS = 'params'
COMMENT_STATE_BODY = 'body'
COMMENT_STATE_SKIP = 'skip'
def handle_comment(self, data):
comment_state = DBusXMLParser.COMMENT_STATE_BEGIN;
lines = data.split('\n')
symbol = ''
body = ''
in_para = False
params = {}
for line in lines:
orig_line = line
line = line.lstrip()
if comment_state == DBusXMLParser.COMMENT_STATE_BEGIN:
if len(line) > 0:
colon_index = line.find(': ')
if colon_index == -1:
if line.endswith(':'):
symbol = line[0:len(line)-1]
comment_state = DBusXMLParser.COMMENT_STATE_PARAMS
else:
comment_state = DBusXMLParser.COMMENT_STATE_SKIP
else:
symbol = line[0:colon_index]
rest_of_line = line[colon_index+2:].strip()
if len(rest_of_line) > 0:
body += '<para>' + rest_of_line + '</para>'
comment_state = DBusXMLParser.COMMENT_STATE_PARAMS
elif comment_state == DBusXMLParser.COMMENT_STATE_PARAMS:
if line.startswith('@'):
colon_index = line.find(': ')
if colon_index == -1:
comment_state = DBusXMLParser.COMMENT_STATE_BODY
if not in_para:
body += '<para>'
in_para = True
body += orig_line + '\n'
else:
param = line[1:colon_index]
docs = line[colon_index + 2:]
params[param] = docs
else:
comment_state = DBusXMLParser.COMMENT_STATE_BODY
if len(line) > 0:
if not in_para:
body += '<para>'
in_para = True
body += orig_line + '\n'
elif comment_state == DBusXMLParser.COMMENT_STATE_BODY:
if len(line) > 0:
if not in_para:
body += '<para>'
in_para = True
body += orig_line + '\n'
else:
if in_para:
body += '</para>'
in_para = False
if in_para:
body += '</para>'
if symbol != '':
self.doc_comment_last_symbol = symbol
self.doc_comment_params = params
self.doc_comment_body = body
def handle_char_data(self, data):
#print 'char_data=%s'%data
pass
def _enter_annotation(self, attrs):
    # Shared handling for an <annotation> element: record it on the
    # current object and make it current so nested annotations attach
    # to it (the previous object is restored by handle_end_element).
    self.state = DBusXMLParser.STATE_ANNOTATION
    anno = dbustypes.Annotation(attrs['name'], attrs['value'])
    self._cur_object.annotations.append(anno)
    self._cur_object = anno

def _assign_arg_docs(self, old_cur_object, attrs):
    # Attach "@argname: docs" parameter documentation captured from the
    # doc comment that preceded the enclosing method/signal element.
    if self.doc_comment_last_symbol == old_cur_object.name:
        if 'name' in attrs and attrs['name'] in self.doc_comment_params:
            doc_string = self.doc_comment_params[attrs['name']]
            if doc_string is not None:
                self._cur_object.doc_string = doc_string
            if 'since' in self.doc_comment_params:
                self._cur_object.since = self.doc_comment_params['since']

def handle_start_element(self, name, attrs):
    """Expat StartElementHandler.

    Drives a state machine mirroring the D-Bus introspection XML
    structure (node -> interface -> method/signal/property -> arg,
    with annotations allowed at most levels), building dbustypes
    objects and attaching documentation captured from the XML comment
    immediately preceding the element.  Unknown elements switch to
    STATE_IGNORED, which swallows everything nested inside them.

    Raises:
        RuntimeError: on an unknown parser state, or on an invalid
            "direction" attribute on a method argument.
    """
    old_state = self.state
    old_cur_object = self._cur_object
    if self.state == DBusXMLParser.STATE_IGNORED:
        # Anything nested under an ignored element stays ignored.
        self.state = DBusXMLParser.STATE_IGNORED
    elif self.state == DBusXMLParser.STATE_TOP:
        if name == DBusXMLParser.STATE_NODE:
            self.state = DBusXMLParser.STATE_NODE
        else:
            self.state = DBusXMLParser.STATE_IGNORED
    elif self.state == DBusXMLParser.STATE_NODE:
        if name == DBusXMLParser.STATE_INTERFACE:
            self.state = DBusXMLParser.STATE_INTERFACE
            iface = dbustypes.Interface(attrs['name'])
            self._cur_object = iface
            self.parsed_interfaces.append(iface)
        elif name == DBusXMLParser.STATE_ANNOTATION:
            self._enter_annotation(attrs)
        else:
            self.state = DBusXMLParser.STATE_IGNORED

        # assign docs, if any
        if 'name' in attrs and self.doc_comment_last_symbol == attrs['name']:
            self._cur_object.doc_string = self.doc_comment_body
            if 'short_description' in self.doc_comment_params:
                short_description = self.doc_comment_params['short_description']
                self._cur_object.doc_string_brief = short_description
            if 'since' in self.doc_comment_params:
                self._cur_object.since = self.doc_comment_params['since']
    elif self.state == DBusXMLParser.STATE_INTERFACE:
        if name == DBusXMLParser.STATE_METHOD:
            self.state = DBusXMLParser.STATE_METHOD
            method = dbustypes.Method(attrs['name'])
            self._cur_object.methods.append(method)
            self._cur_object = method
        elif name == DBusXMLParser.STATE_SIGNAL:
            self.state = DBusXMLParser.STATE_SIGNAL
            signal = dbustypes.Signal(attrs['name'])
            self._cur_object.signals.append(signal)
            self._cur_object = signal
        elif name == DBusXMLParser.STATE_PROPERTY:
            self.state = DBusXMLParser.STATE_PROPERTY
            prop = dbustypes.Property(attrs['name'], attrs['type'],
                                      attrs['access'])
            self._cur_object.properties.append(prop)
            self._cur_object = prop
        elif name == DBusXMLParser.STATE_ANNOTATION:
            self._enter_annotation(attrs)
        else:
            self.state = DBusXMLParser.STATE_IGNORED

        # assign docs, if any
        if 'name' in attrs and self.doc_comment_last_symbol == attrs['name']:
            self._cur_object.doc_string = self.doc_comment_body
            if 'since' in self.doc_comment_params:
                self._cur_object.since = self.doc_comment_params['since']
    elif self.state == DBusXMLParser.STATE_METHOD:
        if name == DBusXMLParser.STATE_ARG:
            self.state = DBusXMLParser.STATE_ARG
            # The "name" attribute is optional for <arg> elements.
            arg = dbustypes.Arg(attrs.get('name'), attrs['type'])
            # The D-Bus introspection format makes "direction" optional
            # for method args, defaulting to "in"; the previous
            # attrs['direction'] raised KeyError on such input.
            direction = attrs.get('direction', 'in')
            if direction == 'in':
                self._cur_object.in_args.append(arg)
            elif direction == 'out':
                self._cur_object.out_args.append(arg)
            else:
                raise RuntimeError('Invalid direction "%s"' % (direction))
            self._cur_object = arg
        elif name == DBusXMLParser.STATE_ANNOTATION:
            self._enter_annotation(attrs)
        else:
            self.state = DBusXMLParser.STATE_IGNORED

        # assign docs, if any
        self._assign_arg_docs(old_cur_object, attrs)
    elif self.state == DBusXMLParser.STATE_SIGNAL:
        if name == DBusXMLParser.STATE_ARG:
            self.state = DBusXMLParser.STATE_ARG
            arg = dbustypes.Arg(attrs.get('name'), attrs['type'])
            self._cur_object.args.append(arg)
            self._cur_object = arg
        elif name == DBusXMLParser.STATE_ANNOTATION:
            self._enter_annotation(attrs)
        else:
            self.state = DBusXMLParser.STATE_IGNORED

        # assign docs, if any
        self._assign_arg_docs(old_cur_object, attrs)
    elif self.state == DBusXMLParser.STATE_PROPERTY:
        if name == DBusXMLParser.STATE_ANNOTATION:
            self._enter_annotation(attrs)
        else:
            self.state = DBusXMLParser.STATE_IGNORED
    elif self.state == DBusXMLParser.STATE_ARG:
        if name == DBusXMLParser.STATE_ANNOTATION:
            self._enter_annotation(attrs)
        else:
            self.state = DBusXMLParser.STATE_IGNORED
    elif self.state == DBusXMLParser.STATE_ANNOTATION:
        if name == DBusXMLParser.STATE_ANNOTATION:
            self._enter_annotation(attrs)
        else:
            self.state = DBusXMLParser.STATE_IGNORED
    else:
        raise RuntimeError(
            'Unhandled state "%s" while entering element with name "%s"'
            % (self.state, name))

    # Save state so handle_end_element() can restore it when this
    # element closes.
    self.state_stack.append(old_state)
    self._cur_object_stack.append(old_cur_object)
def handle_end_element(self, name):
    """Expat EndElementHandler: restore the parser state and current
    object that handle_start_element() saved when the matching start
    tag was entered."""
    self.state = self.state_stack.pop()
    self._cur_object = self._cur_object_stack.pop()
def parse_dbus_xml(xml_data):
    """Parse D-Bus introspection XML and return the list of
    dbustypes.Interface objects found in it."""
    return DBusXMLParser(xml_data).parsed_interfaces
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.