repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
bplancher/odoo | openerp/cli/scaffold.py | 27 | 4379 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import re
import sys
import jinja2
from . import Command
from openerp.modules.module import (get_module_root, MANIFEST, load_information_from_description_file as load_manifest)
class Scaffold(Command):
""" Generates an Odoo module skeleton. """
def run(self, cmdargs):
# TODO: bash completion file
parser = argparse.ArgumentParser(
prog="%s scaffold" % sys.argv[0].split(os.path.sep)[-1],
description=self.__doc__,
epilog=self.epilog(),
)
parser.add_argument(
'-t', '--template', type=template, default=template('default'),
help="Use a custom module template, can be a template name or the"
" path to a module template (default: %(default)s)")
parser.add_argument('name', help="Name of the module to create")
parser.add_argument(
'dest', default='.', nargs='?',
help="Directory to create the module in (default: %(default)s)")
if not cmdargs:
sys.exit(parser.print_help())
args = parser.parse_args(args=cmdargs)
args.template.render_to(
snake(args.name),
directory(args.dest, create=True),
{'name': args.name})
def epilog(self):
return "Built-in templates available are: %s" % ', '.join(
d for d in os.listdir(builtins())
if d != 'base'
)
builtins = lambda *args: os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'templates',
*args)
def snake(s):
""" snake cases ``s``
:param str s:
:return: str
"""
# insert a space before each uppercase character preceded by a
# non-uppercase letter
s = re.sub(r'(?<=[^A-Z])\B([A-Z])', r' \1', s)
# lowercase everything, split on whitespace and join
return '_'.join(s.lower().split())
def pascal(s):
return ''.join(
ss.capitalize()
for ss in re.sub(r'[_\s]+', ' ', s).split()
)
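# For illustration, the helpers above behave as follows:
#   snake('PluginCharm')   -> 'plugin_charm'
#   pascal('plugin_charm') -> 'PluginCharm'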
def directory(p, create=False):
expanded = os.path.abspath(
os.path.expanduser(
os.path.expandvars(p)))
if create and not os.path.exists(expanded):
os.makedirs(expanded)
if not os.path.isdir(expanded):
die("%s is not a directory" % p)
return expanded
env = jinja2.Environment()
env.filters['snake'] = snake
env.filters['pascal'] = pascal
class template(object):
def __init__(self, identifier):
# TODO: archives (zipfile, tarfile)
self.id = identifier
# is identifier a builtin?
self.path = builtins(identifier)
if os.path.isdir(self.path):
return
# is identifier a directory?
self.path = identifier
if os.path.isdir(self.path):
return
die("{} is not a valid module template".format(identifier))
def __str__(self):
return self.id
def files(self):
""" Lists the (local) path and content of all files in the template
"""
for root, _, files in os.walk(self.path):
for f in files:
path = os.path.join(root, f)
yield path, open(path, 'rb').read()
def render_to(self, modname, directory, params=None):
""" Render this module template to ``dest`` with the provided
rendering parameters
"""
# overwrite with local
for path, content in self.files():
local = os.path.relpath(path, self.path)
# strip .template extension
root, ext = os.path.splitext(local)
if ext == '.template':
local = root
dest = os.path.join(directory, modname, local)
destdir = os.path.dirname(dest)
if not os.path.exists(destdir):
os.makedirs(destdir)
with open(dest, 'wb') as f:
if ext not in ('.py', '.xml', '.csv', '.js', '.rst', '.html', '.template'):
f.write(content)
else:
env.from_string(content.decode('utf-8'))\
.stream(params or {})\
.dump(f, encoding='utf-8')
def die(message, code=1):
print >>sys.stderr, message
sys.exit(code)
def warn(message):
# ASK: shall we use logger ?
print "WARNING: " + message
| agpl-3.0 |
ltilve/ChromiumGStreamerBackend | tools/telemetry/telemetry/internal/backends/mandoline/desktop_mandoline_finder.py | 9 | 5077 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds desktop mandoline browsers that can be controlled by telemetry."""
import os
import sys
from telemetry.core import exceptions
from telemetry.core import platform as platform_module
from telemetry.internal.backends.mandoline import desktop_mandoline_backend
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import desktop_device
from telemetry.internal.util import path
class PossibleDesktopMandolineBrowser(possible_browser.PossibleBrowser):
"""A desktop mandoline browser that can be controlled."""
def __init__(self, browser_type, finder_options, executable,
browser_directory):
target_os = sys.platform.lower()
super(PossibleDesktopMandolineBrowser, self).__init__(
browser_type, target_os, supports_tab_control=False)
assert browser_type in FindAllBrowserTypes(finder_options), (
'Please add %s to desktop_mandoline_finder.FindAllBrowserTypes' %
browser_type)
self._local_executable = executable
self._browser_directory = browser_directory
def __repr__(self):
return 'PossibleDesktopMandolineBrowser(type=%s, executable=%s)' % (
self.browser_type, self._local_executable)
def _InitPlatformIfNeeded(self):
if self._platform:
return
self._platform = platform_module.GetHostPlatform()
# pylint: disable=W0212
self._platform_backend = self._platform._platform_backend
def Create(self, finder_options):
self._InitPlatformIfNeeded()
mandoline_backend = desktop_mandoline_backend.DesktopMandolineBackend(
self._platform_backend, finder_options.browser_options,
self._local_executable, self._browser_directory)
return browser.Browser(
mandoline_backend, self._platform_backend, self._credentials_path)
def SupportsOptions(self, finder_options):
if len(finder_options.extensions_to_load) != 0:
return False
return True
def UpdateExecutableIfNeeded(self):
pass
def last_modification_time(self):
if os.path.exists(self._local_executable):
return os.path.getmtime(self._local_executable)
return -1
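# SelectDefaultBrowser below prefers the most recently built binary, using
# the executable mtime reported by last_modification_time() as the key.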
def SelectDefaultBrowser(possible_browsers):
if not possible_browsers:
return None
return max(possible_browsers, key=lambda b: b.last_modification_time())
def CanFindAvailableBrowsers():
os_name = platform_module.GetHostPlatform().GetOSName()
return os_name == 'win' or os_name == 'linux'
def CanPossiblyHandlePath(target_path):
_, extension = os.path.splitext(target_path.lower())
if sys.platform.startswith('linux'):
return not extension
elif sys.platform.startswith('win'):
return extension == '.exe'
return False
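# e.g. CanPossiblyHandlePath('out/Release/mandoline') is True on Linux (no
# extension) and CanPossiblyHandlePath('mandoline.exe') is True on Windows;
# anything else is rejected.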
def FindAllBrowserTypes(_):
return [
'mandoline-debug',
'mandoline-debug_x64',
'mandoline-release',
'mandoline-release_x64',]
def FindAllAvailableBrowsers(finder_options, device):
"""Finds all the desktop mandoline browsers available on this machine."""
if not isinstance(device, desktop_device.DesktopDevice):
return []
browsers = []
if not CanFindAvailableBrowsers():
return []
# Look for a browser in the standard chrome build locations.
if finder_options.chrome_root:
chrome_root = finder_options.chrome_root
else:
chrome_root = path.GetChromiumSrcDir()
if sys.platform.startswith('linux'):
mandoline_app_name = 'mandoline'
elif sys.platform.startswith('win'):
mandoline_app_name = 'mandoline.exe'
else:
raise Exception('Platform not recognized')
# Add the explicit browser executable if given and we can handle it.
if (finder_options.browser_executable and
CanPossiblyHandlePath(finder_options.browser_executable)):
normalized_executable = os.path.expanduser(
finder_options.browser_executable)
if path.IsExecutable(normalized_executable):
browser_directory = os.path.dirname(finder_options.browser_executable)
browsers.append(PossibleDesktopMandolineBrowser('exact', finder_options,
normalized_executable,
browser_directory))
else:
raise exceptions.PathMissingError(
'%s specified by --browser-executable does not exist' %
normalized_executable)
def AddIfFound(browser_type, build_dir, type_dir, app_name):
browser_directory = os.path.join(chrome_root, build_dir, type_dir)
app = os.path.join(browser_directory, app_name)
if path.IsExecutable(app):
browsers.append(PossibleDesktopMandolineBrowser(
browser_type, finder_options, app, browser_directory))
return True
return False
# Add local builds.
for build_dir, build_type in path.GetBuildDirectories():
AddIfFound('mandoline-' + build_type.lower(), build_dir, build_type,
mandoline_app_name)
return browsers
| bsd-3-clause |
thinkgen/thirdparty | script.module.urlresolver/lib/urlresolver/plugins/movzap.py | 3 | 2815 | """
movzap|zuzvideo urlresolver plugin
Copyright (C) 2012 Lynx187
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
import urllib2
from urlresolver import common
from lib import jsunpack
import xbmcgui
import re
import time
import os
# 'error_logo' is referenced in the error popups below but its definition was
# lost from this copy; the original plugin pointed it at an image bundled
# with the add-on (the attribute and path here are assumptions):
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class MovzapZuzVideoResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "movzap|zuzvideo"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
try:
resp = self.net.http_GET(web_url)
html = resp.content
r = re.search('file: "(.+?)",', html)
if r:
return r.group(1)
raise Exception('movzap|zuzvideo: could not obtain video url')
# only urllib2.HTTPError carries a .code attribute; plain URLErrors fall
# through to the generic Exception handler below
except urllib2.HTTPError, e:
common.addon.log_error('Movzap: got http error %d fetching %s' %
(e.code, web_url))
common.addon.show_small_popup('Error','Http error: '+str(e), 5000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log_error('**** Movzap Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]MOVZAP[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return host + media_id
def get_host_and_id(self, url):
#r = re.search('http://(?:www.)?(.+?)/([0-9A-Za-z]+)', url)
r = re.search('(http://(?:www.|)(?:.+?)/)([0-9A-Za-z]+)', url)
if r:
return r.groups()
else:
return False
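# For illustration (hypothetical URL):
#   get_host_and_id('http://movzap.com/abc123')
#   -> ('http://movzap.com/', 'abc123')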
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match('http://(?:www.|)(?:movzap|zuzvideo).com/[0-9A-Za-z]+', url) or 'movzap' in host or 'zuzvideo' in host
| gpl-2.0 |
debugger06/MiroX | lib/test/databasesanitytest.py | 1 | 2065 | """Test database sanity checking. Right now this is pretty short
because we don't do that much sanity checking.
"""
import os
from miro import item
from miro import feed
from miro import databasesanity
from miro.fileobject import FilenameType
from miro.test.framework import MiroTestCase
class SanityCheckingTest(MiroTestCase):
def setUp(self):
MiroTestCase.setUp(self)
self.save_path = self.make_temp_path()
def tearDown(self):
try:
os.unlink(self.save_path)
except OSError:
pass
MiroTestCase.tearDown(self)
def check_object_list_fails_test(self, object_list):
self.assertRaises(databasesanity.DatabaseInsaneError,
databasesanity.check_sanity, object_list, False)
def check_fix_if_possible(self, start_list, fixed_list):
self.error_signal_okay = True
rv = databasesanity.check_sanity(start_list)
self.assertEquals(start_list, fixed_list)
self.assertEquals(rv, False)
self.assertEquals(self.saw_error, True)
def check_object_list_passes_test(self, object_list):
databasesanity.check_sanity(object_list)
def test_phantom_feed_checking(self):
f = feed.Feed(u"http://feed.uk")
i = item.Item(item.FeedParserValues({}), feed_id=f.id)
i2 = item.FileItem(FilenameType('/foo/bar.txt'), feed_id=f.id)
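# i alone is a "phantom": its feed_id references a feed that is missing from
# the checked object list, so the sanity check must fail for [i].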
self.check_object_list_fails_test([i])
self.check_fix_if_possible([i, i2], [])
self.check_object_list_passes_test([i, f])
self.check_object_list_passes_test([])
def test_manual_feed_checking(self):
f = feed.Feed(u"dtv:manualFeed")
f2 = feed.Feed(u"dtv:manualFeed")
f3 = feed.Feed(u"dtv:manualFeed")
self.check_object_list_passes_test([f])
self.check_object_list_fails_test([f, f2])
self.error_signal_okay = True
test_list = [f, f2, f3]
databasesanity.check_sanity(test_list)
self.assertEquals(len(test_list), 1)
self.assertEquals(self.saw_error, True)
| gpl-2.0 |
hstaugaard/notejam | django/notejam/pads/views.py | 6 | 2449 | from django.contrib import messages
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic import ListView
from pads.models import Pad
from notes.models import Note
from pads.forms import PadForm
class PadCreateView(CreateView):
model = Pad
form_class = PadForm
template_name_suffix = '_create'
success_url = reverse_lazy('home')
success_message = 'Pad is successfully created'
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.user = self.request.user
self.object.save()
messages.success(self.request, self.success_message)
return redirect(self.get_success_url())
def get_success_url(self):
return reverse_lazy("view_pad_notes", kwargs={'pk': self.object.pk})
class PadUpdateView(UpdateView):
model = Pad
form_class = PadForm
template_name_suffix = '_edit'
success_url = reverse_lazy('home')
success_message = 'Pad is successfully updated'
def form_valid(self, form):
messages.success(self.request, self.success_message)
return super(PadUpdateView, self).form_valid(form)
def get_queryset(self):
qs = super(PadUpdateView, self).get_queryset()
return qs.filter(user=self.request.user)
def get_success_url(self):
return reverse_lazy("view_pad_notes", kwargs={'pk': self.object.pk})
# Note list mixed with pad details data
class PadNotesListView(ListView):
model = Note
context_object_name = 'notes'
order_by = '-updated_at'
template_name = 'pads/pad_note_list.html'
def get_queryset(self):
order_by = self.request.GET.get('order', self.order_by)
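# NOTE: the user-supplied 'order' parameter is passed straight to
# order_by(); Django raises FieldError for invalid field names here.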
return self.get_pad().note_set.all().order_by(order_by)
def get_pad(self):
return get_object_or_404(
Pad, pk=int(self.kwargs.get('pk')), user=self.request.user
)
def get_context_data(self, **kwargs):
context = super(PadNotesListView, self).get_context_data(**kwargs)
context['pad'] = self.get_pad()
return context
class PadDeleteView(DeleteView):
model = Pad
success_url = reverse_lazy("home")
def get_queryset(self):
qs = super(PadDeleteView, self).get_queryset()
return qs.filter(user=self.request.user)
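# A plausible URLconf wiring for the views above (patterns and most names are
# assumptions; 'view_pad_notes' and 'home' are referenced in this file):
#   url(r'^pads/create/$', PadCreateView.as_view(), name='create_pad'),
#   url(r'^pads/(?P<pk>\d+)/$', PadNotesListView.as_view(), name='view_pad_notes'),
#   url(r'^pads/(?P<pk>\d+)/edit/$', PadUpdateView.as_view(), name='edit_pad'),
#   url(r'^pads/(?P<pk>\d+)/delete/$', PadDeleteView.as_view(), name='delete_pad'),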
| mit |
snnn/bazel | third_party/protobuf/3.4.0/python/google/protobuf/internal/descriptor_pool_test.py | 5 | 38193 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_pool."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
import os
import sys
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_import_public_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import descriptor_pool_test1_pb2
from google.protobuf.internal import descriptor_pool_test2_pb2
from google.protobuf.internal import factory_test1_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf.internal import file_options_test_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf import descriptor
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
from google.protobuf import symbol_database
class DescriptorPoolTest(unittest.TestCase):
def setUp(self):
# TODO(jieluo): Should make the pool which is created by
# serialized_pb same with generated pool.
# TODO(jieluo): More test coverage for the generated pool.
self.pool = descriptor_pool.DescriptorPool()
self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test1_pb2.DESCRIPTOR.serialized_pb)
self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(self.factory_test1_fd)
self.pool.Add(self.factory_test2_fd)
self.pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_public_pb2.DESCRIPTOR.serialized_pb))
self.pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_pb2.DESCRIPTOR.serialized_pb))
self.pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb))
def testFindFileByName(self):
name1 = 'google/protobuf/internal/factory_test1.proto'
file_desc1 = self.pool.FindFileByName(name1)
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEqual(name1, file_desc1.name)
self.assertEqual('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
name2 = 'google/protobuf/internal/factory_test2.proto'
file_desc2 = self.pool.FindFileByName(name2)
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEqual(name2, file_desc2.name)
self.assertEqual('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
def testFindFileByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileByName('Does not exist')
def testFindFileContainingSymbol(self):
file_desc1 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test1.proto',
file_desc1.name)
self.assertEqual('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
file_desc2 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc2.name)
self.assertEqual('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
# Tests top level extension.
file_desc3 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.another_field')
self.assertIsInstance(file_desc3, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc3.name)
# Tests nested extension inside a message.
file_desc4 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.one_more_field')
self.assertIsInstance(file_desc4, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc4.name)
file_desc5 = self.pool.FindFileContainingSymbol(
'protobuf_unittest.TestService')
self.assertIsInstance(file_desc5, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/unittest.proto',
file_desc5.name)
# Tests the generated pool.
assert descriptor_pool.Default().FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.one_more_field')
assert descriptor_pool.Default().FindFileContainingSymbol(
'google.protobuf.python.internal.another_field')
assert descriptor_pool.Default().FindFileContainingSymbol(
'protobuf_unittest.TestService')
def testFindFileContainingSymbolFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileContainingSymbol('Does not exist')
def testFindMessageTypeByName(self):
msg1 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(msg1, descriptor.Descriptor)
self.assertEqual('Factory1Message', msg1.name)
self.assertEqual('google.protobuf.python.internal.Factory1Message',
msg1.full_name)
self.assertEqual(None, msg1.containing_type)
self.assertFalse(msg1.has_options)
nested_msg1 = msg1.nested_types[0]
self.assertEqual('NestedFactory1Message', nested_msg1.name)
self.assertEqual(msg1, nested_msg1.containing_type)
nested_enum1 = msg1.enum_types[0]
self.assertEqual('NestedFactory1Enum', nested_enum1.name)
self.assertEqual(msg1, nested_enum1.containing_type)
self.assertEqual(nested_msg1, msg1.fields_by_name[
'nested_factory_1_message'].message_type)
self.assertEqual(nested_enum1, msg1.fields_by_name[
'nested_factory_1_enum'].enum_type)
msg2 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(msg2, descriptor.Descriptor)
self.assertEqual('Factory2Message', msg2.name)
self.assertEqual('google.protobuf.python.internal.Factory2Message',
msg2.full_name)
self.assertIsNone(msg2.containing_type)
nested_msg2 = msg2.nested_types[0]
self.assertEqual('NestedFactory2Message', nested_msg2.name)
self.assertEqual(msg2, nested_msg2.containing_type)
nested_enum2 = msg2.enum_types[0]
self.assertEqual('NestedFactory2Enum', nested_enum2.name)
self.assertEqual(msg2, nested_enum2.containing_type)
self.assertEqual(nested_msg2, msg2.fields_by_name[
'nested_factory_2_message'].message_type)
self.assertEqual(nested_enum2, msg2.fields_by_name[
'nested_factory_2_enum'].enum_type)
self.assertTrue(msg2.fields_by_name['int_with_default'].has_default_value)
self.assertEqual(
1776, msg2.fields_by_name['int_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['double_with_default'].has_default_value)
self.assertEqual(
9.99, msg2.fields_by_name['double_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['string_with_default'].has_default_value)
self.assertEqual(
'hello world', msg2.fields_by_name['string_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['bool_with_default'].has_default_value)
self.assertFalse(msg2.fields_by_name['bool_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['enum_with_default'].has_default_value)
self.assertEqual(
1, msg2.fields_by_name['enum_with_default'].default_value)
msg3 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Message')
self.assertEqual(nested_msg2, msg3)
self.assertTrue(msg2.fields_by_name['bytes_with_default'].has_default_value)
self.assertEqual(
b'a\xfb\x00c',
msg2.fields_by_name['bytes_with_default'].default_value)
self.assertEqual(1, len(msg2.oneofs))
self.assertEqual(1, len(msg2.oneofs_by_name))
self.assertEqual(2, len(msg2.oneofs[0].fields))
for name in ['oneof_int', 'oneof_string']:
self.assertEqual(msg2.oneofs[0],
msg2.fields_by_name[name].containing_oneof)
self.assertIn(msg2.fields_by_name[name], msg2.oneofs[0].fields)
def testFindMessageTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindMessageTypeByName('Does not exist')
def testFindEnumTypeByName(self):
enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Enum')
self.assertIsInstance(enum1, descriptor.EnumDescriptor)
self.assertEqual(0, enum1.values_by_name['FACTORY_1_VALUE_0'].number)
self.assertEqual(1, enum1.values_by_name['FACTORY_1_VALUE_1'].number)
self.assertFalse(enum1.has_options)
nested_enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Message.NestedFactory1Enum')
self.assertIsInstance(nested_enum1, descriptor.EnumDescriptor)
self.assertEqual(
0, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_0'].number)
self.assertEqual(
1, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_1'].number)
enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Enum')
self.assertIsInstance(enum2, descriptor.EnumDescriptor)
self.assertEqual(0, enum2.values_by_name['FACTORY_2_VALUE_0'].number)
self.assertEqual(1, enum2.values_by_name['FACTORY_2_VALUE_1'].number)
nested_enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum')
self.assertIsInstance(nested_enum2, descriptor.EnumDescriptor)
self.assertEqual(
0, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_0'].number)
self.assertEqual(
1, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_1'].number)
def testFindEnumTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindEnumTypeByName('Does not exist')
def testFindFieldByName(self):
field = self.pool.FindFieldByName(
'google.protobuf.python.internal.Factory1Message.list_value')
self.assertEqual(field.name, 'list_value')
self.assertEqual(field.label, field.LABEL_REPEATED)
self.assertFalse(field.has_options)
with self.assertRaises(KeyError):
self.pool.FindFieldByName('Does not exist')
def testFindExtensionByName(self):
# An extension defined in a message.
extension = self.pool.FindExtensionByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
self.assertEqual(extension.name, 'one_more_field')
# An extension defined at file scope.
extension = self.pool.FindExtensionByName(
'google.protobuf.python.internal.another_field')
self.assertEqual(extension.name, 'another_field')
self.assertEqual(extension.number, 1002)
with self.assertRaises(KeyError):
self.pool.FindFieldByName('Does not exist')
def testFindAllExtensions(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
factory2_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
# An extension defined in a message.
one_more_field = factory2_message.extensions_by_name['one_more_field']
self.pool.AddExtensionDescriptor(one_more_field)
# An extension defined at file scope.
factory_test2 = self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto')
another_field = factory_test2.extensions_by_name['another_field']
self.pool.AddExtensionDescriptor(another_field)
extensions = self.pool.FindAllExtensions(factory1_message)
expected_extension_numbers = set([one_more_field, another_field])
self.assertEqual(expected_extension_numbers, set(extensions))
# Verify that mutating the returned list does not affect the pool.
extensions.append('unexpected_element')
# Get the extensions again, the returned value does not contain the
# 'unexpected_element'.
extensions = self.pool.FindAllExtensions(factory1_message)
self.assertEqual(expected_extension_numbers, set(extensions))
def testFindExtensionByNumber(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
factory2_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
# An extension defined in a message.
one_more_field = factory2_message.extensions_by_name['one_more_field']
self.pool.AddExtensionDescriptor(one_more_field)
# An extension defined at file scope.
factory_test2 = self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto')
another_field = factory_test2.extensions_by_name['another_field']
self.pool.AddExtensionDescriptor(another_field)
# An extension defined in a message.
extension = self.pool.FindExtensionByNumber(factory1_message, 1001)
self.assertEqual(extension.name, 'one_more_field')
# An extension defined at file scope.
extension = self.pool.FindExtensionByNumber(factory1_message, 1002)
self.assertEqual(extension.name, 'another_field')
with self.assertRaises(KeyError):
extension = self.pool.FindExtensionByNumber(factory1_message, 1234567)
def testExtensionsAreNotFields(self):
with self.assertRaises(KeyError):
self.pool.FindFieldByName('google.protobuf.python.internal.another_field')
with self.assertRaises(KeyError):
self.pool.FindFieldByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
with self.assertRaises(KeyError):
self.pool.FindExtensionByName(
'google.protobuf.python.internal.Factory1Message.list_value')
def testFindService(self):
service = self.pool.FindServiceByName('protobuf_unittest.TestService')
self.assertEqual(service.full_name, 'protobuf_unittest.TestService')
def testUserDefinedDB(self):
db = descriptor_database.DescriptorDatabase()
self.pool = descriptor_pool.DescriptorPool(db)
db.Add(self.factory_test1_fd)
db.Add(self.factory_test2_fd)
self.testFindMessageTypeByName()
def testAddSerializedFile(self):
self.pool = descriptor_pool.DescriptorPool()
self.pool.AddSerializedFile(self.factory_test1_fd.SerializeToString())
self.pool.AddSerializedFile(self.factory_test2_fd.SerializeToString())
self.testFindMessageTypeByName()
def testComplexNesting(self):
more_messages_desc = descriptor_pb2.FileDescriptorProto.FromString(
more_messages_pb2.DESCRIPTOR.serialized_pb)
test1_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
test2_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(more_messages_desc)
self.pool.Add(test1_desc)
self.pool.Add(test2_desc)
TEST1_FILE.CheckFile(self, self.pool)
TEST2_FILE.CheckFile(self, self.pool)
def testEnumDefaultValue(self):
"""Test the default value of enums which don't start at zero."""
def _CheckDefaultValue(file_descriptor):
default_value = (file_descriptor
.message_types_by_name['DescriptorPoolTest1']
.fields_by_name['nested_enum']
.default_value)
self.assertEqual(default_value,
descriptor_pool_test1_pb2.DescriptorPoolTest1.BETA)
# First check what the generated descriptor contains.
_CheckDefaultValue(descriptor_pool_test1_pb2.DESCRIPTOR)
# Then check the generated pool. Normally this is the same descriptor.
file_descriptor = symbol_database.Default().pool.FindFileByName(
'google/protobuf/internal/descriptor_pool_test1.proto')
self.assertIs(file_descriptor, descriptor_pool_test1_pb2.DESCRIPTOR)
_CheckDefaultValue(file_descriptor)
# Then check the dynamic pool and its internal DescriptorDatabase.
descriptor_proto = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(descriptor_proto)
# And do the same check as above
file_descriptor = self.pool.FindFileByName(
'google/protobuf/internal/descriptor_pool_test1.proto')
_CheckDefaultValue(file_descriptor)
def testDefaultValueForCustomMessages(self):
"""Check the value returned by non-existent fields."""
def _CheckValueAndType(value, expected_value, expected_type):
self.assertEqual(value, expected_value)
self.assertIsInstance(value, expected_type)
def _CheckDefaultValues(msg):
try:
int64 = long
except NameError: # Python3
int64 = int
try:
unicode_type = unicode
except NameError: # Python3
unicode_type = str
_CheckValueAndType(msg.optional_int32, 0, int)
_CheckValueAndType(msg.optional_uint64, 0, (int64, int))
_CheckValueAndType(msg.optional_float, 0, (float, int))
_CheckValueAndType(msg.optional_double, 0, (float, int))
_CheckValueAndType(msg.optional_bool, False, bool)
_CheckValueAndType(msg.optional_string, u'', unicode_type)
_CheckValueAndType(msg.optional_bytes, b'', bytes)
_CheckValueAndType(msg.optional_nested_enum, msg.FOO, int)
# First for the generated message
_CheckDefaultValues(unittest_pb2.TestAllTypes())
# Then for a message built with from the DescriptorPool.
pool = descriptor_pool.DescriptorPool()
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_public_pb2.DESCRIPTOR.serialized_pb))
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_pb2.DESCRIPTOR.serialized_pb))
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb))
message_class = message_factory.MessageFactory(pool).GetPrototype(
pool.FindMessageTypeByName(
unittest_pb2.TestAllTypes.DESCRIPTOR.full_name))
_CheckDefaultValues(message_class())
class ProtoFile(object):
def __init__(self, name, package, messages, dependencies=None,
public_dependencies=None):
self.name = name
self.package = package
self.messages = messages
self.dependencies = dependencies or []
self.public_dependencies = public_dependencies or []
def CheckFile(self, test, pool):
file_desc = pool.FindFileByName(self.name)
test.assertEqual(self.name, file_desc.name)
test.assertEqual(self.package, file_desc.package)
dependencies_names = [f.name for f in file_desc.dependencies]
test.assertEqual(self.dependencies, dependencies_names)
public_dependencies_names = [f.name for f in file_desc.public_dependencies]
test.assertEqual(self.public_dependencies, public_dependencies_names)
for name, msg_type in self.messages.items():
msg_type.CheckType(test, None, name, file_desc)
class EnumType(object):
def __init__(self, values):
self.values = values
def CheckType(self, test, msg_desc, name, file_desc):
enum_desc = msg_desc.enum_types_by_name[name]
test.assertEqual(name, enum_desc.name)
expected_enum_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_enum_full_name, enum_desc.full_name)
test.assertEqual(msg_desc, enum_desc.containing_type)
test.assertEqual(file_desc, enum_desc.file)
for index, (value, number) in enumerate(self.values):
value_desc = enum_desc.values_by_name[value]
test.assertEqual(value, value_desc.name)
test.assertEqual(index, value_desc.index)
test.assertEqual(number, value_desc.number)
test.assertEqual(enum_desc, value_desc.type)
test.assertIn(value, msg_desc.enum_values_by_name)
class MessageType(object):
def __init__(self, type_dict, field_list, is_extendable=False,
extensions=None):
self.type_dict = type_dict
self.field_list = field_list
self.is_extendable = is_extendable
self.extensions = extensions or []
def CheckType(self, test, containing_type_desc, name, file_desc):
if containing_type_desc is None:
desc = file_desc.message_types_by_name[name]
expected_full_name = '.'.join([file_desc.package, name])
else:
desc = containing_type_desc.nested_types_by_name[name]
expected_full_name = '.'.join([containing_type_desc.full_name, name])
test.assertEqual(name, desc.name)
test.assertEqual(expected_full_name, desc.full_name)
test.assertEqual(containing_type_desc, desc.containing_type)
test.assertEqual(desc.file, file_desc)
test.assertEqual(self.is_extendable, desc.is_extendable)
for name, subtype in self.type_dict.items():
subtype.CheckType(test, desc, name, file_desc)
for index, (name, field) in enumerate(self.field_list):
field.CheckField(test, desc, name, index, file_desc)
for index, (name, field) in enumerate(self.extensions):
field.CheckField(test, desc, name, index, file_desc)
class EnumField(object):
def __init__(self, number, type_name, default_value):
self.number = number
self.type_name = type_name
self.default_value = default_value
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.fields_by_name[name]
enum_desc = msg_desc.enum_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_ENUM, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_ENUM,
field_desc.cpp_type)
test.assertTrue(field_desc.has_default_value)
test.assertEqual(enum_desc.values_by_name[self.default_value].number,
field_desc.default_value)
test.assertFalse(enum_desc.values_by_name[self.default_value].has_options)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(enum_desc, field_desc.enum_type)
test.assertEqual(file_desc, enum_desc.file)
class MessageField(object):
def __init__(self, number, type_name):
self.number = number
self.type_name = type_name
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.fields_by_name[name]
field_type_desc = msg_desc.nested_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(field_type_desc, field_desc.message_type)
test.assertEqual(file_desc, field_desc.file)
class StringField(object):
def __init__(self, number, default_value):
self.number = number
self.default_value = default_value
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.fields_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_STRING, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_STRING,
field_desc.cpp_type)
test.assertTrue(field_desc.has_default_value)
test.assertEqual(self.default_value, field_desc.default_value)
test.assertEqual(file_desc, field_desc.file)
class ExtensionField(object):
def __init__(self, number, extended_type):
self.number = number
self.extended_type = extended_type
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.extensions_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(index, field_desc.index)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertTrue(field_desc.is_extension)
test.assertEqual(msg_desc, field_desc.extension_scope)
test.assertEqual(msg_desc, field_desc.message_type)
test.assertEqual(self.extended_type, field_desc.containing_type.name)
test.assertEqual(file_desc, field_desc.file)
class AddDescriptorTest(unittest.TestCase):
def _TestMessage(self, prefix):
pool = descriptor_pool.DescriptorPool()
pool.AddDescriptor(unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes',
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes').full_name)
# AddDescriptor is not recursive.
with self.assertRaises(KeyError):
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage')
pool.AddDescriptor(unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedMessage',
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').full_name)
# Files are implicitly also indexed when messages are added.
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileContainingSymbol(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').name)
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testMessage(self):
self._TestMessage('')
self._TestMessage('.')
def _TestEnum(self, prefix):
pool = descriptor_pool.DescriptorPool()
pool.AddEnumDescriptor(unittest_pb2.ForeignEnum.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.ForeignEnum',
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.ForeignEnum').full_name)
# AddEnumDescriptor is not recursive.
with self.assertRaises(KeyError):
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.ForeignEnum.NestedEnum')
pool.AddEnumDescriptor(unittest_pb2.TestAllTypes.NestedEnum.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedEnum',
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').full_name)
# Files are implicitly also indexed when enums are added.
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileContainingSymbol(
prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').name)
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testEnum(self):
self._TestEnum('')
self._TestEnum('.')
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testService(self):
pool = descriptor_pool.DescriptorPool()
with self.assertRaises(KeyError):
pool.FindServiceByName('protobuf_unittest.TestService')
pool.AddServiceDescriptor(unittest_pb2._TESTSERVICE)
self.assertEqual(
'protobuf_unittest.TestService',
pool.FindServiceByName('protobuf_unittest.TestService').full_name)
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testFile(self):
pool = descriptor_pool.DescriptorPool()
pool.AddFileDescriptor(unittest_pb2.DESCRIPTOR)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
# AddFileDescriptor is not recursive; messages and enums within files must
# be explicitly registered.
with self.assertRaises(KeyError):
pool.FindFileContainingSymbol(
'protobuf_unittest.TestAllTypes')
def testEmptyDescriptorPool(self):
# Check that an empty DescriptorPool() contains no messages.
pool = descriptor_pool.DescriptorPool()
proto_file_name = descriptor_pb2.DESCRIPTOR.name
self.assertRaises(KeyError, pool.FindFileByName, proto_file_name)
# Add the above file to the pool
file_descriptor = descriptor_pb2.FileDescriptorProto()
descriptor_pb2.DESCRIPTOR.CopyToProto(file_descriptor)
pool.Add(file_descriptor)
# Now it exists.
self.assertTrue(pool.FindFileByName(proto_file_name))
def testCustomDescriptorPool(self):
# Create a new pool, and add a file descriptor.
pool = descriptor_pool.DescriptorPool()
file_desc = descriptor_pb2.FileDescriptorProto(
name='some/file.proto', package='package')
file_desc.message_type.add(name='Message')
pool.Add(file_desc)
self.assertEqual(pool.FindFileByName('some/file.proto').name,
'some/file.proto')
self.assertEqual(pool.FindMessageTypeByName('package.Message').name,
'Message')
def testFileDescriptorOptionsWithCustomDescriptorPool(self):
# Create a descriptor pool, and add a new FileDescriptorProto to it.
pool = descriptor_pool.DescriptorPool()
file_name = 'file_descriptor_options_with_custom_descriptor_pool.proto'
file_descriptor_proto = descriptor_pb2.FileDescriptorProto(name=file_name)
extension_id = file_options_test_pb2.foo_options
file_descriptor_proto.options.Extensions[extension_id].foo_name = 'foo'
pool.Add(file_descriptor_proto)
# The options set on the FileDescriptorProto should be available in the
# descriptor even if they contain extensions that cannot be deserialized
# using the pool.
file_descriptor = pool.FindFileByName(file_name)
options = file_descriptor.GetOptions()
self.assertEqual('foo', options.Extensions[extension_id].foo_name)
# The object returned by GetOptions() is cached.
self.assertIs(options, file_descriptor.GetOptions())
class DefaultPoolTest(unittest.TestCase):
def testFindMethods(self):
pool = descriptor_pool.Default()
self.assertIs(
pool.FindFileByName('google/protobuf/unittest.proto'),
unittest_pb2.DESCRIPTOR)
self.assertIs(
pool.FindMessageTypeByName('protobuf_unittest.TestAllTypes'),
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertIs(
pool.FindFieldByName('protobuf_unittest.TestAllTypes.optional_int32'),
unittest_pb2.TestAllTypes.DESCRIPTOR.fields_by_name['optional_int32'])
self.assertIs(
pool.FindEnumTypeByName('protobuf_unittest.ForeignEnum'),
unittest_pb2.ForeignEnum.DESCRIPTOR)
if api_implementation.Type() != 'cpp':
self.skipTest('Only the C++ implementation correctly indexes all types')
self.assertIs(
pool.FindExtensionByName('protobuf_unittest.optional_int32_extension'),
unittest_pb2.DESCRIPTOR.extensions_by_name['optional_int32_extension'])
self.assertIs(
pool.FindOneofByName('protobuf_unittest.TestAllTypes.oneof_field'),
unittest_pb2.TestAllTypes.DESCRIPTOR.oneofs_by_name['oneof_field'])
self.assertIs(
pool.FindServiceByName('protobuf_unittest.TestService'),
unittest_pb2.DESCRIPTOR.services_by_name['TestService'])
def testAddFileDescriptor(self):
pool = descriptor_pool.Default()
file_desc = descriptor_pb2.FileDescriptorProto(name='some/file.proto')
pool.Add(file_desc)
pool.AddSerializedFile(file_desc.SerializeToString())
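# Fixture data consumed by testComplexNesting above; the ProtoFile,
# MessageType, EnumType and *Field helpers defined earlier walk these
# expectations against the pool.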
TEST1_FILE = ProtoFile(
'google/protobuf/internal/descriptor_pool_test1.proto',
'google.protobuf.python.internal',
{
'DescriptorPoolTest1': MessageType({
'NestedEnum': EnumType([('ALPHA', 1), ('BETA', 2)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('EPSILON', 5), ('ZETA', 6)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('ETA', 7), ('THETA', 8)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'ETA')),
('nested_field', StringField(2, 'theta')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'ZETA')),
('nested_field', StringField(2, 'beta')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'BETA')),
('nested_message', MessageField(2, 'NestedMessage')),
], is_extendable=True),
'DescriptorPoolTest2': MessageType({
'NestedEnum': EnumType([('GAMMA', 3), ('DELTA', 4)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('IOTA', 9), ('KAPPA', 10)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('LAMBDA', 11), ('MU', 12)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'MU')),
('nested_field', StringField(2, 'lambda')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'IOTA')),
('nested_field', StringField(2, 'delta')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'GAMMA')),
('nested_message', MessageField(2, 'NestedMessage')),
]),
})
TEST2_FILE = ProtoFile(
'google/protobuf/internal/descriptor_pool_test2.proto',
'google.protobuf.python.internal',
{
'DescriptorPoolTest3': MessageType({
'NestedEnum': EnumType([('NU', 13), ('XI', 14)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('OMICRON', 15), ('PI', 16)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('RHO', 17), ('SIGMA', 18)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'RHO')),
('nested_field', StringField(2, 'sigma')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'PI')),
('nested_field', StringField(2, 'nu')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'XI')),
('nested_message', MessageField(2, 'NestedMessage')),
], extensions=[
('descriptor_pool_test',
ExtensionField(1001, 'DescriptorPoolTest1')),
]),
},
dependencies=['google/protobuf/internal/descriptor_pool_test1.proto',
'google/protobuf/internal/more_messages.proto'],
public_dependencies=['google/protobuf/internal/more_messages.proto'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
wessamfathi/GFX | CSE167X/hw2-windows_VS2012/hw2-windows/glm-0.9.2.7/util/gen_external_templates.py | 14 | 5531 |
__author__ = "eloraiby"
__date__ = "$5-Sep-2010 9:35:29 PM$"
atomic_types = ["unsigned char", "unsigned short", "unsigned int",
"signed char", "signed short", "signed int",
"float", "double"]
glsl_vector_types = ["tvec2", "tvec3", "tvec4"]
glsl_matrix_types = ["tmat2x2", "tmat2x3", "tmat2x4",
"tmat3x2", "tmat3x3", "tmat3x4",
"tmat4x2", "tmat4x3", "tmat4x4"]
glsl_matrix_member_operators = ["+=", "-=", "*=", "/="]
glsl_matrix_out_op_dic = {
"tmat2x2":"tmat2x2",
"tmat2x3":"tmat3x3",
"tmat2x4":"tmat4x4",
"tmat3x2":"tmat2x2",
"tmat3x3":"tmat3x3",
"tmat3x4":"tmat4x4",
"tmat4x2":"tmat2x2",
"tmat4x3":"tmat3x3",
"tmat4x4":"tmat4x4",
}
glsl_matrix_right_op_dic = {
"tmat2x2":"tmat2x2",
"tmat2x3":"tmat3x2",
"tmat2x4":"tmat4x2",
"tmat3x2":"tmat2x3",
"tmat3x3":"tmat3x3",
"tmat3x4":"tmat4x3",
"tmat4x2":"tmat2x4",
"tmat4x3":"tmat3x4",
"tmat4x4":"tmat4x4",
}
def gen_vectors():
for v in glsl_vector_types:
print
print "//"
print "// " + v + " type explicit instantiation"
print "//"
for a in atomic_types:
print "template struct " + v + "<" + a + ">;"
print
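# For example, gen_vectors() emits lines such as:
#   template struct tvec2<float>;
#   template struct tvec3<double>;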
def gen_matrices_member_operators():
for m in glsl_matrix_types:
print
print "//"
print "// " + m + " type member operator instantiation"
print "//"
for a in atomic_types:
#print "template " + m + "<" + a + ">::col_type;"
#print "template " + m + "<" + a + ">::row_type;"
for c in atomic_types:
if a != c:
print "template " + m + "<" + a + ">::" + m + "(" + m + "<" + c + "> const &m);"
"""for b in glsl_matrix_member_operators:
for cm in atomic_types:
print "template " + m + "<" + a + ">& " + m + "<" + a + ">::operator " + b + "( " + m + "<" + cm + "> const &m);"
print "template " + m + "<" + a + ">& " + m + "<" + a + ">::operator " + b + "( " + cm + " const &s);"
"""
print
print "//"
print "// Binary operators"
print "//"
print "template " + m + "<" + a + "> operator + (" + m + "<" + a + "> const &m, " + a + " const &s);"
if m == "tmat2x2" or m == "tmat3x3" or m == "tmat4x4":
print "template " + m + "<" + a + "> operator + (" + a + " const &s, " + m + "<" + a + "> const &m);"
print "template " + m + "<" + a + "> operator + (" + m + "<" + a + "> const &m1, " + m + "<" + a + "> const &m2);"
print "template " + m + "<" + a + "> operator - (" + m + "<" + a + "> const &m, " + a + " const &s);"
if m == "tmat2x2" or m == "tmat3x3" or m == "tmat4x4":
print "template " + m + "<" + a + "> operator - (" + a + " const &s, " + m + "<" + a + "> const &m);"
print "template " + m + "<" + a + "> operator - (" + m + "<" + a + "> const &m1, " + m + "<" + a + "> const &m2);"
out_op = glsl_matrix_out_op_dic[m]
right_op = glsl_matrix_right_op_dic[m]
print "template " + m + "<" + a + "> operator * (" + m + "<" + a + "> const &m, " + a + " const &s);"
if m == "tmat2x2" or m == "tmat3x3" or m == "tmat4x4":
print "template " + m + "<" + a + "> operator * ( " + a + " const &s, " + m + "<" + a + "> const &m);"
print "template " + out_op + "<" + a + "> operator * (" + m + "<" + a + "> const &m1, " + right_op + "<" + a + "> const &m2);"
print "template " + m + "<" + a + ">::col_type" + " operator * ( " + m + "<" + a + "> const &m, " + m + "<" + a + ">::row_type" + " const &s);"
print "template " + m + "<" + a + ">::row_type" + " operator * ( " + m + "<" + a + ">::col_type const &s, " + m + "<" + a + "> const &m);"
print "template " + m + "<" + a + "> operator / (" + m + "<" + a + "> const &m, " + a + " const &s);"
#print "template " + right_op + "<" + a + "> operator / ( " + a + " const &s, " + m + "<" + a + "> const &m);"
if m == "tmat2x2" or m == "tmat3x3" or m == "tmat4x4":
print "template " + m + "<" + a + "> operator / ( " + a + " const &s, " + m + "<" + a + "> const &m);"
#print "template " + m + "<" + a + "> operator / (" + m + "<" + a + "> const &m1, " + m + "<" + a + "> const &m2);"
else:
print "template " + m + "<" + a + "> operator / ( " + a + " const &s, " + m + "<" + a + "> const &m);"
#print "template " + m + "<" + a + ">" + " operator / ( " + m + "<" + a + "> const &m, " + a + " const &s);"
#print "template " + m + "<" + a + ">" + " operator / ( " + a + " const &s, " + m + "<" + a + "> const &m);"
print
print "//"
print "// Unary constant operators"
print "//"
print "template " + m + "<" + a + "> const operator -(" + m + "<" + a + "> const &m);"
print "template " + m + "<" + a + "> const operator --(" + m + "<" + a + "> const &m, int);"
print "template " + m + "<" + a + "> const operator ++(" + m + "<" + a + "> const &m, int);"
print
def gen_matrices():
for m in glsl_matrix_types:
print
print "//"
print "// " + m + " type explicit instantiation"
print "//"
for a in atomic_types:
print "template struct " + m + "<" + a + ">;"
print
if __name__ == "__main__":
print "//"
print "// GLM External templates generator script version 0.1 for GLM core"
print "//"
print "// atomic types:", atomic_types
print "// GLSL vector types:", glsl_vector_types;
print "// GLSL matrix types:", glsl_matrix_types;
print "//"
print
print "#include <glm/glm.hpp>"
print
print "namespace glm {"
print "namespace detail {"
gen_vectors()
gen_matrices()
gen_matrices_member_operators()
print "} // namespace detail"
print "} // namespace glm"
| gpl-3.0 |
ccellis/WHACK2016 | flask/lib/python2.7/site-packages/whoosh/util/cache.py | 95 | 13382 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import with_statement
import functools, random
from array import array
from heapq import nsmallest
from operator import itemgetter
from threading import Lock
from time import time
from whoosh.compat import iteritems, xrange
try:
from collections import Counter
except ImportError:
class Counter(dict):
def __missing__(self, key):
return 0
def unbound_cache(func):
"""Caching decorator with an unbounded cache size.
"""
cache = {}
@functools.wraps(func)
def caching_wrapper(*args):
try:
return cache[args]
except KeyError:
result = func(*args)
cache[args] = result
return result
return caching_wrapper
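# Usage sketch for the decorator above (function name is illustrative):
#   @unbound_cache
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)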
def lru_cache(maxsize=100):
"""A simple cache that, when the cache is full, deletes the least recently
used 10% of the cached values.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library.
Arguments to the cached function must be hashable.
View the cache statistics tuple ``(hits, misses, maxsize, currsize)``
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
stats = [0, 0] # Hits, misses
data = {}
lastused = {}
@functools.wraps(user_function)
def wrapper(*args):
try:
result = data[args]
stats[0] += 1 # Hit
except KeyError:
stats[1] += 1 # Miss
if len(data) == maxsize:
for k, _ in nsmallest(maxsize // 10 or 1,
iteritems(lastused),
key=itemgetter(1)):
del data[k]
del lastused[k]
data[args] = user_function(*args)
result = data[args]
finally:
lastused[args] = time()
return result
def cache_info():
return stats[0], stats[1], maxsize, len(data)
def cache_clear():
data.clear()
lastused.clear()
stats[0] = stats[1] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
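# Protocol sketch (hypothetical function; the same interface is exposed by the
# other size-bounded decorators in this module):
#
#     @lru_cache(maxsize=2)
#     def square(x):
#         return x * x
#
#     square(2); square(2); square(3)
#     square.cache_info()   # -> (1, 2, 2, 2): 1 hit, 2 misses, maxsize, currsize
#     square.cache_clear()  # empties the cache and zeroes the statistics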
def lfu_cache(maxsize=100):
"""A simple cache that, when the cache is full, deletes the least frequently
used 10% of the cached values.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library.
Arguments to the cached function must be hashable.
View the cache statistics tuple ``(hits, misses, maxsize, currsize)``
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
stats = [0, 0] # Hits, misses
data = {}
usecount = Counter()
@functools.wraps(user_function)
def wrapper(*args):
try:
result = data[args]
stats[0] += 1 # Hit
except KeyError:
stats[1] += 1 # Miss
if len(data) == maxsize:
for k, _ in nsmallest(maxsize // 10 or 1,
iteritems(usecount),
key=itemgetter(1)):
del data[k]
del usecount[k]
data[args] = user_function(*args)
result = data[args]
finally:
usecount[args] += 1
return result
def cache_info():
return stats[0], stats[1], maxsize, len(data)
def cache_clear():
data.clear()
usecount.clear()
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
def random_cache(maxsize=100):
"""A very simple cache that, when the cache is filled, deletes 10% of the
cached values AT RANDOM.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library.
Arguments to the cached function must be hashable.
View the cache statistics tuple ``(hits, misses, maxsize, currsize)``
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
stats = [0, 0] # hits, misses
data = {}
@functools.wraps(user_function)
def wrapper(*args):
try:
result = data[args]
stats[0] += 1 # Hit
except KeyError:
stats[1] += 1 # Miss
if len(data) == maxsize:
keys = list(data.keys())  # materialize so pop() works on Python 3's view objects too
for i in xrange(maxsize // 10 or 1):
n = random.randint(0, len(keys) - 1)
k = keys.pop(n)
del data[k]
data[args] = user_function(*args)
result = data[args]
return result
def cache_info():
return stats[0], stats[1], maxsize, len(data)
def cache_clear():
data.clear()
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
def db_lru_cache(maxsize=100):
"""Double-barrel least-recently-used cache decorator. This is a simple
LRU algorithm that keeps a primary and secondary dict. Keys are checked
in the primary dict, and then the secondary. Once the primary dict fills
up, the secondary dict is cleared and the two dicts are swapped.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library.
Arguments to the cached function must be hashable.
View the cache statistics tuple ``(hits, misses, maxsize, currsize)``
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
# Cache1, Cache2, Pointer, Hits, Misses
stats = [{}, {}, 0, 0, 0]
@functools.wraps(user_function)
def wrapper(*args):
ptr = stats[2]
a = stats[ptr]
b = stats[not ptr]
key = args
if key in a:
stats[3] += 1 # Hit
return a[key]
elif key in b:
stats[3] += 1 # Hit
return b[key]
else:
stats[4] += 1 # Miss
result = user_function(*args)
a[key] = result
if len(a) >= maxsize:
stats[2] = not ptr
b.clear()
return result
def cache_info():
return stats[3], stats[4], maxsize, len(stats[0]) + len(stats[1])
def cache_clear():
"""Clear the cache and cache statistics"""
stats[0].clear()
stats[1].clear()
stats[3] = stats[4] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
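# Behaviour sketch (hypothetical function): entries stay readable after the
# swap because lookups check both dicts, and cache_info() reports the combined
# size of the two barrels:
#
#     @db_lru_cache(maxsize=2)
#     def ident(x):
#         return x
#
#     ident(1); ident(2)   # primary barrel fills up; the dicts swap
#     ident(1)             # still a hit, served from the demoted barrel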
def clockface_lru_cache(maxsize=100):
"""Least-recently-used cache decorator.
This function duplicates (more-or-less) the protocol of the
``functools.lru_cache`` decorator in the Python 3.2 standard library, but
uses the clock face LRU algorithm instead of an ordered dictionary.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
"""
def decorating_function(user_function):
stats = [0, 0, 0] # hits, misses, hand
data = {}
if maxsize:
# The keys at each point on the clock face
clock_keys = [None] * maxsize
# The "referenced" bits at each point on the clock face
clock_refs = array("B", (0 for _ in xrange(maxsize)))
lock = Lock()
@functools.wraps(user_function)
def wrapper(*args):
key = args
try:
with lock:
pos, result = data[key]
# The key is in the cache. Set the key's reference bit
clock_refs[pos] = 1
# Record a cache hit
stats[0] += 1
except KeyError:
# Compute the value
result = user_function(*args)
with lock:
# Current position of the clock hand
hand = stats[2]
# Remember to stop here after a full revolution
end = hand
# Sweep around the clock looking for a position with
# the reference bit off
while True:
hand = (hand + 1) % maxsize
current_ref = clock_refs[hand]
if current_ref:
# This position's "referenced" bit is set. Turn
# the bit off and move on.
clock_refs[hand] = 0
elif not current_ref or hand == end:
# We've either found a position with the
# "reference" bit off or reached the end of the
# circular cache. So we'll replace this
# position with the new key
current_key = clock_keys[hand]
if current_key in data:
del data[current_key]
clock_keys[hand] = key
clock_refs[hand] = 1
break
# Put the key and result in the cache
data[key] = (hand, result)
# Save the new hand position
stats[2] = hand
# Record a cache miss
stats[1] += 1
return result
else:
@functools.wraps(user_function)
def wrapper(*args):
key = args
try:
result = data[key]
stats[0] += 1
except KeyError:
result = user_function(*args)
data[key] = result
stats[1] += 1
return result
def cache_info():
return stats[0], stats[1], maxsize, len(data)
def cache_clear():
"""Clear the cache and cache statistics"""
data.clear()
stats[0] = stats[1] = stats[2] = 0
if maxsize:
    # clock_keys/clock_refs only exist when maxsize is bounded
    for i in xrange(maxsize):
        clock_keys[i] = None
        clock_refs[i] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
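# Usage sketch (hypothetical function): with a bounded maxsize the clock hand
# reuses the first slot whose "referenced" bit is off, so recently touched
# entries survive a sweep; with maxsize=None the decorator degrades to an
# unbounded cache with no clock bookkeeping:
#
#     @clockface_lru_cache(maxsize=128)
#     def normalize(term):
#         return term.lower().strip()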
| bsd-3-clause |
Desarrollo-CeSPI/meran | dev-plugins/node/lib/node/wafadmin/Tools/unittestw.py | 4 | 11378 | #!/usr/bin/env python
# Meran - MERAN UNLP is an ILS (Integrated Library System) which provides Catalog,
# Circulation and User Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Carlos Rafael Giani, 2006
"""
Unit tests are run in the shutdown() method, and are intended for C/C++ programs.
The programs should NOT need any parameters in order to execute.
In the shutdown method, add the following code:
>>> def shutdown():
... ut = UnitTest.unit_test()
... ut.run()
... ut.print_results()
Each object to use as a unit test must be a program and must have X{obj.unit_test=1}
"""
import os, sys
import Build, TaskGen, Utils, Options, Logs, Task
from TaskGen import before, after, feature
from Constants import *
class unit_test(object):
"Unit test representation"
def __init__(self):
self.returncode_ok = 0 # Unit test returncode considered OK. All returncodes differing from this one
# will cause the unit test to be marked as "FAILED".
# The following variables are filled with data by run().
# print_results() uses these for printing the unit test summary,
# but if there is need for direct access to the results,
# they can be retrieved here, after calling run().
self.num_tests_ok = 0 # Number of successful unit tests
self.num_tests_failed = 0 # Number of failed unit tests
self.num_tests_err = 0 # Tests that have not even run
self.total_num_tests = 0 # Total amount of unit tests
self.max_label_length = 0 # Maximum label length (pretty-print the output)
self.unit_tests = Utils.ordered_dict() # Unit test dictionary. Key: the label (unit test filename relative
# to the build dir), value: unit test filename with absolute path
self.unit_test_results = {} # Dictionary containing the unit test results.
# Key: the label, value: result (true = success false = failure)
self.unit_test_erroneous = {} # Dictionary indicating erroneous unit tests.
# Key: the label, value: true = unit test has an error false = unit test is ok
self.change_to_testfile_dir = False #True if the test file needs to be executed from the same dir
self.want_to_see_test_output = False #True to see the stdout from the testfile (for example check suites)
self.want_to_see_test_error = False #True to see the stderr from the testfile (for example check suites)
self.run_if_waf_does = 'check' #build was the old default
def run(self):
"Run the unit tests and gather results (note: no output here)"
self.num_tests_ok = 0
self.num_tests_failed = 0
self.num_tests_err = 0
self.total_num_tests = 0
self.max_label_length = 0
self.unit_tests = Utils.ordered_dict()
self.unit_test_results = {}
self.unit_test_erroneous = {}
ld_library_path = []
# If waf is not building, don't run anything
if not Options.commands[self.run_if_waf_does]: return
# Get the paths for the shared libraries, and obtain the unit tests to execute
for obj in Build.bld.all_task_gen:
try:
link_task = obj.link_task
except AttributeError:
pass
else:
lib_path = link_task.outputs[0].parent.abspath(obj.env)
if lib_path not in ld_library_path:
ld_library_path.append(lib_path)
unit_test = getattr(obj, 'unit_test', '')
if unit_test and 'cprogram' in obj.features:
try:
output = obj.path
filename = os.path.join(output.abspath(obj.env), obj.target)
srcdir = output.abspath()
label = os.path.join(output.bldpath(obj.env), obj.target)
self.max_label_length = max(self.max_label_length, len(label))
self.unit_tests[label] = (filename, srcdir)
except KeyError:
pass
self.total_num_tests = len(self.unit_tests)
# Now run the unit tests
Utils.pprint('GREEN', 'Running the unit tests')
count = 0
result = 1
for label in self.unit_tests.allkeys:
file_and_src = self.unit_tests[label]
filename = file_and_src[0]
srcdir = file_and_src[1]
count += 1
line = Build.bld.progress_line(count, self.total_num_tests, Logs.colors.GREEN, Logs.colors.NORMAL)
if Options.options.progress_bar and line:
sys.stderr.write(line)
sys.stderr.flush()
try:
kwargs = {}
kwargs['env'] = os.environ.copy()
if self.change_to_testfile_dir:
kwargs['cwd'] = srcdir
if not self.want_to_see_test_output:
kwargs['stdout'] = Utils.pproc.PIPE # PIPE for ignoring output
if not self.want_to_see_test_error:
kwargs['stderr'] = Utils.pproc.PIPE # PIPE for ignoring output
if ld_library_path:
v = kwargs['env']
def add_path(dct, path, var):
dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
if sys.platform == 'win32':
add_path(v, ld_library_path, 'PATH')
elif sys.platform == 'darwin':
add_path(v, ld_library_path, 'DYLD_LIBRARY_PATH')
add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
else:
add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
pp = Utils.pproc.Popen(filename, **kwargs)
pp.wait()
result = int(pp.returncode == self.returncode_ok)
if result:
self.num_tests_ok += 1
else:
self.num_tests_failed += 1
self.unit_test_results[label] = result
self.unit_test_erroneous[label] = 0
except OSError:
self.unit_test_erroneous[label] = 1
self.num_tests_err += 1
except KeyboardInterrupt:
pass
if Options.options.progress_bar: sys.stdout.write(Logs.colors.cursor_on)
def print_results(self):
"Pretty-prints a summary of all unit tests, along with some statistics"
# If waf is not building, don't output anything
if not Options.commands[self.run_if_waf_does]: return
p = Utils.pprint
# Early quit if no tests were performed
if self.total_num_tests == 0:
p('YELLOW', 'No unit tests present')
return
for label in self.unit_tests.allkeys:
filename = self.unit_tests[label]
err = 0
result = 0
try: err = self.unit_test_erroneous[label]
except KeyError: pass
try: result = self.unit_test_results[label]
except KeyError: pass
n = self.max_label_length - len(label)
if err: n += 4
elif result: n += 7
else: n += 3
line = '%s %s' % (label, '.' * n)
if err: p('RED', '%sERROR' % line)
elif result: p('GREEN', '%sOK' % line)
else: p('YELLOW', '%sFAILED' % line)
percentage_ok = float(self.num_tests_ok) / float(self.total_num_tests) * 100.0
percentage_failed = float(self.num_tests_failed) / float(self.total_num_tests) * 100.0
percentage_erroneous = float(self.num_tests_err) / float(self.total_num_tests) * 100.0
p('NORMAL', '''
Successful tests: %i (%.1f%%)
Failed tests: %i (%.1f%%)
Erroneous tests: %i (%.1f%%)
Total number of tests: %i
''' % (self.num_tests_ok, percentage_ok, self.num_tests_failed, percentage_failed,
self.num_tests_err, percentage_erroneous, self.total_num_tests))
p('GREEN', 'Unit tests finished')
############################################################################################
"""
New unit test system
The targets with feature 'test' are executed after they are built
bld(features='cprogram cc test', ...)
To display the results:
import UnitTest
bld.add_post_fun(UnitTest.summary)
"""
import threading
testlock = threading.Lock()
def set_options(opt):
opt.add_option('--alltests', action='store_true', default=True, help='Exec all unit tests', dest='all_tests')
@feature('test')
@after('apply_link', 'vars_target_cprogram')
def make_test(self):
if not 'cprogram' in self.features:
Logs.error('test cannot be executed %s' % self)
return
self.default_install_path = None
self.create_task('utest', self.link_task.outputs)
def exec_test(self):
status = 0
variant = self.env.variant()
filename = self.inputs[0].abspath(self.env)
try:
fu = getattr(self.generator.bld, 'all_test_paths')
except AttributeError:
fu = os.environ.copy()
self.generator.bld.all_test_paths = fu
lst = []
for obj in self.generator.bld.all_task_gen:
link_task = getattr(obj, 'link_task', None)
if link_task and link_task.env.variant() == variant:
lst.append(link_task.outputs[0].parent.abspath(obj.env))
def add_path(dct, path, var):
dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
if sys.platform == 'win32':
add_path(fu, lst, 'PATH')
elif sys.platform == 'darwin':
add_path(fu, lst, 'DYLD_LIBRARY_PATH')
add_path(fu, lst, 'LD_LIBRARY_PATH')
else:
add_path(fu, lst, 'LD_LIBRARY_PATH')
cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath(self.env)
proc = Utils.pproc.Popen(filename, cwd=cwd, env=fu, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE)
(stdout, stderr) = proc.communicate()
tup = (filename, proc.returncode, stdout, stderr)
self.generator.utest_result = tup
testlock.acquire()
try:
bld = self.generator.bld
Logs.debug("ut: %r", tup)
try:
bld.utest_results.append(tup)
except AttributeError:
bld.utest_results = [tup]
finally:
testlock.release()
cls = Task.task_type_from_func('utest', func=exec_test, color='PINK', ext_in='.bin')
old = cls.runnable_status
def test_status(self):
if getattr(Options.options, 'all_tests', False):
return RUN_ME
return old(self)
cls.runnable_status = test_status
cls.quiet = 1
def summary(bld):
lst = getattr(bld, 'utest_results', [])
if lst:
Utils.pprint('CYAN', 'execution summary')
total = len(lst)
tfail = len([x for x in lst if x[1]])
Utils.pprint('CYAN', ' tests that pass %d/%d' % (total-tfail, total))
for (f, code, out, err) in lst:
if not code:
Utils.pprint('CYAN', ' %s' % f)
Utils.pprint('CYAN', ' tests that fail %d/%d' % (tfail, total))
for (f, code, out, err) in lst:
if code:
Utils.pprint('CYAN', ' %s' % f) | gpl-3.0 |
cryptoprojects/ultimateonlinecash | test/functional/p2p-mempool.py | 17 | 1231 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class P2PMempoolTests(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
#connect a mininode
aTestNode = NodeConnCB()
node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
aTestNode.add_connection(node)
NetworkThread().start()
aTestNode.wait_for_verack()
#request mempool
aTestNode.send_message(msg_mempool())
aTestNode.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| mit |
ichuang/sympy | sympy/physics/quantum/tests/test_cg.py | 3 | 7510 | from __future__ import division
from sympy import S, sqrt, Sum, symbols
from sympy.physics.quantum.cg import Wigner3j, Wigner6j, Wigner9j, CG, cg_simp
from sympy.functions.special.tensor_functions import KroneckerDelta
def test_cg_simp_add():
j, m1, m1p, m2, m2p = symbols('j m1 m1p m2 m2p')
# Test Varshalovich 8.7.1 Eq 1
a = CG(S(1)/2,S(1)/2,0,0,S(1)/2,S(1)/2)
b = CG(S(1)/2,-S(1)/2,0,0,S(1)/2,-S(1)/2)
c = CG(1,1,0,0,1,1)
d = CG(1,0,0,0,1,0)
e = CG(1,-1,0,0,1,-1)
assert cg_simp(a+b) == 2
assert cg_simp(c+d+e) == 3
assert cg_simp(a+b+c+d+e) == 5
assert cg_simp(a+b+c) == 2+c
assert cg_simp(2*a+b) == 2+a
assert cg_simp(2*c+d+e) == 3+c
assert cg_simp(5*a+5*b) == 10
assert cg_simp(5*c+5*d+5*e) == 15
assert cg_simp(-a-b) == -2
assert cg_simp(-c-d-e) == -3
assert cg_simp(-6*a-6*b) == -12
assert cg_simp(-4*c-4*d-4*e) == -12
a = CG(S(1)/2,S(1)/2,j,0,S(1)/2,S(1)/2)
b = CG(S(1)/2,-S(1)/2,j,0,S(1)/2,-S(1)/2)
c = CG(1,1,j,0,1,1)
d = CG(1,0,j,0,1,0)
e = CG(1,-1,j,0,1,-1)
assert cg_simp(a+b) == 2*KroneckerDelta(j,0)
assert cg_simp(c+d+e) == 3*KroneckerDelta(j,0)
assert cg_simp(a+b+c+d+e) == 5*KroneckerDelta(j,0)
assert cg_simp(a+b+c) == 2*KroneckerDelta(j,0)+c
assert cg_simp(2*a+b) == 2*KroneckerDelta(j,0)+a
assert cg_simp(2*c+d+e) == 3*KroneckerDelta(j,0)+c
assert cg_simp(5*a+5*b) == 10*KroneckerDelta(j,0)
assert cg_simp(5*c+5*d+5*e) == 15*KroneckerDelta(j,0)
assert cg_simp(-a-b) == -2*KroneckerDelta(j,0)
assert cg_simp(-c-d-e) == -3*KroneckerDelta(j,0)
assert cg_simp(-6*a-6*b) == -12*KroneckerDelta(j,0)
assert cg_simp(-4*c-4*d-4*e) == -12*KroneckerDelta(j,0)
# Test Varshalovich 8.7.1 Eq 2
a = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,0,0)
b = CG(S(1)/2,-S(1)/2,S(1)/2,S(1)/2,0,0)
c = CG(1,1,1,-1,0,0)
d = CG(1,0,1,0,0,0)
e = CG(1,-1,1,1,0,0)
assert cg_simp(a-b) == sqrt(2)
assert cg_simp(c-d+e) == sqrt(3)
assert cg_simp(a-b+c-d+e) == sqrt(2)+sqrt(3)
assert cg_simp(a-b+c) == sqrt(2)+c
assert cg_simp(2*a-b) == sqrt(2)+a
assert cg_simp(2*c-d+e) == sqrt(3)+c
assert cg_simp(5*a-5*b) == 5*sqrt(2)
assert cg_simp(5*c-5*d+5*e) == 5*sqrt(3)
assert cg_simp(-a+b) == -sqrt(2)
assert cg_simp(-c+d-e) == -sqrt(3)
assert cg_simp(-6*a+6*b) == -6*sqrt(2)
assert cg_simp(-4*c+4*d-4*e) == -4*sqrt(3)
a = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,j,0)
b = CG(S(1)/2,-S(1)/2,S(1)/2,S(1)/2,j,0)
c = CG(1,1,1,-1,j,0)
d = CG(1,0,1,0,j,0)
e = CG(1,-1,1,1,j,0)
assert cg_simp(a-b) == sqrt(2)*KroneckerDelta(j,0)
assert cg_simp(c-d+e) == sqrt(3)*KroneckerDelta(j,0)
assert cg_simp(a-b+c-d+e) == sqrt(2)*KroneckerDelta(j,0)+sqrt(3)*KroneckerDelta(j,0)
assert cg_simp(a-b+c) == sqrt(2)*KroneckerDelta(j,0)+c
assert cg_simp(2*a-b) == sqrt(2)*KroneckerDelta(j,0)+a
assert cg_simp(2*c-d+e) == sqrt(3)*KroneckerDelta(j,0)+c
assert cg_simp(5*a-5*b) == 5*sqrt(2)*KroneckerDelta(j,0)
assert cg_simp(5*c-5*d+5*e) == 5*sqrt(3)*KroneckerDelta(j,0)
assert cg_simp(-a+b) == -sqrt(2)*KroneckerDelta(j,0)
assert cg_simp(-c+d-e) == -sqrt(3)*KroneckerDelta(j,0)
assert cg_simp(-6*a+6*b) == -6*sqrt(2)*KroneckerDelta(j,0)
assert cg_simp(-4*c+4*d-4*e) == -4*sqrt(3)*KroneckerDelta(j,0)
# Test Varshalovich 8.7.2 Eq 9
# alpha=alphap,beta=betap case
# numerical
a = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,1,0)**2
b = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,0,0)**2
c = CG(1,0,1,1,1,1)**2
d = CG(1,0,1,1,2,1)**2
assert cg_simp(a+b) == 1
assert cg_simp(c+d) == 1
assert cg_simp(a+b+c+d) == 2
assert cg_simp(4*a+4*b) == 4
assert cg_simp(4*c+4*d) == 4
assert cg_simp(5*a+3*b) == 3+2*a
assert cg_simp(5*c+3*d) == 3+2*c
assert cg_simp(-a-b) == -1
assert cg_simp(-c-d) == -1
# symbolic
a = CG(S(1)/2,m1,S(1)/2,m2,1,1)**2
b = CG(S(1)/2,m1,S(1)/2,m2,1,0)**2
c = CG(S(1)/2,m1,S(1)/2,m2,1,-1)**2
d = CG(S(1)/2,m1,S(1)/2,m2,0,0)**2
assert cg_simp(a+b+c+d) == 1
assert cg_simp(4*a+4*b+4*c+4*d) == 4
assert cg_simp(3*a+5*b+3*c+4*d) == 3+2*b+d
assert cg_simp(-a-b-c-d) == -1
a = CG(1,m1,1,m2,2,2)**2
b = CG(1,m1,1,m2,2,1)**2
c = CG(1,m1,1,m2,2,0)**2
d = CG(1,m1,1,m2,2,-1)**2
e = CG(1,m1,1,m2,2,-2)**2
f = CG(1,m1,1,m2,1,1)**2
g = CG(1,m1,1,m2,1,0)**2
h = CG(1,m1,1,m2,1,-1)**2
i = CG(1,m1,1,m2,0,0)**2
assert cg_simp(a+b+c+d+e+f+g+h+i) == 1
assert cg_simp(4*(a+b+c+d+e+f+g+h+i)) == 4
assert cg_simp(a+b+2*c+d+4*e+f+g+h+i) == 1+c+3*e
assert cg_simp(-a-b-c-d-e-f-g-h-i) == -1
# alpha!=alphap or beta!=betap case
# numerical
a = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,1,0)*CG(S(1)/2,-S(1)/2,S(1)/2,S(1)/2,1,0)
b = CG(S(1)/2,S(1)/2,S(1)/2,-S(1)/2,0,0)*CG(S(1)/2,-S(1)/2,S(1)/2,S(1)/2,0,0)
c = CG(1,1,1,0,2,1)*CG(1,0,1,1,2,1)
d = CG(1,1,1,0,1,1)*CG(1,0,1,1,1,1)
assert cg_simp(a+b) == 0
assert cg_simp(c+d) == 0
# symbolic
a = CG(S(1)/2,m1,S(1)/2,m2,1,1)*CG(S(1)/2,m1p,S(1)/2,m2p,1,1)
b = CG(S(1)/2,m1,S(1)/2,m2,1,0)*CG(S(1)/2,m1p,S(1)/2,m2p,1,0)
c = CG(S(1)/2,m1,S(1)/2,m2,1,-1)*CG(S(1)/2,m1p,S(1)/2,m2p,1,-1)
d = CG(S(1)/2,m1,S(1)/2,m2,0,0)*CG(S(1)/2,m1p,S(1)/2,m2p,0,0)
assert cg_simp(a+b+c+d) == KroneckerDelta(m1,m1p)*KroneckerDelta(m2,m2p)
a = CG(1,m1,1,m2,2,2)*CG(1,m1p,1,m2p,2,2)
b = CG(1,m1,1,m2,2,1)*CG(1,m1p,1,m2p,2,1)
c = CG(1,m1,1,m2,2,0)*CG(1,m1p,1,m2p,2,0)
d = CG(1,m1,1,m2,2,-1)*CG(1,m1p,1,m2p,2,-1)
e = CG(1,m1,1,m2,2,-2)*CG(1,m1p,1,m2p,2,-2)
f = CG(1,m1,1,m2,1,1)*CG(1,m1p,1,m2p,1,1)
g = CG(1,m1,1,m2,1,0)*CG(1,m1p,1,m2p,1,0)
h = CG(1,m1,1,m2,1,-1)*CG(1,m1p,1,m2p,1,-1)
i = CG(1,m1,1,m2,0,0)*CG(1,m1p,1,m2p,0,0)
assert cg_simp(a+b+c+d+e+f+g+h+i) == KroneckerDelta(m1,m1p)*KroneckerDelta(m2,m2p)
def test_cg_simp_sum():
x, a, b, c, cp, alpha, beta, gamma, gammap = symbols('x a b c cp alpha beta gamma gammap')
# Varshalovich 8.7.1 Eq 1
assert cg_simp(x * Sum(CG(a,alpha,b,0,a,alpha), (alpha,-a,a))) == x*(2*a+1)*KroneckerDelta(b,0)
assert cg_simp(x * Sum(CG(a,alpha,b,0,a,alpha), (alpha,-a,a))+CG(1,0,1,0,1,0)) == x*(2*a+1)*KroneckerDelta(b,0)+CG(1,0,1,0,1,0)
assert cg_simp(2 * Sum(CG(1,alpha,0,0,1,alpha), (alpha,-1,1))) == 6
# Varshalovich 8.7.1 Eq 2
assert cg_simp(x*Sum((-1)**(a-alpha) * CG(a,alpha,a,-alpha,c,0), (alpha,-a,a))) == x*sqrt(2*a+1)*KroneckerDelta(c,0)
assert cg_simp(3*Sum((-1)**(2-alpha) * CG(2,alpha,2,-alpha,0,0), (alpha,-2,2))) == 3*sqrt(5)
# Varshalovich 8.7.2 Eq 4
assert cg_simp(Sum(CG(a,alpha,b,beta,c,gamma)*CG(a,alpha,b,beta,cp,gammap),(alpha,-a,a),(beta,-b,b))) == KroneckerDelta(c,cp)*KroneckerDelta(gamma,gammap)
assert cg_simp(Sum(CG(a,alpha,b,beta,c,gamma)*CG(a,alpha,b,beta,c,gammap),(alpha,-a,a),(beta,-b,b))) == KroneckerDelta(gamma,gammap)
assert cg_simp(Sum(CG(a,alpha,b,beta,c,gamma)*CG(a,alpha,b,beta,cp,gamma),(alpha,-a,a),(beta,-b,b))) == KroneckerDelta(c,cp)
assert cg_simp(Sum(CG(a,alpha,b,beta,c,gamma)**2,(alpha,-a,a),(beta,-b,b))) == 1
assert cg_simp(Sum(CG(2,alpha,1,beta,2,gamma)*CG(2,alpha,1,beta,2,gammap), (alpha,-2,2), (beta,-1,1))) == KroneckerDelta(gamma,gammap)
def test_doit():
assert Wigner3j(1/2,-1/2,1/2,1/2,0,0).doit() == -sqrt(2)/2
assert Wigner6j(1,2,3,2,1,2).doit() == sqrt(21)/105
assert Wigner9j(2,1,1,S(3)/2,S(1)/2,1,S(1)/2,S(1)/2,0).doit() == sqrt(2)/12
assert CG(1/2,1/2,1/2,-1/2,1,0).doit() == sqrt(2)/2
| bsd-3-clause |
qlands/onadata | onadata/apps/logger/management/commands/import_tools.py | 13 | 1695 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 coding=utf-8
import glob
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext as _, ugettext_lazy
from onadata.libs.logger.import_tools import import_instances_from_zip
from onadata.libs.logger.models import Instance
IMAGES_DIR = os.path.join(settings.MEDIA_ROOT, "attachments")
class Command(BaseCommand):
help = ugettext_lazy("Import ODK forms and instances.")
def handle(self, *args, **kwargs):
if len(args) < 2:
raise CommandError(_(u"path(xform instances) username"))
path = args[0]
username = args[1]
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError(_(u"Invalid username %s") % username)
debug = False
if debug:
print (_(u"[Importing XForm Instances from %(path)s]\n")
% {'path': path})
im_count = len(glob.glob(os.path.join(IMAGES_DIR, '*')))
print _(u"Before Parse:")
print _(u" --> Images: %(nb)d") % {'nb': im_count}
print (_(u" --> Instances: %(nb)d")
% {'nb': Instance.objects.count()})
import_instances_from_zip(path, user)
if debug:
im_count2 = len(glob.glob(os.path.join(IMAGES_DIR, '*')))
print _(u"After Parse:")
print _(u" --> Images: %(nb)d") % {'nb': im_count2}
print (_(u" --> Instances: %(nb)d")
% {'nb': Instance.objects.count()})
| bsd-2-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/cluster/plot_cluster_comparison.py | 1 | 4683 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| mit |
imphil/fusesoc | tests/edalize/test_icarus.py | 1 | 1810 | import pytest
def test_icarus():
import os
import shutil
from edalize_common import compare_files, setup_backend, tests_dir
ref_dir = os.path.join(tests_dir, __name__)
paramtypes = ['plusarg', 'vlogdefine', 'vlogparam']
name = 'test_icarus_0'
tool = 'icarus'
tool_options = {
'iverilog_options' : ['some', 'iverilog_options'],
'timescale' : '1ns/1ns',
}
(backend, args, work_root) = setup_backend(paramtypes, name, tool, tool_options, use_vpi=True)
backend.configure(args)
compare_files(ref_dir, work_root, ['Makefile',
name+'.scr',
'timescale.v',
])
backend.build()
compare_files(ref_dir, work_root, ['iverilog.cmd'])
compare_files(ref_dir, work_root, ['iverilog-vpi.cmd'])
backend.run(args)
compare_files(ref_dir, work_root, ['vvp.cmd'])
def test_icarus_minimal():
import os
import shutil
import tempfile
from edalize import get_edatool
from edalize_common import compare_files, tests_dir
ref_dir = os.path.join(tests_dir, __name__, 'minimal')
os.environ['PATH'] = os.path.join(tests_dir, 'mock_commands')+':'+os.environ['PATH']
tool = 'icarus'
name = 'test_'+tool+'_minimal_0'
work_root = tempfile.mkdtemp(prefix=tool+'_')
eda_api = {'name' : name,
'toplevel' : 'top'}
backend = get_edatool(tool)(eda_api=eda_api, work_root=work_root)
backend.configure([])
compare_files(ref_dir, work_root, ['Makefile',
name+'.scr',
])
backend.build()
compare_files(ref_dir, work_root, ['iverilog.cmd'])
backend.run([])
compare_files(ref_dir, work_root, ['vvp.cmd'])
| gpl-3.0 |
alash3al/rethinkdb | test/interface/precise_stats.py | 10 | 8915 | #!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
# This file tests the `rethinkdb.stats` admin table.
# Here, we run very particular queries and verify that the 'total' stats are exactly
# correct. This includes point reads/writes, range reads/replaces, backfills, and
# sindex construction.
from __future__ import print_function
import sys, os, time, re, multiprocessing, random, pprint
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse, workload_runner
r = utils.import_python_driver()
db_name = 'test'
table_name = 'foo'
server_names = ['grey', 'face']
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)
with driver.Metacluster() as metacluster:
cluster = driver.Cluster(metacluster)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
print('Spinning up %d processes...' % len(server_names))
servers = [ ]
for i in xrange(len(server_names)):
info = { 'name': server_names[i] }
info['files'] = driver.Files(metacluster, db_path='db-%d' % i,
console_output='create-output-%d' % i,
server_name=info['name'], command_prefix=command_prefix)
info['process'] = driver.Process(cluster, info['files'],
console_output='serve-output-%d' % i,
command_prefix=command_prefix, extra_options=serve_options)
servers.append(info)
for server in servers:
server['process'].wait_until_started_up()
r.connect(servers[0]['process'].host, servers[0]['process'].driver_port).repl()
r.db_create(db_name).run()
r.db(db_name).table_create(table_name).run()
tbl = r.db(db_name).table(table_name)
table_id = tbl.config()['id'].run()
for server in servers:
server['id'] = r.db('rethinkdb').table('server_config') \
.filter(r.row['name'].eq(server['name']))[0]['id'].run()
def get_stats(server):
return r.db('rethinkdb').table('stats').get(['table_server', table_id, server['id']]) \
.do({'reads': r.row['query_engine']['read_docs_total'],
'writes': r.row['query_engine']['written_docs_total']}).run()
def check_stats_internal(query, reads, writes):
stats = get_stats(servers[0])
delta_reads = stats['reads'] - check_stats_internal.last_reads
delta_writes = stats['writes'] - check_stats_internal.last_writes
if delta_reads != reads:
print('Error in query %s: Expected %d reads but got %d' %
(r.errors.QueryPrinter(query).print_query(), reads, delta_reads))
if delta_writes != writes:
print('Error in query %s: Expected %d writes but got %d' %
(r.errors.QueryPrinter(query).print_query(), writes, delta_writes))
check_stats_internal.last_reads = stats['reads']
check_stats_internal.last_writes = stats['writes']
check_stats_internal.last_reads = 0
check_stats_internal.last_writes = 0
def check_error_stats(query, reads, writes):
try:
query.run()
print("Failed to error in query (%s)" % r.errors.QueryPrinter(query).print_query())
except r.RqlError as e:
pass
check_stats_internal(query, reads, writes)
def check_query_stats(query, reads, writes):
query.run()
check_stats_internal(query, reads, writes)
def primary_shard(server):
return { 'primary_replica': server['name'], 'replicas': [ server['name'] ] }
# Pin the table to one server
tbl.config().update({'shards': [ primary_shard(servers[0]) ]}).run()
tbl.wait().run()
# Create a secondary index that will be used in some of the inserts
check_query_stats(tbl.index_create('double', r.row['id']), reads=0, writes=0)
check_query_stats(tbl.index_create('value'), reads=0, writes=0)
tbl.index_wait().run()
# batch of writes
check_query_stats(tbl.insert(r.range(100).map(lambda x: {'id': x})), reads=0, writes=100)
# point operations
check_query_stats(tbl.insert({'id': 100}), reads=0, writes=1)
check_query_stats(tbl.get(100).delete(), reads=0, writes=1)
check_query_stats(tbl.get(50), reads=1, writes=0)
check_query_stats(tbl.get(101).replace({'id': 101}), reads=0, writes=1)
check_query_stats(tbl.get(101).delete(), reads=0, writes=1)
# point writes with changed rows
check_query_stats(tbl.get(50).update({'value': 'dummy'}), reads=0, writes=1)
check_query_stats(tbl.get(50).replace({'id': 50}), reads=0, writes=1)
check_query_stats(tbl.get(50).update(lambda x: {'value': 'dummy'}), reads=0, writes=1)
check_query_stats(tbl.get(50).replace(lambda x: {'id': 50}), reads=0, writes=1)
# point writes with unchanged rows
check_query_stats(tbl.get(50).update({}), reads=0, writes=1)
check_query_stats(tbl.get(50).update(lambda x: {}), reads=0, writes=1)
check_query_stats(tbl.get(50).replace({'id': 50}), reads=0, writes=1)
check_query_stats(tbl.get(50).replace(lambda x: x), reads=0, writes=1)
# range operations
check_query_stats(tbl.between(20, 40), reads=20, writes=0)
# range writes with some changed and some unchanged rows
check_query_stats(tbl.between(20, 40).update(r.branch(r.row['id'].mod(2).eq(1), {}, {'value': 'dummy'})),
reads=20, writes=20)
check_query_stats(tbl.between(20, 40).replace({'id': r.row['id']}), reads=20, writes=20)
# range writes with unchanged rows
check_query_stats(tbl.between(20, 40).update({}), reads=20, writes=20)
check_query_stats(tbl.between(20, 40).replace(lambda x: x), reads=20, writes=20)
# Operations with existence errors should still show up
check_query_stats(tbl.get(101), reads=1, writes=0)
check_query_stats(tbl.get(101).delete(), reads=0, writes=1)
check_query_stats(tbl.get(101).update({}), reads=0, writes=1)
check_query_stats(tbl.get(101).update(lambda x: {}), reads=0, writes=1)
check_query_stats(tbl.get(101).replace(lambda x: x), reads=0, writes=1)
check_query_stats(tbl.insert({'id': 0}), reads=0, writes=1)
# Add a sindex, make sure that all rows are represented
check_query_stats(r.expr([tbl.index_create('fake', r.row['id']), tbl.index_wait()]), reads=100, writes=100)
check_query_stats(tbl.count(), reads=100, writes=0)
check_query_stats(tbl.between(20, 40).count(), reads=20, writes=0)
check_query_stats(tbl.between(20, 40).update(r.branch(r.row['id'].mod(2).eq(1), {}, {'value': 'dummy'})),
reads=20, writes=20)
# Count on a sindex
check_query_stats(tbl.between(r.minval, r.maxval, index='double').count(), reads=100, writes=0)
check_query_stats(tbl.between(r.minval, r.maxval, index='value').count(), reads=10, writes=0)
# Can't test dropping the index because the stats will disappear once it's gone
backfiller_stats_before = get_stats(servers[0])
backfillee_stats_before = get_stats(servers[1])
# Shard the table into two shards (1 primary replica on each server)
tbl.config().update({'shards': [ primary_shard(servers[0]),
primary_shard(servers[1]) ]}).run()
tbl.wait().run()
# Manually check stats here as the number of reads/writes will be unpredictable
backfiller_stats_after = get_stats(servers[0])
backfillee_stats_after = get_stats(servers[1])
backfiller_reads = backfiller_stats_after['reads'] - backfiller_stats_before['reads']
backfiller_writes = backfiller_stats_after['writes'] - backfiller_stats_before['writes']
backfillee_reads = backfillee_stats_after['reads'] - backfillee_stats_before['reads']
backfillee_writes = backfillee_stats_after['writes'] - backfillee_stats_before['writes']
if backfiller_reads > 60 or backfiller_reads < 40: # Backfiller should transfer roughly half the rows
print("During backfill, on backfiller: Expected between 40 and 60 reads but got %d" % backfiller_reads)
if backfillee_reads != 0: # Backfillee didn't have any rows to read - we only want writes
print("During backfill, on backfillee: Expected 0 reads but got %d" % backfillee_reads)
if backfiller_writes != backfiller_reads: # Backfiller should have deleted the rows it transfered
print("During backfill, on backfiller: Expected %d writes but got %d" % (backfiller_reads, backfiller_writes))
if backfillee_writes != backfiller_reads: # Backfillee should have written the same number of rows transferred
print("During backfill, on backfillee: Expected %d writes but got %d" % (backfiller_reads, backfillee_writes))
cluster.check_and_stop()
print('Done.')
| agpl-3.0 |
thelabnyc/django-oscar-wfrs | src/wellsfargo/migrations/0035_auto_20200326_1412.py | 1 | 13104 | # Generated by Django 2.2.11 on 2020-03-26 18:12
from django.db import migrations
from django.db.models.functions import Concat, Substr
from django.db.models import F, Value, CharField
def trunc(NewCls, field_name, value):
if type(value) is str:
max_length = NewCls._meta.get_field(field_name).max_length
return value[:max_length]
return value
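# Illustrative behaviour (field name and max_length below are hypothetical):
# if the "first_name" field on CreditApplicationApplicant has max_length=30,
# then trunc(CreditApplicationApplicant, "first_name", "x" * 100) returns the
# first 30 characters, while non-string values (None, dates, model instances)
# pass through unchanged.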
def migrate_credit_app_data(apps, schema_editor):
ContentType = apps.get_model("contenttypes", "ContentType")
AccountInquiryResult = apps.get_model("wellsfargo", "AccountInquiryResult")
# Old Cedit Apps
USCreditApp = apps.get_model("wellsfargo", "USCreditApp")
USJointCreditApp = apps.get_model("wellsfargo", "USJointCreditApp")
uscreditapp_type = ContentType.objects.get_for_model(USCreditApp)
usjointcreditapp_type = ContentType.objects.get_for_model(USJointCreditApp)
# New unified Credit App model
CreditApplicationAddress = apps.get_model("wellsfargo", "CreditApplicationAddress")
CreditApplicationApplicant = apps.get_model(
"wellsfargo", "CreditApplicationApplicant"
)
CreditApplication = apps.get_model("wellsfargo", "CreditApplication")
# Migrate USCreditApp
for old_app in USCreditApp.objects.select_related(
"credentials", "user", "submitting_user"
).all():
main_applicant_addr = CreditApplicationAddress.objects.create(
address_line_1=trunc(
CreditApplicationAddress, "address_line_1", old_app.main_address_line1
),
address_line_2=trunc(
CreditApplicationAddress, "address_line_2", old_app.main_address_line2
),
city=trunc(CreditApplicationAddress, "city", old_app.main_address_city),
state_code=trunc(
CreditApplicationAddress, "state_code", old_app.main_address_state
),
postal_code=trunc(
CreditApplicationAddress, "postal_code", old_app.main_address_postcode
),
)
main_applicant = CreditApplicationApplicant.objects.create(
first_name=trunc(
CreditApplicationApplicant, "first_name", old_app.main_first_name
),
last_name=trunc(
CreditApplicationApplicant, "last_name", old_app.main_last_name
),
middle_initial=trunc(
CreditApplicationApplicant,
"middle_initial",
old_app.main_middle_initial,
),
date_of_birth=trunc(
CreditApplicationApplicant, "date_of_birth", old_app.main_date_of_birth
),
ssn=trunc(CreditApplicationApplicant, "ssn", old_app.main_ssn),
annual_income=trunc(
CreditApplicationApplicant, "annual_income", old_app.main_annual_income
),
email_address=trunc(
CreditApplicationApplicant, "email_address", old_app.email
),
home_phone=trunc(
CreditApplicationApplicant, "home_phone", old_app.main_home_phone
),
mobile_phone=trunc(
CreditApplicationApplicant, "mobile_phone", old_app.main_cell_phone
),
work_phone=trunc(
CreditApplicationApplicant, "work_phone", old_app.main_employer_phone
),
employer_name=trunc(
CreditApplicationApplicant, "employer_name", old_app.main_employer_name
),
housing_status=trunc(
CreditApplicationApplicant,
"housing_status",
old_app.main_housing_status,
),
address=trunc(CreditApplicationApplicant, "address", main_applicant_addr),
)
new_app = CreditApplication.objects.create(
requested_credit_limit=trunc(
CreditApplication, "requested_credit_limit", old_app.purchase_price
),
language_preference=trunc(
CreditApplication, "language_preference", old_app.language
),
salesperson=trunc(
CreditApplication, "salesperson", old_app.new_sales_person
),
main_applicant=trunc(CreditApplication, "main_applicant", main_applicant),
joint_applicant=trunc(CreditApplication, "joint_applicant", None),
status=trunc(CreditApplication, "status", old_app.status),
credentials=trunc(CreditApplication, "credentials", old_app.credentials),
application_source=trunc(
CreditApplication, "application_source", old_app.application_source
),
user=trunc(CreditApplication, "user", old_app.user),
ip_address=trunc(CreditApplication, "ip_address", old_app.ip_address),
submitting_user=trunc(
CreditApplication, "submitting_user", old_app.submitting_user
),
encrypted_account_number=trunc(
CreditApplication, "encrypted_account_number", None
),
last4_account_number=trunc(
CreditApplication, "last4_account_number", old_app.last4_account_number
),
created_datetime=trunc(
CreditApplication, "created_datetime", old_app.created_datetime
),
modified_datetime=trunc(
CreditApplication, "modified_datetime", old_app.modified_datetime
),
)
AccountInquiryResult.objects.filter(
credit_app_type__pk=uscreditapp_type.id, credit_app_id=old_app.pk
).update(new_credit_app=new_app)
# Migrate USJointCreditApp
for old_app in USJointCreditApp.objects.select_related(
"credentials", "user", "submitting_user"
).all():
main_applicant_addr = CreditApplicationAddress.objects.create(
address_line_1=trunc(
CreditApplicationAddress, "address_line_1", old_app.main_address_line1
),
address_line_2=trunc(
CreditApplicationAddress, "address_line_2", old_app.main_address_line2
),
city=trunc(CreditApplicationAddress, "city", old_app.main_address_city),
state_code=trunc(
CreditApplicationAddress, "state_code", old_app.main_address_state
),
postal_code=trunc(
CreditApplicationAddress, "postal_code", old_app.main_address_postcode
),
)
main_applicant = CreditApplicationApplicant.objects.create(
first_name=trunc(
CreditApplicationApplicant, "first_name", old_app.main_first_name
),
last_name=trunc(
CreditApplicationApplicant, "last_name", old_app.main_last_name
),
middle_initial=trunc(
CreditApplicationApplicant,
"middle_initial",
old_app.main_middle_initial,
),
date_of_birth=trunc(
CreditApplicationApplicant, "date_of_birth", old_app.main_date_of_birth
),
ssn=trunc(CreditApplicationApplicant, "ssn", old_app.main_ssn),
annual_income=trunc(
CreditApplicationApplicant, "annual_income", old_app.main_annual_income
),
email_address=trunc(
CreditApplicationApplicant, "email_address", old_app.email
),
home_phone=trunc(
CreditApplicationApplicant, "home_phone", old_app.main_home_phone
),
mobile_phone=trunc(
CreditApplicationApplicant, "mobile_phone", old_app.main_cell_phone
),
work_phone=trunc(
CreditApplicationApplicant, "work_phone", old_app.main_employer_phone
),
employer_name=trunc(
CreditApplicationApplicant, "employer_name", old_app.main_employer_name
),
housing_status=trunc(
CreditApplicationApplicant,
"housing_status",
old_app.main_housing_status,
),
address=trunc(CreditApplicationApplicant, "address", main_applicant_addr),
)
joint_applicant_addr = CreditApplicationAddress.objects.create(
address_line_1=trunc(
CreditApplicationAddress, "address_line_1", old_app.joint_address_line1
),
address_line_2=trunc(
CreditApplicationAddress, "address_line_2", old_app.joint_address_line2
),
city=trunc(CreditApplicationAddress, "city", old_app.joint_address_city),
state_code=trunc(
CreditApplicationAddress, "state_code", old_app.joint_address_state
),
postal_code=trunc(
CreditApplicationAddress, "postal_code", old_app.joint_address_postcode
),
)
joint_applicant = CreditApplicationApplicant.objects.create(
first_name=trunc(
CreditApplicationApplicant, "first_name", old_app.joint_first_name
),
last_name=trunc(
CreditApplicationApplicant, "last_name", old_app.joint_last_name
),
middle_initial=trunc(
CreditApplicationApplicant,
"middle_initial",
old_app.joint_middle_initial,
),
date_of_birth=trunc(
CreditApplicationApplicant, "date_of_birth", old_app.joint_date_of_birth
),
ssn=trunc(CreditApplicationApplicant, "ssn", old_app.joint_ssn),
annual_income=trunc(
CreditApplicationApplicant, "annual_income", old_app.joint_annual_income
),
email_address=trunc(CreditApplicationApplicant, "email_address", None),
home_phone="",
mobile_phone=trunc(
CreditApplicationApplicant, "mobile_phone", old_app.joint_cell_phone
),
work_phone=trunc(
CreditApplicationApplicant, "work_phone", old_app.joint_employer_phone
),
employer_name=trunc(
CreditApplicationApplicant, "employer_name", old_app.joint_employer_name
),
housing_status=trunc(CreditApplicationApplicant, "housing_status", None),
address=trunc(CreditApplicationApplicant, "address", joint_applicant_addr),
)
new_app = CreditApplication.objects.create(
requested_credit_limit=trunc(
CreditApplication, "requested_credit_limit", old_app.purchase_price
),
language_preference=trunc(
CreditApplication, "language_preference", old_app.language
),
salesperson=trunc(
CreditApplication, "salesperson", old_app.new_sales_person
),
main_applicant=trunc(CreditApplication, "main_applicant", main_applicant),
joint_applicant=trunc(
CreditApplication, "joint_applicant", joint_applicant
),
status=trunc(CreditApplication, "status", old_app.status),
credentials=trunc(CreditApplication, "credentials", old_app.credentials),
application_source=trunc(
CreditApplication, "application_source", old_app.application_source
),
user=trunc(CreditApplication, "user", old_app.user),
ip_address=trunc(CreditApplication, "ip_address", old_app.ip_address),
submitting_user=trunc(
CreditApplication, "submitting_user", old_app.submitting_user
),
encrypted_account_number=trunc(
CreditApplication, "encrypted_account_number", None
),
last4_account_number=trunc(
CreditApplication, "last4_account_number", old_app.last4_account_number
),
created_datetime=trunc(
CreditApplication, "created_datetime", old_app.created_datetime
),
modified_datetime=trunc(
CreditApplication, "modified_datetime", old_app.modified_datetime
),
)
AccountInquiryResult.objects.filter(
credit_app_type__pk=usjointcreditapp_type.id, credit_app_id=old_app.pk
).update(new_credit_app=new_app)
def migrate_account_inquiries(apps, schema_editor):
AccountInquiryResult = apps.get_model("wellsfargo", "AccountInquiryResult")
AccountInquiryResult.objects.all().update(
main_applicant_full_name=Substr(
Concat(
F("last_name"), Value(", "), F("first_name"), output_field=CharField()
),
1,
50,
),
)
class Migration(migrations.Migration):
dependencies = [
("wellsfargo", "0034_auto_20200326_1411"),
]
operations = [
migrations.RunPython(migrate_account_inquiries),
migrations.RunPython(migrate_credit_app_data),
]
| isc |
airbnb/kafka | tests/kafkatest/directory_layout/kafka_path.py | 10 | 5383 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from kafkatest.version import get_version, KafkaVersion, DEV_BRANCH
"""This module serves a few purposes:
First, it gathers information about path layout in a single place, and second, it
makes the layout of the Kafka installation pluggable, so that users are not forced
to use the layout assumed in the KafkaPathResolver class.
To run system tests using your own path resolver, use for example:
ducktape <TEST_PATH> --globals '{"kafka-path-resolver": "my.path.resolver.CustomResolverClass"}'
"""
SCRATCH_ROOT = "/mnt"
KAFKA_INSTALL_ROOT = "/opt"
KAFKA_PATH_RESOLVER_KEY = "kafka-path-resolver"
KAFKA_PATH_RESOLVER = "kafkatest.directory_layout.kafka_path.KafkaSystemTestPathResolver"
# Variables for jar path resolution
CORE_JAR_NAME = "core"
CORE_LIBS_JAR_NAME = "core-libs"
CORE_DEPENDANT_TEST_LIBS_JAR_NAME = "core-dependant-testlibs"
TOOLS_JAR_NAME = "tools"
TOOLS_DEPENDANT_TEST_LIBS_JAR_NAME = "tools-dependant-libs"
JARS = {
"dev": {
CORE_JAR_NAME: "core/build/*/*.jar",
CORE_LIBS_JAR_NAME: "core/build/libs/*.jar",
CORE_DEPENDANT_TEST_LIBS_JAR_NAME: "core/build/dependant-testlibs/*.jar",
TOOLS_JAR_NAME: "tools/build/libs/kafka-tools*.jar",
TOOLS_DEPENDANT_TEST_LIBS_JAR_NAME: "tools/build/dependant-libs*/*.jar"
}
}
def create_path_resolver(context, project="kafka"):
"""Factory for generating a path resolver class
This will first check for a fully qualified path resolver classname in context.globals.
If present, construct a new instance, else default to KafkaSystemTestPathResolver
"""
assert project is not None
if KAFKA_PATH_RESOLVER_KEY in context.globals:
resolver_fully_qualified_classname = context.globals[KAFKA_PATH_RESOLVER_KEY]
else:
resolver_fully_qualified_classname = KAFKA_PATH_RESOLVER
# Using the fully qualified classname, import the resolver class
(module_name, resolver_class_name) = resolver_fully_qualified_classname.rsplit('.', 1)
cluster_mod = importlib.import_module(module_name)
path_resolver_class = getattr(cluster_mod, resolver_class_name)
path_resolver = path_resolver_class(context, project)
return path_resolver
class KafkaPathResolverMixin(object):
"""Mixin to automatically provide pluggable path resolution functionality to any class using it.
Keep life simple, and don't add a constructor to this class:
Since use of a mixin entails multiple inheritence, it is *much* simpler to reason about the interaction of this
class with subclasses if we don't have to worry about method resolution order, constructor signatures etc.
"""
@property
def path(self):
if not hasattr(self, "_path"):
setattr(self, "_path", create_path_resolver(self.context, "kafka"))
if hasattr(self.context, "logger") and self.context.logger is not None:
self.context.logger.debug("Using path resolver %s" % self._path.__class__.__name__)
return self._path
class KafkaSystemTestPathResolver(object):
"""Path resolver for Kafka system tests which assumes the following layout:
/opt/kafka-dev # Current version of kafka under test
/opt/kafka-0.9.0.1 # Example of an older version of kafka installed from tarball
/opt/kafka-<version> # Other previous versions of kafka
...
"""
def __init__(self, context, project="kafka"):
self.context = context
self.project = project
def home(self, node_or_version=DEV_BRANCH):
version = self._version(node_or_version)
home_dir = self.project
if version is not None:
home_dir += "-%s" % str(version)
return os.path.join(KAFKA_INSTALL_ROOT, home_dir)
def bin(self, node_or_version=DEV_BRANCH):
version = self._version(node_or_version)
return os.path.join(self.home(version), "bin")
def script(self, script_name, node_or_version=DEV_BRANCH):
version = self._version(node_or_version)
return os.path.join(self.bin(version), script_name)
def jar(self, jar_name, node_or_version=DEV_BRANCH):
version = self._version(node_or_version)
return os.path.join(self.home(version), JARS[str(version)][jar_name])
def scratch_space(self, service_instance):
return os.path.join(SCRATCH_ROOT, service_instance.service_id)
def _version(self, node_or_version):
if isinstance(node_or_version, KafkaVersion):
return node_or_version
else:
return get_version(node_or_version)
| apache-2.0 |
OpenERPJeff/gooderp_addons | sell/__openerp__.py | 6 | 3544 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 上海开阖软件有限公司 (http://www.osbzr.com).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
'name': 'GOODERP Sales Module',
'author': 'jeff@osbzr.com,flora@osbzr.com',
'website': 'https://www.osbzr.com',
'category': 'gooderp',
'description':
'''
This module provides convenient management of sales.
Sales are made by creating a sales order; once approved, the goods on the order lines are sold to the customer.
Sales returns are made by creating a sales return order; once approved, the goods on the return lines are taken back.
Sales adjustments are made by creating a sales amendment order against an original sales order; once approved, the goods on the amendment lines are applied to the original order lines.
Sales reports include:
sales order tracking;
sales details;
sales summary (by product, by customer, by salesperson);
sales receipts overview;
top ten selling products.
''',
'version': '11.11',
'depends': ['warehouse', 'partner_address', 'staff'],
'data': [
'data/sell_data.xml',
'security/groups.xml',
'security/rules.xml',
'views/sell_view.xml',
'views/customer_view.xml',
'views/approve_multi_sale_order_view.xml',
'report/customer_statements_view.xml',
'report/sell_order_track_view.xml',
'report/sell_order_detail_view.xml',
'report/sell_summary_goods_view.xml',
'report/sell_summary_partner_view.xml',
'report/sell_summary_staff_view.xml',
'report/sell_receipt_view.xml',
'report/sell_top_ten_view.xml',
'report/sell_summary_view.xml',
'wizard/customer_statements_wizard_view.xml',
'wizard/sell_order_track_wizard_view.xml',
'wizard/sell_order_detail_wizard_view.xml',
'wizard/sell_summary_goods_wizard_view.xml',
'wizard/sell_summary_partner_wizard_view.xml',
'wizard/sell_summary_staff_wizard_view.xml',
'wizard/sell_receipt_wizard_view.xml',
'wizard/sell_top_ten_wizard_view.xml',
'security/ir.model.access.csv',
'report/report_data.xml',
'data/home_page_data.xml'
],
'demo': [
'data/sell_demo.xml',
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
cloudera/hue | desktop/core/ext-py/thrift-0.13.0/src/protocol/TCompactProtocol.py | 15 | 15465 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from .TProtocol import TType, TProtocolBase, TProtocolException, TProtocolFactory, checkIntegerLimits
from struct import pack, unpack
from ..compat import binary_to_str, str_to_binary
__all__ = ['TCompactProtocol', 'TCompactProtocolFactory']
CLEAR = 0
FIELD_WRITE = 1
VALUE_WRITE = 2
CONTAINER_WRITE = 3
BOOL_WRITE = 4
FIELD_READ = 5
CONTAINER_READ = 6
VALUE_READ = 7
BOOL_READ = 8
def make_helper(v_from, container):
def helper(func):
def nested(self, *args, **kwargs):
assert self.state in (v_from, container), (self.state, v_from, container)
return func(self, *args, **kwargs)
return nested
return helper
writer = make_helper(VALUE_WRITE, CONTAINER_WRITE)
reader = make_helper(VALUE_READ, CONTAINER_READ)
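# The helpers above produce state-guard decorators: methods wrapped with
# ``writer`` (e.g. writeI32 below) assert the protocol is mid-value or
# mid-container before emitting bytes; ``reader`` does the same for reads.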
def makeZigZag(n, bits):
checkIntegerLimits(n, bits)
return (n << 1) ^ (n >> (bits - 1))
def fromZigZag(n):
return (n >> 1) ^ -(n & 1)
def writeVarint(trans, n):
assert n >= 0, "Input to TCompactProtocol writeVarint cannot be negative!"
out = bytearray()
while True:
if n & ~0x7f == 0:
out.append(n)
break
else:
out.append((n & 0xff) | 0x80)
n = n >> 7
trans.write(bytes(out))
def readVarint(trans):
result = 0
shift = 0
while True:
x = trans.readAll(1)
byte = ord(x)
result |= (byte & 0x7f) << shift
if byte >> 7 == 0:
return result
shift += 7
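# Hedged sketch (not part of the wire protocol): a round-trip of the zigzag and
# varint helpers above using thrift's in-memory transport. This helper is
# illustrative only and is never called by the protocol implementation.
def _zigzag_varint_demo(value=-3, bits=16):
    """Encode ``value`` with zigzag + varint, then decode it back."""
    from ..transport import TTransport
    out = TTransport.TMemoryBuffer()
    writeVarint(out, makeZigZag(value, bits))  # -3 zigzags to 5, i.e. b'\x05'
    back = TTransport.TMemoryBuffer(out.getvalue())
    return fromZigZag(readVarint(back))  # == -3 for the defaults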
class CompactType(object):
STOP = 0x00
TRUE = 0x01
FALSE = 0x02
BYTE = 0x03
I16 = 0x04
I32 = 0x05
I64 = 0x06
DOUBLE = 0x07
BINARY = 0x08
LIST = 0x09
SET = 0x0A
MAP = 0x0B
STRUCT = 0x0C
CTYPES = {
TType.STOP: CompactType.STOP,
TType.BOOL: CompactType.TRUE, # used for collection
TType.BYTE: CompactType.BYTE,
TType.I16: CompactType.I16,
TType.I32: CompactType.I32,
TType.I64: CompactType.I64,
TType.DOUBLE: CompactType.DOUBLE,
TType.STRING: CompactType.BINARY,
TType.STRUCT: CompactType.STRUCT,
TType.LIST: CompactType.LIST,
TType.SET: CompactType.SET,
TType.MAP: CompactType.MAP,
}
TTYPES = {}
for k, v in CTYPES.items():
TTYPES[v] = k
TTYPES[CompactType.FALSE] = TType.BOOL
del k
del v
class TCompactProtocol(TProtocolBase):
"""Compact implementation of the Thrift protocol driver."""
PROTOCOL_ID = 0x82
VERSION = 1
VERSION_MASK = 0x1f
TYPE_MASK = 0xe0
TYPE_BITS = 0x07
TYPE_SHIFT_AMOUNT = 5
def __init__(self, trans,
string_length_limit=None,
container_length_limit=None):
TProtocolBase.__init__(self, trans)
self.state = CLEAR
self.__last_fid = 0
self.__bool_fid = None
self.__bool_value = None
self.__structs = []
self.__containers = []
self.string_length_limit = string_length_limit
self.container_length_limit = container_length_limit
def _check_string_length(self, length):
self._check_length(self.string_length_limit, length)
def _check_container_length(self, length):
self._check_length(self.container_length_limit, length)
def __writeVarint(self, n):
writeVarint(self.trans, n)
def writeMessageBegin(self, name, type, seqid):
assert self.state == CLEAR
self.__writeUByte(self.PROTOCOL_ID)
self.__writeUByte(self.VERSION | (type << self.TYPE_SHIFT_AMOUNT))
# The sequence id is a signed 32-bit integer but the compact protocol
# writes this out as a "var int" which is always positive, and attempting
# to write a negative number results in an infinite loop, so we may
# need to do some conversion here...
tseqid = seqid
if tseqid < 0:
tseqid = 2147483648 + (2147483648 + tseqid)
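# e.g. seqid == -1 becomes 4294967295 here; readMessageBegin reverses this.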
self.__writeVarint(tseqid)
self.__writeBinary(str_to_binary(name))
self.state = VALUE_WRITE
def writeMessageEnd(self):
assert self.state == VALUE_WRITE
self.state = CLEAR
def writeStructBegin(self, name):
assert self.state in (CLEAR, CONTAINER_WRITE, VALUE_WRITE), self.state
self.__structs.append((self.state, self.__last_fid))
self.state = FIELD_WRITE
self.__last_fid = 0
def writeStructEnd(self):
assert self.state == FIELD_WRITE
self.state, self.__last_fid = self.__structs.pop()
def writeFieldStop(self):
self.__writeByte(0)
def __writeFieldHeader(self, type, fid):
delta = fid - self.__last_fid
if 0 < delta <= 15:
self.__writeUByte(delta << 4 | type)
else:
self.__writeByte(type)
self.__writeI16(fid)
self.__last_fid = fid
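# Short-form example: after writing field id 1, a BYTE field with id 2 has
# delta 1, so the header packs into one byte: (1 << 4) | 0x03 == 0x13.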
def writeFieldBegin(self, name, type, fid):
assert self.state == FIELD_WRITE, self.state
if type == TType.BOOL:
self.state = BOOL_WRITE
self.__bool_fid = fid
else:
self.state = VALUE_WRITE
self.__writeFieldHeader(CTYPES[type], fid)
def writeFieldEnd(self):
assert self.state in (VALUE_WRITE, BOOL_WRITE), self.state
self.state = FIELD_WRITE
def __writeUByte(self, byte):
self.trans.write(pack('!B', byte))
def __writeByte(self, byte):
self.trans.write(pack('!b', byte))
def __writeI16(self, i16):
self.__writeVarint(makeZigZag(i16, 16))
def __writeSize(self, i32):
self.__writeVarint(i32)
def writeCollectionBegin(self, etype, size):
assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
if size <= 14:
self.__writeUByte(size << 4 | CTYPES[etype])
else:
self.__writeUByte(0xf0 | CTYPES[etype])
self.__writeSize(size)
self.__containers.append(self.state)
self.state = CONTAINER_WRITE
writeSetBegin = writeCollectionBegin
writeListBegin = writeCollectionBegin
def writeMapBegin(self, ktype, vtype, size):
assert self.state in (VALUE_WRITE, CONTAINER_WRITE), self.state
if size == 0:
self.__writeByte(0)
else:
self.__writeSize(size)
self.__writeUByte(CTYPES[ktype] << 4 | CTYPES[vtype])
self.__containers.append(self.state)
self.state = CONTAINER_WRITE
def writeCollectionEnd(self):
assert self.state == CONTAINER_WRITE, self.state
self.state = self.__containers.pop()
writeMapEnd = writeCollectionEnd
writeSetEnd = writeCollectionEnd
writeListEnd = writeCollectionEnd
def writeBool(self, bool):
if self.state == BOOL_WRITE:
if bool:
ctype = CompactType.TRUE
else:
ctype = CompactType.FALSE
self.__writeFieldHeader(ctype, self.__bool_fid)
elif self.state == CONTAINER_WRITE:
if bool:
self.__writeByte(CompactType.TRUE)
else:
self.__writeByte(CompactType.FALSE)
else:
raise AssertionError("Invalid state in compact protocol")
writeByte = writer(__writeByte)
writeI16 = writer(__writeI16)
@writer
def writeI32(self, i32):
self.__writeVarint(makeZigZag(i32, 32))
@writer
def writeI64(self, i64):
self.__writeVarint(makeZigZag(i64, 64))
@writer
def writeDouble(self, dub):
self.trans.write(pack('<d', dub))
def __writeBinary(self, s):
self.__writeSize(len(s))
self.trans.write(s)
writeBinary = writer(__writeBinary)
def readFieldBegin(self):
assert self.state == FIELD_READ, self.state
type = self.__readUByte()
if type & 0x0f == TType.STOP:
return (None, 0, 0)
delta = type >> 4
if delta == 0:
fid = self.__readI16()
else:
fid = self.__last_fid + delta
self.__last_fid = fid
type = type & 0x0f
if type == CompactType.TRUE:
self.state = BOOL_READ
self.__bool_value = True
elif type == CompactType.FALSE:
self.state = BOOL_READ
self.__bool_value = False
else:
self.state = VALUE_READ
return (None, self.__getTType(type), fid)
def readFieldEnd(self):
assert self.state in (VALUE_READ, BOOL_READ), self.state
self.state = FIELD_READ
def __readUByte(self):
result, = unpack('!B', self.trans.readAll(1))
return result
def __readByte(self):
result, = unpack('!b', self.trans.readAll(1))
return result
def __readVarint(self):
return readVarint(self.trans)
def __readZigZag(self):
return fromZigZag(self.__readVarint())
def __readSize(self):
result = self.__readVarint()
if result < 0:
raise TProtocolException("Length < 0")
return result
def readMessageBegin(self):
assert self.state == CLEAR
proto_id = self.__readUByte()
if proto_id != self.PROTOCOL_ID:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad protocol id in the message: %d' % proto_id)
ver_type = self.__readUByte()
type = (ver_type >> self.TYPE_SHIFT_AMOUNT) & self.TYPE_BITS
version = ver_type & self.VERSION_MASK
if version != self.VERSION:
raise TProtocolException(TProtocolException.BAD_VERSION,
'Bad version: %d (expect %d)' % (version, self.VERSION))
seqid = self.__readVarint()
# the sequence id is a compact "var int" which is treated as unsigned,
# however the sequence id is actually signed...
if seqid > 2147483647:
seqid = -2147483648 - (2147483648 - seqid)
name = binary_to_str(self.__readBinary())
return (name, type, seqid)
def readMessageEnd(self):
assert self.state == CLEAR
assert len(self.__structs) == 0
def readStructBegin(self):
assert self.state in (CLEAR, CONTAINER_READ, VALUE_READ), self.state
self.__structs.append((self.state, self.__last_fid))
self.state = FIELD_READ
self.__last_fid = 0
def readStructEnd(self):
assert self.state == FIELD_READ
self.state, self.__last_fid = self.__structs.pop()
def readCollectionBegin(self):
assert self.state in (VALUE_READ, CONTAINER_READ), self.state
size_type = self.__readUByte()
size = size_type >> 4
type = self.__getTType(size_type)
if size == 15:
size = self.__readSize()
self._check_container_length(size)
self.__containers.append(self.state)
self.state = CONTAINER_READ
return type, size
readSetBegin = readCollectionBegin
readListBegin = readCollectionBegin
def readMapBegin(self):
assert self.state in (VALUE_READ, CONTAINER_READ), self.state
size = self.__readSize()
self._check_container_length(size)
types = 0
if size > 0:
types = self.__readUByte()
vtype = self.__getTType(types)
ktype = self.__getTType(types >> 4)
self.__containers.append(self.state)
self.state = CONTAINER_READ
return (ktype, vtype, size)
def readCollectionEnd(self):
assert self.state == CONTAINER_READ, self.state
self.state = self.__containers.pop()
readSetEnd = readCollectionEnd
readListEnd = readCollectionEnd
readMapEnd = readCollectionEnd
def readBool(self):
if self.state == BOOL_READ:
return self.__bool_value == CompactType.TRUE
elif self.state == CONTAINER_READ:
return self.__readByte() == CompactType.TRUE
else:
raise AssertionError("Invalid state in compact protocol: %d" %
self.state)
readByte = reader(__readByte)
__readI16 = __readZigZag
readI16 = reader(__readZigZag)
readI32 = reader(__readZigZag)
readI64 = reader(__readZigZag)
@reader
def readDouble(self):
buff = self.trans.readAll(8)
val, = unpack('<d', buff)
return val
def __readBinary(self):
size = self.__readSize()
self._check_string_length(size)
return self.trans.readAll(size)
readBinary = reader(__readBinary)
def __getTType(self, byte):
return TTYPES[byte & 0x0f]
class TCompactProtocolFactory(TProtocolFactory):
def __init__(self,
string_length_limit=None,
container_length_limit=None):
self.string_length_limit = string_length_limit
self.container_length_limit = container_length_limit
def getProtocol(self, trans):
return TCompactProtocol(trans,
self.string_length_limit,
self.container_length_limit)
class TCompactProtocolAccelerated(TCompactProtocol):
"""C-Accelerated version of TCompactProtocol.
This class does not override any of TCompactProtocol's methods,
but the generated code recognizes it directly and will call into
our C module to do the encoding, bypassing this object entirely.
We inherit from TCompactProtocol so that the normal TCompactProtocol
encoding can happen if the fastbinary module doesn't work for some
reason.
To disable this behavior, pass fallback=False constructor argument.
In order to take advantage of the C module, just use
TCompactProtocolAccelerated instead of TCompactProtocol.
"""
def __init__(self, *args, **kwargs):
fallback = kwargs.pop('fallback', True)
super(TCompactProtocolAccelerated, self).__init__(*args, **kwargs)
try:
from thrift.protocol import fastbinary
except ImportError:
if not fallback:
raise
else:
self._fast_decode = fastbinary.decode_compact
self._fast_encode = fastbinary.encode_compact
class TCompactProtocolAcceleratedFactory(TProtocolFactory):
def __init__(self,
string_length_limit=None,
container_length_limit=None,
fallback=True):
self.string_length_limit = string_length_limit
self.container_length_limit = container_length_limit
self._fallback = fallback
def getProtocol(self, trans):
return TCompactProtocolAccelerated(
trans,
string_length_limit=self.string_length_limit,
container_length_limit=self.container_length_limit,
fallback=self._fallback)
| apache-2.0 |
quadrismegistus/prosodic | meters/litlab.py | 1 | 13830 | ############################################
# [config.py]
# CONFIGURATION SETTINGS FOR A PARTICULAR METER
#
#
# Set the long-form name of this meter
name = "Literary Lab's Meter"
#
# [Do not remove or uncomment the following line]
Cs={}
############################################
############################################
# STRUCTURE PARAMETERS
#
# Parameters subject to conscious control by the poet. Kiparsky & Hanson (1996)
# call these "formally independent of phonological structure." By contrast,
# "realization parameters"--e.g., the size of a metrical position, which positions
# are regulated, and other constraints--"determine the way the structure is
# linguistically manifested, and are dependent on the prosodic givens of language."
#
#
####
# [Number of feet in a line]
#
#Cs['number_feet!=2'] = 1 # require dimeter
#Cs['number_feet!=3'] = 1 # require trimeter
#Cs['number_feet!=4'] = 1 # require tetrameter
#Cs['number_feet!=5'] = 1 # require pentameter
#Cs['number_feet!=6'] = 1 # require hexameter
#Cs['number_feet!=7'] = 1 # require heptameter
#
#
####
# [Headedness of the line]
#
#Cs['headedness!=falling'] = 1 # require a falling rhythm (e.g. trochaic, dactylic)
#Cs['headedness!=rising'] = 1 # require a rising rhythm (e.g., iambic, anapestic)
#
############################################
############################################
# REALIZATION PARAMETERS
#
# All subsequent constraints can be seen as "realization parameters."
# See note to "structure parameters" above for more information.
#
#############################################
# METRICAL PARSING: POSITION SIZE
#
# Select how many syllables are at least *possible* in strong or weak positions
# cf. Kiparsky & Hanson's "position size" parameter ("Parametric Theory" 1996)
#
#
######
# [Maximum position size]
#
# The maximum number of syllables allowed in strong metrical positions (i.e. "s")
maxS=2
#
# The maximum number of syllables allowed in weak metrical positions (i.e. "w")
maxW=2
#
#
######
# [Minimum position size]
#
# (Recommended) Positions are at minimum one syllable in size
splitheavies=0
#
# (Unrecommended) Allow positions to be as small as a single mora
# i.e. (a split heavy syllable can straddle two metrical positions)
#splitheavies=1
############################################
############################################
# METRICAL PARSING: METRICAL CONSTRAINTS
#
# Here you can configure the constraints used by the metrical parser.
# Each constraint is expressed in the form:
# Cs['(constraint name)']=(constraint weight)
# Constraint weights do not affect harmonic bounding (i.e. which parses
# survive as possibilities), but they do affect how those possibilities
# are sorted to select the "best" parse.
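#
# For example (illustrative only; these exact lines are not part of this
# meter's definition), a config could require pentameter and penalize
# stressed syllables in weak positions like so:
#Cs['number_feet!=5'] = 1
#Cs['stress.w=>-p']=2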
#
#
######
# [Constraints regulating the 'STRENGTH' of a syllable]
#
# A syllable is strong if it is a peak in a polysyllabic word:
# the syllables in 'liberty', stressed-unstressed-unstressed,
# are, in terms of *strength*, strong-weak-neutral, because
# the first syllable is more stressed than its neighbor;
# the second syllable less stressed; and the third equally stressed.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any weak syllables ("troughs"):
Cs['strength.s=>-u']=3
#
# A weak metrical position may not contain any strong syllables ("peaks"):
# [Kiparsky and Hanson believe this is Shakespeare's meter]
Cs['strength.w=>-p']=3
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one strong syllable:
#Cs['strength.s=>p']=3
#
# A weak metrical position should contain at least one weak syllable:
#Cs['strength.w=>u']=3
#
#
#
######
# [Constraints regulating the STRESS of a syllable]
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any unstressed syllables:
# [Kiparsky and Hanson believe this is Hopkins' meter]
Cs['stress.s=>-u']=2
#
# A weak metrical position should not contain any stressed syllables:
Cs['stress.w=>-p']=2
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one stressed syllable:
#Cs['stress.s=>p']=2
#
# A weak metrical position must contain at least one unstressed syllable;
#Cs['stress.w=>u']=2
#
#
#
######
# [Constraints regulating HEADS of PHRASES]
#
# Phrasal heads as defined by Metrical Tree.
# The head of a phrase is the rightmost constituent of a local branch.
# In "Come to one mark, as many ways meet in one town", the phrasal heads
# are "mark" (one mark), "ways" (many ways), and "town" (one town).
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any non-heads of phrases (dependents in branching phrases):
#Cs['phrasal_head.s=>-u']=10
#
# A weak metrical position should not contain any heads of phrases:
#Cs['phrasal_head.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position must contain at least one head of phrase:
#Cs['phrasal_head.s=>p']=2
#
# A weak metrical position must contain at least one non-head of phrase;
#Cs['phrasal_head.w=>u']=2
#
#
#
#######
# [Constraints regulating PHRASAL STRENGTH]
#
# Phrasal 'strength' as defined by Metrical Tree. Similar to "peaks" and "valleys"
# of Bruce Hayes' "A Grid-based Theory of Meter" (1983). A word is phrasally strong,
# or a phrasal "peak", if a word to its left or right is less phrasally stressed.
# Conversely, a word is phrasally "weak", or a phrasal "valley", if a word to its
# left or right is more phrasally stressed.
#
# [Caveat: A word can be both a stress valley and a stress peak, but a word cannot
# be both phrasally weak and strong. Phrasal peaks take precedence: only if a word
# is not also a phrasal peak can it be registered as a phrasal valley. We might want
# to change this, but this allows us to constrain phrasal strength in a manner
# homologous to how we constrain syllable strength.]
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any phrasally weak syllables:
#Cs['phrasal_strength.s=>-u']=10
#
# A weak metrical position should not contain any phrasally strong syllables:
#Cs['phrasal_strength.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one phrasally strong syllable:
#Cs['phrasal_strength.s=>p']=2
#
# A weak metrical position must contain at least one phrasally weak syllable;
#Cs['phrasal_strength.w=>u']=2
#
#
#
######
# [Constraints regulating PHRASAL STRESS]
#
# Phrasal stress as defined by Metrical Tree. A normalized numeric value is given
# to each word in the sentence, representing least (0) to most (1) stressed. Please
# see Tim Dozat's MetricalTree <https://github.com/tdozat/Metrics/> for more information.
#
# Constraint weights are multiplied against the numeric value; so if phrasally stressed syllables
# are prohibited, and that constraint's weight is "10", and the violating word has a phrasal
# stress value of 0.75, then the resulting violation score would be 10 * 0.75 = 7.5.
#
###
# [Configuration of phrasal stress]
#
# Because most words have *some* phrasal stress, a word is considered phrasally stressed
# if its numeric value is less than what is defined here; otherwise, it is considered
# phrasally unstressed. Default = 2, i.e., primary and secondary stresses count.
Cs['phrasal_stress_threshold']=2
#
#
# Should the above threshold be computed across the sentence, or the line? If this is 'sentence',
# then the above number (say, 2) refers to the secondary stresses in the *sentence.* If 'line',
# then it refers to the secondary stresses in the poetic *line* (i.e., the stress grid is moved up
# such that the biggest stress in the line becomes 1 (primary), etc.)
Cs['phrasal_stress_norm_context_is_sentence']=1
#Cs['phrasal_stress_norm_context_is_line']=1
#
#
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any phrasally unstressed syllables:
#Cs['phrasal_stress.s=>-u']=10
#
# A weak metrical position should not contain any phrasally stressed syllables:
#Cs['phrasal_stress.w=>-p']=1
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one phrasally stressed syllable:
#Cs['phrasal_stress.s=>p']=2
#
# A weak metrical position must contain at least phrasally unstressed syllable:
#Cs['phrasal_stress.w=>u']=2
#
#
#
######
# [Constraints regulating the WEIGHT of a syllable]
#
# The weight of a syllable is its "quantity": short or long.
# These constraints are designed for "quantitative verse",
# as for example in classical Latin and Greek poetry.
#
###
# [Stricter versions:]
#
# A strong metrical position should not contain any light syllables:
#Cs['weight.s=>-u']=2
#
# A weak metrical position should not contain any heavy syllables:
#Cs['weight.w=>-p']=2
#
###
# [Laxer versions:]
#
# A strong metrical position should contain at least one heavy syllable:
#Cs['weight.s=>p']=2
#
# A weak metrical position must contain at least one light syllable;
#Cs['weight.w=>u']=2
#
#
#
######
# [Constraints regulating what's permissible as a DISYLLABIC metrical position]
# [(with thanks to Sam Bowman, who programmed many of these constraints)]
#
###
# [Based on weight:]
#
# A disyllabic metrical position should not contain more than a minimal foot:
# (i.e. allowed positions are syllables weighted light-light or light-heavy)
#Cs['footmin-noHX']=10000
#
# A disyllabic metrical position should be syllables weighted light-light:
#Cs['footmin-noLH-noHX']=1
#
###
# [Categorical:]
#
# A metrical position should not contain more than one syllable:
# [use to discourage disyllabic positions]
Cs['footmin-none']=1
#
# A strong metrical position should not contain more than one syllable:
#Cs['footmin-no-s']=1
#
# A weak metrical position should not contain more than one syllable:
#Cs['footmin-no-w']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *first* or *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions,
# or an initial "extrametrical" syllable]
#Cs['footmin-none-unless-in-first-two-positions']=1
#
# A metrical position should not contain more than one syllable,
# *unless* that metrical position is the *second* in the line:
# [use to discourage disyllabic positions, but not trochaic inversions]
#Cs['footmin-none-unless-in-second-position']=1
#
# A strong metrical position should not contain more than one syllable,
# *unless* it is preceded by a disyllabic *weak* metrical position:
# [use to implement the metrical pattern described by Derek Attridge,
# in The Rhythms of English Poetry (1982), and commented on by Bruce Hayes
# in his review of the book in Language 60.1 (1984).
# e.g. Shakespeare's "when.your|SWEET.IS|ue.your|SWEET.FORM|should|BEAR"
# [this implementation is different in that it only takes into account
# double-weak beats *preceding* -- due to the way in which the parser
# throws away bounded parses as it goes, it might not be possible for now
# to write a constraint referencing future positions]
Cs['footmin-no-s-unless-preceded-by-ww']=10
# [The version that does reference future positions; but appears to be unstable]:
#Cs['attridge-ss-not-by-ww']=10
#
###
# [For disyllabic positions crossing a word boundary...
# (i.e. having two syllables, each from a different word)...
#
# ...it should never cross a word boundary to begin with:
#Cs['footmin-wordbound']=1
#
# ...both words should be function words:
#Cs['footmin-wordbound-bothnotfw']=1
#
# ...at least one word should be a function word:
#Cs['footmin-wordbound-neitherfw']=1
#
# ...the left-hand syllable should be a function-word:
#Cs['footmin-wordbound-leftfw']=1
#
# ...the right-hand syllable should be a function word:
#Cs['footmin-wordbound-rightfw']=1
#
# ...neither word should be a monosyllable:
#Cs['footmin-wordbound-nomono']=1
#
###
# [Miscellaneous constraints relating to disyllabic positions]
#
# A disyllabic metrical position may contain a strong syllable
# of a lexical word only if the syllable is (i) light and
# (ii) followed within the same position by an unstressed
# syllable normally belonging to the same word.
# [written by Sam Bowman]
#Cs['footmin-strongconstraint']=1
#
# The final metrical position of the line should not be 'ww'
# [use to encourage "...LI|ber|TY" rather than "...LI|ber.ty"]
Cs['posthoc-no-final-ww']=2
#
# The final metrical position of the line should not be 'w' or 'ww'
#Cs['posthoc-no-final-w']=2
#
# A line should have all 'ww' or all 'w':
# It works by:
# Nw = Number of weak positions in the line
# Mw = Maximum number of occurrences of 'w' metrical position
# Mww = Maximum number of occurrences of 'ww' metrical position
# M = Whichever is bigger, Mw or Mww
# V = Nw - M
# Violation Score = V * [Weight]
# [use to encourage consistency of meter across line]
# [feel free to make this a decimal number, like 0.25]
Cs['posthoc-standardize-weakpos']=1
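# Worked example (illustrative): a line parsed into three 'w' positions and
# two 'ww' positions has Nw = 5, Mw = 3, Mww = 2, hence M = 3 and
# V = 5 - 3 = 2, giving a violation score of 2 * 1 = 2 with the weight above.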
#
#
#
######
# [MISCELLANEOUS constraints]
#
# A function word can fall only in a weak position:
#Cs['functiontow']=2
#
# An initial syllable must be in a weak position:
#Cs['initialstrong']=2
#
# The first metrical position will not be evaluated
# for any metrical constraints whatsoever:
# [set to 1 to be true]
#Cs['extrametrical-first-pos']=1
#
# The first two metrical positions will not be evaluated
# for any metrical constraints whatsoever:
# [set to 1 to be true]
#Cs['skip_initial_foot']=1
#
# A word should not be an elision [use to discourage elisions]:
Cs['word-elision']=1
#
# A weak metrical position should not contain any syllables
# that are stressed and heavy: [Meter of Finnish "Kalevala"]
#Cs['kalevala.w=>-p']=1
#
# A strong metrical position should not contain any syllables
# that are stressed and light: [Meter of Finnish "Kalevala"]
#Cs['kalevala.s=>-u']=1
############################################
| gpl-3.0 |
nnjpp/shadowsocks | shadowsocks/crypto/rc4_md5.py | 1042 | 1339 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
__all__ = ['ciphers']
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
i=1, padding=1):
md5 = hashlib.md5()
md5.update(key)
md5.update(iv)
rc4_key = md5.digest()
return openssl.OpenSSLCrypto(b'rc4', rc4_key, b'', op)
ciphers = {
'rc4-md5': (16, 16, create_cipher),
}
def test():
from shadowsocks.crypto import util
cipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1)
decipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test()
| apache-2.0 |
pombredanne/redo | redo.py | 1 | 5742 | #!/usr/bin/env python
import sys, os
import options
from helpers import atoi
optspec = """
redo [targets...]
--
j,jobs= maximum number of jobs to build at once
d,debug print dependency checks as they happen
v,verbose print commands as they are read from .do files (variables intact)
x,xtrace print commands as they are executed (variables expanded)
k,keep-going keep going as long as possible even if some targets fail
overwrite overwrite files even if generated outside of redo
log activate log recording (slower)
only-log print only failed targets from log
shuffle randomize the build order to find dependency bugs
debug-locks print messages about file locking (useful for debugging)
debug-pids print process ids as part of log messages (useful for debugging)
version print the current version and exit
color force enable color (--no-color to disable)
old-args use old-style definitions of $1,$2,$3 (deprecated)
old-stdout use old-style stdout to create target (deprecated)
warn-stdout warn if stdout is used
main= Choose which redo flavour to execute
"""
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
def read_opts():
redo_flavour = os.path.splitext(os.path.basename(sys.argv[0]))[0]
if redo_flavour == "redo-exec":
return False, 1, redo_flavour, sys.argv[1:]
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
if opt.overwrite:
os.environ['REDO_OVERWRITE'] = '1'
if opt.version:
from version import TAG
print TAG
sys.exit(0)
if opt.debug:
os.environ['REDO_DEBUG'] = str(opt.debug or 0)
if opt.verbose:
os.environ['REDO_VERBOSE'] = '1'
if opt.xtrace:
os.environ['REDO_XTRACE'] = '1'
if opt.keep_going:
os.environ['REDO_KEEP_GOING'] = '1'
if opt.shuffle:
os.environ['REDO_SHUFFLE'] = '1'
if opt.debug_locks:
os.environ['REDO_DEBUG_LOCKS'] = '1'
if opt.debug_pids:
os.environ['REDO_DEBUG_PIDS'] = '1'
if opt.old_args:
os.environ['REDO_OLD_ARGS'] = '1'
if opt.old_stdout:
os.environ['REDO_OLD_STDOUT'] = '1'
if opt.warn_stdout:
os.environ['REDO_WARN_STDOUT'] = '1'
if opt.color != None:
os.environ['REDO_COLOR'] = str(opt.color)
if opt.log != None:
os.environ['REDO_LOG'] = str(opt.log)
if opt.only_log:
os.environ['REDO_ONLY_LOG'] = '1'
if opt.main:
redo_flavour = opt.main
return True, atoi(opt.jobs or 1), redo_flavour, extra
def set_main(arg0):
# When the module is imported, change the process title.
# We do it here because this module is imported by all the scripts.
try:
from setproctitle import setproctitle
except ImportError:
pass
else:
args = sys.argv[1:]
args.insert(0, arg0)
setproctitle(" ".join(args))
def init(targets, redo_binaries=[]):
if not os.environ.get('REDO'):
if len(targets) == 0:
targets.append('all')
dirname = os.path.dirname(os.path.realpath(__file__))
paths = [os.path.join(dirname, "bin"),
os.path.join(dirname, "redo-sh")]
bindir = None
shdir = None
for p in paths:
p_redo = os.path.join(p, "redo")
if not bindir and os.path.exists(p_redo):
try:
from version import TAG as myver
except:
pass
else:
with os.popen("'%s' --version" % p_redo.replace("'", "'\"'\"'")) as f:
ver = f.read().strip()
if ver == myver:
bindir = p
elif os.environ.get('REDO_DEBUG'):
sys.stderr.write("%s: version %s different than %s\n" % (p_redo, ver, myver))
elif not shdir and os.path.exists(os.path.join(p, "sh")):
shdir = p
if shdir and bindir:
break
if not bindir:
bindir = os.path.join(os.getcwd(), ".redo", "bin")
try: os.makedirs(bindir)
except: pass
main = os.path.realpath(__file__)
for exe in redo_binaries:
exe = os.path.join(bindir, exe)
try: os.unlink(exe)
except: pass
os.symlink(main, exe)
if bindir: os.environ['PATH'] = bindir + ":" + os.environ['PATH']
if shdir: os.environ['PATH'] = shdir + ":" + os.environ['PATH']
os.environ['REDO'] = os.path.join(bindir, "redo")
if not os.environ.get('REDO_STARTDIR'):
import runid
os.environ['REDO_STARTDIR'] = os.getcwd()
os.environ['REDO_RUNID_FILE'] = '.redo/runid'
runid.change('.redo/runid')
if not os.environ.get('REDO_STDIO'):
os.environ['REDO_STDIO'] = "%d,%d,%d" % (os.dup(0), os.dup(1), os.dup(2))
try:
from main import mains
do_init, jobs, redo_flavour, targets = read_opts()
if do_init:
init(targets, mains.keys())
from log import err, debug
import jwack
if not redo_flavour.startswith("redo"):
redo_flavour = "redo-%s" % redo_flavour
if redo_flavour not in mains:
err("invalid redo: %s\n", redo_flavour)
sys.exit(1)
set_main(redo_flavour)
if jobs < 1 or jobs > 1000:
err('invalid --jobs value: %r\n', jobs)  # 'opt' is out of scope here; report the parsed value
jwack.setup(jobs)
debug("%s %r\n", redo_flavour, targets)
import vars
vars.init()
sys.exit(mains[redo_flavour](redo_flavour, targets) or 0)
except KeyboardInterrupt:
sys.exit(200)
| lgpl-2.1 |
Elbagoury/odoo | addons/mail/res_users.py | 314 | 10337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp import api
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
import openerp
class res_users(osv.Model):
""" Update of res.users class
- add a preference about sending emails about notifications
- make a new user follow itself
- add a welcome message
- add suggestion preference
"""
_name = 'res.users'
_inherit = ['res.users']
_inherits = {'mail.alias': 'alias_id'}
_columns = {
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Email address internally associated with this user. Incoming "\
"emails will appear in the user's notifications.", copy=False, auto_join=True),
'display_groups_suggestions': fields.boolean("Display Groups Suggestions"),
}
_defaults = {
'display_groups_suggestions': True,
}
def __init__(self, pool, cr):
""" Override of __init__ to add access rights on notification_email_send
and alias fields. Access rights are disabled by default, but allowed
on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
"""
init_res = super(res_users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.extend(['notify_email', 'display_groups_suggestions'])
# duplicate list to avoid modifying the original reference
self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
self.SELF_READABLE_FIELDS.extend(['notify_email', 'alias_domain', 'alias_name', 'display_groups_suggestions'])
return init_res
def _auto_init(self, cr, context=None):
""" Installation hook: aliases, partner following themselves """
# create aliases for all users and avoid constraint errors
return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(res_users, self)._auto_init,
self._name, self._columns['alias_id'], 'login', alias_force_key='id', context=context)
def create(self, cr, uid, data, context=None):
if not data.get('login', False):
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'action_res_users')
msg = _("You cannot create a new user from here.\n To create new user please go to configuration panel.")
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
if context is None:
context = {}
create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name)
user_id = super(res_users, self).create(cr, uid, data, context=create_context)
user = self.browse(cr, uid, user_id, context=context)
self.pool.get('mail.alias').write(cr, SUPERUSER_ID, [user.alias_id.id], {"alias_force_thread_id": user_id, "alias_parent_thread_id": user_id}, context)
# create a welcome message
self._create_welcome_message(cr, uid, user, context=context)
return user_id
def copy_data(self, *args, **kwargs):
data = super(res_users, self).copy_data(*args, **kwargs)
if data and data.get('alias_name'):
data['alias_name'] = data['login']
return data
def _create_welcome_message(self, cr, uid, user, context=None):
if not self.has_group(cr, uid, 'base.group_user'):
return False
company_name = user.company_id.name if user.company_id else ''
body = _('%s has joined the %s network.') % (user.name, company_name)
# TODO change SUPERUSER_ID into user.id but catch errors
return self.pool.get('res.partner').message_post(cr, SUPERUSER_ID, [user.partner_id.id],
body=body, context=context)
def unlink(self, cr, uid, ids, context=None):
# Cascade-delete mail aliases as well, as they should not exist without the user.
alias_pool = self.pool.get('mail.alias')
alias_ids = [user.alias_id.id for user in self.browse(cr, uid, ids, context=context) if user.alias_id]
res = super(res_users, self).unlink(cr, uid, ids, context=context)
alias_pool.unlink(cr, uid, alias_ids, context=context)
return res
def _message_post_get_pid(self, cr, uid, thread_id, context=None):
assert thread_id, "res.users does not support posting global messages"
if context and 'thread_model' in context:
context['thread_model'] = 'res.users'
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
return self.browse(cr, SUPERUSER_ID, thread_id).partner_id.id
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, context=None, **kwargs):
""" Redirect the posting of message on res.users as a private discussion.
This is done because when giving the context of Chatter on the
various mailboxes, we do not have access to the current partner_id. """
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
current_pids = []
partner_ids = kwargs.get('partner_ids', [])
user_pid = self._message_post_get_pid(cr, uid, thread_id, context=context)
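# partner_ids may mix plain ids with Odoo relational commands: (4, id)
# links a single partner, (6, 0, ids) replaces the whole set; collect the
# plain ids from every form before checking whether user_pid is present.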
for partner_id in partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
current_pids.append(partner_id[1])
elif isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
current_pids.append(partner_id[2])
elif isinstance(partner_id, (int, long)):
current_pids.append(partner_id)
if user_pid not in current_pids:
partner_ids.append(user_pid)
kwargs['partner_ids'] = partner_ids
if context and context.get('thread_model') == 'res.partner':
return self.pool['res.partner'].message_post(cr, uid, user_pid, **kwargs)
return self.pool['mail.thread'].message_post(cr, uid, uid, **kwargs)
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
return True
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
return True
def message_get_partner_info_from_emails(self, cr, uid, emails, link_mail=False, context=None):
return self.pool.get('mail.thread').message_get_partner_info_from_emails(cr, uid, emails, link_mail=link_mail, context=context)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
return dict((res_id, list()) for res_id in ids)
def stop_showing_groups_suggestions(self, cr, uid, user_id, context=None):
"""Update display_groups_suggestions value to False"""
if context is None:
context = {}
self.write(cr, uid, user_id, {"display_groups_suggestions": False}, context)
class res_users_mail_group(osv.Model):
""" Update of res.users class
- if adding groups to an user, check mail.groups linked to this user
group, and the user. This is done by overriding the write method.
"""
_name = 'res.users'
_inherit = ['res.users']
# FP Note: to improve, post-processing may be better?
def write(self, cr, uid, ids, vals, context=None):
write_res = super(res_users_mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('groups_id'):
# form: {'groups_id': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'groups_id': [(6, 0, [ids])]}
user_group_ids = [command[1] for command in vals['groups_id'] if command[0] == 4]
user_group_ids += [id for command in vals['groups_id'] if command[0] == 6 for id in command[2]]
mail_group_obj = self.pool.get('mail.group')
mail_group_ids = mail_group_obj.search(cr, uid, [('group_ids', 'in', user_group_ids)], context=context)
mail_group_obj.message_subscribe_users(cr, uid, mail_group_ids, ids, context=context)
return write_res
class res_groups_mail_group(osv.Model):
""" Update of res.groups class
- if adding users from a group, check mail.groups linked to this user
group and subscribe them. This is done by overriding the write method.
"""
_name = 'res.groups'
_inherit = 'res.groups'
# FP Note: to improve, post-processing after the super call may be better
def write(self, cr, uid, ids, vals, context=None):
write_res = super(res_groups_mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('users'):
# form: {'users': [(3, 10), (3, 3), (4, 10), (4, 3)]} or {'users': [(6, 0, [ids])]}
user_ids = [command[1] for command in vals['users'] if command[0] == 4]
user_ids += [id for command in vals['users'] if command[0] == 6 for id in command[2]]
mail_group_obj = self.pool.get('mail.group')
mail_group_ids = mail_group_obj.search(cr, uid, [('group_ids', 'in', ids)], context=context)
mail_group_obj.message_subscribe_users(cr, uid, mail_group_ids, user_ids, context=context)
return write_res
| agpl-3.0 |
waltharius/NewsBlur | apps/feed_import/migrations/0003_session_id.py | 18 | 4929 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OAuthToken.session_id'
db.add_column('feed_import_oauthtoken', 'session_id', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True), keep_default=False)
# Changing field 'OAuthToken.user'
db.alter_column('feed_import_oauthtoken', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, null=True, blank=True))
def backwards(self, orm):
# Deleting field 'OAuthToken.session_id'
db.delete_column('feed_import_oauthtoken', 'session_id')
# Changing field 'OAuthToken.user'
db.alter_column('feed_import_oauthtoken', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'feed_import.oauthtoken': {
'Meta': {'object_name': 'OAuthToken'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'access_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'request_token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'request_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'session_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['feed_import']
| mit |
miraculixx/heroku-buildpack-python | vendor/pip-pop/pip/_vendor/requests/models.py | 410 | 29176 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
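# e.g. for self.url == 'http://httpbin.org/get?a=1' this returns '/get?a=1'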
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
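# e.g. _encode_params([('k', ['v1', 'v2'])]) returns 'k=v1&k=v2'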
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata,
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None,
json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(
to_native_string(url, 'utf8')))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
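# Illustrative usage, not part of the original module: prepare_url()
# normalizes the URL and merges params into the query string, e.g.
#   p = PreparedRequest()
#   p.prepare_url('http://httpbin.org/get', {'key': 'value'})
#   # p.url == 'http://httpbin.org/get?key=value'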
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, file-like object, generator, or iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if json is not None:
content_type = 'application/json'
body = json_dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data and json is None:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
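# Illustrative sketch, not part of the original module: building a
# PreparedRequest by hand and adjusting it before transmission. The header
# name below is invented for the example.
def _example_manual_prepare():
    """Prepare a request directly and tweak a header (sketch)."""
    req = Request('POST', 'http://httpbin.org/post', data={'key': 'value'})
    prepped = req.prepare()
    prepped.headers['X-Demo'] = 'yes'  # hypothetical header
    return prepped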
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
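# Illustrative usage, not part of the original module: with stream=True on
# the request, iter_lines() consumes the body lazily, e.g.
#   for line in resp.iter_lines(chunk_size=512):
#       if line:  # filter out keep-alive blank lines
#           process(line)  # `process` is a hypothetical callback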
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
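# Illustrative sketch, not part of the original module: a common consumption
# pattern for a Response -- fail fast on HTTP errors, prefer JSON, fall back
# to text.
def _example_consume(resp):
    """Raise on 4xx/5xx, then decode the body (sketch)."""
    resp.raise_for_status()
    try:
        return resp.json()
    except ValueError:  # body was not valid JSON
        return resp.text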
| mit |
Akrog/cinder | cinder/backup/drivers/tsm.py | 1 | 21068 | # Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Backup driver for IBM Tivoli Storage Manager (TSM).
Implementation of a backup service that uses IBM Tivoli Storage Manager (TSM)
as the backend. The driver uses TSM command line dsmc utility to
run the backup and restore operations.
This version supports backup of block devices (e.g. FC, iSCSI, or local) as
well as regular files.
A prerequisite for using the IBM TSM backup service is configuring the
Cinder host for using TSM.
"""
import json
import os
import stat
from oslo_concurrency import processutils
from oslo_config import cfg
from cinder.backup import driver
from cinder import exception
from cinder.i18n import _LE, _
from cinder.openstack.common import log as logging
from cinder import utils
LOG = logging.getLogger(__name__)
tsm_opts = [
cfg.StrOpt('backup_tsm_volume_prefix',
default='backup',
help='Volume prefix for the backup id when backing up to TSM'),
cfg.StrOpt('backup_tsm_password',
default='password',
help='TSM password for the running username',
secret=True),
cfg.BoolOpt('backup_tsm_compression',
default=True,
help='Enable or Disable compression for backups'),
]
CONF = cfg.CONF
CONF.register_opts(tsm_opts)
VALID_BACKUP_MODES = ['image', 'file']
def _get_backup_metadata(backup, operation):
"""Return metadata persisted with backup object."""
svc_metadata = backup['service_metadata']
try:
svc_dict = json.loads(svc_metadata)
backup_path = svc_dict.get('backup_path')
backup_mode = svc_dict.get('backup_mode')
except TypeError:
# for backwards compatibility
vol_prefix = CONF.backup_tsm_volume_prefix
backup_id = backup['id']
backup_path = utils.make_dev_path('%s-%s' %
(vol_prefix, backup_id))
backup_mode = 'image'
if backup_mode not in VALID_BACKUP_MODES:
volume_id = backup['volume_id']
backup_id = backup['id']
err = (_('%(op)s: backup %(bck_id)s, volume %(vol_id)s failed. '
'Backup object has unexpected mode. Image or file '
'backups supported, actual mode is %(vol_mode)s.')
% {'op': operation,
'bck_id': backup_id,
'vol_id': volume_id,
'vol_mode': backup_mode})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return backup_path, backup_mode
def _image_mode(backup_mode):
"""True if backup is image type."""
return backup_mode == 'image'
def _make_link(volume_path, backup_path, vol_id):
"""Create a hard link for the volume block device.
The IBM TSM client performs an image backup on a block device.
The name of the block device is the backup prefix plus the backup id
:param volume_path: real device path name for volume
:param backup_path: path name TSM will use as volume to backup
:param vol_id: id of volume to backup (for reporting)
:raises: InvalidBackup
"""
try:
utils.execute('ln', volume_path, backup_path,
run_as_root=True,
check_exit_code=True)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to create device hardlink '
'from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'vpath': volume_path,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _create_unique_device_link(backup_id, volume_path, volume_id, bckup_mode):
"""Create a consistent hardlink for the volume block device.
Create a consistent hardlink using the backup id so TSM
will be able to backup and restore to the same block device.
:param backup_id: the backup id
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
:param bckup_mode: TSM backup mode, either 'image' or 'file'
:raises: InvalidBackup
:returns str -- hardlink path of the volume block device
"""
if _image_mode(bckup_mode):
hardlink_path = utils.make_dev_path('%s-%s' %
(CONF.backup_tsm_volume_prefix,
backup_id))
else:
dir, volname = os.path.split(volume_path)
hardlink_path = ('%s/%s-%s' %
(dir,
CONF.backup_tsm_volume_prefix,
backup_id))
_make_link(volume_path, hardlink_path, volume_id)
return hardlink_path
def _check_dsmc_output(output, check_attrs, exact_match=True):
"""Check dsmc command line utility output.
Parse the output of the dsmc command and make sure that a given
attribute is present, and that it has the proper value.
TSM attribute has the format of "text : value".
:param output: TSM output to parse
:param check_attrs: text to identify in the output
:param exact_match: if True, the check will pass only if the parsed
value is equal to the value specified in check_attrs. If false, the
check will pass if the parsed value is greater than or equal to the
value specified in check_attrs. This is needed because for file
backups, the parent directories may also be included the first time a
volume is backed up.
:returns bool -- whether the required output attributes were found in the output
"""
parsed_attrs = {}
for line in output.split('\n'):
# parse TSM output: look for "msg : value"
key, sep, val = line.partition(':')
if sep and len(val.strip()) > 0:
parsed_attrs[key] = val.strip()
for ckey, cval in check_attrs.iteritems():
if ckey not in parsed_attrs:
return False
elif exact_match and parsed_attrs[ckey] != cval:
return False
elif not exact_match and int(parsed_attrs[ckey]) < int(cval):
return False
return True
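# Illustrative sketch, not part of the original driver: the "text: value"
# format _check_dsmc_output() parses. The sample output below is invented.
def _example_check_output():
    """Show a passing check against sample dsmc-style output (sketch)."""
    sample = 'Total number of objects backed up: 1\nElapsed time: 00:01'
    return _check_dsmc_output(sample,
                              {'Total number of objects backed up': '1'})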
def _get_volume_realpath(volume_file, volume_id):
"""Get the real path for the volume block device.
If the volume is not a block device or a regular file issue an
InvalidBackup exception.
:param volume_file: file object representing the volume
:param volume_id: Volume id for backup or as restore target
:raises: InvalidBackup
:returns str -- real path of volume device
:returns str -- backup mode to be used
"""
try:
# Get real path
volume_path = os.path.realpath(volume_file.name)
# Verify that path is a block device
volume_mode = os.stat(volume_path).st_mode
if stat.S_ISBLK(volume_mode):
backup_mode = 'image'
elif stat.S_ISREG(volume_mode):
backup_mode = 'file'
else:
err = (_('backup: %(vol_id)s failed. '
'%(path)s is unexpected file type. Block or regular '
'files supported, actual file mode is %(vol_mode)s.')
% {'vol_id': volume_id,
'path': volume_path,
'vol_mode': volume_mode})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except AttributeError:
err = (_('backup: %(vol_id)s failed. Cannot obtain real path '
'to volume at %(path)s.')
% {'vol_id': volume_id,
'path': volume_file})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except OSError:
err = (_('backup: %(vol_id)s failed. '
'%(path)s is not a file.')
% {'vol_id': volume_id,
'path': volume_path})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
return volume_path, backup_mode
def _cleanup_device_hardlink(hardlink_path, volume_path, volume_id):
"""Remove the hardlink for the volume block device.
:param hardlink_path: hardlink to the volume block device
:param volume_path: real path of the backup/restore device
:param volume_id: Volume id for backup or as restore target
"""
try:
utils.execute('rm',
'-f',
hardlink_path,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
err = (_LE('backup: %(vol_id)s failed to remove backup hardlink'
' from %(vpath)s to %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s.')
% {'vol_id': volume_id,
'vpath': volume_path,
'bpath': hardlink_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
class TSMBackupDriver(driver.BackupDriver):
"""Provides backup, restore and delete of volumes backup for TSM."""
DRIVER_VERSION = '1.0.0'
def __init__(self, context, db_driver=None):
super(TSMBackupDriver, self).__init__(context, db_driver)
self.tsm_password = CONF.backup_tsm_password
self.volume_prefix = CONF.backup_tsm_volume_prefix
def _do_backup(self, backup_path, vol_id, backup_mode):
"""Perform the actual backup operation.
:param backup_path: volume path
:param vol_id: volume id
:param backup_mode: file mode of source volume; 'image' or 'file'
:raises: InvalidBackup
"""
backup_attrs = {'Total number of objects backed up': '1'}
compr_flag = 'yes' if CONF.backup_tsm_compression else 'no'
backup_cmd = ['dsmc', 'backup']
if _image_mode(backup_mode):
backup_cmd.append('image')
backup_cmd.extend(['-quiet',
'-compression=%s' % compr_flag,
'-password=%s' % self.tsm_password,
backup_path])
out, err = utils.execute(*backup_cmd,
run_as_root=True,
check_exit_code=False)
success = _check_dsmc_output(out, backup_attrs, exact_match=False)
if not success:
err = (_('backup: %(vol_id)s failed to obtain backup '
'success notification from server.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def _do_restore(self, backup_path, restore_path, vol_id, backup_mode):
"""Perform the actual restore operation.
:param backup_path: the path the backup was created from, this
identifies the backup to tsm
:param restore_path: volume path to restore into
:param vol_id: volume id
:param backup_mode: mode used to create the backup ('image' or 'file')
:raises: InvalidBackup
"""
restore_attrs = {'Total number of objects restored': '1'}
restore_cmd = ['dsmc', 'restore']
if _image_mode(backup_mode):
restore_cmd.append('image')
restore_cmd.append('-noprompt') # suppress prompt
else:
restore_cmd.append('-replace=yes') # suppress prompt
restore_cmd.extend(['-quiet',
'-password=%s' % self.tsm_password,
backup_path])
if restore_path != backup_path:
restore_cmd.append(restore_path)
out, err = utils.execute(*restore_cmd,
run_as_root=True,
check_exit_code=False)
success = _check_dsmc_output(out, restore_attrs)
if not success:
err = (_('restore: %(vol_id)s failed.\n'
'stdout: %(out)s\n stderr: %(err)s.')
% {'vol_id': vol_id,
'out': out,
'err': err})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
def backup(self, backup, volume_file, backup_metadata=False):
"""Backup the given volume to TSM.
TSM performs a backup of a volume. The volume_file is used
to determine the path of the block device that TSM will back up.
:param backup: backup information for volume
:param volume_file: file object representing the volume
:param backup_metadata: whether or not to backup volume metadata
:raises InvalidBackup
"""
# TODO(dosaboy): this needs implementing (see backup.drivers.ceph for
# an example)
if backup_metadata:
msg = _("Volume metadata backup requested but this driver does "
"not yet support this feature.")
raise exception.InvalidBackup(reason=msg)
backup_id = backup['id']
volume_id = backup['volume_id']
volume_path, backup_mode = _get_volume_realpath(volume_file,
volume_id)
LOG.debug('Starting backup of volume: %(volume_id)s to TSM,'
' volume path: %(volume_path)s, mode: %(mode)s.'
% {'volume_id': volume_id,
'volume_path': volume_path,
'mode': backup_mode})
backup_path = _create_unique_device_link(backup_id,
volume_path,
volume_id,
backup_mode)
service_metadata = {'backup_mode': backup_mode,
'backup_path': backup_path}
self.db.backup_update(self.context,
backup_id,
{'service_metadata':
json.dumps(service_metadata)})
try:
self._do_backup(backup_path, volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('backup: %(vol_id)s failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': backup_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(backup_path, volume_path, volume_id)
LOG.debug('Backup %s finished.' % backup_id)
def restore(self, backup, volume_id, volume_file):
"""Restore the given volume backup from TSM server.
:param backup: backup information for volume
:param volume_id: volume id
:param volume_file: file object representing the volume
:raises InvalidBackup
"""
backup_id = backup['id']
# backup_path is the path that was originally backed up.
backup_path, backup_mode = _get_backup_metadata(backup, 'restore')
LOG.debug('Starting restore of backup from TSM '
'to volume %(volume_id)s, '
'backup: %(backup_id)s, '
'mode: %(mode)s.' %
{'volume_id': volume_id,
'backup_id': backup_id,
'mode': backup_mode})
# volume_path is the path to restore into. This may
# be different than the original volume.
volume_path, unused = _get_volume_realpath(volume_file,
volume_id)
restore_path = _create_unique_device_link(backup_id,
volume_path,
volume_id,
backup_mode)
try:
self._do_restore(backup_path, restore_path, volume_id, backup_mode)
except processutils.ProcessExecutionError as exc:
err = (_('restore: %(vol_id)s failed to run dsmc '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('restore: %(vol_id)s failed to run dsmc '
'due to invalid arguments '
'on %(bpath)s.\n'
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'bpath': restore_path,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
finally:
_cleanup_device_hardlink(restore_path, volume_path, volume_id)
LOG.debug('Restore %(backup_id)s to %(volume_id)s finished.'
% {'backup_id': backup_id,
'volume_id': volume_id})
def delete(self, backup):
"""Delete the given backup from TSM server.
:param backup: backup information for volume
:raises InvalidBackup
"""
delete_attrs = {'Total number of objects deleted': '1'}
delete_path, backup_mode = _get_backup_metadata(backup, 'delete')
volume_id = backup['volume_id']
LOG.debug('Delete started for backup: %(backup)s, mode: %(mode)s.',
{'backup': backup['id'],
'mode': backup_mode})
try:
out, err = utils.execute('dsmc',
'delete',
'backup',
'-quiet',
'-noprompt',
'-objtype=%s' % backup_mode,
'-password=%s' % self.tsm_password,
delete_path,
run_as_root=True,
check_exit_code=False)
except processutils.ProcessExecutionError as exc:
err = (_('delete: %(vol_id)s failed to run dsmc with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
except exception.Error as exc:
err = (_('delete: %(vol_id)s failed to run dsmc '
'due to invalid arguments with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': exc.stdout,
'err': exc.stderr})
LOG.error(err)
raise exception.InvalidBackup(reason=err)
success = _check_dsmc_output(out, delete_attrs)
if not success:
# log error if tsm cannot delete the backup object
# but do not raise exception so that cinder backup
# object can be removed.
err = (_LE('delete: %(vol_id)s failed with '
'stdout: %(out)s\n stderr: %(err)s')
% {'vol_id': volume_id,
'out': out,
'err': err})
LOG.error(err)
LOG.debug('Delete %s finished.' % backup['id'])
def get_backup_driver(context):
return TSMBackupDriver(context)
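# Illustrative sketch, not part of the original driver: the shape of the
# service_metadata JSON that backup() persists and _get_backup_metadata()
# reads back. Both values below are invented for the example.
EXAMPLE_SERVICE_METADATA = json.dumps({
    'backup_mode': 'image',             # or 'file'
    'backup_path': '/dev/backup-1234',  # hypothetical hardlink path
})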
| apache-2.0 |
szbesting/trunk | software/rt-thread/components/external/freetype/builds/mac/ascii2mpw.py | 830 | 1033 | #!/usr/bin/env python
import sys
import string
if len( sys.argv ) == 1 :
for asc_line in sys.stdin.readlines():
mpw_line = string.replace(asc_line, "\\xA5", "\245")
mpw_line = string.replace(mpw_line, "\\xB6", "\266")
mpw_line = string.replace(mpw_line, "\\xC4", "\304")
mpw_line = string.replace(mpw_line, "\\xC5", "\305")
mpw_line = string.replace(mpw_line, "\\xFF", "\377")
mpw_line = string.replace(mpw_line, "\n", "\r")
mpw_line = string.replace(mpw_line, "\\n", "\n")
sys.stdout.write(mpw_line)
elif sys.argv[1] == "-r" :
for mpw_line in sys.stdin.readlines():
asc_line = string.replace(mpw_line, "\n", "\\n")
asc_line = string.replace(asc_line, "\r", "\n")
asc_line = string.replace(asc_line, "\245", "\\xA5")
asc_line = string.replace(asc_line, "\266", "\\xB6")
asc_line = string.replace(asc_line, "\304", "\\xC4")
asc_line = string.replace(asc_line, "\305", "\\xC5")
asc_line = string.replace(asc_line, "\377", "\\xFF")
sys.stdout.write(asc_line)
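# Illustrative usage, not part of the original script: it is a stdin/stdout
# filter, so typical invocations (file names hypothetical) are
#   python ascii2mpw.py < file.ascii > file.mpw
#   python ascii2mpw.py -r < file.mpw > file.ascii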
| gpl-2.0 |
smandy/d_c_experiment | scons-local-2.3.4/SCons/Scanner/C.py | 9 | 4811 | """SCons.Scanner.C
This module implements the dependency scanner for C/C++ code.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/C.py 2014/09/27 12:51:43 garyo"
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.cpp
class SConsCPPScanner(SCons.cpp.PreProcessor):
"""
SCons-specific subclass of the cpp.py module's processing.
We subclass this so that: 1) we can deal with files represented
by Nodes, not strings; 2) we can keep track of the files that are
missing.
"""
def __init__(self, *args, **kw):
SCons.cpp.PreProcessor.__init__(self, *args, **kw)
self.missing = []
def initialize_result(self, fname):
self.result = SCons.Util.UniqueList([fname])
def finalize_result(self, fname):
return self.result[1:]
def find_include_file(self, t):
keyword, quote, fname = t
result = SCons.Node.FS.find_file(fname, self.searchpath[quote])
if not result:
self.missing.append((fname, self.current_file))
return result
def read_file(self, file):
try:
fp = open(str(file.rfile()))
except EnvironmentError, e:
self.missing.append((file, self.current_file))
return ''
else:
return fp.read()
def dictify_CPPDEFINES(env):
cppdefines = env.get('CPPDEFINES', {})
if cppdefines is None:
return {}
if SCons.Util.is_Sequence(cppdefines):
result = {}
for c in cppdefines:
if SCons.Util.is_Sequence(c):
result[c[0]] = c[1]
else:
result[c] = None
return result
if not SCons.Util.is_Dict(cppdefines):
return {cppdefines : None}
return cppdefines
class SConsCPPScannerWrapper(object):
"""
The SCons wrapper around a cpp.py scanner.
This is the actual glue between the calling conventions of generic
SCons scanners, and the (subclass of) cpp.py class that knows how
to look for #include lines with reasonably real C-preprocessor-like
evaluation of #if/#ifdef/#else/#elif lines.
"""
def __init__(self, name, variable):
self.name = name
self.path = SCons.Scanner.FindPathDirs(variable)
def __call__(self, node, env, path = ()):
cpp = SConsCPPScanner(current = node.get_dir(),
cpppath = path,
dict = dictify_CPPDEFINES(env))
result = cpp(node)
for included, includer in cpp.missing:
fmt = "No dependency generated for file: %s (included from: %s) -- file not found"
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
fmt % (included, includer))
return result
def recurse_nodes(self, nodes):
return nodes
def select(self, node):
return self
def CScanner():
"""Return a prototype Scanner instance for scanning source files
that use the C pre-processor"""
# Here's how we would (or might) use the CPP scanner code above that
# knows how to evaluate #if/#ifdef/#else/#elif lines when searching
# for #includes. This is commented out for now until we add the
# right configurability to let users pick between the scanners.
#return SConsCPPScannerWrapper("CScanner", "CPPPATH")
cs = SCons.Scanner.ClassicCPP("CScanner",
"$CPPSUFFIXES",
"CPPPATH",
'^[ \t]*#[ \t]*(?:include|import)[ \t]*(<|")([^>"]+)(>|")')
return cs
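# Illustrative sketch, not part of the original module: instantiating the
# conditional-aware wrapper instead of the classic scanner, matching the
# commented-out line in CScanner() above.
def _example_cpp_scanner():
    """Return the #if/#ifdef-evaluating scanner variant (sketch)."""
    return SConsCPPScannerWrapper("CScanner", "CPPPATH")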
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
harveyormston/osc_gen | tests/test_dsp.py | 1 | 5028 | #!/usr/bin/env python3
"""
Copyright 2019 Harvey Ormston
This file is part of osc_gen.
osc_gen is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
osc_gen is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with osc_gen. If not, see <https://www.gnu.org/licenses/>.
"""
from __future__ import division
import numpy as np
from osc_gen import dsp
from osc_gen import sig
def test_normalize():
""" test normalize """
a = np.array([0.0, 1.0, 2.0])
e = np.array([-1.0, 0.0, 1.0])
assert np.all(dsp.normalize(a) == e)
def test_normalize_zero():
""" test normalize with amplitude zero """
a = np.array([0.0, 0.0])
assert np.amax(dsp.normalize(a)) == 0.0
def test_normalize_neg():
""" test normalize with negative input """
a = np.array([-1.0, 0.0])
assert np.amax(dsp.normalize(a)) == 1.0
assert np.amin(dsp.normalize(a)) == -1.0
def test_normalize_dc():
""" test normalize with negative input """
a = np.array([0.123, 0.123])
e = np.array([0.0, 0.0])
assert np.all(dsp.normalize(a) == e)
def test_clip():
""" test clip """
a = np.array([0.0, 1.0, 2.0])
o = dsp.clip(a, 0)
e = np.array([-1.0, 1.0, 1.0])
assert np.allclose(o, e)
def test_clip_amount():
""" test clip amount """
a = np.array([-0.1, 0.5, 5.0])
o = dsp.clip(a, 1)
e = np.array([-1.0, 1.0, 1.0])
assert np.allclose(o, e)
def test_clip_bias():
""" test clip bias """
a = np.array([-1.0, 0.5, 5.0])
o = dsp.clip(a, 0, bias=0.5)
e = np.array([-1.0, 1.0, 1.0])
assert np.allclose(o, e)
def test_tube():
""" test tube """
a = np.array([-1.0, -0.1, 0.1, 1.0])
o = dsp.tube(a, 0)
e = np.array([-1.0, -0.1081076, 0.1081076, 1.0])
assert np.allclose(o, e)
def test_tube_bypass():
""" test tube bypass """
a = np.array([-1.0, 0.0, 1.0])
o = dsp.tube(a, 0)
e = np.array([-1.0, 0.0, 1.0])
assert np.allclose(o, e)
def test_fold():
""" test fold to """
a = np.array([-0.5, -0.7, 0.0, 0.6, 0.5])
o = dsp.fold(a, 1)
e = np.array([-1.0, -0.6, 0.0, 0.8, 1.0])
assert np.allclose(o, e)
def test_fold_to_zero():
""" test fold to zero """
a = np.array([-1.0, 0.0, 1.0])
o = dsp.fold(a, 1)
e = np.array([0.0, 0.0, 0.0])
assert np.allclose(o, e)
def test_shape():
""" test shape """
a = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
o = dsp.shape(a)
e = np.array([-1.0, -0.125, 0.0, 0.125, 1.0])
assert np.allclose(o, e)
def test_shape_two():
""" test shape power two """
a = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
o = dsp.shape(a, power=2)
e = np.array([-1.0, -0.25, 0.0, 0.25, 1.0])
assert np.allclose(o, e)
def test_shape_bias():
""" test shape bias """
a = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
o = dsp.shape(a, bias=0.5)
e = np.array([-0.625, -0.5, -0.375, 0.5, 2.875])
dsp.normalize(e)
assert np.allclose(o, e)
def test_slew():
""" test slew """
a = np.ones(100)
a[:50] *= -1
o = dsp.slew(a, 0.1)
assert o[0] > o[1]
def test_downsample():
""" test downsample """
a = np.linspace(-1, 1, 10)
e = np.empty_like(a)
for i in range(10):
if i % 2 == 0:
e[i] = a[i]
else:
e[i] = a[i - 1]
dsp.normalize(e)
dsp.downsample(a, 2)
assert np.all(a == e)
def test_quantize():
""" test quantize """
a = np.array([-1.0, -0.5, 0.0, 0.5, 1.0])
dsp.quantize(a, 2)
assert a[1] == -2 / 3
assert a[-2] == 2 / 3
def test_fundamental():
""" test fundamental """
a = np.array([-1.0, 0.0, 1.0, 0.0])
f = dsp.fundamental(a, 2)
assert f == 0.5
def test_harmonic_series():
""" test harmonic_series """
n = 64 * 501
a = np.sin(16 * np.pi * np.linspace(0, 1, n))
a += (0.5 * np.sin(32 * np.pi * np.linspace(0, 1, n)))
a += (0.25 * np.sin(48 * np.pi * np.linspace(0, 1, n)))
h = np.absolute(dsp.harmonic_series(a))
e = np.zeros_like(h)
e[0] = 1.
e[1] = 0.5
e[2] = 0.25
assert np.allclose(h, e, rtol=5e-3, atol=5e-3)
def test_slice_cycles():
""" test slice_cycles """
a = np.array([0.0, 1.0, 0.0, -1.0])
e = a
a = np.tile(a, 501)
o = dsp.slice_cycles(a, 2, 2)
assert np.all(o[1] == e)
def test_resynthesize():
""" test resynthesize """
a = np.sin(128 * np.pi * np.linspace(0, 1, 2048))
e = np.sin(2 * np.pi * np.linspace(0, 1 - (1 / 32), 32))
s = sig.SigGen()
s.num_points = 32
o = dsp.resynthesize(a, s)
assert np.all(np.abs(o - e) < 0.01)
| gpl-3.0 |
karthik-sethuraman/ONFOpenTransport | RI/flask_server/tapi_server/models/tapi_oam_getmeg_input.py | 4 | 1736 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server import util
class TapiOamGetmegInput(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, service_id=None): # noqa: E501
"""TapiOamGetmegInput - a model defined in OpenAPI
:param service_id: The service_id of this TapiOamGetmegInput. # noqa: E501
:type service_id: str
"""
self.openapi_types = {
'service_id': str
}
self.attribute_map = {
'service_id': 'service-id'
}
self._service_id = service_id
@classmethod
def from_dict(cls, dikt) -> 'TapiOamGetmegInput':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.oam.getmeg.Input of this TapiOamGetmegInput. # noqa: E501
:rtype: TapiOamGetmegInput
"""
return util.deserialize_model(dikt, cls)
@property
def service_id(self):
"""Gets the service_id of this TapiOamGetmegInput.
none # noqa: E501
:return: The service_id of this TapiOamGetmegInput.
:rtype: str
"""
return self._service_id
@service_id.setter
def service_id(self, service_id):
"""Sets the service_id of this TapiOamGetmegInput.
none # noqa: E501
:param service_id: The service_id of this TapiOamGetmegInput.
:type service_id: str
"""
self._service_id = service_id
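# Illustrative sketch, not part of the generated module: round-tripping the
# model through a plain dict, as the REST layer does. The id value is
# invented for the example.
def _example_roundtrip():
    """Build the model from a dict and read the field back (sketch)."""
    model = TapiOamGetmegInput.from_dict({'service-id': 'meg-0'})
    return model.service_id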
| apache-2.0 |
fitermay/intellij-community | python/lib/Lib/site-packages/django/utils/encoding.py | 89 | 6532 | import types
import urllib
import locale
import datetime
import codecs
from decimal import Decimal
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class whose __str__ returns its __unicode__ as a UTF-8 bytestring.
Useful as a mix-in.
"""
def __str__(self):
return self.__unicode__().encode('utf-8')
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a unicode object representing 's'. Treats bytestrings using the
'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_unicode(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% in performance when s
# is an instance of unicode. This function gets called often in that
# setting.
if isinstance(s, unicode):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError, e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
return s
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if isinstance(s, Promise):
return unicode(s).encode(encoding, errors)
elif not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg, encoding, strings_only,
errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
# The encoding of the default system locale but falls back to the
# given fallback encoding if the encoding is unsupported by python or could
# not be determined. See tickets #10335 and #5846
try:
DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(DEFAULT_LOCALE_ENCODING)
except:
DEFAULT_LOCALE_ENCODING = 'ascii'
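# Illustrative sketch, not part of the original module: the usual boundary
# conversions -- unicode inside the application, bytes at the edges. Input
# values are invented for the example.
def _example_conversions():
    """Decode, re-encode and URL-quote a non-ASCII value (sketch)."""
    text = force_unicode('caf\xc3\xa9')   # UTF-8 bytes -> u'caf\xe9'
    raw = smart_str(text)                 # back to UTF-8 bytes
    safe = iri_to_uri(u'/tag/caf\xe9/')   # '/tag/caf%C3%A9/'
    return text, raw, safe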
| apache-2.0 |
pedropena/iteexe | exe/engine/placetheobjectsidevice.py | 10 | 6544 | # ===========================================================================
# Place the Objects Idevice for eXe Learning.
#
# Copyright Mike Dawson / PAIWASTOON Networking Services Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
Place the objects game requires the player to place a number of components
on a background area in the correct place. This could be organs in the correct
place in the human body etc. For these elements a TextArea field is used that
is then cropped / sized accordingly.
"""
import logging
from exe.engine.idevice import Idevice
from exe.engine.field import TextAreaField
from exe.engine.field import TextField
from exe.engine.field import Field
from exe.engine.path import Path, toUnicode
from exe.engine.resource import Resource
log = logging.getLogger(__name__)
# ===========================================================================
class PlaceTheObjectsIdeviceInc(Idevice):
"""
This is an example of a user created iDevice plugin. If it is copied
into the user's ~/.exe/idevices dircectory it will be loaded along with
the system idevices.
"""
persistenceVersion = 2
def __init__(self, content=""):
Idevice.__init__(self, x_(u"Place The Objects"),
x_(u"Mike Dawson / PAIWASTOON Networking Services Ltd."),
x_(u"""User has to place various objects in the correct place."""), "", "")
self.emphasis = Idevice.SomeEmphasis
self.content = TextAreaField(x_(u"Instructions"),
x_(u"This is a free text field."),
content)
self.content.idevice = self
self.titleField = TextField(x_("Title"), x_("Title"))
self.titleField.idevice = self
#This is the main field where objects will be dragged to
self.mainArea = TextAreaField(x_(u"Main Area"),
x_(u"This is the main image where the user will drag / drop items to"),
"")
self.mainArea.idevice = self
self.gameTimeLimit = TextField(x_(u"Time Limit (mm:ss)"), x_(u"(Optional) Game Time Limit"), "")
self.gameTimeLimit.idevice = self
self.gameTimerShown = TextField(x_(u"Show Timer"), x_(u"Even if there is no time limit, show the timer..."), "")
self.gameTimerShown.idevice = self
#these are shown when there is a right / wrong response
self.clickToStartGameArea = TextAreaField(x_(u"Message to click game to start"), x_(u"This will when clicked start the game"), "")
self.clickToStartGameArea.idevice = self
self.positiveResponseArea = TextAreaField(x_(u"Positive Response"), x_(u"Overlays main area when player correctly places object"), "")
self.positiveResponseArea.idevice = self
self.negativeResponseArea = TextAreaField(x_(u"Negative Response"), x_(u"Overlays main area when player guesses wrong"), "")
self.negativeResponseArea.idevice = self
self.partbinNumCols = TextField(x_("Number of Columns in part bin"), x_("Columns part bin"), "2")
self.partbinNumCols.idevice = self
#This is a list of objects to place
self.objectsToPlace = []
self.addPlacableObject()
"""
This will add a new placable object to the list
"""
def addPlacableObject(self):
newPlacableObject = PlacableObjectField(x_("Object to Place"), self)
self.objectsToPlace.append(newPlacableObject)
"""
Game requires jquery and jqueryui scripts - these should be in the same
folder as this idevice source file
This can then be called from the process method
"""
def uploadNeededScripts(self):
from exe import globals
import os,sys
scriptFileNames = ['jquery-ui-1.10.3.custom.min.js', 'placetheobjects.js']
for scriptName in scriptFileNames:
scriptSrcFilename = globals.application.config.webDir/"templates"/scriptName
gameScriptFile = Path(scriptSrcFilename)
if gameScriptFile.isfile():
Resource(self, gameScriptFile)
class PlacableObjectField(Field):
"""
This class is just to hold together the fields relating to a placed object
"""
persistenceVersion = 3
def __init__(self, name, idevice, instruction=x_("An object that has a correct place in the main area"), content=""):
Field.__init__(self, name, instruction)
self.mainContentField = TextAreaField(x_(u"Placable Object"), x_(u"Object to be put in place"), "")
self.idevice = idevice
self.mainContentField.idevice = idevice
self.targetX = TextField(x_(u"Correct Location (x)"), x_(u"Where this object belongs in the main area x coordinate"), "0")
self.targetX.idevice = idevice
self.targetY = TextField(x_(u"Correct Location (y)"), x_(u"Where this object belongs in the main area y coordinate"), "0")
self.targetY.idevice = idevice
self.width = TextField(x_(u"Width (pixels)"), x_(u"Width of object"), "100")
self.width.idevice = idevice
self.height = TextField(x_(u"Height (pixels)"), x_(u"Height of object"), "100")
self.height.idevice = idevice
self.tolerance = TextField(x_(u"Tolerance (pixels)"), x_(u"Tolerance when dropping num of pixels"), "20")
self.tolerance.idevice = idevice
# ===========================================================================
def register(ideviceStore):
"""Register with the ideviceStore"""
ideviceStore.extended.append(PlaceTheObjectsIdeviceInc())
# ===========================================================================
| gpl-2.0 |
patmcb/odoo | openerp/report/printscreen/ps_form.py | 381 | 5211 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.report.interface import report_int
import openerp.tools as tools
from openerp.report import render
from lxml import etree
import time, os
class report_printscreen_list(report_int):
def __init__(self, name):
report_int.__init__(self, name)
def _parse_node(self, root_node):
result = []
for node in root_node:
if node.tag == 'field':
attrsa = node.attrib
attrs = {}
if attrsa is not None:
for key, val in attrsa.items():
attrs[key] = val
result.append(attrs['name'])
else:
result.extend(self._parse_node(node))
return result
def _parse_string(self, view):
dom = etree.XML(view)
return self._parse_node(dom)
def create(self, cr, uid, ids, datas, context=None):
if not context:
context={}
datas['ids'] = ids
registry = openerp.registry(cr.dbname)
model = registry[datas['model']]
# The title comes from the model description specified in the py file.
self.title = model._description
result = model.fields_view_get(cr, uid, view_type='form', context=context)
fields_order = self._parse_string(result['arch'])
rows = model.read(cr, uid, datas['ids'], result['fields'].keys() )
self._create_table(uid, datas['ids'], result['fields'], fields_order, rows, context, model._description)
return self.obj.get(), 'pdf'
def _create_table(self, uid, ids, fields, fields_order, results, context, title=''):
pageSize=[297.0,210.0]
new_doc = etree.Element("report")
config = etree.SubElement(new_doc, 'config')
# build header
def _append_node(name, text):
n = etree.SubElement(config, name)
n.text = text
_append_node('date', time.strftime('%d/%m/%Y'))
_append_node('PageSize', '%.2fmm,%.2fmm' % tuple(pageSize))
_append_node('PageWidth', '%.2f' % (pageSize[0] * 2.8346,))
_append_node('PageHeight', '%.2f' %(pageSize[1] * 2.8346,))
_append_node('report-header', title)
l = []
t = 0
strmax = (pageSize[0]-40) * 2.8346
for f in fields_order:
s = 0
if fields[f]['type'] in ('date','time','float','integer'):
s = 60
strmax -= s
else:
t += fields[f].get('size', 56) / 28 + 1
l.append(s)
for pos in range(len(l)):
if not l[pos]:
s = fields[fields_order[pos]].get('size', 56) / 28 + 1
l[pos] = strmax * s / t
_append_node('tableSize', ','.join(map(str,l)) )
header = etree.SubElement(new_doc, 'header')
for f in fields_order:
field = etree.SubElement(header, 'field')
field.text = fields[f]['string'] or ''
lines = etree.SubElement(new_doc, 'lines')
for line in results:
node_line = etree.SubElement(lines, 'row')
for f in fields_order:
if fields[f]['type']=='many2one' and line[f]:
line[f] = line[f][1]
if fields[f]['type'] in ('one2many','many2many') and line[f]:
line[f] = '( '+str(len(line[f])) + ' )'
if fields[f]['type'] == 'float':
precision=(('digits' in fields[f]) and fields[f]['digits'][1]) or 2
line[f]=round(line[f],precision)
col = etree.SubElement(node_line, 'col', tree='no')
if line[f] is not None:
col.text = tools.ustr(line[f] or '')
else:
col.text = '/'
transform = etree.XSLT(
etree.parse(os.path.join(tools.config['root_path'],
'addons/base/report/custom_new.xsl')))
rml = etree.tostring(transform(new_doc))
self.obj = render.rml(rml, self.title)
self.obj.render()
return True
report_printscreen_list('report.printscreen.form')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Morgan-Stanley/treadmill | lib/python/treadmill/cron/model/__init__.py | 4 | 1111 | """Model of cron job.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import plugin_manager
_LOGGER = logging.getLogger(__name__)
def create(scheduler, job_id, event, resource, expression, count):
"""Create a new job/model"""
model, action = event.split(':')
_LOGGER.debug('model: %s, action: %s', model, action)
model_module = plugin_manager.load('treadmill.cron', model)
_LOGGER.debug('model_module: %r', model_module)
return model_module.create(
scheduler, job_id, model, action, resource, expression, count
)
def update(scheduler, job_id, event, resource, expression, count):
"""Update a job/model"""
model, action = event.split(':')
_LOGGER.debug('model: %s, action: %s', model, action)
model_module = plugin_manager.load('treadmill.cron', model)
_LOGGER.debug('model_module: %r', model_module)
return model_module.update(
scheduler, job_id, model, action, resource, expression, count
)
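# Illustrative usage sketch (not from the original module; the scheduler
# object, job id, and 'monitor:set_count' event are assumptions for the
# example only):
#
#     from treadmill.cron.model import create
#
#     job = create(
#         scheduler, 'monitor:proid.app', 'monitor:set_count',
#         'proid.app', '0 6 * * *', 2,
#     )
#
# The event string is split on ':' into (model, action), and the matching
# 'treadmill.cron' plugin is loaded to build the job.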
| apache-2.0 |
AViisiion/namebench | cocoa/controller.py | 176 | 6919 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cocoa frontend implementation for namebench."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import os
import sys
import traceback
from Foundation import *
from AppKit import *
from objc import IBAction, IBOutlet
from libnamebench import addr_util
from libnamebench import base_ui
from libnamebench import config
from libnamebench import conn_quality
from libnamebench import nameserver_list
from libnamebench import util
from libnamebench import version
# How much room do we have in the UI for status messages?
MAX_STATUS_LENGTH = 68
class controller(NSWindowController, base_ui.BaseUI):
"""Controller class associated with the main window."""
nameserver_form = IBOutlet()
include_global = IBOutlet()
include_regional = IBOutlet()
include_censorship_checks = IBOutlet()
data_source = IBOutlet()
health_performance = IBOutlet()
enable_sharing = IBOutlet()
location = IBOutlet()
query_count = IBOutlet()
run_count = IBOutlet()
status = IBOutlet()
spinner = IBOutlet()
button = IBOutlet()
def awakeFromNib(self):
"""Initializes our class, called automatically by Cocoa."""
self.SetupDataStructures()
self.resource_dir = os.path.join(os.getcwd(), 'namebench.app', 'Contents', 'Resources')
conf_file = util.FindDataFile('config/namebench.cfg')
(self.options, self.supplied_ns, self.global_ns, self.regional_ns) = config.GetConfiguration(filename=conf_file)
# TODO(tstromberg): Consider moving this into a thread for faster loading.
self.UpdateStatus('Discovering sources')
self.LoadDataSources()
self.UpdateStatus('Discovering location')
self.DiscoverLocation()
self.UpdateStatus('Populating Form...')
self.setFormDefaults()
self.UpdateStatus('namebench %s is ready!' % version.VERSION)
@IBAction
def startJob_(self, sender):
"""Trigger for the 'Start Benchmark' button, starts benchmark thread."""
self.ProcessForm()
self.UpdateStatus('Starting benchmark thread')
t = NSThread.alloc().initWithTarget_selector_object_(self, self.benchmarkThread, None)
t.start()
def UpdateStatus(self, message, count=None, total=None, error=False, debug=False):
"""Update the status message at the bottom of the window."""
if error:
return self.displayError(message, error)
if total:
state = '%s [%s/%s]' % (message, count, total)
elif count:
state = '%s%s' % (message, '.' * count)
else:
state = message
state = state.replace('%', '%%')
print state
NSLog(state)
self.status.setStringValue_(state[0:MAX_STATUS_LENGTH])
def ProcessForm(self):
"""Parse the form fields and populate class variables."""
self.UpdateStatus('Processing form inputs')
self.preferred = self.supplied_ns
self.include_internal = False
if not int(self.include_global.stringValue()):
self.UpdateStatus('Not using global')
self.global_ns = []
else:
self.preferred.extend(self.global_ns)
if not int(self.include_regional.stringValue()):
self.UpdateStatus('Not using regional')
self.regional_ns = []
if int(self.enable_sharing.stringValue()):
self.options.upload_results = True
if int(self.include_censorship_checks.stringValue()):
self.options.enable_censorship_checks = True
print self.health_performance.titleOfSelectedItem()
if 'Slow' in self.health_performance.titleOfSelectedItem():
self.options.health_thread_count = 10
self.options.input_source = self.data_src.ConvertSourceTitleToType(self.data_source.titleOfSelectedItem())
self.UpdateStatus('Supplied servers: %s' % self.nameserver_form.stringValue())
self.preferred.extend(addr_util.ExtractIPTuplesFromString(self.nameserver_form.stringValue()))
self.options.query_count = int(self.query_count.stringValue())
def benchmarkThread(self):
"""Run the benchmarks, designed to be run in a thread."""
pool = NSAutoreleasePool.alloc().init()
self.spinner.startAnimation_(self)
self.button.setEnabled_(False)
self.UpdateStatus('Preparing benchmark')
try:
self.PrepareTestRecords()
self.PrepareNameServers()
self.PrepareBenchmark()
self.RunAndOpenReports()
except nameserver_list.OutgoingUdpInterception:
(exc_type, exception, tb) = sys.exc_info()
self.UpdateStatus('Outgoing requests were intercepted!',
error=str(exception))
except nameserver_list.TooFewNameservers:
(exc_type, exception, tb) = sys.exc_info()
self.UpdateStatus('Too few nameservers to test', error=str(exception))
except conn_quality.OfflineConnection:
(exc_type, exception, tb) = sys.exc_info()
self.UpdateStatus('The connection appears to be offline!', error=str(exception))
except:
(exc_type, exception, tb) = sys.exc_info()
traceback.print_exc(tb)
error_msg = '\n'.join(traceback.format_tb(tb)[-4:])
self.UpdateStatus('FAIL: %s' % exception, error=error_msg)
self.spinner.stopAnimation_(self)
self.button.setEnabled_(True)
# This seems weird, but recommended by http://pyobjc.sourceforge.net/documentation/pyobjc-core/intro.html
del pool
def displayError(self, msg, details):
"""Display an alert drop-down message."""
NSLog('ERROR: %s - %s' % (msg, details))
alert = NSAlert.alloc().init()
alert.setMessageText_(msg)
alert.setInformativeText_(details)
buttonPressed = alert.runModal()
def setFormDefaults(self):
"""Set up the form with sane initial values."""
nameservers_string = ', '.join(nameserver_list.InternalNameServers())
self.nameserver_form.setStringValue_(nameservers_string)
self.query_count.setStringValue_(self.options.query_count)
self.location.removeAllItems()
if self.country:
self.location.addItemWithTitle_(self.country)
self.location.addItemWithTitle_('(Other)')
else:
self.location.addItemWithTitle_('(automatic)')
self.health_performance.removeAllItems()
self.health_performance.addItemWithTitle_('Fast')
self.health_performance.addItemWithTitle_('Slow (unstable network)')
self.data_source.removeAllItems()
self.data_source.addItemsWithTitles_(self.data_src.ListSourceTitles())
| apache-2.0 |
ibressler/pyqtgraph | pyqtgraph/console/template_pyside.py | 2 | 6225 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './console/template.ui'
#
# Created: Sun Sep 9 14:41:30 2012
# by: pyside-uic 0.2.13 running on PySide 1.1.0
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(710, 497)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtGui.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtGui.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtGui.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.output = QtGui.QPlainTextEdit(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Monospace")
self.output.setFont(font)
self.output.setReadOnly(True)
self.output.setObjectName("output")
self.verticalLayout.addWidget(self.output)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.input = CmdInput(self.layoutWidget)
self.input.setObjectName("input")
self.horizontalLayout.addWidget(self.input)
self.historyBtn = QtGui.QPushButton(self.layoutWidget)
self.historyBtn.setCheckable(True)
self.historyBtn.setObjectName("historyBtn")
self.horizontalLayout.addWidget(self.historyBtn)
self.exceptionBtn = QtGui.QPushButton(self.layoutWidget)
self.exceptionBtn.setCheckable(True)
self.exceptionBtn.setObjectName("exceptionBtn")
self.horizontalLayout.addWidget(self.exceptionBtn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.historyList = QtGui.QListWidget(self.splitter)
font = QtGui.QFont()
font.setFamily("Monospace")
self.historyList.setFont(font)
self.historyList.setObjectName("historyList")
self.exceptionGroup = QtGui.QGroupBox(self.splitter)
self.exceptionGroup.setObjectName("exceptionGroup")
self.gridLayout_2 = QtGui.QGridLayout(self.exceptionGroup)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setContentsMargins(-1, 0, -1, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.catchAllExceptionsBtn = QtGui.QPushButton(self.exceptionGroup)
self.catchAllExceptionsBtn.setCheckable(True)
self.catchAllExceptionsBtn.setObjectName("catchAllExceptionsBtn")
self.gridLayout_2.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1)
self.catchNextExceptionBtn = QtGui.QPushButton(self.exceptionGroup)
self.catchNextExceptionBtn.setCheckable(True)
self.catchNextExceptionBtn.setObjectName("catchNextExceptionBtn")
self.gridLayout_2.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1)
self.onlyUncaughtCheck = QtGui.QCheckBox(self.exceptionGroup)
self.onlyUncaughtCheck.setChecked(True)
self.onlyUncaughtCheck.setObjectName("onlyUncaughtCheck")
self.gridLayout_2.addWidget(self.onlyUncaughtCheck, 0, 2, 1, 1)
self.exceptionStackList = QtGui.QListWidget(self.exceptionGroup)
self.exceptionStackList.setAlternatingRowColors(True)
self.exceptionStackList.setObjectName("exceptionStackList")
self.gridLayout_2.addWidget(self.exceptionStackList, 2, 0, 1, 5)
self.runSelectedFrameCheck = QtGui.QCheckBox(self.exceptionGroup)
self.runSelectedFrameCheck.setChecked(True)
self.runSelectedFrameCheck.setObjectName("runSelectedFrameCheck")
self.gridLayout_2.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 5)
self.exceptionInfoLabel = QtGui.QLabel(self.exceptionGroup)
self.exceptionInfoLabel.setObjectName("exceptionInfoLabel")
self.gridLayout_2.addWidget(self.exceptionInfoLabel, 1, 0, 1, 5)
self.clearExceptionBtn = QtGui.QPushButton(self.exceptionGroup)
self.clearExceptionBtn.setEnabled(False)
self.clearExceptionBtn.setObjectName("clearExceptionBtn")
self.gridLayout_2.addWidget(self.clearExceptionBtn, 0, 4, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 0, 3, 1, 1)
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Console", None, QtGui.QApplication.UnicodeUTF8))
self.historyBtn.setText(QtGui.QApplication.translate("Form", "History..", None, QtGui.QApplication.UnicodeUTF8))
self.exceptionBtn.setText(QtGui.QApplication.translate("Form", "Exceptions..", None, QtGui.QApplication.UnicodeUTF8))
self.exceptionGroup.setTitle(QtGui.QApplication.translate("Form", "Exception Handling", None, QtGui.QApplication.UnicodeUTF8))
self.catchAllExceptionsBtn.setText(QtGui.QApplication.translate("Form", "Show All Exceptions", None, QtGui.QApplication.UnicodeUTF8))
self.catchNextExceptionBtn.setText(QtGui.QApplication.translate("Form", "Show Next Exception", None, QtGui.QApplication.UnicodeUTF8))
self.onlyUncaughtCheck.setText(QtGui.QApplication.translate("Form", "Only Uncaught Exceptions", None, QtGui.QApplication.UnicodeUTF8))
self.runSelectedFrameCheck.setText(QtGui.QApplication.translate("Form", "Run commands in selected stack frame", None, QtGui.QApplication.UnicodeUTF8))
self.exceptionInfoLabel.setText(QtGui.QApplication.translate("Form", "Exception Info", None, QtGui.QApplication.UnicodeUTF8))
self.clearExceptionBtn.setText(QtGui.QApplication.translate("Form", "Clear Exception", None, QtGui.QApplication.UnicodeUTF8))
from .CmdInput import CmdInput
| mit |
ustramooner/CouchPotato | library/imdb/locale/rebuildmo.py | 137 | 1498 | #!/usr/bin/env python
"""
rebuildmo.py script.
This script builds the .mo files from the .po files.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import glob
import msgfmt
import os
#LOCALE_DIR = os.path.dirname(__file__)
def rebuildmo():
lang_glob = 'imdbpy-*.po'
created = []
for input_file in glob.glob(lang_glob):
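# e.g. 'imdbpy-fr.po' -> 'fr': strip the 'imdbpy-' prefix and '.po' suffix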
lang = input_file[7:-3]
if not os.path.exists(lang):
os.mkdir(lang)
mo_dir = os.path.join(lang, 'LC_MESSAGES')
if not os.path.exists(mo_dir):
os.mkdir(mo_dir)
output_file = os.path.join(mo_dir, 'imdbpy.mo')
msgfmt.make(input_file, output_file)
created.append(lang)
return created
if __name__ == '__main__':
languages = rebuildmo()
print 'Created locale for: %s.' % ' '.join(languages)
| gpl-3.0 |
EliotBerriot/django | tests/forms_tests/field_tests/test_multivaluefield.py | 156 | 5661 | from datetime import datetime
from django.forms import (
CharField, Form, MultipleChoiceField, MultiValueField, MultiWidget,
SelectMultiple, SplitDateTimeField, SplitDateTimeWidget, TextInput,
ValidationError,
)
from django.test import SimpleTestCase
beatles = (('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))
class ComplexMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = (
TextInput(),
SelectMultiple(choices=beatles),
SplitDateTimeWidget(),
)
super(ComplexMultiWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
data = value.split(',')
return [
data[0],
list(data[1]),
datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S"),
]
return [None, None, None]
def format_output(self, rendered_widgets):
return '\n'.join(rendered_widgets)
class ComplexField(MultiValueField):
def __init__(self, required=True, widget=None, label=None, initial=None):
fields = (
CharField(),
MultipleChoiceField(choices=beatles),
SplitDateTimeField(),
)
super(ComplexField, self).__init__(fields, required, widget, label, initial)
def compress(self, data_list):
if data_list:
return '%s,%s,%s' % (data_list[0], ''.join(data_list[1]), data_list[2])
return None
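# compress() is the inverse of ComplexMultiWidget.decompress(): the three
# cleaned sub-values are joined back into the single comma-separated string
# that the field ultimately returns.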
class ComplexFieldForm(Form):
field1 = ComplexField(widget=ComplexMultiWidget())
class MultiValueFieldTest(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.field = ComplexField(widget=ComplexMultiWidget())
super(MultiValueFieldTest, cls).setUpClass()
def test_clean(self):
self.assertEqual(
self.field.clean(['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]),
'some text,JP,2007-04-25 06:24:00',
)
def test_bad_choice(self):
msg = "'Select a valid choice. X is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
self.field.clean(['some text', ['X'], ['2007-04-25', '6:24:00']])
def test_no_value(self):
"""
If insufficient data is provided, None is substituted.
"""
msg = "'This field is required.'"
with self.assertRaisesMessage(ValidationError, msg):
self.field.clean(['some text', ['JP']])
def test_has_changed_no_initial(self):
self.assertTrue(self.field.has_changed(
None, ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']],
))
def test_has_changed_same(self):
self.assertFalse(self.field.has_changed(
'some text,JP,2007-04-25 06:24:00',
['some text', ['J', 'P'], ['2007-04-25', '6:24:00']],
))
def test_has_changed_first_widget(self):
"""
Test when the first widget's data has changed.
"""
self.assertTrue(self.field.has_changed(
'some text,JP,2007-04-25 06:24:00',
['other text', ['J', 'P'], ['2007-04-25', '6:24:00']],
))
def test_has_changed_last_widget(self):
"""
Test when the last widget's data has changed. This ensures that it is
not short circuiting while testing the widgets.
"""
self.assertTrue(self.field.has_changed(
'some text,JP,2007-04-25 06:24:00',
['some text', ['J', 'P'], ['2009-04-25', '11:44:00']],
))
def test_form_as_table(self):
form = ComplexFieldForm()
self.assertHTMLEqual(
form.as_table(),
"""
<tr><th><label for="id_field1_0">Field1:</label></th>
<td><input type="text" name="field1_0" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" />
<input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr>
""",
)
def test_form_as_table_data(self):
form = ComplexFieldForm({
'field1_0': 'some text',
'field1_1': ['J', 'P'],
'field1_2_0': '2007-04-25',
'field1_2_1': '06:24:00',
})
self.assertHTMLEqual(
form.as_table(),
"""
<tr><th><label for="id_field1_0">Field1:</label></th>
<td><input type="text" name="field1_0" value="some text" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" />
<input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr>
""",
)
def test_form_cleaned_data(self):
form = ComplexFieldForm({
'field1_0': 'some text',
'field1_1': ['J', 'P'],
'field1_2_0': '2007-04-25',
'field1_2_1': '06:24:00',
})
form.is_valid()
self.assertEqual(
form.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00',
)
| bsd-3-clause |
TuSimple/simpledet | config/resnet_v1b/faster_r50v1b_fpn_1x.py | 1 | 8673 | from symbol.builder import FasterRcnn as Detector
from symbol.builder import ResNetV1bFPN as Backbone
from symbol.builder import add_anchor_to_arg
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRpnHead as RpnHead
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = False
loader_worker = 8
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
normalizer = normalizer_factory(type="fixbn")
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
depth = 50
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
nnvm_proposal = True
nnvm_rpn_target = False
class anchor_generate:
scale = (8,)
ratio = (0.5, 1.0, 2.0)
stride = (4, 8, 16, 32, 64)
max_side = 1400
class anchor_assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
image_anchor = 256
pos_fraction = 0.5
class head:
conv_channel = 256
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 2000 if is_train else 1000
post_nms_top_n = 2000 if is_train else 1000
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = False
image_roi = 512
fg_fraction = 0.25
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 81
class_agnostic = False
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 512
batch_image = General.batch_image
class regress_target:
class_agnostic = False
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam)
roi_extractor = RoiExtractor(RoiParam)
bbox_head = BboxHead(BboxParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
rpn_test_sym = None
test_sym = None
else:
train_sym = None
rpn_test_sym = detector.get_rpn_test_symbol(backbone, neck, rpn_head)
test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
rpn_test_symbol = rpn_test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
def process_weight(sym, arg, aux):
for stride in RpnParam.anchor_generate.stride:
add_anchor_to_arg(
sym, arg, aux, RpnParam.anchor_generate.max_side,
stride, RpnParam.anchor_generate.scale,
RpnParam.anchor_generate.ratio)
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = None
class schedule:
begin_epoch = 0
end_epoch = 6
lr_iter = [60000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
80000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3.0
iter = 500
class TestParam:
min_det_score = 0.05
max_det_per_image = 100
process_roidb = lambda x: x
process_output = lambda x, y: x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
class AnchorTarget2DParam:
def __init__(self):
self.generate = self._generate()
class _generate:
def __init__(self):
self.stride = (4, 8, 16, 32, 64)
self.short = (200, 100, 50, 25, 13)
self.long = (334, 167, 84, 42, 21)
scales = (8,)  # one-element tuple; bare (8) would just be the int 8
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage
from models.FPN.input import PyramidAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["gt_bbox", "im_info"]
if not RpnParam.nnvm_rpn_target:
transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
import core.detection_metric as metric
rpn_acc_metric = metric.AccWithIgnore(
"RpnAcc",
["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
rpn_l1_metric = metric.L1(
"RpnL1",
["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
# for bbox, the label is generated in network so it is an output
box_acc_metric = metric.AccWithIgnore(
"RcnnAcc",
["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
[]
)
box_l1_metric = metric.L1(
"RcnnL1",
["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
[]
)
metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
| apache-2.0 |
RPI-OPENEDX/edx-platform | common/test/acceptance/pages/lms/pay_and_verify.py | 110 | 6385 | """Payment and verification pages"""
import re
from bok_choy.page_object import PageObject
from bok_choy.promise import Promise
from . import BASE_URL
from .dashboard import DashboardPage
class PaymentAndVerificationFlow(PageObject):
"""Interact with the split payment and verification flow.
The flow can be accessed at the following URLs:
`/verify_student/start-flow/{course}/`
`/verify_student/upgrade/{course}/`
`/verify_student/verify-now/{course}/`
`/verify_student/verify-later/{course}/`
`/verify_student/payment-confirmation/{course}/`
Users can reach the flow when attempting to enroll in a course's verified
mode, either directly from the track selection page, or by upgrading from
the honor mode. Users can also reach the flow when attempting to complete
a deferred verification, or when attempting to view a receipt corresponding
to an earlier payment.
"""
def __init__(self, browser, course_id, entry_point='start-flow'):
"""Initialize the page.
Arguments:
browser (Browser): The browser instance.
course_id (unicode): The course in which the user is enrolling.
Keyword Arguments:
entry_point (str): Where to begin the flow; must be one of 'start-flow',
'upgrade', 'verify-now', verify-later', or 'payment-confirmation'.
Raises:
ValueError
"""
super(PaymentAndVerificationFlow, self).__init__(browser)
self._course_id = course_id
if entry_point not in ['start-flow', 'upgrade', 'verify-now', 'verify-later', 'payment-confirmation']:
raise ValueError(
"Entry point must be either 'start-flow', 'upgrade', 'verify-now', 'verify-later', or 'payment-confirmation'."
)
self._entry_point = entry_point
@property
def url(self):
"""Return the URL corresponding to the initial position in the flow."""
url = "{base}/verify_student/{entry_point}/{course}/".format(
base=BASE_URL,
entry_point=self._entry_point,
course=self._course_id
)
return url
def is_browser_on_page(self):
"""Check if a step in the payment and verification flow has loaded."""
return (
self.q(css="div .make-payment-step").is_present() or
self.q(css="div .payment-confirmation-step").is_present() or
self.q(css="div .face-photo-step").is_present() or
self.q(css="div .id-photo-step").is_present() or
self.q(css="div .review-photos-step").is_present() or
self.q(css="div .enrollment-confirmation-step").is_present()
)
def indicate_contribution(self):
"""Interact with the radio buttons appearing on the first page of the upgrade flow."""
self.q(css=".contribution-option > input").first.click()
def proceed_to_payment(self):
"""Interact with the payment button."""
self.q(css=".payment-button").click()
FakePaymentPage(self.browser, self._course_id).wait_for_page()
def immediate_verification(self):
"""Interact with the immediate verification button."""
self.q(css="#verify_now_button").click()
PaymentAndVerificationFlow(self.browser, self._course_id, entry_point='verify-now').wait_for_page()
def defer_verification(self):
"""Interact with the link allowing the user to defer their verification."""
self.q(css="#verify_later_button").click()
DashboardPage(self.browser).wait_for_page()
def webcam_capture(self):
"""Interact with a webcam capture button."""
self.q(css="#webcam_capture_button").click()
def _check_func():
next_step_button_classes = self.q(css="#next_step_button").attrs('class')
next_step_button_enabled = 'is-disabled' not in next_step_button_classes
return (next_step_button_enabled, next_step_button_classes)
# Check that the #next_step_button is enabled before returning control to the caller
Promise(_check_func, "The 'Next Step' button is enabled.").fulfill()
def next_verification_step(self, next_page_object):
"""Interact with the 'Next' step button found in the verification flow."""
self.q(css="#next_step_button").click()
next_page_object.wait_for_page()
def go_to_dashboard(self):
"""Interact with the link to the dashboard appearing on the enrollment confirmation page."""
if self.q(css="div .enrollment-confirmation-step").is_present():
self.q(css=".action-primary").click()
else:
raise Exception("The dashboard can only be accessed from the enrollment confirmation.")
DashboardPage(self.browser).wait_for_page()
class FakePaymentPage(PageObject):
"""Interact with the fake payment endpoint.
This page is hidden behind the feature flag `ENABLE_PAYMENT_FAKE`,
which is enabled in the Bok Choy env settings.
Configuring this payment endpoint also requires configuring the Bok Choy
auth settings with the following:
"CC_PROCESSOR_NAME": "CyberSource2",
"CC_PROCESSOR": {
"CyberSource2": {
"SECRET_KEY": <string>,
"ACCESS_KEY": <string>,
"PROFILE_ID": "edx",
"PURCHASE_ENDPOINT": "/shoppingcart/payment_fake"
}
}
"""
def __init__(self, browser, course_id):
"""Initialize the page.
Arguments:
browser (Browser): The browser instance.
course_id (unicode): The course in which the user is enrolling.
"""
super(FakePaymentPage, self).__init__(browser)
self._course_id = course_id
url = BASE_URL + "/shoppingcart/payment_fake/"
def is_browser_on_page(self):
"""Check if a step in the payment and verification flow has loaded."""
message = self.q(css='BODY').text[0]
return bool(re.search('Payment page', message))
def submit_payment(self):
"""Interact with the payment submission button."""
self.q(css="input[value='Submit']").click()
return PaymentAndVerificationFlow(self.browser, self._course_id, entry_point='payment-confirmation').wait_for_page()
| agpl-3.0 |
martinpilat/dag-evaluate | custom_models.py | 1 | 6526 | __author__ = 'Martin'
import pandas as pd
import numpy as np
from scipy import stats
from sklearn import cross_validation
from sklearn import ensemble
def is_transformer(cls):
return hasattr(cls, '__dageva_type') and cls.__dageva_type == 'transformer'
def is_predictor(cls):
return hasattr(cls, '__dageva_type') and cls.__dageva_type == 'predictor'
def make_transformer(cls):
"""
Marks ``cls`` as a transformer by setting its ``__dageva_type`` attribute, useful in order to distinguish between transformers and predictors.
:param cls: The class to mark as a Transformer
:return: The same class, with ``__dageva_type`` set to 'transformer'
"""
cls.__dageva_type = 'transformer'
return cls
def make_predictor(cls):
"""
Marks ``cls`` as a predictor by setting its ``__dageva_type`` attribute, useful in order to distinguish between transformers and predictors.
:param cls: The class to mark as a Predictor
:return: The same class, with ``__dageva_type`` set to 'predictor'
"""
cls.__dageva_type = 'predictor'
return cls
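# Minimal usage sketch (assumption: scikit-learn is installed; PCA and SVC
# are arbitrary example estimators, any fit/transform or fit/predict class
# works the same way):
#
#     from sklearn.decomposition import PCA
#     from sklearn.svm import SVC
#
#     pca_cls = make_transformer(PCA)
#     svc_cls = make_predictor(SVC)
#     assert is_transformer(pca_cls) and is_predictor(svc_cls)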
class KMeansSplitter:
def __init__(self, k):
from sklearn import cluster
self.kmeans = cluster.KMeans(n_clusters=k)
self.sorted_outputs = None
self.weight_idx = []
def fit(self, x, y, sample_weight=None):
self.kmeans.fit(x, y)
preds = self.kmeans.predict(x)
out = []
for i in range(self.kmeans.n_clusters):
idx = [n for n in range(len(preds)) if preds[n] == i]
self.weight_idx.append(idx)
if isinstance(x, pd.DataFrame):
out.append(x.iloc[idx])
else:
out.append(x[idx])
mins = [len(o.index) for o in out]  # 'o' avoids shadowing the x argument
self.sorted_outputs = list(np.argsort(mins))
self.weight_idx = [self.weight_idx[i] for i in self.sorted_outputs]
return self
def transform(self, x):
preds = self.kmeans.predict(x)
out = []
for i in range(self.kmeans.n_clusters):
idx = [n for n in range(len(preds)) if preds[n] == i]
if isinstance(x, pd.DataFrame):
out.append(x.iloc[idx])
else:
out.append(x[idx])
return [out[i] for i in self.sorted_outputs]
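# Sketch of intended use (assumes a pandas DataFrame X with labels y):
#
#     splitter = KMeansSplitter(3).fit(X, y)
#     parts = splitter.transform(X)  # 3 DataFrames, ordered smallest-first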
class ConstantModel:
def __init__(self, cls):
self.cls = cls
def fit(self, x, y):
return self
def predict(self, x):
return pd.Series(np.array([self.cls]*len(x)), index=x.index)
class Aggregator:
def aggregate(self, x, y):
pass
class Voter(Aggregator):
def fit(self, x, y):
return self
def union_aggregate(self, x, y):
f_list, t_list = x, y
f_frame, t_frame = pd.DataFrame(), pd.Series()
for i in range(len(t_list)):
fl = f_list[i]
assert isinstance(fl, pd.DataFrame)
if fl.columns.dtype == np.dtype('int64'):
cols = map(lambda z: str(id(fl)) + '_' + str(z), fl.columns)
fl.columns = cols
t_frame = t_frame.append(t_list[i])
f_frame = f_frame.append(f_list[i])
f_frame.sort_index(inplace=True)
t_frame = t_frame.sort_index()
return f_frame, t_frame
def aggregate(self, x, y):
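# Majority vote: if the sub-models predicted different row subsets (e.g.
# downstream of a splitter), fall back to concatenating them; otherwise
# build one column per model and take the row-wise mode.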
if not all([x[0].index.equals(xi.index) for xi in x]):
return self.union_aggregate(x, y)
res = pd.DataFrame(index=y[0].index)
for i in range(len(y)):
res["p"+str(i)] = y[i]
modes = res.apply(lambda row: stats.mode(row, axis=None)[0][0], axis=1)
if modes.empty:
return x[0], pd.Series()
return x[0], pd.Series(modes, index=y[0].index)
class Workflow:
def __init__(self, dag=None):
self.dag = dag
self.sample_weight = None
self.classes_ = None
def fit(self, X, y, sample_weight=None):
import eval #TODO: Refactor to remove circular imports
self.models = eval.train_dag(self.dag, train_data=(X, y), sample_weight=sample_weight)
self.classes_ = np.unique(y)
return self
def predict(self, X):
import eval #TODO: Refactor to remove circular imports
return np.array(eval.test_dag(self.dag, self.models, test_data=(X, None)))
def transform(self, X):
import eval
return eval.test_dag(self.dag, self.models, test_data=(X, None), output='feats_only')
def get_params(self, deep=False):
return {'dag': self.dag}
def set_params(self, **params):
if 'sample_weight' in params:
self.sample_weight = params['sample_weight']
class Stacker(Aggregator):
def __init__(self, sub_dags=None, initial_dag=None):
self.sub_dags = sub_dags
self.initial_dag = initial_dag
def fit(self, X, y, sample_weight=None):
import eval
preds = [[] for _ in self.sub_dags]
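# Out-of-fold stacking: every sub-dag is trained on 4 of the 5 folds and
# predicts the held-out fold, so the meta-features collected in self.train
# are never produced by a model that saw the corresponding training row.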
for train_idx, test_idx in cross_validation.StratifiedKFold(y, n_folds=5):
tr_X, tr_y = X.iloc[train_idx], y.iloc[train_idx]
tst_X, tst_y = X.iloc[test_idx], y.iloc[test_idx]
wf_init = Workflow(self.initial_dag)
wf_init.fit(tr_X, tr_y, sample_weight=sample_weight)
preproc_X, preproc_y = eval.test_dag(self.initial_dag, wf_init.models, test_data=(tr_X, tr_y), output='all')
pp_tst_X = wf_init.transform(tst_X)
if pp_tst_X.empty:
continue
for i, dag in enumerate(self.sub_dags):
wf = Workflow(dag)
wf.fit(preproc_X, preproc_y)
res = wf.predict(pp_tst_X)
preds[i].append(pd.DataFrame(res, index=pp_tst_X.index))
preds = [pd.concat(ps) for ps in preds]
self.train = pd.concat(preds, axis=1)
self.train.columns = ['p' + str(x) for x in range(len(preds))]
return self
def aggregate(self, X, y):
res = pd.DataFrame(index=y[0].index)
for i in range(len(X)):
res["p" + str(i)] = y[i]
return res, y[0]
class Booster(ensemble.AdaBoostClassifier):
def __init__(self, sub_dags=()):
self.sub_dags = sub_dags
self.current_sub_dag = 0
super(Booster, self).__init__(base_estimator=Workflow(), n_estimators=len(sub_dags), algorithm='SAMME')
def _make_estimator(self, append=True, random_state=0):
estimator = Workflow(self.sub_dags[self.current_sub_dag])
self.current_sub_dag += 1
if append:
self.estimators_.append(estimator)
return estimator | mit |
sql-analytics/openvstorage | ovs/extensions/snmp/trapsender.py | 2 | 2945 | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pysnmp.entity.rfc3413.oneliner import ntforg
from pysnmp.proto import rfc1902
from pysnmp.entity import engine
"""
SNMP TRAP Sender
"""
class SNMPTrapSender():
"""
Send SNMP TRAP Message
"""
def __init__(self, host, port = 162):
self.host = host
self.port = port
self.authData = None
# TODO: make the engine id customizable.
# An engine id mainly makes sense in a v3 context using USM:
# SNMPv3 with the User-Based Security Model (USM) uses an EngineID to
# identify the authoritative SNMPv3 application (the one that controls
# the flow of information).
snmpEngineId = rfc1902.OctetString(hexValue='0000000000000000')
self.ntfOrg = ntforg.NotificationOriginator(engine.SnmpEngine(snmpEngineId))
def send(self, mib, value, value_type='OctetString'):
"""
v1 snmp, public
"""
if not self.authData:
raise ValueError('Credentials not set, use .security_XXX() methods')
obj_class = getattr(rfc1902, value_type)
errorIndication = self.ntfOrg.sendNotification(self.authData,
ntforg.UdpTransportTarget((self.host, self.port)), #transportTarget
'trap', #notifyType
ntforg.MibVariable('SNMPv2-MIB', 'snmpOutTraps'), #notificationType
((rfc1902.ObjectName(mib),
obj_class(value))))
if errorIndication:
raise RuntimeError('Notification not sent: %s' % errorIndication)
print('Sent SNMP TRAP {} "{}" to {} {}'.format(mib, value, self.host, self.port))
def security_public(self, community_string = 'public'):
"""
v1 snmp, insecure
"""
self.authData = ntforg.CommunityData(community_string, mpModel=0)
def security_aes128(self, user, authkey, privkey):
"""
v3 snmp, secure
"""
self.authData = ntforg.UsmUserData(user, authkey, privkey,
authProtocol=ntforg.usmHMACSHAAuthProtocol,
privProtocol=ntforg.usmAesCfb128Protocol)
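# Illustrative usage (host, OID and value are placeholders, not values
# shipped with this class):
#
#     sender = SNMPTrapSender('127.0.0.1', 162)
#     sender.security_public('public')
#     sender.send('1.3.6.1.4.1.674.10892.1', 'disk degraded',
#                 value_type='OctetString')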
| apache-2.0 |
jbassen/edx-platform | common/djangoapps/student/migrations/0013_auto__chg_field_userprofile_meta.py | 188 | 10346 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'UserProfile.meta'
db.alter_column('auth_userprofile', 'meta', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.courseenrollment': {
'Meta': {'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
pradeep-gr/mbed-os5-onsemi | tools/host_tests/echo_flow_control.py | 125 | 1381 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from host_test import Test
class EchoTest(Test):
def __init__(self):
Test.__init__(self)
self.mbed.init_serial()
self.mbed.extra_serial.rtscts = True
self.mbed.reset()
def test(self):
self.mbed.flush()
self.notify("Starting the ECHO test")
TEST="longer serial test"
check = True
for i in range(1, 100):
self.mbed.extra_serial.write(TEST + "\n")
l = self.mbed.extra_serial.readline().strip()
if not l: continue
if l != TEST:
check = False
self.notify('"%s" != "%s"' % (l, TEST))
else:
if (i % 10) == 0:
self.notify('.')
return check
if __name__ == '__main__':
EchoTest().run()
| apache-2.0 |
nerzhul/ansible | lib/ansible/modules/system/capabilities.py | 23 | 7173 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Nate Coraor <nate@bx.psu.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: capabilities
short_description: Manage Linux capabilities
description:
- This module manipulates files privileges using the Linux capabilities(7) system.
version_added: "1.6"
options:
path:
description:
- Specifies the path to the file to be managed.
required: true
default: null
capability:
description:
- Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
required: true
default: null
aliases: [ 'cap' ]
state:
description:
- Whether the entry should be present or absent in the file's capabilities.
choices: [ "present", "absent" ]
default: present
notes:
- The capabilities system will automatically transform operators and flags
into the effective set, so (for example, cap_foo=ep will probably become
cap_foo+ep). This module does not attempt to determine the final operator
and flags to compare, so you will want to ensure that your capabilities
argument matches the final capabilities.
requirements: []
author: "Nate Coraor (@natefoo)"
'''
EXAMPLES = '''
# Set cap_sys_chroot+ep on /foo
- capabilities:
path: /foo
capability: cap_sys_chroot+ep
state: present
# Remove cap_net_bind_service from /bar
- capabilities:
path: /bar
capability: cap_net_bind_service
state: absent
'''
OPS = ( '=', '-', '+' )
# ==============================================================
import os
import tempfile
import re
class CapabilitiesModule(object):
platform = 'Linux'
distribution = None
def __init__(self, module):
self.module = module
self.path = module.params['path'].strip()
self.capability = module.params['capability'].strip().lower()
self.state = module.params['state']
self.getcap_cmd = module.get_bin_path('getcap', required=True)
self.setcap_cmd = module.get_bin_path('setcap', required=True)
self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present')
self.run()
def run(self):
current = self.getcap(self.path)
caps = [ cap[0] for cap in current ]
if self.state == 'present' and self.capability_tup not in current:
# need to add capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list if it's already set (but op/flags differ)
current = filter(lambda x: x[0] != self.capability_tup[0], current)
# add new cap with correct op/flags
current.append( self.capability_tup )
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
elif self.state == 'absent' and self.capability_tup[0] in caps:
# need to remove capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list and then set current list
current = filter(lambda x: x[0] != self.capability_tup[0], current)
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
self.module.exit_json(changed=False, state=self.state)
def getcap(self, path):
rval = []
cmd = "%s -v %s" % (self.getcap_cmd, path)
rc, stdout, stderr = self.module.run_command(cmd)
# If file xattrs are set but no caps are set the output will be:
# '/foo ='
# If file xattrs are unset the output will be:
# '/foo'
# If the file does not exist, the output will be (with rc == 0...):
# '/foo (No such file or directory)'
if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
if stdout.strip() != path:
caps = stdout.split(' =')[1].strip().split()
for cap in caps:
cap = cap.lower()
# getcap condenses capabilities with the same op/flags into a
# comma-separated list, so we have to parse that
if ',' in cap:
cap_group = cap.split(',')
cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
for subcap in cap_group:
rval.append( ( subcap, op, flags ) )
else:
rval.append(self._parse_cap(cap))
return rval
def setcap(self, path, caps):
caps = ' '.join([ ''.join(cap) for cap in caps ])
cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
else:
return stdout
def _parse_cap(self, cap, op_required=True):
opind = -1
try:
i = 0
while opind == -1:
opind = cap.find(OPS[i])
i += 1
except IndexError:
if op_required:
self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
else:
return (cap, None, None)
op = cap[opind]
cap, flags = cap.split(op)
return (cap, op, flags)
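# For example, _parse_cap('cap_net_raw+ep') yields
# ('cap_net_raw', '+', 'ep'), while _parse_cap('cap_net_raw',
# op_required=False) yields ('cap_net_raw', None, None).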
# ==============================================================
# main
def main():
# defining module
module = AnsibleModule(
argument_spec = dict(
path = dict(aliases=['key'], required=True),
capability = dict(aliases=['cap'], required=True),
state = dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True
)
CapabilitiesModule(module)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
magic0704/oslo.utils | oslo_utils/importutils.py | 2 | 2355 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and return an instance of it, first by trying
to find the class in a default namespace, then failing back to
a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
cls = import_class(import_value)
except ImportError:
cls = import_class(import_str)
return cls(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def import_versioned_module(version, submodule=None):
module = 'oslo.v%s' % version
if submodule:
module = '.'.join((module, submodule))
return import_module(module)
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
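# Example usage (illustrative only; any importable paths work):
#
#     cls = import_class('collections.OrderedDict')
#     obj = import_object('collections.OrderedDict', a=1)
#     json_mod = import_module('json')
#     etree = try_import('lxml.etree')  # falls back to None if lxml is absent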
| apache-2.0 |
GbalsaC/bitnamiP | venv/src/xblock-poll/tests/integration/test_max_submissions.py | 3 | 3545 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 McKinsey Academy
#
# Authors:
# Jonathan Piacenti <jonathan@opencraft.com>
#
# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute and/or modify this program under the terms of
# the GNU Affero General Public License (AGPL) as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version of the AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3". If not, see <http://www.gnu.org/licenses/>.
#
from ddt import ddt, unpack, data
from tests.integration.base_test import PollBaseTest, DEFAULT_POLL_NAMES, DEFAULT_SURVEY_NAMES
scenarios_infinite = (
('Survey Max Submissions Infinite', DEFAULT_SURVEY_NAMES),
('Poll Max Submissions Infinite', DEFAULT_POLL_NAMES),
)
scenarios_max = (
('Survey Max Submissions', DEFAULT_SURVEY_NAMES),
('Poll Max Submissions', DEFAULT_POLL_NAMES),
)
@ddt
class TestPrivateResults(PollBaseTest):
@unpack
@data(*scenarios_infinite)
def test_infinite_submissions(self, page, names):
"""
        We can't actually test infinite submissions, but we can be reasonably
        certain the feature works if it accepts a few more submissions than we
        have allocated. Our setting is '0', which is code for 'as often as you
        like' rather than '0 attempts permitted'.
Try this by staying on the page, and by loading it up again.
"""
for __ in range(0, 2):
self.go_to_page(page)
for ___ in range(1, 5):
self.do_submit(names)
self.assertTrue(self.get_submit().is_enabled())
@unpack
@data(*scenarios_max)
def test_max_submissions_one_view(self, page, names):
"""
Verify that the user can't submit more than a certain number of times. Our XML allows two submissions.
"""
self.go_to_page(page)
for __ in range(0, 2):
self.do_submit(names)
self.assertFalse(self.get_submit().is_enabled())
@unpack
@data(*scenarios_max)
def test_max_submissions_reload(self, page, names):
"""
Same as above, but revisit the page between attempts.
"""
self.go_to_page(page)
self.do_submit(names)
self.go_to_page(page)
self.do_submit(names)
self.assertFalse(self.get_submit().is_enabled())
@unpack
@data(*scenarios_max)
def test_max_submissions_counter(self, page, names):
"""
Verify a counter is displayed stating how many submissions have been used.
Our XML allows two submissions, and we must mind the off-by-one for range.
"""
self.go_to_page(page)
counter_div = self.browser.find_element_by_css_selector('.poll-submissions-count')
counter = self.browser.find_element_by_css_selector('.poll-current-count')
self.assertFalse(counter_div.is_displayed())
for i in range(1, 3):
self.do_submit(names)
self.assertTrue(counter_div.is_displayed())
self.assertEqual(counter.text.strip(), str(i))
| agpl-3.0 |
daineseh/kodi-plugin.video.ted-talks-chinese | youtube_dl/extractor/instagram.py | 3 | 5853 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
get_element_by_attribute,
int_or_none,
limit_length,
lowercase_escape,
)
class InstagramIE(InfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com/p/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
'md5': '0d2da106a9d2631273e192b372806516',
'info_dict': {
'id': 'aye83DjauH',
'ext': 'mp4',
'uploader_id': 'naomipq',
'title': 'Video by naomipq',
'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
}
}, {
# missing description
'url': 'https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears',
'info_dict': {
'id': 'BA-pQFBG8HZ',
'ext': 'mp4',
'uploader_id': 'britneyspears',
'title': 'Video by britneyspears',
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://instagram.com/p/-Cmh1cukG2/',
'only_matching': True,
}, {
'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
'only_matching': True,
}]
@staticmethod
def _extract_embed_url(webpage):
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1',
webpage)
if mobj:
return mobj.group('url')
blockquote_el = get_element_by_attribute(
'class', 'instagram-media', webpage)
if blockquote_el is None:
return
mobj = re.search(
r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el)
if mobj:
return mobj.group('link')
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
url = mobj.group('url')
webpage = self._download_webpage(url, video_id)
uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
webpage, 'uploader id', fatal=False)
desc = self._search_regex(
r'"caption":"(.+?)"', webpage, 'description', default=None)
if desc is not None:
desc = lowercase_escape(desc)
return {
'id': video_id,
'url': self._og_search_video_url(webpage, secure=False),
'ext': 'mp4',
'title': 'Video by %s' % uploader_id,
'thumbnail': self._og_search_thumbnail(webpage),
'uploader_id': uploader_id,
'description': desc,
}
class InstagramUserIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
IE_DESC = 'Instagram user profile'
IE_NAME = 'instagram:user'
_TEST = {
'url': 'https://instagram.com/porsche',
'info_dict': {
'id': 'porsche',
'title': 'porsche',
},
'playlist_mincount': 2,
'playlist': [{
'info_dict': {
'id': '614605558512799803_462752227',
'ext': 'mp4',
'title': '#Porsche Intelligent Performance.',
'thumbnail': 're:^https?://.*\.jpg',
'uploader': 'Porsche',
'uploader_id': 'porsche',
'timestamp': 1387486713,
'upload_date': '20131219',
},
}],
'params': {
'extract_flat': True,
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader_id = mobj.group('username')
entries = []
page_count = 0
media_url = 'http://instagram.com/%s/media' % uploader_id
while True:
page = self._download_json(
media_url, uploader_id,
note='Downloading page %d ' % (page_count + 1),
)
page_count += 1
for it in page['items']:
if it.get('type') != 'video':
continue
like_count = int_or_none(it.get('likes', {}).get('count'))
user = it.get('user', {})
formats = [{
'format_id': k,
'height': v.get('height'),
'width': v.get('width'),
'url': v['url'],
} for k, v in it['videos'].items()]
self._sort_formats(formats)
thumbnails_el = it.get('images', {})
thumbnail = thumbnails_el.get('thumbnail', {}).get('url')
# In some cases caption is null, which corresponds to None
# in python. As a result, it.get('caption', {}) gives None
title = (it.get('caption') or {}).get('text', it['id'])
entries.append({
'id': it['id'],
'title': limit_length(title, 80),
'formats': formats,
'thumbnail': thumbnail,
'webpage_url': it.get('link'),
'uploader': user.get('full_name'),
'uploader_id': user.get('username'),
'like_count': like_count,
'timestamp': int_or_none(it.get('created_time')),
})
if not page['items']:
break
max_id = page['items'][-1]['id'].split('_')[0]
media_url = (
'http://instagram.com/%s/media?max_id=%s' % (
uploader_id, max_id))
return {
'_type': 'playlist',
'entries': entries,
'id': uploader_id,
'title': uploader_id,
}
| gpl-2.0 |
lextoumbourou/cyclone | cyclone/websocket.py | 3 | 17952 | # coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Server-side implementation of the WebSocket protocol.
`WebSocket <http://en.wikipedia.org/wiki/WebSocket>`_ is a web technology
providing full-duplex communications channels over a single TCP connection.
For more information, check out the `WebSocket demos
<https://github.com/fiorix/cyclone/tree/master/demos/websocket>`_.
"""
import base64
import functools
import hashlib
import struct
import cyclone
import cyclone.web
import cyclone.escape
from twisted.python import log
class _NotEnoughFrame(Exception):
pass
class WebSocketHandler(cyclone.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override messageReceived to handle incoming messages.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
    Here is an example WebSocket handler that echoes all received messages
    back to the client::
class EchoWebSocket(websocket.WebSocketHandler):
def connectionMade(self):
print "WebSocket connected"
def messageReceived(self, message):
self.sendMessage(u"You said: " + message)
def connectionLost(self, reason):
print "WebSocket disconnected"
Web Sockets are not standard HTTP connections. The "handshake" is HTTP,
but after the handshake, the protocol is message-based. Consequently,
most of the Cyclone HTTP facilities are not available in handlers of this
    type. The only communication method available to you is sendMessage().
If you map the handler above to "/websocket" in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
"""
def __init__(self, application, request, **kwargs):
cyclone.web.RequestHandler.__init__(self, application, request,
**kwargs)
self.application = application
self.request = request
self.transport = request.connection.transport
self.ws_protocol = None
self.notifyFinish().addCallback(self.connectionLost)
def headersReceived(self):
pass
def connectionMade(self, *args, **kwargs):
pass
def connectionLost(self, reason):
pass
def messageReceived(self, message):
"""Gets called when a message is received from the peer."""
pass
def sendMessage(self, message):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json).
"""
if isinstance(message, dict):
message = cyclone.escape.json_encode(message)
if isinstance(message, unicode):
message = message.encode("utf-8")
assert isinstance(message, str)
self.ws_protocol.sendMessage(message)
def _rawDataReceived(self, data):
self.ws_protocol.handleRawData(data)
def _execute(self, transforms, *args, **kwargs):
self._transforms = transforms or list()
try:
assert self.request.headers["Upgrade"].lower() == "websocket"
except:
return self.forbidConnection("Expected WebSocket Headers")
self._connectionMade = functools.partial(self.connectionMade,
*args, **kwargs)
if "Sec-Websocket-Version" in self.request.headers and \
self.request.headers['Sec-Websocket-Version'] in ('7', '8', '13'):
self.ws_protocol = WebSocketProtocol17(self)
elif "Sec-WebSocket-Version" in self.request.headers:
self.transport.write(cyclone.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 8\r\n\r\n"))
self.transport.loseConnection()
else:
self.ws_protocol = WebSocketProtocol76(self)
self.request.connection.setRawMode()
self.request.connection.rawDataReceived = \
self.ws_protocol.rawDataReceived
self.ws_protocol.acceptConnection()
def forbidConnection(self, message):
self.transport.write(
"HTTP/1.1 403 Forbidden\r\nContent-Length: %s\r\n\r\n%s" %
(str(len(message)), message))
return self.transport.loseConnection()
class WebSocketProtocol(object):
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.transport = handler.transport
def acceptConnection(self):
pass
def rawDataReceived(self, data):
pass
def sendMessage(self, message):
pass
class WebSocketProtocol17(WebSocketProtocol):
def __init__(self, handler):
WebSocketProtocol.__init__(self, handler)
self._partial_data = None
self._frame_fin = None
self._frame_rsv = None
self._frame_ops = None
self._frame_mask = None
        self._frame_payload_len = None
        self._frame_header_len = None
self._data_len = None
self._header_index = None
self._message_buffer = ""
def acceptConnection(self):
log.msg('Using ws spec (draft 17)')
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if 'Origin' in self.request.headers:
origin = self.request.headers['Origin']
else:
origin = self.request.headers['Sec-Websocket-Origin']
key = self.request.headers['Sec-Websocket-Key']
accept = base64.b64encode(hashlib.sha1("%s%s" %
(key, '258EAFA5-E914-47DA-95CA-C5AB0DC85B11')).digest())
self.transport.write(
"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %s\r\n"
"Server: cyclone/%s\r\n"
"WebSocket-Origin: %s\r\n"
"WebSocket-Location: ws://%s%s\r\n\r\n" %
(accept, cyclone.version, origin,
self.request.host, self.request.path))
self.handler._connectionMade()
def rawDataReceived(self, data):
while True:
if self._partial_data:
data = self._partial_data + data
self._partial_data = None
try:
self._processFrameHeader(data)
except _NotEnoughFrame:
self._partial_data = data
return
self._message_buffer += self._extractMessageFromFrame(data)
if self._frame_fin:
if self._frame_ops == 8:
self.sendMessage(self._message_buffer, code=0x88)
#self.handler.connectionLost(self._message_buffer)
elif self._frame_ops == 9:
self.sendMessage(self._message_buffer, code=0x8A)
else:
self.handler.messageReceived(self._message_buffer)
self._message_buffer = ""
# if there is still data after this frame, process again
current_len = self._frame_header_len + self._frame_payload_len
if current_len < self._data_len:
data = data[current_len:]
else:
break
def _processFrameHeader(self, data):
self._data_len = len(data)
# we need at least 2 bytes to start processing a frame
if self._data_len < 2:
raise _NotEnoughFrame()
# first byte contains fin, rsv and ops
b = ord(data[0])
self._frame_fin = (b & 0x80) != 0
self._frame_rsv = (b & 0x70) >> 4
self._frame_ops = b & 0x0f
# second byte contains mask and payload length
b = ord(data[1])
self._frame_mask = (b & 0x80) != 0
frame_payload_len1 = b & 0x7f
# accumulating for self._frame_header_len
i = 2
if frame_payload_len1 < 126:
self._frame_payload_len = frame_payload_len1
elif frame_payload_len1 == 126:
i += 2
if self._data_len < i:
raise _NotEnoughFrame()
self._frame_payload_len = struct.unpack("!H", data[i - 2:i])[0]
elif frame_payload_len1 == 127:
i += 8
if self._data_len < i:
raise _NotEnoughFrame()
self._frame_payload_len = struct.unpack("!Q", data[i - 8:i])[0]
        if self._frame_mask:
i += 4
if (self._data_len - i) < self._frame_payload_len:
raise _NotEnoughFrame()
self._frame_header_len = i
def _extractMessageFromFrame(self, data):
i = self._frame_header_len
# when payload is masked, extract frame mask
frame_mask = None
frame_mask_array = []
if self._frame_mask:
frame_mask = data[i - 4:i]
for j in range(0, 4):
frame_mask_array.append(ord(frame_mask[j]))
payload = bytearray(data[i:i + self._frame_payload_len])
for k in xrange(0, self._frame_payload_len):
payload[k] ^= frame_mask_array[k % 4]
return str(payload)
else:
return data[i:i+self._frame_payload_len]
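    # Worked example, illustrative (the masked "Hello" sample frame used in
    # RFC 6455's examples): with masking key 0x37 0xfa 0x21 0x3d, each payload
    # byte is XORed with mask[i % 4]:
    #
    #   masked payload:   0x7f 0x9f 0x4d 0x51 0x58
    #   unmasked payload: 0x48 0x65 0x6c 0x6c 0x6f  -> "Hello"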
def sendMessage(self, message, code=0x81):
if isinstance(message, unicode):
message = message.encode('utf8')
length = len(message)
newFrame = []
newFrame.append(code)
newFrame = bytearray(newFrame)
if length <= 125:
newFrame.append(length)
elif length > 125 and length < 65536:
newFrame.append(126)
newFrame += struct.pack('!H', length)
elif length >= 65536:
newFrame.append(127)
newFrame += struct.pack('!Q', length)
newFrame += message
self.transport.write(str(newFrame))
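    # Worked example, illustrative: sendMessage("Hi") with the default
    # code=0x81 (FIN bit set, text opcode) writes a 4-byte unmasked frame:
    #
    #   0x81 0x02 0x48 0x69
    #    |    |    'H'  'i'
    #    |    +-- payload length 2, mask bit clear (server frames are unmasked)
    #    +------- FIN=1, opcode=0x1 (text)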
class WebSocketProtocol76(WebSocketProtocol):
def __init__(self, handler):
WebSocketProtocol.__init__(self, handler)
self._k1 = None
self._k2 = None
self._nonce = None
self._postheader = False
self._protocol = None
self._frame_decoder = Hixie76FrameDecoder()
def acceptConnection(self):
if "Sec-Websocket-Key1" not in self.request.headers or \
"Sec-Websocket-Key2" not in self.request.headers:
log.msg('Using old ws spec (draft 75)')
ws_origin_header = "WebSocket-Origin"
ws_location_header = "WebSocket-Location"
self._protocol = 75
else:
log.msg('Using ws draft 76 header exchange')
self._k1 = self.request.headers["Sec-WebSocket-Key1"]
self._k2 = self.request.headers["Sec-WebSocket-Key2"]
ws_origin_header = "Sec-WebSocket-Origin"
ws_location_header = "Sec-WebSocket-Location"
self._protocol = 76
self.transport.write(
"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Server: cyclone/%s\r\n"
"%s: %s\r\n"
"%s: ws://%s%s\r\n\r\n" %
(cyclone.version, ws_origin_header, self.request.headers["Origin"],
ws_location_header, self.request.host, self.request.path))
self._postheader = True
def _handleClientChallenge(self, data):
        # accumulate data until the challenge token from the client is
        # complete. Return None if not enough data has arrived to form the
        # challenge, otherwise a (possibly empty) string of the bytes left
        # over after the challenge.
if self._nonce is None:
self._nonce = data[:8]
rest = data[8:]
else:
bytes_remaining = 8 - len(self._nonce)
self._nonce += data[:bytes_remaining]
rest = data[bytes_remaining:]
        # if self._nonce is complete, return the remaining data (possibly '')
# else, return None to signal that nonce has not yet been completely
# received
return rest if len(self._nonce) == 8 else None
def rawDataReceived(self, data):
if self._postheader is True and self._protocol >= 76:
rest = self._handleClientChallenge(data)
if rest is None:
# not enough bytes for the challenge data, process later
return
else:
# process challenge and (eventually) process remaining data by
# calling rawDataReceived with the rest of data
token = self._calculate_token(self._k1, self._k2,
self._nonce)
self.transport.write(token)
self._postheader = False
self.handler._connectionMade()
self.rawDataReceived(rest)
return
# process websocket frames
try:
frames = self._frame_decoder.feed(data)
for message in frames:
if message is None:
# incomplete frame, wait for more data
return
elif message is self._frame_decoder.CLOSING_FRAME:
self.close()
else:
self.handler.messageReceived(message)
except Exception as e:
log.msg("Invalid WebSocket data: %r" % e)
self.handler._handle_request_exception(e)
self.transport.loseConnection()
def close(self):
self.transport.write('\xff\x00')
self.transport.loseConnection()
def sendMessage(self, message):
self.transport.write("\x00%s\xff" % message)
def _calculate_token(self, k1, k2, k3):
token = struct.pack('>II8s', self._filterella(k1),
self._filterella(k2), k3)
return hashlib.md5(token).digest()
def _filterella(self, w):
nums = []
spaces = 0
for l in w:
if l.isdigit():
nums.append(l)
if l.isspace():
spaces = spaces + 1
x = int(''.join(nums)) / spaces
return x
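    # Worked example, illustrative: for a draft-76 key such as '1 2  3',
    # _filterella joins the digits and divides by the number of spaces:
    # digits '123' -> 123, spaces -> 3, result 123 / 3 == 41.
    # _calculate_token packs the two resulting numbers as big-endian uint32s
    # together with the 8-byte nonce and returns the MD5 digest of the pack.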
class FrameDecodeError(Exception):
""" Frame Decode Error """
class Hixie76FrameDecoder(object):
"""
Hixie76 Frame Decoder
"""
# represents a closing frame
CLOSING_FRAME = object()
# possible states for the frame decoder
WAIT_FOR_FRAME_TYPE = 0 # waiting for the frame type byte
    INSIDE_FRAME = 1         # inside a frame and accumulating bytes until
                             # the end of it
    WAIT_FOR_CLOSE = 2       # frame type was \xff, waiting for \x00 to form
                             # a closing frame
def __init__(self):
self._state = self.WAIT_FOR_FRAME_TYPE # current state
self._frame = [] # accumulates frame message
def feed(self, data):
"""
Feed the frame decode with new data. Returns a list of the resulting
frames or [] if the input data is insufficient to form a valid frame.
"""
res = []
for b in data:
frame = self._feed_byte(b)
if frame is not None:
res.append(frame)
if frame is self.CLOSING_FRAME:
break # no need to process data which will be discarded
return res
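    # Illustrative sketch, not part of the original module:
    #
    #   d = Hixie76FrameDecoder()
    #   d.feed('\x00Hello\xff\x00World\xff')  # -> ['Hello', 'World']
    #   d.feed('\x00par')    # incomplete frame -> []
    #   d.feed('tial\xff')   # frame completed on a later feed -> ['partial']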
def _feed_byte(self, b):
if self._state == self.WAIT_FOR_FRAME_TYPE:
if b == '\x00':
# start of a new frame
self._state = self.INSIDE_FRAME
self._frame = []
return None
elif b == '\xff':
# start of a closing frame
self._state = self.WAIT_FOR_CLOSE
self._frame = []
return None
else:
raise FrameDecodeError("Invalid byte '%r' while waiting for "
"a new frame" % b)
elif self._state == self.INSIDE_FRAME:
if b == '\xff':
# end of frame: reset state, form the new frame and return it
self._state = self.WAIT_FOR_FRAME_TYPE
frame = ''.join(self._frame)
self._frame = []
return frame
else:
# accumulate frame data
self._frame.append(b)
elif self._state == self.WAIT_FOR_CLOSE:
if b == '\x00':
# closing frame received
self._state = self.WAIT_FOR_FRAME_TYPE
self._frame = []
return self.CLOSING_FRAME
else:
raise FrameDecodeError("Invalid byte '%r' while waiting for "
"close message" % b)
else:
raise FrameDecodeError("Invalid decoder state. "
"This shouldn't happen")
| apache-2.0 |
nightjean/Deep-Learning | tensorflow/contrib/tfprof/python/tools/tfprof/print_model_analysis_test.py | 4 | 6999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""print_model_analysis test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.tools.tfprof import tfprof_options_pb2
from tensorflow.tools.tfprof import tfprof_output_pb2
# XXX: this depends on pywrap_tensorflow and must come later
from tensorflow.contrib.tfprof.python.tools.tfprof import pywrap_tensorflow_print_model_analysis_lib as print_mdl
# pylint: disable=bad-whitespace
# pylint: disable=bad-continuation
TEST_OPTIONS = {
'max_depth': 10000,
'min_bytes': 0,
'min_micros': 0,
'min_params': 0,
'min_float_ops': 0,
'order_by': 'name',
'account_type_regexes': ['.*'],
'start_name_regexes': ['.*'],
'trim_name_regexes': [],
'show_name_regexes': ['.*'],
'hide_name_regexes': [],
'account_displayed_op_only': True,
'select': ['params'],
'output': 'stdout',
}
# pylint: enable=bad-whitespace
# pylint: enable=bad-continuation
class PrintModelAnalysisTest(test.TestCase):
def _BuildSmallModel(self):
image = array_ops.zeros([2, 6, 6, 3])
kernel = variable_scope.get_variable(
'DW', [6, 6, 3, 6],
dtypes.float32,
initializer=init_ops.random_normal_initializer(stddev=0.001))
x = nn_ops.conv2d(image, kernel, [1, 2, 2, 1], padding='SAME')
return x
def testPrintModelAnalysis(self):
opts = tfprof_options_pb2.OptionsProto()
opts.max_depth = TEST_OPTIONS['max_depth']
opts.min_bytes = TEST_OPTIONS['min_bytes']
opts.min_micros = TEST_OPTIONS['min_micros']
opts.min_params = TEST_OPTIONS['min_params']
opts.min_float_ops = TEST_OPTIONS['min_float_ops']
opts.order_by = TEST_OPTIONS['order_by']
for p in TEST_OPTIONS['account_type_regexes']:
opts.account_type_regexes.append(p)
for p in TEST_OPTIONS['start_name_regexes']:
opts.start_name_regexes.append(p)
for p in TEST_OPTIONS['trim_name_regexes']:
opts.trim_name_regexes.append(p)
for p in TEST_OPTIONS['show_name_regexes']:
opts.show_name_regexes.append(p)
for p in TEST_OPTIONS['hide_name_regexes']:
opts.hide_name_regexes.append(p)
opts.account_displayed_op_only = TEST_OPTIONS['account_displayed_op_only']
for p in TEST_OPTIONS['select']:
opts.select.append(p)
opts.output = TEST_OPTIONS['output']
with session.Session() as sess, ops.device('/cpu:0'):
_ = self._BuildSmallModel()
tfprof_pb = tfprof_output_pb2.TFGraphNodeProto()
tfprof_pb.ParseFromString(
print_mdl.PrintModelAnalysis(
sess.graph.as_graph_def().SerializeToString(),
b'', b'', b'scope', opts.SerializeToString()))
expected_pb = tfprof_output_pb2.TFGraphNodeProto()
text_format.Merge(r"""name: "_TFProfRoot"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
name: "Conv2D"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW"
exec_micros: 0
requested_bytes: 0
parameters: 648
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 648
children {
name: "DW/Assign"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
children {
name: "DW/Initializer/random_normal/RandomStandardNormal"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mean"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/mul"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/shape"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/Initializer/random_normal/stddev"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "DW/read"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0
}
children {
name: "zeros"
exec_micros: 0
requested_bytes: 0
total_exec_micros: 0
total_requested_bytes: 0
total_parameters: 0
float_ops: 0
total_float_ops: 0
}
float_ops: 0
total_float_ops: 0""", expected_pb)
self.assertEqual(expected_pb, tfprof_pb)
if __name__ == '__main__':
test.main()
| apache-2.0 |
zainabg/NOX | src/nox/lib/packet/llc.py | 6 | 2128 | # Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
#======================================================================
# llc header
#
# Copyright (C) 2007 Nicira Networks
#
#======================================================================
import struct
from packet_utils import *
from packet_exceptions import *
from array import *
from packet_base import packet_base
class llc(packet_base):
"llc packet struct"
LEN = 3;
# A "type" field value of less than 0x0600 indicates that this
# frame should be interpreted as an LLC packet, and the "type"
# field should be interpreted as the frame's length.
CUTOFF = htons(0x0600)
def __init__(self, prev=None):
self.prev = prev
if self.prev == None:
self.dsap = 0
self.ssap = 0
self.ctrl = 0
else:
self.parse()
def parse(self):
plen = self.prev.get_payload_len()
        if plen is not None and plen < self.LEN:
            print '(llc parse) data too short to be an llc packet %u' % plen
            return
        dlen = len(self.get_layer())
        if dlen < self.LEN:
            print '(llc parse) data too truncated to parse llc packet %u' % dlen
return
(self.dsap, self.ssap, self.ctrl) \
= struct.unpack('!BBB', self.get_layer()[:self.LEN])
self.header_len = self.LEN
self.payload_len = self.prev.get_payload_len() - self.header_len
self.parsed = True
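    # Illustrative sketch, not part of the original module: the three header
    # bytes unpacked above, for a raw SNAP LLC header (dsap=0xaa, ssap=0xaa,
    # ctrl=0x03), would look like:
    #
    #   struct.unpack('!BBB', '\xaa\xaa\x03')  # -> (170, 170, 3)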
| gpl-3.0 |
conradh1/FiveHigh | node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
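# Illustrative usage sketch, not part of the original module:
#
#   @memoize
#   def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)
#
# Repeated calls with the same immutable arguments are served from
# self.cache, turning the naive exponential recursion linear.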
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd)
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# If |follow_symlink_path| is true (default) and |path| is a symlink, then
# this method returns a path to the real file represented by |path|. If it is
# false, this method returns a path to the symlink. If |path| is not a
# symlink, this option has no effect.
# Convert to normalized (and therefore absolute paths).
if follow_path_symlink:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
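# Illustrative examples, not part of the original module:
#
#   EncodePOSIXShellArgument('foo')       -> foo         (nothing to quote)
#   EncodePOSIXShellArgument('foo bar')   -> "foo bar"   (space triggers quoting)
#   EncodePOSIXShellArgument('say "hi"')  -> "say \"hi\"" (quoted and escaped)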
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('netbsd'):
return 'netbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
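# Illustrative usage sketch, not part of the original module:
#
#   s = OrderedSet('abracadabra')
#   list(s)        # -> ['a', 'b', 'r', 'c', 'd'] (first-seen order preserved)
#   s.update('xa')
#   list(s)        # -> ['a', 'b', 'r', 'c', 'd', 'x'] ('a' already present)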
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
| gpl-3.0 |
SyamGadde/cython | Cython/Plex/Actions.py | 32 | 2517 | #=======================================================================
#
# Python Lexical Analyser
#
# Actions for use in token specifications
#
#=======================================================================
class Action(object):
def perform(self, token_stream, text):
pass # abstract
def same_as(self, other):
return self is other
class Return(Action):
"""
Internal Plex action which causes |value| to
be returned as the value of the associated token
"""
def __init__(self, value):
self.value = value
def perform(self, token_stream, text):
return self.value
def same_as(self, other):
return isinstance(other, Return) and self.value == other.value
def __repr__(self):
return "Return(%s)" % repr(self.value)
class Call(Action):
"""
Internal Plex action which causes a function to be called.
"""
def __init__(self, function):
self.function = function
def perform(self, token_stream, text):
return self.function(token_stream, text)
def __repr__(self):
return "Call(%s)" % self.function.__name__
def same_as(self, other):
return isinstance(other, Call) and self.function is other.function
class Begin(Action):
"""
Begin(state_name) is a Plex action which causes the Scanner to
enter the state |state_name|. See the docstring of Plex.Lexicon
for more information.
"""
def __init__(self, state_name):
self.state_name = state_name
def perform(self, token_stream, text):
token_stream.begin(self.state_name)
def __repr__(self):
return "Begin(%s)" % self.state_name
def same_as(self, other):
return isinstance(other, Begin) and self.state_name == other.state_name
class Ignore(Action):
"""
IGNORE is a Plex action which causes its associated token
to be ignored. See the docstring of Plex.Lexicon for more
information.
"""
def perform(self, token_stream, text):
return None
def __repr__(self):
return "IGNORE"
IGNORE = Ignore()
#IGNORE.__doc__ = Ignore.__doc__
class Text(Action):
"""
TEXT is a Plex action which causes the text of a token to
be returned as the value of the token. See the docstring of
Plex.Lexicon for more information.
"""
def perform(self, token_stream, text):
return text
def __repr__(self):
return "TEXT"
TEXT = Text()
#TEXT.__doc__ = Text.__doc__
| apache-2.0 |
nemesiscodex/JukyOS-sugar | src/jarabe/controlpanel/cmd.py | 5 | 5886 | # Copyright (C) 2007, 2008 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import getopt
import os
from gettext import gettext as _
import logging
from jarabe import config
_RESTART = 1
_same_option_warning = _('sugar-control-panel: WARNING, found more than one'
' option with the same name: %s module: %r')
_no_option_error = _('sugar-control-panel: key=%s not an available option')
_general_error = _('sugar-control-panel: %s')
def cmd_help():
"""Print the help to the screen"""
# TRANS: Translators, there's a empty line at the end of this string,
# which must appear in the translated string (msgstr) as well.
print _('Usage: sugar-control-panel [ option ] key [ args ... ] \n\
Control for the sugar environment. \n\
Options: \n\
-h show this help message and exit \n\
-l list all the available options \n\
-h key show information about this key \n\
-g key get the current value of the key \n\
-s key set the current value for the key \n\
-c key clear the current value for the key \n\
')
def note_restart():
"""Instructions how to restart sugar"""
print _('To apply your changes you have to restart Sugar.\n' +
'Hit ctrl+alt+erase on the keyboard to trigger a restart.')
def load_modules():
"""Build a list of pointers to available modules and import them.
"""
modules = []
path = os.path.join(config.ext_path, 'cpsection')
folder = os.listdir(path)
for item in folder:
if os.path.isdir(os.path.join(path, item)) and \
os.path.exists(os.path.join(path, item, 'model.py')):
try:
module = __import__('.'.join(('cpsection', item, 'model')),
globals(), locals(), ['model'])
except Exception:
logging.exception('Exception while loading extension:')
else:
modules.append(module)
return modules
def main():
try:
options, args = getopt.getopt(sys.argv[1:], 'h:s:g:c:l', [])
except getopt.GetoptError:
cmd_help()
sys.exit(2)
if not options:
cmd_help()
sys.exit(2)
modules = load_modules()
for option, key in options:
found = 0
        if option == '-h':
for module in modules:
method = getattr(module, 'set_' + key, None)
if method:
found += 1
if found == 1:
print method.__doc__
else:
print _(_same_option_warning % (key, module))
if found == 0:
print _(_no_option_error % key)
        if option == '-l':
for module in modules:
methods = dir(module)
print '%s:' % module.__name__.split('.')[1]
for method in methods:
if method.startswith('get_'):
print ' %s' % method[4:]
elif method.startswith('clear_'):
print ' %s (use the -c argument with this option)' \
% method[6:]
        if option == '-g':
for module in modules:
method = getattr(module, 'print_' + key, None)
if method:
found += 1
if found == 1:
try:
method()
except Exception, detail:
print _(_general_error % detail)
else:
print _(_same_option_warning % (key, module))
if found == 0:
print _(_no_option_error % key)
        if option == '-s':
for module in modules:
method = getattr(module, 'set_' + key, None)
if method:
note = 0
found += 1
if found == 1:
try:
note = method(*args)
except Exception, detail:
print _(_general_error % detail)
if note == _RESTART:
note_restart()
else:
print _(_same_option_warning % (key, module))
if found == 0:
print _(_no_option_error % key)
        if option == '-c':
for module in modules:
method = getattr(module, 'clear_' + key, None)
if method:
note = 0
found += 1
if found == 1:
try:
note = method(*args)
except Exception, detail:
print _(_general_error % detail)
if note == _RESTART:
note_restart()
else:
print _(_same_option_warning % (key, module))
if found == 0:
print _(_no_option_error % key)
| gpl-2.0 |
kobejean/tensorflow | tensorflow/python/training/supervisor_test.py | 7 | 35482 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for supervisor.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import time
import uuid
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import supervisor
def _summary_iterator(test_dir):
"""Reads events from test_dir/events.
Args:
test_dir: Name of the test directory.
Returns:
A summary_iterator
"""
event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
return summary_iterator.summary_iterator(event_paths[-1])
class SupervisorTest(test.TestCase):
def _test_dir(self, test_name):
test_dir = os.path.join(self.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
def _wait_for_glob(self, pattern, timeout_secs, for_checkpoint=True):
"""Wait for a checkpoint file to appear.
Args:
pattern: A string.
timeout_secs: How long to wait for in seconds.
for_checkpoint: whether we're globbing for checkpoints.
"""
end_time = time.time() + timeout_secs
while time.time() < end_time:
if for_checkpoint:
if checkpoint_management.checkpoint_exists(pattern):
return
else:
if len(gfile.Glob(pattern)) >= 1:
return
time.sleep(0.05)
    self.fail("Glob never matched any file: %s" % pattern)
# This test does not test much.
def testBasics(self):
logdir = self._test_dir("basics")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
for _ in xrange(10):
sess.run(my_op)
sess.close()
sv.stop()
def testManagedSession(self):
logdir = self._test_dir("managed_session")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for _ in xrange(10):
sess.run(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
def testManagedSessionUserError(self):
logdir = self._test_dir("managed_user_error")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
last_step = None
with self.assertRaisesRegexp(RuntimeError, "failing here"):
with sv.managed_session("") as sess:
for step in xrange(10):
last_step = step
if step == 1:
raise RuntimeError("failing here")
else:
sess.run(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
self.assertEqual(1, last_step)
def testManagedSessionIgnoreOutOfRangeError(self):
logdir = self._test_dir("managed_out_of_range")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
last_step = None
with sv.managed_session("") as sess:
for step in xrange(10):
last_step = step
if step == 3:
raise errors_impl.OutOfRangeError(my_op.op.node_def, my_op.op,
"all done")
else:
sess.run(my_op)
# Supervisor has been stopped. OutOfRangeError was not thrown.
self.assertTrue(sv.should_stop())
self.assertEqual(3, last_step)
def testManagedSessionDoNotKeepSummaryWriter(self):
logdir = self._test_dir("managed_not_keep_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir, summary_op=None)
with sv.managed_session(
"", close_summary_writer=True, start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Sleep 1.2s to make sure that the next event file has a different name
# than the current one.
time.sleep(1.2)
with sv.managed_session(
"", close_summary_writer=True, start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
event_paths = sorted(glob.glob(os.path.join(logdir, "event*")))
self.assertEquals(2, len(event_paths))
# The two event files should have the same contents.
for path in event_paths:
# The summary iterator should report the summary once as we closed the
# summary writer across the 2 sessions.
rr = summary_iterator.summary_iterator(path)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph and metagraph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
# But only once.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
with self.assertRaises(StopIteration):
next(rr)
def testManagedSessionKeepSummaryWriter(self):
logdir = self._test_dir("managed_keep_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session(
"", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
with sv.managed_session(
"", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Now close the summary writer to flush the events.
sv.summary_writer.close()
# The summary iterator should report the summary twice as we reused
# the same summary writer across the 2 sessions.
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should also have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def _csv_data(self, logdir):
# Create a small data file with 3 CSV records.
data_path = os.path.join(logdir, "data.csv")
with open(data_path, "w") as f:
f.write("1,2,3\n")
f.write("4,5,6\n")
f.write("7,8,9\n")
return data_path
def testManagedEndOfInputOneQueue(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from a single queue.
logdir = self._test_dir("managed_end_of_input_one_queue")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(rec)
def testManagedEndOfInputTwoQueues(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from two queues, the second
# one producing a batch from the first one.
logdir = self._test_dir("managed_end_of_input_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(shuff_rec)
def testManagedMainErrorTwoQueues(self):
# Tests that the supervisor correctly raises a main loop
# error even when using multiple queues for input.
logdir = self._test_dir("managed_main_error_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with self.assertRaisesRegexp(RuntimeError, "fail at step 3"):
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for step in range(9):
if sv.should_stop():
break
elif step == 3:
raise RuntimeError("fail at step 3")
else:
sess.run(shuff_rec)
def testSessionConfig(self):
logdir = self._test_dir("session_config")
with ops.Graph().as_default():
with ops.device("/cpu:1"):
my_op = constant_op.constant([1.0])
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session(
"", config=config_pb2.ConfigProto(device_count={"CPU": 2}))
for _ in xrange(10):
sess.run(my_op)
sess.close()
sv.stop()
def testChiefCanWriteEvents(self):
logdir = self._test_dir("can_write")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(is_chief=True, logdir=logdir, summary_op=None)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNonChiefCannotWriteEvents(self):
def _summary_computed():
with ops.Graph().as_default():
sv = supervisor.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summ = summary.merge_all()
sv.summary_computed(sess, sess.run(summ))
def _start_standard_services():
with ops.Graph().as_default():
sv = supervisor.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
sv.start_standard_services(sess)
self.assertRaises(RuntimeError, _summary_computed)
self.assertRaises(RuntimeError, _start_standard_services)
def testNoLogdirButWantSummary(self):
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
def testLogdirButExplicitlyNoSummaryWriter(self):
logdir = self._test_dir("explicit_no_summary_writer")
with ops.Graph().as_default():
variables.VariableV1([1.0], name="foo")
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir, summary_writer=None)
sess = sv.prepare_or_wait_for_session("")
      # Check that a checkpoint is still generated.
self._wait_for_glob(sv.save_path, 3.0)
# Check that we cannot write a summary
with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
def testNoLogdirButExplicitSummaryWriter(self):
logdir = self._test_dir("explicit_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sw = writer.FileWriter(logdir)
sv = supervisor.Supervisor(logdir="", summary_op=None, summary_writer=sw)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# Check the summary was written to 'logdir'
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNoLogdirSucceeds(self):
with ops.Graph().as_default():
variables.VariableV1([1.0, 2.0, 3.0])
sv = supervisor.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
sess.close()
sv.stop()
def testUseSessionManager(self):
with ops.Graph().as_default():
variables.VariableV1([1.0, 2.0, 3.0])
sm = session_manager_lib.SessionManager()
# Pass in session_manager. The additional init_op is ignored.
sv = supervisor.Supervisor(logdir="", session_manager=sm)
sv.prepare_or_wait_for_session("")
def testInitOp(self):
logdir = self._test_dir("default_init_op")
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0])
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitFn(self):
    logdir = self._test_dir("default_init_fn")
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0])
def _init_fn(sess):
sess.run(v.initializer)
sv = supervisor.Supervisor(logdir=logdir, init_op=None, init_fn=_init_fn)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpWithFeedDict(self):
logdir = self._test_dir("feed_dict_init_op")
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.VariableV1(p, name="v")
sv = supervisor.Supervisor(
logdir=logdir,
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testReadyForLocalInitOp(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_ready_for_local_init_op")
uid = uuid.uuid4().hex
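    # A unique suffix keeps variable names distinct across tests that share
    # the same local server.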
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:local"):
v = variables.VariableV1(
1, name="default_ready_for_local_init_op_v_" + str(uid))
vadd = v.assign_add(1)
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="default_ready_for_local_init_op_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
init_op=v.initializer,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(1, sess0.run(w0))
self.assertEqual(2, sess1.run(vadd1))
self.assertEqual(1, sess1.run(w1))
self.assertEqual(2, sess0.run(v0))
sv0.stop()
sv1.stop()
def testReadyForLocalInitOpRestoreFromCheckpoint(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("ready_for_local_init_op_restore")
uid = uuid.uuid4().hex
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1(
10.0, name="ready_for_local_init_op_restore_v_" + str(uid))
summary.scalar("ready_for_local_init_op_restore_v_" + str(uid), v)
sv = supervisor.Supervisor(logdir=logdir)
sv.prepare_or_wait_for_session(server.target)
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:local"):
v = variables.VariableV1(
1.0, name="ready_for_local_init_op_restore_v_" + str(uid))
vadd = v.assign_add(1)
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="ready_for_local_init_op_restore_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(10, sess0.run(w0))
self.assertEqual(11, sess1.run(vadd1))
self.assertEqual(10, sess1.run(w1))
self.assertEqual(11, sess0.run(v0))
sv0.stop()
sv1.stop()
def testLocalInitOp(self):
logdir = self._test_dir("default_local_init_op")
with ops.Graph().as_default():
# A local variable.
v = variables.VariableV1(
[1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# An entity which is initialized through a TABLE_INITIALIZER.
w = variables.VariableV1([4, 5, 6], trainable=False, collections=[])
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, w.initializer)
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
self.assertAllClose([4, 5, 6], sess.run(w))
sv.stop()
def testLocalInitOpForNonChief(self):
logdir = self._test_dir("default_local_init_op_non_chief")
with ops.Graph().as_default():
with ops.device("/job:localhost"):
# A local variable.
v = variables.VariableV1(
[1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None, is_chief=False)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpFails(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails")
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
variables.VariableV1([4.0, 5.0, 6.0], name="w")
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testInitOpFailsForTransientVariable(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails_for_local_variable")
with ops.Graph().as_default():
v = variables.VariableV1(
[1.0, 2.0, 3.0],
name="v",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
variables.VariableV1(
[1.0, 2.0, 3.0],
name="w",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, local_init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testSetupFail(self):
logdir = self._test_dir("setup_fail")
with ops.Graph().as_default():
variables.VariableV1([1.0, 2.0, 3.0], name="v")
with self.assertRaisesRegexp(ValueError, "must have their device set"):
supervisor.Supervisor(logdir=logdir, is_chief=False)
with ops.Graph().as_default(), ops.device("/job:ps"):
variables.VariableV1([1.0, 2.0, 3.0], name="v")
supervisor.Supervisor(logdir=logdir, is_chief=False)
def testDefaultGlobalStep(self):
logdir = self._test_dir("default_global_step")
with ops.Graph().as_default():
variables.VariableV1(287, name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertEquals(287, sess.run(sv.global_step))
sv.stop()
def testRestoreFromMetaGraph(self):
logdir = self._test_dir("restore_from_meta_graph")
with ops.Graph().as_default():
variables.VariableV1(1, name="v0")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
filename = sv.saver.save(sess, sv.save_path)
sv.stop()
# Create a new Graph and Supervisor and recover.
with ops.Graph().as_default():
new_saver = saver_lib.import_meta_graph(".".join([filename, "meta"]))
self.assertIsNotNone(new_saver)
sv2 = supervisor.Supervisor(logdir=logdir, saver=new_saver)
sess = sv2.prepare_or_wait_for_session("")
self.assertEquals(1, sess.run("v0:0"))
sv2.saver.save(sess, sv2.save_path)
sv2.stop()
# This test is based on the fact that the standard services start
# right away and get to run once before sv.stop() returns.
# We still sleep a bit to make the test robust.
def testStandardServicesWithoutGlobalStep(self):
logdir = self._test_dir("standard_services_without_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1([1.0], name="foo")
summary.scalar("v", v[0])
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
self.assertProtoEquals("value { tag: 'v' simple_value: 1.0 }", ev.summary)
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([10.10], name="foo")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(1.0, v.eval()[0])
# Same as testStandardServicesNoGlobalStep but with a global step.
# We should get a summary about the step time.
def testStandardServicesWithGlobalStep(self):
logdir = self._test_dir("standard_services_with_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1([123], name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
# This is where the checkpoint will appear, with step number 123.
save_path = "%s-123" % sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
      # It is actually nondeterministic whether SessionLog.START gets written
      # before the summary or the checkpoint, but this works when run 10000 times.
self.assertEquals(123, ev.step)
self.assertEquals(event_pb2.SessionLog.START, ev.session_log.status)
first = next(rr)
second = next(rr)
      # It is nondeterministic whether the value gets written before the checkpoint
      # since they are on separate threads, so we check for both conditions.
if first.HasField("summary"):
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", first.summary)
self.assertEquals(123, second.step)
self.assertEquals(event_pb2.SessionLog.CHECKPOINT,
second.session_log.status)
else:
self.assertEquals(123, first.step)
self.assertEquals(event_pb2.SessionLog.CHECKPOINT,
first.session_log.status)
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", second.summary)
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
    # There should be a checkpoint file with the variable "global_step"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([-12], name="global_step")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(123, v.eval()[0])
def testNoQueueRunners(self):
with ops.Graph().as_default(), self.cached_session() as sess:
sv = supervisor.Supervisor(logdir=self._test_dir("no_queue_runners"))
self.assertEqual(0, len(sv.start_queue_runners(sess)))
sv.stop()
def testPrepareSessionAfterStopForChief(self):
logdir = self._test_dir("prepare_after_stop_chief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=True)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
def testPrepareSessionAfterStopForNonChief(self):
logdir = self._test_dir("prepare_after_stop_nonchief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=False)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped, until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
if __name__ == "__main__":
test.main()
| apache-2.0 |
mapr/hue | desktop/core/ext-py/django-nose-1.3/django_nose/fixture_tables.py | 49 | 6377 | """A copy of Django 1.3.0's stock loaddata.py, adapted so that, instead of
loading any data, it returns the tables referenced by a set of fixtures so we
can truncate them (and no others) quickly after we're finished with them."""
import os
import gzip
import zipfile
from itertools import product
from django.conf import settings
from django.core import serializers
from django.db import router, DEFAULT_DB_ALIAS
from django.db.models import get_apps
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
def tables_used_by_fixtures(fixture_labels, using=DEFAULT_DB_ALIAS):
"""Act like Django's stock loaddata command, but, instead of loading data,
return an iterable of the names of the tables into which data would be
loaded."""
# Keep a count of the installed objects and fixtures
fixture_count = 0
loaded_object_count = 0
fixture_object_count = 0
tables = set()
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if settings.DEBUG:
assert len(self.namelist()) == 1, "Zip-compressed fixtures must contain only one file."
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
compression_types = {
None: file,
'gz': gzip.GzipFile,
'zip': SingleZipReader
}
if has_bz2:
compression_types['bz2'] = bz2.BZ2File
app_module_paths = []
for app in get_apps():
if hasattr(app, '__path__'):
# It's a 'models/' subpackage
for path in app.__path__:
app_module_paths.append(path)
else:
# It's a models.py module
app_module_paths.append(app.__file__)
app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
for fixture_label in fixture_labels:
parts = fixture_label.split('.')
if len(parts) > 1 and parts[-1] in compression_types:
compression_formats = [parts[-1]]
parts = parts[:-1]
else:
compression_formats = list(compression_types.keys())
if len(parts) == 1:
fixture_name = parts[0]
formats = serializers.get_public_serializer_formats()
else:
fixture_name, format = '.'.join(parts[:-1]), parts[-1]
if format in serializers.get_public_serializer_formats():
formats = [format]
else:
formats = []
if not formats:
# stderr.write(style.ERROR("Problem installing fixture '%s': %s is
# not a known serialization format.\n" % (fixture_name, format)))
return set()
if os.path.isabs(fixture_name):
fixture_dirs = [fixture_name]
else:
fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
for fixture_dir in fixture_dirs:
# stdout.write("Checking %s for fixtures...\n" %
# humanize(fixture_dir))
label_found = False
for combo in product([using, None], formats, compression_formats):
database, format, compression_format = combo
file_name = '.'.join(
p for p in [
fixture_name, database, format, compression_format
]
if p
)
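                # e.g. fixture_name='mydata', database='default', format='json',
                # compression_format='gz' gives 'mydata.default.json.gz'; None
                # entries are filtered out by the `if p` clause.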
# stdout.write("Trying %s for %s fixture '%s'...\n" % \
# (humanize(fixture_dir), file_name, fixture_name))
full_path = os.path.join(fixture_dir, file_name)
open_method = compression_types[compression_format]
try:
fixture = open_method(full_path, 'r')
if label_found:
fixture.close()
# stderr.write(style.ERROR("Multiple fixtures named
# '%s' in %s. Aborting.\n" % (fixture_name,
# humanize(fixture_dir))))
return set()
else:
fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
# stdout.write("Installing %s fixture '%s' from %s.\n"
# % (format, fixture_name, humanize(fixture_dir)))
try:
objects = serializers.deserialize(format, fixture, using=using)
for obj in objects:
objects_in_fixture += 1
if router.allow_syncdb(using, obj.object.__class__):
loaded_objects_in_fixture += 1
tables.add(
obj.object.__class__._meta.db_table)
loaded_object_count += loaded_objects_in_fixture
fixture_object_count += objects_in_fixture
label_found = True
except (SystemExit, KeyboardInterrupt):
raise
except Exception:
fixture.close()
# stderr.write( style.ERROR("Problem installing
# fixture '%s': %s\n" % (full_path, ''.join(tra
# ceback.format_exception(sys.exc_type,
# sys.exc_value, sys.exc_traceback)))))
return set()
fixture.close()
# If the fixture we loaded contains 0 objects, assume that an
# error was encountered during fixture loading.
if objects_in_fixture == 0:
# stderr.write( style.ERROR("No fixture data found
# for '%s'. (File format may be invalid.)\n" %
# (fixture_name)))
return set()
except Exception:
# stdout.write("No %s fixture '%s' in %s.\n" % \ (format,
# fixture_name, humanize(fixture_dir)))
pass
return tables
| apache-2.0 |
nowopen/scrapy | tests/test_proxy_connect.py | 130 | 4114 | import json
import os
import time
from threading import Thread
from libmproxy import controller, proxy
from netlib import http_auth
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.utils.test import get_crawler
from scrapy.http import Request
from tests.spiders import SimpleSpider, SingleRequestSpider
from tests.mockserver import MockServer
class HTTPSProxy(controller.Master, Thread):
def __init__(self, port):
password_manager = http_auth.PassManSingleUser('scrapy', 'scrapy')
authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy")
cert_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'keys', 'mitmproxy-ca.pem')
server = proxy.ProxyServer(proxy.ProxyConfig(
authenticator = authenticator,
cacert = cert_path),
port)
Thread.__init__(self)
controller.Master.__init__(self, server)
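        # controller.Master precedes Thread in the MRO, so Thread.start()
        # invokes Master.run(), i.e. the proxy's event loop runs in this
        # background thread.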
class ProxyConnectTestCase(TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self._oldenv = os.environ.copy()
self._proxy = HTTPSProxy(8888)
self._proxy.start()
# Wait for the proxy to start.
time.sleep(1.0)
os.environ['http_proxy'] = 'http://scrapy:scrapy@localhost:8888'
os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'
def tearDown(self):
self.mockserver.__exit__(None, None, None)
self._proxy.shutdown()
os.environ = self._oldenv
@defer.inlineCallbacks
def test_https_connect_tunnel(self):
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:8999/status?n=200")
self._assert_got_response_code(200, l)
@defer.inlineCallbacks
def test_https_noconnect(self):
os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888?noconnect'
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:8999/status?n=200")
self._assert_got_response_code(200, l)
os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'
@defer.inlineCallbacks
def test_https_connect_tunnel_error(self):
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:99999/status?n=200")
self._assert_got_tunnel_error(l)
@defer.inlineCallbacks
def test_https_tunnel_auth_error(self):
os.environ['https_proxy'] = 'http://wrong:wronger@localhost:8888'
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:8999/status?n=200")
        # The proxy returns a 407 error code but it does not reach the client;
        # the client just sees a TunnelError.
self._assert_got_tunnel_error(l)
os.environ['https_proxy'] = 'http://scrapy:scrapy@localhost:8888'
@defer.inlineCallbacks
def test_https_tunnel_without_leak_proxy_authorization_header(self):
request = Request("https://localhost:8999/echo")
crawler = get_crawler(SingleRequestSpider)
with LogCapture() as l:
yield crawler.crawl(seed=request)
self._assert_got_response_code(200, l)
echo = json.loads(crawler.spider.meta['responses'][0].body)
self.assertTrue('Proxy-Authorization' not in echo['headers'])
@defer.inlineCallbacks
def test_https_noconnect_auth_error(self):
os.environ['https_proxy'] = 'http://wrong:wronger@localhost:8888?noconnect'
crawler = get_crawler(SimpleSpider)
with LogCapture() as l:
yield crawler.crawl("https://localhost:8999/status?n=200")
self._assert_got_response_code(407, l)
def _assert_got_response_code(self, code, log):
self.assertEqual(str(log).count('Crawled (%d)' % code), 1)
def _assert_got_tunnel_error(self, log):
self.assertEqual(str(log).count('TunnelError'), 1)
| bsd-3-clause |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/distributions/python/ops/bijectors/sigmoid_centered.py | 85 | 1166 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SigmoidCentered bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid_centered_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["SigmoidCentered"]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
6809/MC6809 | MC6809/components/mc6809_speedlimited.py | 1 | 1650 | #!/usr/bin/env python
"""
MC6809 - 6809 CPU emulator in Python
=======================================
6809 is Big-Endian
Links:
http://dragondata.worldofdragon.org/Publications/inside-dragon.htm
http://www.burgins.com/m6809.html
http://koti.mbnet.fi/~atjs/mc6809/
:copyleft: 2013-2015 by the MC6809 team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
Based on:
* ApplyPy by James Tauber (MIT license)
* XRoar emulator by Ciaran Anscomb (GPL license)
more info, see README
"""
import time
class CPUSpeedLimitMixin:
max_delay = 0.01 # maximum time.sleep() value per burst run
delay = 0 # the current time.sleep() value per burst run
def delayed_burst_run(self, target_cycles_per_sec):
""" Run CPU not faster than given speedlimit """
old_cycles = self.cycles
start_time = time.time()
self.burst_run()
is_duration = time.time() - start_time
new_cycles = self.cycles - old_cycles
try:
is_cycles_per_sec = new_cycles / is_duration
except ZeroDivisionError:
pass
else:
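            # should_burst_duration is the ratio of achieved to target speed;
            # scaling the measured duration by it yields the wall-clock time
            # the burst should have taken:
            #   target_duration = new_cycles / target_cycles_per_sec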
should_burst_duration = is_cycles_per_sec / target_cycles_per_sec
target_duration = should_burst_duration * is_duration
delay = target_duration - is_duration
if delay > 0:
if delay > self.max_delay:
self.delay = self.max_delay
else:
self.delay = delay
time.sleep(self.delay)
self.call_sync_callbacks()
| gpl-3.0 |
gengue/django | tests/test_runner/test_discover_runner.py | 97 | 6079 | import os
from contextlib import contextmanager
from unittest import TestSuite, TextTestRunner, defaultTestLoader
from django.test import TestCase
from django.test.runner import DiscoverRunner
@contextmanager
def change_cwd(directory):
current_dir = os.path.abspath(os.path.dirname(__file__))
new_dir = os.path.join(current_dir, directory)
old_cwd = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(old_cwd)
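# change_cwd() lets the discovery tests below resolve sample-app paths
# relative to this file, regardless of where the suite was launched from.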
class DiscoverRunnerTest(TestCase):
def test_dotted_test_module(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample"],
).countTestCases()
self.assertEqual(count, 4)
def test_dotted_test_class_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_pattern(self):
count = DiscoverRunner(
pattern="*_tests.py",
).build_suite(["test_discovery_sample"]).countTestCases()
self.assertEqual(count, 1)
def test_file_path(self):
with change_cwd(".."):
count = DiscoverRunner().build_suite(
["test_discovery_sample/"],
).countTestCases()
self.assertEqual(count, 5)
def test_empty_label(self):
"""
If the test label is empty, discovery should happen on the current
working directory.
"""
with change_cwd("."):
suite = DiscoverRunner().build_suite([])
self.assertEqual(
suite._tests[0].id().split(".")[0],
os.path.basename(os.getcwd()),
)
def test_empty_test_case(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.EmptyTestCase"],
).countTestCases()
self.assertEqual(count, 0)
def test_discovery_on_package(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests"],
).countTestCases()
self.assertEqual(count, 1)
def test_ignore_adjacent(self):
"""
When given a dotted path to a module, unittest discovery searches
not just the module, but also the directory containing the module.
This results in tests from adjacent modules being run when they
should not. The discover runner avoids this behavior.
"""
count = DiscoverRunner().build_suite(
["test_discovery_sample.empty"],
).countTestCases()
self.assertEqual(count, 0)
def test_testcase_ordering(self):
with change_cwd(".."):
suite = DiscoverRunner().build_suite(["test_discovery_sample/"])
self.assertEqual(
suite._tests[0].__class__.__name__,
'TestDjangoTestCase',
msg="TestDjangoTestCase should be the first test case")
self.assertEqual(
suite._tests[1].__class__.__name__,
'TestZimpleTestCase',
msg="TestZimpleTestCase should be the second test case")
# All others can follow in unspecified order, including doctests
self.assertIn('DocTestCase', [t.__class__.__name__ for t in suite._tests[2:]])
def test_duplicates_ignored(self):
"""
Tests shouldn't be discovered twice when discovering on overlapping paths.
"""
single = DiscoverRunner().build_suite(["gis_tests"]).countTestCases()
dups = DiscoverRunner().build_suite(
["gis_tests", "gis_tests.geo3d"]).countTestCases()
self.assertEqual(single, dups)
def test_reverse(self):
"""
Reverse should reorder tests while maintaining the grouping specified
by ``DiscoverRunner.reorder_by``.
"""
runner = DiscoverRunner(reverse=True)
suite = runner.build_suite(
test_labels=('test_discovery_sample', 'test_discovery_sample2'))
self.assertIn('test_discovery_sample2', next(iter(suite)).id(),
msg="Test labels should be reversed.")
suite = runner.build_suite(test_labels=('test_discovery_sample2',))
suite = tuple(suite)
self.assertIn('DjangoCase', suite[0].id(),
msg="Test groups should not be reversed.")
self.assertIn('SimpleCase', suite[4].id(),
msg="Test groups order should be preserved.")
self.assertIn('DjangoCase2', suite[0].id(),
msg="Django test cases should be reversed.")
self.assertIn('SimpleCase2', suite[4].id(),
msg="Simple test cases should be reversed.")
self.assertIn('UnittestCase2', suite[8].id(),
msg="Unittest test cases should be reversed.")
self.assertIn('test_2', suite[0].id(),
msg="Methods of Django cases should be reversed.")
self.assertIn('test_2', suite[4].id(),
msg="Methods of simple cases should be reversed.")
self.assertIn('test_2', suite[8].id(),
msg="Methods of unittest cases should be reversed.")
def test_overrideable_test_suite(self):
self.assertEqual(DiscoverRunner().test_suite, TestSuite)
def test_overrideable_test_runner(self):
self.assertEqual(DiscoverRunner().test_runner, TextTestRunner)
def test_overrideable_test_loader(self):
self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader)
| bsd-3-clause |
cherry-wb/SideTools | examples/state-machine/rogue.py | 1 | 5589 | #!/usr/bin/env python
#Author velociraptor Genjix <aphidia@hotmail.com>
from PySide.QtGui import *
from PySide.QtCore import *
class MovementTransition(QEventTransition):
def __init__(self, window):
super(MovementTransition, self).__init__(window, QEvent.KeyPress)
self.window = window
def eventTest(self, event):
if event.type() == QEvent.StateMachineWrapped and \
event.event().type() == QEvent.KeyPress:
key = event.event().key()
return key == Qt.Key_2 or key == Qt.Key_8 or \
key == Qt.Key_6 or key == Qt.Key_4
return False
def onTransition(self, event):
key = event.event().key()
if key == Qt.Key_4:
self.window.movePlayer(self.window.Left)
if key == Qt.Key_8:
self.window.movePlayer(self.window.Up)
if key == Qt.Key_6:
self.window.movePlayer(self.window.Right)
if key == Qt.Key_2:
self.window.movePlayer(self.window.Down)
class Custom(QState):
def __init__(self, parent, mw):
super(Custom, self).__init__(parent)
self.mw = mw
def onEntry(self, e):
print(self.mw.status)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.pX = 5
self.pY = 5
self.width = 35
self.height = 20
self.statusStr = ''
database = QFontDatabase()
font = QFont()
if 'Monospace' in database.families():
font = QFont('Monospace', 12)
else:
for family in database.families():
if database.isFixedPitch(family):
font = QFont(family, 12)
self.setFont(font)
self.setupMap()
self.buildMachine()
self.show()
def setupMap(self):
self.map = []
qsrand(QTime(0, 0, 0).secsTo(QTime.currentTime()))
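        # Seed Qt's RNG with the seconds elapsed since midnight so each run
        # produces a different map layout.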
for x in range(self.width):
column = []
for y in range(self.height):
if x == 0 or x == self.width - 1 or y == 0 or \
y == self.height - 1 or qrand() % 40 == 0:
column.append('#')
else:
column.append('.')
self.map.append(column)
def buildMachine(self):
machine = QStateMachine(self)
inputState = Custom(machine, self)
# this line sets the status
self.status = 'hello!'
# however this line does not
inputState.assignProperty(self, 'status', 'Move the rogue with 2, 4, 6, and 8')
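        # assignProperty() is applied when the machine enters inputState via
        # the event loop, not at this call site, hence the note above.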
transition = MovementTransition(self)
inputState.addTransition(transition)
quitState = QState(machine)
quitState.assignProperty(self, 'status', 'Really quit(y/n)?')
yesTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_Y)
self.finalState = QFinalState(machine)
yesTransition.setTargetState(self.finalState)
quitState.addTransition(yesTransition)
noTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_N)
noTransition.setTargetState(inputState)
quitState.addTransition(noTransition)
quitTransition = QKeyEventTransition(self, QEvent.KeyPress, Qt.Key_Q)
quitTransition.setTargetState(quitState)
inputState.addTransition(quitTransition)
machine.setInitialState(inputState)
machine.finished.connect(qApp.quit)
machine.start()
def sizeHint(self):
metrics = QFontMetrics(self.font())
return QSize(metrics.width('X') * self.width, metrics.height() * (self.height + 1))
def paintEvent(self, event):
metrics = QFontMetrics(self.font())
painter = QPainter(self)
fontHeight = metrics.height()
fontWidth = metrics.width('X')
painter.fillRect(self.rect(), Qt.black)
painter.setPen(Qt.white)
yPos = fontHeight
painter.drawText(QPoint(0, yPos), self.status)
for y in range(self.height):
yPos += fontHeight
xPos = 0
for x in range(self.width):
if y == self.pY and x == self.pX:
xPos += fontWidth
continue
painter.drawText(QPoint(xPos, yPos), self.map[x][y])
xPos += fontWidth
painter.drawText(QPoint(self.pX * fontWidth, (self.pY + 2) * fontHeight), '@')
def movePlayer(self, direction):
if direction == self.Left:
if self.map[self.pX - 1][self.pY] != '#':
self.pX -= 1
elif direction == self.Right:
if self.map[self.pX + 1][self.pY] != '#':
self.pX += 1
elif direction == self.Up:
if self.map[self.pX][self.pY - 1] != '#':
self.pY -= 1
elif direction == self.Down:
if self.map[self.pX][self.pY + 1] != '#':
self.pY += 1
self.repaint()
def getStatus(self):
return self.statusStr
def setStatus(self, status):
self.statusStr = status
self.repaint()
status = Property(str, getStatus, setStatus)
Up = 0
Down = 1
Left = 2
Right = 3
Width = 35
Height = 20
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
mainWin = MainWindow()
sys.exit(app.exec_())
| apache-2.0 |
jalavik/invenio | invenio/legacy/webmessage/templates.py | 13 | 27994 | # -*- coding: utf-8 -*-
#
# handles rendering of webmessage module
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" templates for webmessage module """
__revision__ = "$Id$"
from invenio.utils.mail import email_quoted_txt2html, email_quote_txt
from invenio.modules.messages.config import CFG_WEBMESSAGE_STATUS_CODE, \
CFG_WEBMESSAGE_SEPARATOR, \
CFG_WEBMESSAGE_RESULTS_FIELD, \
CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES
from invenio.utils.date import convert_datetext_to_dategui, \
datetext_default, \
create_day_selectbox, \
create_month_selectbox, \
create_year_selectbox
from invenio.utils.url import create_html_link, create_url
from invenio.utils.html import escape_html
from invenio.config import CFG_SITE_URL, CFG_SITE_LANG
from invenio.base.i18n import gettext_set_language
from invenio.legacy.webuser import get_user_info
class Template:
"""Templates for WebMessage module"""
def tmpl_display_inbox(self, messages, infos=[], warnings=[], nb_messages=0, no_quota=0, ln=CFG_SITE_LANG):
"""
Displays a list of messages, with the appropriate links and buttons
@param messages: a list of tuples:
[(message_id,
user_from_id,
user_from_nickname,
subject,
sent_date,
                           status)]
@param infos: a list of informations to print on top of page
@param warnings: a list of warnings to display
@param nb_messages: number of messages user has
        @param no_quota: 1 if user has no quota (admin), 0 otherwise.
@param ln: language of the page.
@return: the list in HTML format
"""
_ = gettext_set_language(ln)
dummy = 0
inbox = self.tmpl_warning(warnings, ln)
inbox += self.tmpl_infobox(infos, ln)
if not(no_quota):
inbox += self.tmpl_quota(nb_messages, ln)
inbox += """
<table class="mailbox">
<thead class="mailboxheader">
<tr class="inboxheader">
<td>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
</tr>
</thead>
<tfoot>
<tr style="height:0px;">
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
</tfoot>
<tbody class="mailboxbody">""" % (_("Subject"),
_("Sender"),
_("Date"),
_("Action"))
if len(messages) == 0:
inbox += """
<tr class="mailboxrecord" style="height: 100px;">
<td colspan="4" style="text-align: center;">
<b>%s</b>
</td>
</tr>""" %(_("No messages"),)
for (msgid, id_user_from, user_from_nick,
subject, sent_date, status) in messages:
if not(subject):
subject = _("No subject")
subject_link = create_html_link(
CFG_SITE_URL + '/yourmessages/display_msg',
{'msgid': msgid, 'ln': ln},
escape_html(subject))
if user_from_nick:
from_link = '%s'% (user_from_nick)
else:
from_link = get_user_info(id_user_from, ln)[2]
action_link = create_html_link(CFG_SITE_URL + '/yourmessages/write',
{'msg_reply_id': msgid, 'ln': ln},
_("Reply"))
action_link += ' '
action_link += create_html_link(CFG_SITE_URL + '/yourmessages/delete',
{'msgid': msgid, 'ln': ln},
_("Delete"))
s_date = convert_datetext_to_dategui(sent_date, ln)
stat_style = ''
if (status == CFG_WEBMESSAGE_STATUS_CODE['NEW']):
stat_style = ' style="font-weight:bold"'
inbox += """
<tr class="mailboxrecord">
<td%s>%s</td>
<td>%s</td>
<td>%s</td>
<td>%s</td>
</tr>""" %(stat_style, subject_link, from_link, s_date, action_link)
inbox += """
<tr class="mailboxfooter">
<td colspan="2">
<form name="newMessage" action="%(url_new)s" method="post">
<input type="submit" name="del_all" value="%(write_label)s" class="formbutton" />
</form>
</td>
<td> </td>
<td>
<form name="deleteAll" action="%(url_delete_all)s" method="post">
<input type="submit" name="del_all" value="%(delete_all_label)s" class="formbutton" />
</form>
</td>
</tr>
</tbody>
</table>""" % {'url_new': create_url(CFG_SITE_URL + '/yourmessages/write',
{'ln': ln}),
'url_delete_all': create_url(CFG_SITE_URL + '/yourmessages/delete_all',
{'ln': ln}),
'write_label': _("Write new message"),
'delete_all_label': _("Delete All")}
return inbox
def tmpl_write(self,
msg_to="", msg_to_group="",
msg_id=0,
msg_subject="", msg_body="",
msg_send_year=0, msg_send_month=0, msg_send_day=0,
warnings=[],
search_results_list=[],
search_pattern="",
results_field=CFG_WEBMESSAGE_RESULTS_FIELD['NONE'],
ln=CFG_SITE_LANG):
"""
Displays a writing message form with optional prefilled fields
        @param msg_to: nick of the user (prefills the To: field)
        @param msg_to_group: name of the group (prefills the Groups: field)
        @param msg_id: id of the message being replied to, if any
@param msg_subject: subject of the message (prefills the Subject: field)
@param msg_body: body of the message (prefills the Message: field)
@param msg_send_year: prefills to year field
@param msg_send_month: prefills the month field
@param msg_send_day: prefills the day field
@param warnings: display warnings on top of page
@param search_results_list: list of tuples. (user/groupname, is_selected)
@param search_pattern: pattern used for searching
@param results_field: 'none', 'user' or 'group', see CFG_WEBMESSAGE_RESULTS_FIELD
@param ln: language of the form
@return: the form in HTML format
"""
_ = gettext_set_language(ln)
write_box = self.tmpl_warning(warnings)
        # escape forbidden characters
msg_to = escape_html(msg_to)
msg_to_group = escape_html(msg_to_group)
msg_subject = escape_html(msg_subject)
search_pattern = escape_html(search_pattern)
to_select = self.tmpl_user_or_group_search(search_results_list,
search_pattern,
results_field,
ln)
if msg_id:
msg_subject = _("Re:") + " " + msg_subject
msg_body = email_quote_txt(msg_body)
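            # Replying: prefix "Re:" to the subject and quote the original
            # body with '>' markers, as a mail client would.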
write_box += """
<form name="write_message" action="%(url_form)s" method="post">
<div style="float: left; vertical-align:text-top; margin-right: 10px;">
<table class="mailbox">
<thead class="mailboxheader">
<tr>
<td class="inboxheader" colspan="2">
<table class="messageheader">
<tr>
<td class="mailboxlabel">%(to_label)s</td>
<td class="mailboxlabel">%(users_label)s</td>
<td style="width:100%%;">
<input class="mailboxinput" type="text" name="msg_to_user" value="%(to_users)s" />
</td>
</tr>
<tr>
<td class="mailboxlabel"> </td>
<td class="mailboxlabel">%(groups_label)s</td>
<td style="width:100%%;">
<input class="mailboxinput" type="text" name="msg_to_group" value="%(to_groups)s" />
</td>
</tr>
<tr>
<td class="mailboxlabel"> </td>
<td> </td>
<td> </td>
</tr>
<tr>
<td class="mailboxlabel">%(subject_label)s</td>
<td colspan="2">
<input class="mailboxinput" type="text" name="msg_subject" value="%(subject)s" />
</td>
</tr>
</table>
</td>
</tr>
</thead>
<tfoot>
<tr>
<td style="height:0px" colspan="2"></td>
</tr>
</tfoot>
<tbody class="mailboxbody">
<tr>
<td class="mailboxlabel">%(message_label)s</td>
<td>
<textarea name="msg_body" rows="10" cols="50">"""
write_box_part2 = """
</td>
</tr>
<tr>
<td class="mailboxlabel">%(send_later_label)s</td>
<td>
%(day_field)s
%(month_field)s
%(year_field)s
</td>
</tr>
<tr class="mailboxfooter">
<td colspan="2" class="mailboxfoot">
<input type="submit" name="send_button" value="%(send_label)s" class="formbutton"/>
</td>
</tr>
</tbody>
</table>
</div>
<div style="vertical-align:top; margin-left: 5px; float: left;">
%(to_select)s
</div>
</form>
"""
write_box += "%(body)s</textarea>" + write_box_part2
day_field = create_day_selectbox('msg_send_day',
msg_send_day, ln)
month_field = create_month_selectbox('msg_send_month',
msg_send_month, ln)
year_field = create_year_selectbox('msg_send_year', -1, 10,
msg_send_year, ln)
write_box = write_box % {'url_form': create_url(
CFG_SITE_URL + '/yourmessages/send',
{'ln': ln}),
'to_users' : msg_to,
'to_groups': msg_to_group,
'subject' : msg_subject,
'body' : msg_body,
'ln': ln,
'day_field': day_field,
'month_field': month_field,
'year_field': year_field,
'to_select': to_select,
'send_later_label': _("Send later?"),
'to_label': _("To:"),
'users_label': _("Users"),
'groups_label': _("Groups"),
'subject_label': _("Subject:"),
'message_label': _("Message:"),
'send_label': _("SEND")}
return write_box
def tmpl_display_msg(self,
msg_id="",
msg_from_id="",
msg_from_nickname="",
msg_sent_to="",
msg_sent_to_group="",
msg_subject="",
msg_body="",
msg_sent_date="",
msg_received_date=datetext_default,
ln=CFG_SITE_LANG):
"""
Displays a given message
@param msg_id: id of the message
@param msg_from_id: id of user who sent the message
@param msg_from_nickname: nickname of the user who sent the message
@param msg_sent_to: list of users who received the message
(comma separated string)
@param msg_sent_to_group: list of groups who received the message
(comma separated string)
@param msg_subject: subject of the message
@param msg_body: body of the message
@param msg_sent_date: date at which the message was sent
@param msg_received_date: date at which the message had to be received
(if this argument != 0000-00-00, the message is a reminder)
@param ln: language of the page
@return: the message in HTML format
"""
# load the right message language
_ = gettext_set_language(ln)
sent_to_link = ''
tos = msg_sent_to.split(CFG_WEBMESSAGE_SEPARATOR)
if (tos):
for to in tos[0:-1]:
to_display = to
if to.isdigit():
(dummy, to, to_display) = get_user_info(int(to), ln)
sent_to_link += create_html_link(CFG_SITE_URL + '/yourmessages/write',
{'msg_to': to, 'ln': ln},
escape_html(to_display))
sent_to_link += CFG_WEBMESSAGE_SEPARATOR
to_display = tos[-1]
to = tos[-1]
if to.isdigit():
(dummy, to, to_display) = get_user_info(int(to), ln)
sent_to_link += create_html_link(CFG_SITE_URL + '/yourmessages/write',
{'msg_to': to, 'ln': ln},
escape_html(to_display))
group_to_link = ""
groups = msg_sent_to_group.split(CFG_WEBMESSAGE_SEPARATOR)
if (groups):
for group in groups[0:-1]:
group_to_link += create_html_link(
CFG_SITE_URL + '/yourmessages/write',
{'msg_to_group': group, 'ln': ln},
escape_html(group))
group_to_link += CFG_WEBMESSAGE_SEPARATOR
group_to_link += create_html_link(
CFG_SITE_URL + '/yourmessages/write',
{'msg_to_group': groups[-1], 'ln': ln},
escape_html(groups[-1]))
# format the msg so that the '>>' chars give vertical lines
final_body = email_quoted_txt2html(msg_body)
out = """
<table class="mailbox" style="width: 70%%;">
<thead class="mailboxheader">
<tr>
<td class="inboxheader" colspan="2">
<table class="messageheader">
<tr>
<td class="mailboxlabel">%(from_label)s</td>
<td>%(from_link)s</td>
</tr>
<tr>
<td class="mailboxlabel">%(subject_label)s</td>
<td style="width: 100%%;">%(subject)s</td>
</tr>
<tr>
<td class="mailboxlabel">%(sent_label)s</td>
<td>%(sent_date)s</td>
</tr>"""
if (msg_received_date != datetext_default):
out += """
<tr>
<td class="mailboxlabel">%(received_label)s</td>
<td>%(received_date)s</td>
</tr>"""
out += """
<tr>
<td class="mailboxlabel">%(sent_to_label)s</td>
<td>%(sent_to)s</td>
</tr>"""
if (msg_sent_to_group != ""):
out += """
<tr>
<td class="mailboxlabel">%(groups_label)s</td>
<td>%(sent_to_group)s</td>
</tr>"""
out += """
</table>
</td>
</tr>
</thead>
<tfoot>
<tr>
<td></td>
<td></td>
</tr>
</tfoot>
<tbody class="mailboxbody">
<tr class="mailboxrecord">
<td colspan="2">%(body)s</td>
</tr>
<tr class="mailboxfooter">
<td>
<form name="reply" action="%(reply_url)s" method="post">
<input class="formbutton" name="reply" value="%(reply_but_label)s" type="submit" />
</form>
</td>
<td>
<form name="deletemsg" action="%(delete_url)s" method="post">
<input class="formbutton" name="delete" value="%(delete_but_label)s" type="submit" />
</form>
</td>
</tr>
</tbody>
</table>
"""
if msg_from_nickname:
msg_from_display = msg_from_nickname
else:
msg_from_display = get_user_info(msg_from_id, ln)[2]
msg_from_nickname = msg_from_id
return out % {'from_link': create_html_link(
CFG_SITE_URL + '/yourmessages/write',
{'msg_to': msg_from_nickname,
'ln': ln},
msg_from_display),
'reply_url': create_url(CFG_SITE_URL + '/yourmessages/write',
{'msg_reply_id': msg_id,
'ln': ln}),
'delete_url': create_url(CFG_SITE_URL + '/yourmessages/delete',
{'msgid': msg_id,
'ln': ln}),
'sent_date' : convert_datetext_to_dategui(msg_sent_date, ln),
'received_date': convert_datetext_to_dategui(msg_received_date, ln),
'sent_to': sent_to_link,
'sent_to_group': group_to_link,
'subject' : msg_subject,
'body' : final_body,
'reply_to': msg_from_id,
'ln': ln,
'from_label':_("From:"),
'subject_label':_("Subject:"),
'sent_label': _("Sent on:"),
'received_label':_("Received on:"),
'sent_to_label': _("Sent to:"),
'groups_label': _("Sent to groups:"),
'reply_but_label':_("REPLY"),
'delete_but_label': _("DELETE")}
def tmpl_navtrail(self, ln=CFG_SITE_LANG, title=""):
"""
Display the navtrail, e.g.:
Your account > Your messages > title
@param title: the last part of the navtrail; it is not a link
@param ln: language
@return: html formatted navtrail
"""
_ = gettext_set_language(ln)
nav_h1 = create_html_link(CFG_SITE_URL + '/youraccount/display',
{'ln': ln},
_("Your Account"),
{'class': 'navtrail'})
nav_h2 = ""
if (title != ""):
nav_h2 += create_html_link(CFG_SITE_URL + '/yourmessages/display',
{'ln': ln},
_("Your Messages"),
{'class': 'navtrail'})
return nav_h1 + ' > ' + nav_h2
return nav_h1
def tmpl_confirm_delete(self, ln=CFG_SITE_LANG):
"""
Display a confirmation message before emptying the mailbox
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
out = """
<table class="confirmoperation">
<tr>
<td colspan="2" class="confirmmessage">
%(message)s
</td>
</tr>
<tr>
<td>
<form name="validate" action="delete_all" method="post">
<input type="hidden" name="confirmed" value="1" />
<input type="hidden" name="ln" value="%(ln)s" />
<input type="submit" value="%(yes_label)s" class="formbutton" />
</form>
</td>
<td>
<form name="cancel" action="display" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<input type="submit" value="%(no_label)s" class="formbutton" />
</form>
</td>
</tr>
</table>"""% {'message': _("Are you sure you want to empty your whole mailbox?"),
'ln':ln,
'yes_label': _("Yes"),
'no_label': _("No")}
return out
def tmpl_infobox(self, infos, ln=CFG_SITE_LANG):
"""Display len(infos) information fields
@param infos: list of strings
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
if not((type(infos) is list) or (type(infos) is tuple)):
infos = [infos]
infobox = ""
for info in infos:
infobox += "<div class=\"infobox\">"
lines = info.split("\n")
for line in lines[0:-1]:
infobox += line + "<br />\n"
infobox += lines[-1] + "</div><br />\n"
return infobox
def tmpl_warning(self, warnings, ln=CFG_SITE_LANG):
"""
Display len(warnings) warning fields
@param warnings: list of strings
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
if not((type(warnings) is list) or (type(warnings) is tuple)):
warnings = [warnings]
warningbox = ""
if warnings != []:
warningbox = "<div class=\"warningbox\">\n <b>Warning:</b>\n"
for warning in warnings:
lines = warning.split("\n")
warningbox += " <p>"
for line in lines[0:-1]:
warningbox += line + " <br />\n"
warningbox += lines[-1] + " </p>"
warningbox += "</div><br />\n"
return warningbox
def tmpl_error(self, error, ln=CFG_SITE_LANG):
"""
Display error
@param error: string
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
errorbox = ""
if error != "":
errorbox = "<div class=\"errorbox\">\n <b>Error:</b>\n"
errorbox += " <p>"
errorbox += error + " </p>"
errorbox += "</div><br />\n"
return errorbox
def tmpl_quota(self, nb_messages=0, ln=CFG_SITE_LANG):
"""
Display a quota bar.
@param nb_messages: number of messages in the inbox
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
quota = float(CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES)
ratio = float(nb_messages) / quota
out = """
%(quota_label)s<br />
<div class="quotabox">
<div class="quotabar" style="width:%(width)ipx"></div>
</div>""" % {'quota_label': _("Quota used: %(x_nb_used)i messages out of max. %(x_nb_total)i",
x_nb_used=nb_messages, x_nb_total=CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES),
'width': int(ratio * 200)}
return out
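# Illustrative note (not part of the original template): the quota bar is a
# fixed 200px track, so with e.g. CFG_WEBMESSAGE_MAX_NB_OF_MESSAGES = 30 and
# nb_messages = 15, ratio = 0.5 and the filled bar is int(0.5 * 200) = 100px wide.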
def tmpl_multiple_select(self, select_name, tuples_list, ln=CFG_SITE_LANG):
"""displays a multiple select environment
@param tuples_list: a list of (value, isSelected) tuples
@return: HTML output
"""
_ = gettext_set_language(ln)
if not((type(tuples_list) is list) or (type(tuples_list) is tuple)):
tuples_list = [tuples_list]
out = """
%s
<select name="%s" multiple="multiple" style="width:100%%">"""% (_("Please select one or more:"), select_name)
for (value, is_selected) in tuples_list:
out += ' <option value="%s"'% value
if is_selected:
out += " selected=\"selected\""
out += ">%s</option>\n"% value
out += "</select>\n"
return out
def tmpl_user_or_group_search(self,
tuples_list=[],
search_pattern="",
results_field=CFG_WEBMESSAGE_RESULTS_FIELD['NONE'],
ln=CFG_SITE_LANG):
"""
Display a box for user searching
@param tuples_list: list of (value, is_selected) tuples
@param search_pattern: text to display in this field
@param results_field: either 'none', 'user', 'group', look at CFG_WEBMESSAGE_RESULTS_FIELD
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
multiple_select = ''
add_button = ''
if results_field != CFG_WEBMESSAGE_RESULTS_FIELD['NONE'] and results_field in CFG_WEBMESSAGE_RESULTS_FIELD.values():
if len(tuples_list):
multiple_select = self.tmpl_multiple_select('names_selected', tuples_list)
add_button = '<input type="submit" name="%s" value="%s" class="nonsubmitbutton" />'
if results_field == CFG_WEBMESSAGE_RESULTS_FIELD['USER']:
add_button = add_button % ('add_user', _("Add to users"))
else:
add_button = add_button % ('add_group', _("Add to groups"))
else:
if results_field == CFG_WEBMESSAGE_RESULTS_FIELD['USER']:
multiple_select = _("No matching user")
else:
multiple_select = _("No matching group")
out = """
<table class="mailbox">
<thead class="mailboxheader">
<tr class ="inboxheader">
<td colspan="3">
%(title_label)s
<input type="hidden" name="results_field" value="%(results_field)s" />
</td>
</tr>
</thead>
<tfoot>
<tr><td colspan="3"></td></tr>
</tfoot>
<tbody class="mailboxbody">
<tr class="mailboxsearch">
<td>
<input type="text" name="search_pattern" value="%(search_pattern)s" />
</td>
<td>
<input type="submit" name="search_user" value="%(search_user_label)s" class="nonsubmitbutton" />
</td>
<td>
<input type="submit" name="search_group" value="%(search_group_label)s" class="nonsubmitbutton" />
</td>
</tr>
<tr class="mailboxresults">
<td colspan="2">
%(multiple_select)s
</td>
<td>
%(add_button)s
</td>
</tr>
</tbody>
</table>
"""
out = out % {'title_label' : _("Find users or groups:"),
'search_user_label' : _("Find a user"),
'search_group_label' : _("Find a group"),
'results_field' : results_field,
'search_pattern' : search_pattern,
'multiple_select' : multiple_select,
'add_button' : add_button}
return out
def tmpl_account_new_mail(self, nb_new_mail=0, total_mail=0, ln=CFG_SITE_LANG):
"""
Display info about the inbox (used by myaccount.py).
@param nb_new_mail: number of new messages
@param total_mail: total number of messages
@param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
out = _("You have %(x_nb_new)s new messages out of %(x_nb_total)s messages") % \
{'x_nb_new': '<b>' + str(nb_new_mail) + '</b>',
'x_nb_total': create_html_link(CFG_SITE_URL + '/yourmessages/',
{'ln': ln},
str(total_mail),
{},
False, False)}
return out + '.'
| gpl-2.0 |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pip/_vendor/requests/utils.py | 319 | 24163 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, InvalidHeader, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = 0
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
total_length = len(o.getvalue())
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
try:
current_position = o.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
current_position = total_length
return max(0, total_length - current_position)
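# Illustrative examples (not part of the original module), showing how
# super_len falls through the attribute checks above:
#   >>> import io
#   >>> super_len(b'abcd')              # bytes have __len__
#   4
#   >>> buf = io.BytesIO(b'abcd')
#   >>> _ = buf.read(1)
#   >>> super_len(buf)                  # getvalue() gives 4, tell() gives 1
#   3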
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc. This weird `if...encode` dance is
# used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
:rtype: list
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
:rtype: dict
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:rtype: CookieJar
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:rtype: str
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
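# Illustrative examples (not part of the original module): unreserved
# escapes are decoded, reserved ones are kept, and requote_uri only quotes
# characters that are actually illegal:
#   >>> unquote_unreserved('foo%7Ebar%2Fbaz')   # %7E is '~' (unreserved)
#   'foo~bar%2Fbaz'
#   >>> requote_uri('http://example.com/a b')
#   'http://example.com/a%20b'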
def address_in_network(ip, net):
"""This function allows you to check if on IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
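# Worked example (illustrative, not part of the original module): for /24,
# dotted_netmask computes 0xffffffff ^ ((1 << 8) - 1) == 0xffffff00, which
# packs to '255.255.255.0', so:
#   >>> address_in_network('192.168.1.1', '192.168.1.0/24')
#   True
#   >>> address_in_network('192.168.1.1', '192.168.100.0/24')
#   False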
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
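# Illustrative examples (not part of the original module):
#   >>> is_valid_cidr('192.168.1.0/24')
#   True
#   >>> is_valid_cidr('192.168.1.0/33')   # mask out of the 1..32 range
#   False
#   >>> is_valid_cidr('192.168.1.0')      # no '/': plain IP, not CIDR
#   False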
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
:rtype: bool
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
elif ip == proxy_ip:
# If no_proxy ip was defined in plain IP notation instead of cidr notation &
# matches the IP of the index
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on macOS in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
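# Illustrative sketch (not part of the original module): with the environment
# variable no_proxy='localhost,.example.com,10.0.0.0/8', a request to
# 'http://api.example.com/v1' matches the '.example.com' suffix and a request
# to 'http://10.1.2.3/' falls inside the 10.0.0.0/8 block, so both bypass the
# proxies; 'http://other.org/' matches neither and is proxied normally.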
def get_environ_proxies(url):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
:param url: The url for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get('all', proxies.get(urlparts.scheme))
proxy_keys = [
'all://' + urlparts.hostname,
'all',
urlparts.scheme + '://' + urlparts.hostname,
urlparts.scheme,
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
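# Illustrative example (not part of the original module), showing the key
# precedence above (a host-specific 'all' entry beats the bare scheme):
#   >>> proxies = {'http': 'http://proxy-a:3128',
#   ...            'all://example.com': 'http://proxy-b:3128'}
#   >>> select_proxy('http://example.com/path', proxies)
#   'http://proxy-b:3128'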
def default_user_agent(name="python-requests"):
"""
Return a string representing the default user agent.
:rtype: str
"""
return '%s/%s' % (name, __version__)
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
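# Illustrative example (not part of the original module):
#   >>> parse_header_links('<http://example.com/page2>; rel="next", '
#   ...                    '<http://example.com/page9>; rel="last"')
#   [{'url': 'http://example.com/page2', 'rel': 'next'},
#    {'url': 'http://example.com/page9', 'rel': 'last'}]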
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
"""
:rtype: str
"""
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
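# Illustrative examples (not part of the original module): JSON always opens
# with two ASCII characters, so the null pattern in the first four bytes
# pins down the encoding without decoding anything:
#   >>> guess_json_utf(b'{"a": 1}')
#   'utf-8'
#   >>> guess_json_utf('{"a": 1}'.encode('utf-16-le'))
#   'utf-16-le'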
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
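# Illustrative example (not part of the original module): without the
# netloc/path swap above, a bare host would land in the path component.
#   >>> prepend_scheme_if_needed('example.com/pub', 'http')
#   'http://example.com/pub'
#   >>> prepend_scheme_if_needed('https://example.com/pub', 'http')
#   'https://example.com/pub'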
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password.
:rtype: (str,str)
"""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters. This prevents unintended
header injection.
:param header: tuple, in the format (name, value).
"""
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Header value %s must be of type str or bytes, "
"not %s" % (value, type(value)))
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
| apache-2.0 |
CCI-MOC/GUI-Backend | scripts/import_tags.py | 1 | 2440 | #!/usr/bin/env python
import json
import logging
from optparse import OptionParser
import django
django.setup()
# Models must be imported after django.setup() on Django >= 1.7.
from service_old.models import Instance
def export_instance_tags():
instance_tags = []
instances = Instance.objects.all()
added = 0
for i in instances:
if i.instance_tags:
tag_json = []
tag_list = i.instance_tags.split(',')
for tag in tag_list:
tag_json.append({'name': tag, 'description': ''})
instance_tags.append({'instance': i.instance_id, 'tags': tag_json})
added = added + 1
logging.info('%s records exported' % added)
return json.dumps(instance_tags)
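# Illustrative shape of the exported JSON (instance id and tag values are
# hypothetical); import_instance_tags() below consumes exactly this structure:
# [{"instance": "i-abc123", "tags": [{"name": "web", "description": ""}]}]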
def import_instance_tags(instance_tags_json):
instance_tags = json.loads(instance_tags_json)
added = 0
skipped = 0
for instance_tag in instance_tags:
try:
instance = Instance.objects.get(
instance_id=instance_tag['instance'])
instance.instance_tags = ','.join(
[tag['name'] for tag in instance_tag['tags']])
instance.save()
added = added + 1
except Instance.DoesNotExist as dne:
logging.warn(
'Could not import tags for instance <%s> - DB Record does not exist' %
instance_tag['instance'])
skipped = skipped + 1
total = added + skipped
logging.info(
'%s Records imported. %s Records added, %s Records skipped' %
(total, added, skipped))
return
def main():
(options, filenames) = parser.parse_args()
if not filenames or len(filenames) == 0:
print 'Missing filename'
parser.print_help()
return 1
filename = filenames[0]
if options.export:
f = open(filename, 'w')
json_data = export_instance_tags()
f.write(json_data)
else:
f = open(filename, 'r')
json_data = f.read()
import_instance_tags(json_data)
f.close()
return
usage = "usage: %prog [command] filename"
parser = OptionParser(usage=usage)
parser.add_option(
"--import",
action="store_false",
dest="export",
help="Override the current DB with the Instance Tag JSON file provided")
parser.add_option(
"--export",
action="store_true",
dest="export",
default=True,
help="Export the current DB instance tags to empty file provided")
if __name__ == '__main__':
main()
| apache-2.0 |
scotthartbti/android_external_chromium_org | third_party/protobuf/python/mox.py | 603 | 38237 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class supports item assignment.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
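# Illustrative sketch (not part of the original module; mock_obj and its
# methods are hypothetical): expectations recorded in the same InAnyOrder
# group may be replayed in any order, but the whole group must complete
# before the next ordered expectation:
#   mock_obj.Open('a').InAnyOrder()
#   mock_obj.Open('b').InAnyOrder()
#   mock_obj.Close()    # only valid once both Open calls have been made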
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError, 'method must be implemented by a subclass.'
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example mock_dao.SetTimeout((IsAlmost(3.9)))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is any thing that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs(['stevepm', 'salomaki']))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
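# Illustrative behaviour (sketch, not from the original docs):
#     SameElementsAs([1, 2, 3]).equals([3, 1, 2])    # True
#     SameElementsAs([[1], [2]]).equals([[2], [1]])  # True, via the sort fallback
# Note that the dict-based fast path deduplicates, so element *counts* are
# not compared for hashable inputs.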
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator instances
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator instances
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
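# Comparators compose; an illustrative sketch (not from the original docs):
#     mock_dao.UpdateUsers(And(IsA(dict), ContainsKeyValue('id', 42)))
#     mock_dao.Lookup(Or(IsA(int), StrContains('legacy')))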
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), True)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
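# Illustrative use (sketch): recorded calls placed in the same unordered
# group -- e.g. via a mock method's InAnyOrder('init') call -- may then be
# replayed in any order; MethodCalled() keeps the group at the head of the
# call queue until every expected call has been consumed.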
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# Also get all the attributes from the base classes to account
# for the case when the test class is not the immediate child of MoxTestBase.
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
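# Minimal usage sketch (illustrative; `mymodule` and `Fetch` are hypothetical):
#     class MyTest(MoxTestBase):
#         def test_fetch(self):
#             self.mox.StubOutWithMock(mymodule, 'Fetch')
#             mymodule.Fetch('key').AndReturn('value')
#             self.mox.ReplayAll()
#             self.assertEqual(mymodule.Fetch('key'), 'value')
# UnsetStubs() and VerifyAll() run automatically via MoxMetaTestBase.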
| bsd-3-clause |
forpster/dotfiles | home/.emacs.d/.python-environments/default/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
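# Illustration of the layout described above (sketch): after od['a'] = 1 and
# od['b'] = 2 the circular list is
#     a_link = [root, b_link, 'a'];  b_link = [a_link, root, 'b']
# so root[1] is the oldest link ('a') and root[0] the newest ('b').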
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
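# Usage sketch (illustrative, not part of the recipe):
#     od = OrderedDict([('a', 1), ('b', 2)])
#     list(od)           # ['a', 'b'] -- insertion order preserved
#     od.popitem()       # ('b', 2)   -- LIFO by default
#     od.popitem(False)  # ('a', 1)   -- FIFO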
| mit |
hiunnhue/libchewing | scripts/create_keystroke_from_text.py | 15 | 1441 | #!/usr/bin/env python3
import argparse
import os
import sys
def get_args():
parser = argparse.ArgumentParser(
description='Create keystroke from text.')
parser.add_argument('input', metavar='input', type=str, nargs=1,
help='Input Chinese text file')
parser.add_argument('output', metavar='output', type=str, nargs=1,
help='Output keystroke file')
parser.add_argument('phone', metavar='phone', type=str, nargs='?',
default=os.path.join(os.path.dirname(sys.argv[0]), '..', 'data', 'phone.cin'),
help='phone.cin')
return parser.parse_args()
def read_phone(phone):
phone_table = {}
with open(phone) as f:
for l in f:
if l.startswith('%chardef begin'):
break
for l in f:
if l.startswith('%chardef end'):
break
item = l.split()
phone_table[item[1]] = item[0]
return phone_table
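# The parser above expects a chardef section shaped like this (hypothetical
# entries; only the "keystroke character" pair layout is assumed from the
# split() logic):
#     %chardef begin
#     ji3 我
#     %chardef end
# producing phone_table = {'我': 'ji3'}.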
def main():
args = get_args()
phone_table = read_phone(args.phone)
in_file = args.input[0]
out_file = args.output[0]
word_count = 0
with open(in_file) as in_, open(out_file, "w") as out_:
for l in in_:
for c in l:
if c in phone_table:
out_.write(phone_table[c])
word_count += 1
out_.write('\n')
print('word count = {}'.format(word_count))
if __name__ == '__main__':
main()
| lgpl-2.1 |
ChanChiChoi/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
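    # A follow-up check one might add (illustrative; not in the original test):
    # the fitted non-metric MDS should embed the 4 points in 2 dimensions,
    # i.e. mds_clf.embedding_.shape == (4, 2).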
| bsd-3-clause |
UBERMALLOW/external_skia | tools/test_gpuveto.py | 142 | 5357 | #!/usr/bin/env python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to test out suitableForGpuRasterization (via gpuveto)"""
import argparse
import glob
import os
import re
import subprocess
import sys
# Set the PYTHONPATH to include the tools directory.
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import find_run_binary
def list_files(dir_or_file):
"""Returns a list of all the files from the provided argument
@param dir_or_file: either a directory or skp file
@returns a list containing the files in the directory or a single file
"""
files = []
for globbedpath in glob.iglob(dir_or_file): # useful on win32
if os.path.isdir(globbedpath):
for filename in os.listdir(globbedpath):
newpath = os.path.join(globbedpath, filename)
if os.path.isfile(newpath):
files.append(newpath)
elif os.path.isfile(globbedpath):
files.append(globbedpath)
return files
def execute_program(args):
"""Executes a process and waits for it to complete.
@param args: is passed into subprocess.Popen().
@returns a tuple of the process output (returncode, output)
"""
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = proc.communicate()
errcode = proc.returncode
return (errcode, output)
class GpuVeto(object):
def __init__(self):
self.bench_pictures = find_run_binary.find_path_to_program(
'bench_pictures')
sys.stdout.write('Running: %s\n' % (self.bench_pictures))
self.gpuveto = find_run_binary.find_path_to_program('gpuveto')
assert os.path.isfile(self.bench_pictures)
assert os.path.isfile(self.gpuveto)
self.indeterminate = 0
self.truePositives = 0
self.falsePositives = 0
self.trueNegatives = 0
self.falseNegatives = 0
def process_skps(self, dir_or_file):
for skp in dir_or_file:
    self.process_skp(skp)
sys.stdout.write('TP %d FP %d TN %d FN %d IND %d\n' % (self.truePositives,
self.falsePositives,
self.trueNegatives,
self.falseNegatives,
self.indeterminate))
def process_skp(self, skp_file):
assert os.path.isfile(skp_file)
#print skp_file
# run gpuveto on the skp
args = [self.gpuveto, '-r', skp_file]
returncode, output = execute_program(args)
if (returncode != 0):
return
if ('unsuitable' in output):
suitable = False
else:
assert 'suitable' in output
suitable = True
# run raster config
args = [self.bench_pictures, '-r', skp_file,
'--repeat', '20',
'--timers', 'w',
'--config', '8888']
returncode, output = execute_program(args)
if (returncode != 0):
return
matches = re.findall('[\d]+\.[\d]+', output)
if len(matches) != 1:
return
rasterTime = float(matches[0])
# run gpu config
args2 = [self.bench_pictures, '-r', skp_file,
'--repeat', '20',
'--timers', 'w',
'--config', 'gpu']
returncode, output = execute_program(args2)
if (returncode != 0):
return
matches = re.findall('[\d]+\.[\d]+', output)
if len(matches) != 1:
return
gpuTime = float(matches[0])
# Happens if the page is too big; it will not render.
if 0 == gpuTime:
return
tolerance = 0.05
tol_range = tolerance * gpuTime
if gpuTime - tol_range < rasterTime < gpuTime + tol_range:
result = "NONE"
self.indeterminate += 1
elif suitable:
if gpuTime < rasterTime:
self.truePositives += 1
result = "TP"
else:
self.falsePositives += 1
result = "FP"
else:
if gpuTime < rasterTime:
self.falseNegatives += 1
result = "FN"
else:
self.trueNegatives += 1
result = "TN"
sys.stdout.write('%s: gpuveto: %d raster %.2f gpu: %.2f Result: %s\n' % (
skp_file, suitable, rasterTime, gpuTime, result))
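    # Worked example of the classification above (illustrative): with
    # tolerance 0.05 and gpuTime = 10.0, tol_range = 0.5, so any rasterTime
    # in (9.5, 10.5) is indeterminate; rasterTime = 12.0 with a 'suitable'
    # verdict counts as a true positive (the GPU really was faster).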
def main(main_argv):
parser = argparse.ArgumentParser()
parser.add_argument('--skp_path',
help='Path to the SKP(s). Can either be a directory ' \
'containing SKPs or a single SKP.',
required=True)
args = parser.parse_args()
GpuVeto().process_skps(list_files(args.skp_path))
if __name__ == '__main__':
sys.exit(main(sys.argv[1]))
| bsd-3-clause |
simonjbeaumont/sm | drivers/ISCSISR.py | 1 | 28776 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# ISCSISR: ISCSI software initiator SR driver
#
import SR, VDI, SRCommand, util
import statvfs, time, LUNperVDI
import os, socket, sys, re, glob
import xml.dom.minidom
import shutil, xmlrpclib
import scsiutil, iscsilib
import xs_errors, errno
CAPABILITIES = ["SR_PROBE","VDI_CREATE","VDI_DELETE","VDI_ATTACH",
"VDI_DETACH", "VDI_INTRODUCE"]
CONFIGURATION = [ [ 'target', 'IP address or hostname of the iSCSI target (required)' ], \
[ 'targetIQN', 'The IQN of the target LUN group to be attached (required)' ], \
[ 'chapuser', 'The username to be used during CHAP authentication (optional)' ], \
[ 'chappassword', 'The password to be used during CHAP authentication (optional)' ], \
[ 'incoming_chapuser', 'The incoming username to be used during bi-directional CHAP authentication (optional)' ], \
[ 'incoming_chappassword', 'The incoming password to be used during bi-directional CHAP authentication (optional)' ], \
[ 'port', 'The network port number on which to query the target (optional)' ], \
[ 'multihomed', 'Enable multi-homing to this target, true or false (optional, defaults to same value as host.other_config:multipathing)' ],
[ 'force_tapdisk', 'Force use of tapdisk, true or false (optional, defaults to false)'],
]
DRIVER_INFO = {
'name': 'iSCSI',
'description': 'Base ISCSI SR driver, provides a LUN-per-VDI. Does not support creation of VDIs but accesses existing LUNs on a target.',
'vendor': 'Citrix Systems Inc',
'copyright': '(C) 2008 Citrix Systems Inc',
'driver_version': '1.0',
'required_api_version': '1.0',
'capabilities': CAPABILITIES,
'configuration': CONFIGURATION
}
INITIATORNAME_FILE = '/etc/iscsi/initiatorname.iscsi'
SECTOR_SHIFT = 9
DEFAULT_PORT = 3260
# 2^16 Max port number value
MAXPORT = 65535
MAX_TIMEOUT = 15
MAX_LUNID_TIMEOUT = 60
ISCSI_PROCNAME = "iscsi_tcp"
class ISCSISR(SR.SR):
"""ISCSI storage repository"""
@property
def force_tapdisk(self):
return self.dconf.get('force_tapdisk', 'false') == 'true'
def handles(type):
if type == "iscsi":
return True
return False
handles = staticmethod(handles)
def _synchroniseAddrList(self, addrlist):
if not self.multihomed:
return
change = False
if not self.dconf.has_key('multihomelist'):
change = True
self.mlist = []
mstr = ""
else:
self.mlist = self.dconf['multihomelist'].split(',')
mstr = self.dconf['multihomelist']
for val in addrlist:
if not val in self.mlist:
self.mlist.append(val)
if len(mstr):
mstr += ","
mstr += val
change = True
if change:
pbd = None
try:
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
if pbd is not None:
device_config = self.session.xenapi.PBD.get_device_config(pbd)
device_config['multihomelist'] = mstr
self.session.xenapi.PBD.set_device_config(pbd, device_config)
except:
pass
def load(self, sr_uuid):
if self.force_tapdisk:
self.sr_vditype = 'aio'
else:
self.sr_vditype = 'phy'
self.discoverentry = 0
self.default_vdi_visibility = False
# Required parameters
if not self.dconf.has_key('target') or not self.dconf['target']:
raise xs_errors.XenError('ConfigTargetMissing')
# we are no longer putting hconf in the xml.
# Instead we pass a session and host ref and let the SM backend query XAPI itself
try:
if not self.dconf.has_key('localIQN'):
self.localIQN = self.session.xenapi.host.get_other_config(self.host_ref)['iscsi_iqn']
else:
self.localIQN = self.dconf['localIQN']
except:
raise xs_errors.XenError('ConfigISCSIIQNMissing')
# Check for empty string
if not self.localIQN:
raise xs_errors.XenError('ConfigISCSIIQNMissing')
try:
self.target = util._convertDNS(self.dconf['target'].split(',')[0])
except:
raise xs_errors.XenError('DNSError')
self.targetlist = self.target
if self.dconf.has_key('targetlist'):
self.targetlist = self.dconf['targetlist']
# Optional parameters
self.chapuser = ""
self.chappassword = ""
if self.dconf.has_key('chapuser') \
and (self.dconf.has_key('chappassword') or self.dconf.has_key('chappassword_secret')):
self.chapuser = self.dconf['chapuser']
if self.dconf.has_key('chappassword_secret'):
self.chappassword = util.get_secret(self.session, self.dconf['chappassword_secret'])
else:
self.chappassword = self.dconf['chappassword']
self.incoming_chapuser = ""
self.incoming_chappassword = ""
if self.dconf.has_key('incoming_chapuser') \
and (self.dconf.has_key('incoming_chappassword') or self.dconf.has_key('incoming_chappassword_secret')):
self.incoming_chapuser = self.dconf['incoming_chapuser']
if self.dconf.has_key('incoming_chappassword_secret'):
self.incoming_chappassword = util.get_secret(self.session, self.dconf['incoming_chappassword_secret'])
else:
self.incoming_chappassword = self.dconf['incoming_chappassword']
self.port = DEFAULT_PORT
if self.dconf.has_key('port') and self.dconf['port']:
try:
self.port = long(self.dconf['port'])
except:
raise xs_errors.XenError('ISCSIPort')
if self.port > MAXPORT or self.port < 1:
raise xs_errors.XenError('ISCSIPort')
# For backwards compatibility
if self.dconf.has_key('usediscoverynumber'):
self.discoverentry = self.dconf['usediscoverynumber']
self.multihomed = False
if self.dconf.has_key('multihomed'):
if self.dconf['multihomed'] == "true":
self.multihomed = True
elif self.mpath == 'true':
self.multihomed = True
if not self.dconf.has_key('targetIQN') or not self.dconf['targetIQN']:
self._scan_IQNs()
raise xs_errors.XenError('ConfigTargetIQNMissing')
self.targetIQN = unicode(self.dconf['targetIQN']).encode('utf-8')
self.attached = False
try:
self.attached = iscsilib._checkTGT(self.targetIQN)
except:
pass
self._initPaths()
def _initPaths(self):
self._init_adapters()
# Generate a list of all possible paths
self.pathdict = {}
addrlist = []
rec = {}
key = "%s:%d" % (self.target,self.port)
rec['ipaddr'] = self.target
rec['port'] = self.port
rec['path'] = os.path.join("/dev/iscsi",self.targetIQN,\
key)
self.pathdict[key] = rec
util.SMlog("PATHDICT: key %s: %s" % (key,rec))
self.tgtidx = key
addrlist.append(key)
self.path = rec['path']
self.address = self.tgtidx
if not self.attached:
return
if self.multihomed:
map = iscsilib.get_node_records(targetIQN=self.targetIQN)
for i in range(0,len(map)):
(portal,tpgt,iqn) = map[i]
(ipaddr, port) = iscsilib.parse_IP_port(portal)
if self.target != ipaddr:
key = "%s:%s" % (ipaddr,port)
rec = {}
rec['ipaddr'] = ipaddr
rec['port'] = long(port)
rec['path'] = os.path.join("/dev/iscsi",self.targetIQN,\
key)
self.pathdict[key] = rec
util.SMlog("PATHDICT: key %s: %s" % (key,rec))
addrlist.append(key)
# Try to detect an active path in order of priority
for key in self.pathdict:
if self.adapter.has_key(key):
self.tgtidx = key
self.path = self.pathdict[self.tgtidx]['path']
if os.path.exists(self.path):
util.SMlog("Path found: %s" % self.path)
break
self.address = self.tgtidx
self._synchroniseAddrList(addrlist)
def _init_adapters(self):
# Generate a list of active adapters
ids = scsiutil._genHostList(ISCSI_PROCNAME)
util.SMlog(ids)
self.adapter = {}
for host in ids:
try:
targetIQN = iscsilib.get_targetIQN(host)
if targetIQN != self.targetIQN:
continue
(addr, port) = iscsilib.get_targetIP_and_port(host)
entry = "%s:%s" % (addr,port)
self.adapter[entry] = host
except:
pass
self.devs = scsiutil.cacheSCSIidentifiers()
def attach(self, sr_uuid):
self._mpathHandle()
npaths=0
if not self.attached:
# Verify iSCSI target and port
if self.dconf.has_key('multihomelist') and not self.dconf.has_key('multiSession'):
targetlist = self.dconf['multihomelist'].split(',')
else:
targetlist = ['%s:%d' % (self.target,self.port)]
conn = False
for val in targetlist:
(target, port) = iscsilib.parse_IP_port(val)
try:
util._testHost(target, long(port), 'ISCSITarget')
self.target = target
self.port = long(port)
conn = True
break
except:
pass
if not conn:
raise xs_errors.XenError('ISCSITarget')
# Test and set the initiatorname file
iscsilib.ensure_daemon_running_ok(self.localIQN)
# Check to see if auto attach was set
if not iscsilib._checkTGT(self.targetIQN):
try:
map = []
if 'any' != self.targetIQN:
try:
map = iscsilib.get_node_records(self.targetIQN)
except:
# Pass the exception that is thrown, when there
# are no nodes
pass
if len(map) == 0:
map = iscsilib.discovery(self.target, self.port,
self.chapuser, self.chappassword,
self.targetIQN,
iscsilib.get_iscsi_interfaces())
if len(map) == 0:
self._scan_IQNs()
raise xs_errors.XenError('ISCSIDiscovery',
opterr='check target settings')
for i in range(0,len(map)):
(portal,tpgt,iqn) = map[i]
try:
(ipaddr, port) = iscsilib.parse_IP_port(portal)
if not self.multihomed and ipaddr != self.target:
continue
util._testHost(ipaddr, long(port), 'ISCSITarget')
util.SMlog("Logging in to [%s:%s]" % (ipaddr,port))
iscsilib.login(portal, iqn, self.chapuser,
self.chappassword,
self.incoming_chapuser,
self.incoming_chappassword,
self.mpath == "true")
npaths = npaths + 1
except Exception, e:
# Exceptions thrown in login are acknowledged,
# the rest of exceptions are ignored since some of the
# paths in multipath may not be reachable
if str(e) == 'ISCSI login failed, verify CHAP credentials':
raise
else:
pass
if not iscsilib._checkTGT(self.targetIQN):
raise xs_errors.XenError('ISCSIDevice', \
opterr='during login')
# Allow the devices to settle
time.sleep(5)
except util.CommandException, inst:
raise xs_errors.XenError('ISCSILogin', \
opterr='code is %d' % inst.code)
self.attached = True
self._initPaths()
util._incr_iscsiSR_refcount(self.targetIQN, sr_uuid)
IQNs = []
if self.dconf.has_key("multiSession"):
IQNs = ""
for iqn in self.dconf['multiSession'].split("|"):
if len(iqn): IQNs += iqn.split(',')[2]
else:
IQNs.append(self.targetIQN)
sessions = 0
paths = iscsilib.get_IQN_paths()
for path in paths:
try:
if util.get_single_entry(os.path.join(path, 'targetname')) in IQNs:
sessions += 1
util.SMlog("IQN match. Incrementing sessions to %d" % sessions)
except:
util.SMlog("Failed to read targetname path," \
+ "iscsi_sessions value may be incorrect")
try:
pbdref = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
if pbdref is not None:
# Just to be safe in case of garbage left during crashes
# we remove the key and add it
self.session.xenapi.PBD.remove_from_other_config(
pbdref, "iscsi_sessions")
self.session.xenapi.PBD.add_to_other_config(
pbdref, "iscsi_sessions", str(sessions))
except:
pass
if self.mpath == 'true' and self.dconf.has_key('SCSIid'):
self.mpathmodule.refresh(self.dconf['SCSIid'],npaths)
# set the device mapper's I/O scheduler
path = '/dev/disk/by-scsid/%s' % self.dconf['SCSIid']
for file in os.listdir(path):
self.block_setscheduler('%s/%s' % (path,file))
def detach(self, sr_uuid):
keys = []
pbdref = None
try:
pbdref = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
except:
pass
if self.dconf.has_key('SCSIid'):
self.mpathmodule.reset(self.dconf['SCSIid'], True) # explicitly unmap
keys.append("mpath-" + self.dconf['SCSIid'])
# Remove iscsi_sessions and multipathed keys
if pbdref is not None:
if self.cmd == 'sr_detach':
keys += ["multipathed", "iscsi_sessions", "MPPEnabled"]
for key in keys:
try:
self.session.xenapi.PBD.remove_from_other_config(pbdref, key)
except:
pass
if util._decr_iscsiSR_refcount(self.targetIQN, sr_uuid) != 0:
return
if self.direct and util._containsVDIinuse(self):
return
if iscsilib._checkTGT(self.targetIQN):
try:
iscsilib.logout(self.target, self.targetIQN, all=True)
except util.CommandException, inst:
raise xs_errors.XenError('ISCSIQueryDaemon', \
opterr='error is %d' % inst.code)
if iscsilib._checkTGT(self.targetIQN):
raise xs_errors.XenError('ISCSIQueryDaemon', \
opterr='Failed to logout from target')
self.attached = False
def create(self, sr_uuid, size):
# Check whether an SR already exists
SRs = self.session.xenapi.SR.get_all_records()
for sr in SRs:
record = SRs[sr]
sm_config = record["sm_config"]
if sm_config.has_key('targetIQN') and \
sm_config['targetIQN'] == self.targetIQN:
raise xs_errors.XenError('SRInUse')
self.attach(sr_uuid)
# Wait up to MAX_TIMEOUT for devices to appear
util.wait_for_path(self.path, MAX_TIMEOUT)
if self._loadvdis() > 0:
scanrecord = SR.ScanRecord(self)
scanrecord.synchronise()
try:
self.detach(sr_uuid)
except:
pass
self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
self.sm_config['disktype'] = 'Raw'
self.sm_config['datatype'] = 'ISCSI'
self.sm_config['target'] = self.target
self.sm_config['targetIQN'] = self.targetIQN
self.sm_config['multipathable'] = 'true'
self.session.xenapi.SR.set_sm_config(self.sr_ref, self.sm_config)
return
def delete(self, sr_uuid):
self.detach(sr_uuid)
return
def probe(self):
SRs = self.session.xenapi.SR.get_all_records()
Recs = {}
for sr in SRs:
record = SRs[sr]
sm_config = record["sm_config"]
if sm_config.has_key('targetIQN') and \
sm_config['targetIQN'] == self.targetIQN:
Recs[record["uuid"]] = sm_config
return self.srlist_toxml(Recs)
def scan(self, sr_uuid):
if not self.passthrough:
if not self.attached:
raise xs_errors.XenError('SRUnavailable')
self.refresh()
time.sleep(2) # it seems impossible to tell when a scan's finished
self._loadvdis()
self.physical_utilisation = self.physical_size
for uuid, vdi in self.vdis.iteritems():
if vdi.managed:
self.physical_utilisation += vdi.size
self.virtual_allocation = self.physical_utilisation
return super(ISCSISR, self).scan(sr_uuid)
def vdi(self, uuid):
return LUNperVDI.RAWVDI(self, uuid)
def _scan_IQNs(self):
# Verify iSCSI target and port
util._testHost(self.target, self.port, 'ISCSITarget')
# Test and set the initiatorname file
iscsilib.ensure_daemon_running_ok(self.localIQN)
map = iscsilib.discovery(self.target, self.port, self.chapuser,
self.chappassword,
interfaceArray=iscsilib.get_iscsi_interfaces())
map.append(("%s:%d" % (self.targetlist,self.port),"0","*"))
self.print_entries(map)
def _attach_LUN_bylunid(self, lunid):
if not self.attached:
raise xs_errors.XenError('SRUnavailable')
connected = []
for val in self.adapter:
if not self.pathdict.has_key(val):
continue
rec = self.pathdict[val]
path = os.path.join(rec['path'],"LUN%s" % lunid)
realpath = os.path.realpath(path)
host = self.adapter[val]
l = [realpath, host, 0, 0, lunid]
addDevice = True
if self.devs.has_key(realpath):
# if the device is stale remove it before adding again
real_SCSIid = None
try:
real_SCSIid = scsiutil.getSCSIid(realpath)
except:
pass
if real_SCSIid != None:
# make sure this is the same scsiid, if not remove the device
cur_scsibuspath = glob.glob('/dev/disk/by-scsibus/*-%s:0:0:%s' % (host,lunid))
cur_SCSIid = os.path.basename(cur_scsibuspath[0]).split("-")[0]
if cur_SCSIid != real_SCSIid:
# looks stale, remove it
scsiutil.scsi_dev_ctrl(l,"remove")
else:
util.SMlog("Not attaching LUNID %s for adapter %s"\
" since the device exists and the scsi id %s seems"\
" to be valid. " % (lunid, val, real_SCSIid))
addDevice = False
else:
# looks stale, remove it
scsiutil.scsi_dev_ctrl(l,"remove")
if addDevice:
# add the device
scsiutil.scsi_dev_ctrl(l,"add")
if not util.wait_for_path(path, MAX_LUNID_TIMEOUT):
util.SMlog("Unable to detect LUN attached to host on path [%s]" % path)
continue
connected.append(path)
return connected
def _attach_LUN_byserialid(self, serialid):
if not self.attached:
raise xs_errors.XenError('SRUnavailable')
connected = []
for val in self.adapter:
if not self.pathdict.has_key(val):
continue
rec = self.pathdict[val]
path = os.path.join(rec['path'],"SERIAL-%s" % serialid)
realpath = os.path.realpath(path)
if not self.devs.has_key(realpath):
if not util.wait_for_path(path, 5):
util.SMlog("Unable to detect LUN attached to host on serial path [%s]" % path)
continue
connected.append(path)
return connected
def _detach_LUN_bylunid(self, lunid, SCSIid):
if not self.attached:
raise xs_errors.XenError('SRUnavailable')
if self.mpath == 'true' and len(SCSIid):
self.mpathmodule.reset(SCSIid, True)
util.remove_mpathcount_field(self.session, self.host_ref, self.sr_ref, SCSIid)
for val in self.adapter:
if not self.pathdict.has_key(val):
continue
rec = self.pathdict[val]
path = os.path.join(rec['path'],"LUN%s" % lunid)
realpath = os.path.realpath(path)
if self.devs.has_key(realpath):
util.SMlog("Found key: %s" % realpath)
scsiutil.scsi_dev_ctrl(self.devs[realpath], 'remove')
# Wait for device to disappear
if not util.wait_for_nopath(realpath, MAX_LUNID_TIMEOUT):
util.SMlog("Device has not disappeared after %d seconds" % \
MAX_LUNID_TIMEOUT)
else:
util.SMlog("Device [%s,%s] disappeared" % (realpath,path))
def _attach_LUN_bySCSIid(self, SCSIid):
if not self.attached:
raise xs_errors.XenError('SRUnavailable')
path = self.mpathmodule.path(SCSIid)
if not util.pathexists(path):
self.refresh()
if not util.wait_for_path(path, MAX_TIMEOUT):
util.SMlog("Unable to detect LUN attached to host [%s]" \
% path)
return False
return True
# This function queries the session for the attached LUNs
def _loadvdis(self):
count = 0
if not os.path.exists(self.path):
return 0
for file in filter(self.match_lun, util.listdir(self.path)):
vdi_path = os.path.join(self.path,file)
LUNid = file.replace("LUN","")
uuid = scsiutil.gen_uuid_from_string(scsiutil.getuniqueserial(vdi_path))
obj = self.vdi(uuid)
obj._query(vdi_path, LUNid)
self.vdis[uuid] = obj
self.physical_size += obj.size
count += 1
return count
def refresh(self):
for val in self.adapter:
util.SMlog("Rescanning host adapter %s" % self.adapter[val])
scsiutil.rescan([self.adapter[val]])
# Helper function for LUN-per-VDI VDI.introduce
def _getLUNbySMconfig(self, sm_config):
if not sm_config.has_key('LUNid'):
raise xs_errors.XenError('VDIUnavailable')
LUNid = long(sm_config['LUNid'])
if not len(self._attach_LUN_bylunid(LUNid)):
raise xs_errors.XenError('VDIUnavailable')
return os.path.join(self.path,"LUN%d" % LUNid)
# This function takes an iSCSI device and populates it with
# a dictionary of available LUNs on that target.
def print_LUNs(self):
self.LUNs = {}
if os.path.exists(self.path):
for file in util.listdir(self.path):
if file.find("LUN") != -1 and file.find("_") == -1:
vdi_path = os.path.join(self.path,file)
LUNid = file.replace("LUN","")
obj = self.vdi(self.uuid)
obj._query(vdi_path, LUNid)
self.LUNs[obj.uuid] = obj
def print_entries(self, map):
dom = xml.dom.minidom.Document()
element = dom.createElement("iscsi-target-iqns")
dom.appendChild(element)
count = 0
for address,tpgt,iqn in map:
entry = dom.createElement('TGT')
element.appendChild(entry)
subentry = dom.createElement('Index')
entry.appendChild(subentry)
textnode = dom.createTextNode(str(count))
subentry.appendChild(textnode)
try:
# We always expect a port so this holds
# regardless of IP version
(addr, port) = address.rsplit(':', 1)
except:
addr = address
port = DEFAULT_PORT
subentry = dom.createElement('IPAddress')
entry.appendChild(subentry)
textnode = dom.createTextNode(str(addr))
subentry.appendChild(textnode)
if int(port) != DEFAULT_PORT:
subentry = dom.createElement('Port')
entry.appendChild(subentry)
textnode = dom.createTextNode(str(port))
subentry.appendChild(textnode)
subentry = dom.createElement('TargetIQN')
entry.appendChild(subentry)
textnode = dom.createTextNode(str(iqn))
subentry.appendChild(textnode)
count += 1
print >>sys.stderr,dom.toprettyxml()
def srlist_toxml(self, SRs):
dom = xml.dom.minidom.Document()
element = dom.createElement("SRlist")
dom.appendChild(element)
for val in SRs:
record = SRs[val]
entry = dom.createElement('SR')
element.appendChild(entry)
subentry = dom.createElement("UUID")
entry.appendChild(subentry)
textnode = dom.createTextNode(val)
subentry.appendChild(textnode)
subentry = dom.createElement("Target")
entry.appendChild(subentry)
textnode = dom.createTextNode(record['target'])
subentry.appendChild(textnode)
subentry = dom.createElement("TargetIQN")
entry.appendChild(subentry)
textnode = dom.createTextNode(record['targetIQN'])
subentry.appendChild(textnode)
return dom.toprettyxml()
def match_lun(self, s):
regex = re.compile("_")
if regex.search(s,0):
return False
regex = re.compile("LUN")
return regex.search(s, 0)
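# match_lun accepts names such as 'LUN0' or 'LUN12' and rejects anything
# containing an underscore (e.g. 'LUN0_1'), per the two regexes above.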
if __name__ == '__main__':
SRCommand.run(ISCSISR, DRIVER_INFO)
else:
SR.registerSR(ISCSISR)
| lgpl-2.1 |
indictranstech/phr-frappe | frappe/modules/__init__.py | 28 | 2998 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Utilities for using modules
"""
import frappe, os
import frappe.utils
from frappe import _
lower_case_files_for = ['DocType', 'Page', 'Report',
"Workflow", 'Module Def', 'Desktop Item', 'Workflow State', 'Workflow Action', 'Print Format']
def scrub(txt):
return frappe.scrub(txt)
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
ndt, ndn = dt, dn
if dt in lower_case_files_for:
ndt, ndn = scrub(dt), scrub(dn)
return ndt, ndn
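# Example (sketch, assuming frappe.scrub lowercases and underscore-joins):
#     scrub_dt_dn('DocType', 'Sales Invoice')  -> ('doctype', 'sales_invoice')
#     scrub_dt_dn('Custom Field', 'Foo')       -> ('Custom Field', 'Foo')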
def get_module_path(module):
"""Returns path of the given module"""
return frappe.get_module_path(module)
def get_doc_path(module, doctype, name):
dt, dn = scrub_dt_dn(doctype, name)
return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, force=True):
from frappe.modules.import_file import import_files
return import_files(module, dt, dn, force=force)
def export_doc(doctype, name, module=None):
"""write out a doc"""
from frappe.modules.export_file import write_document_file
if not module: module = frappe.db.get_value(doctype, name, 'module')
write_document_file(frappe.get_doc(doctype, name), module)
def get_doctype_module(doctype):
return frappe.db.get_value('DocType', doctype, 'module') or "core"
doctype_python_modules = {}
def load_doctype_module(doctype, module=None, prefix=""):
if not module:
module = get_doctype_module(doctype)
app = get_module_app(module)
key = (app, doctype, prefix)
if key not in doctype_python_modules:
doctype_python_modules[key] = frappe.get_module(get_module_name(doctype, module, prefix))
return doctype_python_modules[key]
def get_module_name(doctype, module, prefix="", app=None):
return '{app}.{module}.doctype.{doctype}.{prefix}{doctype}'.format(\
app = scrub(app or get_module_app(module)),
module = scrub(module),
doctype = scrub(doctype),
prefix=prefix)
def get_module_app(module):
return frappe.local.module_app[scrub(module)]
def get_app_publisher(module):
app = frappe.local.module_app[scrub(module)]
if not app:
frappe.throw(_("App not found"))
app_publisher = frappe.get_hooks(hook="app_publisher", app_name=app)[0]
return app_publisher
def make_boilerplate(template, doc, opts=None):
target_path = get_doc_path(doc.module, doc.doctype, doc.name)
template_name = template.replace("controller", scrub(doc.name))
target_file_path = os.path.join(target_path, template_name)
app_publisher = get_app_publisher(doc.module)
if not os.path.exists(target_file_path):
if not opts:
opts = {}
with open(target_file_path, 'w') as target:
with open(os.path.join(get_module_path("core"), "doctype", scrub(doc.doctype),
"boilerplate", template), 'r') as source:
target.write(source.read().format(app_publisher=app_publisher,
classname=doc.name.replace(" ", ""), doctype=doc.name, **opts))
| mit |
CollinsIchigo/hdx_2 | venv/lib/python2.7/site-packages/requests/packages/chardet/big5prober.py | 2931 | 1684 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import Big5DistributionAnalysis
from .mbcssm import Big5SMModel
class Big5Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(Big5SMModel)
self._mDistributionAnalyzer = Big5DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "Big5"
| mit |
chrisenytc/liv-api | api/controllers/cors.py | 1 | 1818 | # -*- coding: utf-8 -*-
""""
ProjectName: liv-api
Repo: https://github.com/chrisenytc/liv-api
Copyright (c) 2014 Christopher EnyTC
Licensed under the MIT license.
"""
# Dependencies
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def cors(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
    if origin is not None and not isinstance(origin, basestring):
        origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
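# Usage sketch (illustrative; the route, app and handler are hypothetical --
# note @cors sits below @app.route):
#     @app.route('/api/notes', methods=['GET', 'OPTIONS'])
#     @cors(origin='*', headers=['Content-Type'])
#     def list_notes():
#         return jsonify(notes=[])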
| mit |
kmonsoor/python-for-android | python3-alpha/python3-src/Tools/scripts/checkpyc.py | 49 | 2178 | #! /usr/bin/env python3
# Check that all ".pyc" files exist and are up-to-date
# Uses module 'os'
import sys
import os
from stat import ST_MTIME
import imp
# PEP 3147 compatibility (PYC Repository Directories)
cache_from_source = (imp.cache_from_source if hasattr(imp, 'get_tag') else
lambda path: path + 'c')
def main():
if len(sys.argv) > 1:
verbose = (sys.argv[1] == '-v')
silent = (sys.argv[1] == '-s')
else:
verbose = silent = False
MAGIC = imp.get_magic()
if not silent:
print('Using MAGIC word', repr(MAGIC))
for dirname in sys.path:
try:
names = os.listdir(dirname)
except os.error:
print('Cannot list directory', repr(dirname))
continue
if not silent:
print('Checking ', repr(dirname), '...')
for name in sorted(names):
if name.endswith('.py'):
name = os.path.join(dirname, name)
try:
st = os.stat(name)
except os.error:
print('Cannot stat', repr(name))
continue
if verbose:
print('Check', repr(name), '...')
name_c = cache_from_source(name)
try:
with open(name_c, 'rb') as f:
magic_str = f.read(4)
mtime_str = f.read(4)
except IOError:
print('Cannot open', repr(name_c))
continue
if magic_str != MAGIC:
print('Bad MAGIC word in ".pyc" file', end=' ')
print(repr(name_c))
continue
mtime = get_long(mtime_str)
if mtime in {0, -1}:
print('Bad ".pyc" file', repr(name_c))
elif mtime != st[ST_MTIME]:
print('Out-of-date ".pyc" file', end=' ')
print(repr(name_c))
def get_long(s):
if len(s) != 4:
return -1
return s[0] + (s[1] << 8) + (s[2] << 16) + (s[3] << 24)
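# get_long decodes a 4-byte little-endian value, e.g.
# get_long(b'\x01\x02\x00\x00') == 0x0201 == 513.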
if __name__ == '__main__':
main()
| apache-2.0 |
2ndQuadrant/ansible | test/units/modules/storage/netapp/test_netapp_e_auditlog.py | 68 | 10758 | # (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.modules.storage.netapp.netapp_e_auditlog import AuditLog
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
from units.compat import mock
class AuditLogTests(ModuleTestCase):
REQUIRED_PARAMS = {'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1'}
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_auditlog.request'
MAX_RECORDS_MAXIMUM = 50000
MAX_RECORDS_MINIMUM = 100
def _set_args(self, **kwargs):
module_args = self.REQUIRED_PARAMS.copy()
if kwargs is not None:
module_args.update(kwargs)
set_module_args(module_args)
def test_max_records_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM, 25000, self.MAX_RECORDS_MAXIMUM)
for max_records in max_records_set:
initial["max_records"] = max_records
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.max_records == max_records)
def test_max_records_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
max_records_set = (self.MAX_RECORDS_MINIMUM - 1, self.MAX_RECORDS_MAXIMUM + 1)
for max_records in max_records_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log max_records count must be between 100 and 50000"):
initial["max_records"] = max_records
self._set_args(**initial)
AuditLog()
def test_threshold_argument_pass(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (60, 75, 90)
for threshold in threshold_set:
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
audit_log = AuditLog()
self.assertTrue(audit_log.threshold == threshold)
def test_threshold_argument_fail(self):
"""Verify AuditLog arument's max_records and threshold upper and lower boundaries."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
threshold_set = (59, 91)
for threshold in threshold_set:
with self.assertRaisesRegexp(AnsibleFailJson, r"Audit-log percent threshold must be between 60 and 90"):
initial["threshold"] = threshold
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": False})):
AuditLog()
def test_is_proxy_pass(self):
"""Verify that True is returned when proxy is used to communicate with storage."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"api_url": "https://10.1.1.10/devmgr/v2"}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
self.assertTrue(audit_log.is_proxy())
def test_is_proxy_fail(self):
"""Verify that AnsibleJsonFail exception is thrown when exception occurs."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the webservices about information"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.is_proxy()
def test_get_configuration_pass(self):
"""Validate get configuration does not throw exception when normal request is returned."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
expected = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, expected)):
body = audit_log.get_configuration()
self.assertTrue(body == expected)
def test_get_configuration_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to retrieve the audit-log configuration!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.get_configuration()
def test_build_configuration_pass(self):
"""Validate configuration changes will force an update."""
response = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
changes = [{"max_records": 50000},
{"log_level": "all"},
{"full_policy": "preventSystemAccess"},
{"threshold": 75}]
for change in changes:
initial_with_changes = initial.copy()
initial_with_changes.update(change)
self._set_args(**initial_with_changes)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, return_value=(200, response)):
update = audit_log.build_configuration()
self.assertTrue(update)
def test_delete_log_messages_fail(self):
"""Verify AnsibleJsonFail exception is thrown."""
initial = {"max_records": 1000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to delete audit-log messages!"):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
audit_log.delete_log_messages()
def test_update_configuration_delete_pass(self):
"""Verify 422 and force successfully returns True."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": True}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with mock.patch(self.REQ_FUNC, side_effect=[(200, body),
(422, {u"invalidFieldsIfKnown": None,
u"errorMessage": u"Configuration change...",
u"localizedMessage": u"Configuration change...",
u"retcode": u"auditLogImmediateFullCondition",
u"codeType": u"devicemgrerror"}),
(200, None),
(200, None)]):
self.assertTrue(audit_log.update_configuration())
def test_update_configuration_delete_skip_fail(self):
"""Verify 422 and no force results in AnsibleJsonFail exception."""
body = {"auditLogMaxRecords": 1000,
"auditLogLevel": "writeOnly",
"auditLogFullPolicy": "overWrite",
"auditLogWarningThresholdPct": 90}
initial = {"max_records": 2000,
"log_level": "writeOnly",
"full_policy": "overWrite",
"threshold": 90,
"force": False}
self._set_args(**initial)
with mock.patch(self.REQ_FUNC, return_value=(200, {"runningAsProxy": True})):
audit_log = AuditLog()
with self.assertRaisesRegexp(AnsibleFailJson, r"Failed to update audit-log configuration!"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, body), Exception(422, {"errorMessage": "error"}),
(200, None), (200, None)]):
audit_log.update_configuration()
| gpl-3.0 |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py | 1005 | 92627 | #-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
bltn_open = builtins.open # Since 'open' is TarFile.open; this alias is what the code below calls
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
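# For example (illustrative values): stn("abc", 8, "ascii", "strict") yields
# b"abc\x00\x00\x00\x00\x00" (padded to 8 bytes with NULs), and nts()
# truncates at the first NUL: nts(b"abc\x00\x00", "ascii", "strict") == "abc".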
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
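# For example (illustrative values): itn(0o755) encodes as b"0000755\x00",
# seven octal digits plus a NUL, while a value too large for octal such as
# itn(8 ** 11, 12, GNU_FORMAT) falls back to the base-256 form: a leading
# 0o200 flag byte followed by eleven big-endian bytes.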
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = b""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
self.buf += data
x += len(data)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def seekable(self):
if not hasattr(self.fileobj, "seekable"):
# XXX gzip.GzipFile and bz2.BZ2File
return True
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
tarinfo.sparse)
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = b""
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
# XXX TextIOWrapper uses the read1() method.
read1 = read
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplemental information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that need to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
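# (The slice arithmetic locates the 8-byte chksum field at bytes 148-155:
# 512 - 364 == 148 and 512 - 357 == 155, so "%06o\0" fills the first seven
# bytes and the pre-filled space byte at offset 155 is preserved.)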
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
# Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
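# Worked example (illustrative): for keyword "path" and value "foo",
# l == 4 + 3 + 3 == 10 and the loop converges on p == 12, producing the
# 12-byte record b"12 path=foo\n" whose stated length counts the length
# digits, the separating space and the trailing newline.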
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
# Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
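# A minimal sketch of such a subclass (the b"Z" type and _proc_custom()
# name are purely illustrative):
#
#     class MyTarInfo(TarInfo):
#         def _proc_member(self, tarfile):
#             if self.type == b"Z":
#                 return self._proc_custom(tarfile)
#             return TarInfo._proc_member(self, tarfile)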
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
# these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
# implementations to store them as raw binary strings if the
# translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
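# A subclass can register an additional sub-constructor by extending this
# mapping, e.g. (a sketch; xzopen() is a hypothetical classmethod the
# subclass would need to define):
#
#     class XZTarFile(TarFile):
#         OPEN_METH = dict(TarFile.OPEN_METH, xz="xzopen")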
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes, and
# absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
           TarInfo object; if it returns None, the TarInfo object will be
           excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
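    # Usage sketch (illustrative; paths are placeholders): add a directory
    # tree while dropping editor backup files through the filter callback.
    #
    #   def skip_backups(tarinfo):
    #       if tarinfo.name.endswith("~"):
    #           return None          # returning None excludes the member
    #       return tarinfo
    #
    #   tar = TarFile.open("backup.tar", "w")
    #   tar.add("project", filter=skip_backups)
    #   tar.close()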
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
           On Windows platforms, `fileobj' should always be opened with mode
           'rb' so that the amount of data read matches tarinfo.size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
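    # Usage sketch (illustrative; "notes.txt" is a placeholder): the
    # gettarinfo()/addfile() pair allows the stored metadata to be edited
    # before the member is written.
    #
    #   tar = TarFile.open("backup.tar", "w")
    #   tarinfo = tar.gettarinfo("notes.txt", arcname="docs/notes.txt")
    #   tarinfo.uname = tarinfo.gname = "root"
    #   f = bltn_open("notes.txt", "rb")
    #   tar.addfile(tarinfo, f)
    #   f.close()
    #   tar.close()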
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
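    # Usage sketch (illustrative; names are placeholders): restrict
    # extraction to members below a prefix by prefiltering getmembers().
    #
    #   tar = TarFile.open("backup.tar")
    #   wanted = [m for m in tar.getmembers() if m.name.startswith("data/")]
    #   tar.extractall(path="restore", members=wanted)
    #   tar.close()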
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
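    # Usage sketch (illustrative; the member name is a placeholder): read
    # a member's data without writing anything to the filesystem.
    #
    #   tar = TarFile.open("backup.tar")
    #   f = tar.extractfile("docs/notes.txt")
    #   if f is not None:            # None for directories, devices, ...
    #       data = f.read()
    #   tar.close()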
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
        except symlink_exception:
            # Link could not be created (platform limitation); fall back to
            # extracting a copy of the referenced file, as the docstring
            # promises.
            try:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
            except KeyError:
                raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
           TarFile is opened for reading. Return None if no more members
           are available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter(object):
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
next = __next__ # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
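# Hedged demo (editor's addition, not part of the original module): report
# whether a path names a readable tar archive. "sample.tar" is a made-up
# default; pass a real path on the command line to try it.
if __name__ == "__main__":
    demo_name = "sample.tar"
    if len(sys.argv) > 1:
        demo_name = sys.argv[1]
    print("%s is a tar archive: %s" % (demo_name, is_tarfile(demo_name)))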
| apache-2.0 |
aavanian/bokeh | bokeh/sphinxext/collapsible_code_block.py | 14 | 2395 | """ Display code blocks in collapsible sections when outputting
to HTML.
This directive takes a heading to use for the collapsible code block::
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
This directive is identical to the standard ``code-block`` directive
that Sphinx supplies, with the addition of one new option:
heading : string
    A heading to put on the collapsible block. Clicking the heading
    expands or collapses the block.
Examples
--------
The inline example code above produces the following output:
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
"""
from __future__ import absolute_import
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
from os.path import basename
from sphinx.directives.code import CodeBlock
from .templates import CCB_PROLOGUE, CCB_EPILOGUE
class collapsible_code_block(nodes.General, nodes.Element):
pass
class CollapsibleCodeBlock(CodeBlock):
option_spec = CodeBlock.option_spec
option_spec.update(heading=unchanged)
def run(self):
env = self.state.document.settings.env
rst_source = self.state_machine.node.document['source']
rst_filename = basename(rst_source)
target_id = "%s.ccb-%d" % (rst_filename, env.new_serialno('bokeh-plot'))
target_id = target_id.replace(".", "-")
target_node = nodes.target('', '', ids=[target_id])
node = collapsible_code_block()
node['target_id'] = target_id
node['heading'] = self.options.get('heading', "Code")
cb = CodeBlock.run(self)
node.setup_child(cb[0])
node.children.append(cb[0])
return [target_node, node]
def html_visit_collapsible_code_block(self, node):
self.body.append(
CCB_PROLOGUE.render(
id=node['target_id'],
heading=node['heading']
)
)
def html_depart_collapsible_code_block(self, node):
self.body.append(CCB_EPILOGUE.render())
def setup(app):
app.add_node(
collapsible_code_block,
html=(
html_visit_collapsible_code_block,
html_depart_collapsible_code_block
)
)
app.add_directive('collapsible-code-block', CollapsibleCodeBlock)
| bsd-3-clause |
TeachAtTUM/edx-platform | common/djangoapps/track/tests/test_contexts.py | 24 | 2109 | # pylint: disable=missing-docstring
from unittest import TestCase
import ddt
from track import contexts
@ddt.ddt
class TestContexts(TestCase):
COURSE_ID = 'test/course_name/course_run'
SPLIT_COURSE_ID = 'course-v1:test+course_name+course_run'
ORG_ID = 'test'
@ddt.data(
(COURSE_ID, ''),
(COURSE_ID, '/more/stuff'),
(COURSE_ID, '?format=json'),
(SPLIT_COURSE_ID, ''),
(SPLIT_COURSE_ID, '/more/stuff'),
(SPLIT_COURSE_ID, '?format=json')
)
@ddt.unpack
def test_course_id_from_url(self, course_id, postfix):
url = 'http://foo.bar.com/courses/{}{}'.format(course_id, postfix)
self.assert_parses_course_id_from_url(url, course_id)
def assert_parses_course_id_from_url(self, format_string, course_id):
self.assertEquals(
contexts.course_context_from_url(format_string.format(course_id=course_id)),
{
'course_id': course_id,
'org_id': self.ORG_ID
}
)
def test_no_course_id_in_url(self):
self.assert_empty_context_for_url('http://foo.bar.com/dashboard')
def assert_empty_context_for_url(self, url):
self.assertEquals(
contexts.course_context_from_url(url),
{
'course_id': '',
'org_id': ''
}
)
@ddt.data('', '/', '/?', '?format=json')
def test_malformed_course_id(self, postfix):
self.assert_empty_context_for_url('http://foo.bar.com/courses/test/course_name{}'.format(postfix))
@ddt.data(
(COURSE_ID, ''),
(COURSE_ID, '/more/stuff'),
(COURSE_ID, '?format=json'),
(SPLIT_COURSE_ID, ''),
(SPLIT_COURSE_ID, '/more/stuff'),
(SPLIT_COURSE_ID, '?format=json')
)
@ddt.unpack
def test_course_id_later_in_url(self, course_id, postfix):
url = 'http://foo.bar.com/x/y/z/courses/{}{}'.format(course_id, postfix)
self.assert_parses_course_id_from_url(url, course_id)
def test_no_url(self):
self.assert_empty_context_for_url(None)
| agpl-3.0 |
metacloud/percona-xtrabackup | percona-server-5.1-xtrabackup/python-for-subunit2junitxml/testtools/compat.py | 42 | 10756 | # Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
"""Compatibility support for python 2 and 3."""
__metaclass__ = type
__all__ = [
'_b',
'_u',
'advance_iterator',
'str_is_unicode',
'StringIO',
'BytesIO',
'unicode_output_stream',
]
import codecs
import linecache
import locale
import os
import re
import sys
import traceback
from testtools.helpers import try_imports
StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
BytesIO = try_imports(['io.BytesIO', 'BytesIO'])
__u_doc = """A function version of the 'u' prefix.
This is needed because the u prefix is not usable in Python 3 but is required
in Python 2 to get a unicode object.
To migrate code that was written as u'\u1234' in Python 2 to 2+3 change
it to be _u('\u1234'). The Python 3 interpreter will decode it
appropriately and the no-op _u for Python 3 lets it through, in Python
2 we then call unicode-escape in the _u function.
"""
if sys.version_info > (3, 0):
def _u(s):
return s
_r = ascii
def _b(s):
"""A byte literal."""
return s.encode("latin-1")
advance_iterator = next
def istext(x):
return isinstance(x, str)
def classtypes():
return (type,)
str_is_unicode = True
else:
def _u(s):
# The double replace mangling going on prepares the string for
# unicode-escape - \foo is preserved, \u and \U are decoded.
return (s.replace("\\", "\\\\").replace("\\\\u", "\\u")
.replace("\\\\U", "\\U").decode("unicode-escape"))
_r = repr
def _b(s):
return s
advance_iterator = lambda it: it.next()
def istext(x):
return isinstance(x, basestring)
def classtypes():
import types
return (type, types.ClassType)
str_is_unicode = sys.platform == "cli"
_u.__doc__ = __u_doc
if sys.version_info > (2, 5):
all = all
_error_repr = BaseException.__repr__
def isbaseexception(exception):
"""Return whether exception inherits from BaseException only"""
return (isinstance(exception, BaseException)
and not isinstance(exception, Exception))
else:
def all(iterable):
"""If contents of iterable all evaluate as boolean True"""
for obj in iterable:
if not obj:
return False
return True
def _error_repr(exception):
"""Format an exception instance as Python 2.5 and later do"""
return exception.__class__.__name__ + repr(exception.args)
def isbaseexception(exception):
"""Return whether exception would inherit from BaseException only
This approximates the hierarchy in Python 2.5 and later, compare the
difference between the diagrams at the bottom of the pages:
<http://docs.python.org/release/2.4.4/lib/module-exceptions.html>
<http://docs.python.org/release/2.5.4/lib/module-exceptions.html>
"""
return isinstance(exception, (KeyboardInterrupt, SystemExit))
def unicode_output_stream(stream):
"""Get wrapper for given stream that writes any unicode without exception
    Characters that can't be coerced to the encoding of the stream, or
    'ascii' if a valid encoding is not found, will be replaced. The
    original stream may be returned when a wrapper is deemed unnecessary.
The wrapper only allows unicode to be written, not non-ascii bytestrings,
which is a good thing to ensure sanity and sanitation.
"""
if sys.platform == "cli":
# Best to never encode before writing in IronPython
return stream
try:
writer = codecs.getwriter(stream.encoding or "")
except (AttributeError, LookupError):
# GZ 2010-06-16: Python 3 StringIO ends up here, but probably needs
# different handling as it doesn't want bytestrings
return codecs.getwriter("ascii")(stream, "replace")
if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
# The current stream has a unicode encoding so no error handler is needed
return stream
if sys.version_info > (3, 0):
# Python 3 doesn't seem to make this easy, handle a common case
try:
return stream.__class__(stream.buffer, stream.encoding, "replace",
stream.newlines, stream.line_buffering)
except AttributeError:
pass
return writer(stream, "replace")
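# Hedged usage sketch (editor's addition, not part of testtools): wrap
# sys.stdout so that arbitrary unicode can be written without raising;
# the sample text is illustrative only.
def _demo_unicode_output_stream():  # pragma: no cover
    out = unicode_output_stream(sys.stdout)
    out.write(_u('caf\u00e9\n'))  # unencodable characters are replaced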
# The default source encoding is actually "iso-8859-1" until Python 2.5 but
# using non-ascii causes a deprecation warning in 2.4 and it's cleaner to
# treat all versions the same way
_default_source_encoding = "ascii"
# Pattern specified in <http://www.python.org/dev/peps/pep-0263/>
_cookie_search=re.compile("coding[:=]\s*([-\w.]+)").search
def _detect_encoding(lines):
"""Get the encoding of a Python source file from a list of lines as bytes
This function does less than tokenize.detect_encoding added in Python 3 as
    it does not attempt to raise a SyntaxError when the interpreter would; it
    just wants the encoding of a source file Python has already compiled and
determined is valid.
"""
if not lines:
return _default_source_encoding
if lines[0].startswith("\xef\xbb\xbf"):
# Source starting with UTF-8 BOM is either UTF-8 or a SyntaxError
return "utf-8"
# Only the first two lines of the source file are examined
magic = _cookie_search("".join(lines[:2]))
if magic is None:
return _default_source_encoding
encoding = magic.group(1)
try:
codecs.lookup(encoding)
except LookupError:
# Some codecs raise something other than LookupError if they don't
# support the given error handler, but not the text ones that could
# actually be used for Python source code
return _default_source_encoding
return encoding
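# Hedged illustration (editor's addition): _detect_encoding() only honours
# a PEP 263 coding cookie in the first two lines; the source fragments
# below are made up.
def _demo_detect_encoding():  # pragma: no cover
    assert _detect_encoding(["# -*- coding: utf-8 -*-\n"]) == "utf-8"
    assert _detect_encoding(["x = 1\n"]) == _default_source_encoding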
class _EncodingTuple(tuple):
"""A tuple type that can have an encoding attribute smuggled on"""
def _get_source_encoding(filename):
"""Detect, cache and return the encoding of Python source at filename"""
try:
return linecache.cache[filename].encoding
except (AttributeError, KeyError):
encoding = _detect_encoding(linecache.getlines(filename))
if filename in linecache.cache:
newtuple = _EncodingTuple(linecache.cache[filename])
newtuple.encoding = encoding
linecache.cache[filename] = newtuple
return encoding
def _get_exception_encoding():
"""Return the encoding we expect messages from the OS to be encoded in"""
if os.name == "nt":
# GZ 2010-05-24: Really want the codepage number instead, the error
# handling of standard codecs is more deterministic
return "mbcs"
# GZ 2010-05-23: We need this call to be after initialisation, but there's
# no benefit in asking more than once as it's a global
# setting that can change after the message is formatted.
return locale.getlocale(locale.LC_MESSAGES)[1] or "ascii"
def _exception_to_text(evalue):
"""Try hard to get a sensible text value out of an exception instance"""
try:
return unicode(evalue)
except KeyboardInterrupt:
raise
except:
# Apparently this is what traceback._some_str does. Sigh - RBC 20100623
pass
try:
return str(evalue).decode(_get_exception_encoding(), "replace")
except KeyboardInterrupt:
raise
except:
# Apparently this is what traceback._some_str does. Sigh - RBC 20100623
pass
# Okay, out of ideas, let higher level handle it
return None
# GZ 2010-05-23: This function is huge and horrible and I welcome suggestions
# on the best way to break it up
_TB_HEADER = _u('Traceback (most recent call last):\n')
def _format_exc_info(eclass, evalue, tb, limit=None):
"""Format a stack trace and the exception information as unicode
Compatibility function for Python 2 which ensures each component of a
traceback is correctly decoded according to its origins.
Based on traceback.format_exception and related functions.
"""
fs_enc = sys.getfilesystemencoding()
if tb:
list = [_TB_HEADER]
extracted_list = []
for filename, lineno, name, line in traceback.extract_tb(tb, limit):
extracted_list.append((
filename.decode(fs_enc, "replace"),
lineno,
name.decode("ascii", "replace"),
line and line.decode(
_get_source_encoding(filename), "replace")))
list.extend(traceback.format_list(extracted_list))
else:
list = []
if evalue is None:
# Is a (deprecated) string exception
list.append((eclass + "\n").decode("ascii", "replace"))
return list
if isinstance(evalue, SyntaxError):
# Avoid duplicating the special formatting for SyntaxError here,
# instead create a new instance with unicode filename and line
# Potentially gives duff spacing, but that's a pre-existing issue
try:
msg, (filename, lineno, offset, line) = evalue
except (TypeError, ValueError):
pass # Strange exception instance, fall through to generic code
else:
# Errors during parsing give the line from buffer encoded as
# latin-1 or utf-8 or the encoding of the file depending on the
# coding and whether the patch for issue #1031213 is applied, so
# give up on trying to decode it and just read the file again
if line:
bytestr = linecache.getline(filename, lineno)
if bytestr:
if lineno == 1 and bytestr.startswith("\xef\xbb\xbf"):
bytestr = bytestr[3:]
line = bytestr.decode(
_get_source_encoding(filename), "replace")
del linecache.cache[filename]
else:
line = line.decode("ascii", "replace")
if filename:
filename = filename.decode(fs_enc, "replace")
evalue = eclass(msg, (filename, lineno, offset, line))
list.extend(traceback.format_exception_only(eclass, evalue))
return list
sclass = eclass.__name__
svalue = _exception_to_text(evalue)
if svalue:
list.append("%s: %s\n" % (sclass, svalue))
elif svalue is None:
# GZ 2010-05-24: Not a great fallback message, but keep for the moment
list.append("%s: <unprintable %s object>\n" % (sclass, sclass))
else:
list.append("%s\n" % sclass)
return list
| gpl-2.0 |
johan--/python_koans | python2/runner/path_to_enlightenment.py | 75 | 5088 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The path to enlightenment starts with the following:
import unittest
from koans.about_asserts import AboutAsserts
from koans.about_strings import AboutStrings
from koans.about_none import AboutNone
from koans.about_lists import AboutLists
from koans.about_list_assignments import AboutListAssignments
from koans.about_dictionaries import AboutDictionaries
from koans.about_string_manipulation import AboutStringManipulation
from koans.about_tuples import AboutTuples
from koans.about_methods import AboutMethods
from koans.about_control_statements import AboutControlStatements
from koans.about_true_and_false import AboutTrueAndFalse
from koans.about_sets import AboutSets
from koans.about_triangle_project import AboutTriangleProject
from koans.about_exceptions import AboutExceptions
from koans.about_triangle_project2 import AboutTriangleProject2
from koans.about_iteration import AboutIteration
from koans.about_comprehension import AboutComprehension
from koans.about_generators import AboutGenerators
from koans.about_lambdas import AboutLambdas
from koans.about_scoring_project import AboutScoringProject
from koans.about_classes import AboutClasses
from koans.about_new_style_classes import AboutNewStyleClasses
from koans.about_with_statements import AboutWithStatements
from koans.about_monkey_patching import AboutMonkeyPatching
from koans.about_dice_project import AboutDiceProject
from koans.about_method_bindings import AboutMethodBindings
from koans.about_decorating_with_functions import AboutDecoratingWithFunctions
from koans.about_decorating_with_classes import AboutDecoratingWithClasses
from koans.about_inheritance import AboutInheritance
from koans.about_multiple_inheritance import AboutMultipleInheritance
from koans.about_regex import AboutRegex
from koans.about_scope import AboutScope
from koans.about_modules import AboutModules
from koans.about_packages import AboutPackages
from koans.about_class_attributes import AboutClassAttributes
from koans.about_attribute_access import AboutAttributeAccess
from koans.about_deleting_objects import AboutDeletingObjects
from koans.about_proxy_object_project import *
from koans.about_extra_credit import AboutExtraCredit
def koans():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
loader.sortTestMethodsUsing = None
suite.addTests(loader.loadTestsFromTestCase(AboutAsserts))
suite.addTests(loader.loadTestsFromTestCase(AboutStrings))
suite.addTests(loader.loadTestsFromTestCase(AboutNone))
suite.addTests(loader.loadTestsFromTestCase(AboutLists))
suite.addTests(loader.loadTestsFromTestCase(AboutListAssignments))
suite.addTests(loader.loadTestsFromTestCase(AboutDictionaries))
suite.addTests(loader.loadTestsFromTestCase(AboutStringManipulation))
suite.addTests(loader.loadTestsFromTestCase(AboutTuples))
suite.addTests(loader.loadTestsFromTestCase(AboutMethods))
suite.addTests(loader.loadTestsFromTestCase(AboutControlStatements))
suite.addTests(loader.loadTestsFromTestCase(AboutTrueAndFalse))
suite.addTests(loader.loadTestsFromTestCase(AboutSets))
suite.addTests(loader.loadTestsFromTestCase(AboutTriangleProject))
suite.addTests(loader.loadTestsFromTestCase(AboutExceptions))
suite.addTests(loader.loadTestsFromTestCase(AboutTriangleProject2))
suite.addTests(loader.loadTestsFromTestCase(AboutIteration))
suite.addTests(loader.loadTestsFromTestCase(AboutComprehension))
suite.addTests(loader.loadTestsFromTestCase(AboutGenerators))
suite.addTests(loader.loadTestsFromTestCase(AboutLambdas))
suite.addTests(loader.loadTestsFromTestCase(AboutScoringProject))
suite.addTests(loader.loadTestsFromTestCase(AboutClasses))
suite.addTests(loader.loadTestsFromTestCase(AboutNewStyleClasses))
suite.addTests(loader.loadTestsFromTestCase(AboutWithStatements))
suite.addTests(loader.loadTestsFromTestCase(AboutMonkeyPatching))
suite.addTests(loader.loadTestsFromTestCase(AboutDiceProject))
suite.addTests(loader.loadTestsFromTestCase(AboutMethodBindings))
suite.addTests(loader.loadTestsFromTestCase(AboutDecoratingWithFunctions))
suite.addTests(loader.loadTestsFromTestCase(AboutDecoratingWithClasses))
suite.addTests(loader.loadTestsFromTestCase(AboutInheritance))
suite.addTests(loader.loadTestsFromTestCase(AboutMultipleInheritance))
suite.addTests(loader.loadTestsFromTestCase(AboutScope))
suite.addTests(loader.loadTestsFromTestCase(AboutModules))
suite.addTests(loader.loadTestsFromTestCase(AboutPackages))
suite.addTests(loader.loadTestsFromTestCase(AboutClassAttributes))
suite.addTests(loader.loadTestsFromTestCase(AboutAttributeAccess))
suite.addTests(loader.loadTestsFromTestCase(AboutDeletingObjects))
suite.addTests(loader.loadTestsFromTestCase(AboutProxyObjectProject))
suite.addTests(loader.loadTestsFromTestCase(TelevisionTest))
suite.addTests(loader.loadTestsFromTestCase(AboutExtraCredit))
suite.addTests(loader.loadTestsFromTestCase(AboutRegex))
return suite
| mit |
rangadi/beam | sdks/python/apache_beam/runners/dataflow/internal/clients/dataflow/message_matchers_test.py | 7 | 2820 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import unittest
import hamcrest as hc
import apache_beam.runners.dataflow.internal.clients.dataflow as dataflow
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.runners.dataflow.internal.clients.dataflow import message_matchers
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py import base_api
except ImportError:
base_api = None
# pylint: enable=wrong-import-order, wrong-import-position
@unittest.skipIf(base_api is None, 'GCP dependencies are not installed')
class TestMatchers(unittest.TestCase):
def test_structured_name_matcher_basic(self):
metric_name = dataflow.MetricStructuredName()
metric_name.name = 'metric1'
metric_name.origin = 'origin2'
matcher = message_matchers.MetricStructuredNameMatcher(
name='metric1',
origin='origin2')
hc.assert_that(metric_name, hc.is_(matcher))
with self.assertRaises(AssertionError):
matcher = message_matchers.MetricStructuredNameMatcher(
name='metric1',
origin='origin1')
hc.assert_that(metric_name, hc.is_(matcher))
def test_metric_update_basic(self):
metric_update = dataflow.MetricUpdate()
metric_update.name = dataflow.MetricStructuredName()
metric_update.name.name = 'metric1'
metric_update.name.origin = 'origin1'
metric_update.cumulative = False
metric_update.kind = 'sum'
metric_update.scalar = to_json_value(1, with_type=True)
name_matcher = message_matchers.MetricStructuredNameMatcher(
name='metric1',
origin='origin1')
matcher = message_matchers.MetricUpdateMatcher(
name=name_matcher,
kind='sum',
scalar=1)
hc.assert_that(metric_update, hc.is_(matcher))
with self.assertRaises(AssertionError):
matcher.kind = 'suma'
hc.assert_that(metric_update, hc.is_(matcher))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
drjeep/django | tests/auth_tests/test_validators.py | 229 | 7546 | from __future__ import unicode_literals
import os
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import (
CommonPasswordValidator, MinimumLengthValidator, NumericPasswordValidator,
UserAttributeSimilarityValidator, get_default_password_validators,
get_password_validators, password_changed,
password_validators_help_text_html, password_validators_help_texts,
validate_password,
)
from django.core.exceptions import ValidationError
from django.test import TestCase, override_settings
from django.utils._os import upath
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
class PasswordValidationTest(TestCase):
def test_get_default_password_validators(self):
validators = get_default_password_validators()
self.assertEqual(len(validators), 2)
self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
self.assertEqual(validators[1].__class__.__name__, 'MinimumLengthValidator')
self.assertEqual(validators[1].min_length, 12)
def test_get_password_validators_custom(self):
validator_config = [{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'}]
validators = get_password_validators(validator_config)
self.assertEqual(len(validators), 1)
self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
self.assertEqual(get_password_validators([]), [])
def test_validate_password(self):
self.assertIsNone(validate_password('sufficiently-long'))
msg_too_short = 'This password is too short. It must contain at least 12 characters.'
with self.assertRaises(ValidationError) as cm:
validate_password('django4242')
self.assertEqual(cm.exception.messages, [msg_too_short])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
with self.assertRaises(ValidationError) as cm:
validate_password('password')
self.assertEqual(cm.exception.messages, ['This password is too common.', msg_too_short])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
self.assertIsNone(validate_password('password', password_validators=[]))
def test_password_changed(self):
self.assertIsNone(password_changed('password'))
def test_password_validators_help_texts(self):
help_texts = password_validators_help_texts()
self.assertEqual(len(help_texts), 2)
self.assertIn('12 characters', help_texts[1])
self.assertEqual(password_validators_help_texts(password_validators=[]), [])
def test_password_validators_help_text_html(self):
help_text = password_validators_help_text_html()
self.assertEqual(help_text.count('<li>'), 2)
self.assertIn('12 characters', help_text)
class MinimumLengthValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is too short. It must contain at least %d characters."
self.assertIsNone(MinimumLengthValidator().validate('12345678'))
self.assertIsNone(MinimumLengthValidator(min_length=3).validate('123'))
with self.assertRaises(ValidationError) as cm:
MinimumLengthValidator().validate('1234567')
self.assertEqual(cm.exception.messages, [expected_error % 8])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
with self.assertRaises(ValidationError) as cm:
MinimumLengthValidator(min_length=3).validate('12')
self.assertEqual(cm.exception.messages, [expected_error % 3])
def test_help_text(self):
self.assertEqual(
MinimumLengthValidator().get_help_text(),
"Your password must contain at least 8 characters."
)
class UserAttributeSimilarityValidatorTest(TestCase):
def test_validate(self):
user = User.objects.create(
username='testclient', first_name='Test', last_name='Client', email='testclient@example.com',
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
)
expected_error = "The password is too similar to the %s."
self.assertIsNone(UserAttributeSimilarityValidator().validate('testclient'))
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate('testclient', user=user),
self.assertEqual(cm.exception.messages, [expected_error % "username"])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_similar')
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate('example.com', user=user),
self.assertEqual(cm.exception.messages, [expected_error % "email address"])
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator(
user_attributes=['first_name'],
max_similarity=0.3,
).validate('testclient', user=user)
self.assertEqual(cm.exception.messages, [expected_error % "first name"])
self.assertIsNone(
UserAttributeSimilarityValidator(user_attributes=['first_name']).validate('testclient', user=user)
)
def test_help_text(self):
self.assertEqual(
UserAttributeSimilarityValidator().get_help_text(),
"Your password can't be too similar to your other personal information."
)
class CommonPasswordValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is too common."
self.assertIsNone(CommonPasswordValidator().validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
CommonPasswordValidator().validate('godzilla')
self.assertEqual(cm.exception.messages, [expected_error])
def test_validate_custom_list(self):
path = os.path.join(os.path.dirname(os.path.realpath(upath(__file__))), 'common-passwords-custom.txt')
validator = CommonPasswordValidator(password_list_path=path)
expected_error = "This password is too common."
self.assertIsNone(validator.validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
validator.validate('from-my-custom-list')
self.assertEqual(cm.exception.messages, [expected_error])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
def test_help_text(self):
self.assertEqual(
CommonPasswordValidator().get_help_text(),
"Your password can't be a commonly used password."
)
class NumericPasswordValidatorTest(TestCase):
def test_validate(self):
expected_error = "This password is entirely numeric."
self.assertIsNone(NumericPasswordValidator().validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
NumericPasswordValidator().validate('42424242')
self.assertEqual(cm.exception.messages, [expected_error])
self.assertEqual(cm.exception.error_list[0].code, 'password_entirely_numeric')
def test_help_text(self):
self.assertEqual(
NumericPasswordValidator().get_help_text(),
"Your password can't be entirely numeric."
)
| bsd-3-clause |
midma101/m0du1ar | .venv/lib/python2.7/site-packages/Crypto/SelfTest/Random/test_rpoolcompat.py | 131 | 2030 | # -*- coding: utf-8 -*-
#
# SelfTest/Random/test_rpoolcompat.py: Self-test for the RandomPool wrapper
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for the Crypto.Util.randpool.RandomPool wrapper class"""
__revision__ = "$Id$"
import sys
import unittest
class SimpleTest(unittest.TestCase):
def runTest(self):
"""Crypto.Util.randpool.RandomPool"""
# Import the winrandom module and try to use it
from Crypto.Util.randpool import RandomPool
sys.stderr.write("SelfTest: You can ignore the RandomPool_DeprecationWarning that follows.\n")
rpool = RandomPool()
x = rpool.get_bytes(16)
y = rpool.get_bytes(16)
self.assertNotEqual(x, y)
self.assertNotEqual(rpool.entropy, 0)
rpool.randomize()
rpool.stir('foo')
rpool.add_event('foo')
def get_tests(config={}):
return [SimpleTest()]
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| mit |
mrquim/mrquimrepo | script.module.youtube.dl/lib/youtube_dl/extractor/europa.py | 58 | 3417 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
orderedSet,
parse_duration,
qualities,
unified_strdate,
xpath_text
)
class EuropaIE(InfoExtractor):
_VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
_TESTS = [{
'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
'md5': '574f080699ddd1e19a675b0ddf010371',
'info_dict': {
'id': 'I107758',
'ext': 'mp4',
'title': 'TRADE - Wikileaks on TTIP',
'description': 'NEW LIVE EC Midday press briefing of 11/08/2015',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20150811',
'duration': 34,
'view_count': int,
'formats': 'mincount:3',
}
}, {
'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786',
'only_matching': True,
}, {
'url': 'http://ec.europa.eu/avservices/audio/audioDetails.cfm?ref=I-109295&sitelang=en',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
playlist = self._download_xml(
'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID=%s' % video_id, video_id)
def get_item(type_, preference):
items = {}
for item in playlist.findall('./info/%s/item' % type_):
lang, label = xpath_text(item, 'lg', default=None), xpath_text(item, 'label', default=None)
if lang and label:
items[lang] = label.strip()
for p in preference:
if items.get(p):
return items[p]
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
preferred_lang = query.get('sitelang', ('en', ))[0]
preferred_langs = orderedSet((preferred_lang, 'en', 'int'))
title = get_item('title', preferred_langs) or video_id
description = get_item('description', preferred_langs)
        thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))
language_preference = qualities(preferred_langs[::-1])
formats = []
for file_ in playlist.findall('./files/file'):
video_url = xpath_text(file_, './url')
if not video_url:
continue
lang = xpath_text(file_, './lg')
formats.append({
'url': video_url,
'format_id': lang,
'format_note': xpath_text(file_, './lglabel'),
'language_preference': language_preference(lang)
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
            'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'formats': formats
}
| gpl-2.0 |
micromagnetics/magnum.fe | site-packages/magnumfe/common/cache.py | 2 | 2662 | # Copyright (C) 2011-2015 Claas Abert
#
# This file is part of magnum.fe.
#
# magnum.fe is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# magnum.fe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with magnum.fe. If not, see <http://www.gnu.org/licenses/>.
#
# Last modified by Claas Abert, 2015-02-16
from __future__ import absolute_import
__all__ = ["Cache"]
class Cache(object):
def __init__(self, *keys, **kwargs):
"""
Simple cache class that is aware of changes in the state class.
*Arguments*
*keys ([:class:`string`])
Names of state attributes to be monitored.
**kwargs
Any attributes to be attached to the cache object.
*Example*
.. code-block:: python
# Initialize cache object that tracks changes in the m and t
# attribute of the state class.
cache = Cache("t", "m")
# Check for cache hit. In case of cache miss the following
# block is expected to update the cache. The hash keys of the
# current state are saved.
if cache.requires_update(state):
cache.some_var = expensive_operation(state.t, state.m)
if cache.requires_update(state):
# Won't execute since cache is up to date
"""
self._uuid = None
self._keys = keys
for key, val in kwargs.items():
setattr(self, key, val)
def requires_update(self, state):
"""
    Checks if the cache is expired with respect to the current state and
saves the new state. Has to be followed by a block that updates
the cache.
*Arguments*
state (:class:`State`)
The current simulation state
*Returns*
:code:`True` if cache is expired, :code:`False` otherwise
"""
if self._uuid != state.uuid(*self._keys):
self._uuid = state.uuid(*self._keys)
return True
else:
return False
def wrap_func(self, func):
"""
    Wraps the given function in a lambda that returns the function's
    result together with the caching keys, as required by the state class.
*Arguments*
func
Function to be wrapped
*Returns*
Wrapped function
"""
return lambda state: (func(state),) + tuple(self._keys)
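  # Illustrative sketch (editor's addition, not part of magnum.fe):
  # wrap_func() pairs a callable with the cache keys so the state class
  # can invalidate derived quantities.
  #
  #   cache = Cache("t", "m")
  #   wrapped = cache.wrap_func(lambda state: expensive_operation(state.t, state.m))
  #   value = wrapped(state)[0]    # result plus the caching keys ("t", "m")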
| lgpl-3.0 |
azunite/gyp | test/mac/gyptest-objc-gc.py | 165 | 1586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that GC objc settings are handled correctly.
"""
import TestGyp
import TestMac
import sys
if sys.platform == 'darwin':
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
match = lambda a, b: True)
# Xcode 5.1 removed support for garbage-collection:
# error: garbage collection is no longer supported
if TestMac.Xcode.Version() < '0510':
CHDIR = 'objc-gc'
test.run_gyp('test.gyp', chdir=CHDIR)
build_error_code = {
'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
'make': 2,
'ninja': 1,
}[test.format]
test.build('test.gyp', 'gc_exe_fails', chdir=CHDIR, status=build_error_code)
test.build(
'test.gyp', 'gc_off_exe_req_lib', chdir=CHDIR, status=build_error_code)
test.build('test.gyp', 'gc_req_exe', chdir=CHDIR)
test.run_built_executable('gc_req_exe', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_exe_req_lib', chdir=CHDIR)
test.run_built_executable(
'gc_exe_req_lib', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_exe', chdir=CHDIR)
test.run_built_executable('gc_exe', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_off_exe', chdir=CHDIR)
test.run_built_executable('gc_off_exe', chdir=CHDIR, stdout="gc on: 0\n")
test.pass_test()
| bsd-3-clause |
hynekcer/django | tests/template_tests/filter_tests/test_linenumbers.py | 331 | 1992 | from django.template.defaultfilters import linenumbers
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LinenumbersTests(SimpleTestCase):
"""
The contents of "linenumbers" is escaped according to the current
autoescape setting.
"""
@setup({'linenumbers01': '{{ a|linenumbers }} {{ b|linenumbers }}'})
def test_linenumbers01(self):
output = self.engine.render_to_string(
'linenumbers01',
{'a': 'one\n<two>\nthree', 'b': mark_safe('one\n<two>\nthree')},
)
self.assertEqual(output, '1. one\n2. <two>\n3. three '
'1. one\n2. <two>\n3. three')
@setup({'linenumbers02':
'{% autoescape off %}{{ a|linenumbers }} {{ b|linenumbers }}{% endautoescape %}'})
def test_linenumbers02(self):
output = self.engine.render_to_string(
'linenumbers02',
{'a': 'one\n<two>\nthree', 'b': mark_safe('one\n<two>\nthree')},
)
self.assertEqual(output, '1. one\n2. <two>\n3. three '
'1. one\n2. <two>\n3. three')
class FunctionTests(SimpleTestCase):
def test_linenumbers(self):
self.assertEqual(linenumbers('line 1\nline 2'), '1. line 1\n2. line 2')
def test_linenumbers2(self):
self.assertEqual(
linenumbers('\n'.join(['x'] * 10)),
'01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. x\n08. x\n09. x\n10. x',
)
def test_non_string_input(self):
self.assertEqual(linenumbers(123), '1. 123')
def test_autoescape(self):
self.assertEqual(
linenumbers('foo\n<a>bar</a>\nbuz'),
'1. foo\n2. <a>bar</a>\n3. buz',
)
def test_autoescape_off(self):
self.assertEqual(
linenumbers('foo\n<a>bar</a>\nbuz', autoescape=False),
'1. foo\n2. <a>bar</a>\n3. buz'
)
| bsd-3-clause |
KaiSzuttor/espresso | src/python/espressomd/io/writer/vtf.py | 5 | 3324 | # Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def vtf_pid_map(system, types='all'):
"""
Generates a VTF particle index map to ESPResSo ``id``.
This fills the gap for particle ID's as required by VMD
Parameters
----------
system: espressomd.System() object
types : :obj:`str`
Specifies the particle types. The id mapping depends on which
particles are going to be printed. This should be the same as
        the one used in writevsf() and writevcf().
Returns
-------
    dict: A dictionary where the values are the VTF indices and the keys are the ESPResSo particle ``id``
"""
if not hasattr(types, '__iter__'):
types = [types]
if types == "all":
types = [types]
id_to_write = []
for p in system.part:
for t in types:
if t in (p.type, "all"):
id_to_write.append(p.id)
return dict(zip(id_to_write, range(len(id_to_write))))
def writevsf(system, fp, types='all'):
"""
    Writes a VSF (VTF Structure Format) block to a file.
This can be used to write the header of a VTF file.
Parameters
----------
system: espressomd.System() object
types : :obj:`str`
Specifies the particle types. The string 'all' will write all particles
fp : file
File pointer to write to.
"""
vtf_index = vtf_pid_map(system, types)
fp.write("unitcell {} {} {}\n".format(*(system.box_l)))
for pid, vtf_id, in vtf_index.items():
fp.write("atom {} radius 1 name {} type {} \n".format(vtf_id,
system.part[
pid].type,
system.part[pid].type))
for pid, vtf_id, in vtf_index.items():
for b in system.part[pid].bonds:
if system.part[b[1]].id in vtf_index:
fp.write("bond {}:{}\n".format(
vtf_id, vtf_index[system.part[b[1]].id]))
def writevcf(system, fp, types='all'):
"""
    Writes a VCF (VTF Coordinate Format) block to a file.
This can be used to write a timestep to a VTF file.
Parameters
----------
system: espressomd.System() object
types : :obj:`str`
Specifies the particle types. The string 'all' will write all particles
fp : file
File pointer to write to.
"""
vtf_index = vtf_pid_map(system, types)
fp.write("\ntimestep indexed\n")
for pid, vtf_id in vtf_index.items():
fp.write("{} {} {} {}\n".format(vtf_id, *(system.part[pid].pos)))
| gpl-3.0 |
xdajog/samsung_sources_i927 | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
apagac/cfme_tests | cfme/fixtures/terminalreporter.py | 9 | 1050 | # FlexibleTerminalReporter is imported for backward compatibility;
# it should be imported from pytest_store
from cfme.fixtures.pytest_store import store
from cfme.utils import diaper
from cfme.utils.log import logger
def reporter(config=None):
"""Return a py.test terminal reporter that will write to the console no matter what
Only useful when trying to write to the console before or during a
:py:func:`pytest_configure <pytest:_pytest.hookspec.pytest_configure>` hook.
"""
# config arg is accepted, but no longer needed thanks to pytest_store, so it is ignored
return store.terminalreporter
def disable():
# Could be a FlexibleTerminalReporter, which is a subclass of TerminalReporter,
# so match the type directly
with diaper:
store.pluginmanager.unregister(store.terminalreporter)
logger.debug('terminalreporter disabled')
def enable():
with diaper:
store.pluginmanager.register(store.terminalreporter, 'terminalreporter')
logger.debug('terminalreporter enabled')
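# Hedged usage sketch: pairing the helpers above to silence py.test console
# output around a noisy operation; ``run_noisy_setup`` is a hypothetical
# stand-in for whatever produces the unwanted output.
#
#   disable()
#   run_noisy_setup()
#   enable()
#   reporter().write_line('setup complete')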
| gpl-2.0 |
dataxu/ansible | lib/ansible/plugins/connection/saltstack.py | 86 | 3724 | # Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# Based on func.py
# (c) 2014, Michael Scherer <misc@zarb.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Michael Scherer <misc@zarb.org>
connection: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.
version_added: "2.2"
"""
import re
import os
import pty
import subprocess
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six.moves import cPickle
HAVE_SALTSTACK = False
try:
import salt.client as sc
HAVE_SALTSTACK = True
except ImportError:
pass
from ansible import errors
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
''' Salt-based connections '''
has_pipelining = False
# while the name of the product is salt, naming that module salt cause
# trouble with module import
transport = 'saltstack'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.host = self._play_context.remote_addr
def _connect(self):
if not HAVE_SALTSTACK:
raise errors.AnsibleError("saltstack is not installed")
self.client = sc.LocalClient()
self._connected = True
return self
def exec_command(self, cmd, sudoable=False, in_data=None):
''' run a command on the remote minion '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
self._display.vvv("EXEC %s" % (cmd), host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
if self.host not in res:
raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
p = res[self.host]
return (p['retcode'], p['stdout'], p['stderr'])
def _normalize_path(self, path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
out_path = self._normalize_path(out_path, '/')
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
content = open(in_path).read()
self.client.cmd(self.host, 'file.write', [out_path, content])
# TODO test it
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._normalize_path(in_path, '/')
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
open(out_path, 'wb').write(content)
def close(self):
''' terminate the connection; nothing to do here '''
pass
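# Hedged usage sketch: selecting this plugin from an Ansible inventory. The
# host and group names are invented; ``ansible_connection=saltstack`` is the
# switch that picks the Connection class above (transport = 'saltstack').
#
#   # inventory.ini
#   [minions]
#   web01 ansible_connection=saltstack
#
#   $ ansible minions -i inventory.ini -m ping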
| gpl-3.0 |
littledogboy/zulip | tools/emoji_dump/emoji_dump.py | 113 | 2098 | #!/usr/bin/env python
import os
import shutil
import subprocess
import json
from PIL import Image, ImageDraw, ImageFont
class MissingGlyphError(Exception):
pass
def color_font(name, code_point):
in_name = 'bitmaps/strike1/uni{}.png'.format(code_point)
out_name = 'out/unicode/{}.png'.format(code_point)
try:
shutil.copyfile(in_name, out_name)
except IOError:
raise MissingGlyphError('name: %r code_point: %r' % (name, code_point))
def bw_font(name, code_point):
char = unichr(int(code_point, 16))
AA_SCALE = 8
SIZE = (68, 68)
BIG_SIZE = tuple([x * AA_SCALE for x in SIZE])
# AndroidEmoji.ttf is from
# https://android.googlesource.com/platform/frameworks/base.git/+/master/data/fonts/AndroidEmoji.ttf
# commit 07912f876c8639f811b06831465c14c4a3b17663
font = ImageFont.truetype('AndroidEmoji.ttf', 65 * AA_SCALE)
image = Image.new('RGBA', BIG_SIZE)
draw = ImageDraw.Draw(image)
draw.text((0, 0), char, font=font, fill='black')
image.resize(SIZE, Image.ANTIALIAS).save('out/unicode/{}.png'.format(code_point), 'PNG')
# ttx is in the fonttools package, the -z option is only on master
# https://github.com/behdad/fonttools/
# NotoColorEmoji.ttf is from
# https://android.googlesource.com/platform/external/noto-fonts/+/kitkat-release/NotoColorEmoji.ttf
subprocess.call('ttx -v -z extfile NotoColorEmoji.ttf', shell=True)
try:
shutil.rmtree('out')
except OSError:
pass
os.mkdir('out')
os.mkdir('out/unicode')
emoji_map = json.load(open('emoji_map.json'))
# Fix data problem with red/blue cars being inaccurate.
emoji_map['blue_car'] = emoji_map['red_car']
emoji_map['red_car'] = emoji_map['oncoming_automobile']
for name, code_point in emoji_map.items():
try:
color_font(name, code_point)
except MissingGlyphError:
try:
bw_font(name, code_point)
except Exception as e:
print e
print 'Missing {}, {}'.format(name, code_point)
continue
os.symlink('unicode/{}.png'.format(code_point), 'out/{}.png'.format(name))
| apache-2.0 |
Hellowlol/plexpy | lib/pygazelle/user.py | 27 | 4731 |
class InvalidUserException(Exception):
pass
class User(object):
"""
This class represents a User, whether your own or someone else's. It is created knowing only its ID. To reduce
API accesses, load information using User.update_index_data() or User.update_user_data only as needed.
"""
def __init__(self, id, parent_api):
self.id = id
self.parent_api = parent_api
self.username = None
self.authkey = None
self.passkey = None
self.avatar = None
self.is_friend = None
self.profile_text = None
self.notifications = None
self.stats = None
self.ranks = None
self.personal = None
self.community = None
self.parent_api.cached_users[self.id] = self # add self to cache of known User objects
def update_index_data(self):
"""
Calls 'index' API action, then updates this User object's information with it.
NOTE: Only call if this user is the logged-in user...throws InvalidUserException otherwise.
"""
response = self.parent_api.request(action='index')
self.set_index_data(response)
def set_index_data(self, index_json_response):
"""
Takes parsed JSON response from 'index' action on api, and updates the available subset of user information.
ONLY callable if this User object represents the currently logged in user. Throws InvalidUserException otherwise.
"""
if self.id != index_json_response['id']:
raise InvalidUserException("Tried to update non-logged-in User's information from 'index' API call." +
" Should be %s, got %s" % (self.id, index_json_response['id']) )
self.username = index_json_response['username']
self.authkey = index_json_response['authkey']
self.passkey = index_json_response['passkey']
self.notifications = index_json_response['notifications']
if self.stats:
self.stats = dict(self.stats.items() + index_json_response['userstats'].items()) # merge in new info
else:
self.stats = index_json_response['userstats']
# cross pollinate some data that is located in multiple locations in API
if self.personal:
self.personal['class'] = self.stats['class']
self.personal['passkey'] = self.passkey
def update_user_data(self):
response = self.parent_api.request(action='user', id=self.id)
self.set_user_data(response)
def set_user_data(self, user_json_response):
"""
Takes parsed JSON response from 'user' action on api, and updates relevant user information.
To avoid problems, only pass in user data from an API call that used this user's ID as an argument.
"""
if self.username and self.username != user_json_response['username']:
raise InvalidUserException("Tried to update a user's information from a 'user' API call with a different username." +
" Should be %s, got %s" % (self.username, user_json_response['username']) )
self.username = user_json_response['username']
self.avatar = user_json_response['avatar']
self.is_friend = user_json_response['isFriend']
self.profile_text = user_json_response['profileText']
if self.stats:
self.stats = dict(self.stats.items() + user_json_response['stats'].items()) # merge in new info
else:
self.stats = user_json_response['stats']
self.ranks = user_json_response['ranks']
self.personal = user_json_response['personal']
self.community = user_json_response['community']
# cross pollinate some data that is located in multiple locations in API
self.stats['class'] = self.personal['class']
self.passkey = self.personal['passkey']
def set_search_result_data(self, search_result_item):
"""
Takes a single user result item from a 'usersearch' API call and updates user info.
"""
if self.id != search_result_item['userId']:
raise InvalidUserException("Tried to update existing user with another user's search result data (IDs don't match).")
self.username = search_result_item['username']
if not self.personal:
self.personal = {}
self.personal['donor'] = search_result_item['donor']
self.personal['warned'] = search_result_item['warned']
self.personal['enabled'] = search_result_item['enabled']
self.personal['class'] = search_result_item['class']
def __repr__(self):
return "User: %s - ID: %s" % (self.username, self.id)
| gpl-3.0 |
GiraldTec/2015-igraphic | mrdoob-three.js-f73593b/utils/exporters/blender/addons/io_three/exporter/api/animation.py | 177 | 16103 | """
Module for handling the parsing of skeletal animation data.
"""
import math
import mathutils
from bpy import data, context
from .. import constants, logger
def pose_animation(armature, options):
"""Query armature animation using pose bones
:param armature:
:param options:
:returns: list of dictionaries containing animation data
:rtype: [{}, {}, ...]
"""
logger.debug("animation.pose_animation(%s)", armature)
func = _parse_pose_action
return _parse_action(func, armature, options)
def rest_animation(armature, options):
"""Query armature animation (REST position)
:param armature:
:param options:
:returns: list of dictionaries containing animation data
:rtype: [{}, {}, ...]
"""
logger.debug("animation.rest_animation(%s)", armature)
func = _parse_rest_action
return _parse_action(func, armature, options)
def _parse_action(func, armature, options):
"""
:param func:
:param armature:
:param options:
"""
animations = []
logger.info("Parsing %d actions", len(data.actions))
for action in data.actions:
logger.info("Parsing action %s", action.name)
animation = func(action, armature, options)
animations.append(animation)
return animations
def _parse_rest_action(action, armature, options):
"""
:param action:
:param armature:
:param options:
"""
end_frame = action.frame_range[1]
start_frame = action.frame_range[0]
frame_length = end_frame - start_frame
rot = armature.matrix_world.decompose()[1]
rotation_matrix = rot.to_matrix()
hierarchy = []
parent_index = -1
frame_step = options.get(constants.FRAME_STEP, 1)
fps = context.scene.render.fps
start = int(start_frame)
end = int(end_frame / frame_step) + 1
for bone in armature.data.bones:
# I believe this was meant to skip control bones, may
# not be useful. needs more testing
if bone.use_deform is False:
logger.info("Skipping animation data for bone %s", bone.name)
continue
logger.info("Parsing animation data for bone %s", bone.name)
keys = []
for frame in range(start, end):
computed_frame = frame * frame_step
pos, pchange = _position(bone, computed_frame,
action, armature.matrix_world)
rot, rchange = _rotation(bone, computed_frame,
action, rotation_matrix)
rot = _normalize_quaternion(rot)
pos_x, pos_y, pos_z = pos.x, pos.z, -pos.y
rot_x, rot_y, rot_z, rot_w = rot.x, rot.z, -rot.y, rot.w
if frame == start_frame:
time = (frame * frame_step - start_frame) / fps
# @TODO: missing scale values
keyframe = {
constants.TIME: time,
constants.POS: [pos_x, pos_y, pos_z],
constants.ROT: [rot_x, rot_y, rot_z, rot_w],
constants.SCL: [1, 1, 1]
}
keys.append(keyframe)
# END-FRAME: needs pos, rot and scl attributes
# with animation length (required frame)
elif frame == end_frame / frame_step:
time = frame_length / fps
keyframe = {
constants.TIME: time,
constants.POS: [pos_x, pos_y, pos_z],
constants.ROT: [rot_x, rot_y, rot_z, rot_w],
constants.SCL: [1, 1, 1]
}
keys.append(keyframe)
# MIDDLE-FRAME: needs only one of the attributes,
# can be an empty frame (optional frame)
elif pchange is True or rchange is True:
time = (frame * frame_step - start_frame) / fps
if pchange is True and rchange is True:
keyframe = {
constants.TIME: time,
constants.POS: [pos_x, pos_y, pos_z],
constants.ROT: [rot_x, rot_y, rot_z, rot_w]
}
elif pchange is True:
keyframe = {
constants.TIME: time,
constants.POS: [pos_x, pos_y, pos_z]
}
elif rchange is True:
keyframe = {
constants.TIME: time,
constants.ROT: [rot_x, rot_y, rot_z, rot_w]
}
keys.append(keyframe)
hierarchy.append({
constants.KEYS: keys,
constants.PARENT: parent_index
})
parent_index += 1
animation = {
constants.HIERARCHY: hierarchy,
constants.LENGTH: frame_length / fps,
constants.FPS: fps,
constants.NAME: action.name
}
return animation
def _parse_pose_action(action, armature, options):
"""
:param action:
:param armature:
:param options:
"""
# @TODO: this seems to fail in batch mode meaning the
# user has to have the GUI open. Need to improve
# this logic to allow batch processing, if Blender
# chooses to behave....
current_context = context.area.type
context.area.type = 'DOPESHEET_EDITOR'
context.space_data.mode = 'ACTION'
context.area.spaces.active.action = action
armature_matrix = armature.matrix_world
fps = context.scene.render.fps
end_frame = action.frame_range[1]
start_frame = action.frame_range[0]
frame_length = end_frame - start_frame
frame_step = options.get(constants.FRAME_STEP, 1)
used_frames = int(frame_length / frame_step) + 1
keys = []
channels_location = []
channels_rotation = []
channels_scale = []
for pose_bone in armature.pose.bones:
logger.info("Processing channels for %s",
pose_bone.bone.name)
keys.append([])
channels_location.append(
_find_channels(action,
pose_bone.bone,
'location'))
channels_rotation.append(
_find_channels(action,
pose_bone.bone,
'rotation_quaternion'))
channels_rotation.append(
_find_channels(action,
pose_bone.bone,
'rotation_euler'))
channels_scale.append(
_find_channels(action,
pose_bone.bone,
'scale'))
frame_step = options[constants.FRAME_STEP]
frame_index_as_time = options[constants.FRAME_INDEX_AS_TIME]
for frame_index in range(0, used_frames):
if frame_index == used_frames - 1:
frame = end_frame
else:
frame = start_frame + frame_index * frame_step
logger.info("Processing frame %d", frame)
time = frame - start_frame
if frame_index_as_time is False:
time = time / fps
context.scene.frame_set(frame)
bone_index = 0
def has_keyframe_at(channels, frame):
"""
:param channels:
:param frame:
"""
def find_keyframe_at(channel, frame):
"""
:param channel:
:param frame:
"""
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
return keyframe
return None
for channel in channels:
if not find_keyframe_at(channel, frame) is None:
return True
return False
for pose_bone in armature.pose.bones:
logger.info("Processing bone %s", pose_bone.bone.name)
if pose_bone.parent is None:
bone_matrix = armature_matrix * pose_bone.matrix
else:
parent_matrix = armature_matrix * pose_bone.parent.matrix
bone_matrix = armature_matrix * pose_bone.matrix
bone_matrix = parent_matrix.inverted() * bone_matrix
pos, rot, scl = bone_matrix.decompose()
rot = _normalize_quaternion(rot)
# NOTE: ``True or`` always evaluates to True, so every frame is
# treated as changed and has_keyframe_at() is never actually consulted.
pchange = True or has_keyframe_at(
    channels_location[bone_index], frame)
rchange = True or has_keyframe_at(
    channels_rotation[bone_index], frame)
schange = True or has_keyframe_at(
    channels_scale[bone_index], frame)
pos = (pos.x, pos.z, -pos.y)
rot = (rot.x, rot.z, -rot.y, rot.w)
scl = (scl.x, scl.z, scl.y)
keyframe = {constants.TIME: time}
if frame == start_frame or frame == end_frame:
keyframe.update({
constants.POS: pos,
constants.ROT: rot,
constants.SCL: scl
})
elif any([pchange, rchange, schange]):
if pchange is True:
keyframe[constants.POS] = pos
if rchange is True:
keyframe[constants.ROT] = rot
if schange is True:
keyframe[constants.SCL] = scl
if len(keyframe.keys()) > 1:
logger.info("Recording keyframe data for %s %s",
pose_bone.bone.name, str(keyframe))
keys[bone_index].append(keyframe)
else:
logger.info("No anim data to record for %s",
pose_bone.bone.name)
bone_index += 1
hierarchy = []
bone_index = 0
for pose_bone in armature.pose.bones:
hierarchy.append({
constants.PARENT: bone_index - 1,
constants.KEYS: keys[bone_index]
})
bone_index += 1
if frame_index_as_time is False:
frame_length = frame_length / fps
context.scene.frame_set(start_frame)
context.area.type = current_context
animation = {
constants.HIERARCHY: hierarchy,
constants.LENGTH: frame_length,
constants.FPS: fps,
constants.NAME: action.name
}
return animation
def _find_channels(action, bone, channel_type):
"""
:param action:
:param bone:
:param channel_type:
"""
result = []
if len(action.groups):
group_index = -1
for index, group in enumerate(action.groups):
if group.name == bone.name:
group_index = index
# @TODO: break?
if group_index > -1:
for channel in action.groups[group_index].channels:
if channel_type in channel.data_path:
result.append(channel)
else:
bone_label = '"%s"' % bone.name
for channel in action.fcurves:
data_path = [bone_label in channel.data_path,
channel_type in channel.data_path]
if all(data_path):
result.append(channel)
return result
def _position(bone, frame, action, armature_matrix):
"""
:param bone:
:param frame:
:param action:
:param armature_matrix:
"""
position = mathutils.Vector((0, 0, 0))
change = False
ngroups = len(action.groups)
if ngroups > 0:
index = 0
for i in range(ngroups):
if action.groups[i].name == bone.name:
index = i
for channel in action.groups[index].channels:
if "location" in channel.data_path:
has_changed = _handle_position_channel(
channel, frame, position)
change = change or has_changed
else:
bone_label = '"%s"' % bone.name
for channel in action.fcurves:
data_path = channel.data_path
if bone_label in data_path and "location" in data_path:
has_changed = _handle_position_channel(
channel, frame, position)
change = change or has_changed
position = position * bone.matrix_local.inverted()
if bone.parent is None:
position.x += bone.head.x
position.y += bone.head.y
position.z += bone.head.z
else:
parent = bone.parent
parent_matrix = parent.matrix_local.inverted()
diff = parent.tail_local - parent.head_local
position.x += (bone.head * parent_matrix).x + diff.x
position.y += (bone.head * parent_matrix).y + diff.y
position.z += (bone.head * parent_matrix).z + diff.z
return armature_matrix*position, change
def _rotation(bone, frame, action, armature_matrix):
"""
:param bone:
:param frame:
:param action:
:param armature_matrix:
"""
# TODO: calculate rotation also from rotation_euler channels
rotation = mathutils.Vector((0, 0, 0, 1))
change = False
ngroups = len(action.groups)
# animation grouped by bones
if ngroups > 0:
index = -1
for i in range(ngroups):
if action.groups[i].name == bone.name:
index = i
if index > -1:
for channel in action.groups[index].channels:
if "quaternion" in channel.data_path:
has_changed = _handle_rotation_channel(
channel, frame, rotation)
change = change or has_changed
# animation in raw fcurves
else:
bone_label = '"%s"' % bone.name
for channel in action.fcurves:
data_path = channel.data_path
if bone_label in data_path and "quaternion" in data_path:
has_changed = _handle_rotation_channel(
channel, frame, rotation)
change = change or has_changed
rot3 = rotation.to_3d()
rotation.xyz = rot3 * bone.matrix_local.inverted()
rotation.xyz = armature_matrix * rotation.xyz
return rotation, change
def _handle_rotation_channel(channel, frame, rotation):
"""
:param channel:
:param frame:
:param rotation:
"""
change = False
if channel.array_index in [0, 1, 2, 3]:
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
change = True
value = channel.evaluate(frame)
if channel.array_index == 1:
rotation.x = value
elif channel.array_index == 2:
rotation.y = value
elif channel.array_index == 3:
rotation.z = value
elif channel.array_index == 0:
rotation.w = value
return change
def _handle_position_channel(channel, frame, position):
"""
:param channel:
:param frame:
:param position:
"""
change = False
if channel.array_index in [0, 1, 2]:
for keyframe in channel.keyframe_points:
if keyframe.co[0] == frame:
change = True
value = channel.evaluate(frame)
if channel.array_index == 0:
position.x = value
if channel.array_index == 1:
position.y = value
if channel.array_index == 2:
position.z = value
return change
def _quaternion_length(quat):
"""Calculate the length of a quaternion
:param quat: Blender quaternion object
:rtype: float
"""
return math.sqrt(quat.x * quat.x + quat.y * quat.y +
quat.z * quat.z + quat.w * quat.w)
def _normalize_quaternion(quat):
"""Normalize a quaternion
:param quat: Blender quaternion object
:returns: generic quaternion enum object with normalized values
:rtype: object
"""
enum = type('Enum', (), {'x': 0, 'y': 0, 'z': 0, 'w': 1})
length = _quaternion_length(quat)
if length != 0:
length = 1 / length
enum.x = quat.x * length
enum.y = quat.y * length
enum.z = quat.z * length
enum.w = quat.w * length
return enum
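# Hedged sanity check for the quaternion helpers above; runnable inside
# Blender's Python (where mathutils is available). The input values are
# arbitrary.
def _demo_normalize_quaternion():
    quat = mathutils.Quaternion((2.0, 0.0, 0.0, 0.0))  # w=2 -> length 2.0
    unit = _normalize_quaternion(quat)
    # the normalized result has unit length
    assert abs(_quaternion_length(unit) - 1.0) < 1e-9
    return unit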
| gpl-2.0 |
garg10may/youtube-dl | test/test_write_annotations.py | 78 | 2550 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_params, try_rm
import io
import xml.etree.ElementTree
import youtube_dl.YoutubeDL
import youtube_dl.extractor
class YoutubeDL(youtube_dl.YoutubeDL):
def __init__(self, *args, **kwargs):
super(YoutubeDL, self).__init__(*args, **kwargs)
self.to_stderr = self.to_screen
params = get_params({
'writeannotations': True,
'skip_download': True,
'writeinfojson': False,
'format': 'flv',
})
TEST_ID = 'gr51aVj-mLg'
ANNOTATIONS_FILE = TEST_ID + '.flv.annotations.xml'
EXPECTED_ANNOTATIONS = ['Speech bubble', 'Note', 'Title', 'Spotlight', 'Label']
class TestAnnotations(unittest.TestCase):
def setUp(self):
# Clear old files
self.tearDown()
def test_info_json(self):
expected = list(EXPECTED_ANNOTATIONS) # Two annotations could have the same text.
ie = youtube_dl.extractor.YoutubeIE()
ydl = YoutubeDL(params)
ydl.add_info_extractor(ie)
ydl.download([TEST_ID])
self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
annoxml = None
with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
annoxml = xml.etree.ElementTree.parse(annof)
self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
root = annoxml.getroot()
self.assertEqual(root.tag, 'document')
annotationsTag = root.find('annotations')
self.assertEqual(annotationsTag.tag, 'annotations')
annotations = annotationsTag.findall('annotation')
# Not all the annotations have TEXT children and the annotations are returned unsorted.
for a in annotations:
self.assertEqual(a.tag, 'annotation')
if a.get('type') == 'text':
textTag = a.find('TEXT')
text = textTag.text
self.assertTrue(text in expected) # assertIn only added in python 2.7
# remove the first occurrence; there could be more than one annotation with the same text
expected.remove(text)
# We should have seen (and removed) all the expected annotation texts.
self.assertEqual(len(expected), 0, 'Not all expected annotations were found.')
def tearDown(self):
try_rm(ANNOTATIONS_FILE)
if __name__ == '__main__':
unittest.main()
| unlicense |
Yipit/eventlib | tests/unit/test_eventlib.py | 1 | 3543 | # eventlib - Copyright (c) 2012 Yipit, Inc
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ejson import serializers
from mock import Mock, call, patch
from datetime import datetime
from eventlib import exceptions, conf, core, tasks
def test_parse_event_name():
core.parse_event_name('app.Event').should.be.equal(
('app.events', 'Event'))
core.parse_event_name.when.called_with('stuff').should.throw(
exceptions.InvalidEventNameError,
'The name "stuff" is invalid. Make sure you are using the '
'"app.KlassName" format')
core.parse_event_name.when.called_with('other.stuff.blah').should.throw(
exceptions.InvalidEventNameError,
'The name "other.stuff.blah" is invalid. Make sure you are using the '
'"app.KlassName" format')
@patch('eventlib.core.import_module')
def test_find_event(import_module):
fake_module = Mock()
fake_module.Event = 'my-lol-module'
import_module.return_value = fake_module
core.find_event('app.Event').should.be.equals('my-lol-module')
import_module.reset_mock()
import_module.side_effect = ImportError
core.find_event.when.called_with('app.Event2').should.throw(
exceptions.EventNotFoundError,
'Event "app.Event2" not found. Make sure you have a class '
'called "Event2" inside the "app.events" module.')
def test_filter_data_values():
core.filter_data_values({'a': 'b', 'c': 'd'}).should.be.equals(
{'a': 'b', 'c': 'd'}
)
core.filter_data_values({'a': 'b', 'request': None}).should.be.equals(
{'a': 'b'}
)
@patch('eventlib.core.datetime')
@patch('eventlib.core.get_ip')
def test_get_default_values_with_request(get_ip, datetime):
get_ip.return_value = '150.164.211.1'
datetime.now.return_value = 'tea time!'
data = {'foo': 'bar', 'request': Mock()}
core.get_default_values(data).should.be.equals({
'__datetime__': 'tea time!',
'__ip_address__': '150.164.211.1',
})
@patch('eventlib.tasks.process')
def test_celery_process_wrapper(process):
tasks.process_task('name', 'data')
process.assert_called_once_with('name', 'data')
@patch('eventlib.conf.settings')
def test_django_integration(settings):
# Given I mock django conf
settings.LOCAL_GEOLOCATION_IP = 'CHUCK NORRIS'
# When I try to access a variable using the getsetting method, then
# it should contain the mocked values
conf.getsetting('LOCAL_GEOLOCATION_IP').should.equal('CHUCK NORRIS')
@patch('eventlib.conf.settings')
@patch('eventlib.core.import_module')
def test_importing_events(import_module, settings):
settings.INSTALLED_APPS = ['foobar', 'test_app']
core.import_event_modules()
calls = [call('foobar.events'), call('test_app.events')]
import_module.assert_has_calls(calls)
settings.INSTALLED_APPS = ['tester']
import_module.side_effect = ImportError('P0wned!!!')
core.import_event_modules()
| lgpl-3.0 |
LogicalDash/kivy | kivy/core/image/img_tex.py | 21 | 1549 | '''
Tex: Compressed texture
'''
__all__ = ('ImageLoaderTex', )
import json
from struct import unpack
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderTex(ImageLoaderBase):
@staticmethod
def extensions():
return ('tex', )
def load(self, filename):
try:
fd = open(filename, 'rb')
if fd.read(4) != b'KTEX':  # file is opened in binary mode, so compare bytes
raise Exception('Invalid tex identifier')
headersize = unpack('I', fd.read(4))[0]
header = fd.read(headersize)
if len(header) != headersize:
raise Exception('Truncated tex header')
info = json.loads(header)
data = fd.read()
if len(data) != info['datalen']:
raise Exception('Truncated tex data')
except:
Logger.warning('Image: Image <%s> is corrupted' % filename)
raise
width, height = info['image_size']
tw, th = info['texture_size']
images = [data]
im = ImageData(width, height, str(info['format']), images[0],
source=filename)
'''
if len(dds.images) > 1:
images = dds.images
images_size = dds.images_size
for index in range(1, len(dds.images)):
w, h = images_size[index]
data = images[index]
im.add_mipmap(index, w, h, data)
'''
return [im]
# register
ImageLoader.register(ImageLoaderTex)
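# Hedged companion sketch (not part of Kivy): writing a file in the layout
# load() parses above -- 'KTEX' magic, native-endian header length, JSON
# header, raw pixel payload. Header keys mirror the ones read in load();
# the 'rgba' format string is an assumption.
def write_tex(filename, width, height, pixels, fmt='rgba'):
    from struct import pack
    header = json.dumps({
        'image_size': [width, height],
        'texture_size': [width, height],
        'datalen': len(pixels),
        'format': fmt,
    }).encode('utf-8')
    with open(filename, 'wb') as fd:
        fd.write(b'KTEX')                 # magic checked by the loader
        fd.write(pack('I', len(header)))  # header size, same 'I' as unpack
        fd.write(header)
        fd.write(pixels)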
| mit |
ftl-toolbox/lib_openshift | lib_openshift/models/v1_load_balancer_ingress.py | 2 | 4203 | # coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1LoadBalancerIngress(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'ip': 'str',
'hostname': 'str'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'ip': 'ip',
'hostname': 'hostname'
}
def __init__(self, ip=None, hostname=None):
"""
V1LoadBalancerIngress - a model defined in Swagger
"""
self._ip = ip
self._hostname = hostname
@property
def ip(self):
"""
Gets the ip of this V1LoadBalancerIngress.
IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
:return: The ip of this V1LoadBalancerIngress.
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""
Sets the ip of this V1LoadBalancerIngress.
IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
:param ip: The ip of this V1LoadBalancerIngress.
:type: str
"""
self._ip = ip
@property
def hostname(self):
"""
Gets the hostname of this V1LoadBalancerIngress.
Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)
:return: The hostname of this V1LoadBalancerIngress.
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""
Sets the hostname of this V1LoadBalancerIngress.
Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)
:param hostname: The hostname of this V1LoadBalancerIngress.
:type: str
"""
self._hostname = hostname
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1LoadBalancerIngress.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
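# Hedged usage sketch (hand-written, not generator output): building an
# ingress record and round-tripping it through to_dict().
#
#   ingress = V1LoadBalancerIngress(ip='10.0.0.1', hostname='lb.example.com')
#   ingress.to_dict()  # -> {'ip': '10.0.0.1', 'hostname': 'lb.example.com'}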
| apache-2.0 |
blackseabass/django-polls | polls/tests.py | 1 | 1320 | import datetime
from django.utils import timezone
from django.test import TestCase
from .models import Question
# Create your tests here.
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertEqual(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for question whose
pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
old_question = Question(pub_date=time)
self.assertEqual(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for question whose
pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertEqual(recent_question.was_published_recently(), True)
| bsd-3-clause |
anentropic/django-oscar | src/oscar/apps/partner/abstract_models.py | 10 | 10064 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy
from oscar.apps.partner.exceptions import InvalidStockAdjustment
from oscar.core.compat import AUTH_USER_MODEL
from oscar.core.utils import get_default_currency
from oscar.models.fields import AutoSlugField
@python_2_unicode_compatible
class AbstractPartner(models.Model):
"""
A fulfillment partner. An individual or company who can fulfil products.
E.g. for physical goods, somebody with a warehouse and means of delivery.
Creating one or more instances of the Partner model is a required step in
setting up an Oscar deployment. Many Oscar deployments will only have one
fulfillment partner.
"""
code = AutoSlugField(_("Code"), max_length=128, unique=True,
populate_from='name')
name = models.CharField(
pgettext_lazy(u"Partner's name", u"Name"), max_length=128, blank=True)
#: A partner can have users assigned to it. This is used
#: for access modelling in the permission-based dashboard
users = models.ManyToManyField(
AUTH_USER_MODEL, related_name="partners",
blank=True, verbose_name=_("Users"))
@property
def display_name(self):
return self.name or self.code
@property
def primary_address(self):
"""
Returns a partner's primary address. Usually that will be the
headquarters or similar.
This is a rudimentary implementation that raises an error if there's
more than one address. If you actually want to support multiple
addresses, you will likely need to extend PartnerAddress to have some
field or flag to base your decision on.
"""
addresses = self.addresses.all()
if len(addresses) == 0: # intentionally using len() to save queries
return None
elif len(addresses) == 1:
return addresses[0]
else:
raise NotImplementedError(
"Oscar's default implementation of primary_address only "
"supports one PartnerAddress. You need to override the "
"primary_address to look up the right address")
def get_address_for_stockrecord(self, stockrecord):
"""
Stock might be coming from different warehouses. Overriding this
function allows selecting the correct PartnerAddress for the record.
That can be useful when determining tax.
"""
return self.primary_address
class Meta:
abstract = True
app_label = 'partner'
ordering = ('name', 'code')
permissions = (('dashboard_access', 'Can access dashboard'), )
verbose_name = _('Fulfillment partner')
verbose_name_plural = _('Fulfillment partners')
def __str__(self):
return self.display_name
@python_2_unicode_compatible
class AbstractStockRecord(models.Model):
"""
A stock record.
This records information about a product from a fulfilment partner, such as
their SKU, the number they have in stock and price information.
Stockrecords are used by 'strategies' to determine availability and pricing
information for the customer.
"""
product = models.ForeignKey(
'catalogue.Product', related_name="stockrecords",
verbose_name=_("Product"))
partner = models.ForeignKey(
'partner.Partner', verbose_name=_("Partner"),
related_name='stockrecords')
#: The fulfilment partner will often have their own SKU for a product,
#: which we store here. This will sometimes be the same the product's UPC
#: but not always. It should be unique per partner.
#: See also http://en.wikipedia.org/wiki/Stock-keeping_unit
partner_sku = models.CharField(_("Partner SKU"), max_length=128)
# Price info:
price_currency = models.CharField(
_("Currency"), max_length=12, default=get_default_currency)
# This is the base price for calculations - tax should be applied by the
# appropriate method. We don't store tax here as its calculation is highly
# domain-specific. It is NULLable because some items don't have a fixed
# price but require a runtime calculation (possible from an external
# service).
price_excl_tax = models.DecimalField(
_("Price (excl. tax)"), decimal_places=2, max_digits=12,
blank=True, null=True)
#: Retail price for this item. This is simply the recommended price from
#: the manufacturer. If this is used, it is for display purposes only.
#: This price is NOT the price charged to the customer.
price_retail = models.DecimalField(
_("Price (retail)"), decimal_places=2, max_digits=12,
blank=True, null=True)
#: Cost price is the price charged by the fulfilment partner. It is not
#: used (by default) in any price calculations but is often used in
#: reporting so merchants can report on their profit margin.
cost_price = models.DecimalField(
_("Cost Price"), decimal_places=2, max_digits=12,
blank=True, null=True)
#: Number of items in stock
num_in_stock = models.PositiveIntegerField(
_("Number in stock"), blank=True, null=True)
#: The amount of stock allocated to orders but not fed back to the master
#: stock system. A typical stock update process will set the num_in_stock
#: variable to a new value and reset num_allocated to zero
num_allocated = models.IntegerField(
_("Number allocated"), blank=True, null=True)
#: Threshold for low-stock alerts. When stock goes beneath this threshold,
#: an alert is triggered so warehouse managers can order more.
low_stock_threshold = models.PositiveIntegerField(
_("Low Stock Threshold"), blank=True, null=True)
# Date information
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
date_updated = models.DateTimeField(_("Date updated"), auto_now=True,
db_index=True)
def __str__(self):
msg = u"Partner: %s, product: %s" % (
self.partner.display_name, self.product,)
if self.partner_sku:
msg = u"%s (%s)" % (msg, self.partner_sku)
return msg
class Meta:
abstract = True
app_label = 'partner'
unique_together = ('partner', 'partner_sku')
verbose_name = _("Stock record")
verbose_name_plural = _("Stock records")
@property
def net_stock_level(self):
"""
The effective number in stock (e.g. available to buy).
This is the correct property to show the customer, not the num_in_stock
field, as that doesn't account for allocations. This can be negative in
some unusual circumstances.
"""
if self.num_in_stock is None:
return 0
if self.num_allocated is None:
return self.num_in_stock
return self.num_in_stock - self.num_allocated
# 2-stage stock management model
def allocate(self, quantity):
"""
Record a stock allocation.
This normally happens when a product is bought at checkout. When the
product is actually shipped, then we 'consume' the allocation.
"""
if self.num_allocated is None:
self.num_allocated = 0
self.num_allocated += quantity
self.save()
allocate.alters_data = True
def is_allocation_consumption_possible(self, quantity):
"""
Test if a proposed stock consumption is permitted
"""
return quantity <= min(self.num_allocated, self.num_in_stock)
def consume_allocation(self, quantity):
"""
Consume a previous allocation
This is used when an item is shipped. We remove the original
allocation and adjust the number in stock accordingly
"""
if not self.is_allocation_consumption_possible(quantity):
raise InvalidStockAdjustment(
_('Invalid stock consumption request'))
self.num_allocated -= quantity
self.num_in_stock -= quantity
self.save()
consume_allocation.alters_data = True
def cancel_allocation(self, quantity):
# We ignore requests to cancel more than the amount
# already allocated.
self.num_allocated -= min(self.num_allocated, quantity)
self.save()
cancel_allocation.alters_data = True
@property
def is_below_threshold(self):
if self.low_stock_threshold is None:
return False
return self.net_stock_level < self.low_stock_threshold
@python_2_unicode_compatible
class AbstractStockAlert(models.Model):
"""
A stock alert. E.g. used to notify users when a product is 'back in stock'.
"""
stockrecord = models.ForeignKey(
'partner.StockRecord', related_name='alerts',
verbose_name=_("Stock Record"))
threshold = models.PositiveIntegerField(_("Threshold"))
OPEN, CLOSED = "Open", "Closed"
status_choices = (
(OPEN, _("Open")),
(CLOSED, _("Closed")),
)
status = models.CharField(_("Status"), max_length=128, default=OPEN,
choices=status_choices)
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
date_closed = models.DateTimeField(_("Date Closed"), blank=True, null=True)
def close(self):
self.status = self.CLOSED
self.date_closed = now()
self.save()
close.alters_data = True
def __str__(self):
return _('<stockalert for "%(stock)s" status %(status)s>') \
% {'stock': self.stockrecord, 'status': self.status}
class Meta:
abstract = True
app_label = 'partner'
ordering = ('-date_created',)
verbose_name = _('Stock alert')
verbose_name_plural = _('Stock alerts')
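# Hedged walk-through of the 2-stage stock model documented above. The
# numbers are illustrative; in real use these methods run against a
# concrete StockRecord row and persist via save().
#
#   record.num_in_stock, record.num_allocated = 10, 0
#   record.allocate(3)            # checkout: net_stock_level -> 7
#   record.consume_allocation(3)  # shipped: num_in_stock -> 7, allocated -> 0
#   record.cancel_allocation(2)   # no-op here: nothing left allocated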
| bsd-3-clause |