repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
ChanduERP/odoo | addons/l10n_sg/__openerp__.py | 331 | 2380 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Tech Receptives (<http://techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the Singapore accounting localization.
{
    'name': 'Singapore - Accounting',
    'version': '1.0',
    'author': 'Tech Receptives',
    'website': 'http://www.techreceptives.com',
    'category': 'Localization/Account Charts',
    'description': """
Singapore accounting chart and localization.
=======================================================
After installing this module, the Configuration wizard for accounting is launched.
* The Chart of Accounts consists of the list of all the general ledger accounts
required to maintain the transactions of Singapore.
* On that particular wizard, you will be asked to pass the name of the company,
the chart template to follow, the no. of digits to generate, the code for your
account and bank account, currency to create journals.
* The Chart of Taxes would display the different types/groups of taxes such as
Standard Rates, Zeroed, Exempted, MES and Out of Scope.
* The tax codes are specified considering the Tax Group and for easy accessibility of
submission of GST Tax Report.
""",
    # Modules that must be installed before this one.
    'depends': ['base', 'account', 'account_chart'],
    'demo': [ ],
    # XML data files loaded on module installation.
    'data': [
        'l10n_sg_chart_tax_code.xml',
        'l10n_sg_chart.xml',
        'l10n_sg_chart_tax.xml',
        'l10n_sg_wizard.xml',
    ],
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lmorchard/django | django/contrib/gis/geos/collections.py | 292 | 4986 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
import json
from ctypes import byref, c_int, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import (
GEOSGeometry, ProjectInterpolateMixin,
)
from django.contrib.gis.geos.libgeos import get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing, LineString
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.utils.six.moves import range
class GeometryCollection(GEOSGeometry):
    """A collection of GEOS geometries (GEOMETRYCOLLECTION)."""

    # GEOS geometry type id for GEOMETRYCOLLECTION.
    _typeid = 7

    def __init__(self, *args, **kwargs):
        "Initializes a Geometry Collection from a sequence of Geometry objects."
        # Checking the arguments
        if not args:
            raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)

        if len(args) == 1:
            # If only one geometry provided or a list of geometries is provided
            # in the first argument.
            if isinstance(args[0], (tuple, list)):
                init_geoms = args[0]
            else:
                init_geoms = args
        else:
            init_geoms = args

        # Ensuring that only the permitted geometries are allowed in this collection
        # this is moved to list mixin super class
        self._check_allowed(init_geoms)

        # Creating the geometry pointer array.
        collection = self._create_collection(len(init_geoms), iter(init_geoms))
        super(GeometryCollection, self).__init__(collection, **kwargs)

    def __iter__(self):
        "Iterates over each Geometry in the Collection."
        for i in range(len(self)):
            yield self[i]

    def __len__(self):
        "Returns the number of geometries in this Collection."
        return self.num_geom

    # ### Methods for compatibility with ListMixin ###
    def _create_collection(self, length, items):
        # Creating the geometry pointer array.
        geoms = get_pointer_arr(length)
        for i, g in enumerate(items):
            # this is a little sloppy, but makes life easier
            # allow GEOSGeometry types (python wrappers) or pointer types
            geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
        return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))

    def _get_single_internal(self, index):
        # Raw GEOS pointer for the geometry at `index` (no clone).
        return capi.get_geomn(self.ptr, index)

    def _get_single_external(self, index):
        "Returns the Geometry from this Collection at the given index (0-based)."
        # Checking the index and returning the corresponding GEOS geometry.
        return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)

    def _set_list(self, length, items):
        "Create a new collection, and destroy the contents of the previous pointer."
        prev_ptr = self.ptr
        srid = self.srid
        self.ptr = self._create_collection(length, items)
        if srid:
            self.srid = srid
        capi.destroy_geom(prev_ptr)

    _set_single = GEOSGeometry._set_single_rebuild
    _assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild

    @property
    def json(self):
        # NOTE(review): only a plain GeometryCollection is serialized by
        # hand here; subclasses fall through to the base implementation.
        if self.__class__.__name__ == 'GeometryCollection':
            return json.dumps({
                'type': self.__class__.__name__,
                'geometries': [
                    {'type': geom.__class__.__name__, 'coordinates': geom.coords}
                    for geom in self
                ],
            })
        return super(GeometryCollection, self).json
    geojson = json

    @property
    def kml(self):
        "Returns the KML for this Geometry Collection."
        return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self)

    @property
    def tuple(self):
        "Returns a tuple of all the coordinates in this Geometry Collection"
        return tuple(g.tuple for g in self)
    coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
    # Collection restricted to Point members.
    _allowed = Point
    _typeid = 4
class MultiLineString(ProjectInterpolateMixin, GeometryCollection):
    # Collection restricted to LineString/LinearRing members.
    _allowed = (LineString, LinearRing)
    _typeid = 5

    @property
    def merged(self):
        """
        Returns a LineString representing the line merge of this
        MultiLineString.
        """
        return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
    # Collection restricted to Polygon members.
    _allowed = Polygon
    _typeid = 6

    @property
    def cascaded_union(self):
        "Returns a cascaded union of this MultiPolygon."
        return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
| bsd-3-clause |
Intel-tensorflow/tensorflow | tensorflow/python/compiler/tensorrt/test/int32_test.py | 6 | 3326 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test conversion of graphs involving INT32 tensors and operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class ExcludeUnsupportedInt32Test(trt_test.TfTrtIntegrationTestBase):
  """Test exclusion of ops which are not supported in INT32 mode by TF-TRT"""

  def _ConstOp(self, shape, dtype):
    # Random-valued constant of the given shape/dtype, used as weights/bias.
    return constant_op.constant(np.random.randn(*shape), dtype=dtype)

  def GraphFn(self, x):
    # MatMul + BiasAdd graph built in the input's dtype (INT32 in GetParams).
    dtype = x.dtype
    b = self._ConstOp((4, 10), dtype)
    x = math_ops.matmul(x, b)
    b = self._ConstOp((10,), dtype)
    x = nn.bias_add(x, b)
    return array_ops.identity(x, name='output_0')

  def GetParams(self):
    # INT32 input of shape [100, 4]; expected output shape [100, 10].
    return self.BuildParams(self.GraphFn, dtypes.int32, [[100, 4]], [[100, 10]])

  def setUp(self):
    super(trt_test.TfTrtIntegrationTestBase, self).setUp()  # pylint: disable=bad-super-call
    # Disable layout optimizer, since it will convert BiasAdd with NHWC
    # format to NCHW format under four dimentional input.
    self.DisableNonTrtOptimizers()

  def GetMaxBatchSize(self, run_params):
    """Returns the max_batch_size that the converter should use for tests."""
    if run_params.dynamic_engine:
      return None
    return 100

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    # Empty list: no TRT engines are expected, matching the class purpose.
    return []
class CalibrationInt32Support(trt_test.TfTrtIntegrationTestBase):
  """Test execution of calibration with int32 input"""

  def GraphFn(self, inp):
    # Can use any op that is converted to TRT with int32 inputs
    inp_transposed = array_ops.transpose(inp, [0, 3, 2, 1], name='transpose_0')
    return array_ops.identity(inp_transposed, name='output_0')

  def GetParams(self):
    # Transposing [3, 4, 5, 6] with perm [0, 3, 2, 1] yields [3, 6, 5, 4].
    return self.BuildParams(self.GraphFn, dtypes.int32, [[3, 4, 5, 6]],
                            [[3, 6, 5, 4]])

  def ShouldRunTest(self, run_params):
    # Although test passes with all configurations but only
    # execute INT8 with use_calibration=True because
    # that is the purpose of the test.
    return trt_test.IsQuantizationWithCalibration(
        run_params), 'test calibration and INT8'

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return ['TRTEngineOp_0']
# Entry point: run the TF-TRT integration tests defined above.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
enitihas/SAC-Website | venv/bin/venv/lib/python2.7/site-packages/setuptools/command/test.py | 363 | 6471 | from distutils.errors import DistutilsOptionError
from unittest import TestLoader
import unittest
import sys
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.compat import PY3
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
    """Test loader that also scans packages for test modules."""

    def loadTestsFromModule(self, module):
        """Return a suite of all tests cases contained in the given module

        If the module is a package, load tests from all the modules in it.
        If the module has an ``additional_tests`` function, call it and add
        the return value to the tests.
        """
        collected = [TestLoader.loadTestsFromModule(self, module)]

        if hasattr(module, "additional_tests"):
            collected.append(module.additional_tests())

        if hasattr(module, '__path__'):
            pkg_name = module.__name__
            for entry in resource_listdir(pkg_name, ''):
                # Plain modules are loaded by stripping the .py suffix;
                # sub-packages are detected by their __init__.py.
                if entry.endswith('.py') and entry != '__init__.py':
                    child = pkg_name + '.' + entry[:-3]
                elif resource_exists(pkg_name, entry + '/__init__.py'):
                    child = pkg_name + '.' + entry
                else:
                    continue
                collected.append(self.loadTestsFromName(child))

        # Avoid wrapping a single result in a redundant outer suite.
        if len(collected) == 1:
            return collected[0]
        return self.suiteClass(collected)
class test(Command):
    """Command to run unit tests after in-place build"""

    description = "run unit tests after in-place build"

    user_options = [
        ('test-module=', 'm', "Run 'test_suite' in specified module"),
        ('test-suite=', 's',
         "Test suite to run (e.g. 'some_module.test_suite')"),
        ('test-runner=', 'r', "Test runner to use"),
    ]

    def initialize_options(self):
        self.test_suite = None
        self.test_module = None
        self.test_loader = None
        self.test_runner = None

    def finalize_options(self):
        # --test-suite and --test-module are mutually exclusive; fall back
        # to the distribution's test_suite when neither was given.
        if self.test_suite is None:
            if self.test_module is None:
                self.test_suite = self.distribution.test_suite
            else:
                self.test_suite = self.test_module + ".test_suite"
        elif self.test_module:
            raise DistutilsOptionError(
                "You may specify a module or a suite, but not both"
            )

        self.test_args = [self.test_suite]

        if self.verbose:
            self.test_args.insert(0, '--verbose')
        # Loader/runner default to distribution settings, then to the
        # ScanningLoader defined above.
        if self.test_loader is None:
            self.test_loader = getattr(self.distribution, 'test_loader', None)
        if self.test_loader is None:
            self.test_loader = "setuptools.command.test:ScanningLoader"
        if self.test_runner is None:
            self.test_runner = getattr(self.distribution, 'test_runner', None)

    def with_project_on_sys_path(self, func):
        """Build the project, put it on sys.path, run *func*, then restore
        the original import state."""
        with_2to3 = PY3 and getattr(self.distribution, 'use_2to3', False)

        if with_2to3:
            # If we run 2to3 we can not do this inplace:

            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)

            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')

            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')

            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')

        ei_cmd = self.get_finalized_command("egg_info")

        old_path = sys.path[:]
        old_modules = sys.modules.copy()

        try:
            sys.path.insert(0, normalize_path(ei_cmd.egg_base))
            working_set.__init__()
            add_activation_listener(lambda dist: dist.activate())
            require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
            func()
        finally:
            # Restore import state so the test run doesn't leak into the
            # calling process.
            sys.path[:] = old_path
            sys.modules.clear()
            sys.modules.update(old_modules)
            working_set.__init__()

    def run(self):
        # Fetch any build eggs needed by install_requires/tests_require
        # before running the suite.
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(self.distribution.tests_require)

        if self.test_suite:
            cmd = ' '.join(self.test_args)
            if self.dry_run:
                self.announce('skipping "unittest %s" (dry run)' % cmd)
            else:
                self.announce('running "unittest %s"' % cmd)
                self.with_project_on_sys_path(self.run_tests)

    def run_tests(self):
        # Purge modules under test from sys.modules. The test loader will
        # re-import them from the build location. Required when 2to3 is used
        # with namespace packages.
        if PY3 and getattr(self.distribution, 'use_2to3', False):
            module = self.test_args[-1].split('.')[0]
            if module in _namespace_packages:
                del_modules = []
                if module in sys.modules:
                    del_modules.append(module)
                module += '.'
                for name in sys.modules:
                    if name.startswith(module):
                        del_modules.append(name)
                list(map(sys.modules.__delitem__, del_modules))
        unittest_main(
            None, None, [unittest.__file__] + self.test_args,
            testLoader=self._resolve_as_ep(self.test_loader),
            testRunner=self._resolve_as_ep(self.test_runner),
        )

    @staticmethod
    def _resolve_as_ep(val):
        """
        Load the indicated attribute value, called, as if it were
        specified as an entry point.
        """
        if val is None:
            return
        parsed = EntryPoint.parse("x=" + val)
        return parsed.resolve()()
| apache-2.0 |
maralla/completor.vim | tests/conftest.py | 1 | 2190 | # -*- coding: utf-8 -*-
import os
import sys
import pytest
import mock
from copy import deepcopy
class VimError(Exception):
    """Stand-in for ``vim.error`` raised by the fake vim module."""
    pass


class List(object):
    """Stand-in for ``vim.List``."""
    pass
class Vars(dict):
    """Data descriptor holding default vim variables.

    Reads merge the class-level defaults into the instance's ``_vars``;
    assigning a falsy value resets ``_vars`` to a copy of the defaults.
    """

    def __set__(self, inst, value):
        if not value:
            # Falsy assignment: start over from a private copy of defaults.
            inst._vars = deepcopy(self)
        inst._vars.update(value)

    def __get__(self, inst, owner):
        merged = deepcopy(self)
        merged.update(inst._vars)
        inst._vars = merged
        return merged
class Vim(object):
    """Fake ``vim`` module exposing just enough API for the tests."""

    List = List
    error = VimError
    # Class-level defaults merged into each instance by the Vars descriptor.
    vars = Vars(completor_min_chars=2)

    def __init__(self):
        self.reset()

    def reset(self):
        # Restore pristine per-instance state between tests.
        self._vars = {}
        self.var_map = {}
        self.eval_map = {'&encoding': b'utf-8'}
        self.current = mock.Mock()
        self.current.buffer.options = {'fileencoding': b'utf-8'}
        # Canned implementations returned by Function().
        self.funcs = {
            'getbufvar': lambda nr, var: b'',
            'completor#utils#in_comment_or_string': lambda: 0,
            'completor#support_popup': lambda: 0,
            'expand': lambda x: x,
            'completor#utils#tempname': lambda: '/tmp/xxx-vim',
        }

    def eval(self, expr):
        # Mimics vim.eval() via the canned expression table.
        return self.eval_map.get(expr)

    def bindeval(self, category):
        return self.var_map.get(category, {})

    def Function(self, func_name):
        return self.funcs.get(func_name)

    class Dictionary(object):
        # vim.Dictionary behaves like a plain dict for test purposes.
        def __new__(self, **kwargs):
            return dict(kwargs)

    def command(self, cmd):
        # Ex commands are ignored by the fake.
        pass
class UltiSnips(object):
    """Stand-in for the UltiSnips manager used by the completor tests."""

    def _snips(self, base, other):
        # Only the literal prefix 'urt' yields a (single) fake snippet.
        if base == 'urt':
            return [mock.Mock(trigger='urt', description='mock snips')]
        return []
# Make the project's pythonx/ package importable and install the fakes
# before any completor code gets imported.
sys.path.append('./pythonx')
sys.modules['vim'] = Vim()
sys.modules['UltiSnips'] = mock.Mock(UltiSnips_Manager=UltiSnips())
@pytest.fixture
def vim_mod():
    """Yield the fake vim module, resetting its state after each test."""
    vim = sys.modules['vim']
    yield vim
    vim.reset()
class Buffer(list):
    """Minimal stand-in for a vim buffer: a list of lines plus metadata."""

    def __init__(self, number, name=''):
        super(Buffer, self).__init__()
        self.number = number
        self.name = name
        # Test buffers are always considered valid.
        self.valid = 1
@pytest.fixture
def create_buffer():
    """Factory fixture producing Buffer objects: (bufnr, name='') -> Buffer."""
    return lambda bufnr, name='': Buffer(bufnr, name)
# Import the completers only after the fakes above are installed.
os.environ['DISABLE_CACHE'] = '1'
import completers.common  # noqa
| mit |
Arcanemagus/plexpy | lib/urllib3/util/request.py | 205 | 3705 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b, integer_types
from ..exceptions import UnrewindableBodyError
ACCEPT_ENCODING = 'gzip,deflate'

# Sentinel distinguishing "tell() failed" from "no position recorded".
_FAILEDTELL = object()


def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    :param proxy_basic_auth:
        Colon-separated username:password string for 'proxy-authorization: basic ...'
        auth header.

    :param disable_cache:
        If ``True``, adds 'cache-control: no-cache' header.

    Example::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}

    if accept_encoding:
        # Lists are joined; strings pass through; anything else (e.g. True)
        # falls back to the default encoding list.
        if isinstance(accept_encoding, list):
            accept_encoding = ','.join(accept_encoding)
        elif not isinstance(accept_encoding, str):
            accept_encoding = ACCEPT_ENCODING
        headers['accept-encoding'] = accept_encoding

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    # Both auth headers share the same base64 "Basic" construction.
    for credentials, header_name in ((basic_auth, 'authorization'),
                                     (proxy_basic_auth, 'proxy-authorization')):
        if credentials:
            headers[header_name] = 'Basic ' + \
                b64encode(b(credentials)).decode('utf-8')

    if disable_cache:
        headers['cache-control'] = 'no-cache'

    return headers
def set_file_position(body, pos):
    """
    If a position is provided, move file to that point.
    Otherwise, we'll attempt to record a position for future use.
    """
    if pos is not None:
        rewind_body(body, pos)
        return pos

    tell = getattr(body, 'tell', None)
    if tell is None:
        # Body exposes no position; nothing to record.
        return None

    try:
        return tell()
    except (IOError, OSError):
        # This differentiates from None, allowing us to catch
        # a failed `tell()` later when trying to rewind the body.
        return _FAILEDTELL
def rewind_body(body, body_pos):
    """
    Attempt to rewind body to a certain position.
    Primarily used for request redirects and retries.

    :param body:
        File-like object that supports seek.

    :param int pos:
        Position to seek to in file.
    """
    body_seek = getattr(body, 'seek', None)
    if body_seek is not None and isinstance(body_pos, integer_types):
        try:
            body_seek(body_pos)
        except (IOError, OSError):
            raise UnrewindableBodyError("An error occurred when rewinding request "
                                        "body for redirect/retry.")
    elif body_pos is _FAILEDTELL:
        # set_file_position recorded that tell() failed earlier, so there
        # is no known position to rewind to.
        raise UnrewindableBodyError("Unable to record file position for rewinding "
                                    "request body during a redirect/retry.")
    else:
        raise ValueError("body_pos must be of type integer, "
                         "instead it was %s." % type(body_pos))
| gpl-3.0 |
quodlibet/mutagen | mutagen/oggopus.py | 1 | 5334 | # Copyright (C) 2012, 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write Ogg Opus comments.
This module handles Opus files wrapped in an Ogg bitstream. The
first Opus stream found is used.
Based on http://tools.ietf.org/html/draft-terriberry-oggopus-01
"""
__all__ = ["OggOpus", "Open", "delete"]
import struct
from io import BytesIO
from mutagen import StreamInfo
from mutagen._util import get_size, loadfile, convert_error
from mutagen._tags import PaddingInfo
from mutagen._vorbis import VCommentDict
from mutagen.ogg import OggPage, OggFileType, error as OggError
class error(OggError):
    """Base error for this module."""
    pass


class OggOpusHeaderError(error):
    """Raised when the Opus ID header is missing or malformed."""
    pass
class OggOpusInfo(StreamInfo):
    """OggOpusInfo()

    Ogg Opus stream information.

    Attributes:
        length (`float`): File length in seconds, as a float
        channels (`int`): Number of channels
    """

    length = 0
    channels = 0

    def __init__(self, fileobj):
        # Scan pages until the one carrying the "OpusHead" ID packet.
        page = OggPage(fileobj)
        while not page.packets[0].startswith(b"OpusHead"):
            page = OggPage(fileobj)

        self.serial = page.serial

        if not page.first:
            raise OggOpusHeaderError(
                "page has ID header, but doesn't start a stream")

        (version, self.channels, pre_skip, orig_sample_rate, output_gain,
         channel_map) = struct.unpack("<BBHIhB", page.packets[0][8:19])

        # Samples to skip at the start; subtracted from the length below.
        self.__pre_skip = pre_skip

        # only the higher 4 bits change on incombatible changes
        major = version >> 4
        if major != 0:
            raise OggOpusHeaderError("version %r unsupported" % major)

    def _post_tags(self, fileobj):
        # Length comes from the granule position of the stream's last page
        # (Opus granules are always in 48 kHz sample units).
        page = OggPage.find_last(fileobj, self.serial, finishing=True)
        if page is None:
            raise OggOpusHeaderError
        self.length = (page.position - self.__pre_skip) / float(48000)

    def pprint(self):
        return u"Ogg Opus, %.2f seconds" % (self.length)
class OggOpusVComment(VCommentDict):
    """Opus comments embedded in an Ogg bitstream."""

    def __get_comment_pages(self, fileobj, info):
        """Return the list of Ogg pages carrying this stream's OpusTags."""
        # find the first tags page with the right serial
        page = OggPage(fileobj)
        while ((info.serial != page.serial) or
                not page.packets[0].startswith(b"OpusTags")):
            page = OggPage(fileobj)

        # get all comment pages
        pages = [page]
        while not (pages[-1].complete or len(pages[-1].packets) > 1):
            page = OggPage(fileobj)
            if page.serial == pages[0].serial:
                pages.append(page)

        return pages

    def __init__(self, fileobj, info):
        pages = self.__get_comment_pages(fileobj, info)
        data = OggPage.to_packets(pages)[0][8:]  # Strip OpusTags
        fileobj = BytesIO(data)
        super(OggOpusVComment, self).__init__(fileobj, framing=False)
        self._padding = len(data) - self._size

        # in case the LSB of the first byte after v-comment is 1, preserve the
        # following data
        padding_flag = fileobj.read(1)
        if padding_flag and ord(padding_flag) & 0x1:
            self._pad_data = padding_flag + fileobj.read()
            self._padding = 0  # we have to preserve, so no padding
        else:
            self._pad_data = b""

    def _inject(self, fileobj, padding_func):
        """Rewrite the OpusTags packet in place, managing padding."""
        fileobj.seek(0)
        info = OggOpusInfo(fileobj)
        old_pages = self.__get_comment_pages(fileobj, info)

        packets = OggPage.to_packets(old_pages)
        vcomment_data = b"OpusTags" + self.write(framing=False)

        if self._pad_data:
            # if we have padding data to preserver we can't add more padding
            # as long as we don't know the structure of what follows
            packets[0] = vcomment_data + self._pad_data
        else:
            content_size = get_size(fileobj) - len(packets[0])  # approx
            padding_left = len(packets[0]) - len(vcomment_data)
            info = PaddingInfo(padding_left, content_size)
            new_padding = info._get_padding(padding_func)
            packets[0] = vcomment_data + b"\x00" * new_padding

        new_pages = OggPage._from_packets_try_preserve(packets, old_pages)
        OggPage.replace(fileobj, old_pages, new_pages)
class OggOpus(OggFileType):
    """OggOpus(filething)

    An Ogg Opus file.

    Arguments:
        filething (filething)

    Attributes:
        info (`OggOpusInfo`)
        tags (`mutagen._vorbis.VCommentDict`)
    """

    _Info = OggOpusInfo
    _Tags = OggOpusVComment
    _Error = OggOpusHeaderError
    _mimes = ["audio/ogg", "audio/ogg; codecs=opus"]

    info = None
    tags = None

    @staticmethod
    def score(filename, fileobj, header):
        # Heuristic: Ogg capture pattern plus an OpusHead packet in the
        # header buffer.
        return (header.startswith(b"OggS") * (b"OpusHead" in header))


Open = OggOpus
@convert_error(IOError, error)
@loadfile(method=False, writable=True)
def delete(filething):
    """ delete(filething)

    Arguments:
        filething (filething)
    Raises:
        mutagen.MutagenError

    Remove tags from a file.
    """

    t = OggOpus(filething)
    filething.fileobj.seek(0)
    t.delete(filething)
| gpl-2.0 |
kun--hust/SDSCloud | swift/container/replicator.py | 8 | 11266 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import itertools
import time
from collections import defaultdict
from eventlet import Timeout
from swift.container.backend import ContainerBroker, DATADIR
from swift.container.reconciler import (
MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
get_reconciler_container_name, get_row_to_q_entry_translator)
from swift.common import db_replicator
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import DeviceUnavailable
from swift.common.http import is_success
from swift.common.db import DatabaseAlreadyExists
from swift.common.utils import (json, Timestamp, hash_path,
storage_directory, quorum_size)
class ContainerReplicator(db_replicator.Replicator):
server_type = 'container'
brokerclass = ContainerBroker
datadir = DATADIR
default_port = 6001
def report_up_to_date(self, full_info):
reported_key_map = {
'reported_put_timestamp': 'put_timestamp',
'reported_delete_timestamp': 'delete_timestamp',
'reported_bytes_used': 'bytes_used',
'reported_object_count': 'count',
}
for reported, value_key in reported_key_map.items():
if full_info[reported] != full_info[value_key]:
return False
return True
def _gather_sync_args(self, replication_info):
parent = super(ContainerReplicator, self)
sync_args = parent._gather_sync_args(replication_info)
if len(POLICIES) > 1:
sync_args += tuple(replication_info[k] for k in
('status_changed_at', 'count',
'storage_policy_index'))
return sync_args
def _handle_sync_response(self, node, response, info, broker, http):
parent = super(ContainerReplicator, self)
if is_success(response.status):
remote_info = json.loads(response.data)
if incorrect_policy_index(info, remote_info):
status_changed_at = Timestamp(time.time())
broker.set_storage_policy_index(
remote_info['storage_policy_index'],
timestamp=status_changed_at.internal)
sync_timestamps = ('created_at', 'put_timestamp',
'delete_timestamp')
if any(info[key] != remote_info[key] for key in sync_timestamps):
broker.merge_timestamps(*(remote_info[key] for key in
sync_timestamps))
rv = parent._handle_sync_response(
node, response, info, broker, http)
return rv
def find_local_handoff_for_part(self, part):
"""
Look through devices in the ring for the first handoff device that was
identified during job creation as available on this node.
:returns: a node entry from the ring
"""
nodes = self.ring.get_part_nodes(part)
more_nodes = self.ring.get_more_nodes(part)
for node in itertools.chain(nodes, more_nodes):
if node['id'] in self._local_device_ids:
return node
return None
def get_reconciler_broker(self, timestamp):
"""
Get a local instance of the reconciler container broker that is
appropriate to enqueue the given timestamp.
:param timestamp: the timestamp of the row to be enqueued
:returns: a local reconciler broker
"""
container = get_reconciler_container_name(timestamp)
if self.reconciler_containers and \
container in self.reconciler_containers:
return self.reconciler_containers[container][1]
account = MISPLACED_OBJECTS_ACCOUNT
part = self.ring.get_part(account, container)
node = self.find_local_handoff_for_part(part)
if not node:
raise DeviceUnavailable(
'No mounted devices found suitable to Handoff reconciler '
'container %s in partition %s' % (container, part))
hsh = hash_path(account, container)
db_dir = storage_directory(DATADIR, part, hsh)
db_path = os.path.join(self.root, node['device'], db_dir, hsh + '.db')
broker = ContainerBroker(db_path, account=account, container=container)
if not os.path.exists(broker.db_file):
try:
broker.initialize(timestamp, 0)
except DatabaseAlreadyExists:
pass
if self.reconciler_containers is not None:
self.reconciler_containers[container] = part, broker, node['id']
return broker
def feed_reconciler(self, container, item_list):
"""
Add queue entries for rows in item_list to the local reconciler
container database.
:param container: the name of the reconciler container
:param item_list: the list of rows to enqueue
:returns: True if successfully enqueued
"""
try:
reconciler = self.get_reconciler_broker(container)
except DeviceUnavailable as e:
self.logger.warning('DeviceUnavailable: %s', e)
return False
self.logger.debug('Adding %d objects to the reconciler at %s',
len(item_list), reconciler.db_file)
try:
reconciler.merge_items(item_list)
except (Exception, Timeout):
self.logger.exception('UNHANDLED EXCEPTION: trying to merge '
'%d items to reconciler container %s',
len(item_list), reconciler.db_file)
return False
return True
def dump_to_reconciler(self, broker, point):
"""
Look for object rows for objects updates in the wrong storage policy
in broker with a ``ROWID`` greater than the rowid given as point.
:param broker: the container broker with misplaced objects
:param point: the last verified ``reconciler_sync_point``
:returns: the last successful enqueued rowid
"""
max_sync = broker.get_max_row()
misplaced = broker.get_misplaced_since(point, self.per_diff)
if not misplaced:
return max_sync
translator = get_row_to_q_entry_translator(broker)
errors = False
low_sync = point
while misplaced:
batches = defaultdict(list)
for item in misplaced:
container = get_reconciler_container_name(item['created_at'])
batches[container].append(translator(item))
for container, item_list in batches.items():
success = self.feed_reconciler(container, item_list)
if not success:
errors = True
point = misplaced[-1]['ROWID']
if not errors:
low_sync = point
misplaced = broker.get_misplaced_since(point, self.per_diff)
return low_sync
def _post_replicate_hook(self, broker, info, responses):
if info['account'] == MISPLACED_OBJECTS_ACCOUNT:
return
point = broker.get_reconciler_sync()
if not broker.has_multiple_policies() and info['max_row'] != point:
broker.update_reconciler_sync(info['max_row'])
return
max_sync = self.dump_to_reconciler(broker, point)
success = responses.count(True) >= quorum_size(len(responses))
if max_sync > point and success:
# to be safe, only slide up the sync point with a quorum on
# replication
broker.update_reconciler_sync(max_sync)
def delete_db(self, broker):
"""
Ensure that reconciler databases are only cleaned up at the end of the
replication run.
"""
if (self.reconciler_cleanups is not None and
broker.account == MISPLACED_OBJECTS_ACCOUNT):
# this container shouldn't be here, make sure it's cleaned up
self.reconciler_cleanups[broker.container] = broker
return
return super(ContainerReplicator, self).delete_db(broker)
    def replicate_reconcilers(self):
        """
        Ensure any items merged to reconciler containers during replication
        are pushed out to correct nodes and any reconciler containers that do
        not belong on this node are removed.
        """
        self.logger.info('Replicating %d reconciler containers',
                         len(self.reconciler_containers))
        for part, reconciler, node_id in self.reconciler_containers.values():
            self.cpool.spawn_n(
                self._replicate_object, part, reconciler.db_file, node_id)
        self.cpool.waitall()
        # wipe out the cache to disable bypass in delete_db
        cleanups = self.reconciler_cleanups
        self.reconciler_cleanups = self.reconciler_containers = None
        self.logger.info('Cleaning up %d reconciler containers',
                         len(cleanups))
        # With the cleanups cache cleared above, delete_db now actually
        # deletes these misplaced reconciler databases.
        for reconciler in cleanups.values():
            self.cpool.spawn_n(self.delete_db, reconciler)
        self.cpool.waitall()
        self.logger.info('Finished reconciler replication')
def run_once(self, *args, **kwargs):
self.reconciler_containers = {}
self.reconciler_cleanups = {}
rv = super(ContainerReplicator, self).run_once(*args, **kwargs)
if any([self.reconciler_containers, self.reconciler_cleanups]):
self.replicate_reconcilers()
return rv
class ContainerReplicatorRpc(db_replicator.ReplicatorRpc):
    """RPC handler for container replication; extends the generic database
    ReplicatorRpc with container-specific sync fields and storage-policy
    reconciliation."""

    def _parse_sync_args(self, args):
        # Newer senders append three container-specific fields after the
        # base sync args; older senders omit them.
        parent = super(ContainerReplicatorRpc, self)
        remote_info = parent._parse_sync_args(args)
        if len(args) > 9:
            remote_info['status_changed_at'] = args[7]
            remote_info['count'] = args[8]
            remote_info['storage_policy_index'] = args[9]
        return remote_info

    def _get_synced_replication_info(self, broker, remote_info):
        """
        Sync the remote_info storage_policy_index if needed and return the
        newly synced replication info.

        :param broker: the database broker
        :param remote_info: the remote replication info

        :returns: local broker replication info
        """
        info = broker.get_replication_info()
        if incorrect_policy_index(info, remote_info):
            # The local policy index lost the comparison: adopt the remote
            # storage policy and re-read our replication info.
            status_changed_at = Timestamp(time.time()).internal
            broker.set_storage_policy_index(
                remote_info['storage_policy_index'],
                timestamp=status_changed_at)
            info = broker.get_replication_info()
        return info
| apache-2.0 |
xzturn/tensorflow | tensorflow/python/tpu/tensor_tracer_report.py | 4 | 16807 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Tensor Tracer report generation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tensor_tracer_pb2
# Prefix stamped on every line written to the plain-text report/log.
_TRACER_LOG_PREFIX = ' [>>>TT>>>]'
# Markers delimiting the named sections of the plain-text report.
_MARKER_SECTION_BEGIN = '!!!!!!! section-begin:'
_MARKER_SECTION_END = '!!!!!!! section-end:'
# Section names used with the markers above.
_SECTION_NAME_CONFIG = 'configuration'
_SECTION_NAME_REASON = 'reason'
_SECTION_NAME_OP_LIST = 'op-list'
_SECTION_NAME_TENSOR_LIST = 'tensor-list'
_SECTION_NAME_CACHE_INDEX_MAP = 'cache-index-map'
_SECTION_NAME_GRAPH = 'graph'
_SECTION_NAME_TENSOR_TRACER_CHECKPOINT = 'tensor_tracer_checkpoint'
# Field labels used inside the configuration and list sections.
_FIELD_NAME_VERSION = 'version:'
_FIELD_NAME_DEVICE = 'device:'
_FIELD_NAME_TRACE_MODE = 'trace-mode:'
_FIELD_NAME_SUBMODE = 'submode:'
_FIELD_NAME_NUM_REPLICAS = 'num-replicas:'
_FIELD_NAME_NUM_REPLICAS_PER_HOST = 'num-replicas-per-host:'
_FIELD_NAME_NUM_HOSTS = 'num-hosts:'
_FIELD_NAME_NUM_OPS = 'number-of-ops:'
_FIELD_NAME_NUM_TENSORS = 'number-of-tensors:'
_FIELD_NAME_NUM_CACHE_INDICES = 'number-of-indices:'
_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED = 'topological-sort-succeed:'
# Version tag written into reports produced by this code.
_CURRENT_VERSION = 'use-outside-compilation'
# File name of the binary (proto) report written under trace_dir.
_TT_REPORT_PROTO = 'tensor_tracer_report.report_pb'
def topological_sort(g):
  """Performs topological sort on the given graph.

  Args:
     g: the graph.

  Returns:
    A pair ``(contains_cycle, ops)``. ``contains_cycle`` is True when a
    cycle was found (the sort failed), in which case ``ops`` is the set of
    ops still holding unresolved incoming edges (the cycle and everything
    downstream of it). When no cycle is found, ``contains_cycle`` is False
    and ``ops`` is the topologically sorted list of all ops in the graph.
  """
  def _is_loop_edge(op):
    """Returns true if the op is the end of a while-loop creating a cycle."""
    return op.type in ['NextIteration']

  def _in_op_degree(op):
    """Returns the number of incoming edges to the given op.

    The edge calculation skips the edges that come from 'NextIteration' ops.
    NextIteration creates a cycle in the graph. We break cycles by treating
    this op as 'sink' and ignoring all outgoing edges from it.
    Args:
      op: Tf.Operation
    Returns:
      the number of incoming edges.
    """
    count = 0
    # Note: the loop variable must not shadow the parameter `op`.
    for in_op in op.control_inputs + [in_tensor.op for in_tensor in op.inputs]:
      if not _is_loop_edge(in_op):
        count += 1
    return count

  sorted_ops = []
  op_in_degree = {op: _in_op_degree(op) for op in g.get_operations()}
  # Kahn's algorithm: repeatedly consume ops with no unresolved inputs.
  frontier = [op for (op, degree) in op_in_degree.items() if degree == 0]
  frontier.sort(key=lambda op: op.name)
  while frontier:
    op = frontier.pop()
    # Remove the op from graph, and remove its outgoing edges.
    sorted_ops.append(op)
    if _is_loop_edge(op):
      continue
    # pylint: disable=protected-access
    consumers = list(op._control_outputs)
    # pylint: enable=protected-access
    for out_tensor in op.outputs:
      consumers += [consumer_op for consumer_op in out_tensor.consumers()]
    consumers.sort(key=lambda op: op.name)
    for consumer in consumers:
      # For each deleted edge shift the bucket of the vertex.
      op_in_degree[consumer] -= 1
      if op_in_degree[consumer] == 0:
        frontier.append(consumer)
      if op_in_degree[consumer] < 0:
        raise ValueError('consumer:%s degree mismatch'%consumer.name)
  left_ops = {op for (op, degree) in op_in_degree.items() if degree > 0}
  if left_ops:
    return (True, left_ops)
  else:
    assert len(g.get_operations()) == len(sorted_ops)
    return (False, sorted_ops)
class TensorTracerConfig(object):
  """Tensor Tracer config object."""

  def __init__(self):
    # Tracer version tag stamped into every report.
    self.version = _CURRENT_VERSION
    # The following topology fields are filled in by the tracer once the
    # run environment is known.
    self.device_type = None
    self.num_replicas = None
    self.num_replicas_per_host = None
    self.num_hosts = None
class TensorTraceOrder(object):
  """Class that is responsible from storing the trace-id of the tensors."""

  def __init__(self, graph_order, traced_tensors):
    self.graph_order = graph_order
    self.traced_tensors = traced_tensors
    self._create_tensor_maps()

  def _create_tensor_maps(self):
    """Creates tensor to cache id maps."""
    self.tensorname_to_cache_idx = {}
    self.cache_idx_to_tensor_idx = []
    tensor_to_idx = self.graph_order.tensor_to_idx
    for traced_tensor in self.traced_tensors:
      name = traced_tensor.name
      if name in self.tensorname_to_cache_idx:
        raise ValueError(
            'Tensor name %s should not be already in '
            'tensorname_to_cache_idx'%name)
      if name not in tensor_to_idx:
        raise ValueError(
            'Tensor name %s is not in the tensor_to_idx'%name)
      # The next free cache slot is the number of tensors mapped so far;
      # record both directions of the mapping.
      self.tensorname_to_cache_idx[name] = len(self.tensorname_to_cache_idx)
      self.cache_idx_to_tensor_idx.append(tensor_to_idx[name])
    if len(self.tensorname_to_cache_idx) != len(
        self.cache_idx_to_tensor_idx):
      raise RuntimeError('len(self.tensorname_to_cache_idx) != '
                         'len(self.cache_idx_to_tensor_idx')
def sort_tensors_and_ops(graph):
  """Returns a wrapper that has consistent tensor and op orders."""
  graph_wrapper = collections.namedtuple(
      'GraphWrapper',
      ['graph', 'operations', 'op_to_idx', 'tensors', 'tensor_to_idx',
       'contains_cycle', 'topological_order_or_cycle'])
  contains_cycle, topological_order_or_cycle = topological_sort(graph)
  # Fall back to the graph's native op order when a cycle prevented sorting.
  operations = (graph.get_operations() if contains_cycle
                else topological_order_or_cycle)
  op_to_idx = {}
  for index, op in enumerate(operations):
    op_to_idx[op.name] = index
  # Tensors are ordered by their producing op's position.
  tensors = []
  for op in operations:
    tensors.extend(op.outputs)
  tensor_to_idx = {tensor.name: index for index, tensor in enumerate(tensors)}
  return graph_wrapper(graph=graph, operations=operations, op_to_idx=op_to_idx,
                       tensors=tensors, tensor_to_idx=tensor_to_idx,
                       contains_cycle=contains_cycle,
                       topological_order_or_cycle=topological_order_or_cycle)
class OpenReportFile(object):
  """Context manager for writing report file."""

  def __init__(self, tt_parameters):
    """Opens the report file if a path is configured, otherwise no-ops.

    Args:
      tt_parameters: TTParameters; only ``report_file_path`` is read.
    """
    if not tt_parameters.report_file_path:
      self._report_file = None
      return
    # Any IOError from opening the file propagates to the caller unchanged.
    # (The previous ``except IOError as e: raise e`` was a useless re-raise
    # that only obscured the traceback origin.)
    self._report_file = gfile.Open(tt_parameters.report_file_path, 'w')

  def __enter__(self):
    # May be None when no report file path was configured; callers handle it.
    return self._report_file

  def __exit__(self, unused_type, unused_value, unused_traceback):
    if self._report_file:
      self._report_file.close()
class TTReportHandle(object):
  """Utility class responsible from creating a tensor tracer report."""

  def __init__(self):
    # Maps op/tensor name -> human-readable explanation recorded via the
    # instrument*() methods; emitted in the 'reason' section and proto.
    self.instrument_records = {}
    self._report_file = None

  def instrument(self, name, explanation):
    self.instrument_records[name] = explanation

  def instrument_op(self, op, explanation):
    self.instrument(op.name, explanation)

  def instrument_tensor(self, tensor, explanation):
    self.instrument(tensor.name, explanation)

  def create_report_proto(self, tt_config, tt_parameters, tensor_trace_order,
                          tensor_trace_points, collected_signature_types):
    """Creates and returns a proto that stores tensor tracer configuration.

    Args:
      tt_config: TensorTracerConfig object holding information about the run
        environment (device, # cores, # hosts), and tensor tracer version
        information.
      tt_parameters: TTParameters objects storing the user provided parameters
        for tensor tracer.
      tensor_trace_order: TensorTraceOrder object storing a topological order of
        the graph.
      tensor_trace_points: Programmatically added trace_points/checkpoints.
      collected_signature_types: The signature types collected, e.g., norm,
        max, min, mean...

    Returns:
      TensorTracerReport proto.
    """
    report = tensor_tracer_pb2.TensorTracerReport()
    report.config.version = tt_config.version
    report.config.device = tt_config.device_type
    report.config.num_cores = tt_config.num_replicas
    report.config.num_hosts = tt_config.num_hosts
    report.config.num_cores_per_host = tt_config.num_replicas_per_host
    for core in tt_parameters.included_cores:
      report.config.included_cores.append(core)
    report.config.submode = tt_parameters.submode
    report.config.trace_mode = tt_parameters.trace_mode
    # Sort signature names by their collection index (the dict value) so
    # the proto preserves the cache layout order of the signatures.
    for signature_name, _ in sorted(collected_signature_types.items(),
                                    key=lambda x: x[1]):
      report.config.signatures.append(signature_name)

    tf_graph = tensor_trace_order.graph_order.graph
    report.graphdef.CopyFrom(tf_graph.as_graph_def())
    for tensor in tensor_trace_order.graph_order.tensors:
      tensor_def = tensor_tracer_pb2.TensorTracerReport.TracedTensorDef()
      tensor_def.name = tensor.name
      if tensor.name in tensor_trace_order.tensorname_to_cache_idx:
        tensor_def.is_traced = True
        tensor_def.cache_index = (
            tensor_trace_order.tensorname_to_cache_idx[tensor.name])
      else:
        tensor_def.is_traced = False
      if tensor.name in tensor_trace_points:
        tensor_def.trace_point_name = tensor_trace_points[tensor.name]
      # Prefer an explanation recorded for the tensor itself; fall back to
      # one recorded for its producing op.
      if tensor.name in self.instrument_records:
        tensor_def.explanation = self.instrument_records[tensor.name]
      elif tensor.op.name in self.instrument_records:
        tensor_def.explanation = self.instrument_records[tensor.op.name]
      report.tensordef[tensor.name].CopyFrom(tensor_def)
    return report

  def write_report_proto(self, report_proto, tt_parameters):
    """Writes the given report proto under trace_dir."""
    gfile.MakeDirs(tt_parameters.trace_dir)
    report_path = os.path.join(tt_parameters.trace_dir, _TT_REPORT_PROTO)
    with gfile.GFile(report_path, 'wb') as f:
      f.write(report_proto.SerializeToString())

  def create_report(self, tt_config, tt_parameters,
                    tensor_trace_order, tensor_trace_points):
    """Creates a report file and writes the trace information."""
    # Each _write_* helper emits one delimited section of the report; when
    # no report file path is configured the lines go to the log instead.
    with OpenReportFile(tt_parameters) as self._report_file:
      self._write_config_section(tt_config, tt_parameters)
      self._write_op_list_section(tensor_trace_order.graph_order)
      self._write_tensor_list_section(tensor_trace_order.graph_order)
      self._write_trace_points(tensor_trace_points)
      self._write_cache_index_map_section(tensor_trace_order)
      self._write_reason_section()
      self._write_graph_section(tensor_trace_order.graph_order)

  def _write_trace_points(self, tensor_trace_points):
    """Writes the list of checkpoints."""
    self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
                                  _SECTION_NAME_TENSOR_TRACER_CHECKPOINT))
    for (tensor, checkpoint_name) in tensor_trace_points:
      self._write_report('%s %s\n'%(tensor.name, checkpoint_name))
    self._write_report('%s %s\n'%(_MARKER_SECTION_END,
                                  _SECTION_NAME_TENSOR_TRACER_CHECKPOINT))

  def _write_report(self, content):
    """Writes the given content to the report."""
    line = '%s %s'%(_TRACER_LOG_PREFIX, content)
    # Fall back to the info log when no report file was opened.
    if self._report_file:
      self._report_file.write(line)
    else:
      logging.info(line)

  def _write_config_section(self, tt_config, tt_parameters):
    """Writes the config section of the report."""
    self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_CONFIG))
    self._write_report('%s %s\n'%(_FIELD_NAME_VERSION, tt_config.version))
    self._write_report('%s %s\n'%(_FIELD_NAME_DEVICE, tt_config.device_type))
    self._write_report('%s %s\n'%(_FIELD_NAME_TRACE_MODE,
                                  tt_parameters.trace_mode))
    self._write_report('%s %s\n'%(_FIELD_NAME_SUBMODE,
                                  tt_parameters.submode))
    # When only a subset of cores is traced, report that count as the
    # replica count instead of the full topology.
    if tt_parameters.included_cores:
      self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS,
                                    len(tt_parameters.included_cores)))
    else:
      self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS,
                                    tt_config.num_replicas))
    self._write_report('%s %s\n'%(_FIELD_NAME_NUM_REPLICAS_PER_HOST,
                                  tt_config.num_replicas_per_host))
    self._write_report('%s %s\n'%(_FIELD_NAME_NUM_HOSTS, tt_config.num_hosts))
    self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_CONFIG))

  def _write_reason_section(self):
    """Writes the reason section of the report."""
    self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_REASON))
    for key in sorted(self.instrument_records):
      self._write_report('"%s" %s\n'%(key, self.instrument_records[key]))
    self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_REASON))

  def _write_op_list_section(self, graph_order):
    """Writes the Op-list section of the report."""
    self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_OP_LIST))
    self._write_report('%s %d\n'%(_FIELD_NAME_NUM_OPS,
                                  len(graph_order.operations)))
    for i in range(0, len(graph_order.operations)):
      op = graph_order.operations[i]
      # Line format: <op idx> "<name>" <type> <output tensor indices...>
      line = '%d "%s" %s'%(i, op.name, op.type)
      for out_tensor in op.outputs:
        if out_tensor.name not in graph_order.tensor_to_idx:
          raise ValueError(
              'out_tensor %s is not in tensor_to_idx'%out_tensor.name)
        line += ' %d'%graph_order.tensor_to_idx[out_tensor.name]
      line += '\n'
      self._write_report(line)
    self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_OP_LIST))

  def _write_tensor_list_section(self, graph_order):
    """Writes the tensor-list section of the report."""
    self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
                                  _SECTION_NAME_TENSOR_LIST))
    self._write_report('%s %d\n'%(_FIELD_NAME_NUM_TENSORS,
                                  len(graph_order.tensors)))
    for i in range(0, len(graph_order.tensors)):
      tensor = graph_order.tensors[i]
      # Line format: <tensor idx> "<name>" <consumer op indices...>
      line = '%d "%s"'%(i, tensor.name)
      consumers = tensor.consumers()
      consumers.sort(key=lambda op: op.name)
      for consumer_op in consumers:
        if consumer_op.name not in graph_order.op_to_idx:
          raise ValueError(
              'consumer_op %s is not in op_to_idx'%consumer_op.name)
        line += ' %d'%graph_order.op_to_idx[consumer_op.name]
      line += '\n'
      self._write_report(line)
    self._write_report('%s %s\n'%(_MARKER_SECTION_END,
                                  _SECTION_NAME_TENSOR_LIST))

  def _write_cache_index_map_section(self, tensor_trace_order):
    """Writes the mapping from cache index to tensor index to the report."""
    self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN,
                                  _SECTION_NAME_CACHE_INDEX_MAP))
    self._write_report('%s %d\n'%(
        _FIELD_NAME_NUM_CACHE_INDICES,
        len(tensor_trace_order.cache_idx_to_tensor_idx)))
    for cache_idx in range(0, len(tensor_trace_order.cache_idx_to_tensor_idx)):
      tensor_idx = tensor_trace_order.cache_idx_to_tensor_idx[cache_idx]
      line = '%d %d\n'%(cache_idx, tensor_idx)
      self._write_report(line)
    self._write_report('%s %s\n'%(_MARKER_SECTION_END,
                                  _SECTION_NAME_CACHE_INDEX_MAP))

  def _write_graph_section(self, graph_order):
    """Writes the graph section of the report."""
    self._write_report('%s %s\n'%(_MARKER_SECTION_BEGIN, _SECTION_NAME_GRAPH))
    self._write_report('%s %s\n'%(_FIELD_NAME_TOPOLOGICAL_SORT_SUCCEED,
                                  not graph_order.contains_cycle))
    l = list(graph_order.topological_order_or_cycle)
    for i in range(0, len(l)):
      self._write_report('%d "%s"\n'%(i, l[i].name))
    self._write_report('%s %s\n'%(_MARKER_SECTION_END, _SECTION_NAME_GRAPH))
| apache-2.0 |
pbs/django-filer | filer/utils/generate_filename.py | 1 | 1841 | import datetime
import os
import filer
from filer.utils.files import get_valid_filename
from django.core.files.uploadedfile import UploadedFile
from django.utils.encoding import smart_str
def by_date(instance, filename):
    """Upload-to strategy: nest the file under today's YYYY/MM/DD directory."""
    date_dir = str(datetime.datetime.now().strftime(smart_str("%Y/%m/%d")))
    return os.path.join(date_dir, get_valid_filename(filename))
class prefixed_factory(object):
    """Callable upload-to factory that optionally prepends a path prefix.

    Wraps another ``upload_to`` value -- either a plain string or a callable
    taking ``(instance, filename)`` -- and joins ``prefix`` in front of its
    result when a prefix was given.
    """

    def __init__(self, upload_to, prefix):
        self.upload_to = upload_to
        self.prefix = prefix

    def __call__(self, instance, filename):
        target = (self.upload_to(instance, filename)
                  if callable(self.upload_to) else self.upload_to)
        if self.prefix:
            return os.path.join(self.prefix, target)
        return target
def _is_in_memory(file_):
    # True for uploads that only exist in memory/temp storage (not yet
    # persisted as a filer file).
    return isinstance(file_, UploadedFile)


def _construct_logical_folder_path(filer_file):
    # Joins the names of every folder on the file's logical path,
    # e.g. "root/sub/leaf".
    return os.path.join(*(folder.name for folder in filer_file.logical_path))


def _goes_to_clipboard(instance):
    # A file belongs in the clipboard area when it has no folder yet, or when
    # it is brand new (no pk) and its payload is still an in-memory upload.
    return instance.folder is None or (
        instance.pk is None and _is_in_memory(instance.file.file))
def by_path(instance, filename):
    """Upload-to strategy mirroring the file's logical folder structure.

    New/unsaved uploads land under the clipboard area, namespaced by the
    owner's username; files already assigned to a folder are stored under
    the folder's logical path using the file's ``actual_name``.
    """
    if _goes_to_clipboard(instance):
        # Imported lazily to avoid an import cycle with filer.models.
        from filer.models import Clipboard
        return os.path.join(
            Clipboard.folder_name,
            instance.owner.username if instance.owner else '_missing_owner',
            filename)
    else:
        return os.path.join(
            _construct_logical_folder_path(instance),
            instance.actual_name)
def get_trash_path(instance):
    """Builds the storage path used when a filer file is moved to trash."""
    # Layout: <trash prefix>/<pk>/<logical path>; the pk keeps trashed files
    # with identical logical paths from colliding.
    return os.path.join(
        filer.settings.FILER_TRASH_PREFIX,
        "%s" % instance.pk,
        instance.pretty_logical_path.strip('/'))
| bsd-3-clause |
amith01994/intellij-community | python/helpers/pydev/pydev_monkey.py | 32 | 16142 | import os
import sys
import pydev_log
import traceback
pydev_src_dir = os.path.dirname(__file__)
from pydevd_constants import xrange
def is_python(path):
    """Guesses whether *path* points at a Python-like interpreter binary."""
    # Strip one pair of surrounding shell quote characters, if present.
    if path.endswith("'") or path.endswith('"'):
        path = path[1:-1]
    executable = os.path.basename(path).lower()
    return any(interpreter in executable
               for interpreter in ('python', 'jython', 'pypy'))
def patch_args(args):
    """Rewrites a subprocess argv so the child runs under the debugger.

    When ``args[0]`` is a python interpreter, either rewrites a ``-c``
    inline command to call pydevd.settrace() first, or inserts this
    process' own pydevd bootstrap arguments (taken from
    ``sys.original_argv`` up to and including '--file') before the child's
    script. Non-python commands and any internal failure return the
    original args unchanged.
    """
    try:
        pydev_log.debug("Patching args: %s"% str(args))

        import sys
        new_args = []
        i = 0
        if len(args) == 0:
            return args

        if is_python(args[0]):
            try:
                indC = args.index('-c')
            except ValueError:
                indC = -1

            if indC != -1:
                # "python -c <cmd>": prepend the settrace() bootstrap to the
                # inline command itself and return immediately.
                import pydevd
                host, port = pydevd.dispatch()

                if port is not None:
                    new_args.extend(args)
                    new_args[indC + 1] = ("import sys; sys.path.append(r'%s'); import pydevd; "
                        "pydevd.settrace(host='%s', port=%s, suspend=False, trace_only_current_thread=False, patch_multiprocessing=True); %s") % (
                        pydev_src_dir, host, port, args[indC + 1])
                    return new_args
            else:
                # Keep the interpreter; the pydevd arguments are inserted
                # between it and the script below.
                new_args.append(args[0])
        else:
            pydev_log.debug("Process is not python, returning.")
            return args

        i = 1
        # Carry over interpreter flags (e.g. -u) before the script name.
        while i < len(args):
            if args[i].startswith('-'):
                new_args.append(args[i])
            else:
                break
            i += 1

        # NOTE(review): if every remaining arg starts with '-', args[i]
        # raises IndexError here; it is swallowed by the broad except below.
        if args[i].endswith('pydevd.py'): #no need to add pydevd twice
            return args

        # Re-use this process' own pydevd invocation (everything up to and
        # including '--file') so the child connects back to the same IDE.
        for x in sys.original_argv:
            if sys.platform == "win32" and not x.endswith('"'):
                arg = '"%s"' % x
            else:
                arg = x
            new_args.append(arg)
            if x == '--file':
                break

        while i < len(args):
            new_args.append(args[i])
            i += 1

        return new_args
    except:
        # Patching must never break the spawn itself; fall back to the
        # unmodified command line on any error.
        traceback.print_exc()
        return args
def args_to_str(args):
    """Joins args into a single command line, double-quoting each one.

    Arguments that already arrive fully quoted are passed through as-is;
    embedded double quotes in the rest are backslash-escaped.
    """
    quoted = []
    for arg in args:
        if arg.startswith('"') and arg.endswith('"'):
            quoted.append(arg)
        else:
            quoted.append('"%s"' % arg.replace('"', '\\"'))
    return ' '.join(quoted)
def str_to_args_windows(args):
    """Splits a Windows command-line string into a list of arguments.

    Follows the MSVC runtime parsing rules, see
    http://msdn.microsoft.com/en-us/library/a1y7w461.aspx

    Args:
        args: the raw command-line string.
    Returns:
        list of individual argument strings.
    """
    result = []

    DEFAULT = 0
    ARG = 1
    IN_DOUBLE_QUOTE = 2

    state = DEFAULT
    backslashes = 0
    buf = ''

    args_len = len(args)
    # An explicit while-loop index is required: the previous version used
    # ``for i in xrange(...)`` and did ``i += 1`` to skip the second quote
    # of a "" pair, which had no effect because the for statement rebinds i
    # every iteration -- the second quote was then reprocessed and corrupted
    # the parser state (e.g. '"a""b""c"' parsed as ['a"bc']).
    i = 0
    while i < args_len:
        ch = args[i]
        i += 1
        if ch == '\\':
            backslashes += 1
            continue
        elif backslashes != 0:
            if ch == '"':
                # 2n backslashes before a quote collapse to n backslashes.
                while backslashes >= 2:
                    backslashes -= 2
                    buf += '\\'
                if backslashes == 1:
                    # Odd backslash escapes the quote itself.
                    if state == DEFAULT:
                        state = ARG
                    buf += '"'
                    backslashes = 0
                    continue
                # else fall through to switch
            else:
                # false alarm, treat passed backslashes literally...
                if state == DEFAULT:
                    state = ARG
                while backslashes > 0:
                    backslashes -= 1
                    buf += '\\'
                # fall through to switch
        if ch in (' ', '\t'):
            if state == DEFAULT:
                # skip
                continue
            elif state == ARG:
                # Whitespace terminates an unquoted argument.
                state = DEFAULT
                result.append(buf)
                buf = ''
                continue
            # Inside double quotes whitespace is literal: fall through.
        if state in (DEFAULT, ARG):
            if ch == '"':
                state = IN_DOUBLE_QUOTE
            else:
                state = ARG
                buf += ch
        elif state == IN_DOUBLE_QUOTE:
            if ch == '"':
                if i < args_len and args[i] == '"':
                    # Undocumented feature in Windows:
                    # Two consecutive double quotes inside a double-quoted
                    # argument are interpreted as a single double quote.
                    buf += '"'
                    i += 1  # actually consume the second quote
                elif len(buf) == 0:
                    # empty string on Windows platform. Account for bug in
                    # constructor of JDK's java.lang.ProcessImpl.
                    result.append("\"\"")
                    state = DEFAULT
                else:
                    state = ARG
            else:
                buf += ch
        else:
            raise RuntimeError('Illegal condition')

    if len(buf) > 0 or state != DEFAULT:
        result.append(buf)

    return result
def patch_arg_str_win(arg_str):
    """Windows variant of patch_args for a single command-line string.

    Splits the string with Windows quoting rules, patches the argv, and
    re-joins it; non-python command lines are returned unchanged.
    """
    # NOTE(review): an empty arg_str makes args[0] raise IndexError here --
    # confirm callers never pass an empty command line.
    args = str_to_args_windows(arg_str)
    if not is_python(args[0]):
        return arg_str
    arg_str = args_to_str(patch_args(args))
    pydev_log.debug("New args: %s" % arg_str)
    return arg_str
def monkey_patch_module(module, funcname, create_func):
    """Replaces module.funcname with create_func('original_<funcname>').

    The original attribute is stashed under 'original_<funcname>' (only on
    the first patch, so re-patching never clobbers the real original). If
    the module does not expose funcname at all, nothing happens.
    """
    if not hasattr(module, funcname):
        return
    original_name = 'original_' + funcname
    if not hasattr(module, original_name):
        setattr(module, original_name, getattr(module, funcname))
    setattr(module, funcname, create_func(original_name))
def monkey_patch_os(funcname, create_func):
    # Convenience wrapper: patch a function on the os module itself.
    monkey_patch_module(os, funcname, create_func)


def warn_multiproc():
    # Logged once per message when a subprocess is spawned while subprocess
    # debugging is disabled.
    import pydev_log

    pydev_log.error_once(
        "pydev debugger: New process is launching (breakpoints won't work in the new process).\n"
        "pydev debugger: To debug that process please enable 'Attach to subprocess automatically while debugging?' option in the debugger settings.\n")


def create_warn_multiproc(original_name):
    # Factory: wraps an os.* spawning function so it warns before delegating
    # to the saved original (used when subprocess debugging is off).
    def new_warn_multiproc(*args):
        import os

        warn_multiproc()
        return getattr(os, original_name)(*args)
    return new_warn_multiproc
# The create_* factories below build replacements for the os.exec* family.
# Each replacement patches the command line (via patch_args) so the child
# process starts under the debugger, then delegates to the saved original
# (looked up by the name monkey_patch_module stashed it under).

def create_execl(original_name):
    def new_execl(path, *args):
        '''
        os.execl(path, arg0, arg1, ...)
        os.execle(path, arg0, arg1, ..., env)
        os.execlp(file, arg0, arg1, ...)
        os.execlpe(file, arg0, arg1, ..., env)
        '''
        import os
        args = patch_args(args)
        return getattr(os, original_name)(path, *args)
    return new_execl


def create_execv(original_name):
    def new_execv(path, args):
        '''
        os.execv(path, args)
        os.execvp(file, args)
        '''
        import os
        return getattr(os, original_name)(path, patch_args(args))
    return new_execv


def create_execve(original_name):
    """
    os.execve(path, args, env)
    os.execvpe(file, args, env)
    """
    def new_execve(path, args, env):
        import os
        return getattr(os, original_name)(path, patch_args(args), env)
    return new_execve
# Same factory pattern as the exec* wrappers above, for the os.spawn* family.

def create_spawnl(original_name):
    def new_spawnl(mode, path, *args):
        '''
        os.spawnl(mode, path, arg0, arg1, ...)
        os.spawnlp(mode, file, arg0, arg1, ...)
        '''
        import os
        args = patch_args(args)
        return getattr(os, original_name)(mode, path, *args)
    return new_spawnl


def create_spawnv(original_name):
    def new_spawnv(mode, path, args):
        '''
        os.spawnv(mode, path, args)
        os.spawnvp(mode, file, args)
        '''
        import os
        return getattr(os, original_name)(mode, path, patch_args(args))
    return new_spawnv


def create_spawnve(original_name):
    """
    os.spawnve(mode, path, args, env)
    os.spawnvpe(mode, file, args, env)
    """
    def new_spawnve(mode, path, args, env):
        import os
        return getattr(os, original_name)(mode, path, patch_args(args), env)
    return new_spawnve
def create_fork_exec(original_name):
    """
    _posixsubprocess.fork_exec(args, executable_list, close_fds, ... (13 more))
    """
    # Factory: patches the argv list handed to CPython's low-level fork_exec
    # (used by the subprocess module on POSIX).
    def new_fork_exec(args, *other_args):
        import _posixsubprocess
        args = patch_args(args)
        return getattr(_posixsubprocess, original_name)(args, *other_args)
    return new_fork_exec


def create_CreateProcess(original_name):
    """
    CreateProcess(*args, **kwargs)
    """
    # Factory: patches the command-line string handed to the Windows
    # CreateProcess API (used by subprocess on Windows).
    def new_CreateProcess(appName, commandLine, *args):
        try:
            import _subprocess
        except ImportError:
            # Python 3 renamed the module.
            import _winapi as _subprocess
        return getattr(_subprocess, original_name)(appName, patch_arg_str_win(commandLine), *args)
    return new_CreateProcess


def create_CreateProcessWarnMultiproc(original_name):
    """
    CreateProcess(*args, **kwargs)
    """
    # Factory: warning-only CreateProcess wrapper used when subprocess
    # debugging is disabled.
    def new_CreateProcess(*args):
        try:
            import _subprocess
        except ImportError:
            import _winapi as _subprocess
        warn_multiproc()
        return getattr(_subprocess, original_name)(*args)
    return new_CreateProcess


def create_fork(original_name):
    # Factory: the forked child re-attaches to the debugger before returning
    # to user code; the parent returns unchanged.
    def new_fork():
        import os
        child_process = getattr(os, original_name)() # fork
        if not child_process:
            import pydevd
            pydevd.threadingCurrentThread().__pydevd_main_thread = True
            pydevd.settrace_forked()
        return child_process
    return new_fork
def patch_new_process_functions():
    """Patches every process-spawning API so child processes inherit the
    debugger (used when 'attach to subprocess' is enabled)."""
    #os.execl(path, arg0, arg1, ...)
    #os.execle(path, arg0, arg1, ..., env)
    #os.execlp(file, arg0, arg1, ...)
    #os.execlpe(file, arg0, arg1, ..., env)
    #os.execv(path, args)
    #os.execve(path, args, env)
    #os.execvp(file, args)
    #os.execvpe(file, args, env)
    monkey_patch_os('execl', create_execl)
    monkey_patch_os('execle', create_execl)
    monkey_patch_os('execlp', create_execl)
    monkey_patch_os('execlpe', create_execl)
    monkey_patch_os('execv', create_execv)
    monkey_patch_os('execve', create_execve)
    monkey_patch_os('execvp', create_execv)
    monkey_patch_os('execvpe', create_execve)

    #os.spawnl(mode, path, ...)
    #os.spawnle(mode, path, ..., env)
    #os.spawnlp(mode, file, ...)
    #os.spawnlpe(mode, file, ..., env)
    #os.spawnv(mode, path, args)
    #os.spawnve(mode, path, args, env)
    #os.spawnvp(mode, file, args)
    #os.spawnvpe(mode, file, args, env)
    monkey_patch_os('spawnl', create_spawnl)
    monkey_patch_os('spawnle', create_spawnl)
    monkey_patch_os('spawnlp', create_spawnl)
    monkey_patch_os('spawnlpe', create_spawnl)
    monkey_patch_os('spawnv', create_spawnv)
    monkey_patch_os('spawnve', create_spawnve)
    monkey_patch_os('spawnvp', create_spawnv)
    monkey_patch_os('spawnvpe', create_spawnve)

    if sys.platform != 'win32':
        monkey_patch_os('fork', create_fork)
        # subprocess on POSIX bypasses os.fork; patch its C helper too.
        try:
            import _posixsubprocess
            monkey_patch_module(_posixsubprocess, 'fork_exec', create_fork_exec)
        except ImportError:
            pass
    else:
        #Windows
        try:
            import _subprocess
        except ImportError:
            import _winapi as _subprocess
        monkey_patch_module(_subprocess, 'CreateProcess', create_CreateProcess)
def patch_new_process_functions_with_warning():
    """Patches the same process-spawning APIs as patch_new_process_functions,
    but only to warn that breakpoints won't work in child processes (used
    when 'attach to subprocess' is disabled)."""
    monkey_patch_os('execl', create_warn_multiproc)
    monkey_patch_os('execle', create_warn_multiproc)
    monkey_patch_os('execlp', create_warn_multiproc)
    monkey_patch_os('execlpe', create_warn_multiproc)
    monkey_patch_os('execv', create_warn_multiproc)
    monkey_patch_os('execve', create_warn_multiproc)
    monkey_patch_os('execvp', create_warn_multiproc)
    monkey_patch_os('execvpe', create_warn_multiproc)
    monkey_patch_os('spawnl', create_warn_multiproc)
    monkey_patch_os('spawnle', create_warn_multiproc)
    monkey_patch_os('spawnlp', create_warn_multiproc)
    monkey_patch_os('spawnlpe', create_warn_multiproc)
    monkey_patch_os('spawnv', create_warn_multiproc)
    monkey_patch_os('spawnve', create_warn_multiproc)
    monkey_patch_os('spawnvp', create_warn_multiproc)
    monkey_patch_os('spawnvpe', create_warn_multiproc)

    if sys.platform != 'win32':
        monkey_patch_os('fork', create_warn_multiproc)
        try:
            import _posixsubprocess
            monkey_patch_module(_posixsubprocess, 'fork_exec', create_warn_multiproc)
        except ImportError:
            pass
    else:
        #Windows
        try:
            import _subprocess
        except ImportError:
            import _winapi as _subprocess
        monkey_patch_module(_subprocess, 'CreateProcess', create_CreateProcessWarnMultiproc)
class _NewThreadStartupWithTrace:
    # Callable wrapper around a thread target that installs the debugger's
    # trace function in the new thread before running user code.

    def __init__(self, original_func, args, kwargs):
        self.original_func = original_func
        self.args = args
        self.kwargs = kwargs

    def __call__(self):
        from pydevd_comm import GetGlobalDebugger
        global_debugger = GetGlobalDebugger()

        if global_debugger is not None:
            # Enable tracing for this (new) thread before user code runs.
            global_debugger.SetTrace(global_debugger.trace_dispatch)

        return self.original_func(*self.args, **self.kwargs)


class _NewThreadStartupWithoutTrace:
    # Same wrapper shape as above, but leaves tracing off.

    def __init__(self, original_func, args, kwargs):
        self.original_func = original_func
        self.args = args
        self.kwargs = kwargs

    def __call__(self):
        return self.original_func(*self.args, **self.kwargs)

# Selects which wrapper the patched start_new_thread uses; toggled by
# enable_trace_thread_modules / disable_trace_thread_modules.
_UseNewThreadStartup = _NewThreadStartupWithTrace
def _get_threading_modules_to_patch():
threading_modules_to_patch = []
try:
import thread as _thread
threading_modules_to_patch.append(_thread)
except:
import _thread
threading_modules_to_patch.append(_thread)
return threading_modules_to_patch
threading_modules_to_patch = _get_threading_modules_to_patch()
def patch_thread_module(thread):
    """Replaces thread.start_new_thread so new threads get the debug tracer.

    The original function is stashed on the module as
    ``_original_start_new_thread`` (only on the first patch).
    """

    if getattr(thread, '_original_start_new_thread', None) is None:
        _original_start_new_thread = thread._original_start_new_thread = thread.start_new_thread
    else:
        _original_start_new_thread = thread._original_start_new_thread

    class ClassWithPydevStartNewThread:

        def pydev_start_new_thread(self, function, args=(), kwargs={}):
            '''
            We need to replace the original thread.start_new_thread with this function so that threads started
            through it and not through the threading module are properly traced.
            '''
            # _UseNewThreadStartup wraps the target so tracing is set up (or
            # deliberately skipped) in the new thread before user code runs.
            return _original_start_new_thread(_UseNewThreadStartup(function, args, kwargs), ())

    # This is a hack for the situation where the thread.start_new_thread is declared inside a class, such as the one below
    # class F(object):
    #    start_new_thread = thread.start_new_thread
    #
    #    def start_it(self):
    #        self.start_new_thread(self.function, args, kwargs)
    # So, if it's an already bound method, calling self.start_new_thread won't really receive a different 'self' -- it
    # does work in the default case because in builtins self isn't passed either.
    pydev_start_new_thread = ClassWithPydevStartNewThread().pydev_start_new_thread

    try:
        # We need to replace the original thread.start_new_thread with this function so that threads started through
        # it and not through the threading module are properly traced.
        thread.start_new_thread = pydev_start_new_thread
        thread.start_new = pydev_start_new_thread
    except:
        pass
def patch_thread_modules():
    # Applies the start_new_thread patch to every known low-level thread
    # module (see _get_threading_modules_to_patch).
    for t in threading_modules_to_patch:
        patch_thread_module(t)


def undo_patch_thread_modules():
    # Best-effort restore of the original start_new_thread / start_new.
    for t in threading_modules_to_patch:
        try:
            t.start_new_thread = t._original_start_new_thread
        except:
            pass

        try:
            t.start_new = t._original_start_new_thread
        except:
            pass
def disable_trace_thread_modules():
    '''
    Can be used to temporarily stop tracing threads created with thread.start_new_thread.
    '''
    # Swaps the wrapper used by the patched start_new_thread to the no-trace
    # variant; threads that already started are unaffected.
    global _UseNewThreadStartup
    _UseNewThreadStartup = _NewThreadStartupWithoutTrace


def enable_trace_thread_modules():
    '''
    Can be used to start tracing threads created with thread.start_new_thread again.
    '''
    global _UseNewThreadStartup
    _UseNewThreadStartup = _NewThreadStartupWithTrace
def get_original_start_new_thread(threading_module):
    """Returns the unpatched start_new_thread of the given thread module.

    Falls back to the module's current start_new_thread when it has not
    been monkey-patched (i.e. no ``_original_start_new_thread`` backup
    exists). The except clause is narrowed from a bare ``except:`` to
    ``AttributeError``, the only expected failure here.
    """
    try:
        return threading_module._original_start_new_thread
    except AttributeError:
        return threading_module.start_new_thread
| apache-2.0 |
caedesvvv/zmqproto | zmqproto/zrenode.py | 5 | 1512 | from twisted.internet import reactor
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.task import LoopingCall
from zmqproto.zre import ZreProtocol
from socket import SOL_SOCKET, SO_BROADCAST
class ZreNode(DatagramProtocol):
    # NOTE: Python 2 only -- uses tuple parameter unpacking in
    # datagramReceived and a print statement.
    """UDP beacon node for ZRE peer discovery.

    Broadcasts a ZRE beacon every 10 seconds on port 5670 and records any
    peer whose beacon it hears, invoking peer_cb (if given) once per newly
    discovered peer uuid.
    """
    def __init__(self, ipaddr, peer_cb=None):
        self.proto = ZreProtocol(ipaddr)
        # peers: uuid -> [advertised port, sender host, sender udp port]
        self.peers = {}
        self.peer_cb = peer_cb
        reactor.listenMulticast(5670, self, listenMultiple=True)

    def startProtocol(self):
        # Set the TTL>1 so multicast will cross router hops:
        self.transport.setTTL(5)
        # Join a specific multicast group:
        #self.transport.joinGroup("224.0.0.1")
        # Start the periodic heartbeat (every 10s, first beat immediately).
        self.loopObj = LoopingCall(self.sendHeartBeat)
        self.loopObj.start(10, now=True)

    def sendHeartBeat(self):
        # Beacon via limited broadcast; SO_BROADCAST must be enabled first.
        self.transport.socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        self.transport.write(self.proto.buildBeacon(), ('255.255.255.255', 5670))

    def datagramReceived(self, data, (host, port)):
        # Ignore our own beacons; remember and report unseen peers once.
        uuid, _port = self.proto.parseBeacon(data)
        if not uuid == self.proto.uuid:
            if not uuid in self.peers:
                self.peers[uuid] = [_port, host, port]
                print "[%s] beacon id %s port %s [%s:%d]" % (self.proto.uuid, uuid, _port, host, port)
                if self.peer_cb:
                    self.peer_cb(uuid)
if __name__ == '__main__':
    # Demo: three nodes on distinct loopback addresses discover each other
    # via broadcast beacons.
    node1 = ZreNode('127.0.0.1')
    node2 = ZreNode('127.0.0.2')
    node3 = ZreNode('127.0.0.3')
    reactor.run()
| agpl-3.0 |
PennyDreadfulMTG/Penny-Dreadful-Tools | decksite/charts/chart.py | 1 | 2940 | import os.path
import pathlib
from typing import Dict
import matplotlib as mpl
# This has to happen before pyplot is imported to avoid needing an X server to draw the graphs.
# pylint: disable=wrong-import-position
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
from decksite.data import deck
from shared import configuration, logger
from shared.pd_exception import DoesNotExistException, OperationalException
def cmc(deck_id: int, attempts: int = 0) -> str:
    """Return the path to the mana-curve (cmc) chart image for a deck,
    generating it when no acceptable cached image exists.

    Retries generation recursively, raising OperationalException after the
    attempt budget is exhausted.
    """
    if attempts > 3:
        msg = 'Unable to generate cmc chart for {id} in 3 attempts.'.format(id=deck_id)
        logger.error(msg)
        raise OperationalException(msg)
    path = determine_path(str(deck_id) + '-cmc.png')
    if acceptable_file(path):
        return path
    d = deck.load_deck(deck_id)
    costs: Dict[str, int] = {}
    for entry in d.maindeck:
        c = entry.card
        if c.is_land():
            continue
        # Bucket each nonland card by converted mana cost: '0'..'6', '7+' or 'X'.
        if c.mana_cost is None:
            bucket = '0'
        elif any('{X}' in part for part in c.mana_cost):
            bucket = 'X'
        else:
            converted = int(float(c.cmc))
            bucket = str(converted) if converted < 7 else '7+'
        costs[bucket] = costs.get(bucket, 0) + entry.get('n')
    path = image(path, costs)
    if acceptable_file(path):
        return path
    return cmc(deck_id, attempts + 1)
def image(path: str, costs: Dict[str, int]) -> str:
    """Render a bar chart of card counts per mana-cost bucket and save it to path."""
    buckets = ['0', '1', '2', '3', '4', '5', '6', '7+', 'X']
    counts = [costs.get(b, 0) for b in buckets]
    sns.set_style('white')
    sns.set(font='Concourse C3', font_scale=3)
    g = sns.barplot(x=buckets, y=counts, palette=['#cccccc'] * len(buckets)) # pylint: disable=no-member
    g.axes.yaxis.set_ticklabels([])
    bars = g.patches
    sns.set(font='Concourse C3', font_scale=2)
    # Label each nonzero bar with its count just above the bar.
    for bar, count in zip(bars, counts):
        if count == 0:
            continue
        g.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.5, count, ha='center', va='bottom')
    g.margins(y=0, x=0)
    sns.despine(left=True, bottom=True)
    g.get_figure().savefig(path, transparent=True, pad_inches=0, bbox_inches='tight')
    # matplotlib state is process-global; clear it so nothing persists across requests.
    plt.clf()
    return path
def determine_path(name: str) -> str:
    """Return the full path for a chart file, ensuring the charts dir exists."""
    charts_dir = configuration.get_str('charts_dir')
    target = pathlib.Path(charts_dir)
    target.mkdir(parents=True, exist_ok=True)
    if not target.exists():
        raise DoesNotExistException('Cannot store graph images because {charts_dir} does not exist.'.format(charts_dir=charts_dir))
    return os.path.join(charts_dir, name)
def acceptable_file(path: str) -> bool:
    """Return True when path exists and looks like a fully rendered chart."""
    if not os.path.exists(path):
        return False
    size = os.path.getsize(path)
    # Threshold is a few bytes smaller than a completely empty graph on prod.
    if size < 6860:
        logger.warning('Chart at {path} is suspiciously small.'.format(path=path))
        return False
    return True
| gpl-3.0 |
toabctl/osc | osc/util/rpmquery.py | 8 | 11796 |
from __future__ import print_function
import os
import re
import struct
from . import packagequery
class RpmError(packagequery.PackageError):
    """Base error for problems while reading or parsing a rpm package."""
class RpmHeaderError(RpmError):
    """Raised when a rpm header section is malformed."""
class RpmHeader:
    """In-memory rpm header section; corresponds more or less to the
    indexEntry_s struct."""
    def __init__(self, offset, length):
        self.offset = offset
        # length of the data section (without length of indexEntries)
        self.length = length
        self.entries = []
    def append(self, entry):
        self.entries.append(entry)
    def gettag(self, tag):
        """Return the first entry with the given tag number, or None."""
        return next((entry for entry in self.entries if entry.tag == tag), None)
    def __iter__(self):
        return iter(self.entries)
    def __len__(self):
        return len(self.entries)
class RpmHeaderEntry:
    """Single rpm header index entry (entryInfo_s) plus its decoded data."""
    # on-disk size of one index entry: four 4-byte ints
    ENTRY_SIZE = 16
    def __init__(self, tag, type, offset, count):
        self.tag, self.type, self.offset, self.count = tag, type, offset, count
        # filled in later when the data section is decoded
        self.data = None
class RpmQuery(packagequery.PackageQuery, packagequery.PackageQueryResult):
    """Parses a rpm package file (lead + signature header + main header) and
    exposes its metadata: name, version, dependencies, etc.

    NOTE(review): rpmvercmp() uses the python2 builtin 'cmp', so this module
    is python2-only as written.
    """
    LEAD_SIZE = 96
    LEAD_MAGIC = 0xedabeedb
    HEADER_MAGIC = 0x8eade801
    HEADERSIG_TYPE = 5
    # rpm dependency comparison flag bits (RPMSENSE_* in rpmlib.h)
    LESS = 1 << 1
    GREATER = 1 << 2
    EQUAL = 1 << 3
    # header tag numbers read by default (name, version, release, epoch,
    # summary, arch, description, url, plus the dependency tag triples)
    default_tags = (1000, 1001, 1002, 1003, 1004, 1022, 1005, 1020,
        1047, 1112, 1113, # provides
        1049, 1048, 1050, # requires
        1054, 1053, 1055, # conflicts
        1090, 1114, 1115 # obsoletes
    )
    def __init__(self, fh):
        # fh: an open (binary) file object positioned at the start of the rpm
        self.__file = fh
        self.__path = os.path.abspath(fh.name)
        self.filename_suffix = 'rpm'
        self.header = None
    def read(self, all_tags=False, self_provides=True, *extra_tags, **extra_kw):
        """Parse the rpm file; afterwards the requested tags are decoded.

        If extra_kw contains a true 'querysig', the signature header is
        parsed instead of the main header.
        """
        # self_provides is unused because a rpm always has a self provides
        self.__read_lead()
        data = self.__file.read(RpmHeaderEntry.ENTRY_SIZE)
        hdrmgc, reserved, il, dl = struct.unpack('!I3i', data)
        if self.HEADER_MAGIC != hdrmgc:
            raise RpmHeaderError(self.__path, 'invalid headermagic \'%s\'' % hdrmgc)
        # skip signature header for now
        size = il * RpmHeaderEntry.ENTRY_SIZE + dl
        # data is 8 byte aligned
        pad = (size + 7) & ~7
        querysig = extra_kw.get('querysig')
        if not querysig:
            # skip the (padded) signature header and read the main header info
            self.__file.read(pad)
            data = self.__file.read(RpmHeaderEntry.ENTRY_SIZE)
            hdrmgc, reserved, il, dl = struct.unpack('!I3i', data)
        self.header = RpmHeader(pad, dl)
        if self.HEADER_MAGIC != hdrmgc:
            raise RpmHeaderError(self.__path, 'invalid headermagic \'%s\'' % hdrmgc)
        # read all index entries, then the data section they point into
        data = self.__file.read(il * RpmHeaderEntry.ENTRY_SIZE)
        while len(data) > 0:
            ei = struct.unpack('!4i', data[:RpmHeaderEntry.ENTRY_SIZE])
            self.header.append(RpmHeaderEntry(*ei))
            data = data[RpmHeaderEntry.ENTRY_SIZE:]
        data = self.__file.read(self.header.length)
        for i in self.header:
            if i.tag in self.default_tags + extra_tags or all_tags:
                try: # this may fail for -debug* packages
                    self.__read_data(i, data)
                except: pass
        return self
    def __read_lead(self):
        """Read and validate the 96 byte rpm lead."""
        data = self.__file.read(self.LEAD_SIZE)
        leadmgc, = struct.unpack('!I', data[:4])
        if leadmgc != self.LEAD_MAGIC:
            raise RpmError(self.__path, 'invalid lead magic \'%s\'' % leadmgc)
        sigtype, = struct.unpack('!h', data[78:80])
        if sigtype != self.HEADERSIG_TYPE:
            raise RpmError(self.__path, 'invalid header signature \'%s\'' % sigtype)
    def __read_data(self, entry, data):
        """Decode entry.data from the header data section according to entry.type.

        NOTE(review): a type == 2 (char) entry also falls through into the
        following if/elif chain and hits the final 'else', raising
        RpmHeaderError -- the second test should probably be 'elif'.
        """
        off = entry.offset
        if entry.type == 2:
            entry.data = struct.unpack('!%dc' % entry.count, data[off:off + 1 * entry.count])
        if entry.type == 3:
            entry.data = struct.unpack('!%dh' % entry.count, data[off:off + 2 * entry.count])
        elif entry.type == 4:
            entry.data = struct.unpack('!%di' % entry.count, data[off:off + 4 * entry.count])
        elif entry.type == 6:
            entry.data = unpack_string(data[off:])
        elif entry.type == 7:
            entry.data = data[off:off + entry.count]
        elif entry.type == 8 or entry.type == 9:
            # string array (8) or i18n string array (9)
            cnt = entry.count
            entry.data = []
            while cnt > 0:
                cnt -= 1
                s = unpack_string(data[off:])
                # also skip '\0'
                off += len(s) + 1
                entry.data.append(s)
            if entry.type == 8:
                return
            # type 9: pick the translation matching the user's locale
            lang = os.getenv('LANGUAGE') or os.getenv('LC_ALL') \
                or os.getenv('LC_MESSAGES') or os.getenv('LANG')
            if lang is None:
                entry.data = entry.data[0]
                return
            # get private i18n table
            table = self.header.gettag(100)
            # just care about the country code
            lang = lang.split('_', 1)[0]
            cnt = 0
            for i in table.data:
                if cnt > len(entry.data) - 1:
                    break
                if i == lang:
                    entry.data = entry.data[cnt]
                    return
                cnt += 1
            entry.data = entry.data[0]
        else:
            raise RpmHeaderError(self.__path, 'unsupported tag type \'%d\' (tag: \'%s\'' % (entry.type, entry.tag))
    def __reqprov(self, tag, flags, version):
        """Build 'name [op version]' strings from a dependency tag triple."""
        pnames = self.header.gettag(tag)
        if not pnames:
            return []
        pnames = pnames.data
        pflags = self.header.gettag(flags).data
        pvers = self.header.gettag(version).data
        if not (pnames and pflags and pvers):
            raise RpmError(self.__path, 'cannot get provides/requires, tags are missing')
        res = []
        for name, flags, ver in zip(pnames, pflags, pvers):
            # RPMSENSE_SENSEMASK = 15 (see rpmlib.h) but ignore RPMSENSE_SERIAL (= 1 << 0) therefore use 14
            if flags & 14:
                name += ' '
                if flags & self.GREATER:
                    name += '>'
                elif flags & self.LESS:
                    name += '<'
                if flags & self.EQUAL:
                    name += '='
                name += ' %s' % ver
            res.append(name)
        return res
    def vercmp(self, rpmq):
        """Compare epoch, then version, then release with another RpmQuery;
        returns a negative/zero/positive int."""
        res = RpmQuery.rpmvercmp(str(self.epoch()), str(rpmq.epoch()))
        if res != 0:
            return res
        res = RpmQuery.rpmvercmp(self.version(), rpmq.version())
        if res != 0:
            return res
        res = RpmQuery.rpmvercmp(self.release(), rpmq.release())
        return res
    # XXX: create dict for the tag => number mapping?!
    def name(self):
        return self.header.gettag(1000).data
    def version(self):
        return self.header.gettag(1001).data
    def release(self):
        return self.header.gettag(1002).data
    def epoch(self):
        # missing epoch tag means epoch 0
        epoch = self.header.gettag(1003)
        if epoch is None:
            return 0
        return epoch.data[0]
    def arch(self):
        return self.header.gettag(1022).data
    def summary(self):
        return self.header.gettag(1004).data
    def description(self):
        return self.header.gettag(1005).data
    def url(self):
        entry = self.header.gettag(1020)
        if entry is None:
            return None
        return entry.data
    def path(self):
        return self.__path
    def provides(self):
        return self.__reqprov(1047, 1112, 1113)
    def requires(self):
        return self.__reqprov(1049, 1048, 1050)
    def conflicts(self):
        return self.__reqprov(1054, 1053, 1055)
    def obsoletes(self):
        return self.__reqprov(1090, 1114, 1115)
    def is_src(self):
        # SOURCERPM = 1044
        return self.gettag(1044) is None
    def is_nosrc(self):
        # NOSOURCE = 1051, NOPATCH = 1052
        return self.is_src() and \
            (self.gettag(1051) is not None or self.gettag(1052) is not None)
    def gettag(self, num):
        return self.header.gettag(num)
    def canonname(self):
        """Return the canonical filename name-version-release.arch.rpm."""
        if self.is_nosrc():
            arch = 'nosrc'
        elif self.is_src():
            arch = 'src'
        else:
            arch = self.arch()
        return RpmQuery.filename(self.name(), None, self.version(), self.release(), arch)
    @staticmethod
    def query(filename):
        """Parse the rpm at filename and return the populated RpmQuery."""
        f = open(filename, 'rb')
        rpmq = RpmQuery(f)
        rpmq.read()
        f.close()
        return rpmq
    @staticmethod
    def queryhdrmd5(filename):
        """Return the hex md5 digest stored in the signature header (tag 1004),
        or None if the tag is missing."""
        f = open(filename, 'rb')
        rpmq = RpmQuery(f)
        rpmq.read(1004, querysig=True)
        f.close()
        entry = rpmq.gettag(1004)
        if entry is None:
            return None
        return ''.join([ "%02x" % x for x in struct.unpack('16B', entry.data) ])
    @staticmethod
    def rpmvercmp(ver1, ver2):
        """
        implementation of RPM's version comparison algorithm
        (as described in lib/rpmvercmp.c)
        """
        if ver1 == ver2:
            return 0
        res = 0
        while res == 0:
            # remove all leading non alphanumeric or tilde chars
            ver1 = re.sub('^[^a-zA-Z0-9~]*', '', ver1)
            ver2 = re.sub('^[^a-zA-Z0-9~]*', '', ver2)
            # tilde sorts before everything, including the empty string
            if ver1.startswith('~') or ver2.startswith('~'):
                if not ver1.startswith('~'):
                    return 1
                elif not ver2.startswith('~'):
                    return -1
                ver1 = ver1[1:]
                ver2 = ver2[1:]
                continue
            if not (len(ver1) and len(ver2)):
                break
            # check if we have a digits segment
            mo1 = re.match('(\d+)', ver1)
            mo2 = re.match('(\d+)', ver2)
            numeric = True
            if mo1 is None:
                mo1 = re.match('([a-zA-Z]+)', ver1)
                mo2 = re.match('([a-zA-Z]+)', ver2)
                numeric = False
            # check for different types: alpha and numeric
            if mo2 is None:
                if numeric:
                    return 1
                return -1
            seg1 = mo1.group(0)
            ver1 = ver1[mo1.end(0):]
            seg2 = mo2.group(1)
            ver2 = ver2[mo2.end(1):]
            if numeric:
                # remove leading zeros
                seg1 = re.sub('^0+', '', seg1)
                seg2 = re.sub('^0+', '', seg2)
                # longer digit segment wins - if both have the same length
                # a simple ascii compare decides
                res = len(seg1) - len(seg2) or cmp(seg1, seg2)
            else:
                res = cmp(seg1, seg2)
        if res > 0:
            return 1
        elif res < 0:
            return -1
        return cmp(ver1, ver2)
    @staticmethod
    def filename(name, epoch, version, release, arch):
        # NOTE: epoch is accepted for interface symmetry but is not part of
        # the canonical rpm filename.
        return '%s-%s-%s.%s.rpm' % (name, version, release, arch)
def unpack_string(data):
    """Unpack a '\\0' terminated string from *data*.

    Returns everything up to (but not including) the first NUL byte; if no
    NUL is present the whole buffer is returned.
    """
    # str.split/bytes.split handles both python2 (str) and python3 (bytes)
    # input in one pass; the previous per-character struct.unpack loop broke
    # on python3, where iterating bytes yields ints.
    if isinstance(data, bytes):
        val = data.split(b'\0', 1)[0]
    else:
        val = data.split('\0', 1)[0]
    if not isinstance(val, str):
        # python3: callers expect a str, matching the original char-joined result
        val = val.decode('utf-8', errors='replace')
    return val
if __name__ == '__main__':
    import sys
    # CLI smoke test: dump the metadata of the rpm given as first argument.
    try:
        rpmq = RpmQuery.query(sys.argv[1])
    except RpmError as e:
        # assumes packagequery.PackageError exposes .msg -- TODO confirm
        print(e.msg)
        sys.exit(2)
    print(rpmq.name(), rpmq.version(), rpmq.release(), rpmq.arch(), rpmq.url())
    print(rpmq.summary())
    print(rpmq.description())
    print('##########')
    print('\n'.join(rpmq.provides()))
    print('##########')
    print('\n'.join(rpmq.requires()))
    print('##########')
    print(RpmQuery.queryhdrmd5(sys.argv[1]))
| gpl-2.0 |
ganeshnalawade/ansible-modules-core | cloud/amazon/rds.py | 9 | 44390 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
- mariadb was added in version 2.2
required: false
default: null
choices: [ 'mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
- Prior to 2.0 it always defaults to null and the API would use 3306, it had to be set to other DB default values when not using MySql.
Starting at 2.0 it automatically defaults to what is expected for each c(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug:
msg: "The new db endpoint is {{ rds.instance.endpoint }}"
'''
import sys
import time
# boto.rds (legacy API) is the minimum requirement for this module.
try:
    import boto.rds
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
# boto.rds2 (boto >= 2.26.0) is optional; when present it is preferred
# because it supports extra features such as tags.
try:
    import boto.rds2
    has_rds2 = True
except ImportError:
    has_rds2 = False
# Default connection port per database engine (keyed by lower-cased engine
# name prefix).
DEFAULT_PORTS= {
    'aurora': 3306,
    'mariadb': 3306,
    'mysql': 3306,
    'oracle': 1521,
    'sqlserver': 1433,
    'postgres': 5432,
}
class RDSException(Exception):
    """Normalizes boto / boto.rds2 errors into a message and an error code."""
    def __init__(self, exc):
        message = getattr(exc, 'error_message', None)
        if message:
            # boto.exception.BotoServerError style
            self.message = message
            self.code = exc.error_code
        elif 'Error' in getattr(exc, 'body', {}):
            # boto.rds2 JSON error body style
            error = exc.body['Error']
            self.message = error['Message']
            self.code = error['Code']
        else:
            self.message = str(exc)
            self.code = 'Unknown Error'
class RDSConnection:
    """Wrapper around the legacy boto.rds API.

    All methods translate boto server errors into RDSException and wrap the
    returned boto objects in RDSDBInstance/RDSSnapshot, so callers can use
    boto.rds and boto.rds2 connections interchangeably.
    """
    def __init__(self, module, region, **aws_connect_params):
        try:
            self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg=e.error_message)

    def get_db_instance(self, instancename):
        """Return the named instance wrapped in RDSDBInstance, or None if absent."""
        try:
            return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
        except boto.exception.BotoServerError:
            return None

    def get_db_snapshot(self, snapshotid):
        """Return the named snapshot wrapped in RDSSnapshot, or None if absent."""
        try:
            return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
        except boto.exception.BotoServerError:
            return None

    def create_db_instance(self, instance_name, size, instance_class, db_engine,
                           username, password, **params):
        """Create a new DB instance; raises RDSException on API errors."""
        params['engine'] = db_engine
        try:
            result = self.connection.create_dbinstance(instance_name, size, instance_class,
                                                       username, password, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)

    def create_db_instance_read_replica(self, instance_name, source_instance, **params):
        """Create a read replica of source_instance.

        BUGFIX: boto.rds exposes 'create_dbinstance_read_replica'; the
        previous spelling 'createdb_instance_read_replica' does not exist and
        raised AttributeError on every 'replicate' command.
        """
        try:
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)

    def delete_db_instance(self, instance_name, **params):
        """Delete a DB instance; raises RDSException on API errors."""
        try:
            result = self.connection.delete_dbinstance(instance_name, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)

    def delete_db_snapshot(self, snapshot):
        """Delete a DB snapshot; raises RDSException on API errors."""
        try:
            result = self.connection.delete_dbsnapshot(snapshot)
            return RDSSnapshot(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)

    def modify_db_instance(self, instance_name, **params):
        """Apply modifications to a DB instance; raises RDSException on API errors."""
        try:
            result = self.connection.modify_dbinstance(instance_name, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)

    def reboot_db_instance(self, instance_name, **params):
        """Reboot a DB instance.

        NOTE(review): **params is accepted for interface parity with
        RDS2Connection but is intentionally not forwarded -- boto.rds'
        reboot_dbinstance takes no extra arguments.
        """
        try:
            result = self.connection.reboot_dbinstance(instance_name)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)

    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
        """Restore a new instance from a snapshot; raises RDSException on API errors."""
        try:
            result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)

    def create_db_snapshot(self, snapshot, instance_name, **params):
        """Create a snapshot of an instance.

        NOTE(review): **params is accepted for interface parity but not
        forwarded -- boto.rds' create_dbsnapshot takes only the two names.
        """
        try:
            result = self.connection.create_dbsnapshot(snapshot, instance_name)
            return RDSSnapshot(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)

    def promote_read_replica(self, instance_name, **params):
        """Promote a read replica to standalone; raises RDSException on API errors."""
        try:
            result = self.connection.promote_read_replica(instance_name, **params)
            return RDSDBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
class RDS2Connection:
    """Thin wrapper around the newer boto.rds2 API.

    Mirrors the RDSConnection interface: boto errors are normalized into
    RDSException and results are wrapped in RDS2DBInstance/RDS2Snapshot, so
    command handlers can use either connection type interchangeably.
    """
    def __init__(self, module, region, **aws_connect_params):
        try:
            self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg=e.error_message)
    def get_db_instance(self, instancename):
        # Returns None when the instance does not exist; other errors propagate.
        try:
            dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
            result = RDS2DBInstance(dbinstances[0])
            return result
        except boto.rds2.exceptions.DBInstanceNotFound as e:
            return None
        except Exception as e:
            raise e
    def get_db_snapshot(self, snapshotid):
        # Returns None when the (manual) snapshot does not exist.
        try:
            snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
            result = RDS2Snapshot(snapshots[0])
            return result
        except boto.rds2.exceptions.DBSnapshotNotFound as e:
            return None
    def create_db_instance(self, instance_name, size, instance_class, db_engine,
                           username, password, **params):
        # Raises RDSException on API errors.
        try:
            result = self.connection.create_db_instance(instance_name, size, instance_class,
                db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def create_db_instance_read_replica(self, instance_name, source_instance, **params):
        try:
            result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def delete_db_instance(self, instance_name, **params):
        try:
            result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def delete_db_snapshot(self, snapshot):
        try:
            result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
            return RDS2Snapshot(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def modify_db_instance(self, instance_name, **params):
        try:
            result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def reboot_db_instance(self, instance_name, **params):
        try:
            result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
        # NOTE(review): instance_type is accepted for interface parity with
        # RDSConnection but is not forwarded to boto here -- confirm intended.
        try:
            result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def create_db_snapshot(self, snapshot, instance_name, **params):
        try:
            result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
            return RDS2Snapshot(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
    def promote_read_replica(self, instance_name, **params):
        try:
            result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
            return RDS2DBInstance(result)
        except boto.exception.BotoServerError as e:
            raise RDSException(e)
class RDSDBInstance:
    """Adapter exposing a boto.rds DBInstance object as a plain dict."""
    def __init__(self, dbinstance):
        self.instance = dbinstance
        self.name = dbinstance.id
        self.status = dbinstance.status

    def get_data(self):
        """Return a dict summary of the instance for module output."""
        inst = self.instance
        d = {
            'id': self.name,
            'create_time': inst.create_time,
            'status': self.status,
            'availability_zone': inst.availability_zone,
            'backup_retention': inst.backup_retention_period,
            'backup_window': inst.preferred_backup_window,
            'maintenance_window': inst.preferred_maintenance_window,
            'multi_zone': inst.multi_az,
            'instance_type': inst.instance_class,
            'username': inst.master_username,
            'iops': inst.iops,
        }
        # Endpoint details are only present once the instance is available.
        if hasattr(inst, 'endpoint'):
            d['endpoint'] = inst.endpoint[0]
            d['port'] = inst.endpoint[1]
            if inst.vpc_security_groups is not None:
                d['vpc_security_groups'] = ','.join(group.vpc_group for group in inst.vpc_security_groups)
            else:
                d['vpc_security_groups'] = None
        else:
            d['endpoint'] = None
            d['port'] = None
            d['vpc_security_groups'] = None
        # ReadReplicaSourceDBInstanceIdentifier may or may not exist
        try:
            d['replication_source'] = inst.ReadReplicaSourceDBInstanceIdentifier
        except Exception:
            d['replication_source'] = None
        return d
class RDS2DBInstance:
    """Adapter exposing a boto.rds2 instance dict as a plain summary dict."""
    def __init__(self, dbinstance):
        self.instance = dbinstance
        # boto.rds2 returns plain dicts; the identifier may be absent.
        self.name = dbinstance.get('DBInstanceIdentifier')
        self.status = dbinstance.get('DBInstanceStatus')

    def get_data(self):
        """Return a dict summary in the same shape as RDSDBInstance.get_data."""
        inst = self.instance
        d = {
            'id': self.name,
            'create_time': inst['InstanceCreateTime'],
            'status': self.status,
            'availability_zone': inst['AvailabilityZone'],
            'backup_retention': inst['BackupRetentionPeriod'],
            'maintenance_window': inst['PreferredMaintenanceWindow'],
            'multi_zone': inst['MultiAZ'],
            'instance_type': inst['DBInstanceClass'],
            'username': inst['MasterUsername'],
            'iops': inst['Iops'],
            'replication_source': inst['ReadReplicaSourceDBInstanceIdentifier'],
        }
        if inst['VpcSecurityGroups'] is not None:
            d['vpc_security_groups'] = ','.join(group['VpcSecurityGroupId'] for group in inst['VpcSecurityGroups'])
        endpoint = inst.get('Endpoint')
        if endpoint is not None:
            d['endpoint'] = endpoint.get('Address', None)
            d['port'] = endpoint.get('Port', None)
        else:
            d['endpoint'] = None
            d['port'] = None
        return d
class RDSSnapshot:
    """Read-only view of a boto.rds DBSnapshot object."""

    def __init__(self, snapshot):
        self.snapshot = snapshot
        self.name = snapshot.id
        self.status = snapshot.status

    def get_data(self):
        """Return a plain dict describing this snapshot."""
        snap = self.snapshot
        data = {
            'id': self.name,
            'create_time': snap.snapshot_create_time,
            'status': self.status,
            'availability_zone': snap.availability_zone,
            'instance_id': snap.instance_id,
            'instance_created': snap.instance_create_time,
        }
        # needs boto >= 2.21.0
        for optional_attr in ('snapshot_type', 'iops'):
            if hasattr(snap, optional_attr):
                data[optional_attr] = getattr(snap, optional_attr)
        return data
class RDS2Snapshot:
    """Read-only view of a boto.rds2 DB snapshot dict.

    rds2 delete calls return the snapshot nested inside a response
    envelope; __init__ unwraps it so get_data() works uniformly.
    Optional keys default to None instead of raising KeyError, since
    partially-created snapshots may omit some fields.
    """

    def __init__(self, snapshot):
        if 'DeleteDBSnapshotResponse' in snapshot:
            self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
        else:
            self.snapshot = snapshot
        self.name = self.snapshot.get('DBSnapshotIdentifier')
        self.status = self.snapshot.get('Status')

    def get_data(self):
        """Return a plain dict describing this snapshot."""
        snap = self.snapshot
        return {
            'id': self.name,
            'create_time': snap.get('SnapshotCreateTime'),
            'status': self.status,
            'availability_zone': snap.get('AvailabilityZone'),
            'instance_id': snap.get('DBInstanceIdentifier'),
            'instance_created': snap.get('InstanceCreateTime'),
            'snapshot_type': snap.get('SnapshotType'),
            'iops': snap.get('Iops'),
        }
def await_resource(conn, resource, status, module):
    """Poll `resource` until it reaches `status` or wait_timeout expires.

    Re-fetches the snapshot/instance on every pass.  Fails the module on
    timeout, or when the resource name could not be parsed from the
    original response.  Returns the last fetched resource (None if it
    disappeared mid-wait, e.g. during a delete).
    """
    deadline = module.params.get('wait_timeout') + time.time()
    while deadline > time.time() and resource.status != status:
        time.sleep(5)
        if time.time() >= deadline:
            module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
        if module.params.get('command') == 'snapshot':
            # Temporary until all the rds2 commands have their responses parsed
            if resource.name is None:
                module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
            resource = conn.get_db_snapshot(resource.name)
        else:
            # Temporary until all the rds2 commands have their responses parsed
            if resource.name is None:
                module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
            resource = conn.get_db_instance(resource.name)
        # Resource vanished (e.g. finished deleting): stop polling.
        if resource is None:
            break
    return resource
def create_db_instance(module, conn):
    """Handle command=create: create the instance if it does not exist.

    Idempotent: an already-existing instance reports changed=False.
    (The original fetched `subnet` into a dead local at the top; that
    unused assignment is removed.)
    """
    required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
    valid_vars = ['backup_retention', 'backup_window',
                  'character_set_name', 'db_name', 'engine_version',
                  'instance_type', 'iops', 'license_model', 'maint_window',
                  'multi_zone', 'option_group', 'parameter_group', 'port',
                  'subnet', 'upgrade', 'zone']
    # Inside a VPC subnet group the security groups are VPC groups;
    # EC2-classic uses plain DB security groups.
    if module.params.get('subnet'):
        valid_vars.append('vpc_security_groups')
    else:
        valid_vars.append('security_groups')
    if has_rds2:
        valid_vars.extend(['publicly_accessible', 'tags'])
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')

    result = conn.get_db_instance(instance_name)
    if result:
        changed = False
    else:
        try:
            result = conn.create_db_instance(
                instance_name, module.params.get('size'),
                module.params.get('instance_type'), module.params.get('db_engine'),
                module.params.get('username'), module.params.get('password'), **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg="Failed to create instance: %s" % e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)

    module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
    """Handle command=replicate: create a read replica if missing.

    Idempotent: an existing instance of that name reports changed=False.
    """
    required_vars = ['instance_name', 'source_instance']
    valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
    if has_rds2:
        valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    source_instance = module.params.get('source_instance')

    replica = conn.get_db_instance(instance_name)
    if replica:
        changed = False
    else:
        try:
            replica = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg="Failed to create replica instance: %s " % e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, replica, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)

    module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
    """Handle command=delete for either an instance or a snapshot.

    With instance_name set, deletes the instance (optionally taking a
    final snapshot named by `snapshot`); without it, deletes the named
    snapshot.  Already-absent or already-deleting targets report
    changed=False.
    """
    required_vars = []
    valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    snapshot = module.params.get('snapshot')

    if not instance_name:
        target = conn.get_db_snapshot(snapshot)
    else:
        target = conn.get_db_instance(instance_name)
    if not target:
        module.exit_json(changed=False)
    if target.status == 'deleting':
        module.exit_json(changed=False)

    try:
        if instance_name:
            if snapshot:
                # Take a final snapshot before dropping the instance.
                params["skip_final_snapshot"] = False
                if has_rds2:
                    params["final_db_snapshot_identifier"] = snapshot
                else:
                    params["final_snapshot_id"] = snapshot
            else:
                params["skip_final_snapshot"] = True
            result = conn.delete_db_instance(instance_name, **params)
        else:
            result = conn.delete_db_snapshot(snapshot)
    except RDSException as e:
        module.fail_json(msg="Failed to delete instance: %s" % e.message)

    # If we're not waiting for a delete to complete then we're all done
    # so just return
    if not module.params.get('wait'):
        module.exit_json(changed=True)
    try:
        await_resource(conn, result, 'deleted', module)
        module.exit_json(changed=True)
    except RDSException as e:
        # The resource vanishing while we poll means the delete worked.
        if e.code == 'DBInstanceNotFound':
            module.exit_json(changed=True)
        else:
            module.fail_json(msg=e.message)
    except Exception as e:
        module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
    """Handle command=facts: report one instance's or snapshot's data.

    Bug fix: when neither instance_name nor snapshot was supplied, the
    original fell through with `resource` unbound and crashed with a
    NameError at exit_json; now it fails the module with a clear message.
    """
    required_vars = []
    valid_vars = ['instance_name', 'snapshot']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    snapshot = module.params.get('snapshot')

    if instance_name and snapshot:
        module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
    if instance_name:
        resource = conn.get_db_instance(instance_name)
        if not resource:
            module.fail_json(msg="DB instance %s does not exist" % instance_name)
    elif snapshot:
        resource = conn.get_db_snapshot(snapshot)
        if not resource:
            module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
    else:
        module.fail_json(msg="Facts must be called with either instance_name or snapshot")

    module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
    """Handle command=modify: apply configuration changes to an instance.

    When apply_immediately is set together with a rename, polls until
    the instance reappears under its new name before waiting on status.
    """
    required_vars = ['instance_name']
    valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
                  'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
                  'maint_window', 'multi_zone', 'new_instance_name',
                  'option_group', 'parameter_group', 'password', 'size', 'upgrade']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    new_instance_name = module.params.get('new_instance_name')

    try:
        result = conn.modify_db_instance(instance_name, **params)
    except RDSException as e:
        module.fail_json(msg=e.message)

    if params.get('apply_immediately') and new_instance_name:
        # Wait until the new instance name is valid
        renamed = None
        while not renamed:
            renamed = conn.get_db_instance(new_instance_name)
            time.sleep(5)
        # Found instance but it briefly flicks to available
        # before rebooting so let's wait until we see it rebooting
        # before we check whether to 'wait'
        result = await_resource(conn, renamed, 'rebooting', module)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)

    # guess that this changed the DB, need a way to check
    module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
    """Handle command=promote: promote a read replica to standalone.

    Idempotent: an instance with no replication source reports
    changed=False.
    """
    required_vars = ['instance_name']
    valid_vars = ['backup_retention', 'backup_window']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')

    result = conn.get_db_instance(instance_name)
    if not result:
        module.fail_json(msg="DB Instance %s does not exist" % instance_name)

    if result.get_data().get('replication_source'):
        try:
            result = conn.promote_read_replica(instance_name, **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg=e.message)
    else:
        # Not a replica (or already promoted): nothing to do.
        changed = False

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)

    module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
    """Handle command=snapshot: snapshot an instance unless one exists.

    Idempotent: an existing snapshot of that name reports changed=False.
    """
    required_vars = ['instance_name', 'snapshot']
    valid_vars = ['tags']
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    snapshot = module.params.get('snapshot')

    changed = False
    result = conn.get_db_snapshot(snapshot)
    if not result:
        try:
            result = conn.create_db_snapshot(snapshot, instance_name, **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg=e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_snapshot(snapshot)

    module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
    """Handle command=reboot: reboot an existing instance.

    The original performed an initial get_db_instance() fetch whose
    result was unconditionally overwritten by reboot_db_instance();
    that dead call is removed.
    """
    required_vars = ['instance_name']
    valid_vars = []
    if has_rds2:
        # force_failover is only supported by the rds2 API.
        valid_vars.append('force_failover')
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')

    try:
        result = conn.reboot_db_instance(instance_name, **params)
        changed = True
    except RDSException as e:
        module.fail_json(msg=e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)

    module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
    """Handle command=restore: restore an instance from a snapshot.

    Idempotent: an already-existing instance reports changed=False.
    """
    required_vars = ['instance_name', 'snapshot']
    valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
                  'option_group', 'port', 'publicly_accessible',
                  'subnet', 'tags', 'upgrade', 'zone']
    # boto.rds requires the instance type; rds2 treats it as optional.
    if has_rds2:
        valid_vars.append('instance_type')
    else:
        required_vars.append('instance_type')
    params = validate_parameters(required_vars, valid_vars, module)
    instance_name = module.params.get('instance_name')
    instance_type = module.params.get('instance_type')
    snapshot = module.params.get('snapshot')

    changed = False
    result = conn.get_db_instance(instance_name)
    if not result:
        try:
            result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
            changed = True
        except RDSException as e:
            module.fail_json(msg=e.message)

    if module.params.get('wait'):
        resource = await_resource(conn, result, 'available', module)
    else:
        resource = conn.get_db_instance(instance_name)

    module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
    """Check required options and translate module options to boto kwargs.

    Fails the module when a required option is missing, when an option is
    not valid for the current command, or when an rds2-only option is used
    with an old boto.  Returns the kwargs dict for the boto/rds2 call.
    """
    command = module.params.get('command')
    for required in required_vars:
        if not module.params.get(required):
            module.fail_json(msg="Parameter %s required for %s command" % (required, command))

    # map to convert rds module options to boto rds and rds2 options
    optional_params = {
        'port': 'port',
        'db_name': 'db_name',
        'zone': 'availability_zone',
        'maint_window': 'preferred_maintenance_window',
        'backup_window': 'preferred_backup_window',
        'backup_retention': 'backup_retention_period',
        'multi_zone': 'multi_az',
        'engine_version': 'engine_version',
        'upgrade': 'auto_minor_version_upgrade',
        'subnet': 'db_subnet_group_name',
        'license_model': 'license_model',
        'option_group': 'option_group_name',
        'size': 'allocated_storage',
        'iops': 'iops',
        'new_instance_name': 'new_instance_id',
        'apply_immediately': 'apply_immediately',
    }

    # map to convert rds module options to boto rds options
    optional_params_rds = {
        'db_engine': 'engine',
        'password': 'master_password',
        'parameter_group': 'param_group',
        'instance_type': 'instance_class',
    }

    # map to convert rds module options to boto rds2 options
    optional_params_rds2 = {
        'tags': 'tags',
        'publicly_accessible': 'publicly_accessible',
        'parameter_group': 'db_parameter_group_name',
        'character_set_name': 'character_set_name',
        'instance_type': 'db_instance_class',
        'password': 'master_user_password',
        'new_instance_name': 'new_db_instance_identifier',
        'force_failover': 'force_failover',
    }

    if has_rds2:
        optional_params.update(optional_params_rds2)
        sec_group = 'db_security_groups'
    else:
        optional_params.update(optional_params_rds)
        sec_group = 'security_groups'

    # Check for options only supported with rds2
    for rds2_only in set(optional_params_rds2) - set(optional_params_rds):
        if module.params.get(rds2_only):
            module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % rds2_only)

    params = {}
    for module_opt, boto_opt in optional_params.items():
        if module.params.get(module_opt) and module_opt not in required_vars:
            if module_opt in valid_vars:
                params[boto_opt] = module.params[module_opt]
            else:
                module.fail_json(msg="Parameter %s is not valid for %s command" % (module_opt, command))

    if module.params.get('security_groups'):
        params[sec_group] = module.params.get('security_groups').split(',')

    vpc_groups = module.params.get('vpc_security_groups')
    if vpc_groups:
        if has_rds2:
            params['vpc_security_group_ids'] = vpc_groups
        else:
            # boto.rds expects VPCSecurityGroupMembership objects, not ids.
            params['vpc_security_groups'] = [
                boto.rds.VPCSecurityGroupMembership(vpc_group=group)
                for group in vpc_groups]

    # Convert tags dict to list of tuples that rds2 expects
    if 'tags' in params:
        params['tags'] = module.params['tags'].items()
    return params
def main():
    """Entry point: build the argument spec, connect, and dispatch."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        command=dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
        instance_name=dict(required=False),
        source_instance=dict(required=False),
        db_engine=dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'], required=False),
        size=dict(required=False),
        instance_type=dict(aliases=['type'], required=False),
        username=dict(required=False),
        password=dict(no_log=True, required=False),
        db_name=dict(required=False),
        engine_version=dict(required=False),
        parameter_group=dict(required=False),
        license_model=dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
        multi_zone=dict(type='bool', default=False),
        iops=dict(required=False),
        security_groups=dict(required=False),
        vpc_security_groups=dict(type='list', required=False),
        port=dict(required=False),
        upgrade=dict(type='bool', default=False),
        option_group=dict(required=False),
        maint_window=dict(required=False),
        backup_window=dict(required=False),
        backup_retention=dict(required=False),
        zone=dict(aliases=['aws_zone', 'ec2_zone'], required=False),
        subnet=dict(required=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
        snapshot=dict(required=False),
        apply_immediately=dict(type='bool', default=False),
        new_instance_name=dict(required=False),
        tags=dict(type='dict', required=False),
        publicly_accessible=dict(required=False),
        character_set_name=dict(required=False),
        force_failover=dict(type='bool', required=False, default=False),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    # One handler per value of the `command` option.
    invocations = {
        'create': create_db_instance,
        'replicate': replicate_db_instance,
        'delete': delete_db_instance_or_snapshot,
        'facts': facts_db_instance_or_snapshot,
        'modify': modify_db_instance,
        'promote': promote_db_instance,
        'snapshot': snapshot_db_instance,
        'reboot': reboot_db_instance,
        'restore': restore_db_instance,
    }

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")

    # set port to per db defaults if not specified
    if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
        # 'oracle-se1' -> 'oracle'; engines without a '-' are unchanged.
        engine = module.params['db_engine'].split('-')[0]
        module.params['port'] = DEFAULT_PORTS[engine.lower()]

    # connect to the rds endpoint
    if has_rds2:
        conn = RDS2Connection(module, region, **aws_connect_params)
    else:
        conn = RDSConnection(module, region, **aws_connect_params)

    invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

# Guard the entry point so importing this module (e.g. for tests or
# documentation tooling) does not execute it; the original called
# main() unconditionally.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Aleks31/pychess | lib/pychess/Players/ICPlayer.py | 20 | 10779 | from collections import defaultdict
from pychess.compat import Queue
from pychess.Players.Player import Player, PlayerIsDead, TurnInterrupt
from pychess.Utils.Move import parseSAN, toAN
from pychess.Utils.lutils.lmove import ParsingError
from pychess.Utils.Offer import Offer
from pychess.Utils.const import *
from pychess.System.Log import log
class ICPlayer (Player):
    """Proxy Player for a remote opponent on an internet chess server.

    Translates server events (board updates, offers, private chat) from the
    connection's managers into Player signals, and hands incoming moves to
    the game thread through a blocking queue.

    Bug fix: the `time` property computed the player's clock time but never
    returned it, so it always evaluated to None; it now returns the value.
    """
    __type__ = REMOTE

    def __init__ (self, gamemodel, ichandle, gameno, color, name, icrating=None):
        Player.__init__(self)
        self.offers = {}
        # `queue` carries incoming (ply, move) pairs plus "del"/"int"
        # control tokens; `okqueue` acknowledges each move so the fics
        # thread does not run ahead of the game thread.
        self.queue = Queue()
        self.okqueue = Queue()
        self.setName(name)
        self.ichandle = ichandle
        self.icrating = icrating
        self.color = color
        self.gameno = gameno
        self.gamemodel = gamemodel

        # If some times later FICS creates another game with same wplayer,bplayer,gameno
        # this will change to False and boardUpdate messages will be ignored
        self.current = True

        self.connection = connection = self.gamemodel.connection
        self.connections = connections = defaultdict(list)
        connections[connection.bm].append(connection.bm.connect_after("boardUpdate", self.__boardUpdate))
        connections[connection.bm].append(connection.bm.connect_after("playGameCreated", self.__playGameCreated))
        connections[connection.bm].append(connection.bm.connect_after("obsGameCreated", self.__obsGameCreated))
        connections[connection.om].append(connection.om.connect("onOfferAdd", self.__onOfferAdd))
        connections[connection.om].append(connection.om.connect("onOfferRemove", self.__onOfferRemove))
        connections[connection.om].append(connection.om.connect("onOfferDeclined", self.__onOfferDeclined))
        connections[connection.cm].append(connection.cm.connect("privateMessage", self.__onPrivateMessage))

    def getICHandle (self):
        return self.name

    @property
    def time (self):
        # BUG FIX: the original discarded this value (no `return`),
        # so the property always yielded None.
        return self.gamemodel.timemodel.getPlayerTime(self.color)

    #===========================================================================
    #    Handle signals from the connection
    #===========================================================================

    def __playGameCreated (self, bm, ficsgame):
        """Ignore future board updates if FICS reuses our gameno for a new game."""
        if self.gamemodel.ficsplayers[0] == ficsgame.wplayer and \
            self.gamemodel.ficsplayers[1] == ficsgame.bplayer and \
            self.gameno == ficsgame.gameno:
            log.debug("ICPlayer.__playGameCreated: gameno reappeared: gameno=%s white=%s black=%s" % \
                (ficsgame.gameno, ficsgame.wplayer.name, ficsgame.bplayer.name))
            self.current = False

    def __obsGameCreated (self, bm, ficsgame):
        """Same as __playGameCreated, for observed games."""
        if self.gamemodel.ficsplayers[0] == ficsgame.wplayer and \
            self.gamemodel.ficsplayers[1] == ficsgame.bplayer and \
            self.gameno == ficsgame.gameno:
            log.debug("ICPlayer.__obsGameCreated: gameno reappeared: gameno=%s white=%s black=%s" % \
                (ficsgame.gameno, ficsgame.wplayer.name, ficsgame.bplayer.name))
            self.current = False

    def __onOfferAdd (self, om, offer):
        if self.gamemodel.status in UNFINISHED_STATES and not self.gamemodel.isObservationGame():
            log.debug("ICPlayer.__onOfferAdd: emitting offer: self.gameno=%s self.name=%s %s" % \
                (self.gameno, self.name, offer))
            self.offers[offer.index] = offer
            self.emit ("offer", offer)

    def __onOfferDeclined (self, om, offer):
        for offer_ in self.gamemodel.offers.keys():
            if offer.type == offer_.type:
                offer.param = offer_.param
        log.debug("ICPlayer.__onOfferDeclined: emitting decline for %s" % offer)
        self.emit("decline", offer)

    def __onOfferRemove (self, om, offer):
        if offer.index in self.offers:
            log.debug("ICPlayer.__onOfferRemove: emitting withdraw: self.gameno=%s self.name=%s %s" % \
                (self.gameno, self.name, offer))
            self.emit ("withdraw", self.offers[offer.index])
            del self.offers[offer.index]

    def __onPrivateMessage (self, cm, name, title, isadmin, text):
        if name == self.ichandle:
            self.emit("offer", Offer(CHAT_ACTION, param=text))

    def __boardUpdate (self, bm, gameno, ply, curcol, lastmove, fen, wname, bname, wms, bms):
        log.debug("ICPlayer.__boardUpdate: id(self)=%d self=%s %s %s %s %d %d %s %s %d %d" % \
            (id(self), self, gameno, wname, bname, ply, curcol, lastmove, fen, wms, bms))
        if gameno == self.gameno and len(self.gamemodel.players) >= 2 \
           and wname == self.gamemodel.players[0].ichandle \
           and bname == self.gamemodel.players[1].ichandle \
           and self.current:
            log.debug("ICPlayer.__boardUpdate: id=%d self=%s gameno=%s: this is my move" % \
                (id(self), self, gameno))

            # In some cases (like lost on time) the last move is resent
            if ply <= self.gamemodel.ply:
                return

            if 1-curcol == self.color:
                log.debug("ICPlayer.__boardUpdate: id=%d self=%s ply=%d: putting move=%s in queue" % \
                    (id(self), self, ply, lastmove))
                self.queue.put((ply, lastmove))
                # Ensure the fics thread doesn't continue parsing, before the
                # game/player thread has recieved the move.
                # Specifically this ensures that we aren't killed due to end of
                # game before our last move is recieved
                self.okqueue.get(block=True)

    #===========================================================================
    #    Ending the game
    #===========================================================================

    def __disconnect (self):
        if self.connections is None: return
        for obj in self.connections:
            for handler_id in self.connections[obj]:
                if obj.handler_is_connected(handler_id):
                    obj.disconnect(handler_id)
        self.connections = None

    def end (self, status, reason):
        self.__disconnect()
        self.queue.put("del")

    def kill (self, reason):
        self.__disconnect()
        self.queue.put("del")

    #===========================================================================
    #    Send the player move updates
    #===========================================================================

    def makeMove (self, board1, move, board2):
        log.debug("ICPlayer.makemove: id(self)=%d self=%s move=%s board1=%s board2=%s" % \
            (id(self), self, move, board1, board2))
        if board2 and not self.gamemodel.isObservationGame():
            # TODO: Will this work if we just always use CASTLE_SAN?
            cn = CASTLE_KK
            if board2.variant == FISCHERRANDOMCHESS:
                cn = CASTLE_SAN
            self.connection.bm.sendMove (toAN (board2, move, castleNotation=cn))

        item = self.queue.get(block=True)
        try:
            if item == "del":
                raise PlayerIsDead
            if item == "int":
                raise TurnInterrupt

            ply, sanmove = item
            if ply < board1.ply:
                # This should only happen in an observed game
                board1 = self.gamemodel.getBoardAtPly(max(ply-1, 0))
            log.debug("ICPlayer.makemove: id(self)=%d self=%s from queue got: ply=%d sanmove=%s" % \
                (id(self), self, ply, sanmove))

            try:
                move = parseSAN (board1, sanmove)
                log.debug("ICPlayer.makemove: id(self)=%d self=%s parsed move=%s" % \
                    (id(self), self, move))
            except ParsingError as e:
                raise
            return move
        finally:
            log.debug("ICPlayer.makemove: id(self)=%d self=%s returning move=%s" % \
                (id(self), self, move))
            # Let the fics thread continue now that the move is consumed.
            self.okqueue.put("ok")

    #===========================================================================
    #    Interacting with the player
    #===========================================================================

    def pause (self):
        pass

    def resume (self):
        pass

    def setBoard (self, fen):
        # setBoard will currently only be called for ServerPlayer when starting
        # to observe some game. In this case FICS already knows how the board
        # should look, and we don't need to set anything
        pass

    def playerUndoMoves (self, movecount, gamemodel):
        log.debug("ICPlayer.playerUndoMoves: id(self)=%d self=%s, undoing movecount=%d" % \
            (id(self), self, movecount))
        # If current player has changed so that it is no longer us to move,
        # We raise TurnInterruprt in order to let GameModel continue the game
        if movecount % 2 == 1 and gamemodel.curplayer != self:
            self.queue.put("int")

    def putMessage (self, text):
        self.connection.cm.tellPlayer (self.name, text)

    #===========================================================================
    #    Offer handling
    #===========================================================================

    def offerRematch (self):
        if self.gamemodel.timed:
            # `minutes` avoids shadowing the builtin `min` used elsewhere.
            minutes = int(self.gamemodel.timemodel.intervals[0][0])/60
            inc = self.gamemodel.timemodel.gain
        else:
            minutes = 0
            inc = 0
        self.connection.om.challenge(self.ichandle,
            self.gamemodel.ficsgame.game_type, minutes, inc,
            self.gamemodel.ficsgame.rated)

    def offer (self, offer):
        log.debug("ICPlayer.offer: self=%s %s" % (repr(self), offer))
        if offer.type == TAKEBACK_OFFER:
            # only 1 outstanding takeback offer allowed on FICS, so remove any of ours
            indexes = self.offers.keys()
            for index in indexes:
                if self.offers[index].type == TAKEBACK_OFFER:
                    log.debug("ICPlayer.offer: del self.offers[%s] %s" % (index, offer))
                    del self.offers[index]
        self.connection.om.offer(offer, self.gamemodel.ply)

    def offerDeclined (self, offer):
        log.debug("ICPlayer.offerDeclined: sending decline for %s" % offer)
        self.connection.om.decline(offer)

    def offerWithdrawn (self, offer):
        pass

    def offerError (self, offer, error):
        pass

    def observe (self):
        self.connection.client.run_command("observe %s" % self.ichandle)
| gpl-3.0 |
alexmogavero/home-assistant | homeassistant/components/device_tracker/cisco_ios.py | 6 | 5037 | """
Support for Cisco IOS Routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.cisco_ios/
"""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME, \
CONF_PORT
from homeassistant.util import Throttle
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pexpect==4.0.1']
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=''): cv.string,
vol.Optional(CONF_PORT): cv.port,
})
)
def get_scanner(hass, config):
    """Validate the configuration and return a Cisco scanner."""
    scanner = CiscoDeviceScanner(config[DOMAIN])
    if not scanner.success_init:
        return None
    return scanner
class CiscoDeviceScanner(DeviceScanner):
    """This class queries a wireless router running Cisco IOS firmware.

    Bug fix: the ARP line filter used `len(line.split()) is 6`, an
    identity comparison on ints that only works by CPython small-int
    caching, and then re-checked the same length a second time; this is
    now a single `==` comparison.
    """

    def __init__(self, config):
        """Initialize the scanner."""
        self.host = config[CONF_HOST]
        self.username = config[CONF_USERNAME]
        self.port = config.get(CONF_PORT)
        self.password = config.get(CONF_PASSWORD)

        self.last_results = {}
        self.success_init = self._update_info()
        _LOGGER.info('cisco_ios scanner initialized')

    # pylint: disable=no-self-use
    def get_device_name(self, device):
        """Get the firmware doesn't save the name of the wireless device."""
        return None

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()

        return self.last_results

    @Throttle(MIN_TIME_BETWEEN_SCANS)
    def _update_info(self):
        """
        Ensure the information from the Cisco router is up to date.

        Returns boolean if scanning successful.
        """
        string_result = self._get_arp_data()

        if not string_result:
            return False

        self.last_results = []
        last_results = []

        lines_result = string_result.splitlines()

        # Remove the first two lines, as they contains the arp command
        # and the arp table titles e.g.
        # show ip arp
        # Protocol Address | Age (min) | Hardware Addr | Type | Interface
        lines_result = lines_result[2:]

        for line in lines_result:
            parts = line.split()
            if len(parts) != 6:
                continue

            # ['Internet', '10.10.11.1', '-', '0027.d32d.0123', 'ARPA',
            # 'GigabitEthernet0']
            age = parts[2]
            hw_addr = parts[3]

            # '-' means a router-owned address; only report entries seen
            # within the last minute.
            if age != "-" and int(age) < 1:
                last_results.append(_parse_cisco_mac_address(hw_addr))

        self.last_results = last_results
        return True

    def _get_arp_data(self):
        """Open connection to the router and get arp entries."""
        from pexpect import pxssh
        import re

        try:
            cisco_ssh = pxssh.pxssh()
            cisco_ssh.login(self.host, self.username, self.password,
                            port=self.port, auto_prompt_reset=False)

            # Find the hostname
            initial_line = cisco_ssh.before.decode('utf-8').splitlines()
            router_hostname = initial_line[len(initial_line) - 1]
            router_hostname += "#"
            # Set the discovered hostname as prompt
            regex_expression = ('(?i)^%s' % router_hostname).encode()
            cisco_ssh.PROMPT = re.compile(regex_expression, re.MULTILINE)
            # Allow full arp table to print at once
            cisco_ssh.sendline("terminal length 0")
            cisco_ssh.prompt(1)

            cisco_ssh.sendline("show ip arp")
            cisco_ssh.prompt(1)

            devices_result = cisco_ssh.before

            return devices_result.decode('utf-8')
        except pxssh.ExceptionPxssh as px_e:
            _LOGGER.error("pxssh failed on login")
            _LOGGER.error(px_e)

        return None
def _parse_cisco_mac_address(cisco_hardware_addr):
"""
Parse a Cisco formatted HW address to normal MAC.
e.g. convert
001d.ec02.07ab
to:
00:1D:EC:02:07:AB
Takes in cisco_hwaddr: HWAddr String from Cisco ARP table
Returns a regular standard MAC address
"""
cisco_hardware_addr = cisco_hardware_addr.replace('.', '')
blocks = [cisco_hardware_addr[x:x + 2]
for x in range(0, len(cisco_hardware_addr), 2)]
return ':'.join(blocks).upper()
| apache-2.0 |
emakis/erpnext | erpnext/selling/doctype/product_bundle/product_bundle.py | 58 | 1444 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class ProductBundle(Document):
    # A sales bundle: a non-stock parent item that expands into child
    # items on transactions.

    def autoname(self):
        # Document name is the parent item's code.
        self.name = self.new_item_code

    def validate(self):
        self.validate_main_item()
        self.validate_child_items()
        # Local import avoids a module-level import cycle with utilities.
        from erpnext.utilities.transaction_base import validate_uom_is_integer
        validate_uom_is_integer(self, "uom", "qty")

    def validate_main_item(self):
        """Validates, main Item is not a stock item"""
        if frappe.db.get_value("Item", self.new_item_code, "is_stock_item"):
            frappe.throw(_("Parent Item {0} must not be a Stock Item").format(self.new_item_code))

    def validate_child_items(self):
        # A bundle nested inside a bundle is not supported.
        for item in self.items:
            if frappe.db.exists("Product Bundle", item.item_code):
                frappe.throw(_("Child Item should not be a Product Bundle. Please remove item `{0}` and save").format(item.item_code))
def get_new_item_code(doctype, txt, searchfield, start, page_len, filters):
    # Autocomplete query for picking a bundle's parent item: stock items
    # and items that are already bundles are excluded; `txt` is bound as
    # a parameter.
    from erpnext.controllers.queries import get_match_cond
    # NOTE(review): `searchfield` is interpolated directly into the SQL
    # string rather than bound; it is framework-supplied, but confirm it
    # cannot be attacker-controlled.
    return frappe.db.sql("""select name, item_name, description from tabItem
        where is_stock_item=0 and name not in (select name from `tabProduct Bundle`)
        and %s like %s %s limit %s, %s""" % (searchfield, "%s",
        get_match_cond(doctype),"%s", "%s"),
        ("%%%s%%" % txt, start, page_len))
| gpl-3.0 |
jesseditson/rethinkdb | test/interface/permanently_remove.py | 27 | 9398 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
from __future__ import print_function
import os, socket, sys, time
startTime = time.time()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse
# Parse the standard interface-test command-line flags.
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))
r = utils.import_python_driver()
dbName, _ = utils.get_test_db_table()
print("Starting servers PrinceHamlet and KingHamlet (%.2fs)" % (time.time() - startTime))
with driver.Cluster(output_folder='.') as cluster:
    prince_hamlet = driver.Process(cluster=cluster, files='PrinceHamlet', command_prefix=command_prefix, extra_options=serve_options)
    king_hamlet = driver.Process(cluster=cluster, files='KingHamlet', command_prefix=command_prefix, extra_options=serve_options)
    other_server = driver.Process(cluster=cluster, files='OtherServer', command_prefix=command_prefix, extra_options=serve_options)
    # Keep a handle on KingHamlet's data files so it can be revived later
    # as a "ghost" after being permanently removed from the cluster.
    king_hamlet_files = king_hamlet.files
    cluster.wait_until_ready()
    cluster.check()
    print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime))
    conn = r.connect(prince_hamlet.host, prince_hamlet.driver_port)
    print("Creating three tables (%.2fs)" % (time.time() - startTime))
    if dbName not in r.db_list().run(conn):
        r.db_create(dbName).run(conn)
    res = r.db("rethinkdb").table("table_config").insert([
        # The `test` table will remain readable when `KingHamlet` is removed.
        {
            "db": dbName,
            "name": "test",
            "primary_key": "id",
            "shards": [{
                "primary_replica": "PrinceHamlet",
                "replicas": ["PrinceHamlet", "KingHamlet"]
                }],
            "write_acks": "single"
        },
        # The `test2` table will raise a `table_needs_primary` issue
        {
            "db": dbName,
            "name": "test2",
            "primary_key": "id",
            "shards": [{
                "primary_replica": "KingHamlet",
                "replicas": ["PrinceHamlet", "KingHamlet"]
                }],
            "write_acks": "single"
        },
        # The `test3` table will raise a `data_lost` issue
        {
            "db": dbName,
            "name": "test3",
            "primary_key": "id",
            "shards": [{
                "primary_replica": "KingHamlet",
                "replicas": ["KingHamlet"]
                }],
            "write_acks": "single"
        }
        ]).run(conn)
    assert res["inserted"] == 3, res
    r.db(dbName).wait().run(conn)
    print("Inserting data into tables (%.2fs)" % (time.time() - startTime))
    res = r.db(dbName).table("test").insert([{}]*100).run(conn)
    assert res["inserted"] == 100
    res = r.db(dbName).table("test2").insert([{}]*100).run(conn)
    assert res["inserted"] == 100
    res = r.db(dbName).table("test3").insert([{}]*100).run(conn)
    assert res["inserted"] == 100
    print("Killing KingHamlet (%.2fs)" % (time.time() - startTime))
    king_hamlet.close()
    time.sleep(1)
    cluster.check()
    # A disconnected (but not yet removed) server should surface exactly one
    # critical `server_disconnected` issue, visible from both survivors.
    print("Checking that the other shows an issue (%.2fs)" % (time.time() - startTime))
    issues = list(r.db("rethinkdb").table("current_issues").run(conn))
    assert len(issues) == 1, issues
    assert issues[0]["type"] == "server_disconnected"
    assert issues[0]["critical"]
    assert "KingHamlet" in issues[0]["description"]
    assert issues[0]["info"]["disconnected_server"] == "KingHamlet"
    assert set(issues[0]["info"]["reporting_servers"]) == \
        set(["PrinceHamlet", "OtherServer"])
    # identifier_format='uuid'
    issues = list(r.db("rethinkdb").table("current_issues", identifier_format='uuid').run(conn))
    assert issues[0]["info"]["disconnected_server"] == king_hamlet.uuid
    assert set(issues[0]["info"]["reporting_servers"]) == \
        set([prince_hamlet.uuid, other_server.uuid])
    test_status = r.db(dbName).table("test").status().run(conn)
    test2_status = r.db(dbName).table("test2").status().run(conn)
    test3_status = r.db(dbName).table("test3").status().run(conn)
    assert test_status["status"]["ready_for_writes"], test_status
    assert not test_status["status"]["all_replicas_ready"], test_status
    assert test2_status["status"]["ready_for_outdated_reads"], test2_status
    assert not test2_status["status"]["ready_for_reads"], test2_status
    assert not test3_status["status"]["ready_for_outdated_reads"], test3_status
    # Deleting the row from `server_config` is the "permanently remove" action.
    print("Permanently removing KingHamlet (%.2fs)" % (time.time() - startTime))
    res = r.db("rethinkdb").table("server_config").filter({"name": "KingHamlet"}).delete().run(conn)
    assert res["deleted"] == 1
    assert res["errors"] == 0
    print("Checking the issues that were generated (%.2fs)" % (time.time() - startTime))
    issues = list(r.db("rethinkdb").table("current_issues").run(conn))
    assert len(issues) == 2, issues
    # Issue ordering is not guaranteed; normalize before asserting.
    if issues[0]["type"] == "data_lost":
        dl_issue, np_issue = issues
    else:
        np_issue, dl_issue = issues
    assert np_issue["type"] == "table_needs_primary"
    assert np_issue["info"]["table"] == "test2"
    assert "no primary replica" in np_issue["description"]
    assert dl_issue["type"] == "data_lost"
    assert dl_issue["info"]["table"] == "test3"
    assert "Some data has probably been lost permanently" in dl_issue["description"]
    test_status = r.db(dbName).table("test").status().run(conn)
    test2_status = r.db(dbName).table("test2").status().run(conn)
    test3_status = r.db(dbName).table("test3").status().run(conn)
    assert test_status["status"]["all_replicas_ready"]
    assert test2_status["status"]["ready_for_outdated_reads"]
    assert not test2_status["status"]["ready_for_reads"]
    assert not test3_status["status"]["ready_for_outdated_reads"]
    assert r.db(dbName).table("test").config()["shards"].run(conn) == [{
        "primary_replica": "PrinceHamlet",
        "replicas": ["PrinceHamlet"]
        }]
    assert r.db(dbName).table("test2").config()["shards"].run(conn) == [{
        "primary_replica": None,
        "replicas": ["PrinceHamlet"]
        }]
    assert r.db(dbName).table("test3").config()["shards"].run(conn) == [{
        "primary_replica": None,
        "replicas": []
        }]
    print("Testing that having primary_replica=None doesn't break `table_config` (%.2fs)" % (time.time() - startTime))
    # By changing the table's name, we force a write to `table_config`, which tests the
    # code path that writes `"primary_replica": None`.
    res = r.db(dbName).table("test2").config().update({"name": "test2x"}).run(conn)
    assert res["errors"] == 0
    res = r.db(dbName).table("test2x").config().update({"name": "test2"}).run(conn)
    assert res["errors"] == 0
    assert r.db(dbName).table("test2").config()["shards"].run(conn) == [{
        "primary_replica": None,
        "replicas": ["PrinceHamlet"]
        }]
    print("Fixing table `test2` (%.2fs)" % (time.time() - startTime))
    r.db(dbName).table("test2").reconfigure(shards=1, replicas=1).run(conn)
    r.db(dbName).table("test2").wait().run(conn)
    print("Fixing table `test3` (%.2fs)" % (time.time() - startTime))
    r.db(dbName).table("test3").reconfigure(shards=1, replicas=1).run(conn)
    r.db(dbName).table("test3").wait().run(conn)
    # Restarting a removed server from its old data files should be detected
    # as a non-critical `server_ghost` issue rather than a rejoin.
    print("Bringing the dead server back as a ghost (%.2fs)" % (time.time() - startTime))
    ghost_of_king_hamlet = driver.Process(cluster, king_hamlet_files, console_output="king-hamlet-ghost-log", command_prefix=command_prefix)
    ghost_of_king_hamlet.wait_until_started_up()
    cluster.check()
    print("Checking that there is an issue (%.2fs)" % (time.time() - startTime))
    issues = list(r.db("rethinkdb").table("current_issues").run(conn))
    assert len(issues) == 1, issues
    assert issues[0]["type"] == "server_ghost"
    assert not issues[0]["critical"]
    assert issues[0]["info"]["server_id"] == king_hamlet.uuid
    assert issues[0]["info"]["hostname"] == socket.gethostname()
    assert issues[0]["info"]["pid"] == ghost_of_king_hamlet.process.pid
    print("Checking table contents (%.2fs)" % (time.time() - startTime))
    assert r.db(dbName).table("test").count().run(conn) == 100
    assert r.db(dbName).table("test2").count().run(conn) == 100
    assert r.db(dbName).table("test3").count().run(conn) == 0
    print("Checking that we can reconfigure despite ghost (%.2fs)" % (time.time() - startTime))
    # This is a regression test for GitHub issue #3627
    res = r.db(dbName).table("test").config().update({
        "shards": [
            {
                "primary_replica": "OtherServer",
                "replicas": ["PrinceHamlet", "OtherServer"]
            },
            {
                "primary_replica": "PrinceHamlet",
                "replicas": ["PrinceHamlet", "OtherServer"]
            }]
        }).run(conn)
    assert res["errors"] == 0, res
    res = r.db(dbName).table("test").wait().run(conn)
    assert res["ready"] == 1, res
    st = r.db(dbName).table("test").status().run(conn)
    assert st["status"]["all_replicas_ready"], st
    print("Cleaning up (%.2fs)" % (time.time() - startTime))
print("Done. (%.2fs)" % (time.time() - startTime))
| agpl-3.0 |
prisae/pelican-plugins | gist_directive/gist_directive.py | 59 | 2020 | try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
import os
import io
from docutils.parsers.rst import directives
from pelican.rstdirectives import Pygments
def fetch(gid, filename, typ):
    """Return the text of a gist or GitHub raw file, caching it under .gists/.

    gid      -- gist id (gist) or "user/repo/ref" path prefix (github)
    filename -- path of the file within the gist/repo
    typ      -- 'gist' or 'github'; anything else raises RuntimeError
    """
    if not os.path.exists('.gists'):
        os.mkdir('.gists')
    # Cache key: flatten the URL-ish path into a single filename by
    # replacing '/' with ';'.
    key = os.path.join('.gists', ("%s/%s/%s" % (typ, gid, filename)).replace('/', ';'))
    if os.path.isfile(key):
        print('LOAD-CACHED:', key)
        return io.open(key, encoding='utf8').read()
    else:
        if typ == 'gist':
            url = 'https://gist.githubusercontent.com/%s/raw/%s' % (gid, filename)
        elif typ == 'github':
            url = 'https://raw.githubusercontent.com/%s/%s' % (gid, filename)
        else:
            raise RuntimeError(typ)
        print('FETCHING:', url)
        fp = urlopen(url)
        # NOTE(review): on Python 3 urllib raises HTTPError for non-200
        # responses before this check is reached, so this branch effectively
        # only fires on Python 2 — confirm whether py3 errors should be
        # caught and reported the same way.
        if fp.getcode() != 200:
            print('FAILED TO FETCH:', url)
            print(' status code:', fp.getcode())
            print(' response:')
            try:
                print(fp.read())
            finally:
                # Abort the build: a missing snippet is a hard error.
                raise SystemExit()
        data = fp.read()
        with open(key, 'wb') as fh:
            fh.write(data)
        return data.decode('utf8')
class Gist(Pygments):
    """Embed a GitHub Gist snippet in reST text.

    Usage::

        .. gist:: GIST_ID FILENAME [LANGUAGE]

    GIST_ID is required; FILENAME and LANGUAGE are optional.
    """
    required_arguments = 1
    optional_arguments = 2
    has_content = False
    gist_type = 'gist'

    def run(self):
        # Unpack the positional directive arguments before rewriting them.
        args = self.arguments
        gist_id = args[0]
        fname = args[1] if len(args) > 1 else ''
        lang = args[2] if len(args) > 2 else None
        # Pygments expects a single [language] argument; the fetched snippet
        # becomes the directive content.
        self.arguments = [lang]
        self.content = fetch(gist_id, fname, self.gist_type).splitlines()
        return super(Gist, self).run()
class Github(Gist):
    # Same directive as Gist, but fetch() resolves against
    # raw.githubusercontent.com (the 'github' branch of fetch()).
    gist_type = 'github'
def register():
    """Register the ``gist`` and ``github`` reST directives with docutils."""
    for name, directive_cls in (('gist', Gist), ('github', Github)):
        directives.register_directive(name, directive_cls)
| agpl-3.0 |
camradal/ansible | lib/ansible/modules/network/panos/panos_restart.py | 32 | 3035 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_restart
short_description: restart a device
description:
- Restart a device
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
'''
EXAMPLES = '''
- panos_restart:
ip_address: "192.168.1.1"
username: "admin"
password: "admin"
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "okey dokey"
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
from ansible.module_utils.basic import AnsibleModule
import sys
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def main():
    """Validate module parameters and send a system-restart request to a
    PAN-OS device via the XML API.

    Exits through AnsibleModule.exit_json on success and fail_json on error.
    """
    argument_spec = dict(
        ip_address=dict(),
        password=dict(no_log=True),
        username=dict(default='admin')
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python required for this module')

    ip_address = module.params["ip_address"]
    if not ip_address:
        module.fail_json(msg="ip_address should be specified")
    password = module.params["password"]
    if not password:
        module.fail_json(msg="password is required")
    username = module.params['username']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    try:
        xapi.op(cmd="<request><restart><system></system></restart></request>")
    except Exception:
        x = sys.exc_info()[1]
        if 'succeeded' in str(x):
            # The device drops the API connection while restarting, so
            # pan-python raises even though the command succeeded.
            # BUG FIX: the original passed the undefined name `msg` here,
            # causing a NameError; report the exception text instead.
            module.exit_json(changed=True, msg=str(x))
        else:
            # Pass the exception text (not the exception object) so it
            # serializes cleanly in the module result.
            module.fail_json(msg=str(x))
        raise
    module.exit_json(changed=True, msg="okey dokey")


if __name__ == '__main__':
    main()
| gpl-3.0 |
mozilla/zamboni | mkt/tags/models.py | 5 | 2426 | from django.db import models
from django.core.urlresolvers import NoReverseMatch, reverse
import mkt
from mkt.site.models import ManagerBase, ModelBase
from mkt.site.utils import sorted_groupby
class TagManager(ManagerBase):

    def not_blocked(self):
        """Return a queryset restricted to tags that are not blocked."""
        # Blocked tags are hidden from all public-facing listings.
        return self.filter(blocked=False)
class Tag(ModelBase):
    """A user-applied tag; attached to apps via reverse m2m relations."""
    tag_text = models.CharField(max_length=128, unique=True)
    blocked = models.BooleanField(default=False)
    restricted = models.BooleanField(default=False)

    objects = TagManager()

    class Meta:
        db_table = 'tags'
        ordering = ('tag_text',)

    def __unicode__(self):
        return self.tag_text

    def can_reverse(self):
        """Whether a detail URL can be built for this tag."""
        try:
            self.get_url_path()
        except NoReverseMatch:
            return False
        return True

    def get_url_path(self):
        return reverse('tags.detail', args=[self.tag_text])

    @classmethod
    def _get_m2m_name(cls, obj):
        """Return the related field name of the m2m on Tag."""
        relations = cls._meta.get_all_related_m2m_objects_with_model()
        by_model = dict((rel[0].related_model, rel[0].field.name)
                        for rel in relations)
        return by_model.get(obj._meta.model)

    def save_tag(self, obj):
        """Attach this tag's text to `obj`, creating the Tag row if needed."""
        tag, _created = Tag.objects.get_or_create(tag_text=self.tag_text)
        getattr(obj, self._get_m2m_name(obj)).add(tag)
        mkt.log(mkt.LOG.ADD_TAG, self.tag_text, obj)
        return tag

    def remove_tag(self, obj):
        """Detach any tag rows matching this tag's text from `obj`."""
        accessor = self._get_m2m_name(obj)
        for tag in getattr(obj, accessor).filter(tag_text=self.tag_text):
            getattr(obj, accessor).remove(tag)
            mkt.log(mkt.LOG.REMOVE_TAG, self.tag_text, obj)
def attach_tags(objs):
    """
    Fetch tags from `objs` in one query and then attach them to a property on
    each instance.
    Assumes every instance in `objs` uses the same model.
    """
    if objs:
        obj_dict = {obj.id: obj for obj in objs}
        # Resolve the reverse m2m accessor name once from the first instance
        # (every instance shares the same model, per the docstring).
        m2m_name = Tag._get_m2m_name(objs[0])
        field_name = getattr(objs[0], m2m_name).query_field_name
        # One query: (object id, tag text) pairs for all non-blocked tags.
        qs = (Tag.objects.not_blocked()
              .filter(**{'%s__in' % field_name: obj_dict.keys()})
              .values_list('%s__id' % field_name, 'tag_text'))
        # Group rows by object id and attach the texts as e.g. `tags_list`.
        for obj, tags in sorted_groupby(qs, lambda x: x[0]):
            setattr(obj_dict[obj], '%s_list' % m2m_name, [t[1] for t in tags])
MiltosD/CEFELRC | lib/python2.7/site-packages/unidecode/x0b7.py | 253 | 4833 | data = (
'ddwim', # 0x00
'ddwib', # 0x01
'ddwibs', # 0x02
'ddwis', # 0x03
'ddwiss', # 0x04
'ddwing', # 0x05
'ddwij', # 0x06
'ddwic', # 0x07
'ddwik', # 0x08
'ddwit', # 0x09
'ddwip', # 0x0a
'ddwih', # 0x0b
'ddyu', # 0x0c
'ddyug', # 0x0d
'ddyugg', # 0x0e
'ddyugs', # 0x0f
'ddyun', # 0x10
'ddyunj', # 0x11
'ddyunh', # 0x12
'ddyud', # 0x13
'ddyul', # 0x14
'ddyulg', # 0x15
'ddyulm', # 0x16
'ddyulb', # 0x17
'ddyuls', # 0x18
'ddyult', # 0x19
'ddyulp', # 0x1a
'ddyulh', # 0x1b
'ddyum', # 0x1c
'ddyub', # 0x1d
'ddyubs', # 0x1e
'ddyus', # 0x1f
'ddyuss', # 0x20
'ddyung', # 0x21
'ddyuj', # 0x22
'ddyuc', # 0x23
'ddyuk', # 0x24
'ddyut', # 0x25
'ddyup', # 0x26
'ddyuh', # 0x27
'ddeu', # 0x28
'ddeug', # 0x29
'ddeugg', # 0x2a
'ddeugs', # 0x2b
'ddeun', # 0x2c
'ddeunj', # 0x2d
'ddeunh', # 0x2e
'ddeud', # 0x2f
'ddeul', # 0x30
'ddeulg', # 0x31
'ddeulm', # 0x32
'ddeulb', # 0x33
'ddeuls', # 0x34
'ddeult', # 0x35
'ddeulp', # 0x36
'ddeulh', # 0x37
'ddeum', # 0x38
'ddeub', # 0x39
'ddeubs', # 0x3a
'ddeus', # 0x3b
'ddeuss', # 0x3c
'ddeung', # 0x3d
'ddeuj', # 0x3e
'ddeuc', # 0x3f
'ddeuk', # 0x40
'ddeut', # 0x41
'ddeup', # 0x42
'ddeuh', # 0x43
'ddyi', # 0x44
'ddyig', # 0x45
'ddyigg', # 0x46
'ddyigs', # 0x47
'ddyin', # 0x48
'ddyinj', # 0x49
'ddyinh', # 0x4a
'ddyid', # 0x4b
'ddyil', # 0x4c
'ddyilg', # 0x4d
'ddyilm', # 0x4e
'ddyilb', # 0x4f
'ddyils', # 0x50
'ddyilt', # 0x51
'ddyilp', # 0x52
'ddyilh', # 0x53
'ddyim', # 0x54
'ddyib', # 0x55
'ddyibs', # 0x56
'ddyis', # 0x57
'ddyiss', # 0x58
'ddying', # 0x59
'ddyij', # 0x5a
'ddyic', # 0x5b
'ddyik', # 0x5c
'ddyit', # 0x5d
'ddyip', # 0x5e
'ddyih', # 0x5f
'ddi', # 0x60
'ddig', # 0x61
'ddigg', # 0x62
'ddigs', # 0x63
'ddin', # 0x64
'ddinj', # 0x65
'ddinh', # 0x66
'ddid', # 0x67
'ddil', # 0x68
'ddilg', # 0x69
'ddilm', # 0x6a
'ddilb', # 0x6b
'ddils', # 0x6c
'ddilt', # 0x6d
'ddilp', # 0x6e
'ddilh', # 0x6f
'ddim', # 0x70
'ddib', # 0x71
'ddibs', # 0x72
'ddis', # 0x73
'ddiss', # 0x74
'dding', # 0x75
'ddij', # 0x76
'ddic', # 0x77
'ddik', # 0x78
'ddit', # 0x79
'ddip', # 0x7a
'ddih', # 0x7b
'ra', # 0x7c
'rag', # 0x7d
'ragg', # 0x7e
'rags', # 0x7f
'ran', # 0x80
'ranj', # 0x81
'ranh', # 0x82
'rad', # 0x83
'ral', # 0x84
'ralg', # 0x85
'ralm', # 0x86
'ralb', # 0x87
'rals', # 0x88
'ralt', # 0x89
'ralp', # 0x8a
'ralh', # 0x8b
'ram', # 0x8c
'rab', # 0x8d
'rabs', # 0x8e
'ras', # 0x8f
'rass', # 0x90
'rang', # 0x91
'raj', # 0x92
'rac', # 0x93
'rak', # 0x94
'rat', # 0x95
'rap', # 0x96
'rah', # 0x97
'rae', # 0x98
'raeg', # 0x99
'raegg', # 0x9a
'raegs', # 0x9b
'raen', # 0x9c
'raenj', # 0x9d
'raenh', # 0x9e
'raed', # 0x9f
'rael', # 0xa0
'raelg', # 0xa1
'raelm', # 0xa2
'raelb', # 0xa3
'raels', # 0xa4
'raelt', # 0xa5
'raelp', # 0xa6
'raelh', # 0xa7
'raem', # 0xa8
'raeb', # 0xa9
'raebs', # 0xaa
'raes', # 0xab
'raess', # 0xac
'raeng', # 0xad
'raej', # 0xae
'raec', # 0xaf
'raek', # 0xb0
'raet', # 0xb1
'raep', # 0xb2
'raeh', # 0xb3
'rya', # 0xb4
'ryag', # 0xb5
'ryagg', # 0xb6
'ryags', # 0xb7
'ryan', # 0xb8
'ryanj', # 0xb9
'ryanh', # 0xba
'ryad', # 0xbb
'ryal', # 0xbc
'ryalg', # 0xbd
'ryalm', # 0xbe
'ryalb', # 0xbf
'ryals', # 0xc0
'ryalt', # 0xc1
'ryalp', # 0xc2
'ryalh', # 0xc3
'ryam', # 0xc4
'ryab', # 0xc5
'ryabs', # 0xc6
'ryas', # 0xc7
'ryass', # 0xc8
'ryang', # 0xc9
'ryaj', # 0xca
'ryac', # 0xcb
'ryak', # 0xcc
'ryat', # 0xcd
'ryap', # 0xce
'ryah', # 0xcf
'ryae', # 0xd0
'ryaeg', # 0xd1
'ryaegg', # 0xd2
'ryaegs', # 0xd3
'ryaen', # 0xd4
'ryaenj', # 0xd5
'ryaenh', # 0xd6
'ryaed', # 0xd7
'ryael', # 0xd8
'ryaelg', # 0xd9
'ryaelm', # 0xda
'ryaelb', # 0xdb
'ryaels', # 0xdc
'ryaelt', # 0xdd
'ryaelp', # 0xde
'ryaelh', # 0xdf
'ryaem', # 0xe0
'ryaeb', # 0xe1
'ryaebs', # 0xe2
'ryaes', # 0xe3
'ryaess', # 0xe4
'ryaeng', # 0xe5
'ryaej', # 0xe6
'ryaec', # 0xe7
'ryaek', # 0xe8
'ryaet', # 0xe9
'ryaep', # 0xea
'ryaeh', # 0xeb
'reo', # 0xec
'reog', # 0xed
'reogg', # 0xee
'reogs', # 0xef
'reon', # 0xf0
'reonj', # 0xf1
'reonh', # 0xf2
'reod', # 0xf3
'reol', # 0xf4
'reolg', # 0xf5
'reolm', # 0xf6
'reolb', # 0xf7
'reols', # 0xf8
'reolt', # 0xf9
'reolp', # 0xfa
'reolh', # 0xfb
'reom', # 0xfc
'reob', # 0xfd
'reobs', # 0xfe
'reos', # 0xff
)
| bsd-3-clause |
alexlo03/ansible | test/units/modules/network/f5/test_bigip_ssl_certificate.py | 5 | 4786 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_ssl_certificate import ArgumentSpec
from library.modules.bigip_ssl_certificate import ApiParameters
from library.modules.bigip_ssl_certificate import ModuleParameters
from library.modules.bigip_ssl_certificate import ModuleManager
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_ssl_certificate import ArgumentSpec
from ansible.modules.network.f5.bigip_ssl_certificate import ApiParameters
from ansible.modules.network.f5.bigip_ssl_certificate import ModuleParameters
from ansible.modules.network.f5.bigip_ssl_certificate import ModuleManager
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a fixture file by name, memoizing the result in `fixture_data`.

    Returns the parsed object when the file contains valid JSON, otherwise
    the raw text (e.g. PEM certificate fixtures).
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        data = f.read()
    try:
        data = json.loads(data)
    except ValueError:
        # Narrowed from `except Exception`: only a failed JSON decode is the
        # expected case here (non-JSON fixtures keep their raw text); any
        # other error should surface instead of being silently swallowed.
        pass
    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Unit tests for the module's parameter adapter classes."""

    def test_module_parameters_cert(self):
        # A certificate supplied as module input should expose derived
        # attributes (filename, checksum) on ModuleParameters.
        cert_content = load_fixture('create_insecure_cert1.crt')
        args = dict(
            content=cert_content,
            name="cert1",
            partition="Common",
            state="present",
        )
        p = ModuleParameters(params=args)
        assert p.name == 'cert1'
        assert p.filename == 'cert1.crt'
        assert 'Signature Algorithm' in p.content
        assert '-----BEGIN CERTIFICATE-----' in p.content
        assert '-----END CERTIFICATE-----' in p.content
        assert p.checksum == '1e55aa57ee166a380e756b5aa4a835c5849490fe'
        assert p.state == 'present'

    def test_module_issuer_cert_key(self):
        # Bare issuer cert names are expanded to a full partition path.
        args = dict(
            issuer_cert='foo',
            partition="Common",
        )
        p = ModuleParameters(params=args)
        assert p.issuer_cert == '/Common/foo.crt'

    def test_api_issuer_cert_key(self):
        # API-side responses already carry the partition-qualified path.
        args = load_fixture('load_sys_file_ssl_cert_with_issuer_cert.json')
        p = ApiParameters(params=args)
        assert p.issuer_cert == '/Common/intermediate.crt'
class TestCertificateManager(unittest.TestCase):
    """Exercise ModuleManager.exec_module with device calls mocked out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_import_certificate_and_key_no_key_passphrase(self, *args):
        set_module_args(dict(
            name='foo',
            content=load_fixture('cert1.crt'),
            state='present',
            password='password',
            server='localhost',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        # exists(): first False (triggers create), then True (post-check).
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True

    def test_import_certificate_chain(self, *args):
        # Same flow as above but with a certificate chain fixture.
        set_module_args(dict(
            name='foo',
            content=load_fixture('chain1.crt'),
            state='present',
            password='password',
            server='localhost',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
| gpl-3.0 |
jeongchanKim/TizenRT | tools/ttrace_parser/ttrace_tinyara.py | 10 | 8277 | #!/usr/bin/python
###########################################################################
#
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Copyright 2017 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
from __future__ import print_function
from operator import itemgetter
import os
import sys
import time
import glob
import optparse
# Module-level parser state.
# NOTE(review): cycleIdDict/workIdDict/beginDict/endDict/cycleGap/temporalId
# are never referenced elsewhere in this file — possibly vestigial; confirm
# before removing.
cycleIdDict = dict()
workIdDict = dict()
beginDict = dict()
endDict = dict()
cycleGap = 0
temporalId = 1000
# Directory holding the helper scripts (ttrace.py, ttrace_tinyaraDump.py).
parserDirPath = "scripts"
# Accumulator for translated ftrace-format lines (filled by TraceItem).
ftraceLogs = []
class TraceItem:
    """One trace record parsed from a raw TizenRT log line, convertible to
    an ftrace-formatted line (systrace input format)."""

    def __init__(self):
        # Defaults describe an unknown task on core 0.
        self.taskname = "null"
        self.pid = "0"
        self.tgid = "0"
        self.core = "[000]"
        self.flags = "...1"
        self.timestamp = "0"
        self.function_type = "null"
        self.pair_type = "B"
        self.message = "0"

    def extractTime(self, line):
        """Convert '[sec:usec]' to 'sec.usec'; store and return it."""
        stamp = line.strip('[]').replace(':', '.')
        self.timestamp = stamp
        return stamp

    def extractPid(self, line):
        """Strip the trailing ':' from the pid token; store and return it."""
        pid = line.strip(':')
        self.pid = pid
        return pid

    def extractPairType(self, line):
        """Upper-cased event tag before the first '|' (e.g. B/E/S)."""
        tag = line.split('|')[0].upper()
        self.pair_type = tag
        return tag

    def extractMsg(self, line):
        """Payload after the first '|'; store and return it."""
        payload = line.split('|')[1]
        self.message = payload
        return payload

    def composeSchedLine(self):
        """Render this record as an ftrace sched_switch line."""
        self.function_type = "sched_switch"
        return "%s-%s %s %s %s: %s: %s" % (
            self.taskname, self.pid, self.core, self.flags, self.timestamp,
            self.function_type, self.message)

    def composeNormalLine(self):
        """Render this record as a tracing_mark_write (begin/end) line."""
        self.function_type = "tracing_mark_write"
        return "%s-%s %s %s %s: %s: %s|%s|%s" % (
            self.taskname, self.pid, self.core, self.flags, self.timestamp,
            self.function_type, self.pair_type, self.pid, self.message)

    def composeLine(self):
        """Dispatch on pair_type: 'S' means a scheduler event."""
        if self.pair_type == 'S':
            return self.composeSchedLine()
        return self.composeNormalLine()

    def addLineToFtraceLogs(self, line):
        # Append to the module-level accumulator written out later.
        ftraceLogs.append(line)
        return
def writeFtraceLogs(options):
    """Write all accumulated ftrace lines to options.outputFile.

    Echoes each line to stdout when options.verbose is set. Returns True.
    """
    # BUG FIX: the file was opened in binary mode ("wb") while `str` lines
    # were written, which raises TypeError on Python 3. Open in text mode;
    # on Python 2 the only difference is newline translation on Windows.
    with open(options.outputFile, "w") as output:
        for line in ftraceLogs:
            if options.verbose:
                print(line)
            output.write(line + "\n")
    return True
def translateTinyaraLogs(options):
    """Parse raw TizenRT trace lines from options.inputFile into ftrace
    format, accumulating them via TraceItem.addLineToFtraceLogs.

    Expected raw line shape: "[sec:usec] pid: tag|message".
    Returns True.
    """
    item = TraceItem()
    filename = os.path.join(options.inputFile)
    with open(filename, "r") as rawLogs:
        for line in rawLogs:
            if (line.isspace()):
                continue
            # Split into at most 3 tokens: timestamp, pid, rest (tag|message).
            lineList = line.strip().split(None, 2)
            # NOTE(review): this local `time` shadows the imported `time`
            # module inside this function.
            time = item.extractTime(lineList[0])
            pid = item.extractPid(lineList[1])
            pair_type = item.extractPairType(lineList[2])
            msg = item.extractMsg(lineList[2])
            translatedLine = item.composeLine()
            if (options.verbose == True):
                print(translatedLine)
            item.addLineToFtraceLogs(translatedLine)
    return True
def get_os_cmd(cmdARGS):
    """Run a command and return the first chunk (up to 4096 bytes) of its
    stdout, or False if stderr became readable first / stdout was not ready.

    Note: select() on pipe objects is POSIX-only.
    """
    # BUG FIX: `subprocess` and `select` were used without ever being
    # imported, so any call to this function raised NameError. Import them
    # locally to keep the module's top-level imports untouched.
    import select
    import subprocess

    fd_popen = subprocess.Popen(cmdARGS.split(),
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    ready = select.select([fd_popen.stdout, fd_popen.stderr],
                          [], [fd_popen.stdout, fd_popen.stderr])
    if fd_popen.stdout in ready[0]:
        out = os.read(fd_popen.stdout.fileno(), 4096)
        return out
    else:
        return False
def makeHtml(options):
    """Invoke the bundled ttrace.py converter to render the .ftrace text
    file as an HTML report next to it. Returns True."""
    htmlfile = options.outputFile.replace(options.outputExt, '.html')
    # Windows cannot execute via shebang, so the "./" prefix is dropped there.
    if os.name == 'nt':
        os.system("%s/ttrace.py --from-text-file=%s -o %s\n"
                  % (parserDirPath, options.outputFile, htmlfile))
    else:
        os.system("./%s/ttrace.py --from-text-file=%s -o %s\n"
                  % (parserDirPath, options.outputFile, htmlfile))
    return True
def findAddr(filename, target):
    """Scan a System.map-style file for *target*.

    Returns the first whitespace-separated column (the address) of the first
    non-blank line containing *target*, or False when no line matches.
    """
    with open(filename, "r") as sysmap:
        for entry in sysmap:
            if entry.isspace():
                continue
            if target not in entry:
                continue
            return entry.strip().split()[0]
    return False
def main():
    """CLI entry point: read trace logs from a file or dump a live target,
    translate them to ftrace format, and render an HTML report."""
    usage = "Usage: %prog [options]"
    desc = "Example: %prog -i logs.txt -o ttrace_dump"

    parser = optparse.OptionParser(usage=usage, description=desc)
    parser.add_option('-i', '--input', dest='inputFile',
                      default=None,
                      metavar='FILENAME',
                      help="Parsed text file only, Not support dumpped file, "
                      "[default:%default]")
    parser.add_option('-d', '--dump', dest='dump',
                      default=None,
                      metavar='MODELNAME',
                      help="Dump trace buffer and generate html report, "
                      "[default:%default]")
    parser.add_option('-o', '--output', dest='outputFile',
                      default=None,
                      metavar='FILENAME',
                      help="Output file that html report saved, "
                      "[default:%default]")
    parser.add_option('-v', '--verbose', dest='verbose',
                      action="store_true",
                      default=False,
                      help="Generate verbose output, "
                      "[default:%default]")
    options, arg = parser.parse_args()
    options.curDir = os.path.dirname(os.path.abspath(sys.argv[0]))

    # -i and -d are mutually exclusive and at least one is required.
    if (options.inputFile == None and options.dump == None):
        print("Please specify reading from file or dump")
        exit()
    if (options.inputFile != None and options.dump != None):
        print("Please choose just one option for reading logs")
        exit()

    if (options.dump != None):
        if (options.dump != "artik051" and options.dump != "artik053"):
            print("%s is not supported" % (options.dump))
            print("T-trace dump supports artik051, artik053")
            exit()
        # Pull the trace buffer off the board, then continue with the
        # generated dump.trace as the input file.
        os.system("./%s/ttrace_tinyaraDump.py -t %s -o %s\n" \
                  % (parserDirPath, options.dump, "dump.bin"))
        options.inputFile = "%s/dump.trace" % (options.curDir)

    if (options.inputFile != None):
        # Check inputFile existance,
        if not os.access(options.inputFile, os.F_OK | os.R_OK):
            print("ERROR: " + "Can not read " + options.inputFile)
            return
        print("Input file: " + options.inputFile)
        options.inputFolder = os.path.split(options.inputFile)[0]
        options.inputFilenameExt = os.path.split(options.inputFile)[1]
        options.inputFileName = \
            os.path.splitext(options.inputFilenameExt)[0]

    # Derive the output path: explicit -o wins, else mirror the input
    # location/name, else fall back to ./report.
    if (options.outputFile != None):
        options.outputFolder = os.path.split(options.outputFile)[0]
        options.outputFileName = os.path.split(options.outputFile)[1]
        if not os.access(options.outputFolder, os.F_OK | os.W_OK):
            os.mkdir(options.outputFolder)
    else:
        if (options.inputFile != None):
            options.outputFolder = options.inputFolder
            options.outputFileName = options.inputFileName
        else:
            options.outputFolder = options.curDir
            options.outputFileName = "report"

    options.outputExt = ".ftrace"
    options.outputFilenameExt = options.outputFileName + options.outputExt
    options.outputFile = \
        os.path.join(options.outputFolder, options.outputFilenameExt)
    print("output file will be saved at %s" % (options.outputFile))

    # Pipeline: raw log -> ftrace text -> HTML report.
    translateTinyaraLogs(options)
    writeFtraceLogs(options)
    makeHtml(options)


if __name__ == '__main__':
    main()
| apache-2.0 |
HydrelioxGitHub/home-assistant | homeassistant/components/climate/zhong_hong.py | 3 | 7171 | """
Support for ZhongHong HVAC Controller.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.zhong_hong/
"""
import logging
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.components.climate.const import (
ATTR_OPERATION_MODE, STATE_COOL, STATE_DRY,
STATE_FAN_ONLY, STATE_HEAT, SUPPORT_FAN_MODE, SUPPORT_ON_OFF,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (ATTR_TEMPERATURE, CONF_HOST, CONF_PORT,
EVENT_HOMEASSISTANT_STOP, TEMP_CELSIUS)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (async_dispatcher_connect,
async_dispatcher_send)
REQUIREMENTS = ['zhong_hong_hvac==1.0.9']

_LOGGER = logging.getLogger(__name__)

# NOTE: 'gateway_address' key name and the misspelled constant name
# ("ADDRRESS") are part of the published config schema — do not rename.
CONF_GATEWAY_ADDRRESS = 'gateway_address'

DEFAULT_PORT = 9999
DEFAULT_GATEWAY_ADDRRESS = 1

# Dispatcher signals used to coordinate entity setup with hub startup.
SIGNAL_DEVICE_ADDED = 'zhong_hong_device_added'
SIGNAL_ZHONG_HONG_HUB_START = 'zhong_hong_hub_start'

# Platform configuration schema: host is required; port/gateway optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_GATEWAY_ADDRRESS, default=DEFAULT_GATEWAY_ADDRRESS):
        cv.positive_int,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the ZhongHong HVAC platform."""
    from zhong_hong_hvac.hub import ZhongHongGateway
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    gw_addr = config.get(CONF_GATEWAY_ADDRRESS)
    hub = ZhongHongGateway(host, port, gw_addr)
    # One climate entity per (outdoor, indoor) address pair on the gateway.
    devices = [
        ZhongHongClimate(hub, addr_out, addr_in)
        for (addr_out, addr_in) in hub.discovery_ac()
    ]

    _LOGGER.debug("We got %s zhong_hong climate devices", len(devices))

    hub_is_initialized = False

    async def startup():
        """Start hub socket after all climate entity is setted up."""
        # Each entity fires SIGNAL_DEVICE_ADDED once registered; only start
        # the hub socket after the last one, and only once.
        nonlocal hub_is_initialized
        if not all([device.is_initialized for device in devices]):
            return

        if hub_is_initialized:
            return

        _LOGGER.debug("zhong_hong hub start listen event")
        await hass.async_add_job(hub.start_listen)
        await hass.async_add_job(hub.query_all_status)
        hub_is_initialized = True

    async_dispatcher_connect(hass, SIGNAL_DEVICE_ADDED, startup)

    # add devices after SIGNAL_DEVICE_SETTED_UP event is listend
    add_entities(devices)

    def stop_listen(event):
        """Stop ZhongHongHub socket."""
        hub.stop_listen()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_listen)
class ZhongHongClimate(ClimateDevice):
    """Representation of a ZhongHong controller support HVAC."""

    def __init__(self, hub, addr_out, addr_in):
        """Set up the ZhongHong climate devices."""
        from zhong_hong_hvac.hvac import HVAC
        self._device = HVAC(hub, addr_out, addr_in)
        self._hub = hub
        # Cached state, populated by _after_update() once the device pushes
        # its first status frame; None until then.
        self._current_operation = None
        self._current_temperature = None
        self._target_temperature = None
        self._current_fan_mode = None
        self._is_on = None
        self.is_initialized = False

    async def async_added_to_hass(self):
        """Register callbacks."""
        self._device.register_update_callback(self._after_update)
        self.is_initialized = True
        # Tell setup_platform() one more entity is ready; the hub socket is
        # only started after every entity has reported in.
        async_dispatcher_send(self.hass, SIGNAL_DEVICE_ADDED)

    def _after_update(self, climate):
        """Handle a state update pushed by the device.

        Uses explicit ``is not None`` checks (instead of truthiness) so
        legitimate falsy readings -- most importantly a temperature of 0 --
        are not silently discarded.
        """
        _LOGGER.debug("async update ha state")
        if self._device.current_operation is not None:
            self._current_operation = self._device.current_operation.lower()
        if self._device.current_temperature is not None:
            self._current_temperature = self._device.current_temperature
        if self._device.current_fan_mode is not None:
            self._current_fan_mode = self._device.current_fan_mode
        if self._device.target_temperature is not None:
            self._target_temperature = self._device.target_temperature
        self._is_on = self._device.is_on
        self.schedule_update_ha_state()

    @property
    def should_poll(self):
        """Return the polling state; updates are pushed, so never poll."""
        return False

    @property
    def name(self):
        """Return the name of the thermostat, if any."""
        return self.unique_id

    @property
    def unique_id(self):
        """Return the unique ID of the HVAC."""
        return "zhong_hong_hvac_{}_{}".format(self._device.addr_out,
                                              self._device.addr_in)

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return (SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
                | SUPPORT_OPERATION_MODE | SUPPORT_ON_OFF)

    @property
    def temperature_unit(self):
        """Return the unit of measurement used by the platform."""
        return TEMP_CELSIUS

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return self._current_operation

    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        return [STATE_COOL, STATE_HEAT, STATE_DRY, STATE_FAN_ONLY]

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        return 1

    @property
    def is_on(self):
        """Return true if on.

        NOTE(review): intentionally reads the device directly rather than
        the cached ``self._is_on``, so the value is live even before the
        first pushed update arrives.
        """
        return self._device.is_on

    @property
    def current_fan_mode(self):
        """Return the fan setting."""
        return self._current_fan_mode

    @property
    def fan_list(self):
        """Return the list of available fan modes."""
        return self._device.fan_list

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return self._device.min_temp

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return self._device.max_temp

    def turn_on(self):
        """Turn on ac."""
        return self._device.turn_on()

    def turn_off(self):
        """Turn off ac."""
        return self._device.turn_off()

    def set_temperature(self, **kwargs):
        """Set new target temperature (and optionally the operation mode)."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is not None:
            self._device.set_temperature(temperature)
        operation_mode = kwargs.get(ATTR_OPERATION_MODE)
        if operation_mode is not None:
            self.set_operation_mode(operation_mode)

    def set_operation_mode(self, operation_mode):
        """Set new target operation mode (device protocol is upper-case)."""
        self._device.set_operation_mode(operation_mode.upper())

    def set_fan_mode(self, fan_mode):
        """Set new target fan mode."""
        self._device.set_fan_mode(fan_mode)
| apache-2.0 |
yencarnacion/jaikuengine | vendor/gdata/spreadsheet/service.py | 10 | 16755 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SpreadsheetsService extends the GDataService to streamline Google
Spreadsheets operations.
SpreadsheetService: Provides methods to query feeds and manipulate items.
Extends GDataService.
DictionaryToParamList: Function which converts a dictionary into a list of
URL arguments (represented as strings). This is a
utility function used in CRUD operations.
"""
__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)'
import gdata
import atom.service
import gdata.service
import gdata.spreadsheet
import atom
class Error(Exception):
  """Root of the exception hierarchy raised by this module."""
class RequestError(Error):
  """Raised when a request to the Spreadsheets service fails."""
  pass
class SpreadsheetsService(gdata.service.GDataService):
  """Client for the Google Spreadsheets service."""
  def __init__(self, email=None, password=None, source=None,
               server='spreadsheets.google.com', additional_headers=None,
               **kwargs):
    """Creates a client for the Google Spreadsheets service.
    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'spreadsheets.google.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    # 'wise' is the GData service code for Google Spreadsheets.
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='wise', source=source,
        server=server, additional_headers=additional_headers, **kwargs)
  def GetSpreadsheetsFeed(self, key=None, query=None, visibility='private',
                          projection='full'):
    """Gets a spreadsheets feed or a specific entry if a key is defined.
    Args:
      key: string (optional) The spreadsheet key defined in /ccc?key=
      query: DocumentQuery (optional) Query parameters
      visibility: string (optional) 'private' or 'public' feed visibility.
      projection: string (optional) Feed projection, e.g. 'full' or 'basic'.
    Returns:
      If there is no key, then a SpreadsheetsSpreadsheetsFeed.
      If there is a key, then a SpreadsheetsSpreadsheet.
    """
    uri = ('http://%s/feeds/spreadsheets/%s/%s'
           % (self.server, visibility, projection))
    if key is not None:
      uri = '%s/%s' % (uri, key)
    if query != None:
      query.feed = uri
      uri = query.ToUri()
    # NOTE(review): this truthiness test differs from the `is not None`
    # check above, so an empty-string key would fetch the list feed.
    if key:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsSpreadsheetFromString)
    else:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString)
  def GetWorksheetsFeed(self, key, wksht_id=None, query=None,
                        visibility='private', projection='full'):
    """Gets a worksheets feed or a specific entry if a wksht is defined.
    Args:
      key: string The spreadsheet key defined in /ccc?key=
      wksht_id: string (optional) The id for a specific worksheet entry
      query: DocumentQuery (optional) Query parameters
      visibility: string (optional) 'private' or 'public' feed visibility.
      projection: string (optional) Feed projection, e.g. 'full' or 'basic'.
    Returns:
      If there is no wksht_id, then a SpreadsheetsWorksheetsFeed.
      If there is a wksht_id, then a SpreadsheetsWorksheet.
    """
    uri = ('http://%s/feeds/worksheets/%s/%s/%s'
           % (self.server, key, visibility, projection))
    if wksht_id != None:
      uri = '%s/%s' % (uri, wksht_id)
    if query != None:
      query.feed = uri
      uri = query.ToUri()
    if wksht_id:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString)
    else:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString)
  def AddWorksheet(self, title, row_count, col_count, key):
    """Creates a new worksheet in the desired spreadsheet.
    The new worksheet is appended to the end of the list of worksheets. The
    new worksheet will only have the available number of columns and cells
    specified.
    Args:
      title: str The title which will be displayed in the list of worksheets.
      row_count: int or str The number of rows in the new worksheet.
      col_count: int or str The number of columns in the new worksheet.
      key: str The spreadsheet key to the spreadsheet to which the new
          worksheet should be added.
    Returns:
      A SpreadsheetsWorksheet if the new worksheet was created successfully.
    """
    new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheet(
        title=atom.Title(text=title),
        row_count=gdata.spreadsheet.RowCount(text=str(row_count)),
        col_count=gdata.spreadsheet.ColCount(text=str(col_count)))
    return self.Post(new_worksheet,
        'http://%s/feeds/worksheets/%s/private/full' % (self.server, key),
        converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString)
  def UpdateWorksheet(self, worksheet_entry, url=None):
    """Changes the size and/or title of the desired worksheet.
    Args:
      worksheet_entry: SpreadsheetWorksheet The new contents of the
          worksheet.
      url: str (optional) The URL to which the edited worksheet entry should
          be sent. If the url is None, the edit URL from the worksheet will
          be used.
    Returns:
      A SpreadsheetsWorksheet with the new information about the worksheet.
    """
    target_url = url or worksheet_entry.GetEditLink().href
    return self.Put(worksheet_entry, target_url,
        converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString)
  def DeleteWorksheet(self, worksheet_entry=None, url=None):
    """Removes the desired worksheet from the spreadsheet.
    Args:
      worksheet_entry: SpreadsheetWorksheet (optional) The worksheet to
          be deleted. If this is none, then the DELETE request is sent to
          the url specified in the url parameter.
      url: str (optional) The URL to which the DELETE request should be
          sent. If left as None, the worksheet's edit URL is used.
    Returns:
      True if the worksheet was deleted successfully.
    """
    if url:
      target_url = url
    else:
      target_url = worksheet_entry.GetEditLink().href
    return self.Delete(target_url)
  def GetCellsFeed(self, key, wksht_id='default', cell=None, query=None,
                   visibility='private', projection='full'):
    """Gets a cells feed or a specific entry if a cell is defined.
    Args:
      key: string The spreadsheet key defined in /ccc?key=
      wksht_id: string The id for a specific worksheet entry
      cell: string (optional) The R1C1 address of the cell
      query: DocumentQuery (optional) Query parameters
      visibility: string (optional) 'private' or 'public' feed visibility.
      projection: string (optional) Feed projection, e.g. 'full' or 'basic'.
    Returns:
      If there is no cell, then a SpreadsheetsCellsFeed.
      If there is a cell, then a SpreadsheetsCell.
    """
    uri = ('http://%s/feeds/cells/%s/%s/%s/%s'
           % (self.server, key, wksht_id, visibility, projection))
    if cell != None:
      uri = '%s/%s' % (uri, cell)
    if query != None:
      query.feed = uri
      uri = query.ToUri()
    if cell:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsCellFromString)
    else:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString)
  def GetListFeed(self, key, wksht_id='default', row_id=None, query=None,
                  visibility='private', projection='full'):
    """Gets a list feed or a specific entry if a row_id is defined.
    Args:
      key: string The spreadsheet key defined in /ccc?key=
      wksht_id: string The id for a specific worksheet entry
      row_id: string (optional) The row_id of a row in the list
      query: DocumentQuery (optional) Query parameters
      visibility: string (optional) 'private' or 'public' feed visibility.
      projection: string (optional) Feed projection, e.g. 'full' or 'basic'.
    Returns:
      If there is no row_id, then a SpreadsheetsListFeed.
      If there is a row_id, then a SpreadsheetsList.
    """
    uri = ('http://%s/feeds/list/%s/%s/%s/%s'
           % (self.server, key, wksht_id, visibility, projection))
    if row_id is not None:
      uri = '%s/%s' % (uri, row_id)
    if query is not None:
      query.feed = uri
      uri = query.ToUri()
    if row_id:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsListFromString)
    else:
      return self.Get(uri,
          converter=gdata.spreadsheet.SpreadsheetsListFeedFromString)
  def UpdateCell(self, row, col, inputValue, key, wksht_id='default'):
    """Updates an existing cell.
    Args:
      row: int The row the cell to be edited is in
      col: int The column the cell to be edited is in
      inputValue: str the new value of the cell
      key: str The key of the spreadsheet in which this cell resides.
      wksht_id: str The ID of the worksheet which holds this cell.
    Returns:
      The updated cell entry, or None if the fetched entry has no edit link.
    """
    row = str(row)
    col = str(col)
    # make the new cell
    new_cell = gdata.spreadsheet.Cell(row=row, col=col, inputValue=inputValue)
    # get the edit uri and PUT
    cell = 'R%sC%s' % (row, col)
    entry = self.GetCellsFeed(key, wksht_id, cell)
    # Returns from inside the loop on the first edit link; falls through
    # (returning None) if no edit link is present.
    for a_link in entry.link:
      if a_link.rel == 'edit':
        entry.cell = new_cell
        return self.Put(entry, a_link.href,
            converter=gdata.spreadsheet.SpreadsheetsCellFromString)
  def _GenerateCellsBatchUrl(self, spreadsheet_key, worksheet_id):
    # Batch URL is always private/full regardless of how the feed was read.
    return ('http://spreadsheets.google.com/feeds/cells/%s/%s/'
            'private/full/batch' % (spreadsheet_key, worksheet_id))
  def ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None,
                   worksheet_id=None,
                   converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString):
    """Sends a batch request feed to the server.
    The batch request needs to be sent to the batch URL for a particular
    worksheet. You can specify the worksheet by providing the spreadsheet_key
    and worksheet_id, or by sending the URL from the cells feed's batch link.
    Args:
      batch_feed: gdata.spreadsheet.SpreadsheetsCellFeed A feed containing
          BatchEntry elements which contain the desired CRUD operation and
          any necessary data to modify a cell.
      url: str (optional) The batch URL for the cells feed to which these
          changes should be applied. This can be found by calling
          cells_feed.GetBatchLink().href.
      spreadsheet_key: str (optional) Used to generate the batch request URL
          if the url argument is None. If using the spreadsheet key to
          generate the URL, the worksheet id is also required.
      worksheet_id: str (optional) Used if the url is not provided, it is
          part of the batch feed target URL. This is used with the spreadsheet
          key.
      converter: Function (optional) Function to be executed on the server's
          response. This function should take one string as a parameter. The
          default value is SpreadsheetsCellsFeedFromString which will turn the result
          into a gdata.spreadsheet.SpreadsheetsCellsFeed object.
    Returns:
      A gdata.BatchFeed containing the results.
    """
    if url is None:
      url = self._GenerateCellsBatchUrl(spreadsheet_key, worksheet_id)
    return self.Post(batch_feed, url, converter=converter)
  def InsertRow(self, row_data, key, wksht_id='default'):
    """Inserts a new row with the provided data.
    Args:
      row_data: dict A dictionary of column header to row data
      key: str The spreadsheet key of the target spreadsheet.
      wksht_id: str The ID of the worksheet receiving the new row.
    Returns:
      The inserted row
    """
    new_entry = gdata.spreadsheet.SpreadsheetsList()
    for k, v in row_data.iteritems():
      new_custom = gdata.spreadsheet.Custom()
      new_custom.column = k
      new_custom.text = v
      new_entry.custom[new_custom.column] = new_custom
    # Generate the post URL for the worksheet which will receive the new entry.
    post_url = 'http://spreadsheets.google.com/feeds/list/%s/%s/private/full'%(
        key, wksht_id)
    return self.Post(new_entry, post_url,
        converter=gdata.spreadsheet.SpreadsheetsListFromString)
  def UpdateRow(self, entry, new_row_data):
    """Updates a row with the provided data.
    If you want to add additional information to a row, it is often
    easier to change the values in entry.custom, then use the Put
    method instead of UpdateRow. This UpdateRow method will replace
    the contents of the row with new_row_data - it will change all columns
    not just the columns specified in the new_row_data dict.
    Args:
      entry: gdata.spreadsheet.SpreadsheetsList The entry to be updated
      new_row_data: dict A dictionary of column header to row data
    Returns:
      The updated row, or None if the entry has no edit link.
    """
    # Destructive: the entry's existing custom columns are discarded.
    entry.custom = {}
    for k, v in new_row_data.iteritems():
      new_custom = gdata.spreadsheet.Custom()
      new_custom.column = k
      new_custom.text = v
      entry.custom[k] = new_custom
    for a_link in entry.link:
      if a_link.rel == 'edit':
        return self.Put(entry, a_link.href,
            converter=gdata.spreadsheet.SpreadsheetsListFromString)
  def DeleteRow(self, entry):
    """Deletes a row, the provided entry.
    Args:
      entry: gdata.spreadsheet.SpreadsheetsList The row to be deleted
    Returns:
      The delete response, or None if the entry has no edit link.
    """
    for a_link in entry.link:
      if a_link.rel == 'edit':
        return self.Delete(a_link.href)
class DocumentQuery(gdata.service.Query):
  """Query for the spreadsheets feed; exposes title filters as properties.
  Each property simply proxies a URL query parameter stored in the
  underlying Query mapping.
  """
  def _GetTitleQuery(self):
    return self['title']
  def _SetTitleQuery(self, document_query):
    self['title'] = document_query
  title = property(_GetTitleQuery, _SetTitleQuery,
      doc="""The title query parameter""")
  def _GetTitleExactQuery(self):
    return self['title-exact']
  def _SetTitleExactQuery(self, document_query):
    self['title-exact'] = document_query
  title_exact = property(_GetTitleExactQuery, _SetTitleExactQuery,
      doc="""The title-exact query parameter""")
class CellQuery(gdata.service.Query):
  """Query for the cells feed; bounds and range filters as properties.
  Each property proxies one URL query parameter stored in the underlying
  Query mapping (min-row/max-row/min-col/max-col/range/return-empty).
  """
  def _GetMinRowQuery(self):
    return self['min-row']
  def _SetMinRowQuery(self, cell_query):
    self['min-row'] = cell_query
  min_row = property(_GetMinRowQuery, _SetMinRowQuery,
      doc="""The min-row query parameter""")
  def _GetMaxRowQuery(self):
    return self['max-row']
  def _SetMaxRowQuery(self, cell_query):
    self['max-row'] = cell_query
  max_row = property(_GetMaxRowQuery, _SetMaxRowQuery,
      doc="""The max-row query parameter""")
  def _GetMinColQuery(self):
    return self['min-col']
  def _SetMinColQuery(self, cell_query):
    self['min-col'] = cell_query
  min_col = property(_GetMinColQuery, _SetMinColQuery,
      doc="""The min-col query parameter""")
  def _GetMaxColQuery(self):
    return self['max-col']
  def _SetMaxColQuery(self, cell_query):
    self['max-col'] = cell_query
  max_col = property(_GetMaxColQuery, _SetMaxColQuery,
      doc="""The max-col query parameter""")
  def _GetRangeQuery(self):
    return self['range']
  def _SetRangeQuery(self, cell_query):
    self['range'] = cell_query
  # NOTE: this property shadows the builtin `range` inside the class body,
  # but the name is part of the public API and must be kept.
  range = property(_GetRangeQuery, _SetRangeQuery,
      doc="""The range query parameter""")
  def _GetReturnEmptyQuery(self):
    return self['return-empty']
  def _SetReturnEmptyQuery(self, cell_query):
    self['return-empty'] = cell_query
  return_empty = property(_GetReturnEmptyQuery, _SetReturnEmptyQuery,
      doc="""The return-empty query parameter""")
class ListQuery(gdata.service.Query):
  """Query for the list feed; sq/orderby/reverse filters as properties.
  Each property proxies one URL query parameter stored in the underlying
  Query mapping.
  """
  def _GetSpreadsheetQuery(self):
    return self['sq']
  def _SetSpreadsheetQuery(self, list_query):
    self['sq'] = list_query
  sq = property(_GetSpreadsheetQuery, _SetSpreadsheetQuery,
      doc="""The sq query parameter""")
  def _GetOrderByQuery(self):
    return self['orderby']
  def _SetOrderByQuery(self, list_query):
    self['orderby'] = list_query
  orderby = property(_GetOrderByQuery, _SetOrderByQuery,
      doc="""The orderby query parameter""")
  def _GetReverseQuery(self):
    return self['reverse']
  def _SetReverseQuery(self, list_query):
    self['reverse'] = list_query
  reverse = property(_GetReverseQuery, _SetReverseQuery,
      doc="""The reverse query parameter""")
| apache-2.0 |
vitaly4uk/django | tests/template_tests/test_response.py | 199 | 14388 | from __future__ import unicode_literals
import pickle
import time
from datetime import datetime
from django.conf import settings
from django.template import Context, engines
from django.template.response import (
ContentNotRenderedError, SimpleTemplateResponse, TemplateResponse,
)
from django.test import (
RequestFactory, SimpleTestCase, ignore_warnings, override_settings,
)
from django.test.utils import require_jinja2
from django.utils.deprecation import RemovedInDjango110Warning
from .utils import TEMPLATE_DIR
def test_processor(request):
    """Trivial context processor used to prove processor wiring works."""
    return dict(processors='yes')


# Dotted path used by TEMPLATES['OPTIONS']['context_processors'] below.
test_processor_name = 'template_tests.test_response.test_processor'
# Test middleware that swaps in an alternate URLconf for every request.
class CustomURLConfMiddleware(object):
    """Install a temporary URLconf on each incoming request."""

    def process_request(self, request):
        urlconf_path = 'template_tests.alternate_urls'
        setattr(request, 'urlconf', urlconf_path)
class SimpleTemplateResponseTest(SimpleTestCase):
    """Tests for SimpleTemplateResponse: lazy rendering, pickling, callbacks."""
    def _response(self, template='foo', *args, **kwargs):
        # Helper: wrap a template string in a SimpleTemplateResponse.
        template = engines['django'].from_string(template)
        return SimpleTemplateResponse(template, *args, **kwargs)
    def test_template_resolving(self):
        response = SimpleTemplateResponse('first/test.html')
        response.render()
        self.assertEqual(response.content, b'First template\n')
        # With a list of names, the first template that exists wins.
        templates = ['foo.html', 'second/test.html', 'first/test.html']
        response = SimpleTemplateResponse(templates)
        response.render()
        self.assertEqual(response.content, b'Second template\n')
        response = self._response()
        response.render()
        self.assertEqual(response.content, b'foo')
    def test_explicit_baking(self):
        # explicit baking
        response = self._response()
        self.assertFalse(response.is_rendered)
        response.render()
        self.assertTrue(response.is_rendered)
    def test_render(self):
        # response is not re-rendered without the render call
        response = self._response().render()
        self.assertEqual(response.content, b'foo')
        # rebaking doesn't change the rendered content
        template = engines['django'].from_string('bar{{ baz }}')
        response.template_name = template
        response.render()
        self.assertEqual(response.content, b'foo')
        # but rendered content can be overridden by manually
        # setting content
        response.content = 'bar'
        self.assertEqual(response.content, b'bar')
    def test_iteration_unrendered(self):
        # unrendered response raises an exception on iteration
        response = self._response()
        self.assertFalse(response.is_rendered)
        def iteration():
            for x in response:
                pass
        self.assertRaises(ContentNotRenderedError, iteration)
        self.assertFalse(response.is_rendered)
    def test_iteration_rendered(self):
        # iteration works for rendered responses
        response = self._response().render()
        res = [x for x in response]
        self.assertEqual(res, [b'foo'])
    def test_content_access_unrendered(self):
        # unrendered response raises an exception when content is accessed
        response = self._response()
        self.assertFalse(response.is_rendered)
        self.assertRaises(ContentNotRenderedError, lambda: response.content)
        self.assertFalse(response.is_rendered)
    def test_content_access_rendered(self):
        # rendered response content can be accessed
        response = self._response().render()
        self.assertEqual(response.content, b'foo')
    def test_set_content(self):
        # content can be overridden; assigning content marks it rendered
        response = self._response()
        self.assertFalse(response.is_rendered)
        response.content = 'spam'
        self.assertTrue(response.is_rendered)
        self.assertEqual(response.content, b'spam')
        response.content = 'baz'
        self.assertEqual(response.content, b'baz')
    def test_dict_context(self):
        response = self._response('{{ foo }}{{ processors }}',
                                  {'foo': 'bar'})
        self.assertEqual(response.context_data, {'foo': 'bar'})
        response.render()
        self.assertEqual(response.content, b'bar')
    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_context_instance(self):
        # Passing a Context instance is deprecated but still supported.
        response = self._response('{{ foo }}{{ processors }}',
                                  Context({'foo': 'bar'}))
        self.assertEqual(response.context_data.__class__, Context)
        response.render()
        self.assertEqual(response.content, b'bar')
    def test_kwargs(self):
        response = self._response(content_type='application/json', status=504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)
    def test_args(self):
        # Same as test_kwargs but via positional arguments.
        response = SimpleTemplateResponse('', {}, 'application/json', 504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)
    @require_jinja2
    def test_using(self):
        # `using` selects the template engine; default is the DTL engine.
        response = SimpleTemplateResponse('template_tests/using.html').render()
        self.assertEqual(response.content, b'DTL\n')
        response = SimpleTemplateResponse('template_tests/using.html', using='django').render()
        self.assertEqual(response.content, b'DTL\n')
        response = SimpleTemplateResponse('template_tests/using.html', using='jinja2').render()
        self.assertEqual(response.content, b'Jinja2\n')
    def test_post_callbacks(self):
        "Rendering a template response triggers the post-render callbacks"
        post = []
        def post1(obj):
            post.append('post1')
        def post2(obj):
            post.append('post2')
        response = SimpleTemplateResponse('first/test.html', {})
        response.add_post_render_callback(post1)
        response.add_post_render_callback(post2)
        # When the content is rendered, all the callbacks are invoked, too.
        response.render()
        self.assertEqual(response.content, b'First template\n')
        self.assertEqual(post, ['post1', 'post2'])
    def test_pickling(self):
        # Create a template response. The context is
        # known to be unpickleable (e.g., a function).
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)
        # But if we render the response, we can pickle it.
        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)
        self.assertEqual(unpickled_response.content, response.content)
        self.assertEqual(unpickled_response['content-type'], response['content-type'])
        self.assertEqual(unpickled_response.status_code, response.status_code)
        # ...and the unpickled response doesn't have the
        # template-related attributes, so it can't be re-rendered
        template_attrs = ('template_name', 'context_data', '_post_render_callbacks')
        for attr in template_attrs:
            self.assertFalse(hasattr(unpickled_response, attr))
        # ...and requesting any of those attributes raises an exception
        for attr in template_attrs:
            with self.assertRaises(AttributeError):
                getattr(unpickled_response, attr)
    def test_repickling(self):
        # A response unpickled once can be pickled again without error.
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)
        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)
        pickle.dumps(unpickled_response)
    def test_pickling_cookie(self):
        # Cookies set on the response survive a pickle round-trip.
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        response.cookies['key'] = 'value'
        response.render()
        pickled_response = pickle.dumps(response, pickle.HIGHEST_PROTOCOL)
        unpickled_response = pickle.loads(pickled_response)
        self.assertEqual(unpickled_response.cookies['key'].value, 'value')
@override_settings(TEMPLATES=[{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [TEMPLATE_DIR],
    'OPTIONS': {
        'context_processors': [test_processor_name],
    },
}])
class TemplateResponseTest(SimpleTestCase):
    """Tests for the request-aware TemplateResponse (context processors etc.)."""
    def setUp(self):
        self.factory = RequestFactory()
    def _response(self, template='foo', *args, **kwargs):
        # Helper: build a TemplateResponse for a fresh GET request; the
        # request is kept on self._request for later inspection.
        self._request = self.factory.get('/')
        template = engines['django'].from_string(template)
        return TemplateResponse(self._request, template, *args, **kwargs)
    def test_render(self):
        # The test_processor configured above supplies {{ processors }}.
        response = self._response('{{ foo }}{{ processors }}').render()
        self.assertEqual(response.content, b'yes')
    def test_render_with_requestcontext(self):
        response = self._response('{{ foo }}{{ processors }}',
                                  {'foo': 'bar'}).render()
        self.assertEqual(response.content, b'baryes')
    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_render_with_context(self):
        # Passing a plain Context bypasses context processors (deprecated).
        response = self._response('{{ foo }}{{ processors }}',
                                  Context({'foo': 'bar'})).render()
        self.assertEqual(response.content, b'bar')
    def test_context_processor_priority(self):
        # context processors should be overridden by passed-in context
        response = self._response('{{ foo }}{{ processors }}',
                                  {'processors': 'no'}).render()
        self.assertEqual(response.content, b'no')
    def test_kwargs(self):
        response = self._response(content_type='application/json',
                                  status=504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)
    def test_args(self):
        # Same as test_kwargs but via positional arguments.
        response = TemplateResponse(self.factory.get('/'), '', {},
                                    'application/json', 504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)
    @require_jinja2
    def test_using(self):
        # `using` selects the template engine; default is the DTL engine.
        request = self.factory.get('/')
        response = TemplateResponse(request, 'template_tests/using.html').render()
        self.assertEqual(response.content, b'DTL\n')
        response = TemplateResponse(request, 'template_tests/using.html', using='django').render()
        self.assertEqual(response.content, b'DTL\n')
        response = TemplateResponse(request, 'template_tests/using.html', using='jinja2').render()
        self.assertEqual(response.content, b'Jinja2\n')
    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_custom_app(self):
        # Deprecated current_app argument is copied onto the request.
        self._response('{{ foo }}', current_app="foobar")
        self.assertEqual(self._request.current_app, 'foobar')
    def test_pickling(self):
        # Create a template response. The context is
        # known to be unpickleable (e.g., a function).
        response = TemplateResponse(self.factory.get('/'),
            'first/test.html', {
                'value': 123,
                'fn': datetime.now,
            }
        )
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)
        # But if we render the response, we can pickle it.
        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)
        self.assertEqual(unpickled_response.content, response.content)
        self.assertEqual(unpickled_response['content-type'], response['content-type'])
        self.assertEqual(unpickled_response.status_code, response.status_code)
        # ...and the unpickled response doesn't have the
        # template-related attributes, so it can't be re-rendered
        template_attrs = ('template_name', 'context_data',
            '_post_render_callbacks', '_request', '_current_app')
        for attr in template_attrs:
            self.assertFalse(hasattr(unpickled_response, attr))
        # ...and requesting any of those attributes raises an exception
        for attr in template_attrs:
            with self.assertRaises(AttributeError):
                getattr(unpickled_response, attr)
    def test_repickling(self):
        # A response unpickled once can be pickled again without error.
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)
        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)
        pickle.dumps(unpickled_response)
@override_settings(
    MIDDLEWARE_CLASSES=settings.MIDDLEWARE_CLASSES + [
        'template_tests.test_response.CustomURLConfMiddleware'
    ],
    ROOT_URLCONF='template_tests.urls',
)
class CustomURLConfTest(SimpleTestCase):
    """Verify TemplateResponse resolves URLs with a per-request URLconf."""
    def test_custom_urlconf(self):
        # CustomURLConfMiddleware swaps request.urlconf, so the reversed URL
        # in the template must come from the alternate URLconf.
        response = self.client.get('/template_response_view/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'This is where you can find the snark: /snark/')
@override_settings(
    CACHE_MIDDLEWARE_SECONDS=2.0,
    MIDDLEWARE_CLASSES=settings.MIDDLEWARE_CLASSES + [
        'django.middleware.cache.FetchFromCacheMiddleware',
        'django.middleware.cache.UpdateCacheMiddleware',
    ],
    ROOT_URLCONF='template_tests.alternate_urls',
)
class CacheMiddlewareTest(SimpleTestCase):
    """Verify TemplateResponse works with the page-cache middleware.
    NOTE(review): the test is timing-based (real sleeps against a 2-second
    cache timeout), so it is slow by design and could flake on a very loaded
    machine.
    """
    def test_middleware_caching(self):
        response = self.client.get('/template_response_view/')
        self.assertEqual(response.status_code, 200)
        time.sleep(1.0)
        # Still within the 2-second window: the cached body is served.
        response2 = self.client.get('/template_response_view/')
        self.assertEqual(response2.status_code, 200)
        self.assertEqual(response.content, response2.content)
        time.sleep(2.0)
        # Let the cache expire and test again
        response2 = self.client.get('/template_response_view/')
        self.assertEqual(response2.status_code, 200)
        # The view's output varies per request, so a fresh render differs.
        self.assertNotEqual(response.content, response2.content)
| bsd-3-clause |
pombredanne/django-linkcheck | linkcheck/management/commands/checkexternal.py | 7 | 1427 | from optparse import make_option
from django.core.management.base import BaseCommand
from linkcheck.utils import check_links
from linkcheck.linkcheck_settings import EXTERNAL_RECHECK_INTERVAL
from linkcheck.linkcheck_settings import MAX_CHECKS_PER_RUN
class Command(BaseCommand):
    """Management command that (re)checks external link status.
    NOTE(review): this is Python 2 code (print statements) targeting a
    legacy Django optparse-based BaseCommand.
    """
    option_list = BaseCommand.option_list + (
        make_option('--externalinterval', '-e', type='int',
            help='Specifies the length of time in minutes until external links are rechecked. Defaults to linkcheck_config setting'),
        make_option('--limit', '-l', type='int',
            help='Specifies the maximum number (int) of links to be checked. Defaults to linkcheck_config setting. Value less than 1 will check all'),
    )
    help = 'Check and record external link status'
    # NOTE(review): overrides execute() rather than the conventional handle();
    # this skips BaseCommand's usual output handling -- confirm intentional.
    def execute(self, *args, **options):
        # Truthiness fallback: passing 0 explicitly also selects the default.
        if options['externalinterval']:
            externalinterval = options['externalinterval']
        else:
            externalinterval = EXTERNAL_RECHECK_INTERVAL
        if options['limit']:
            limit = options['limit']
        else:
            limit = MAX_CHECKS_PER_RUN
        print "Checking all external links that haven't been tested for %s minutes." % externalinterval
        if limit!=-1:
            print "Will run maximum of %s checks this run." % limit
        return check_links(external_recheck_interval=externalinterval, limit=limit, check_internal=False)
| bsd-3-clause |
andrewleech/SickRage | lib/twilio/rest/resources/ip_messaging/roles.py | 23 | 1939 | from twilio.rest.resources import NextGenInstanceResource, NextGenListResource
class Role(NextGenInstanceResource):
    """A single IP Messaging role instance resource."""

    def update(self, permission, **kwargs):
        """
        Updates this Role instance
        :param permission: Role permission
        :return: Updated instance
        """
        payload = dict(kwargs)
        payload['permission'] = permission
        return self.update_instance(**payload)

    def delete(self):
        """
        Delete this role
        """
        return self.delete_instance()
class Roles(NextGenListResource):
    """List resource managing IP Messaging :class:`Role` instances."""

    name = "Roles"
    instance = Role

    def list(self, **kwargs):
        """Return a page of :class:`Role` resources as a list.

        For paging information see :class:`ListResource`.

        **NOTE**: Due to the potentially voluminous amount of data in an
        alert, the full HTTP request and response data is only returned
        in the Role instance resource representation.
        """
        return self.get_instances(kwargs)

    def delete(self, sid):
        """Delete the Role identified by *sid*."""
        return self.delete_instance(sid)

    def create(self, friendly_name, role_type, permission):
        """Create a Role.

        :param str friendly_name: Human readable name for the Role
        :param str role_type: Type of role - deployment or channel
        :param str permission: Set of permissions for the role
        """
        body = {
            "friendly_name": friendly_name,
            "type": role_type,
            "permission": permission,
        }
        return self.create_instance(body)

    def update(self, sid, permission, **kwargs):
        """Update the Role identified by *sid*.

        :param sid: Role instance identifier
        :param permission: Role permission
        :return: Updated instance
        """
        params = dict(kwargs, permission=permission)
        return self.update_instance(sid, params)
| gpl-3.0 |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_openshift/library/oc_serviceaccount.py | 7 | 60950 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/serviceaccount -*- -*- -*-
# Ansible module metadata consumed by ansible-doc (YAML embedded in a string).
# NOTE(review): the nesting indentation inside this YAML appears to have been
# stripped in this copy of the file -- restore it before shipping.
DOCUMENTATION = '''
---
module: oc_serviceaccount
short_description: Module to manage openshift service accounts
description:
- Manage openshift service accounts programmatically.
options:
state:
description:
- If present, the service account will be created if it doesn't exist or updated if different. If absent, the service account will be removed if present. If list, information about the service account will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: false
aliases: []
name:
description:
- Name of the service account.
required: true
default: None
aliases: []
namespace:
description:
- Namespace of the service account.
required: true
default: default
aliases: []
secrets:
description:
- A list of secrets that are associated with the service account.
required: false
default: None
aliases: []
image_pull_secrets:
description:
- A list of the image pull secrets that are associated with the service account.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
# Usage example shown by ansible-doc.
EXAMPLES = '''
- name: create registry serviceaccount
oc_serviceaccount:
name: registry
namespace: default
secrets:
- docker-registry-config
- registry-secret
register: sa_out
'''
# -*- -*- -*- End included fragment: doc/serviceaccount -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    """Raised by Yedit when a YAML read/edit/write operation fails."""
# pylint: disable=too-many-public-methods
class Yedit(object):  # pragma: no cover
    ''' Class to modify yaml files

    Wraps a YAML (or JSON) document and supports dotted key-path access,
    e.g. 'a.b[0].c'.  Uses ruamel.yaml round-trip mode when available
    (preserving formatting) and falls back to plain PyYAML otherwise.
    '''
    # Validates a whole key path.
    # NOTE(review): this pattern contains a literal %s but is expanded with
    # .format() in valid_key() below, so the %s is never substituted and ends
    # up literally inside the character class -- looks like an upstream quirk.
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    # Captures either a [index] component or a plain key component; the {}
    # placeholder receives the extra separator characters in parse_key().
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
    # Characters recognised as possible key-path separators.
    com_sep = set(['.', '#', '|', ':'])

    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        '''Constructor.

        filename: optional file backing the document
        content: optional initial content (dict or serialized string)
        content_type: 'yaml' or 'json'
        separator: key-path separator character
        backup: copy the file to <filename>.orig before writing
        '''
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}

    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator

    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator '''
        self._separator = inc_sep

    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict

    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value

    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator

        Returns a list of (array_index, dict_key) tuples from re.findall.
        '''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key.format(''.join(common_separators)), key)

    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
            return False
        return True

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def remove_entry(data, key, index=None, value=None, sep='.'):
        ''' remove data at location key

        An empty key targets the root container: remove `value`, remove at
        `index`, or clear the whole container.
        '''
        if key == '' and isinstance(data, dict):
            if value is not None:
                data.pop(value)
            elif index is not None:
                raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
            else:
                data.clear()
            return True
        elif key == '' and isinstance(data, list):
            ind = None
            if value is not None:
                try:
                    ind = data.index(value)
                except ValueError:
                    return False
            elif index is not None:
                ind = index
            else:
                del data[:]
            if ind is not None:
                data.pop(ind)
            return True
        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk to the parent of the final path component.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True

    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a#b
            return c

        Intermediate dict levels are created on demand; missing list
        indexes raise YeditException.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue
                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))
                # Create the missing intermediate dict level.
                data[dict_key] = {}
                data = data[dict_key]
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))
        if key == '':
            data = item
        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))
        return data

    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c

        Returns None when any path component is missing.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        return data

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        # Write to a temp file then rename for an atomic-ish replace; the
        # flock guards against concurrent writers of the same temp path.
        tmp_filename = filename + '.yedit'
        with open(tmp_filename, 'w') as yfd:
            fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            yfd.write(contents)
            fcntl.flock(yfd, fcntl.LOCK_UN)
        os.rename(tmp_filename, filename)

    def write(self):
        ''' write to file

        Returns (True, yaml_dict); raises YeditException when no filename
        is set or content_type is unsupported.
        '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')
        # Try to set format attributes if supported
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass
        # Try to use RoundTripDumper if supported.
        if self.content_type == 'yaml':
            try:
                Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
            except AttributeError:
                Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
        elif self.content_type == 'json':
            Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
        else:
            raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
                                 'Please specify a content_type of yaml or json.')
        return (True, self.yaml_dict)

    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None
        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()
        return contents

    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True
        return False

    def load(self, content_type='yaml'):
        ''' return yaml file

        Prefers in-memory self.content over file contents; parses strings
        as YAML or JSON according to content_type.
        '''
        contents = self.read()
        if not contents and not self.content:
            return None
        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content
        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
                # Try to use RoundTripLoader if supported.
                try:
                    self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. {}'.format(err))
        return self.yaml_dict

    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None
        return entry

    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list

        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)
            entry.pop(ind)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)

    def delete(self, path, index=None, value=None):
        ''' remove path from a dict

        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)

    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, list):
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # A dict "exists" when every key/value pair matches the entry.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval
            return value in entry
        return entry == value

    def append(self, path, value):
        '''append value to a list

        Creates the list at path when missing; returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)
        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)

    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict

        For dict entries, merges `value` in; for list entries, replaces
        the item at `index`/`curr_value` or appends when not present.
        Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                     'value=[{}] type=[{}]'.format(value, type(value)))
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index is not None:
                ind = index
            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)

    def put(self, path, value):
        ''' put path, value into a dict

        Works on a round-tripped copy so a failed add leaves yaml_dict
        untouched.  Returns (changed, yaml_dict).
        '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)
        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is None:
            return (False, self.yaml_dict)
        # When path equals "" it is a special case.
        # "" refers to the root of the document
        # Only update the root path (entire document) when its a list or dict
        if path == '':
            if isinstance(result, list) or isinstance(result, dict):
                self.yaml_dict = result
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)

    def create(self, path, value):
        ''' create a yaml file

        Only mutates yaml_dict when the backing file does not yet exist.
        Returns (changed, yaml_dict).
        '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)
            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result is not None:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)
        return (False, self.yaml_dict)

    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value

        Parses `invalue` as yaml or json when requested; otherwise passes
        it through untouched.
        '''
        if invalue is None:
            return None
        curr_value = invalue
        if val_type == 'yaml':
            try:
                # AUDIT:maybe-no-member makes sense due to different yaml libraries
                # pylint: disable=maybe-no-member
                # NOTE(review): PyYAML's safe_load() takes no Loader kwarg;
                # without ruamel this likely raises TypeError rather than the
                # AttributeError caught here -- verify against upstream.
                curr_value = yaml.safe_load(invalue, Loader=yaml.RoundTripLoader)
            except AttributeError:
                curr_value = yaml.safe_load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)
        return curr_value

    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed

        Coerces strings to bool/str per `vtype`, else yaml-loads them to
        infer the natural type.
        '''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']
        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)
        # There is a special case where '' will turn into None after yaml loading it so skip
        if isinstance(inc_value, str) and inc_value == '':
            pass
        # If vtype is not str then go ahead and attempt to yaml load it.
        elif isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.safe_load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming value. ' +
                                     'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
        return inc_value

    @staticmethod
    def process_edits(edits, yamlfile):
        '''run through a list of edits and process them one-by-one

        Each edit dict selects update/append/put; returns
        {'changed': bool, 'results': [...]}.
        '''
        results = []
        for edit in edits:
            value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
            if edit.get('action') == 'update':
                # pylint: disable=line-too-long
                curr_value = Yedit.get_curr_value(
                    Yedit.parse_value(edit.get('curr_value')),
                    edit.get('curr_value_format'))
                rval = yamlfile.update(edit['key'],
                                       value,
                                       edit.get('index'),
                                       curr_value)
            elif edit.get('action') == 'append':
                rval = yamlfile.append(edit['key'], value)
            else:
                rval = yamlfile.put(edit['key'], value)
            if rval[0]:
                results.append({'key': edit['key'], 'edit': rval[1]})
        return {'changed': len(results) > 0, 'results': results}

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params):
        '''perform the idempotent crud operations

        Dispatches on params['state'] (list/absent/present) and returns an
        Ansible-style result dict.
        '''
        yamlfile = Yedit(filename=params['src'],
                         backup=params['backup'],
                         content_type=params['content_type'],
                         separator=params['separator'])
        state = params['state']
        if params['src']:
            rval = yamlfile.load()
            if yamlfile.yaml_dict is None and state != 'present':
                return {'failed': True,
                        'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                               'file exists, that it is has correct permissions, and is valid yaml.'}
        if state == 'list':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content
            if params['key']:
                rval = yamlfile.get(params['key'])
            return {'changed': False, 'result': rval, 'state': state}
        elif state == 'absent':
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                yamlfile.yaml_dict = content
            if params['update']:
                rval = yamlfile.pop(params['key'], params['value'])
            else:
                rval = yamlfile.delete(params['key'], params['index'], params['value'])
            if rval[0] and params['src']:
                yamlfile.write()
            return {'changed': rval[0], 'result': rval[1], 'state': state}
        elif state == 'present':
            # check if content is different than what is in the file
            if params['content']:
                content = Yedit.parse_value(params['content'], params['content_type'])
                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   params['value'] is None:
                    return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
                yamlfile.yaml_dict = content
            # If we were passed a key, value then
            # we enapsulate it in a list and process it
            # Key, Value passed to the module : Converted to Edits list #
            edits = []
            _edit = {}
            if params['value'] is not None:
                _edit['value'] = params['value']
                _edit['value_type'] = params['value_type']
                _edit['key'] = params['key']
                if params['update']:
                    _edit['action'] = 'update'
                    _edit['curr_value'] = params['curr_value']
                    _edit['curr_value_format'] = params['curr_value_format']
                    _edit['index'] = params['index']
                elif params['append']:
                    _edit['action'] = 'append'
                edits.append(_edit)
            elif params['edits'] is not None:
                edits = params['edits']
            if edits:
                results = Yedit.process_edits(edits, yamlfile)
                # if there were changes and a src provided to us we need to write
                if results['changed'] and params['src']:
                    yamlfile.write()
                return {'changed': results['changed'], 'result': results['results'], 'state': state}
            # no edits to make
            if params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': state}
            # We were passed content but no src, key or value, or edits. Return contents in memory
            return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        # NOTE(review): 'Unkown' typo is in the runtime message; left as-is
        # since callers may match on this exact string.
        return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    """Raised when an `oc` command wrapper hits an unrecoverable error."""
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    """Return the path to the `oc` client binary, or 'oc' when not found.

    Searches $PATH plus a couple of extra directories, because `oc` may
    live in /usr/local/bin, which ansible/sudo environments sometimes drop
    from $PATH (see openshift-ansible issue #3410).
    """
    search_dirs = os.environ.get("PATH", os.defpath).split(os.pathsep)
    search_dirs = search_dirs + ADDITIONAL_PATH_LOOKUPS
    binary = 'oc'
    try:
        # Python 3: shutil.which honours our extended search path.
        found = shutil.which(binary, path=os.pathsep.join(search_dirs))
    except AttributeError:
        # Python 2 fallback: first existing <dir>/oc wins.
        for directory in search_dirs:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                binary = candidate
                break
    else:
        if found is not None:
            binary = found
    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools

    Thin subprocess wrapper around the `oc`/`oc adm` binaries; every
    public-ish helper ultimately funnels through openshift_cmd().
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # Operate on a private temp copy so the original kubeconfig is never
        # modified by this run.
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
        ''' replace the current object with the content

        Fetches the live object, applies `content` key/value puts or the
        `edits` list via Yedit, and `oc replace`s it only when something
        actually changed.
        '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        updated = False
        if content is not None:
            changes = []
            for key, value in content.items():
                changes.append(yed.put(key, value))
            if any([change[0] for change in changes]):
                updated = True
        elif edits is not None:
            results = Yedit.process_edits(edits, yed)
            if results['changed']:
                updated = True
        if updated:
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource

        Exactly one of `name` or `selector` must be supplied; selector
        takes precedence.
        '''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')
        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

        template_name: the name of the template to process
        create: whether to send to oc create after processing
        params: the parameters for the template
        template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # '-f -' makes oc read the template from stdin.
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None, field_selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        if field_selector is not None:
            cmd.append('--field-selector={}'.format(field_selector))
        # Name cannot be used with selector or field_selector.
        if selector is None and field_selector is None and name is not None:
            cmd.append(name)
        cmd.extend(['-o', 'json'])
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        cmd.append('--schedulable={}'.format(schedulable))
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))
        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier.

        Returns (returncode, stdout, stderr) with output decoded as UTF-8.
        '''
        curr_env = os.environ.copy()
        # Point the oc client at our private kubeconfig copy.
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)
        stdout, stderr = proc.communicate(input_data)
        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Builds the argv (oc [adm] ... [-n ns|--all-namespaces]), runs it,
        and returns a dict with returncode/cmd/results (+stdout/stderr on
        error).
        '''
        cmds = [self.oc_binary]
        if oadm:
            cmds.append('adm')
        cmds.extend(cmd)
        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE(review): 'emtpy' typo is in the runtime sentinel list; left
        # as-is since callers may rely on the exact strings accepted here.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])
        if self.verbose:
            print(' '.join(cmds))
        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}
        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    if "No JSON object could be decoded" in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''
        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))
        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})
        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        with open(filename, 'w') as sfd:
            sfd.write(str(contents))

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents

        rname: prefix for the generated temp-file name.
        data: structure (yaml/json) or raw text to serialize.
        ftype: 'yaml', 'json', or anything else for raw write.
        Returns the path of the created file; the file is removed at exit.
        '''
        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])
        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''
        # delete=False so the path survives the context manager; callers
        # are responsible for registering cleanup.
        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name; returns the dict or None '''
        rval = None
        for result in results:
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the parsed contents of a resource file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output into a {component: version} dict '''
        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        # By default "oc version in 3.2 does not return an "openshift" version
        # NOTE(review): assumes the 'oc' line is always present in the output.
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings (numeric and short forms) '''
        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = version[1:4]

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed via the rpm database '''
        import rpm
        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")

        return rpmquery.count() > 0

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Returns True when every key/value in result_def (minus skipped keys)
        matches user_def, recursing into nested dicts and lists of dicts.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            # BUGFIX: this diagnostic print was previously
                            # emitted unconditionally; guard it like every
                            # other debug message in this function.
                            if debug:
                                print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic configuration holder for oc-based modules.'''

    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for opt_name in sorted(self.config_options):
            entry = self.config_options[opt_name]

            # Skip entries that are excluded or carry no usable value
            # (None is allowed through only if it is an int, which it never
            # is -- kept for parity with the historical condition).
            wanted = entry['include'] and \
                (entry['value'] is not None or isinstance(entry['value'], int))
            if not wanted:
                continue

            if opt_name == ascommalist:
                opt_value = ','.join('{}={}'.format(kk, vv)
                                     for kk, vv in sorted(entry['value'].items()))
            else:
                opt_value = entry['value']

            params.append('--{}={}'.format(opt_name.replace('_', '-'), opt_value))

        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
    '''Service account config class

       This class stores the options and returns a default service account
    '''

    # pylint: disable=too-many-arguments
    def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
        self.name = sname
        self.kubeconfig = kubeconfig
        self.namespace = namespace
        self.secrets = secrets or []
        self.image_pull_secrets = image_pull_secrets or []
        self.data = {}
        self.create_dict()

    def create_dict(self):
        ''' instantiate a properly structured service account dict '''
        self.data['apiVersion'] = 'v1'
        self.data['kind'] = 'ServiceAccount'
        self.data['metadata'] = {'name': self.name, 'namespace': self.namespace}
        # Secrets and image pull secrets are lists of {"name": ...} refs.
        self.data['secrets'] = [{'name': sec} for sec in self.secrets]
        self.data['imagePullSecrets'] = [{'name': sec} for sec in self.image_pull_secrets]
class ServiceAccount(Yedit):
    ''' Yedit wrapper around a serviceaccount resource document. '''
    image_pull_secrets_path = "imagePullSecrets"
    secrets_path = "secrets"

    def __init__(self, content):
        '''ServiceAccount constructor'''
        super(ServiceAccount, self).__init__(content=content)
        self._secrets = None
        self._image_pull_secrets = None

    @property
    def image_pull_secrets(self):
        ''' lazily-loaded list of imagePullSecrets from the document '''
        if self._image_pull_secrets is None:
            self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
        return self._image_pull_secrets

    @image_pull_secrets.setter
    def image_pull_secrets(self, secrets):
        ''' setter for image_pull_secrets '''
        self._image_pull_secrets = secrets

    @property
    def secrets(self):
        ''' lazily-loaded list of secrets from the document '''
        if not self._secrets:
            self._secrets = self.get(ServiceAccount.secrets_path) or []
        return self._secrets

    @secrets.setter
    def secrets(self, secrets):
        ''' setter for secrets '''
        self._secrets = secrets

    def delete_secret(self, inc_secret):
        ''' remove a secret by name; returns True if a secret was removed '''
        remove_idx = None
        for idx, sec in enumerate(self.secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # BUGFIX: compare against None explicitly -- the previous truthiness
        # test (`if remove_idx:`) made it impossible to delete the secret at
        # index 0.
        if remove_idx is not None:
            del self.secrets[remove_idx]
            return True

        return False

    def delete_image_pull_secret(self, inc_secret):
        ''' remove an image_pull_secret by name; returns True if removed '''
        remove_idx = None
        for idx, sec in enumerate(self.image_pull_secrets):
            if sec['name'] == inc_secret:
                remove_idx = idx
                break

        # BUGFIX: same index-0 truthiness bug as delete_secret.
        if remove_idx is not None:
            del self.image_pull_secrets[remove_idx]
            return True

        return False

    def find_secret(self, inc_secret):
        '''find a secret by name; returns the entry dict or None'''
        for secret in self.secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def find_image_pull_secret(self, inc_secret):
        '''find an image pull secret by name; returns the entry dict or None'''
        for secret in self.image_pull_secrets:
            if secret['name'] == inc_secret:
                return secret

        return None

    def add_secret(self, inc_secret):
        '''add secret'''
        if self.secrets:
            self.secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            # No secrets yet: write the list into the underlying document.
            self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])

    def add_image_pull_secret(self, inc_secret):
        '''add image_pull_secret'''
        if self.image_pull_secrets:
            self.image_pull_secrets.append({"name": inc_secret})  # pylint: disable=no-member
        else:
            self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_serviceaccount.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCServiceAccount(OpenShiftCLI):
    ''' Class to wrap the oc command line tools for serviceaccount resources. '''
    kind = 'sa'

    # pylint allows 5
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OCServiceAccount.

        config: a ServiceAccountConfig describing the desired resource.
        '''
        super(OCServiceAccount, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
        self.config = config
        # Populated by get(); stays None until the resource has been fetched.
        self.service_account = None

    def exists(self):
        ''' return whether the service account was found by the last get() '''
        if self.service_account:
            return True

        return False

    def get(self):
        '''fetch the service account; caches it in self.service_account.

        A "not found" stderr is normalized to a successful, empty result so
        callers can distinguish absence from a real failure.
        '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.service_account = ServiceAccount(content=result['results'][0])
        elif '\"%s\" not found' % self.config.name in result['stderr']:
            result['returncode'] = 0
            result['results'] = [{}]

        return result

    def delete(self):
        '''delete the object'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''create the object from the config's prepared data dict'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''update the object: merge in any missing secrets, then replace'''
        # need to update the tls information and the service name
        for secret in self.config.secrets:
            result = self.service_account.find_secret(secret)
            if not result:
                self.service_account.add_secret(secret)

        for secret in self.config.image_pull_secrets:
            result = self.service_account.find_image_pull_secret(secret)
            if not result:
                self.service_account.add_image_pull_secret(secret)

        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        ''' verify an update is needed '''
        # since creating an service account generates secrets and imagepullsecrets
        # check_def_equal will not work
        # Instead, verify all secrets passed are in the list
        for secret in self.config.secrets:
            result = self.service_account.find_secret(secret)
            if not result:
                return True

        for secret in self.config.image_pull_secrets:
            result = self.service_account.find_image_pull_secret(secret)
            if not result:
                return True

        return False

    @staticmethod
    # pylint: disable=too-many-return-statements,too-many-branches
    # TODO: This function should be refactored into its individual parts.
    def run_ansible(params, check_mode):
        '''run the ansible idempotent code

        Returns an Ansible result dict: {'changed': ..., 'results'/'msg': ...,
        'state': ...} or {'failed': True, ...} on error.
        '''
        rconfig = ServiceAccountConfig(params['name'],
                                       params['namespace'],
                                       params['kubeconfig'],
                                       params['secrets'],
                                       params['image_pull_secrets'],
                                      )

        oc_sa = OCServiceAccount(rconfig,
                                 verbose=params['debug'])

        state = params['state']

        # Fetch current state once up front; exists() relies on this call.
        api_rval = oc_sa.get()

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval['results'], 'state': 'list'}

        ########
        # Delete
        ########
        if state == 'absent':
            if oc_sa.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'Would have performed a delete.'}

                api_rval = oc_sa.delete()

                return {'changed': True, 'results': api_rval, 'state': 'absent'}

            return {'changed': False, 'state': 'absent'}

        if state == 'present':
            ########
            # Create
            ########
            if not oc_sa.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'Would have performed a create.'}

                # Create it here
                api_rval = oc_sa.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_sa.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': 'present'}

            ########
            # Update
            ########
            if oc_sa.needs_update():
                api_rval = oc_sa.update()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_sa.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': 'present'}

            # Already present and up to date: no-op.
            return {'changed': False, 'results': api_rval, 'state': 'present'}

        return {'failed': True,
                'changed': False,
                'msg': 'Unknown state passed. %s' % state,
                'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
def main():
    '''
    ansible oc module for service accounts
    '''

    module = AnsibleModule(
        argument_spec=dict(
            # Path to the kubeconfig used to authenticate against the cluster.
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            name=dict(default=None, required=True, type='str'),
            namespace=dict(default=None, required=True, type='str'),
            # Names of secrets / image pull secrets to attach to the account.
            secrets=dict(default=None, type='list'),
            image_pull_secrets=dict(default=None, type='list'),
        ),
        supports_check_mode=True,
    )

    # run_ansible returns an Ansible-style result dict; translate it into
    # the module's exit path.
    rval = OCServiceAccount.run_ansible(module.params, module.check_mode)
    if 'failed' in rval:
        module.fail_json(**rval)

    module.exit_json(**rval)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
| apache-2.0 |
kustodian/ansible | test/units/modules/network/fortios/test_fortios_system_replacemsg_alertmail.py | 21 | 8539 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_replacemsg_alertmail
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Autouse fixture: patch the module's Connection class for every test."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_replacemsg_alertmail.Connection')
    return connection_class_mock


# NOTE(review): this passes the fixture *function* object itself, not a mock
# instance -- presumably harmless because every test patches FortiOSHandler's
# set/delete/schema methods, so the connection is never used; verify.
fos_instance = FortiOSHandler(connection_mock)
def test_system_replacemsg_alertmail_creation(mocker):
    """present state: set() is called with underscore keys mapped to dashes and the run reports changed."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_replacemsg_alertmail': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_alertmail.fortios_system_replacemsg(input_data, fos_instance)

    expected_data = {
        'buffer': 'test_value_3',
        'format': 'none',
        'header': 'none',
        # msg_type is translated to the API's dashed form
        'msg-type': 'test_value_6'
    }

    set_method_mock.assert_called_with('system.replacemsg', 'alertmail', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_replacemsg_alertmail_creation_fails(mocker):
    """present state: an HTTP 500 from set() marks the run failed and unchanged."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_replacemsg_alertmail': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_alertmail.fortios_system_replacemsg(input_data, fos_instance)

    expected_data = {
        'buffer': 'test_value_3',
        'format': 'none',
        'header': 'none',
        'msg-type': 'test_value_6'
    }

    set_method_mock.assert_called_with('system.replacemsg', 'alertmail', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_replacemsg_alertmail_removal(mocker):
    """absent state: delete() is issued and the run reports changed."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_replacemsg_alertmail': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_alertmail.fortios_system_replacemsg(input_data, fos_instance)

    # mkey (the resource key) is computed inside the module; only assert it was passed.
    delete_method_mock.assert_called_with('system.replacemsg', 'alertmail', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_replacemsg_alertmail_deletion_fails(mocker):
    """absent state: an HTTP 500 from delete() marks the run failed and unchanged."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)

    input_data = {
        'username': 'admin',
        'state': 'absent',
        'system_replacemsg_alertmail': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_alertmail.fortios_system_replacemsg(input_data, fos_instance)

    delete_method_mock.assert_called_with('system.replacemsg', 'alertmail', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_replacemsg_alertmail_idempotent(mocker):
    """present state: a 404 on a DELETE-method response means already-in-desired-state -- neither error nor change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_replacemsg_alertmail': {
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_alertmail.fortios_system_replacemsg(input_data, fos_instance)

    expected_data = {
        'buffer': 'test_value_3',
        'format': 'none',
        'header': 'none',
        'msg-type': 'test_value_6'
    }

    set_method_mock.assert_called_with('system.replacemsg', 'alertmail', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_replacemsg_alertmail_filter_foreign_attributes(mocker):
    """present state: attributes not in the module schema are stripped before calling set()."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')

    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)

    input_data = {
        'username': 'admin',
        'state': 'present',
        'system_replacemsg_alertmail': {
            # not part of the schema: must not reach the API payload
            'random_attribute_not_valid': 'tag',
            'buffer': 'test_value_3',
            'format': 'none',
            'header': 'none',
            'msg_type': 'test_value_6'
        },
        'vdom': 'root'}

    is_error, changed, response = fortios_system_replacemsg_alertmail.fortios_system_replacemsg(input_data, fos_instance)

    expected_data = {
        'buffer': 'test_value_3',
        'format': 'none',
        'header': 'none',
        'msg-type': 'test_value_6'
    }

    set_method_mock.assert_called_with('system.replacemsg', 'alertmail', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
postlund/home-assistant | homeassistant/components/mysensors/climate.py | 3 | 6899 | """MySensors platform that offers a Climate (MySensors-HVAC) component."""
from homeassistant.components import mysensors
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
# Mapping from Home Assistant HVAC modes to the MySensors HVAC mode strings
# sent to / received from the gateway.
DICT_HA_TO_MYS = {
    HVAC_MODE_AUTO: "AutoChangeOver",
    HVAC_MODE_COOL: "CoolOn",
    HVAC_MODE_HEAT: "HeatOn",
    HVAC_MODE_OFF: "Off",
}
# Inverse mapping: MySensors mode strings back to HA HVAC modes.
DICT_MYS_TO_HA = {
    "AutoChangeOver": HVAC_MODE_AUTO,
    "CoolOn": HVAC_MODE_COOL,
    "HeatOn": HVAC_MODE_HEAT,
    "Off": HVAC_MODE_OFF,
}

# Fan speed values understood by the MySensors V_HVAC_SPEED variable.
FAN_LIST = ["Auto", "Min", "Normal", "Max"]
# HVAC modes this platform exposes to Home Assistant.
OPERATION_LIST = [HVAC_MODE_OFF, HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_HEAT]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the mysensors climate.

    Delegates entity creation to the shared mysensors platform helper,
    wrapping each discovered device in a MySensorsHVAC entity.
    """
    mysensors.setup_mysensors_platform(
        hass,
        DOMAIN,
        discovery_info,
        MySensorsHVAC,
        async_add_entities=async_add_entities,
    )
class MySensorsHVAC(mysensors.device.MySensorsEntity, ClimateDevice):
    """Representation of a MySensors HVAC."""

    @property
    def supported_features(self):
        """Return the list of supported features."""
        features = 0
        set_req = self.gateway.const.SetReq
        if set_req.V_HVAC_SPEED in self._values:
            features = features | SUPPORT_FAN_MODE
        # Both setpoints present -> the device exposes a low/high range;
        # otherwise a single target temperature.
        if (
            set_req.V_HVAC_SETPOINT_COOL in self._values
            and set_req.V_HVAC_SETPOINT_HEAT in self._values
        ):
            features = features | SUPPORT_TARGET_TEMPERATURE_RANGE
        else:
            features = features | SUPPORT_TARGET_TEMPERATURE
        return features

    @property
    def assumed_state(self):
        """Return True if unable to access real state of entity."""
        return self.gateway.optimistic

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS if self.gateway.metric else TEMP_FAHRENHEIT

    @property
    def current_temperature(self):
        """Return the current temperature, or None if not yet reported."""
        value = self._values.get(self.gateway.const.SetReq.V_TEMP)

        if value is not None:
            value = float(value)

        return value

    @property
    def target_temperature(self):
        """Return the temperature we try to reach.

        Returns None when both setpoints exist, because the entity then
        operates in range mode (see target_temperature_low/high).
        """
        set_req = self.gateway.const.SetReq
        if (
            set_req.V_HVAC_SETPOINT_COOL in self._values
            and set_req.V_HVAC_SETPOINT_HEAT in self._values
        ):
            return None
        # Prefer the cool setpoint; fall back to the heat setpoint.
        temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
        if temp is None:
            temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
        return float(temp) if temp is not None else None

    @property
    def target_temperature_high(self):
        """Return the highbound target temperature we try to reach."""
        set_req = self.gateway.const.SetReq
        # Only meaningful in range mode, i.e. when a heat setpoint also exists.
        if set_req.V_HVAC_SETPOINT_HEAT in self._values:
            temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
            return float(temp) if temp is not None else None

    @property
    def target_temperature_low(self):
        """Return the lowbound target temperature we try to reach."""
        set_req = self.gateway.const.SetReq
        # Only meaningful in range mode, i.e. when a cool setpoint also exists.
        if set_req.V_HVAC_SETPOINT_COOL in self._values:
            temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
            return float(temp) if temp is not None else None

    @property
    def hvac_mode(self):
        """Return current operation ie. heat, cool, idle."""
        return self._values.get(self.value_type)

    @property
    def hvac_modes(self):
        """List of available operation modes."""
        return OPERATION_LIST

    @property
    def fan_mode(self):
        """Return the fan setting."""
        return self._values.get(self.gateway.const.SetReq.V_HVAC_SPEED)

    @property
    def fan_modes(self):
        """List of available fan modes."""
        return FAN_LIST

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature.

        Accepts either a single ATTR_TEMPERATURE (when one setpoint is in
        use) or an ATTR_TARGET_TEMP_LOW/HIGH pair (range mode).
        """
        set_req = self.gateway.const.SetReq
        temp = kwargs.get(ATTR_TEMPERATURE)
        low = kwargs.get(ATTR_TARGET_TEMP_LOW)
        high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        heat = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
        cool = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
        updates = []
        if temp is not None:
            if heat is not None:
                # Set HEAT Target temperature
                value_type = set_req.V_HVAC_SETPOINT_HEAT
            elif cool is not None:
                # Set COOL Target temperature
                value_type = set_req.V_HVAC_SETPOINT_COOL
            if heat is not None or cool is not None:
                updates = [(value_type, temp)]
        elif all(val is not None for val in (low, high, heat, cool)):
            # Range mode: update both setpoints.
            updates = [
                (set_req.V_HVAC_SETPOINT_HEAT, low),
                (set_req.V_HVAC_SETPOINT_COOL, high),
            ]
        for value_type, value in updates:
            self.gateway.set_child_value(
                self.node_id, self.child_id, value_type, value, ack=1
            )
            if self.gateway.optimistic:
                # Optimistically assume that device has changed state
                self._values[value_type] = value
                self.async_schedule_update_ha_state()

    async def async_set_fan_mode(self, fan_mode):
        """Set new fan mode on the device."""
        set_req = self.gateway.const.SetReq
        self.gateway.set_child_value(
            self.node_id, self.child_id, set_req.V_HVAC_SPEED, fan_mode, ack=1
        )
        if self.gateway.optimistic:
            # Optimistically assume that device has changed state
            self._values[set_req.V_HVAC_SPEED] = fan_mode
            self.async_schedule_update_ha_state()

    async def async_set_hvac_mode(self, hvac_mode):
        """Set new HVAC operation mode on the device."""
        self.gateway.set_child_value(
            self.node_id,
            self.child_id,
            self.value_type,
            # Translate the HA mode to the MySensors mode string.
            DICT_HA_TO_MYS[hvac_mode],
            ack=1,
        )
        if self.gateway.optimistic:
            # Optimistically assume that device has changed state
            self._values[self.value_type] = hvac_mode
            self.async_schedule_update_ha_state()

    async def async_update(self):
        """Update the controller with the latest value from a sensor."""
        await super().async_update()
        # Normalize the gateway's mode string into the HA vocabulary.
        self._values[self.value_type] = DICT_MYS_TO_HA[self._values[self.value_type]]
| apache-2.0 |
FireBladeNooT/Medusa_1_6 | lib/unidecode/x033.py | 222 | 4556 | data = (
'apartment', # 0x00
'alpha', # 0x01
'ampere', # 0x02
'are', # 0x03
'inning', # 0x04
'inch', # 0x05
'won', # 0x06
'escudo', # 0x07
'acre', # 0x08
'ounce', # 0x09
'ohm', # 0x0a
'kai-ri', # 0x0b
'carat', # 0x0c
'calorie', # 0x0d
'gallon', # 0x0e
'gamma', # 0x0f
'giga', # 0x10
'guinea', # 0x11
'curie', # 0x12
'guilder', # 0x13
'kilo', # 0x14
'kilogram', # 0x15
'kilometer', # 0x16
'kilowatt', # 0x17
'gram', # 0x18
'gram ton', # 0x19
'cruzeiro', # 0x1a
'krone', # 0x1b
'case', # 0x1c
'koruna', # 0x1d
'co-op', # 0x1e
'cycle', # 0x1f
'centime', # 0x20
'shilling', # 0x21
'centi', # 0x22
'cent', # 0x23
'dozen', # 0x24
'desi', # 0x25
'dollar', # 0x26
'ton', # 0x27
'nano', # 0x28
'knot', # 0x29
'heights', # 0x2a
'percent', # 0x2b
'parts', # 0x2c
'barrel', # 0x2d
'piaster', # 0x2e
'picul', # 0x2f
'pico', # 0x30
'building', # 0x31
'farad', # 0x32
'feet', # 0x33
'bushel', # 0x34
'franc', # 0x35
'hectare', # 0x36
'peso', # 0x37
'pfennig', # 0x38
'hertz', # 0x39
'pence', # 0x3a
'page', # 0x3b
'beta', # 0x3c
'point', # 0x3d
'volt', # 0x3e
'hon', # 0x3f
'pound', # 0x40
'hall', # 0x41
'horn', # 0x42
'micro', # 0x43
'mile', # 0x44
'mach', # 0x45
'mark', # 0x46
'mansion', # 0x47
'micron', # 0x48
'milli', # 0x49
'millibar', # 0x4a
'mega', # 0x4b
'megaton', # 0x4c
'meter', # 0x4d
'yard', # 0x4e
'yard', # 0x4f
'yuan', # 0x50
'liter', # 0x51
'lira', # 0x52
'rupee', # 0x53
'ruble', # 0x54
'rem', # 0x55
'roentgen', # 0x56
'watt', # 0x57
'0h', # 0x58
'1h', # 0x59
'2h', # 0x5a
'3h', # 0x5b
'4h', # 0x5c
'5h', # 0x5d
'6h', # 0x5e
'7h', # 0x5f
'8h', # 0x60
'9h', # 0x61
'10h', # 0x62
'11h', # 0x63
'12h', # 0x64
'13h', # 0x65
'14h', # 0x66
'15h', # 0x67
'16h', # 0x68
'17h', # 0x69
'18h', # 0x6a
'19h', # 0x6b
'20h', # 0x6c
'21h', # 0x6d
'22h', # 0x6e
'23h', # 0x6f
'24h', # 0x70
'HPA', # 0x71
'da', # 0x72
'AU', # 0x73
'bar', # 0x74
'oV', # 0x75
'pc', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'Heisei', # 0x7b
'Syouwa', # 0x7c
'Taisyou', # 0x7d
'Meiji', # 0x7e
'Inc.', # 0x7f
'pA', # 0x80
'nA', # 0x81
'microamp', # 0x82
'mA', # 0x83
'kA', # 0x84
'kB', # 0x85
'MB', # 0x86
'GB', # 0x87
'cal', # 0x88
'kcal', # 0x89
'pF', # 0x8a
'nF', # 0x8b
'microFarad', # 0x8c
'microgram', # 0x8d
'mg', # 0x8e
'kg', # 0x8f
'Hz', # 0x90
'kHz', # 0x91
'MHz', # 0x92
'GHz', # 0x93
'THz', # 0x94
'microliter', # 0x95
'ml', # 0x96
'dl', # 0x97
'kl', # 0x98
'fm', # 0x99
'nm', # 0x9a
'micrometer', # 0x9b
'mm', # 0x9c
'cm', # 0x9d
'km', # 0x9e
'mm^2', # 0x9f
'cm^2', # 0xa0
'm^2', # 0xa1
'km^2', # 0xa2
'mm^4', # 0xa3
'cm^3', # 0xa4
'm^3', # 0xa5
'km^3', # 0xa6
'm/s', # 0xa7
'm/s^2', # 0xa8
'Pa', # 0xa9
'kPa', # 0xaa
'MPa', # 0xab
'GPa', # 0xac
'rad', # 0xad
'rad/s', # 0xae
'rad/s^2', # 0xaf
'ps', # 0xb0
'ns', # 0xb1
'microsecond', # 0xb2
'ms', # 0xb3
'pV', # 0xb4
'nV', # 0xb5
'microvolt', # 0xb6
'mV', # 0xb7
'kV', # 0xb8
'MV', # 0xb9
'pW', # 0xba
'nW', # 0xbb
'microwatt', # 0xbc
'mW', # 0xbd
'kW', # 0xbe
'MW', # 0xbf
'kOhm', # 0xc0
'MOhm', # 0xc1
'a.m.', # 0xc2
'Bq', # 0xc3
'cc', # 0xc4
'cd', # 0xc5
'C/kg', # 0xc6
'Co.', # 0xc7
'dB', # 0xc8
'Gy', # 0xc9
'ha', # 0xca
'HP', # 0xcb
'in', # 0xcc
'K.K.', # 0xcd
'KM', # 0xce
'kt', # 0xcf
'lm', # 0xd0
'ln', # 0xd1
'log', # 0xd2
'lx', # 0xd3
'mb', # 0xd4
'mil', # 0xd5
'mol', # 0xd6
'pH', # 0xd7
'p.m.', # 0xd8
'PPM', # 0xd9
'PR', # 0xda
'sr', # 0xdb
'Sv', # 0xdc
'Wb', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'1d', # 0xe0
'2d', # 0xe1
'3d', # 0xe2
'4d', # 0xe3
'5d', # 0xe4
'6d', # 0xe5
'7d', # 0xe6
'8d', # 0xe7
'9d', # 0xe8
'10d', # 0xe9
'11d', # 0xea
'12d', # 0xeb
'13d', # 0xec
'14d', # 0xed
'15d', # 0xee
'16d', # 0xef
'17d', # 0xf0
'18d', # 0xf1
'19d', # 0xf2
'20d', # 0xf3
'21d', # 0xf4
'22d', # 0xf5
'23d', # 0xf6
'24d', # 0xf7
'25d', # 0xf8
'26d', # 0xf9
'27d', # 0xfa
'28d', # 0xfb
'29d', # 0xfc
'30d', # 0xfd
'31d', # 0xfe
)
| gpl-3.0 |
Jonekee/chromium.src | third_party/typ/typ/tests/test_case_test.py | 84 | 1908 | # Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typ import test_case
class TestFuncs(test_case.MainTestCase):

    def test_convert_newlines(self):
        """convert_newlines maps bare \r and \r\n sequences to \n."""
        cn = test_case.convert_newlines
        cases = [
            ('foo', 'foo'),
            ('foo\nbar\nbaz', 'foo\nbar\nbaz'),
            ('foo\rbar\nbaz\r', 'foo\nbar\nbaz\n'),
            ('foo\r\nbar\r\nbaz\r\nmeh\n', 'foo\nbar\nbaz\nmeh\n'),
        ]
        for raw, expected in cases:
            self.assertEqual(cn(raw), expected)
class TestMainTestCase(test_case.MainTestCase):
    # End-to-end check of MainTestCase.check(): spawns a real Python
    # subprocess and verifies stdin/stdout/stderr wiring, environment
    # propagation, the working directory, and on-disk file side effects.
    def test_basic(self):
        h = self.make_host()
        # The embedded script is written to disk verbatim and executed with
        # cwd='subdir'; it echoes stdin and $TEST_VAR, writes to stderr, and
        # copies ../input into ../results (paths relative to 'subdir').
        files = {
            'test.py': """
import os
import sys
sys.stdout.write("in: %s\\n" % sys.stdin.read())
sys.stdout.write("out: %s\\n" % os.environ['TEST_VAR'])
sys.stderr.write("err\\n")
with open("../results", "w") as fp:
    fp.write(open("../input", "r").read() + " written")
""",
            'input': 'results',
            'subdir/x': 'y',
        }
        # Expect the original tree plus the file the script creates.
        exp_files = files.copy()
        exp_files['results'] = 'results written'
        self.check(prog=[h.python_interpreter, '../test.py'],
                   stdin='hello on stdin',
                   env={'TEST_VAR': 'foo'},
                   cwd='subdir',
                   files=files,
                   ret=0, out='in: hello on stdin\nout: foo\n',
                   err='err\n', exp_files=exp_files)
| bsd-3-clause |
jayzeng/pyes | tests/test_aggs.py | 5 | 5828 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
from pyes.tests import ESTestCase
from pyes.aggs import MissingAgg, MinAgg, MaxAgg, NestedAgg, ReverseNestedAgg
from pyes.query import MatchAllQuery
import datetime
class AggsSearchTestCase(ESTestCase):
    """Integration tests for pyes aggregation classes (missing, min, max,
    nested and reverse-nested) against a live Elasticsearch index."""
    def setUp(self):
        # Build an index whose mapping includes a nested 'resellers' field,
        # then index three documents. Note: doc 2 has an empty resellers
        # list and doc 3 has no 'integer' field -- the aggregation tests
        # below depend on exactly this fixture shape.
        super(AggsSearchTestCase, self).setUp()
        mapping = {u'parsedtext': {'boost': 1.0,
                                   'index': 'analyzed',
                                   'store': 'yes',
                                   'type': u'string',
                                   'term_vector': 'with_positions_offsets'},
                   u'resellers' : {
                       'type' : 'nested',
                       'properties' : {
                           'name' : { 'type' : 'string' },
                           'price' : { 'type' : 'double' }
                       }},
                   u'name': {'boost': 1.0,
                             'index': 'analyzed',
                             'store': 'yes',
                             'type': u'string',
                             'term_vector': 'with_positions_offsets'},
                   u'title': {'boost': 1.0,
                              'index': 'analyzed',
                              'store': 'yes',
                              'type': u'string',
                              'term_vector': 'with_positions_offsets'},
                   u'position': {'store': 'yes',
                                 'type': u'integer'},
                   u'tag': {'store': 'yes',
                            'type': u'string'},
                   u'array': {'store': 'yes',
                              'type': u'integer'},
                   u'date': {'store': 'yes',
                             'type': u'date'},
                   u'uuid': {'boost': 1.0,
                             'index': 'not_analyzed',
                             'store': 'yes',
                             'type': u'string'}}
        self.conn.indices.create_index(self.index_name)
        self.conn.indices.put_mapping(self.document_type, {'properties': mapping}, self.index_name)
        self.conn.index({'name': 'Joe Tester',
                         'parsedtext': 'Joe Testere nice guy',
                         'uuid': '11111',
                         'position': 1,
                         'tag': 'foo',
                         'integer': 1,
                         'date': datetime.date(2011, 5, 16),
                         'resellers':[
                             {'name': 'name1', 'price': 100}, {'name': 'name1', 'price': 200}
                         ]
                         },
                        self.index_name, self.document_type, 1)
        self.conn.index({'name': ' Bill Baloney',
                         'parsedtext': 'Bill Testere nice guy',
                         'uuid': '22222',
                         'position': 2,
                         'integer': 2,
                         'tag': 'foo',
                         'resellers':[],
                         'date': datetime.date(2011, 4, 16)},
                        self.index_name, self.document_type, 2)
        self.conn.index({'name': 'Bill Clinton',
                         'parsedtext': 'Bill is not nice guy',
                         'uuid': '33333',
                         'position': 3,
                         'tag': 'bar',
                         'resellers':[
                             {'name': 'name1', 'price': 1000}, {'name': 'name1', 'price': 2000}
                         ],
                         'date': datetime.date(2011, 4, 28)},
                        self.index_name, self.document_type, 3)
        # Make the freshly indexed documents visible to search.
        self.conn.indices.refresh(self.index_name)
    def test_missing_agg(self):
        # Doc 3 lacks the 'integer' field, so the missing agg counts 1.
        q = MatchAllQuery()
        q = q.search()
        missing = MissingAgg(name='missing', field='integer')
        q.agg.add(missing)
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 3)
        self.assertEqual(resultset.aggs.missing, {u'doc_count': 1})
    def test_min_agg(self):
        # Minimum 'position' across the three documents is 1.
        q = MatchAllQuery()
        q = q.search()
        min_agg = MinAgg(name='min', field='position')
        q.agg.add(min_agg)
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 3)
        self.assertEqual(resultset.aggs.min, {u'value': 1})
    def test_max_agg(self):
        # Maximum 'position' across the three documents is 3.
        q = MatchAllQuery()
        q = q.search()
        max_agg = MaxAgg(name='max', field='position')
        q.agg.add(max_agg)
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 3)
        self.assertEqual(resultset.aggs.max, {u'value': 3})
    def test_nested_agg(self):
        # Docs 1 and 3 each hold two nested reseller objects -> 4 in total.
        q = MatchAllQuery()
        q = q.search()
        nested = NestedAgg(name='nested', path='resellers')
        q.agg.add(nested)
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 3)
        self.assertEqual(resultset.aggs.nested, {u'doc_count': 4})
    def test_reverse_nested_agg(self):
        # The reverse-nested sub-agg steps back from the 4 nested reseller
        # docs to the 2 parent documents that contain them.
        q = MatchAllQuery()
        q = q.search()
        reverse_nested = ReverseNestedAgg(name='reverse', field='id')
        nested = NestedAgg(name='nested', path='resellers', sub_aggs=[reverse_nested])
        q.agg.add(nested)
        resultset = self.conn.search(query=q, indices=self.index_name, doc_types=[self.document_type])
        self.assertEqual(resultset.total, 3)
        self.assertEqual(resultset.aggs.nested['doc_count'], 4)
        self.assertEqual(resultset.aggs.nested.reverse, {u'doc_count': 2})
| bsd-3-clause |
caseyc37/pygame_cffi | test/test_utils/run_tests.py | 2 | 12181 | #################################### IMPORTS ##################################
if __name__ == '__main__':
import sys
sys.exit("This module is for import only")
test_pkg_name = '.'.join(__name__.split('.')[0:-2])
is_pygame_pkg = test_pkg_name == 'pygame.tests'
if is_pygame_pkg:
from pygame.tests import test_utils
from pygame.tests.test_utils \
import unittest, unittest_patch, import_submodule
from pygame.tests.test_utils.test_runner \
import prepare_test_env, run_test, combine_results, test_failures, \
get_test_results, from_namespace, TEST_RESULTS_START, \
opt_parser
else:
from test import test_utils
from test.test_utils \
import unittest, unittest_patch, import_submodule
from test.test_utils.test_runner \
import prepare_test_env, run_test, combine_results, test_failures, \
get_test_results, from_namespace, TEST_RESULTS_START, \
opt_parser
import pygame
import pygame.threads
import sys
import os
import re
import time
import optparse
import random
from pprint import pformat
was_run = False
def run(*args, **kwds):
    """Run the Pygame unit test suite and return (total tests run, fails dict)

    Positional arguments (optional):
        The names of tests to include. If omitted then all tests are run. Test
        names need not include the trailing '_test'.

    Keyword arguments:
        incomplete - fail incomplete tests (default False)
        nosubprocess - run all test suites in the current process
                       (default False, use separate subprocesses)
        dump - dump failures/errors as dict ready to eval (default False)
        file - if provided, the name of a file into which to dump failures/errors
        timings - if provided, the number of times to run each individual test to
                  get an average run time (default is run each test once)
        exclude - A list of TAG names to exclude from the run. The items may be
                  comma or space separated.
        show_output - show silenced stderr/stdout on errors (default False)
        all - dump all results, not just errors (default False)
        randomize - randomize order of tests (default False)
        seed - if provided, a seed randomizer integer
        multi_thread - if provided, the number of THREADS in which to run
                       subprocessed tests
        time_out - if subprocess is True then the time limit in seconds before
                   killing a test (default 30)
        fake - if provided, the name of the fake tests package in the
               run_tests__tests subpackage to run instead of the normal
               Pygame tests
        python - the path to a python executable to run subprocessed tests
                 (default sys.executable)
        interactive - allow tests tagged 'interactive'.

    Return value:
        A tuple of total number of tests run, dictionary of error information.
        The dictionary is empty if no errors were recorded.

    By default individual test modules are run in separate subprocesses. This
    recreates normal Pygame usage where pygame.init() and pygame.quit() are
    called only once per program execution, and avoids unfortunate
    interactions between test modules. Also, a time limit is placed on test
    execution, so frozen tests are killed when their time allotment expires.
    Use the single process option if threading is not working properly or if
    tests are taking too long. It is not guaranteed that all tests will pass
    in single process mode.

    Tests are run in a randomized order if the randomize argument is True or a
    seed argument is provided. If no seed integer is provided then the system
    time is used.

    Individual test modules may have a corresponding *_tags.py module,
    defining a __tags__ attribute, a list of tag strings used to selectively
    omit modules from a run. By default only the 'interactive', 'ignore', and
    'subprocess_ignore' tags are ignored. 'interactive' is for modules that
    take user input, like cdrom_test.py. 'ignore' and 'subprocess_ignore' are
    for disabling modules for foreground and subprocess modes respectively.
    These are for disabling tests on optional modules or for experimental
    modules with known problems. These modules can be run from the console as
    a Python program.

    This function can only be called once per Python session. It is not
    reentrant.
    """
    global was_run

    if was_run:
        raise RuntimeError("run() was already called this session")
    was_run = True

    options = kwds.copy()
    option_nosubprocess = options.get('nosubprocess', False)
    option_dump = options.pop('dump', False)
    option_file = options.pop('file', None)
    option_all = options.pop('all', False)
    option_randomize = options.get('randomize', False)
    option_seed = options.get('seed', None)
    option_multi_thread = options.pop('multi_thread', 1)
    option_time_out = options.pop('time_out', 120)
    option_fake = options.pop('fake', None)
    option_python = options.pop('python', sys.executable)
    option_exclude = options.pop('exclude', ())
    option_interactive = options.pop('interactive', False)

    # Build the tag exclusion list from the run mode and Python version.
    if not option_interactive and 'interactive' not in option_exclude:
        option_exclude += ('interactive',)
    if not option_nosubprocess and 'subprocess_ignore' not in option_exclude:
        option_exclude += ('subprocess_ignore',)
    elif 'ignore' not in option_exclude:
        option_exclude += ('ignore',)
    if sys.version_info < (3, 0, 0):
        option_exclude += ('python2_ignore',)
    else:
        option_exclude += ('python3_ignore',)

    main_dir, test_subdir, fake_test_subdir = prepare_test_env()
    test_runner_py = os.path.join(test_subdir, "test_utils", "test_runner.py")
    cur_working_dir = os.path.abspath(os.getcwd())

    ###########################################################################
    # Compile a list of test modules. If fake, then compile list of fake
    # xxxx_test.py from run_tests__tests

    TEST_MODULE_RE = re.compile(r'^(.+_test)\.py$')

    test_mods_pkg_name = test_pkg_name
    if option_fake is not None:
        test_mods_pkg_name = '.'.join([test_mods_pkg_name,
                                       'run_tests__tests',
                                       option_fake])
        test_subdir = os.path.join(fake_test_subdir, option_fake)
        working_dir = test_subdir
    else:
        working_dir = main_dir

    # Added in because some machines will need os.environ else there will be
    # false failures in subprocess mode. Same issue as python2.6. Needs some
    # env vars.
    test_env = os.environ

    fmt1 = '%s.%%s' % test_mods_pkg_name
    fmt2 = '%s.%%s_test' % test_mods_pkg_name
    if args:
        test_modules = [
            m.endswith('_test') and (fmt1 % m) or (fmt2 % m) for m in args
        ]
    else:
        test_modules = []
        for f in sorted(os.listdir(test_subdir)):
            for match in TEST_MODULE_RE.findall(f):
                test_modules.append(fmt1 % (match,))

    ###########################################################################
    # Remove modules to be excluded.

    tmp = test_modules
    test_modules = []
    for name in tmp:
        tag_module_name = "%s_tags" % (name[0:-5],)
        try:
            tag_module = import_submodule(tag_module_name)
        except ImportError:
            # No tags module at all: the test module is always run.
            test_modules.append(name)
        else:
            try:
                tags = tag_module.__tags__
            except AttributeError:
                print ("%s has no tags: ignoring" % (tag_module_name,))
                # BUG FIX: this previously read 'test_module.append(name)',
                # which raised NameError ('test_module' is undefined) and
                # aborted the run whenever a *_tags.py lacked __tags__.
                test_modules.append(name)
            else:
                for tag in tags:
                    if tag in option_exclude:
                        print ("skipping %s (tag '%s')" % (name, tag))
                        break
                else:
                    test_modules.append(name)
    del tmp, tag_module_name, name

    ###########################################################################
    # Meta results

    results = {}
    meta_results = {'__meta__' : {}}
    meta = meta_results['__meta__']

    ###########################################################################
    # Randomization

    if option_randomize or option_seed is not None:
        if option_seed is None:
            option_seed = time.time()
        meta['random_seed'] = option_seed
        print ("\nRANDOM SEED USED: %s\n" % option_seed)
        random.seed(option_seed)
        random.shuffle(test_modules)

    ###########################################################################
    # Single process mode

    if option_nosubprocess:
        unittest_patch.patch(**options)
        options['exclude'] = option_exclude
        t = time.time()
        for module in test_modules:
            results.update(run_test(module, **options))
        t = time.time() - t

    ###########################################################################
    # Subprocess mode
    #

    if not option_nosubprocess:
        if is_pygame_pkg:
            from pygame.tests.test_utils.async_sub import proc_in_time_or_kill
        else:
            from test.test_utils.async_sub import proc_in_time_or_kill

        # Forward the surviving options to the per-module runner as CLI args.
        pass_on_args = ['--exclude', ','.join(option_exclude)]
        for option in ['timings', 'seed']:
            value = options.pop(option, None)
            if value is not None:
                pass_on_args.append('--%s' % option)
                pass_on_args.append(str(value))
        for option, value in options.items():
            option = option.replace('_', '-')
            if value:
                pass_on_args.append('--%s' % option)

        def sub_test(module):
            # Run a single test module in its own (possibly killed) process.
            print ('loading %s' % module)
            cmd = [option_python, test_runner_py, module ] + pass_on_args
            return (module,
                    (cmd, test_env, working_dir),
                    proc_in_time_or_kill(cmd, option_time_out, env=test_env,
                                         wd=working_dir))

        if option_multi_thread > 1:
            def tmap(f, args):
                return pygame.threads.tmap (
                    f, args, stop_on_error = False,
                    num_workers = option_multi_thread
                )
        else:
            tmap = map

        t = time.time()

        for module, cmd, (return_code, raw_return) in tmap(sub_test,
                                                           test_modules):
            test_file = '%s.py' % os.path.join(test_subdir, module)
            cmd, test_env, working_dir = cmd

            test_results = get_test_results(raw_return)
            if test_results:
                results.update(test_results)
            else:
                results[module] = {}

            add_to_results = [
                'return_code', 'raw_return', 'cmd', 'test_file',
                'test_env', 'working_dir', 'module',
            ]

            results[module].update(from_namespace(locals(), add_to_results))

        t = time.time() - t

    ###########################################################################
    # Output Results
    #

    untrusty_total, combined = combine_results(results, t)
    total, fails = test_failures(results)

    meta['total_tests'] = total
    meta['combined'] = combined
    results.update(meta_results)

    if option_nosubprocess:
        assert total == untrusty_total

    if not option_dump:
        print (combined)
    else:
        results = option_all and results or fails
        print (TEST_RESULTS_START)
        print (pformat(results))

    if option_file is not None:
        results_file = open(option_file, 'w')
        try:
            results_file.write(pformat(results))
        finally:
            results_file.close()

    return total, fails
def run_and_exit(*args, **kwargs):
    """Run the tests, then exit the interpreter.

    Exits with status 1 when any test failed and 0 otherwise, so buildbots
    can detect test failures from the process return code.
    """
    _, failures = run(*args, **kwargs)
    sys.exit(1 if failures else 0)
| lgpl-2.1 |
jeongarmy/TizenRT | external/protobuf/python/stubout.py | 671 | 4940 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
class StubOutForTesting:
  """Helper for temporarily replacing ("stubbing out") attributes in tests.

  Sample Usage:

    You want os.path.exists() to always return true during testing.

    stubs = StubOutForTesting()
    stubs.Set(os.path, 'exists', lambda x: 1)
      ...
    stubs.UnsetAll()

  The above changes os.path.exists into a lambda that returns 1.  Once
  the ... part of the code finishes, the UnsetAll() looks up the old value
  of os.path.exists and restores it.
  """

  def __init__(self):
    # (parent, old_child, child_name) triples recorded by Set().
    self.cache = []
    # (orig_obj, attr_name, orig_attr) triples recorded by SmartSet().
    self.stubs = []

  def __del__(self):
    # Best-effort cleanup in case the test never unset its stubs.
    self.SmartUnsetAll()
    self.UnsetAll()

  def SmartSet(self, obj, attr_name, new_attr):
    """Replace obj.attr_name with new_attr. This method is smart and works
       at the module, class, and instance level while preserving proper
       inheritance. It will not stub out C types however unless that has been
       explicitly allowed by the type.

       This method supports the case where attr_name is a staticmethod or a
       classmethod of obj.

       Notes:
      - If obj is an instance, then it is its class that will actually be
        stubbed. Note that the method Set() does not do that: if obj is
        an instance, it (and not its class) will be stubbed.
      - The stubbing is using the builtin getattr and setattr. So, the __get__
        and __set__ will be called when stubbing (TODO: A better idea would
        probably be to manipulate obj.__dict__ instead of getattr() and
        setattr()).

       Raises AttributeError if the attribute cannot be found.
    """
    # BUG FIX: 'inspect' is used below but was never imported anywhere in
    # this module, so every SmartSet() call raised NameError. Imported
    # locally to leave the module's top-level contents unchanged.
    import inspect
    if (inspect.ismodule(obj) or
        # BUG FIX (portability): was obj.__dict__.has_key(attr_name),
        # which is Python 2 only; 'in' behaves identically on both.
        (not inspect.isclass(obj) and attr_name in obj.__dict__)):
      orig_obj = obj
      orig_attr = getattr(obj, attr_name)

    else:
      if not inspect.isclass(obj):
        mro = list(inspect.getmro(obj.__class__))
      else:
        mro = list(inspect.getmro(obj))

      mro.reverse()

      orig_attr = None

      # Walk base-most to derived-most so orig_obj ends up as the most
      # derived class that actually defines the attribute.
      for cls in mro:
        try:
          orig_obj = cls
          orig_attr = getattr(obj, attr_name)
        except AttributeError:
          continue

    if orig_attr is None:
      raise AttributeError("Attribute not found.")

    # Calling getattr() on a staticmethod transforms it to a 'normal' function.
    # We need to ensure that we put it back as a staticmethod.
    old_attribute = obj.__dict__.get(attr_name)
    if old_attribute is not None and isinstance(old_attribute, staticmethod):
      orig_attr = staticmethod(orig_attr)

    self.stubs.append((orig_obj, attr_name, orig_attr))
    setattr(orig_obj, attr_name, new_attr)

  def SmartUnsetAll(self):
    """Reverses all the SmartSet() calls, restoring things to their original
    definition.  Its okay to call SmartUnsetAll() repeatedly, as later calls
    have no effect if no SmartSet() calls have been made.
    """
    # Undo in reverse order so repeated stubs of one attribute restore the
    # very first original value last.
    self.stubs.reverse()

    for args in self.stubs:
      setattr(*args)

    self.stubs = []

  def Set(self, parent, child_name, new_child):
    """Replace child_name's old definition with new_child, in the context
    of the given parent.  The parent could be a module when the child is a
    function at module scope.  Or the parent could be a class when a class'
    method is being replaced.  The named child is set to new_child, while
    the prior definition is saved away for later, when UnsetAll() is called.

    This method supports the case where child_name is a staticmethod or a
    classmethod of parent.
    """
    old_child = getattr(parent, child_name)

    old_attribute = parent.__dict__.get(child_name)
    if old_attribute is not None and isinstance(old_attribute, staticmethod):
      old_child = staticmethod(old_child)

    self.cache.append((parent, old_child, child_name))
    setattr(parent, child_name, new_child)

  def UnsetAll(self):
    """Reverses all the Set() calls, restoring things to their original
    definition.  Its okay to call UnsetAll() repeatedly, as later calls have
    no effect if no Set() calls have been made.
    """
    # Undo calls to Set() in reverse order, in case Set() was called on the
    # same arguments repeatedly (want the original call to be last one undone)
    self.cache.reverse()

    for (parent, old_child, child_name) in self.cache:
      setattr(parent, child_name, old_child)
    self.cache = []
| apache-2.0 |
zephirefaith/AI_Fall15_Assignments | A2/lib/networkx/algorithms/bipartite/centrality.py | 76 | 8139 | #-*- coding: utf-8 -*-
# Copyright (C) 2011 by
# Jordi Torrents <jtorrents@milnou.net>
# Aric Hagberg <hagberg@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """\n""".join(['Jordi Torrents <jtorrents@milnou.net>',
'Aric Hagberg (hagberg@lanl.gov)'])
__all__=['degree_centrality',
'betweenness_centrality',
'closeness_centrality']
def degree_centrality(G, nodes):
    r"""Compute the degree centrality for nodes in a bipartite network.

    Each node's degree is normalized by the size of the *opposite*
    bipartite node set (the maximum degree it could possibly have), so
    all returned values lie in [0, 1].

    Parameters
    ----------
    G : graph
        A bipartite network.
    nodes : list or container
        Container with all nodes in one bipartite node set.

    Returns
    -------
    centrality : dictionary
        Dictionary keyed by node -- covering *both* bipartite node
        sets -- with the bipartite degree centrality as the value.

    See Also
    --------
    betweenness_centrality,
    closeness_centrality,
    sets,
    is_bipartite

    References
    ----------
    .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
        Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
        of Social Network Analysis. Sage Publications.
        http://www.steveborgatti.com/papers/bhaffiliations.pdf
    """
    top = set(nodes)
    bottom = set(G) - top
    centrality = {}
    # Nodes in `top` are normalized by the size of `bottom`, and vice versa.
    scale = 1.0 / len(bottom)
    for node, degree in G.degree_iter(top):
        centrality[node] = degree * scale
    scale = 1.0 / len(top)
    for node, degree in G.degree_iter(bottom):
        centrality[node] = degree * scale
    return centrality
def betweenness_centrality(G, nodes):
    r"""Compute betweenness centrality for nodes in a bipartite network.

    Raw betweenness (sum over all-pairs shortest paths passing through a
    node) is computed on the whole graph and then scaled by the maximum
    value achievable in a bipartite graph, which depends on the relative
    sizes of the two node sets [1]_: with `n` nodes in set `U` and `m`
    nodes in set `V`, nodes in `U` are divided by

    .. math::

        \frac{1}{2} [m^2 (s + 1)^2 + m (s + 1)(2t - s - 1) - t (2s - t + 3)],

    where `s, t = divmod(n - 1, m)`, and symmetrically for nodes in `V`
    with `p, r = divmod(m - 1, n)`.

    Parameters
    ----------
    G : graph
        A bipartite graph.
    nodes : list or container
        Container with all nodes in one bipartite node set.

    Returns
    -------
    betweenness : dictionary
        Dictionary keyed by node (from both node sets) with bipartite
        betweenness centrality as the value.

    See Also
    --------
    degree_centrality,
    closeness_centrality,
    sets,
    is_bipartite

    References
    ----------
    .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
        Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
        of Social Network Analysis. Sage Publications.
        http://www.steveborgatti.com/papers/bhaffiliations.pdf
    """
    top = set(nodes)
    bottom = set(G) - top
    n = float(len(top))
    m = float(len(bottom))
    # Maximum attainable betweenness for a node in the 'top' set.
    s, t = divmod(n - 1, m)
    norm_top = (((m**2) * ((s + 1)**2)) +
                (m * (s + 1) * (2*t - s - 1)) -
                (t * ((2*s) - t + 3))) / 2.0
    # Maximum attainable betweenness for a node in the 'bottom' set.
    p, r = divmod(m - 1, n)
    norm_bottom = (((n**2) * ((p + 1)**2)) +
                   (n * (p + 1) * (2*r - p - 1)) -
                   (r * ((2*p) - r + 3))) / 2.0
    # Unnormalized betweenness over the whole (unweighted) graph.
    betweenness = nx.betweenness_centrality(G, normalized=False, weight=None)
    for node in top:
        betweenness[node] /= norm_top
    for node in bottom:
        betweenness[node] /= norm_bottom
    return betweenness
def closeness_centrality(G, nodes, normalized=True):
    r"""Compute the closeness centrality for nodes in a bipartite network.

    The closeness of a node is the distance to all other nodes in the
    graph or in the case that the graph is not connected to all other nodes
    in the connected component containing that node.

    Parameters
    ----------
    G : graph
        A bipartite network

    nodes : list or container
        Container with all nodes in one bipartite node set.

    normalized : bool, optional
      If True (default) normalize by connected component size.

    Returns
    -------
    closeness : dictionary
        Dictionary keyed by node with bipartite closeness centrality
        as the value.

    See Also
    --------
    betweenness_centrality,
    degree_centrality
    sets,
    is_bipartite

    Notes
    -----
    The nodes input parameter must contain all nodes in one bipartite node
    set, but the dictionary returned contains all nodes from both node sets.

    Closeness centrality is normalized by the minimum distance possible.
    In the bipartite case the minimum distance for a node in one bipartite
    node set is 1 from all nodes in the other node set and 2 from all
    other nodes in its own set [1]_. Thus the closeness centrality
    for node `v`  in the two bipartite sets `U` with
    `n` nodes and `V` with `m` nodes is

    .. math::

        c_{v} = \frac{m + 2(n - 1)}{d}, \mbox{for} v \in U,

        c_{v} = \frac{n + 2(m - 1)}{d}, \mbox{for} v \in V,

    where `d` is the sum of the distances from `v` to all
    other nodes.

    Higher values of closeness  indicate higher centrality.

    As in the unipartite case, setting normalized=True causes the
    values to normalized further to n-1 / size(G)-1 where n is the
    number of nodes in the connected part of graph containing the
    node.  If the graph is not completely connected, this algorithm
    computes the closeness centrality for each connected part
    separately.

    References
    ----------
    .. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
        Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
        of Social Network Analysis. Sage Publications.
        http://www.steveborgatti.com/papers/bhaffiliations.pdf
    """
    closeness = {}
    path_length = nx.single_source_shortest_path_length
    top = set(nodes)
    bottom = set(G) - top
    n = float(len(top))
    m = float(len(bottom))
    for node in top:
        sp = path_length(G, node)
        totsp = sum(sp.values())
        if totsp > 0.0 and len(G) > 1:
            closeness[node] = (m + 2 * (n - 1)) / totsp
            if normalized:
                s = (len(sp) - 1.0) / (len(G) - 1)
                closeness[node] *= s
        else:
            # BUG FIX: this previously assigned closeness[n] = 0.0, using
            # the float node-count `n` as the key, so isolated nodes got no
            # entry and a bogus float key appeared in the result.
            closeness[node] = 0.0
    for node in bottom:
        sp = path_length(G, node)
        totsp = sum(sp.values())
        if totsp > 0.0 and len(G) > 1:
            closeness[node] = (n + 2 * (m - 1)) / totsp
            if normalized:
                s = (len(sp) - 1.0) / (len(G) - 1)
                closeness[node] *= s
        else:
            # BUG FIX: same keying error as above for the bottom node set.
            closeness[node] = 0.0
    return closeness
| mit |
CallaJun/hackprince | indico/networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py | 75 | 7708 | #!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx
from nose.plugins.attrib import attr
from networkx import edge_current_flow_betweenness_centrality \
as edge_current_flow
from networkx import edge_current_flow_betweenness_centrality_subset \
as edge_current_flow_subset
class TestFlowBetweennessCentrality(object):
    """Checks that current_flow_betweenness_centrality_subset, when given
    the full node set as both sources and targets, reproduces the plain
    current_flow_betweenness_centrality values."""
    numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
    @classmethod
    def setupClass(cls):
        # Current-flow centrality needs numpy/scipy; skip the class if absent.
        global np
        try:
            import numpy as np
            import scipy
        except ImportError:
            raise SkipTest('NumPy not available.')
    def test_K4_normalized(self):
        """Betweenness centrality: K4"""
        G=networkx.complete_graph(4)
        b=networkx.current_flow_betweenness_centrality_subset(G,
                                                              G.nodes(),
                                                              G.nodes(),
                                                              normalized=True)
        b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_K4(self):
        """Betweenness centrality: K4"""
        G=networkx.complete_graph(4)
        b=networkx.current_flow_betweenness_centrality_subset(G,
                                                              G.nodes(),
                                                              G.nodes(),
                                                              normalized=True)
        b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
        # test weighted network
        G.add_edge(0,1,{'weight':0.5,'other':0.3})
        # weight=None must ignore edge data, matching the unweighted answer.
        b=networkx.current_flow_betweenness_centrality_subset(G,
                                                              G.nodes(),
                                                              G.nodes(),
                                                              normalized=True,
                                                              weight=None)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
        b=networkx.current_flow_betweenness_centrality_subset(G,
                                                              G.nodes(),
                                                              G.nodes(),
                                                              normalized=True)
        b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
        # A named weight attribute ('other') must be honored by both forms.
        b=networkx.current_flow_betweenness_centrality_subset(G,
                                                              G.nodes(),
                                                              G.nodes(),
                                                              normalized=True,
                                                              weight='other')
        b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True,weight='other')
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_P4_normalized(self):
        """Betweenness centrality: P4 normalized"""
        G=networkx.path_graph(4)
        b=networkx.current_flow_betweenness_centrality_subset(G,
                                                              G.nodes(),
                                                              G.nodes(),
                                                              normalized=True)
        b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_P4(self):
        """Betweenness centrality: P4"""
        G=networkx.path_graph(4)
        b=networkx.current_flow_betweenness_centrality_subset(G,
                                                              G.nodes(),
                                                              G.nodes(),
                                                              normalized=True)
        b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
    def test_star(self):
        """Betweenness centrality: star """
        G=networkx.Graph()
        G.add_star(['a','b','c','d'])
        b=networkx.current_flow_betweenness_centrality_subset(G,
                                                              G.nodes(),
                                                              G.nodes(),
                                                              normalized=True)
        b_answer=networkx.current_flow_betweenness_centrality(G,normalized=True)
        for n in sorted(G):
            assert_almost_equal(b[n],b_answer[n])
# class TestWeightedFlowBetweennessCentrality():
# pass
class TestEdgeFlowBetweennessCentrality(object):
    """Subset-vs-full consistency check for the *edge* variant of
    current-flow betweenness. Edge keys may come back as (s, t) or (t, s),
    hence the b.get((s,t), b.get((t,s))) lookups below."""
    numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
    @classmethod
    def setupClass(cls):
        # Current-flow centrality needs numpy/scipy; skip the class if absent.
        global np
        try:
            import numpy as np
            import scipy
        except ImportError:
            raise SkipTest('NumPy not available.')
    def test_K4_normalized(self):
        """Betweenness centrality: K4"""
        G=networkx.complete_graph(4)
        b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=True)
        b_answer=edge_current_flow(G,normalized=True)
        for (s,t),v1 in b_answer.items():
            v2=b.get((s,t),b.get((t,s)))
            assert_almost_equal(v1,v2)
    def test_K4(self):
        """Betweenness centrality: K4"""
        G=networkx.complete_graph(4)
        b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=False)
        b_answer=edge_current_flow(G,normalized=False)
        for (s,t),v1 in b_answer.items():
            v2=b.get((s,t),b.get((t,s)))
            assert_almost_equal(v1,v2)
        # test weighted network
        G.add_edge(0,1,{'weight':0.5,'other':0.3})
        b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=False,weight=None)
        # weight is None => same as unweighted network
        for (s,t),v1 in b_answer.items():
            v2=b.get((s,t),b.get((t,s)))
            assert_almost_equal(v1,v2)
        b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=False)
        b_answer=edge_current_flow(G,normalized=False)
        for (s,t),v1 in b_answer.items():
            v2=b.get((s,t),b.get((t,s)))
            assert_almost_equal(v1,v2)
        # A named weight attribute ('other') must be honored by both forms.
        b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=False,weight='other')
        b_answer=edge_current_flow(G,normalized=False,weight='other')
        for (s,t),v1 in b_answer.items():
            v2=b.get((s,t),b.get((t,s)))
            assert_almost_equal(v1,v2)
    def test_C4(self):
        """Edge betweenness centrality: C4"""
        G=networkx.cycle_graph(4)
        b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=True)
        b_answer=edge_current_flow(G,normalized=True)
        for (s,t),v1 in b_answer.items():
            v2=b.get((s,t),b.get((t,s)))
            assert_almost_equal(v1,v2)
    def test_P4(self):
        """Edge betweenness centrality: P4"""
        G=networkx.path_graph(4)
        b=edge_current_flow_subset(G,G.nodes(),G.nodes(),normalized=True)
        b_answer=edge_current_flow(G,normalized=True)
        for (s,t),v1 in b_answer.items():
            v2=b.get((s,t),b.get((t,s)))
            assert_almost_equal(v1,v2)
| lgpl-3.0 |
BoltzmannBrain/nupic.research | tests/sensorimotor/integration/spatial_pooler_monitor_mixin_test.py | 4 | 4555 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a sepaself.rate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest
import numpy
from nupic.research.spatial_pooler import SpatialPooler
from sensorimotor.spatial_pooler_monitor_mixin import (
SpatialPoolerMonitorMixin)
# Spatial pooler with the monitor mixin applied, so traces and metrics
# (active columns, connection counts, duty cycles) are recorded during compute.
class MonitoredSpatialPooler(SpatialPoolerMonitorMixin, SpatialPooler): pass
class SpatialPoolerMonitorMixinTest(unittest.TestCase):
VERBOSITY = 2
def setUp(self):
# Initialize the spatial pooler
self.sp = MonitoredSpatialPooler(
inputDimensions=(15,),
columnDimensions=(4,),
potentialRadius=15,
numActiveColumnsPerInhArea=1,
globalInhibition=True,
synPermActiveInc=0.03,
potentialPct=1.0)
cat = numpy.array( [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='uint8')
dog = numpy.array( [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0], dtype='uint8')
rat = numpy.array( [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0], dtype='uint8')
bat = numpy.array( [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1], dtype='uint8')
output = numpy.zeros((4,), dtype="int")
self.activeColumns = numpy.zeros((400,4), dtype="int")
for i in xrange(100):
self.sp.compute(cat, learn=True, activeArray=output)
self.activeColumns[4*i+0] = output
self.sp.compute(dog, learn=True, activeArray=output)
self.activeColumns[4*i+1] = output
self.sp.compute(rat, learn=True, activeArray=output)
self.activeColumns[4*i+2] = output
self.sp.compute(bat, learn=True, activeArray=output)
self.activeColumns[4*i+3] = output
def testGetActiveColumn(self):
"""
test whether the active column indices are correctly stored
"""
for i in range(400):
self.assertEqual(self.activeColumns[i][self.sp.mmGetTraceActiveColumns().data[i]], 1)
def testGetActiveDutyCycles(self):
"""
test whether active duty cycle are calculated correctly
"""
self.assertEqual(numpy.sum(self.sp.mmGetDataDutyCycles()), 400)
def testClearHistory(self):
"""
test whether history has been cleared with mmClearHistory
if we run clear history, the traces should be empty
"""
self.sp.mmClearHistory()
self.assertEqual(self.sp.mmGetTraceActiveColumns().data, [])
self.assertEqual(self.sp.mmGetTraceNumConnections().data, [])
def testGetTraceNumConnections(self):
self.assertTrue(self.sp.mmGetTraceNumConnections().data[-1] >= 3*4)
def testGetDefaultTrace(self):
# default trace with verbosity level == 1 returns count traces of activeColumn
# and connections
traces = self.sp.mmGetDefaultTraces()
self.assertTrue(all(traces[0].data))
self.assertEqual(max(traces[0].data), 1)
# default trace with verbosity == 2 returns indices trace of activeColumn
# and count trace of connections
traces = self.sp.mmGetDefaultTraces(verbosity=2)
for i in range(len(traces[0].data)):
self.assertEqual(self.sp.mmGetTraceActiveColumns().data[i], traces[0].data[i])
def testGetDefaultMetrics(self, display=False):
traces = self.sp.mmGetDefaultMetrics()
if display:
print self.sp.mmPrettyPrintMetrics(traces)
self.assertEqual(len(traces), 3)
# metric of active columns
self.assertEqual(traces[0].min, 1)
self.assertEqual(traces[0].max, 1)
self.assertEqual(traces[0].sum, 400)
# metric of connections
self.assertTrue(traces[1].max >= 12)
# metric of total column duty cycles
self.assertEqual(traces[2].sum, 400)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  unittest.main()
| agpl-3.0 |
hgiemza/DIRAC | WorkloadManagementSystem/Service/WMSAdministratorHandler.py | 2 | 26612 | """
This is a DIRAC WMS administrator interface.
It exposes the following methods:
Site mask related methods:
setMask(<site mask>)
getMask()
Access to the pilot data:
getWMSStats()
"""
from tempfile import mkdtemp
import shutil
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.PilotsLoggingDB import PilotsLoggingDB
from DIRAC.WorkloadManagementSystem.Service.WMSUtilities import getPilotLoggingInfo, getWMSPilotOutput, getGridEnv
from DIRAC.Resources.Computing.ComputingElementFactory import ComputingElementFactory
import DIRAC.Core.Utilities.Time as Time
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getGroupOption, getUsernameForDN
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getQueue
from DIRAC.Core.Utilities import DErrno
__RCSID__ = "$Id$"
# This is a global instance of the database classes
jobDB = None
pilotDB = None
taskQueueDB = None
pilotsLoggingDB = None
enablePilotsLogging = False
FINAL_STATES = ['Done','Aborted','Cleared','Deleted','Stalled']
def initializeWMSAdministratorHandler( serviceInfo ):
  """ WMS AdministratorService initialization

      Instantiates the module-level database objects used by the handler.

      :param dict serviceInfo: standard DISET service information dictionary
      :return: S_OK()
  """
  global jobDB
  global pilotDB
  global taskQueueDB
  global enablePilotsLogging
  # Bug fix: without this declaration the assignment below only created a
  # function-local variable, leaving the module-level pilotsLoggingDB as None
  # and breaking the logging cleanup in deletePilots/clearPilots.
  global pilotsLoggingDB
  # there is a problem with accessing CS with shorter paths, so full path is extracted from serviceInfo dict
  enablePilotsLogging = gConfig.getValue( serviceInfo['serviceSectionPath'].replace('WMSAdministrator', 'PilotsLogging') + '/Enable', 'False').lower() in ('yes', 'true')
  jobDB = JobDB()
  pilotDB = PilotAgentsDB()
  taskQueueDB = TaskQueueDB()
  if enablePilotsLogging:
    pilotsLoggingDB = PilotsLoggingDB()
  return S_OK()
class WMSAdministratorHandler(RequestHandler):
###########################################################################
types_setSiteMask = [ list ]
def export_setSiteMask( self, siteList ):
""" Set the site mask for matching. The mask is given in a form of Classad string.
"""
result = self.getRemoteCredentials()
dn = result['DN']
maskList = [ (site,'Active') for site in siteList ]
result = jobDB.setSiteMask( maskList, dn, 'No comment' )
return result
##############################################################################
  types_getSiteMask = []
  @classmethod
  def export_getSiteMask( cls, siteState = 'Active' ):
    """ Get the site mask

        :param str siteState: mask state used for the selection (default 'Active')
        :return: S_OK with the list of site names, or S_ERROR
    """
    return jobDB.getSiteMask( siteState )
  types_getSiteMaskStatus = []
  @classmethod
  def export_getSiteMaskStatus( cls, sites = None ):
    """ Get the site mask of given site(s) with columns 'site' and 'status' only

        :param sites: site name or list of names; None selects all sites
        :return: S_OK with the selected mask entries, or S_ERROR
    """
    return jobDB.getSiteMaskStatus( sites )
##############################################################################
  types_getAllSiteMaskStatus = []
  @classmethod
  def export_getAllSiteMaskStatus( cls ):
    """ Get all the site parameters in the site mask

        :return: S_OK/S_ERROR from JobDB.getAllSiteMaskStatus
    """
    return jobDB.getAllSiteMaskStatus()
##############################################################################
types_banSite = [ basestring ]
def export_banSite( self, site, comment = 'No comment' ):
""" Ban the given site in the site mask
"""
result = self.getRemoteCredentials()
dn = result['DN']
result = getUsernameForDN(dn)
if result['OK']:
author = result['Value']
else:
author = dn
result = jobDB.banSiteInMask(site, author, comment)
return result
##############################################################################
types_allowSite = [ basestring ]
def export_allowSite(self,site,comment='No comment'):
""" Allow the given site in the site mask
"""
result = self.getRemoteCredentials()
dn = result['DN']
result = getUsernameForDN(dn)
if result['OK']:
author = result['Value']
else:
author = dn
result = jobDB.allowSiteInMask(site,author,comment)
return result
##############################################################################
  types_clearMask = []
  @classmethod
  def export_clearMask( cls ):
    """ Clear up the entire site mask

        Passing None to removeSiteFromMask removes all sites.

        :return: S_OK/S_ERROR from JobDB.removeSiteFromMask
    """
    return jobDB.removeSiteFromMask( None )
##############################################################################
  types_getSiteMaskLogging = [ ( basestring, list ) ]
  @classmethod
  def export_getSiteMaskLogging( cls, sites ):
    """ Get the site mask logging history

        :param sites: single site name or list of names
        :return: S_OK/S_ERROR from JobDB.getSiteMaskLogging
    """
    # Normalize a single site name to a one-element list
    if isinstance( sites, basestring ):
      sites = [sites]
    return jobDB.getSiteMaskLogging( sites )
##############################################################################
types_getSiteMaskSummary = [ ]
@classmethod
def export_getSiteMaskSummary( cls ):
""" Get the mask status for all the configured sites
"""
# Get all the configured site names
result = gConfig.getSections('/Resources/Sites')
if not result['OK']:
return result
grids = result['Value']
sites = []
for grid in grids:
result = gConfig.getSections('/Resources/Sites/%s' % grid)
if not result['OK']:
return result
sites += result['Value']
# Get the current mask status
result = jobDB.getSiteMaskStatus()
siteDict = result['Value']
for site in sites:
if site not in siteDict:
siteDict[site] = 'Unknown'
return S_OK(siteDict)
##############################################################################
  types_getCurrentPilotCounters = [ dict ]
  @classmethod
  def export_getCurrentPilotCounters( cls, attrDict={} ):
    """ Get pilot counters per Status with attrDict selection. Final statuses are given for
        the last day.

        :param dict attrDict: pilot attribute selection (read-only here, so the
                              mutable default is tolerated)
        :return: S_OK with a dict { status : count }, or S_ERROR
    """
    # All-time counters per status
    result = pilotDB.getCounters( 'PilotAgents',['Status'], attrDict, timeStamp='LastUpdateTime')
    if not result['OK']:
      return result
    # Counters restricted to the last 24 hours, used for final statuses only
    last_update = Time.dateTime() - Time.day
    resultDay = pilotDB.getCounters( 'PilotAgents',['Status'], attrDict, newer=last_update,
                                     timeStamp='LastUpdateTime')
    if not resultDay['OK']:
      return resultDay
    resultDict = {}
    for statusDict, count in result['Value']:
      status = statusDict['Status']
      resultDict[status] = count
      # For final states report only the last day's count (0 if none)
      if status in FINAL_STATES:
        resultDict[status] = 0
        for statusDayDict,ccount in resultDay['Value']:
          if status == statusDayDict['Status']:
            resultDict[status] = ccount
            break
    return S_OK(resultDict)
##########################################################################################
types_addPilotTQReference = [ list, (int, long), basestring, basestring ]
@classmethod
def export_addPilotTQReference( cls, pilotRef, taskQueueID, ownerDN, ownerGroup, broker='Unknown',
gridType='DIRAC', requirements='Unknown', pilotStampDict={} ):
""" Add a new pilot job reference """
return pilotDB.addPilotTQReference(pilotRef, taskQueueID,
ownerDN, ownerGroup,
broker, gridType, requirements,pilotStampDict)
##############################################################################
  types_getPilotOutput = [ basestring ]
  def export_getPilotOutput(self,pilotReference):
    """ Get the pilot job standard output and standard error files for the Grid
        job reference

        :param str pilotReference: grid reference of the pilot
        :return: S_OK with a dict containing StdOut/StdErr, or S_ERROR
    """
    return self.__getGridJobOutput(pilotReference)
##############################################################################
  types_getPilotInfo = [ (list, basestring) ]
  @classmethod
  def export_getPilotInfo( cls, pilotReference ):
    """ Get the info about a given pilot job reference

        :param pilotReference: pilot reference or list of references
        :return: S_OK with a dict keyed by pilot reference, or S_ERROR
    """
    return pilotDB.getPilotInfo(pilotReference)
##############################################################################
  types_selectPilots = [ dict ]
  @classmethod
  def export_selectPilots( cls, condDict ):
    """ Select pilots given the selection conditions

        :param dict condDict: pilot attribute selection dictionary
        :return: S_OK/S_ERROR from PilotAgentsDB.selectPilots
    """
    return pilotDB.selectPilots(condDict)
##############################################################################
  types_storePilotOutput = [ basestring, basestring, basestring ]
  @classmethod
  def export_storePilotOutput( cls, pilotReference, output, error ):
    """ Store the pilot output and error

        :param str pilotReference: pilot reference
        :param str output: pilot standard output
        :param str error: pilot standard error
        :return: S_OK/S_ERROR from PilotAgentsDB.storePilotOutput
    """
    return pilotDB.storePilotOutput(pilotReference,output,error)
##############################################################################
  types_getPilotLoggingInfo = [ basestring ]
  @classmethod
  def export_getPilotLoggingInfo( cls, pilotReference ):
    """ Get the pilot logging info for the Grid job reference

        :param str pilotReference: grid reference of the pilot
        :return: S_OK with the logging info, or S_ERROR
    """
    result = pilotDB.getPilotInfo(pilotReference)
    if not result['OK'] or not result[ 'Value' ]:
      return S_ERROR('Failed to determine owner for pilot ' + pilotReference)
    # The owner credentials are needed to query the middleware on the
    # pilot owner's behalf
    pilotDict = result['Value'][pilotReference]
    owner = pilotDict['OwnerDN']
    group = pilotDict['OwnerGroup']
    gridType = pilotDict['GridType']
    return getPilotLoggingInfo( gridType, pilotReference, #pylint: disable=unexpected-keyword-arg
                                proxyUserDN = owner, proxyUserGroup = group )
##############################################################################
  types_getJobPilotOutput = [ (basestring, int, long) ]
  def export_getJobPilotOutput( self, jobID ):
    """ Get the pilot job standard output and standard error files for the DIRAC
        job reference

        :param jobID: DIRAC job ID (string or integer)
        :return: S_OK with a dict containing StdOut/StdErr, or S_ERROR
    """
    pilotReference = ''
    # Get the pilot grid reference first from the job parameters
    result = jobDB.getJobParameter( int( jobID ), 'Pilot_Reference' )
    if result['OK']:
      pilotReference = result['Value']
    if not pilotReference:
      # Failed to get the pilot reference, try to look in the attic parameters
      result = jobDB.getAtticJobParameters( int( jobID ), ['Pilot_Reference'] )
      if result['OK']:
        c = -1
        # Get the pilot reference for the last rescheduling cycle
        for cycle in result['Value']:
          if cycle > c:
            pilotReference = result['Value'][cycle]['Pilot_Reference']
            c = cycle
    if pilotReference:
      return self.__getGridJobOutput(pilotReference)
    else:
      return S_ERROR('No pilot job reference found')
##############################################################################
  @classmethod
  def __getGridJobOutput( cls, pilotReference ):
    """ Get the pilot job standard output and standard error files for the Grid
        job reference

        The output is first looked up in PilotAgentsDB; if absent it is
        retrieved from the middleware (WMS for gLite, otherwise via the
        appropriate ComputingElement) and cached back in the DB.

        :param str pilotReference: grid reference of the pilot
        :return: S_OK with a dict (StdOut, StdErr, OwnerDN, OwnerGroup,
                 FileList), or S_ERROR
    """
    result = pilotDB.getPilotInfo(pilotReference)
    if not result['OK'] or not result[ 'Value' ]:
      return S_ERROR('Failed to get info for pilot ' + pilotReference)
    pilotDict = result['Value'][pilotReference]
    owner = pilotDict['OwnerDN']
    group = pilotDict['OwnerGroup']
    # FIXME: What if the OutputSandBox is not StdOut and StdErr, what do we do with other files?
    # First try the output already cached in the DB
    result = pilotDB.getPilotOutput(pilotReference)
    if result['OK']:
      stdout = result['Value']['StdOut']
      error = result['Value']['StdErr']
      if stdout or error:
        resultDict = {}
        resultDict['StdOut'] = stdout
        resultDict['StdErr'] = error
        resultDict['OwnerDN'] = owner
        resultDict['OwnerGroup'] = group
        resultDict['FileList'] = []
        return S_OK(resultDict)
      else:
        gLogger.warn( 'Empty pilot output found for %s' % pilotReference )
    gridType = pilotDict['GridType']
    if gridType == "gLite":
      # Retrieve the output through the WMS
      result = getWMSPilotOutput( pilotReference, proxyUserDN = owner, proxyUserGroup = group) #pylint: disable=unexpected-keyword-arg
      if not result['OK']:
        return S_ERROR('Failed to get pilot output: '+result['Message'])
      # FIXME: What if the OutputSandBox is not StdOut and StdErr, what do we do with other files?
      stdout = result['StdOut']
      error = result['StdErr']
      fileList = result['FileList']
      if stdout:
        # Cache the retrieved output for subsequent requests
        result = pilotDB.storePilotOutput(pilotReference,stdout,error)
        if not result['OK']:
          gLogger.error('Failed to store pilot output:',result['Message'])
      resultDict = {}
      resultDict['StdOut'] = stdout
      resultDict['StdErr'] = error
      resultDict['OwnerDN'] = owner
      resultDict['OwnerGroup'] = group
      resultDict['FileList'] = fileList
      return S_OK(resultDict)
    else:
      # Instantiate the appropriate CE
      ceFactory = ComputingElementFactory()
      result = getQueue( pilotDict['GridSite'], pilotDict['DestinationSite'], pilotDict['Queue'] )
      if not result['OK']:
        return result
      queueDict = result['Value']
      gridEnv = getGridEnv()
      queueDict['GridEnv'] = gridEnv
      # Temporary working directory, removed on every exit path below
      queueDict['WorkingDirectory'] = mkdtemp()
      result = ceFactory.getCE( gridType, pilotDict['DestinationSite'], queueDict )
      if not result['OK']:
        shutil.rmtree( queueDict['WorkingDirectory'] )
        return result
      ce = result['Value']
      # Act with the pilot owner's proxy
      groupVOMS = getGroupOption(group,'VOMSRole',group)
      result = gProxyManager.getPilotProxyFromVOMSGroup( owner, groupVOMS )
      if not result['OK']:
        gLogger.error( result['Message'] )
        gLogger.error( 'Could not get proxy:', 'User "%s", Group "%s"' % ( owner, groupVOMS ) )
        return S_ERROR("Failed to get the pilot's owner proxy")
      proxy = result['Value']
      ce.setProxy( proxy )
      # The CE identifies the job by reference plus optional pilot stamp
      pilotStamp = pilotDict['PilotStamp']
      pRef = pilotReference
      if pilotStamp:
        pRef = pRef + ':::' + pilotStamp
      result = ce.getJobOutput( pRef )
      if not result['OK']:
        shutil.rmtree( queueDict['WorkingDirectory'] )
        return result
      stdout,error = result['Value']
      if stdout:
        # Cache the retrieved output for subsequent requests
        result = pilotDB.storePilotOutput(pilotReference,stdout,error)
        if not result['OK']:
          gLogger.error('Failed to store pilot output:',result['Message'])
      resultDict = {}
      resultDict['StdOut'] = stdout
      resultDict['StdErr'] = error
      resultDict['OwnerDN'] = owner
      resultDict['OwnerGroup'] = group
      resultDict['FileList'] = []
      shutil.rmtree( queueDict['WorkingDirectory'] )
      return S_OK( resultDict )
##############################################################################
types_getPilotSummary = []
@classmethod
def export_getPilotSummary( cls, startdate='', enddate='' ):
""" Get summary of the status of the LCG Pilot Jobs
"""
result = pilotDB.getPilotSummary(startdate,enddate)
return result
##############################################################################
types_getPilotMonitorWeb = [ dict, list, (int, long), [int, long] ]
@classmethod
def export_getPilotMonitorWeb( cls, selectDict, sortList, startItem, maxItems ):
""" Get the summary of the pilot information for a given page in the
pilot monitor in a generic format
"""
result = pilotDB.getPilotMonitorWeb(selectDict, sortList, startItem, maxItems)
return result
##############################################################################
types_getPilotMonitorSelectors = []
@classmethod
def export_getPilotMonitorSelectors( cls ):
""" Get all the distinct selector values for the Pilot Monitor web portal page
"""
result = pilotDB.getPilotMonitorSelectors()
return result
##############################################################################
types_getPilotSummaryWeb = [ dict, list, (int, long), [int, long] ]
@classmethod
def export_getPilotSummaryWeb( cls, selectDict, sortList, startItem, maxItems ):
""" Get the summary of the pilot information for a given page in the
pilot monitor in a generic format
"""
result = pilotDB.getPilotSummaryWeb(selectDict, sortList, startItem, maxItems)
return result
##############################################################################
types_getSiteSummaryWeb = [ dict, list, (int, long), (int, long) ]
@classmethod
def export_getSiteSummaryWeb( cls, selectDict, sortList, startItem, maxItems ):
""" Get the summary of the jobs running on sites in a generic format
"""
result = jobDB.getSiteSummaryWeb(selectDict, sortList, startItem, maxItems)
return result
##############################################################################
types_getSiteSummarySelectors = []
@classmethod
def export_getSiteSummarySelectors( cls ):
""" Get all the distinct selector values for the site summary web portal page
"""
resultDict = {}
statusList = ['Good','Fair','Poor','Bad','Idle']
resultDict['Status'] = statusList
maskStatus = ['Active','Banned','NoMask','Reduced']
resultDict['MaskStatus'] = maskStatus
gridTypes = []
result = gConfig.getSections('Resources/Sites/',[])
if result['OK']:
gridTypes = result['Value']
resultDict['GridType'] = gridTypes
siteList = []
for grid in gridTypes:
result = gConfig.getSections('Resources/Sites/%s' % grid,[])
if result['OK']:
siteList += result['Value']
countryList = []
for site in siteList:
if site.find('.') != -1:
country = site.split('.')[2].lower()
if country not in countryList:
countryList.append(country)
countryList.sort()
resultDict['Country'] = countryList
siteList.sort()
resultDict['Site'] = siteList
return S_OK(resultDict)
##############################################################################
  types_getPilots = [ (basestring, int, long) ]
  @classmethod
  def export_getPilots( cls, jobID ):
    """ Get pilot references and their states for :
        - those pilots submitted for the TQ where job is sitting
        - (or) the pilots executing/having executed the Job

        :param jobID: DIRAC job ID (string or integer)
        :return: S_OK with pilot info dict, or S_ERROR
    """
    pilots = []
    result = pilotDB.getPilotsForJobID( int( jobID ) )
    if not result['OK']:
      # 'not found' is not fatal here - fall through to the task queue lookup
      if result['Message'].find('not found') == -1:
        return S_ERROR('Failed to get pilot: '+result['Message'])
    else:
      pilots += result['Value']
    if not pilots:
      # Pilots were not found try to look in the Task Queue
      taskQueueID = 0
      result = taskQueueDB.getTaskQueueForJob( int( jobID ) )
      if result['OK'] and result['Value']:
        taskQueueID = result['Value']
      if taskQueueID:
        result = pilotDB.getPilotsForTaskQueue( taskQueueID, limit=10 )
        if not result['OK']:
          return S_ERROR('Failed to get pilot: '+result['Message'])
        pilots += result['Value']
    if not pilots:
      return S_ERROR( 'Failed to get pilot for Job %d' % int( jobID ) )
    return pilotDB.getPilotInfo(pilotID=pilots)
##############################################################################
  types_killPilot = [ ( basestring, list ) ]
  @classmethod
  def export_killPilot( cls, pilotRefList ):
    """ Kill the specified pilots

        Pilots are grouped per (owner, group, site, CE, queue) so that one
        ComputingElement instance and one proxy can serve each group.

        :param pilotRefList: pilot reference or list of references
        :return: S_OK, or S_ERROR if any pilot could not be killed
    """
    # Make a list if it is not yet
    pilotRefs = list( pilotRefList )
    if isinstance( pilotRefList, basestring ):
      pilotRefs = [pilotRefList]
    # Regroup pilots per site and per owner
    pilotRefDict = {}
    for pilotReference in pilotRefs:
      result = pilotDB.getPilotInfo(pilotReference)
      if not result['OK'] or not result[ 'Value' ]:
        return S_ERROR('Failed to get info for pilot ' + pilotReference)
      pilotDict = result['Value'][pilotReference]
      owner = pilotDict['OwnerDN']
      group = pilotDict['OwnerGroup']
      # Composite grouping key; '@@@' is assumed not to occur in the fields
      queue = '@@@'.join( [owner, group, pilotDict['GridSite'], pilotDict['DestinationSite'], pilotDict['Queue']] )
      gridType = pilotDict['GridType']
      pilotRefDict.setdefault( queue, {} )
      pilotRefDict[queue].setdefault( 'PilotList', [] )
      pilotRefDict[queue]['PilotList'].append( pilotReference )
      pilotRefDict[queue]['GridType'] = gridType
    # Do the work now queue by queue
    ceFactory = ComputingElementFactory()
    failed = []
    for key, pilotDict in pilotRefDict.items():
      owner,group,site,ce,queue = key.split( '@@@' )
      result = getQueue( site, ce, queue )
      if not result['OK']:
        return result
      queueDict = result['Value']
      gridType = pilotDict['GridType']
      result = ceFactory.getCE( gridType, ce, queueDict )
      if not result['OK']:
        return result
      # 'ce' is rebound from the CE name to the CE object from here on
      ce = result['Value']
      # FIXME: quite hacky. Should be either removed, or based on some flag
      if gridType in ["LCG", "gLite", "CREAM", "ARC", "Globus"]:
        group = getGroupOption(group,'VOMSRole',group)
        ret = gProxyManager.getPilotProxyFromVOMSGroup( owner, group )
        if not ret['OK']:
          gLogger.error( ret['Message'] )
          gLogger.error( 'Could not get proxy:', 'User "%s", Group "%s"' % ( owner, group ) )
          return S_ERROR("Failed to get the pilot's owner proxy")
        proxy = ret['Value']
        ce.setProxy( proxy )
      pilotList = pilotDict['PilotList']
      result = ce.killJob( pilotList )
      if not result['OK']:
        failed.extend( pilotList )
    if failed:
      return S_ERROR('Failed to kill at least some pilots')
    return S_OK()
##############################################################################
types_setJobForPilot = [ (basestring, int, long), basestring ]
@classmethod
def export_setJobForPilot( cls, jobID, pilotRef, destination=None ):
""" Report the DIRAC job ID which is executed by the given pilot job
"""
result = pilotDB.setJobForPilot( int( jobID ), pilotRef )
if not result['OK']:
return result
result = pilotDB.setCurrentJobID( pilotRef, int( jobID ) )
if not result['OK']:
return result
if destination:
result = pilotDB.setPilotDestinationSite(pilotRef,destination)
return result
##########################################################################################
types_setPilotBenchmark = [ basestring, float ]
@classmethod
def export_setPilotBenchmark( cls, pilotRef, mark ):
""" Set the pilot agent benchmark
"""
result = pilotDB.setPilotBenchmark(pilotRef,mark)
return result
##########################################################################################
types_setAccountingFlag = [ basestring ]
@classmethod
def export_setAccountingFlag( cls, pilotRef, mark='True' ):
""" Set the pilot AccountingSent flag
"""
result = pilotDB.setAccountingFlag(pilotRef,mark)
return result
##########################################################################################
types_setPilotStatus = [ basestring, basestring ]
def export_setPilotStatus( self, pilotRef, status, destination=None, reason=None, gridSite=None, queue=None ):
""" Set the pilot agent status
"""
result = pilotDB.setPilotStatus(pilotRef,status,destination=destination,
statusReason=reason,gridSite=gridSite,queue=queue)
return result
##########################################################################################
types_countPilots = [ dict ]
@classmethod
def export_countPilots( cls, condDict, older=None, newer=None, timeStamp='SubmissionTime' ):
""" Set the pilot agent status
"""
result = pilotDB.countPilots(condDict, older, newer, timeStamp )
return result
##########################################################################################
types_getCounters = [ basestring, list, dict ]
@classmethod
def export_getCounters( cls, table, keys, condDict, newer=None, timeStamp='SubmissionTime' ):
""" Set the pilot agent status
"""
result = pilotDB.getCounters( table, keys, condDict, newer=newer, timeStamp=timeStamp )
return result
##############################################################################
  types_getPilotStatistics = [ basestring, dict ]
  @staticmethod
  def export_getPilotStatistics ( attribute, selectDict ):
    """ Get pilot statistics distribution per attribute value with a given selection

        :param str attribute: pilot attribute to group by
        :param dict selectDict: selection dictionary; the FromDate/LastUpdate/
                                ToDate keys are extracted as time bounds and
                                removed before the DB query
        :return: S_OK with a dict { attribute value : count }
    """
    # Extract and strip the time-window keys from the selection
    startDate = selectDict.get( 'FromDate', None )
    if startDate:
      del selectDict['FromDate']
    if startDate is None:
      startDate = selectDict.get( 'LastUpdate', None )
      if startDate:
        del selectDict['LastUpdate']
    endDate = selectDict.get( 'ToDate', None )
    if endDate:
      del selectDict['ToDate']
    result = pilotDB.getCounters( 'PilotAgents', [attribute], selectDict,
                                  newer = startDate,
                                  older = endDate,
                                  timeStamp = 'LastUpdateTime' )
    statistics = {}
    if result['OK']:
      for status, count in result['Value']:
        if "OwnerDN" in status:
          # Replace the DN with the user name when it can be resolved
          userName = getUsernameForDN( status['OwnerDN'] )
          if userName['OK']:
            status['OwnerDN'] = userName['Value']
          statistics[ status['OwnerDN'] ] = count
        else:
          statistics[ status[attribute] ] = count
    return S_OK( statistics )
##############################################################################
types_deletePilots = [ (list, int, long) ]
def export_deletePilots( self, pilotIDs ):
if isinstance( pilotIDs, (int, long ) ):
pilotIDs = [pilotIDs, ]
result = pilotDB.deletePilots( pilotIDs )
if not result['OK']:
return result
if enablePilotsLogging:
pilotIDs = result[ 'Value' ]
pilots = pilotDB.getPilotInfo( pilotID = pilotIDs )
if not pilots['OK']:
return pilots
pilotRefs = []
for pilot in pilots:
pilotRefs.append( pilot['PilotJobReference'] )
result = pilotsLoggingDB.deletePilotsLogging( pilotRefs )
if not result['OK']:
return result
return S_OK()
##############################################################################
types_clearPilots = [ (int, long), (int, long) ]
def export_clearPilots( self, interval = 30, aborted_interval = 7 ):
result = pilotDB.clearPilots( interval, aborted_interval )
if not result[ 'OK' ]:
return result
if enablePilotsLogging:
pilotIDs = result[ 'Value' ]
pilots = pilotDB.getPilotInfo( pilotID = pilotIDs )
if not pilots['OK']:
return pilots
pilotRefs = []
for pilot in pilots:
pilotRefs.append( pilot['PilotJobReference'] )
result = pilotsLoggingDB.deletePilotsLogging( pilotRefs )
if not result['OK']:
return result
return S_OK()
| gpl-3.0 |
robertsj/poropy | poropy/coretools/laban.py | 1 | 9241 | '''
Created on Dec 11, 2011
@author: robertsj
'''
from evaluator import Evaluator
import numpy as np
import os
class Laban(Evaluator):
""" Uses the LABAN-PEL code to evaluate loading patterns.
"""
# Public Interface
def __init__(self, rank=0) :
""" Constructor
Parameters
----------
rank : integer
MPI process rank.
"""
# LABAN options of potential interest
self.order = 4
self.tolerance = 0.0001
# Rank is needed for MPI runs. This differentiates the inputs.
self.rank = rank
self.input = "laban"+str(rank)+".inp"
self.output = "laban"+str(rank)+".out"
self.keff = 0
self.maxpeak = 0
    def setup(self, core) :
        """ Set up some structures needed before solving.

        Parameters
        ----------
        core : object
            Core description; must expose stencil, width, pattern and
            assemblies attributes (see poropy core model).
        """
        # We need direct access to the core
        self.core = core
        # Validate a square quarter core. (Not applicable to 1/2 or 1/8)
        assert(len(self.core.stencil[0,:])==len(self.core.stencil[:,0]))
        # Core size per dimension.
        self.dimension = len(self.core.stencil[0,:])
        # Assembly boundaries; the first width is halved because the
        # quarter-core symmetry line cuts the central assembly.
        self.widths = np.zeros(self.dimension+1)
        self.widths[:] = self.core.width
        self.widths[0] = 0.5 * self.core.width
        # Subdivisions. Not really used.
        self.subdivisions = np.ones(self.dimension,dtype='i')
        # Peaking factor map
        self.peaking_map = np.zeros((self.dimension, self.dimension))
        self.peaking = np.zeros(len(self.core.assemblies))
        # Create the static top part of the LABAN-PEL input
        self.make_input_top()
def evaluate(self) :
""" Evaluate the current core.
"""
# The core is a member variable, and so the updated
# one is always available once shuffle is called
# in Reactor.
# Open the input file
f = open(self.input, 'w')
# Write
f.write(self.input_top)
self.make_input_map()
f.write(self.input_map)
self.make_input_materials()
f.write(self.input_materials)
# Close
f.close()
# Run LABAN-PEL
self.run()
# Read the output
self.read()
# Return the evaluation parameters
return self.keff, self.maxpeak
    def display(self) :
        """ Introduce myself.

        Prints the evaluator name and its input/output file names.
        """
        print " EVALUATOR: LABAN-PEL"
        print " input: ", self.input
        print " output: ", self.output
# Implementation
    def make_input_top(self) :
        """ Create the string for the top invariant part of the input.

        The result is stored in self.input_top: the LABAN-PEL title card,
        control cards (order, iteration limits, tolerances) and the mesh
        subdivision/width cards.  NOTE(review): the exact card layout is
        dictated by the LABAN-PEL input format - verify against its manual
        before changing any literal.
        """
        stencil_length = str(len(self.core.stencil[0,:]))
        # Title and control cards
        self.input_top = \
            "LABANPEL -- poropy driver \n" +\
            " 2 " + stencil_length + " " + stencil_length +\
            " " + str(len(self.core.pattern)+1) + " 0 0 2 1.0\n" +\
            " "+str(self.order)+" 0 0 0 0 0 0 0 0\n"+\
            " 1 0. 0. 0 100 10 " + str(self.tolerance)+" " +\
            str(self.tolerance)+" "+str(self.tolerance) + " 1.0\n"
        # horizontal subdivisions
        for i in range(0, self.dimension) :
            self.input_top += " "+str(self.subdivisions[i])
        self.input_top += " \n"
        # vertical subdivisions
        for i in range(0, self.dimension) :
            self.input_top += " "+str(self.subdivisions[i])
        self.input_top += " \n"
        # horizontal mesh widths
        for i in range(0, self.dimension) :
            self.input_top += " "+str(self.widths[i])
        self.input_top += " \n"
        # vertical mesh widths
        for i in range(0, self.dimension) :
            self.input_top += " "+str(self.widths[i])
        self.input_top += " \n"
    def make_input_map(self) :
        """ Build the material map section of the LABAN-PEL input.

        Stores in self.input_map an (N+2)x(N+2) integer map: the core
        stencil surrounded by boundary condition markers (-1 reflective,
        -2 vacuum), with fuel cells numbered from the loading pattern and
        reflector cells set to the last material id.
        """
        self.input_map = ""
        stencil = self.core.stencil
        pattern = self.core.pattern
        reflect = len(pattern)+1 # reflector id, last material
        N = self.dimension
        coremap = np.zeros((N+2,N+2), dtype='i')
        # reflections and vacuum
        coremap[0, 1:N+1] = -1
        coremap[1:N+1, 0] = -1
        coremap[N+1, 1:N+1] = -2
        coremap[1:N+1, N+1] = -2
        fuelindex = 0
        for i in range(1, N+1) :
            for j in range(1, N+1) :
                # Skip the first column below the center row: it is filled
                # afterwards from the first row by rotational symmetry.
                if j == 1 and i > 1 :
                    pass
                else :
                    if stencil[i-1, j-1] > 0 : # a fuel
                        coremap[i, j] = pattern[fuelindex]+1
                        fuelindex += 1
                    elif stencil[i-1, j-1] == 0 : # a reflector
                        coremap[i, j] = reflect
                    else : # a void
                        pass
        # Copy elements such that rotational symmetry is enforced.
        for j in range(2, N+1) :
            coremap[j, 1] = coremap[1, j]
        # Format the map as fixed-width columns
        for i in range(0, N+2) :
            for j in range(0, N+2) :
                self.input_map +='%4i' % (coremap[i, j])
            self.input_map += '\n'
def make_input_materials(self) :
    """ Print the materials definitions.

    Builds ``self.input_material``-style cards in ``self.input_materials``:
    one 3-row entry per fuel assembly (description row + two rows of
    two-group constants), one entry for the reflector, then the fixed
    boundary/terminator cards.  Example layout:
    """
    # 1 5 1 MATERIAL 1 (arbitrary line, i think)
    # 1.4493e+00 9.9000e-03 7.9000e-03 1. 0. 0. 7.9000e-03 1.
    # 3.8070e-01 1.0420e-01 1.6920e-01 0 1.5100e-02 0. 1.6920e-01 1.
    self.input_materials = ""
    number_mats = len(self.core.pattern)+1
    a = self.core.assemblies
    for i in range(0, number_mats-1) :
        # Row 1: description.
        self.input_materials += " " + str(i+1) + " 5 1 MATERIAL " + \
            str(i+1) + " (" + \
            a[i].model + ", " + \
            str(a[i].enrichment) + " w/o, " + \
            str(a[i].burnup) + " MWd/kg)\n"
        # Rows 2 and 3: two-group constants, 8 columns of %12.4e each.
        D1,D2,A1,A2,F1,F2,S12 = a[i].get_constants()
        d = np.array([[D1,A1,F1,1.0,0.0,0.0,F1,1.0],[D2,A2,F2,0.0,S12,0.0,F2,1.0]])
        for j in range(0, 2) :
            for k in range(0, 8) :
                self.input_materials +='%12.4e' %(d[j,k])
            self.input_materials += '\n'
    # The reflector is always the last material.
    a = self.core.reflector
    # Row 1: description.
    self.input_materials += " " + str(number_mats) + " 5 1 MATERIAL " + \
        str(number_mats) + " (REFLECTOR) \n"
    # Rows 2 and 3.
    D1,D2,A1,A2,F1,F2,S12 = a.get_constants()
    d = np.array([[D1,A1,F1,1.0,0.0,0.0,F1,1.0],[D2,A2,F2,0.0,S12,0.0,F2,1.0]])
    for i in range(0, 2) :
        for j in range(0, 8) :
            self.input_materials +='%12.4e' %(d[i,j])
        self.input_materials += '\n'
    # Boundary conditions and end-of-input terminator.
    self.input_materials += "WHITE\n" + "BLACK\n" + "END\n"
def read(self):
    """ Read a LABAN-PEL output file and load various data.

    Parses ``self.output`` and sets:
      * ``self.keff``        -- eigenvalue from the "* K-EFF" line
      * ``self.peaking_map`` -- node-averaged powers, (dimension x dimension)
      * ``self.peaking``     -- powers of fuel positions only (stencil > 0)
      * ``self.maxpeak``     -- max over ``self.peaking``
    """
    # Read all lines up front; 'with' guarantees the handle is closed
    # (the original leaked the open file object).
    with open(self.output, 'r') as f:
        lines = f.readlines()
    # Find the eigenvalue: a 5-word line starting "* K-EFF".
    count = 0
    while True:
        words = lines[count].split()
        if len(words) == 5:
            if words[0] == "*" and words[1] == "K-EFF":
                self.keff = float(words[3])
                break
        count += 1
    # Find the peaking: the 8-word "NODE AVERAGE POWERS" title line.
    a = 0  # Assembly index
    while True:
        words = lines[count].split()
        if len(words) == 8:
            if words[0] == "NODE" and words[1] == "AVERAGE" and words[2] == "POWERS":
                count += 5  # Powers start 5 lines below title
                for row in range(0, self.dimension):
                    words = lines[count].split()
                    # Each power row is "<label> <p0> <p1> ..."
                    assert(len(words) >= self.dimension)
                    for col in range(0, self.dimension):
                        self.peaking_map[row, col] = float(words[col+1])
                        if self.core.stencil[row, col] > 0:
                            # Only fuel positions contribute to peaking.
                            self.peaking[a] = self.peaking_map[row, col]
                            a += 1
                    count += 1
                break
        count += 1
    # Maximum peaking (over fuel assemblies only).
    self.maxpeak = np.max(self.peaking)
def run(self):
    """ Run LABAN-PEL (must set input first). """
    # labanx currently reads from a preset file; rank and the input and
    # output names are passed on the command line.
    command = "labanx %s %s %s" % (str(self.rank), self.input, self.output)
    os.system(command)
| mit |
mogoweb/chromium-crosswalk | native_client_sdk/src/build_tools/tests/sdktools_config_test.py | 160 | 1609 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import sdk_tools.config as config
class TestSdkToolsConfig(unittest.TestCase):
    """Unit tests for sdk_tools.config.Config JSON load/serialize behavior."""

    def testInvalidSyntax(self):
        # Non-JSON text must raise the module's own Error type.
        cfg = config.Config()
        with self.assertRaises(config.Error):
            cfg.LoadJson("# oops\n")

    def testEmptyConfig(self):
        """Test that empty config contains just empty sources list."""
        cfg = config.Config()
        self.assertEqual(cfg.ToJson(), '{\n "sources": []\n}')

    def testIntegerSetting(self):
        cfg = config.Config()
        cfg.LoadJson('{ "setting": 3 }')
        self.assertEqual(cfg.setting, 3)

    def testReadWrite(self):
        # Loading either form must serialize back to the canonical one
        # (the variant that includes the "sources" list).
        canonical = '{\n "sources": [], \n "setting": 3\n}'
        minimal = '{\n "setting": 3\n}'
        for text in (canonical, minimal):
            cfg = config.Config()
            cfg.LoadJson(text)
            self.assertEqual(cfg.ToJson(), canonical)

    def testAddSource(self):
        cfg = config.Config()
        cfg.AddSource('http://localhost/foo')
        self.assertEqual(
            cfg.ToJson(),
            '{\n "sources": [\n "http://localhost/foo"\n ]\n}')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
ztemt/Z5_H112_kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000


def avg(total, n):
    """Return the mean of a running total over n samples."""
    return total / n


def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) timestamp pair into one nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs


def nsecs_secs(nsecs):
    """Return the whole-second part of a nanosecond count."""
    # Floor division keeps the result an integer on both Python 2 and 3;
    # the original "/" produced a float under Python 3.
    return nsecs // NSECS_PER_SEC


def nsecs_nsecs(nsecs):
    """Return the sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC


def nsecs_str(nsecs):
    """Format a nanosecond count as '  secs.nanoseconds'."""
    # The original line ended with a stray comma, which made this return
    # a 1-tuple instead of a string; it also shadowed the builtin 'str'.
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold *value* into dict[key] = (min, max, running-average, count).

    The parameter name 'dict' is kept for backward compatibility with
    keyword callers, although it shadows the builtin.
    """
    if key not in dict:
        # 'dict.has_key' was removed in Python 3; 'in' works on both 2 and 3.
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        # NOTE: this is a pairwise smoothing, not a true arithmetic mean
        # (preserved from the original implementation).
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    """Clear the terminal: ANSI cursor-home (ESC[H) + erase-display (ESC[2J)."""
    print("\x1b[H\x1b[2J")
# Module-level, best-effort setup of the audit bindings used by
# syscall_name().  If the audit-libs-python package is missing, any of
# these statements raises and we fall back to numeric syscall ids.
audit_package_warned = False

try:
    import audit
    # Map os.uname() machine strings to audit's machine-type constants.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # Older audit bindings lack MACH_ARMEB; it is optional.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # Import failed or the machine type is unknown: warn once.
    # NOTE(review): Python 2 print statement.
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Map a syscall number to its symbolic name.

    Falls back to the numeric id as a string when the audit bindings are
    not installed ('audit'/'machine_id' undefined -> NameError) or the
    lookup fails for any other reason.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the fallback behavior is unchanged.
        return str(id)
def strerror(nr):
    """Return the symbolic errno name for *nr* (sign is ignored).

    Unknown errno values yield the string "Unknown <nr> errno".
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare 'except:'; only a missing errno code is
        # an expected failure here.
        return "Unknown %d errno" % nr
| gpl-2.0 |
james-d-mitchell/libsemigroups-python-bindings | tests/test_semifp.py | 1 | 10732 | # pylint: disable = C0103,E0611,C0111,W0104,R0201
import unittest
import sys
import os
from semigroups import FpSemigroup, FpMonoid
path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if path not in sys.path:
sys.path.insert(1, path)
del path
class TestFpSemigroup(unittest.TestCase):
    """Tests for FpSemigroup: constructor validation, the word syntax
    (letters, parentheses, '^' powers), and the algorithmic queries
    (size, finiteness, normal forms, equality, factorisation, ...).
    """

    def test_valid_init(self):
        # Alphabet must be a string with no repeated letters.
        with self.assertRaises(TypeError):
            FpSemigroup(['a'],[])
        with self.assertRaises(ValueError):
            FpSemigroup('aa',[])
        with self.assertRaises(TypeError):
            FpSemigroup('a', []).check_word({})
        # Valid constructions, including parenthesised/power word syntax.
        FpSemigroup('a', [])
        FpSemigroup('~', [])
        FpSemigroup('a', [['a', 'aa']])
        FpSemigroup('ab', [['b', 'a(a)']])
        FpSemigroup('a', [['a', 'a^4']])
        FpSemigroup('ab', [['b', '(ab)^2a']])

    def test_parse_word(self):
        # Non-letter alphabets disable the pure-letter fast path.
        S = FpSemigroup('~', [])
        self.assertFalse(S._pure_letter_alphabet)
        self.assertEqual(S._parse_word("~"),"~")
        S = FpSemigroup('a', [])
        self.assertEqual(S._parse_word("aa"),"aa")
        self.assertEqual(S._parse_word("()(()())"),"")
        self.assertEqual(S._parse_word("ba^10b"),"baaaaaaaaaab")
        self.assertEqual(S._parse_word("((b)a)^3b"),"bababab")
        # Malformed parentheses and powers must be rejected.
        with self.assertRaises(ValueError):
            S._parse_word(")(")
        with self.assertRaises(ValueError):
            S._parse_word("(((b)^2(a))))")
        with self.assertRaises(ValueError):
            S._parse_word("(((b)^2)))((((a))(b))")
        with self.assertRaises(ValueError):
            S._parse_word("^2")
        with self.assertRaises(ValueError):
            S._parse_word("a^")
        with self.assertRaises(ValueError):
            S._parse_word("a^a")

    def test_alphabet(self):
        # Relations may only use letters from the alphabet.
        with self.assertRaises(ValueError):
            FpSemigroup('', [['a', 'aa']])
        with self.assertRaises(ValueError):
            FpSemigroup('a', [['b', 'aa']])
        with self.assertRaises(ValueError):
            FpSemigroup('aa', [['b', 'aa']])

    def test_rels(self):
        # Relations must be a list of 2-element lists of valid words.
        with self.assertRaises(TypeError):
            FpSemigroup("ab", "[\"a\", \"aa\"]")
        with self.assertRaises(TypeError):
            FpSemigroup("ab", ["\"b", "aa\""])
        with self.assertRaises(TypeError):
            FpSemigroup("ab", [["a", "aa", "b"]])
        with self.assertRaises(TypeError):
            FpSemigroup("ab", [["b", ["a", "a"]]])
        with self.assertRaises(ValueError):
            FpSemigroup("ab", [["b", "ca"]])

    def test_set_report(self):
        S = FpSemigroup("a", [["a", "aa"]])
        S.set_report(True)
        S.set_report(False)
        # Only real booleans are accepted.
        with self.assertRaises(TypeError):
            S.set_report("False")

    def test_is_finite(self):
        S = FpSemigroup("ab", [["a", "aa"], ["b", "bb"], ["ab", "ba"]])
        self.assertEqual(S.is_finite(), True)
        # The free semigroup on two generators is infinite.
        S = FpSemigroup("ab", [])
        self.assertEqual(S.is_finite(), False)
        S = FpSemigroup("ab", [["a","a"],["a","a"]])
        self.assertEqual(S.is_finite(), False)

    def test_size(self):
        S = FpSemigroup("a", [["a", "aa"]])
        self.assertEqual(S.size(), 1)
        S = FpSemigroup("ab", [["a", "aa"], ["b", "bb"], ["ab", "ba"]])
        self.assertEqual(S.size(), 3)
        # Infinite semigroups report float infinity.
        S = FpSemigroup("ab", [])
        self.assertEqual(S.size(), float("inf"))

    def test_normal_form(self):
        S = FpSemigroup("a", [["a", "aa"]])
        self.assertEqual(S.normal_form("a^1000"), "a")
        # In a monoid the empty power reduces to the identity "1".
        S = FpMonoid("a", [["a", "aa"]])
        self.assertEqual(S.normal_form("a^0"), "1")
        S = FpSemigroup("ab", [["a", "aaa"], ["b", "bb"], ["ab", "ba"]])
        self.assertEqual(S.normal_form("(ba)^10"), "aab")

    def test_word_to_class_index(self):
        S = FpSemigroup("ab", [["a", "aa"], ["b", "bb"], ["ab", "ba"]])
        self.assertIsInstance(S.word_to_class_index("aba"), int)
        with self.assertRaises(TypeError):
            S.word_to_class_index([1, "0"])
        with self.assertRaises(TypeError):
            S.word_to_class_index(["aba"])
        # Equal elements map to the same class index.
        self.assertEqual(S.word_to_class_index("aba"),
                         S.word_to_class_index("abaaabb"))
        # Undecidable in the infinite (relation-free) case.
        S = FpSemigroup("ab",[])
        with self.assertRaises(ValueError):
            S.word_to_class_index("aba")

    def test_equal(self):
        S = FpSemigroup("ab",[["a", "a^5"],["b","bb"],["ab","ba"]])
        self.assertTrue(S.equal("a","a^5"))
        self.assertTrue(S.equal("a","aaaaa"))
        self.assertTrue(S.equal("abb","ba"))
        self.assertTrue(S.equal("ab","((a)^3b)^167"))
        S = FpSemigroup("ab",[])
        with self.assertRaises(ValueError):
            S.equal("a", "b")

    def test_contains(self):
        FpS = FpSemigroup("ab", [["aa", "a"], ["bbb", "b"], ["ba", "ab"]])
        self.assertFalse(1 in FpS)
        self.assertTrue("abb" in FpS)
        self.assertFalse("c" in FpS)
        # The empty word is not a semigroup element (no identity).
        self.assertFalse("" in FpS)

    def test_enumerate(self):
        S = FpSemigroup("ab",[["a", "a^5"],["b","bb"],["ab","ba"]])
        S.enumerate(73)
        S = FpSemigroup("ab",[])
        with self.assertRaises(ValueError):
            S.enumerate(73)

    def test_nridempotents(self):
        S = FpSemigroup("ab",[["a", "a^5"],["b","bb"],["ab","ba"]])
        self.assertEqual(S.nridempotents(),3)
        # The monoid gains one idempotent: the identity.
        S = FpMonoid("ab",[["a", "a^5"], ["b","bb"], ["ab","ba"]])
        self.assertEqual(S.nridempotents(),4)
        S = FpSemigroup("ab",[])
        with self.assertRaises(ValueError):
            S.nridempotents()

    def test_factorisation(self):
        S = FpSemigroup("ab",[["a", "a^5"],["b","bb"],["ab","ba"]])
        # Factorisation over generator indices (0 = 'a', 1 = 'b').
        self.assertEqual(S.factorisation("aba"),[0, 0, 1])
        S = FpSemigroup("ab",[])
        with self.assertRaises(ValueError):
            S.factorisation("aba")

    def test_repr(self):
        S = FpSemigroup("ab", [["aa", "a"], ["bbb", "ab"], ["ab", "ba"]])
        self.assertEqual(S.__repr__(),
                         "<fp semigroup with 2 generators and 3 relations>")
class TestFpMonoid(unittest.TestCase):
    """Tests for FpMonoid; mirrors TestFpSemigroup but covers the
    identity element, written '1' in words."""

    def test_valid_init(self):
        # The empty alphabet is allowed (trivial monoid).
        FpMonoid("", [])
        FpMonoid("a", [])
        # '1' is reserved for the identity and cannot be a generator.
        with self.assertRaises(ValueError):
            FpMonoid("1", [])
        FpMonoid("a", [["a", "aa"]])
        FpMonoid("ab", [["b", "aa"]])
        FpMonoid("ab", [["1", "aa"]])
        FpMonoid("ab", [["b", "a^2"]])
        FpMonoid("ab", [["1", "(a)^6"]])

    def test_alphabet(self):
        with self.assertRaises(ValueError):
            FpMonoid("", [["a", "aa"]])
        with self.assertRaises(ValueError):
            FpMonoid("a", [["b", "aa"]])
        with self.assertRaises(ValueError):
            FpMonoid("aa", [["b", "aa"]])

    def test_rels(self):
        # Same relation validation as for semigroups.
        with self.assertRaises(TypeError):
            FpMonoid("ab", "[\"a\", \"aa\"]")
        with self.assertRaises(TypeError):
            FpMonoid("ab", ["\"b\", \"aa\""])
        with self.assertRaises(TypeError):
            FpMonoid("ab", [["a", "aa", "b"]])
        with self.assertRaises(TypeError):
            FpMonoid("ab", [["b", ["a", "a"]]])
        with self.assertRaises(ValueError):
            FpMonoid("ab", [["b", "ca"]])

    def test_contains(self):
        FpM = FpMonoid("ab", [["aa", "a"], ["bbb", "b"], ["ba", "ab"]])
        self.assertFalse(1 in FpM)
        self.assertTrue("abb" in FpM)
        # Unlike a semigroup, the empty word (identity) is a member.
        self.assertTrue("" in FpM)

    def test_set_report(self):
        M = FpMonoid("a", [["a", "aa"]])
        M.set_report(True)
        M.set_report(False)
        with self.assertRaises(TypeError):
            M.set_report("False")

    def test_current_max_word_length(self):
        # Smoke test only: just verify the call does not raise.
        S = FpSemigroup("ab",[["a", "a^5"],["b","bb"],["ab","ba"]])
        S.current_max_word_length()

    def test_size(self):
        # Monoid sizes are the semigroup sizes plus the identity.
        self.assertEqual(FpMonoid("a", [["a", "aa"]]).size(), 2)
        self.assertEqual(FpMonoid("ab", [["a", "aa"], ["b", "bb"],
                                         ["ab", "ba"]]).size(), 4)

    def test_word_to_class_index(self):
        M = FpMonoid("ab", [["a", "aa"], ["b", "bb"], ["ab", "ba"]])
        self.assertEqual(M.word_to_class_index('a'),
                         M.word_to_class_index('aa'))
        self.assertNotEqual(M.word_to_class_index('a'),
                            M.word_to_class_index('bb'))
        self.assertIsInstance(M.word_to_class_index('aba'), int)

    def test_repr(self):
        M = FpMonoid("ab", [["aa", "a"], ["bbb", "ab"], ["ab", "ba"]])
        self.assertEqual(M.__repr__(),
                         "<fp monoid with 2 generators and 3 relations>")
class Test_FPSOME(unittest.TestCase):
    """Tests for elements of finitely presented semigroups/monoids:
    equality via FpSemigroup.equal, identity elements, and products."""

    def test_valid_init(self):
        FpS = FpSemigroup("ab", [["aa", "a"], ["bbb", "b"], ["ba", "ab"]])
        FpS.equal("a", "aba")
        FpS = FpSemigroup("mo", [["m", "mm"], ["ooo", "o"], ["mo", "om"]])
        FpS.equal("moo", "ooo")
        FpS = FpSemigroup("cowie", [["c", "o"], ["o", "w"], ["w", "i"],
                                    ["i", "e"], ["ee", "e"]])
        FpS.equal("cowie","cowie")
        # Non-letter alphabets are supported too.
        FpS2 = FpSemigroup('~', [["~~", "~"]])
        FpS2.equal("~", "~~")
        # Arguments must be valid words over the alphabet.
        with self.assertRaises(TypeError):
            FpS.equal(FpS, FpS)
        with self.assertRaises(ValueError):
            FpS.equal("abc", "abc")

    def test_eq_(self):
        FpS = FpSemigroup("ab", [["a^10", "a"], ["bbb", "b"], ["ba", "ab"]])
        a = "aba"
        b = a
        self.assertTrue(FpS.equal(a, b))
        a = "aaba"
        b = "ba^3"
        self.assertTrue(FpS.equal(a, b))
        # NOTE(review): this final check compares a string with itself and
        # exercises nothing in the library.
        a = ""
        self.assertEqual(a, a)

    def test_ne_(self):
        FpS = FpSemigroup("ab", [["a^10", "a"], ["bbb", "b"], ["ba", "ab"]])
        a = "aba"
        b = a + a
        self.assertFalse(FpS.equal(a, b))
        a = "aaba"
        b = "ba^4"
        self.assertFalse(FpS.equal(a, b))

    def test_identity(self):
        # Semigroup elements report the empty word as identity...
        FpS = FpSemigroup("ab", [["a^10", "a"], ["bbb", "b"], ["ba", "ab"]])
        a = FpS[0].get_value()
        self.assertEqual(a.identity().word, "")
        # ...monoid elements report "1".
        FpS = FpMonoid("ab", [["a^10", "a"], ["bbb", "b"], ["ba", "ab"]])
        a = FpS[1].get_value()
        self.assertEqual(a.identity().word, "1")

    def test_mul(self):
        FpS = FpSemigroup("ab", [["aa", "a"], ["bbb", "b"], ["ba", "ab"]])
        other = "aa"
        a = FpS[1].get_value()
        a * a
        # Products concatenate the underlying words.
        self.assertEqual(a.word + a.word, (a * a).word)
        # Multiplying by a plain string, or by an element of a different
        # semigroup, is a type error.
        with self.assertRaises(TypeError):
            a * other
        with self.assertRaises(TypeError):
            FpSemigroup("a", [["aa", "a"]])[0].get_value() * a

    def test_repr(self):
        FpS = FpSemigroup("ab", [["aa", "a"], ["bbb", "b"], ["ab", "ba"]])
        self.assertEqual(FpS[0].__repr__(), "'" + FpS[0].get_value().Repword + "'")
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
opennode/nodeconductor-saltstack | src/nodeconductor_saltstack/exchange/cost_tracking.py | 1 | 1101 | from nodeconductor.cost_tracking import CostTrackingStrategy, CostTrackingRegister, ConsumableItem
from . import models
class ExchangeTenantStrategy(CostTrackingStrategy):
    """Cost-tracking strategy for Exchange tenants.

    Bills two consumables: mailbox storage per GB and a flat
    premium-support item (always one unit).
    """
    resource_class = models.ExchangeTenant

    class Types(object):
        # Consumable item categories.
        SUPPORT = 'support'
        STORAGE = 'storage'

    class Keys(object):
        # Keys identifying the concrete item within each category.
        STORAGE = '1 GB'
        SUPPORT = 'premium'

    @classmethod
    def get_consumable_items(cls):
        """Return the catalogue of billable items for Exchange tenants."""
        return [
            ConsumableItem(item_type=cls.Types.STORAGE, key=cls.Keys.STORAGE, name='1 GB of storage', units='GB'),
            ConsumableItem(item_type=cls.Types.SUPPORT, key=cls.Keys.SUPPORT, name='Support: premium'),
        ]

    @classmethod
    def get_configuration(cls, tenant):
        """Return the tenant's current consumption per consumable item."""
        storage = tenant.quotas.get(name=models.ExchangeTenant.Quotas.mailbox_size).usage
        return {
            # /1024 converts the quota usage to GB — presumably the
            # mailbox_size quota is stored in MB; verify against the model.
            ConsumableItem(item_type=cls.Types.STORAGE, key=cls.Keys.STORAGE): float(storage) / 1024,
            ConsumableItem(item_type=cls.Types.SUPPORT, key=cls.Keys.SUPPORT): 1,
        }


# Module import side effect: make the strategy available to the
# cost-tracking framework.
CostTrackingRegister.register_strategy(ExchangeTenantStrategy)
tectronics/arsenalsuite | cpp/lib/PyQt4/examples/designer/plugins/python/datetimeeditplugin.py | 20 | 4425 | #============================================================================#
# Designer plugins for PyDateEdit and PyDateTimeEdit #
#----------------------------------------------------------------------------#
# Copyright (c) 2008 by Denviso GmbH, <ulrich.berning@denviso.de> #
# #
# All Rights Reserved #
# #
# Permission to use, copy, modify, and distribute this software and its #
# documentation for any purpose and without fee is hereby granted, #
# provided that the above copyright notice appear in all copies and that #
# both that copyright notice and this permission notice appear in #
# supporting documentation. #
# #
# DENVISO DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS #
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY #
# AND FITNESS, IN NO EVENT SHALL DENVISO BE LIABLE FOR ANY #
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES #
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, #
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER #
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE #
# OR PERFORMANCE OF THIS SOFTWARE. #
#----------------------------------------------------------------------------#
from PyQt4 import QtGui, QtDesigner
from datetimeedit import PyDateEdit, PyDateTimeEdit
#============================================================================#
# The group name in designer widgetbox #
#----------------------------------------------------------------------------#
DESIGNER_GROUP_NAME = "PyQt Examples"
#============================================================================#
# Plugin for PyDateEdit #
#----------------------------------------------------------------------------#
class PyDateEditPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
    """Qt Designer plugin exposing the PyDateEdit custom widget.

    Designer calls the methods below to learn how to display, group,
    and instantiate the widget; the returned strings (including the
    domXml) must match Designer's expectations exactly.
    """

    def __init__(self, parent=None):
        super(PyDateEditPlugin, self).__init__(parent)
        self.initialized = False

    def initialize(self, formEditor):
        # Designer may call this more than once; only initialize once.
        if self.initialized:
            return
        self.initialized = True

    def isInitialized(self):
        return self.initialized

    def isContainer(self):
        # PyDateEdit cannot host child widgets.
        return False

    def icon(self):
        # No custom icon; Designer shows a default placeholder.
        return QtGui.QIcon()

    def domXml(self):
        # Default class/object name used when the widget is dropped on a form.
        return '<widget class="PyDateEdit" name="pyDateEdit">\n</widget>\n'

    def group(self):
        # Widget-box group this widget appears under.
        return DESIGNER_GROUP_NAME

    def includeFile(self):
        # Module that generated code must import for this widget.
        return "datetimeedit"

    def name(self):
        return "PyDateEdit"

    def toolTip(self):
        return ""

    def whatsThis(self):
        return ""

    def createWidget(self, parent):
        return PyDateEdit(parent)
#============================================================================#
# Plugin for PyDateTimeEdit #
#----------------------------------------------------------------------------#
class PyDateTimeEditPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
    """Qt Designer plugin exposing the PyDateTimeEdit custom widget.

    Structurally identical to PyDateEditPlugin, differing only in the
    widget class and the strings returned to Designer.
    """

    def __init__(self, parent=None):
        super(PyDateTimeEditPlugin, self).__init__(parent)
        self.initialized = False

    def initialize(self, formEditor):
        # Designer may call this more than once; only initialize once.
        if self.initialized:
            return
        self.initialized = True

    def isInitialized(self):
        return self.initialized

    def isContainer(self):
        # PyDateTimeEdit cannot host child widgets.
        return False

    def icon(self):
        # No custom icon; Designer shows a default placeholder.
        return QtGui.QIcon()

    def domXml(self):
        # Default class/object name used when the widget is dropped on a form.
        return '<widget class="PyDateTimeEdit" name="pyDateTimeEdit">\n</widget>\n'

    def group(self):
        return DESIGNER_GROUP_NAME

    def includeFile(self):
        # Module that generated code must import for this widget.
        return "datetimeedit"

    def name(self):
        return "PyDateTimeEdit"

    def toolTip(self):
        return ""

    def whatsThis(self):
        return ""

    def createWidget(self, parent):
        return PyDateTimeEdit(parent)
| gpl-2.0 |
kartoza/geonode | scripts/misc/create_full_geonode_db.py | 2 | 4627 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import sys
import os
geonode_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), '../geonode'))
sys.path.append(geonode_path)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import glob
from random import randint
from timeit import Timer
from django.core.files import File
from django.conf import settings
from taggit.models import Tag
from geonode.base.models import TopicCategory
from geonode.base.models import Region
from geonode.people.models import Profile
from geonode.documents.models import Document
from geonode.layers.models import Layer
from geonode.layers.utils import file_upload
from geonode.layers.tasks import delete_layer
def get_random_user():
    """Return a randomly chosen Profile from the database."""
    total = Profile.objects.all().count()
    pick = randint(0, total - 1)
    return Profile.objects.all()[pick]
def assign_random_category(resource):
    """Attach a randomly chosen TopicCategory to *resource* and save it."""
    pick = randint(0, TopicCategory.objects.all().count() - 1)
    resource.category = TopicCategory.objects.all()[pick]
    resource.save()
def assign_keywords(resource):
    """Attach between 0 and 5 random keywords to *resource*."""
    how_many = randint(0, 5)
    for _ in range(how_many):
        resource.keywords.add('keyword_%s' % randint(0, n_keywords))
def assign_regions(resource):
    """Attach between 0 and 5 randomly chosen regions to *resource*."""
    for _ in range(randint(0, 5)):
        pick = randint(0, Region.objects.all().count() - 1)
        resource.regions.add(Region.objects.all()[pick])
def create_users(n_users):
    """Create n_users profiles named user_0 .. user_{n_users-1}."""
    for idx in range(n_users):
        profile = Profile()
        profile.username = 'user_%s' % idx
        profile.save()
def set_resource(resource):
    """ Assign poc, metadata_author, category and regions to resource """
    # Two independent random users; order of the calls matters only for
    # the RNG sequence.
    resource.poc = get_random_user()
    resource.metadata_author = get_random_user()
    assign_random_category(resource)
    assign_regions(resource)
def create_document(number):
    """Create Document number *number*, backed by a random jpg from
    doc_path and owned by a random user, then populate its metadata."""
    candidates = glob.glob('%s*.jpg' % doc_path)
    file_uri = candidates[randint(0, len(candidates) - 1)]
    doc = Document(title='Document N. %s' % number, owner=get_random_user())
    doc.save()
    with open(file_uri, 'r') as handle:
        doc.doc_file.save('%s_img.jpg' % number, File(handle), True)
    assign_keywords(doc)
    # regions, category, poc, author
    set_resource(doc.get_self_resource())
def create_layer(number):
    """Upload a random shapefile from shp_path as a new Layer and
    populate its metadata (*number* is unused, kept for symmetry with
    create_document)."""
    candidates = glob.glob('%s*.shp' % shp_path)
    layer = file_upload(candidates[randint(0, len(candidates) - 1)])
    # keywords
    assign_keywords(layer)
    # regions, category, poc, author
    set_resource(layer.get_self_resource())
# Script body: wipes existing users/documents/layers and repopulates the
# GeoNode database with random test content, timing each creation.
# NOTE(review): Python 2 print statements below.
# in doc_path set a path containing *.jpg files
# in shp_path set a path containing *.shp files
doc_path = '/tmp/docs/'
shp_path = '/tmp/shp/'
n_users = 50
n_keywords = 100
n_layers = 500
n_docs = 500
# Reset keywords
Tag.objects.all().delete()
# 1. create users
Profile.objects.exclude(username='admin').exclude(username='AnonymousUser').delete()
create_users(n_users)
# 2. create documents
Document.objects.all().delete()
for d in range(0, n_docs):
    # The lambda is executed immediately by timeit, so capturing the
    # loop variable here is safe.
    t = Timer(lambda: create_document(d))
    print 'Document %s generated in: %s' % (d, t.timeit(number=1))
# 3. create layers
# first we delete layers
for layer in Layer.objects.all():
    delete_layer.delay(object_id=layer.id)
for l in range(0, n_layers):
    t = Timer(lambda: create_layer(l))
    print 'Layer %s generated in: %s' % (l, t.timeit(number=1))
| gpl-3.0 |
inercia/evy | tests/stdlib/test_threading.py | 1 | 1651 | from evy import patcher
from evy.patched import threading
from evy.patched import thread
from evy.patched import time
# *NOTE: doesn't test as much of the threading api as we'd like because many of
# the tests are launched via subprocess and therefore don't get patched
# Pull the whole stdlib threading test suite into this module's
# namespace, running it against evy's green-thread patched modules.
patcher.inject('test.test_threading',
               globals())

# Each of the patches below disables a stdlib test that is known not to
# apply under evy; the (AttributeError, NameError) guards tolerate the
# test class/method not existing in the Python version at hand.

# "PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it."
# Therefore it's also disabled when testing evy, as it's not emulated.
try:
    ThreadTests.test_PyThreadState_SetAsyncExc = lambda s: None
except (AttributeError, NameError):
    pass

# disabling this test because it fails when run in Hudson even though it always
# succeeds when run manually
try:
    ThreadJoinOnShutdown.test_3_join_in_forked_from_thread = lambda *a, **kw: None
except (AttributeError, NameError):
    pass

# disabling this test because it relies on dorking with the hidden
# innards of the threading module in a way that doesn't appear to work
# when patched
try:
    ThreadTests.test_limbo_cleanup = lambda *a, **kw: None
except (AttributeError, NameError):
    pass

# this test has nothing to do with Eventlet; if it fails it's not
# because of patching (which it does, grump grump)
try:
    ThreadTests.test_finalize_runnning_thread = lambda *a, **kw: None
    # it's misspelled in the stdlib, silencing this version as well because
    # inevitably someone will correct the error
    ThreadTests.test_finalize_running_thread = lambda *a, **kw: None
except (AttributeError, NameError):
    pass

if __name__ == "__main__":
    test_main()
| mit |
2014c2g1/c2g1 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/_threading_local.py | 923 | 7410 | """Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = sorted(mydata.__dict__.items())
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
from weakref import ref
from contextlib import contextmanager
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.
class _localimpl:
    """A class managing thread-local dicts.

    One _localimpl backs each `local` instance; it maps thread ids to
    per-thread attribute dicts and uses weakref callbacks so that either
    side (the local object or the thread) dying cleans up the other's
    reference.
    """
    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'

    def __init__(self):
        # The key used in the Thread objects' attribute dicts.
        # We keep it a string for speed but make it unlikely to clash with
        # a "real" attribute.
        self.key = '_threading_local._localimpl.' + str(id(self))
        # { id(Thread) -> (ref(Thread), thread-local dict) }
        self.dicts = {}

    def get_dict(self):
        """Return the dict for the current thread. Raises KeyError if none
        defined."""
        thread = current_thread()
        return self.dicts[id(thread)][1]

    def create_dict(self):
        """Create a new dict for the current thread, and return it."""
        localdict = {}
        key = self.key
        thread = current_thread()
        idt = id(thread)
        # Both callbacks capture only weakrefs (wrthread / wrlocal bound
        # below), so neither keeps the other object alive.
        def local_deleted(_, key=key):
            # When the localimpl is deleted, remove the thread attribute.
            thread = wrthread()
            if thread is not None:
                del thread.__dict__[key]
        def thread_deleted(_, idt=idt):
            # When the thread is deleted, remove the local dict.
            # Note that this is suboptimal if the thread object gets
            # caught in a reference loop. We would like to be called
            # as soon as the OS-level thread ends instead.
            local = wrlocal()
            if local is not None:
                dct = local.dicts.pop(idt)
        wrlocal = ref(self, local_deleted)
        wrthread = ref(thread, thread_deleted)
        # The thread holds a weakref back to us; we hold a weakref to it.
        thread.__dict__[key] = wrlocal
        self.dicts[idt] = wrthread, localdict
        return localdict
@contextmanager
def _patch(self):
    """Swap in the calling thread's private __dict__ on *self* for the
    duration of one attribute access on a `local` instance.

    On first access from a new thread, creates that thread's dict and
    re-runs __init__ with the originally supplied arguments.
    """
    impl = object.__getattribute__(self, '_local__impl')
    try:
        dct = impl.get_dict()
    except KeyError:
        # First access from this thread.
        dct = impl.create_dict()
        args, kw = impl.localargs
        self.__init__(*args, **kw)
    # Hold the lock while __dict__ is swapped so concurrent accesses
    # from other threads see a consistent object.
    with impl.locallock:
        object.__setattr__(self, '__dict__', dct)
        yield
class local:
    """Pure-Python implementation of thread-local storage.

    Each attribute set on an instance is visible only to the thread that
    set it; see the module docstring for usage examples.
    """
    __slots__ = '_local__impl', '__dict__'

    def __new__(cls, *args, **kw):
        # Constructor arguments are only meaningful if a subclass defines
        # __init__ to consume them (it is re-run once per thread).
        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")
        self = object.__new__(cls)
        impl = _localimpl()
        impl.localargs = (args, kw)
        impl.locallock = RLock()
        object.__setattr__(self, '_local__impl', impl)
        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        impl.create_dict()
        return self

    def __getattribute__(self, name):
        # Route every attribute read through the per-thread dict.
        with _patch(self):
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__delattr__(self, name)
from threading import current_thread, RLock
| gpl-2.0 |
PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/test/test_buffer.py | 80 | 158753 | #
# The ndarray object from _testbuffer.c is a complete implementation of
# a PEP-3118 buffer provider. It is independent from NumPy's ndarray
# and the tests don't require NumPy.
#
# If NumPy is present, some tests check both ndarray implementations
# against each other.
#
# Most ndarray tests also check that memoryview(ndarray) behaves in
# the same way as the original. Thus, a substantial part of the
# memoryview tests is now in this module.
#
import unittest
from test import support
from itertools import permutations, product
from random import randrange, sample, choice
from sysconfig import get_config_var
import warnings
import sys, array, io
from decimal import Decimal
from fractions import Fraction
try:
from _testbuffer import *
except ImportError:
ndarray = None
try:
import struct
except ImportError:
struct = None
try:
import ctypes
except ImportError:
ctypes = None
try:
with warnings.catch_warnings():
from numpy import ndarray as numpy_array
except ImportError:
numpy_array = None
SHORT_TEST = True
# ======================================================================
# Random lists by format specifier
# ======================================================================
# Native format chars and their ranges.
NATIVE = {
'?':0, 'c':0, 'b':0, 'B':0,
'h':0, 'H':0, 'i':0, 'I':0,
'l':0, 'L':0, 'n':0, 'N':0,
'f':0, 'd':0, 'P':0
}
# NumPy does not have 'n' or 'N':
if numpy_array:
del NATIVE['n']
del NATIVE['N']
if struct:
try:
# Add "qQ" if present in native mode.
struct.pack('Q', 2**64-1)
NATIVE['q'] = 0
NATIVE['Q'] = 0
except struct.error:
pass
# Standard format chars and their ranges.
STANDARD = {
'?':(0, 2), 'c':(0, 1<<8),
'b':(-(1<<7), 1<<7), 'B':(0, 1<<8),
'h':(-(1<<15), 1<<15), 'H':(0, 1<<16),
'i':(-(1<<31), 1<<31), 'I':(0, 1<<32),
'l':(-(1<<31), 1<<31), 'L':(0, 1<<32),
'q':(-(1<<63), 1<<63), 'Q':(0, 1<<64),
'f':(-(1<<63), 1<<63), 'd':(-(1<<1023), 1<<1023)
}
def native_type_range(fmt):
"""Return range of a native type."""
if fmt == 'c':
lh = (0, 256)
elif fmt == '?':
lh = (0, 2)
elif fmt == 'f':
lh = (-(1<<63), 1<<63)
elif fmt == 'd':
lh = (-(1<<1023), 1<<1023)
else:
for exp in (128, 127, 64, 63, 32, 31, 16, 15, 8, 7):
try:
struct.pack(fmt, (1<<exp)-1)
break
except struct.error:
pass
lh = (-(1<<exp), 1<<exp) if exp & 1 else (0, 1<<exp)
return lh
fmtdict = {
'':NATIVE,
'@':NATIVE,
'<':STANDARD,
'>':STANDARD,
'=':STANDARD,
'!':STANDARD
}
if struct:
for fmt in fmtdict['@']:
fmtdict['@'][fmt] = native_type_range(fmt)
MEMORYVIEW = NATIVE.copy()
ARRAY = NATIVE.copy()
for k in NATIVE:
if not k in "bBhHiIlLfd":
del ARRAY[k]
BYTEFMT = NATIVE.copy()
for k in NATIVE:
if not k in "Bbc":
del BYTEFMT[k]
fmtdict['m'] = MEMORYVIEW
fmtdict['@m'] = MEMORYVIEW
fmtdict['a'] = ARRAY
fmtdict['b'] = BYTEFMT
fmtdict['@b'] = BYTEFMT
# Capabilities of the test objects:
MODE = 0
MULT = 1
cap = { # format chars # multiplier
'ndarray': (['', '@', '<', '>', '=', '!'], ['', '1', '2', '3']),
'array': (['a'], ['']),
'numpy': ([''], ['']),
'memoryview': (['@m', 'm'], ['']),
'bytefmt': (['@b', 'b'], ['']),
}
def randrange_fmt(mode, char, obj):
    """Return random item for a type specified by a mode and a single
    format character."""
    x = randrange(*fmtdict[mode][char])
    if char == 'c':
        x = bytes(chr(x), 'latin1')
    if char == '?':
        x = bool(x)
    if char == 'f' or char == 'd':
        # Round-trip through struct so the value is exactly
        # representable in the target floating point width.
        x = struct.pack(char, x)
        x = struct.unpack(char, x)[0]
    if obj == 'numpy' and x == b'\x00':
        # http://projects.scipy.org/numpy/ticket/1925
        x = b'\x01'
    return x
def gen_item(fmt, obj):
    """Return a single random item for extended format 'fmt'.

    'fmt' is "mode#chars"; a single format character yields a scalar,
    several characters yield a tuple (a struct item).
    """
    mode, chars = fmt.split('#')
    values = [randrange_fmt(mode, c, obj) for c in chars]
    if len(values) == 1:
        return values[0]
    return tuple(values)
def gen_items(n, fmt, obj):
    """Return a list of n random items, or a single scalar if n == 0."""
    if n == 0:
        return gen_item(fmt, obj)
    return [gen_item(fmt, obj) for _ in range(n)]
def struct_items(n, obj):
    """Return (fmt, items, item) for a random compound (struct) format.

    'xfmt' is the internal "mode#chars" form fed to gen_items/gen_item;
    'fmt' is the corresponding plain struct format string.
    """
    mode = choice(cap[obj][MODE])
    xfmt = mode + '#'
    fmt = mode.strip('amb')  # drop test-object-only mode letters
    nmemb = randrange(2, 10) # number of struct members
    for _ in range(nmemb):
        char = choice(tuple(fmtdict[mode]))
        multiplier = choice(cap[obj][MULT])
        # xfmt repeats the char, fmt uses the count prefix ("3i").
        xfmt += (char * int(multiplier if multiplier else 1))
        fmt += (multiplier + char)
    items = gen_items(n, xfmt, obj)
    item = gen_item(xfmt, obj)
    return fmt, items, item
def randitems(n, obj='ndarray', mode=None, char=None):
    """Return random format, items, item."""
    if mode is None:
        mode = choice(cap[obj][MODE])
    if char is None:
        char = choice(tuple(fmtdict[mode]))
    multiplier = choice(cap[obj][MULT])
    # Internal "mode#chars" form: the char is simply repeated.
    fmt = mode + '#' + char * int(multiplier if multiplier else 1)
    items = gen_items(n, fmt, obj)
    item = gen_item(fmt, obj)
    # Returned format is the plain struct form with a count prefix.
    fmt = mode.strip('amb') + multiplier + char
    return fmt, items, item
def iter_mode(n, obj='ndarray'):
    """Yield randitems() results for every supported mode/char pair."""
    for m in cap[obj][MODE]:
        for c in fmtdict[m]:
            yield randitems(n, obj, m, c)
def iter_format(nitems, testobj='ndarray'):
    """Yield (format, items, item) for all possible modes and format
    characters plus one random compound format string.

    For test objects other than 'ndarray' only the simple formats are
    produced.
    """
    yield from iter_mode(nitems, testobj)
    if testobj != 'ndarray':
        # Bug fix: the original did 'raise StopIteration' here, which
        # under PEP 479 (Python 3.7+) is converted to a RuntimeError
        # inside a generator.  A plain 'return' ends iteration cleanly.
        return
    yield struct_items(nitems, testobj)
def is_byte_format(fmt):
    """True if 'fmt' contains one of the byte format characters."""
    return any(ch in fmt for ch in 'cbB')
def is_memoryview_format(fmt):
    """True if 'fmt' is a single native char, optionally '@'-prefixed,
    that memoryview supports."""
    n = len(fmt)
    if n != 1 and not (n == 2 and fmt[0] == '@'):
        return False
    return fmt[n-1] in MEMORYVIEW
NON_BYTE_FORMAT = [c for c in fmtdict['@'] if not is_byte_format(c)]
# ======================================================================
# Multi-dimensional tolist(), slicing and slice assignments
# ======================================================================
def atomp(lst):
    """Tuple items (representing structs) are regarded as atoms:
    anything that is not a plain list counts as atomic."""
    is_list = isinstance(lst, list)
    return not is_list
def listp(lst):
    """True iff 'lst' is a plain Python list."""
    return True if isinstance(lst, list) else False
def prod(lst):
    """Product of list elements (0 for an empty list, by convention
    of this module)."""
    if not lst:
        return 0
    result = 1
    for factor in lst:
        result *= factor
    return result
def strides_from_shape(ndim, shape, itemsize, layout):
    """Calculate strides of a contiguous array.  Layout is 'C' or
    'F' (Fortran)."""
    if ndim == 0:
        return ()
    if layout == 'C':
        # Innermost stride is itemsize; accumulate right-to-left.
        strides = list(shape[1:]) + [itemsize]
        for axis in reversed(range(ndim - 1)):
            strides[axis] *= strides[axis + 1]
    else:
        # Fortran: innermost stride first; accumulate left-to-right.
        strides = [itemsize] + list(shape[:-1])
        for axis in range(1, ndim):
            strides[axis] *= strides[axis - 1]
    return strides
def _ca(items, s):
    """Convert flat item list to the nested list representation of a
    multidimensional C array with shape 's'."""
    if atomp(items):
        return items
    if len(s) == 0:
        return items[0]
    lst = [0] * s[0]
    # Number of flat items per sub-array along the first axis.
    stride = len(items) // s[0] if s[0] else 0
    for i in range(s[0]):
        start = i*stride
        # Each contiguous chunk becomes one sub-array (C order).
        lst[i] = _ca(items[start:start+stride], s[1:])
    return lst
def _fa(items, s):
    """Convert flat item list to the nested list representation of a
    multidimensional Fortran array with shape 's'."""
    if atomp(items):
        return items
    if len(s) == 0:
        return items[0]
    lst = [0] * s[0]
    stride = s[0]
    for i in range(s[0]):
        # Fortran order: consecutive flat items run down the first
        # axis, hence the strided items[i::stride] selection.
        lst[i] = _fa(items[i::stride], s[1:])
    return lst
def carray(items, shape):
    """Nested-list representation of 'items' as a C array of 'shape'."""
    needs_check = listp(items) and 0 not in shape
    if needs_check and prod(shape) != len(items):
        raise ValueError("prod(shape) != len(items)")
    return _ca(items, shape)
def farray(items, shape):
    """Nested-list representation of 'items' as a Fortran array of
    'shape'."""
    needs_check = listp(items) and 0 not in shape
    if needs_check and prod(shape) != len(items):
        raise ValueError("prod(shape) != len(items)")
    return _fa(items, shape)
def indices(shape):
    """Generate all possible tuples of indices for 'shape'."""
    return product(*[range(extent) for extent in shape])
def getindex(ndim, ind, strides):
    """Convert multi-dimensional index to the position in the flat list."""
    return sum(strides[axis] * ind[axis] for axis in range(ndim))
def transpose(src, shape):
    """Transpose flat item list that is regarded as a multi-dimensional
    matrix defined by shape: dest...[k][j][i] = src[i][j][k]... """
    if not shape:
        return src
    ndim = len(shape)
    # Source strides in C order; destination strides for the reversed
    # shape (itemsize 1, since we index items, not bytes).
    sstrides = strides_from_shape(ndim, shape, 1, 'C')
    dstrides = strides_from_shape(ndim, shape[::-1], 1, 'C')
    dest = [0] * len(src)
    for ind in indices(shape):
        fr = getindex(ndim, ind, sstrides)
        # The destination index is the reversed source index.
        to = getindex(ndim, ind[::-1], dstrides)
        dest[to] = src[fr]
    return dest
def _flatten(lst):
    """Recursively flatten a (possibly nested) list; atoms (per atomp,
    including tuples) are kept intact."""
    if lst == []:
        return lst
    if atomp(lst):
        return [lst]
    # Improvement: a single comprehension replaces the original
    # _flatten(lst[0]) + _flatten(lst[1:]) head/tail recursion, which
    # copied the tail at every step (quadratic in the list length) and
    # recursed once per element (long flat lists could exhaust the
    # recursion limit).  Recursion now only follows nesting depth.
    return [item for sub in lst for item in _flatten(sub)]
def flatten(lst):
    """Flatten a nested list, or return 'lst' unchanged if it is a
    scalar (atom)."""
    return lst if atomp(lst) else _flatten(lst)
def slice_shape(lst, slices):
    """Get the shape of lst after slicing: slices is a list of slice
    objects."""
    shape = []
    node = lst
    depth = 0
    while not atomp(node):
        shape.append(len(node[slices[depth]]))
        node = node[0]
        depth += 1
    return shape
def multislice(lst, slices):
    """Multi-dimensional slicing: slices is a list of slice objects."""
    if atomp(lst):
        return lst
    head, rest = slices[0], slices[1:]
    return [multislice(sub, rest) for sub in lst[head]]
def m_assign(llst, rlst, lslices, rslices):
    """Multi-dimensional slice assignment: llst and rlst are the operands,
    lslices and rslices are lists of slice objects. llst and rlst must
    have the same structure.

    For a two-dimensional example, this is not implemented in Python:

         llst[0:3:2, 0:3:2] = rlst[1:3:1, 1:3:1]

    Instead we write:

         lslices = [slice(0,3,2), slice(0,3,2)]
         rslices = [slice(1,3,1), slice(1,3,1)]
         multislice_assign(llst, rlst, lslices, rslices)
    """
    if atomp(rlst):
        return rlst
    # Recurse pairwise into the selected sub-lists, then assign the
    # rebuilt right-hand side into the left-hand slice in one step.
    rlst = [m_assign(l, r, lslices[1:], rslices[1:])
            for l, r in zip(llst[lslices[0]], rlst[rslices[0]])]
    llst[lslices[0]] = rlst
    return llst
def cmp_structure(llst, rlst, lslices, rslices):
    """Compare the structure of llst[lslices] and rlst[rslices]:
    0 if compatible, -1 otherwise."""
    lshape = slice_shape(llst, lslices)
    rshape = slice_shape(rlst, rslices)
    if len(lshape) != len(rshape):
        return -1
    for lextent, rextent in zip(lshape, rshape):
        if lextent != rextent:
            return -1
        if lextent == 0:
            # Empty dimension: deeper extents are irrelevant.
            return 0
    return 0
def multislice_assign(llst, rlst, lslices, rslices):
    """Return llst after assigning: llst[lslices] = rlst[rslices]"""
    structures_match = cmp_structure(llst, rlst, lslices, rslices) >= 0
    if not structures_match:
        raise ValueError("lvalue and rvalue have different structures")
    return m_assign(llst, rlst, lslices, rslices)
# ======================================================================
# Random structures
# ======================================================================
#
# PEP-3118 is very permissive with respect to the contents of a
# Py_buffer. In particular:
#
# - shape can be zero
# - strides can be any integer, including zero
# - offset can point to any location in the underlying
# memory block, provided that it is a multiple of
# itemsize.
#
# The functions in this section test and verify random structures
# in full generality. A structure is valid iff it fits in the
# underlying memory block.
#
# The structure 't' (short for 'tuple') is fully defined by:
#
# t = (memlen, itemsize, ndim, shape, strides, offset)
#
def verify_structure(memlen, itemsize, ndim, shape, strides, offset):
    """Verify that the parameters represent a valid array within
    the bounds of the allocated memory:
        char *mem: start of the physical memory block
        memlen: length of the physical memory block
        offset: (char *)buf - mem
    """
    # The buffer start must be itemsize-aligned and inside the block.
    if offset % itemsize:
        return False
    if offset < 0 or offset+itemsize > memlen:
        return False
    # All strides must be multiples of itemsize.
    if any(v % itemsize for v in strides):
        return False

    if ndim <= 0:
        # Only the scalar case (ndim == 0 with empty shape/strides)
        # is valid.
        return ndim == 0 and not shape and not strides
    if 0 in shape:
        # Empty array: no item is ever addressed.
        return True

    # Smallest (<= 0) and largest (>= 0) byte displacements reachable
    # from the buffer start.
    imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)
               if strides[j] <= 0)
    imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)
               if strides[j] > 0)

    return 0 <= offset+imin and offset+imax+itemsize <= memlen
def get_item(lst, indices):
    """Drill down into nested list 'lst', following 'indices'."""
    item = lst
    for idx in indices:
        item = item[idx]
    return item
def memory_index(indices, t):
    """Location (byte offset) of an item in the underlying memory."""
    memlen, itemsize, ndim, shape, strides, offset = t
    return offset + sum(strides[axis] * indices[axis]
                        for axis in range(ndim))
def is_overlapping(t):
    """The structure 't' is overlapping if at least one memory location
    is visited twice while iterating through all possible tuples of
    indices."""
    memlen, itemsize, ndim, shape, strides, offset = t
    # 'visited' is a bitset of visited byte offsets; the seed bit at
    # position memlen lies above any valid offset and just keeps the
    # integer nonzero.
    visited = 1<<memlen
    for ind in indices(shape):
        i = memory_index(ind, t)
        bit = 1<<i
        if visited & bit:
            return True
        visited |= bit
    return False
def rand_structure(itemsize, valid, maxdim=5, maxshape=16, shape=()):
    """Return random structure:
           (memlen, itemsize, ndim, shape, strides, offset)
       If 'valid' is true, the returned structure is valid, otherwise invalid.
       If 'shape' is given, use that instead of creating a random shape.
    """
    if not shape:
        ndim = randrange(maxdim+1)
        if (ndim == 0):
            # Scalar case: valid means a single in-bounds item,
            # invalid means an out-of-bounds offset.
            if valid:
                return itemsize, itemsize, ndim, (), (), 0
            else:
                nitems = randrange(1, 16+1)
                memlen = nitems * itemsize
                offset = -itemsize if randrange(2) == 0 else memlen
                return memlen, itemsize, ndim, (), (), offset
        # Occasionally allow 0- or 1-extent dimensions.
        minshape = 2
        n = randrange(100)
        if n >= 95 and valid:
            minshape = 0
        elif n >= 90:
            minshape = 1
        shape = [0] * ndim
        for i in range(ndim):
            shape[i] = randrange(minshape, maxshape+1)
    else:
        ndim = len(shape)

    maxstride = 5
    n = randrange(100)
    # Rarely permit zero strides (all indices map to the same items).
    zero_stride = True if n >= 95 and n & 1 else False

    strides = [0] * ndim
    strides[ndim-1] = itemsize * randrange(-maxstride, maxstride+1)
    if not zero_stride and strides[ndim-1] == 0:
        strides[ndim-1] = itemsize

    for i in range(ndim-2, -1, -1):
        maxstride *= shape[i+1] if shape[i+1] else 1
        if zero_stride:
            strides[i] = itemsize * randrange(-maxstride, maxstride+1)
        else:
            strides[i] = ((1,-1)[randrange(2)] *
                          itemsize * randrange(1, maxstride+1))

    # Extremal displacements reachable from the buffer start (see
    # verify_structure()).
    imin = imax = 0
    if not 0 in shape:
        imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)
                   if strides[j] <= 0)
        imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)
                   if strides[j] > 0)

    nitems = imax - imin
    if valid:
        offset = -imin * itemsize
        memlen = offset + (imax+1) * itemsize
    else:
        # Memory block one item too small, or offset out of bounds.
        memlen = (-imin + imax) * itemsize
        offset = -imin-itemsize if randrange(2) == 0 else memlen

    return memlen, itemsize, ndim, shape, strides, offset
def randslice_from_slicelen(slicelen, listlen):
    """Create a random slice of len slicelen that fits into listlen."""
    maxstart = listlen - slicelen
    start = randrange(maxstart+1)
    maxstep = (listlen - start) // slicelen if slicelen else 1
    step = randrange(1, maxstep+1)
    stop = start + slicelen * step
    s = slice(start, stop, step)
    # Sanity check against _testbuffer.slice_indices(): the resulting
    # slice must have exactly 'slicelen' elements.
    _, _, _, control = slice_indices(s, listlen)
    if control != slicelen:
        raise RuntimeError
    return s
def randslice_from_shape(ndim, shape):
    """Create two sets of slices for an array x with shape 'shape'
    such that shapeof(x[lslices]) == shapeof(x[rslices])."""
    lslices = []
    rslices = []
    for extent in shape[:ndim]:
        # Same length for both slices so the sliced shapes agree.
        slicelen = randrange(1, extent+1) if extent > 0 else 0
        lslices.append(randslice_from_slicelen(slicelen, extent))
        rslices.append(randslice_from_slicelen(slicelen, extent))
    return tuple(lslices), tuple(rslices)
def rand_aligned_slices(maxdim=5, maxshape=16):
    """Create (lshape, rshape, tuple(lslices), tuple(rslices)) such that
    shapeof(x[lslices]) == shapeof(y[rslices]), where x is an array
    with shape 'lshape' and y is an array with shape 'rshape'."""
    ndim = randrange(1, maxdim+1)
    # Occasionally allow 0- or 1-extent dimensions.
    minshape = 2
    n = randrange(100)
    if n >= 95:
        minshape = 0
    elif n >= 90:
        minshape = 1
    all_random = True if randrange(100) >= 80 else False
    lshape = [0]*ndim; rshape = [0]*ndim
    lslices = [0]*ndim; rslices = [0]*ndim

    for n in range(ndim):
        small = randrange(minshape, maxshape+1)
        big = randrange(minshape, maxshape+1)
        if big < small:
            big, small = small, big

        # Create a slice that fits the smaller value.
        if all_random:
            start = randrange(-small, small+1)
            stop = randrange(-small, small+1)
            step = (1,-1)[randrange(2)] * randrange(1, small+2)
            s_small = slice(start, stop, step)
            _, _, _, slicelen = slice_indices(s_small, small)
        else:
            slicelen = randrange(1, small+1) if small > 0 else 0
            s_small = randslice_from_slicelen(slicelen, small)

        # Create a slice of the same length for the bigger value.
        s_big = randslice_from_slicelen(slicelen, big)

        # Randomly pick which side gets the bigger shape.
        if randrange(2) == 0:
            rshape[n], lshape[n] = big, small
            rslices[n], lslices[n] = s_big, s_small
        else:
            rshape[n], lshape[n] = small, big
            rslices[n], lslices[n] = s_small, s_big

    return lshape, rshape, tuple(lslices), tuple(rslices)
def randitems_from_structure(fmt, t):
    """Return a list of random items for structure 't' with format
    character(s) 'fmt'."""
    memlen, itemsize = t[0], t[1]
    nitems = memlen // itemsize
    return gen_items(nitems, '#'+fmt, 'numpy')
def ndarray_from_structure(items, fmt, t, flags=0):
    """Return ndarray from the tuple returned by rand_structure()"""
    memlen, itemsize, ndim, shape, strides, offset = t
    # ndarray here is the _testbuffer exporter; always writable.
    return ndarray(items, shape=shape, strides=strides, format=fmt,
                   offset=offset, flags=ND_WRITABLE|flags)
def numpy_array_from_structure(items, fmt, t):
    """Return numpy_array from the tuple returned by rand_structure()"""
    memlen, itemsize, ndim, shape, strides, offset = t
    # Pack the items into a raw buffer and hand it to NumPy with the
    # same shape/strides/offset so both implementations see identical
    # memory.
    buf = bytearray(memlen)
    for j, v in enumerate(items):
        struct.pack_into(fmt, buf, j*itemsize, v)
    return numpy_array(buffer=buf, shape=shape, strides=strides,
                       dtype=fmt, offset=offset)
# ======================================================================
# memoryview casts
# ======================================================================
def cast_items(exporter, fmt, itemsize, shape=None):
    """Interpret the raw memory of 'exporter' as a list of items with
    size 'itemsize'. If shape=None, the new structure is assumed to
    be 1-D with n * itemsize = bytelen. If shape is given, the usual
    constraint for contiguous arrays prod(shape) * itemsize = bytelen
    applies. On success, return (items, shape). If the constraints
    cannot be met, return (None, None). If a chunk of bytes is interpreted
    as NaN as a result of float conversion, return ('nan', None)."""
    bytelen = exporter.nbytes
    if shape:
        if prod(shape) * itemsize != bytelen:
            return None, shape
    elif shape == []:
        # Cast to a scalar: only valid if the whole buffer is one item.
        if exporter.ndim == 0 or itemsize != bytelen:
            return None, shape
    else:
        # shape=None: derive a 1-D shape; itemsize must divide bytelen.
        n, r = divmod(bytelen, itemsize)
        shape = [n]
        if r != 0:
            return None, shape

    mem = exporter.tobytes()
    byteitems = [mem[i:i+itemsize] for i in range(0, len(mem), itemsize)]

    items = []
    for v in byteitems:
        item = struct.unpack(fmt, v)[0]
        if item != item:
            # NaN compares unequal to itself.
            return 'nan', shape
        items.append(item)

    return (items, shape) if shape != [] else (items[0], shape)
def gencastshapes():
    """Generate shapes to test casting: every 1-D shape up to [31],
    plus one random higher-dimensional and one random low-dimensional
    shape."""
    yield from ([k] for k in range(32))
    for lo, hi in ((4, 6), (2, 4)):
        ndim = randrange(lo, hi)
        minshape = 1 if randrange(100) > 80 else 2
        yield [randrange(minshape, 5) for _ in range(ndim)]
# ======================================================================
# Actual tests
# ======================================================================
def genslices(n):
    """Generate all possible (start, stop, step) triples for a single
    dimension of length n."""
    r = range(-n, n+1)
    return product(r, r, r)
def genslices_ndim(ndim, shape):
    """Generate all possible slice tuples for 'shape'."""
    per_dim = [genslices(shape[n]) for n in range(ndim)]
    return product(*per_dim)
def rslice(n, allow_empty=False):
    """Generate a random slice for a single dimension of length n.
    If allow_empty is true, the slice may be empty, otherwise it will
    be non-empty (for n == 0 the slice is necessarily empty)."""
    minlen = 0 if allow_empty or n == 0 else 1
    slicelen = randrange(minlen, n+1)
    return randslice_from_slicelen(slicelen, n)
def rslices(n, allow_empty=False):
    """Generate five random slices for a single dimension."""
    remaining = 5
    while remaining:
        yield rslice(n, allow_empty)
        remaining -= 1
def rslices_ndim(ndim, shape, iterations=5):
    """Generate random slice tuples for 'shape'."""
    # non-empty slices
    for _ in range(iterations):
        yield tuple(rslice(shape[n]) for n in range(ndim))
    # possibly empty slices
    for _ in range(iterations):
        yield tuple(rslice(shape[n], allow_empty=True) for n in range(ndim))
    # invalid slices (step 0 is rejected by slicing)
    yield tuple(slice(0,1,0) for _ in range(ndim))
def rpermutation(iterable, r=None):
    # Cheap stand-in for itertools.permutations in SHORT_TEST mode
    # (see the rebinding below): yields a SINGLE random r-permutation
    # instead of enumerating all of them.
    pool = tuple(iterable)
    r = len(pool) if r is None else r
    yield tuple(sample(pool, r))
def ndarray_print(nd):
    """Print ndarray for debugging."""
    try:
        x = nd.tolist()
    except (TypeError, NotImplementedError):
        # Fall back to the raw bytes if the format is not listable.
        x = nd.tobytes()
    if isinstance(nd, ndarray):
        offset = nd.offset
        flags = nd.flags
    else:
        # memoryview and numpy arrays do not expose these attributes.
        offset = 'unknown'
        flags = 'unknown'
    print("ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, "
          "format='%s', itemsize=%s, flags=%s)" %
          (x, nd.shape, nd.strides, nd.suboffsets, offset,
           nd.format, nd.itemsize, flags))
    sys.stdout.flush()
ITERATIONS = 100   # rounds per randomized test
MAXDIM = 5         # maximum number of dimensions
MAXSHAPE = 10      # maximum extent per dimension

if SHORT_TEST:
    # Shrink the problem sizes and replace the exhaustive slice and
    # permutation generators with random-sampling variants.
    ITERATIONS = 10
    MAXDIM = 3
    MAXSHAPE = 4
    genslices = rslices
    genslices_ndim = rslices_ndim
    permutations = rpermutation
@unittest.skipUnless(struct, 'struct module required for this test.')
@unittest.skipUnless(ndarray, 'ndarray object required for this test')
class TestBufferProtocol(unittest.TestCase):
def setUp(self):
    """Cache platform pointer size for each test."""
    # The suboffsets tests need sizeof(void *).
    self.sizeof_void_p = get_sizeof_void_p()
def verify(self, result, obj=-1,
itemsize={1}, fmt=-1, readonly={1},
ndim={1}, shape=-1, strides=-1,
lst=-1, sliced=False, cast=False):
# Verify buffer contents against expected values. Default values
# are deliberately initialized to invalid types.
if shape:
expected_len = prod(shape)*itemsize
else:
if not fmt: # array has been implicitly cast to unsigned bytes
expected_len = len(lst)
else: # ndim = 0
expected_len = itemsize
# Reconstruct suboffsets from strides. Support for slicing
# could be added, but is currently only needed for test_getbuf().
suboffsets = ()
if result.suboffsets:
self.assertGreater(ndim, 0)
suboffset0 = 0
for n in range(1, ndim):
if shape[n] == 0:
break
if strides[n] <= 0:
suboffset0 += -strides[n] * (shape[n]-1)
suboffsets = [suboffset0] + [-1 for v in range(ndim-1)]
# Not correct if slicing has occurred in the first dimension.
stride0 = self.sizeof_void_p
if strides[0] < 0:
stride0 = -stride0
strides = [stride0] + list(strides[1:])
self.assertIs(result.obj, obj)
self.assertEqual(result.nbytes, expected_len)
self.assertEqual(result.itemsize, itemsize)
self.assertEqual(result.format, fmt)
self.assertEqual(result.readonly, readonly)
self.assertEqual(result.ndim, ndim)
self.assertEqual(result.shape, tuple(shape))
if not (sliced and suboffsets):
self.assertEqual(result.strides, tuple(strides))
self.assertEqual(result.suboffsets, tuple(suboffsets))
if isinstance(result, ndarray) or is_memoryview_format(fmt):
rep = result.tolist() if fmt else result.tobytes()
self.assertEqual(rep, lst)
if not fmt: # array has been cast to unsigned bytes,
return # the remaining tests won't work.
# PyBuffer_GetPointer() is the definition how to access an item.
# If PyBuffer_GetPointer(indices) is correct for all possible
# combinations of indices, the buffer is correct.
#
# Also test tobytes() against the flattened 'lst', with all items
# packed to bytes.
if not cast: # casts chop up 'lst' in different ways
b = bytearray()
buf_err = None
for ind in indices(shape):
try:
item1 = get_pointer(result, ind)
item2 = get_item(lst, ind)
if isinstance(item2, tuple):
x = struct.pack(fmt, *item2)
else:
x = struct.pack(fmt, item2)
b.extend(x)
except BufferError:
buf_err = True # re-exporter does not provide full buffer
break
self.assertEqual(item1, item2)
if not buf_err:
# test tobytes()
self.assertEqual(result.tobytes(), b)
# lst := expected multi-dimensional logical representation
# flatten(lst) := elements in C-order
ff = fmt if fmt else 'B'
flattened = flatten(lst)
# Rules for 'A': if the array is already contiguous, return
# the array unaltered. Otherwise, return a contiguous 'C'
# representation.
for order in ['C', 'F', 'A']:
expected = result
if order == 'F':
if not is_contiguous(result, 'A') or \
is_contiguous(result, 'C'):
# For constructing the ndarray, convert the
# flattened logical representation to Fortran order.
trans = transpose(flattened, shape)
expected = ndarray(trans, shape=shape, format=ff,
flags=ND_FORTRAN)
else: # 'C', 'A'
if not is_contiguous(result, 'A') or \
is_contiguous(result, 'F') and order == 'C':
# The flattened list is already in C-order.
expected = ndarray(flattened, shape=shape, format=ff)
contig = get_contiguous(result, PyBUF_READ, order)
self.assertEqual(contig.tobytes(), b)
self.assertTrue(cmp_contig(contig, expected))
if ndim == 0:
continue
nmemb = len(flattened)
ro = 0 if readonly else ND_WRITABLE
### See comment in test_py_buffer_to_contiguous for an
### explanation why these tests are valid.
# To 'C'
contig = py_buffer_to_contiguous(result, 'C', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
y = ndarray(initlst, shape=shape, flags=ro, format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
# To 'F'
contig = py_buffer_to_contiguous(result, 'F', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
y = ndarray(initlst, shape=shape, flags=ro|ND_FORTRAN,
format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
# To 'A'
contig = py_buffer_to_contiguous(result, 'A', PyBUF_FULL_RO)
self.assertEqual(len(contig), nmemb * itemsize)
initlst = [struct.unpack_from(fmt, contig, n*itemsize)
for n in range(nmemb)]
if len(initlst[0]) == 1:
initlst = [v[0] for v in initlst]
f = ND_FORTRAN if is_contiguous(result, 'F') else 0
y = ndarray(initlst, shape=shape, flags=f|ro, format=fmt)
self.assertEqual(memoryview(y), memoryview(result))
if is_memoryview_format(fmt):
try:
m = memoryview(result)
except BufferError: # re-exporter does not provide full information
return
ex = result.obj if isinstance(result, memoryview) else result
self.assertIs(m.obj, ex)
self.assertEqual(m.nbytes, expected_len)
self.assertEqual(m.itemsize, itemsize)
self.assertEqual(m.format, fmt)
self.assertEqual(m.readonly, readonly)
self.assertEqual(m.ndim, ndim)
self.assertEqual(m.shape, tuple(shape))
if not (sliced and suboffsets):
self.assertEqual(m.strides, tuple(strides))
self.assertEqual(m.suboffsets, tuple(suboffsets))
n = 1 if ndim == 0 else len(lst)
self.assertEqual(len(m), n)
rep = result.tolist() if fmt else result.tobytes()
self.assertEqual(rep, lst)
self.assertEqual(m, result)
def verify_getbuf(self, orig_ex, ex, req, sliced=False):
    """Issue getbuffer request 'req' against exporter 'ex' and verify
    the result (or that BufferError is raised when the exporter cannot
    satisfy the request).  'orig_ex' is the original exporter in case
    'ex' is a re-exporting wrapper."""
    def simple_fmt(ex):
        return ex.format == '' or ex.format == 'B'
    def match(req, flag):
        return ((req&flag) == flag)

    if (# writable request to read-only exporter
        (ex.readonly and match(req, PyBUF_WRITABLE)) or
        # cannot match explicit contiguity request
        (match(req, PyBUF_C_CONTIGUOUS) and not ex.c_contiguous) or
        (match(req, PyBUF_F_CONTIGUOUS) and not ex.f_contiguous) or
        (match(req, PyBUF_ANY_CONTIGUOUS) and not ex.contiguous) or
        # buffer needs suboffsets
        (not match(req, PyBUF_INDIRECT) and ex.suboffsets) or
        # buffer without strides must be C-contiguous
        (not match(req, PyBUF_STRIDES) and not ex.c_contiguous) or
        # PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT
        (not match(req, PyBUF_ND) and match(req, PyBUF_FORMAT))):

        self.assertRaises(BufferError, ndarray, ex, getbuf=req)
        return

    # Expected logical representation of the exporter's contents.
    if isinstance(ex, ndarray) or is_memoryview_format(ex.format):
        lst = ex.tolist()
    else:
        nd = ndarray(ex, getbuf=PyBUF_FULL_RO)
        lst = nd.tolist()

    # The consumer may have requested default values or a NULL format.
    ro = 0 if match(req, PyBUF_WRITABLE) else ex.readonly
    fmt = ex.format
    itemsize = ex.itemsize
    ndim = ex.ndim
    if not match(req, PyBUF_FORMAT):
        # itemsize refers to the original itemsize before the cast.
        # The equality product(shape) * itemsize = len still holds.
        # The equality calcsize(format) = itemsize does _not_ hold.
        fmt = ''
        lst = orig_ex.tobytes() # Issue 12834
    if not match(req, PyBUF_ND):
        ndim = 1
    shape = orig_ex.shape if match(req, PyBUF_ND) else ()
    strides = orig_ex.strides if match(req, PyBUF_STRIDES) else ()

    nd = ndarray(ex, getbuf=req)
    self.verify(nd, obj=ex,
                itemsize=itemsize, fmt=fmt, readonly=ro,
                ndim=ndim, shape=shape, strides=strides,
                lst=lst, sliced=sliced)
def test_ndarray_getbuf(self):
requests = (
# distinct flags
PyBUF_INDIRECT, PyBUF_STRIDES, PyBUF_ND, PyBUF_SIMPLE,
PyBUF_C_CONTIGUOUS, PyBUF_F_CONTIGUOUS, PyBUF_ANY_CONTIGUOUS,
# compound requests
PyBUF_FULL, PyBUF_FULL_RO,
PyBUF_RECORDS, PyBUF_RECORDS_RO,
PyBUF_STRIDED, PyBUF_STRIDED_RO,
PyBUF_CONTIG, PyBUF_CONTIG_RO,
)
# items and format
items_fmt = (
([True if x % 2 else False for x in range(12)], '?'),
([1,2,3,4,5,6,7,8,9,10,11,12], 'b'),
([1,2,3,4,5,6,7,8,9,10,11,12], 'B'),
([(2**31-x) if x % 2 else (-2**31+x) for x in range(12)], 'l')
)
# shape, strides, offset
structure = (
([], [], 0),
([12], [], 0),
([12], [-1], 11),
([6], [2], 0),
([6], [-2], 11),
([3, 4], [], 0),
([3, 4], [-4, -1], 11),
([2, 2], [4, 1], 4),
([2, 2], [-4, -1], 8)
)
# ndarray creation flags
ndflags = (
0, ND_WRITABLE, ND_FORTRAN, ND_FORTRAN|ND_WRITABLE,
ND_PIL, ND_PIL|ND_WRITABLE
)
# flags that can actually be used as flags
real_flags = (0, PyBUF_WRITABLE, PyBUF_FORMAT,
PyBUF_WRITABLE|PyBUF_FORMAT)
for items, fmt in items_fmt:
itemsize = struct.calcsize(fmt)
for shape, strides, offset in structure:
strides = [v * itemsize for v in strides]
offset *= itemsize
for flags in ndflags:
if strides and (flags&ND_FORTRAN):
continue
if not shape and (flags&ND_PIL):
continue
_items = items if shape else items[0]
ex1 = ndarray(_items, format=fmt, flags=flags,
shape=shape, strides=strides, offset=offset)
ex2 = ex1[::-2] if shape else None
m1 = memoryview(ex1)
if ex2:
m2 = memoryview(ex2)
if ex1.ndim == 0 or (ex1.ndim == 1 and shape and strides):
self.assertEqual(m1, ex1)
if ex2 and ex2.ndim == 1 and shape and strides:
self.assertEqual(m2, ex2)
for req in requests:
for bits in real_flags:
self.verify_getbuf(ex1, ex1, req|bits)
self.verify_getbuf(ex1, m1, req|bits)
if ex2:
self.verify_getbuf(ex2, ex2, req|bits,
sliced=True)
self.verify_getbuf(ex2, m2, req|bits,
sliced=True)
items = [1,2,3,4,5,6,7,8,9,10,11,12]
# ND_GETBUF_FAIL
ex = ndarray(items, shape=[12], flags=ND_GETBUF_FAIL)
self.assertRaises(BufferError, ndarray, ex)
# Request complex structure from a simple exporter. In this
# particular case the test object is not PEP-3118 compliant.
base = ndarray([9], [1])
ex = ndarray(base, getbuf=PyBUF_SIMPLE)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_WRITABLE)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_ND)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_STRIDES)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_C_CONTIGUOUS)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_F_CONTIGUOUS)
self.assertRaises(BufferError, ndarray, ex, getbuf=PyBUF_ANY_CONTIGUOUS)
nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
def test_ndarray_exceptions(self):
    # Error paths of the _testbuffer.ndarray constructor / mutators and
    # of the module-level helper functions (get_pointer, get_contiguous,
    # cmp_contig, is_contiguous).
    nd = ndarray([9], [1])
    ndm = ndarray([9], [1], flags=ND_VAREXPORT)

    # Initialization of a new ndarray or mutation of an existing array.
    for c in (ndarray, nd.push, ndm.push):
        # Invalid types.
        self.assertRaises(TypeError, c, {1,2,3})
        self.assertRaises(TypeError, c, [1,2,'3'])
        self.assertRaises(TypeError, c, [1,2,(3,4)])
        self.assertRaises(TypeError, c, [1,2,3], shape={3})
        self.assertRaises(TypeError, c, [1,2,3], shape=[3], strides={1})
        self.assertRaises(TypeError, c, [1,2,3], shape=[3], offset=[])
        self.assertRaises(TypeError, c, [1], shape=[1], format={})
        self.assertRaises(TypeError, c, [1], shape=[1], flags={})
        self.assertRaises(TypeError, c, [1], shape=[1], getbuf={})

        # ND_FORTRAN flag is only valid without strides.
        self.assertRaises(TypeError, c, [1], shape=[1], strides=[1],
                          flags=ND_FORTRAN)

        # ND_PIL flag is only valid with ndim > 0.
        self.assertRaises(TypeError, c, [1], shape=[], flags=ND_PIL)

        # Invalid items.
        self.assertRaises(ValueError, c, [], shape=[1])
        self.assertRaises(ValueError, c, ['XXX'], shape=[1], format="L")
        # Invalid combination of items and format.
        self.assertRaises(struct.error, c, [1000], shape=[1], format="B")
        self.assertRaises(ValueError, c, [1,(2,3)], shape=[2], format="B")
        self.assertRaises(ValueError, c, [1,2,3], shape=[3], format="QL")

        # Invalid ndim.
        n = ND_MAX_NDIM+1
        self.assertRaises(ValueError, c, [1]*n, shape=[1]*n)

        # Invalid shape.
        self.assertRaises(ValueError, c, [1], shape=[-1])
        self.assertRaises(ValueError, c, [1,2,3], shape=['3'])
        self.assertRaises(OverflowError, c, [1], shape=[2**128])
        # prod(shape) * itemsize != len(items)
        self.assertRaises(ValueError, c, [1,2,3,4,5], shape=[2,2], offset=3)

        # Invalid strides.
        self.assertRaises(ValueError, c, [1,2,3], shape=[3], strides=['1'])
        self.assertRaises(OverflowError, c, [1], shape=[1],
                          strides=[2**128])

        # Invalid combination of strides and shape.
        self.assertRaises(ValueError, c, [1,2], shape=[2,1], strides=[1])
        # Invalid combination of strides and format.
        self.assertRaises(ValueError, c, [1,2,3,4], shape=[2], strides=[3],
                          format="L")

        # Invalid offset.
        self.assertRaises(ValueError, c, [1,2,3], shape=[3], offset=4)
        self.assertRaises(ValueError, c, [1,2,3], shape=[1], offset=3,
                          format="L")

        # Invalid format.
        self.assertRaises(ValueError, c, [1,2,3], shape=[3], format="")
        self.assertRaises(struct.error, c, [(1,2,3)], shape=[1],
                          format="@#$")

        # Striding out of the memory bounds.
        items = [1,2,3,4,5,6,7,8,9,10]
        self.assertRaises(ValueError, c, items, shape=[2,3],
                          strides=[-3, -2], offset=5)

        # Constructing consumer: format argument invalid.
        self.assertRaises(TypeError, c, bytearray(), format="Q")

        # Constructing original base object: getbuf argument invalid.
        self.assertRaises(TypeError, c, [1], shape=[1], getbuf=PyBUF_FULL)

        # Shape argument is mandatory for original base objects.
        self.assertRaises(TypeError, c, [1])

    # PyBUF_WRITABLE request to read-only provider.
    self.assertRaises(BufferError, ndarray, b'123', getbuf=PyBUF_WRITABLE)

    # ND_VAREXPORT can only be specified during construction.
    nd = ndarray([9], [1], flags=ND_VAREXPORT)
    self.assertRaises(ValueError, nd.push, [1], [1], flags=ND_VAREXPORT)

    # Invalid operation for consumers: push/pop
    nd = ndarray(b'123')
    self.assertRaises(BufferError, nd.push, [1], [1])
    self.assertRaises(BufferError, nd.pop)

    # ND_VAREXPORT not set: push/pop fail with exported buffers
    nd = ndarray([9], [1])
    nd.push([1], [1])
    m = memoryview(nd)
    self.assertRaises(BufferError, nd.push, [1], [1])
    self.assertRaises(BufferError, nd.pop)
    # After the view is released, pop succeeds again.
    m.release()
    nd.pop()

    # Single remaining buffer: pop fails
    self.assertRaises(BufferError, nd.pop)
    del nd

    # get_pointer()
    self.assertRaises(TypeError, get_pointer, {}, [1,2,3])
    self.assertRaises(TypeError, get_pointer, b'123', {})

    nd = ndarray(list(range(100)), shape=[1]*100)
    self.assertRaises(ValueError, get_pointer, nd, [5])

    nd = ndarray(list(range(12)), shape=[3,4])
    self.assertRaises(ValueError, get_pointer, nd, [2,3,4])
    self.assertRaises(ValueError, get_pointer, nd, [3,3])
    self.assertRaises(ValueError, get_pointer, nd, [-3,3])
    self.assertRaises(OverflowError, get_pointer, nd, [1<<64,3])

    # tolist() needs format
    ex = ndarray([1,2,3], shape=[3], format='L')
    nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
    self.assertRaises(ValueError, nd.tolist)

    # memoryview_from_buffer()
    ex1 = ndarray([1,2,3], shape=[3], format='L')
    ex2 = ndarray(ex1)
    nd = ndarray(ex2)
    self.assertRaises(TypeError, nd.memoryview_from_buffer)

    nd = ndarray([(1,)*200], shape=[1], format='L'*200)
    self.assertRaises(TypeError, nd.memoryview_from_buffer)

    n = ND_MAX_NDIM
    nd = ndarray(list(range(n)), shape=[1]*n)
    self.assertRaises(ValueError, nd.memoryview_from_buffer)

    # get_contiguous()
    nd = ndarray([1], shape=[1])
    self.assertRaises(TypeError, get_contiguous, 1, 2, 3, 4, 5)
    self.assertRaises(TypeError, get_contiguous, nd, "xyz", 'C')
    self.assertRaises(OverflowError, get_contiguous, nd, 2**64, 'C')
    self.assertRaises(TypeError, get_contiguous, nd, PyBUF_READ, 961)
    self.assertRaises(UnicodeEncodeError, get_contiguous, nd, PyBUF_READ,
                      '\u2007')
    self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'Z')
    self.assertRaises(ValueError, get_contiguous, nd, 255, 'A')

    # cmp_contig()
    nd = ndarray([1], shape=[1])
    self.assertRaises(TypeError, cmp_contig, 1, 2, 3, 4, 5)
    self.assertRaises(TypeError, cmp_contig, {}, nd)
    self.assertRaises(TypeError, cmp_contig, nd, {})

    # is_contiguous()
    nd = ndarray([1], shape=[1])
    self.assertRaises(TypeError, is_contiguous, 1, 2, 3, 4, 5)
    self.assertRaises(TypeError, is_contiguous, {}, 'A')
    self.assertRaises(TypeError, is_contiguous, nd, 201)
def test_ndarray_linked_list(self):
    """Release five exported memoryviews in every possible order; while
    any export is alive the final buffer cannot be popped."""
    for order in permutations(range(5)):
        nd = ndarray([1,2,3], shape=[3], flags=ND_VAREXPORT)
        views = [memoryview(nd)]
        for _ in range(1, 5):
            nd.push([1,2,3], shape=[3])
            views.append(memoryview(nd))
        for idx in order:
            views[idx].release()
        # All views are gone, but only one buffer remains: pop must fail.
        self.assertRaises(BufferError, nd.pop)
        del nd
def test_ndarray_format_scalar(self):
    """A zero-dimensional (scalar) ndarray for every supported format."""
    for fmt, scalar, _ in iter_format(0):
        size = struct.calcsize(fmt)
        nd = ndarray(scalar, shape=(), format=fmt)
        # A scalar has empty shape and strides and exactly one item.
        self.verify(nd, obj=None,
                    itemsize=size, fmt=fmt, readonly=1,
                    ndim=0, shape=(), strides=(),
                    lst=scalar)
def test_ndarray_format_shape(self):
    """One-dimensional arrays of random length, both plain and with
    suboffsets (ND_PIL)."""
    count = randrange(1, 10)
    for fmt, items, _ in iter_format(count):
        size = struct.calcsize(fmt)
        for flags in (0, ND_PIL):
            nd = ndarray(items, shape=[count], format=fmt, flags=flags)
            self.verify(nd, obj=None,
                        itemsize=size, fmt=fmt, readonly=1,
                        ndim=1, shape=(count,), strides=(size,),
                        lst=items)
def test_ndarray_format_strides(self):
    """One-dimensional strided views for every nonzero step in [-5, 4]."""
    count = randrange(1, 30)
    for fmt, items, _ in iter_format(count):
        size = struct.calcsize(fmt)
        nonzero_steps = [s for s in range(-5, 5) if s != 0]
        for step in nonzero_steps:
            expected = items[::step]
            shape = [len(expected)]
            strides = [step*size]
            # A negative step must start at the last element.
            offset = (count-1)*size if step < 0 else 0
            for flags in (0, ND_PIL):
                nd = ndarray(items, shape=shape, strides=strides,
                             format=fmt, offset=offset, flags=flags)
                self.verify(nd, obj=None,
                            itemsize=size, fmt=fmt, readonly=1,
                            ndim=1, shape=shape, strides=strides,
                            lst=expected)
def test_ndarray_fortran(self):
    """A column-major strided exporter read back through a
    Fortran-contiguous getbuffer request."""
    values = list(range(1, 13))
    ex = ndarray(values, shape=(3, 4), strides=(1, 3))
    nd = ndarray(ex, getbuf=PyBUF_F_CONTIGUOUS|PyBUF_FORMAT)
    self.assertEqual(nd.tolist(), farray(values, (3, 4)))
def test_ndarray_multidim(self):
    # Multi-dimensional arrays (ndim 0..4) in C and Fortran order, with
    # and without suboffsets (ND_PIL), verified against the nested-list
    # reference representations carray()/farray().
    for ndim in range(5):
        shape_t = [randrange(2, 10) for _ in range(ndim)]
        nitems = prod(shape_t)
        for shape in permutations(shape_t):
            fmt, items, _ = randitems(nitems)
            itemsize = struct.calcsize(fmt)
            for flags in (0, ND_PIL):
                if ndim == 0 and flags == ND_PIL:
                    continue  # suboffsets require ndim > 0

                # C array
                nd = ndarray(items, shape=shape, format=fmt, flags=flags)

                strides = strides_from_shape(ndim, shape, itemsize, 'C')
                lst = carray(items, shape)
                self.verify(nd, obj=None,
                            itemsize=itemsize, fmt=fmt, readonly=1,
                            ndim=ndim, shape=shape, strides=strides,
                            lst=lst)

                if is_memoryview_format(fmt):
                    # memoryview: reconstruct strides
                    ex = ndarray(items, shape=shape, format=fmt)
                    nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
                    # The consumer got no strides ...
                    self.assertTrue(nd.strides == ())
                    mv = nd.memoryview_from_buffer()
                    # ... but the memoryview reconstructs the C strides.
                    self.verify(mv, obj=None,
                                itemsize=itemsize, fmt=fmt, readonly=1,
                                ndim=ndim, shape=shape, strides=strides,
                                lst=lst)

                # Fortran array
                nd = ndarray(items, shape=shape, format=fmt,
                             flags=flags|ND_FORTRAN)

                strides = strides_from_shape(ndim, shape, itemsize, 'F')
                lst = farray(items, shape)
                self.verify(nd, obj=None,
                            itemsize=itemsize, fmt=fmt, readonly=1,
                            ndim=ndim, shape=shape, strides=strides,
                            lst=lst)
def test_ndarray_index_invalid(self):
    # Invalid item access: read-only targets, deletion, out-of-range
    # indices, and values that do not fit the item format.

    # not writable
    nd = ndarray([1], shape=[1])
    self.assertRaises(TypeError, nd.__setitem__, 1, 8)
    mv = memoryview(nd)
    self.assertEqual(mv, nd)
    self.assertRaises(TypeError, mv.__setitem__, 1, 8)

    # cannot be deleted
    nd = ndarray([1], shape=[1], flags=ND_WRITABLE)
    self.assertRaises(TypeError, nd.__delitem__, 1)
    mv = memoryview(nd)
    self.assertEqual(mv, nd)
    self.assertRaises(TypeError, mv.__delitem__, 1)

    # overflow
    nd = ndarray([1], shape=[1], flags=ND_WRITABLE)
    self.assertRaises(OverflowError, nd.__getitem__, 1<<64)
    self.assertRaises(OverflowError, nd.__setitem__, 1<<64, 8)
    mv = memoryview(nd)
    self.assertEqual(mv, nd)
    # NOTE: for huge indices memoryview raises IndexError where the
    # ndarray test object raises OverflowError.
    self.assertRaises(IndexError, mv.__getitem__, 1<<64)
    self.assertRaises(IndexError, mv.__setitem__, 1<<64, 8)

    # format
    items = [1,2,3,4,5,6,7,8]
    nd = ndarray(items, shape=[len(items)], format="B", flags=ND_WRITABLE)
    # 300 does not fit into an unsigned byte; a tuple is the wrong
    # structure for a single-field format.
    self.assertRaises(struct.error, nd.__setitem__, 2, 300)
    self.assertRaises(ValueError, nd.__setitem__, 1, (100, 200))
    mv = memoryview(nd)
    self.assertEqual(mv, nd)
    self.assertRaises(ValueError, mv.__setitem__, 2, 300)
    self.assertRaises(TypeError, mv.__setitem__, 1, (100, 200))

    items = [(1,2), (3,4), (5,6)]
    nd = ndarray(items, shape=[len(items)], format="LQ", flags=ND_WRITABLE)
    self.assertRaises(ValueError, nd.__setitem__, 2, 300)
    self.assertRaises(struct.error, nd.__setitem__, 1, (b'\x001', 200))
def test_ndarray_index_scalar(self):
    """Indexing a 0-d array: only () and ... are valid indices."""
    nd = ndarray(1, shape=(), flags=ND_WRITABLE)
    mv = memoryview(nd)
    self.assertEqual(mv, nd)

    # () yields the element; ... yields a view of the scalar itself.
    self.assertEqual(nd[()], 1)
    self.assertEqual(nd[...].tolist(), nd.tolist())
    self.assertEqual(mv[()], 1)
    self.assertEqual(mv[...].tolist(), nd.tolist())

    # Integer indices are rejected for reading and writing alike.
    self.assertRaises(TypeError, nd.__getitem__, 0)
    self.assertRaises(TypeError, mv.__getitem__, 0)
    self.assertRaises(TypeError, nd.__setitem__, 0, 8)
    self.assertRaises(TypeError, mv.__setitem__, 0, 8)

    self.assertEqual(nd.tolist(), 1)
    self.assertEqual(mv.tolist(), 1)

    # Assignment through () and ... updates the single element.
    nd[()] = 9
    self.assertEqual(nd.tolist(), 9)
    mv[()] = 9
    self.assertEqual(mv.tolist(), 9)

    nd[...] = 5
    self.assertEqual(nd.tolist(), 5)
    mv[...] = 5
    self.assertEqual(mv.tolist(), 5)
def test_ndarray_index_null_strides(self):
    """Multi-dimensional item and slice access need a full exporter."""
    ex = ndarray(list(range(8)), shape=[2, 4], flags=ND_WRITABLE)
    consumer = ndarray(ex, getbuf=PyBUF_CONTIG)
    # The consumer lacks complete buffer information, so sub-views
    # cannot be produced — neither by item nor by slice access.
    self.assertRaises(BufferError, consumer.__getitem__, 1)
    self.assertRaises(BufferError, consumer.__getitem__, slice(3,5,1))
def test_ndarray_index_getitem_single(self):
    # Single-item access on 1-D arrays, including consumers that were
    # re-exported without strides, format, or shape information.

    # getitem
    for fmt, items, _ in iter_format(5):
        nd = ndarray(items, shape=[5], format=fmt)
        for i in range(-5, 5):
            self.assertEqual(nd[i], items[i])

        self.assertRaises(IndexError, nd.__getitem__, -6)
        self.assertRaises(IndexError, nd.__getitem__, 5)

        if is_memoryview_format(fmt):
            mv = memoryview(nd)
            self.assertEqual(mv, nd)
            for i in range(-5, 5):
                self.assertEqual(mv[i], items[i])

            self.assertRaises(IndexError, mv.__getitem__, -6)
            self.assertRaises(IndexError, mv.__getitem__, 5)

    # getitem with null strides
    for fmt, items, _ in iter_format(5):
        ex = ndarray(items, shape=[5], flags=ND_WRITABLE, format=fmt)
        nd = ndarray(ex, getbuf=PyBUF_CONTIG|PyBUF_FORMAT)

        for i in range(-5, 5):
            self.assertEqual(nd[i], items[i])

        if is_memoryview_format(fmt):
            mv = nd.memoryview_from_buffer()
            # PyMemoryView_FromBuffer views do not support comparison.
            self.assertIs(mv.__eq__(nd), NotImplemented)
            for i in range(-5, 5):
                self.assertEqual(mv[i], items[i])

    # getitem with null format
    items = [1,2,3,4,5]
    ex = ndarray(items, shape=[5])
    nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO)
    for i in range(-5, 5):
        self.assertEqual(nd[i], items[i])

    # getitem with null shape/strides/format
    items = [1,2,3,4,5]
    ex = ndarray(items, shape=[5])
    nd = ndarray(ex, getbuf=PyBUF_SIMPLE)

    for i in range(-5, 5):
        self.assertEqual(nd[i], items[i])
def test_ndarray_index_setitem_single(self):
    # Single-item assignment on 1-D arrays, mirrored on a plain list so
    # the final contents can be compared.

    # assign single value
    for fmt, items, single_item in iter_format(5):
        nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
        for i in range(5):
            items[i] = single_item
            nd[i] = single_item
        self.assertEqual(nd.tolist(), items)

        self.assertRaises(IndexError, nd.__setitem__, -6, single_item)
        self.assertRaises(IndexError, nd.__setitem__, 5, single_item)

        if not is_memoryview_format(fmt):
            continue

        nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        for i in range(5):
            items[i] = single_item
            mv[i] = single_item
        self.assertEqual(mv.tolist(), items)

        self.assertRaises(IndexError, mv.__setitem__, -6, single_item)
        self.assertRaises(IndexError, mv.__setitem__, 5, single_item)

    # assign single value: lobject = robject
    for fmt, items, single_item in iter_format(5):
        nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
        for i in range(-5, 4):
            items[i] = items[i+1]
            nd[i] = nd[i+1]
        self.assertEqual(nd.tolist(), items)

        if not is_memoryview_format(fmt):
            continue

        nd = ndarray(items, shape=[5], format=fmt, flags=ND_WRITABLE)
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        for i in range(-5, 4):
            items[i] = items[i+1]
            mv[i] = mv[i+1]
        self.assertEqual(mv.tolist(), items)
def test_ndarray_index_getitem_multidim(self):
    """Item access on 3-D arrays (C and Fortran order, with and without
    suboffsets), checked against the nested-list references produced by
    carray()/farray(). Negative and positive indices are exercised at
    every dimension.
    """
    shape_t = (2, 3, 5)
    nitems = prod(shape_t)
    for shape in permutations(shape_t):
        fmt, items, _ = randitems(nitems)
        for flags in (0, ND_PIL):
            # C array
            nd = ndarray(items, shape=shape, format=fmt, flags=flags)
            lst = carray(items, shape)

            for i in range(-shape[0], shape[0]):
                self.assertEqual(lst[i], nd[i].tolist())
                for j in range(-shape[1], shape[1]):
                    self.assertEqual(lst[i][j], nd[i][j].tolist())
                    for k in range(-shape[2], shape[2]):
                        self.assertEqual(lst[i][j][k], nd[i][j][k])

            # Fortran array
            nd = ndarray(items, shape=shape, format=fmt,
                         flags=flags|ND_FORTRAN)
            lst = farray(items, shape)

            for i in range(-shape[0], shape[0]):
                self.assertEqual(lst[i], nd[i].tolist())
                for j in range(-shape[1], shape[1]):
                    self.assertEqual(lst[i][j], nd[i][j].tolist())
                    # BUG FIX: this was range(shape[2], shape[2]) — an
                    # empty range — so the innermost element check never
                    # ran for Fortran arrays. Now matches the C branch.
                    for k in range(-shape[2], shape[2]):
                        self.assertEqual(lst[i][j][k], nd[i][j][k])
def test_ndarray_sequence(self):
    """Iteration and membership on 1-D arrays; scalars reject 'in'."""
    scalar = ndarray(1, shape=())
    self.assertRaises(TypeError, eval, "1 in scalar", locals())
    sview = memoryview(scalar)
    self.assertEqual(sview, scalar)
    self.assertRaises(TypeError, eval, "1 in sview", locals())

    for fmt, items, _ in iter_format(5):
        nd = ndarray(items, shape=[5], format=fmt)
        # Iteration yields the items in order; each is a member.
        for expected, got in zip(items, nd):
            self.assertEqual(got, expected)
            self.assertTrue(got in nd)
        if is_memoryview_format(fmt):
            mv = memoryview(nd)
            for expected, got in zip(items, mv):
                self.assertEqual(got, expected)
                self.assertTrue(got in mv)
def test_ndarray_slice_invalid(self):
    """Invalid slice operations: non-exporter rvalues, zero steps,
    mismatched format/itemsize/ndim/shape, and errors from the
    _testbuffer module helpers. Failed assignments must leave the
    lvalue unchanged.
    """
    items = [1,2,3,4,5,6,7,8]

    # rvalue is not an exporter
    xl = ndarray(items, shape=[8], flags=ND_WRITABLE)
    ml = memoryview(xl)
    self.assertRaises(TypeError, xl.__setitem__, slice(0,8,1), items)
    self.assertRaises(TypeError, ml.__setitem__, slice(0,8,1), items)

    # rvalue is not a full exporter
    xl = ndarray(items, shape=[8], flags=ND_WRITABLE)
    ex = ndarray(items, shape=[8], flags=ND_WRITABLE)
    xr = ndarray(ex, getbuf=PyBUF_ND)
    self.assertRaises(BufferError, xl.__setitem__, slice(0,8,1), xr)

    # zero step
    nd = ndarray(items, shape=[8], format="L", flags=ND_WRITABLE)
    mv = memoryview(nd)
    self.assertRaises(ValueError, nd.__getitem__, slice(0,1,0))
    self.assertRaises(ValueError, mv.__getitem__, slice(0,1,0))

    nd = ndarray(items, shape=[2,4], format="L", flags=ND_WRITABLE)
    mv = memoryview(nd)

    self.assertRaises(ValueError, nd.__getitem__,
                      (slice(0,1,1), slice(0,1,0)))
    self.assertRaises(ValueError, nd.__getitem__,
                      (slice(0,1,0), slice(0,1,1)))
    self.assertRaises(TypeError, nd.__getitem__, "@%$")
    self.assertRaises(TypeError, nd.__getitem__, ("@%$", slice(0,1,1)))
    self.assertRaises(TypeError, nd.__getitem__, (slice(0,1,1), {}))

    # memoryview: not implemented
    self.assertRaises(NotImplementedError, mv.__getitem__,
                      (slice(0,1,1), slice(0,1,0)))
    self.assertRaises(TypeError, mv.__getitem__, "@%$")

    # differing format
    xl = ndarray(items, shape=[8], format="B", flags=ND_WRITABLE)
    xr = ndarray(items, shape=[8], format="b")
    ml = memoryview(xl)
    mr = memoryview(xr)
    self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), xr[7:8])
    self.assertEqual(xl.tolist(), items)
    self.assertRaises(ValueError, ml.__setitem__, slice(0,1,1), mr[7:8])
    self.assertEqual(ml.tolist(), items)

    # differing itemsize
    # BUG FIX: this section created 'yr' (format "L", itemsize
    # sizeof(long)) but then reused the stale 'xr'/'mr' from the
    # "differing format" section above, so a differing itemsize was
    # never actually exercised. Use 'yr' as the rvalue.
    xl = ndarray(items, shape=[8], format="B", flags=ND_WRITABLE)
    yr = ndarray(items, shape=[8], format="L")
    ml = memoryview(xl)
    mr = memoryview(yr)
    self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), yr[7:8])
    self.assertEqual(xl.tolist(), items)
    self.assertRaises(ValueError, ml.__setitem__, slice(0,1,1), mr[7:8])
    self.assertEqual(ml.tolist(), items)

    # differing ndim
    xl = ndarray(items, shape=[2, 4], format="b", flags=ND_WRITABLE)
    xr = ndarray(items, shape=[8], format="b")
    ml = memoryview(xl)
    mr = memoryview(xr)
    self.assertRaises(ValueError, xl.__setitem__, slice(0,1,1), xr[7:8])
    self.assertEqual(xl.tolist(), [[1,2,3,4], [5,6,7,8]])
    # Multi-dimensional memoryview slice assignment is not implemented.
    self.assertRaises(NotImplementedError, ml.__setitem__, slice(0,1,1),
                      mr[7:8])

    # differing shape
    xl = ndarray(items, shape=[8], format="b", flags=ND_WRITABLE)
    xr = ndarray(items, shape=[8], format="b")
    ml = memoryview(xl)
    mr = memoryview(xr)
    self.assertRaises(ValueError, xl.__setitem__, slice(0,2,1), xr[7:8])
    self.assertEqual(xl.tolist(), items)
    self.assertRaises(ValueError, ml.__setitem__, slice(0,2,1), mr[7:8])
    self.assertEqual(ml.tolist(), items)

    # _testbuffer.c module functions
    self.assertRaises(TypeError, slice_indices, slice(0,1,2), {})
    self.assertRaises(TypeError, slice_indices, "###########", 1)
    self.assertRaises(ValueError, slice_indices, slice(0,1,0), 4)

    # add_suboffsets() rejects an array that already has suboffsets and
    # a consumer created without full buffer information.
    x = ndarray(items, shape=[8], format="b", flags=ND_PIL)
    self.assertRaises(TypeError, x.add_suboffsets)

    ex = ndarray(items, shape=[8], format="B")
    x = ndarray(ex, getbuf=PyBUF_SIMPLE)
    self.assertRaises(TypeError, x.add_suboffsets)
def test_ndarray_slice_zero_shape(self):
    """Assigning an empty slice from an empty slice is a no-op."""
    values = [1,2,3,4,5,6,7,8,9,10,11,12]

    x = ndarray(values, shape=[12], format="L", flags=ND_WRITABLE)
    y = ndarray(values, shape=[12], format="L")
    x[4:4] = y[9:9]
    self.assertEqual(x.tolist(), values)

    ml = memoryview(x)
    mr = memoryview(y)
    self.assertEqual(ml, x)
    self.assertEqual(ml, y)
    ml[4:4] = mr[9:9]
    self.assertEqual(ml.tolist(), values)

    # Multi-dimensional: an empty column range on both sides.
    x = ndarray(values, shape=[3, 4], format="L", flags=ND_WRITABLE)
    y = ndarray(values, shape=[4, 3], format="L")
    x[1:2, 2:2] = y[1:2, 3:3]
    self.assertEqual(x.tolist(), carray(values, [3, 4]))
def test_ndarray_slice_multidim(self):
    # Random multi-dimensional slices must behave exactly like slicing
    # the nested-list reference: same result, or same exception class.
    shape_t = (2, 3, 5)
    ndim = len(shape_t)
    nitems = prod(shape_t)
    for shape in permutations(shape_t):
        fmt, items, _ = randitems(nitems)
        itemsize = struct.calcsize(fmt)
        for flags in (0, ND_PIL):
            nd = ndarray(items, shape=shape, format=fmt, flags=flags)
            lst = carray(items, shape)

            for slices in rslices_ndim(ndim, shape):

                listerr = None
                try:
                    sliced = multislice(lst, slices)
                except Exception as e:
                    listerr = e.__class__

                nderr = None
                try:
                    ndsliced = nd[slices]
                except Exception as e:
                    nderr = e.__class__

                if nderr or listerr:
                    # Both must fail with the same exception class.
                    self.assertIs(nderr, listerr)
                else:
                    self.assertEqual(ndsliced.tolist(), sliced)
def test_ndarray_slice_redundant_suboffsets(self):
    # Same scheme as test_ndarray_slice_multidim, but the arrays carry
    # redundant suboffsets added after construction.
    shape_t = (2, 3, 5, 2)
    ndim = len(shape_t)
    nitems = prod(shape_t)
    for shape in permutations(shape_t):
        fmt, items, _ = randitems(nitems)
        itemsize = struct.calcsize(fmt)

        nd = ndarray(items, shape=shape, format=fmt)
        nd.add_suboffsets()
        ex = ndarray(items, shape=shape, format=fmt)
        ex.add_suboffsets()
        mv = memoryview(ex)
        lst = carray(items, shape)

        for slices in rslices_ndim(ndim, shape):

            listerr = None
            try:
                sliced = multislice(lst, slices)
            except Exception as e:
                listerr = e.__class__

            nderr = None
            try:
                ndsliced = nd[slices]
            except Exception as e:
                nderr = e.__class__

            if nderr or listerr:
                # Both must fail with the same exception class.
                self.assertIs(nderr, listerr)
            else:
                self.assertEqual(ndsliced.tolist(), sliced)
def test_ndarray_slice_assign_single(self):
    # 1-D slice assignment, compared against list slice assignment.
    # Shape-changing assignments are legal on a list but must raise
    # ValueError on a fixed-size buffer.
    for fmt, items, _ in iter_format(5):
        for lslice in genslices(5):
            for rslice in genslices(5):
                for flags in (0, ND_PIL):

                    f = flags|ND_WRITABLE
                    nd = ndarray(items, shape=[5], format=fmt, flags=f)
                    ex = ndarray(items, shape=[5], format=fmt, flags=f)
                    mv = memoryview(ex)

                    lsterr = None
                    diff_structure = None
                    lst = items[:]
                    try:
                        lval = lst[lslice]
                        rval = lst[rslice]
                        lst[lslice] = lst[rslice]
                        # True when list assignment changed the length.
                        diff_structure = len(lval) != len(rval)
                    except Exception as e:
                        lsterr = e.__class__

                    nderr = None
                    try:
                        nd[lslice] = nd[rslice]
                    except Exception as e:
                        nderr = e.__class__

                    if diff_structure: # ndarray cannot change shape
                        self.assertIs(nderr, ValueError)
                    else:
                        self.assertEqual(nd.tolist(), lst)
                        self.assertIs(nderr, lsterr)

                    if not is_memoryview_format(fmt):
                        continue

                    mverr = None
                    try:
                        mv[lslice] = mv[rslice]
                    except Exception as e:
                        mverr = e.__class__

                    if diff_structure: # memoryview cannot change shape
                        self.assertIs(mverr, ValueError)
                    else:
                        self.assertEqual(mv.tolist(), lst)
                        self.assertEqual(mv, nd)
                        self.assertIs(mverr, lsterr)

                    self.verify(mv, obj=ex,
                                itemsize=nd.itemsize, fmt=fmt, readonly=0,
                                ndim=nd.ndim, shape=nd.shape, strides=nd.strides,
                                lst=nd.tolist())
def test_ndarray_slice_assign_multidim(self):
    # Random multi-dimensional slice assignments, compared against the
    # same assignment performed on the nested-list copy: same result,
    # or same exception class.
    shape_t = (2, 3, 5)
    ndim = len(shape_t)
    nitems = prod(shape_t)
    for shape in permutations(shape_t):
        fmt, items, _ = randitems(nitems)
        for flags in (0, ND_PIL):
            for _ in range(ITERATIONS):
                lslices, rslices = randslice_from_shape(ndim, shape)

                nd = ndarray(items, shape=shape, format=fmt,
                             flags=flags|ND_WRITABLE)
                lst = carray(items, shape)

                listerr = None
                try:
                    result = multislice_assign(lst, lst, lslices, rslices)
                except Exception as e:
                    listerr = e.__class__

                nderr = None
                try:
                    nd[lslices] = nd[rslices]
                except Exception as e:
                    nderr = e.__class__

                if nderr or listerr:
                    self.assertIs(nderr, listerr)
                else:
                    self.assertEqual(nd.tolist(), result)
def test_ndarray_random(self):
    # Randomized construction of valid arrays for every native format,
    # cross-checked against the suboffsets (ND_PIL) representation and,
    # if available, against NumPy.

    # construction of valid arrays
    for _ in range(ITERATIONS):
        for fmt in fmtdict['@']:
            itemsize = struct.calcsize(fmt)

            t = rand_structure(itemsize, True, maxdim=MAXDIM,
                               maxshape=MAXSHAPE)
            self.assertTrue(verify_structure(*t))
            items = randitems_from_structure(fmt, t)

            x = ndarray_from_structure(items, fmt, t)
            xlist = x.tolist()

            mv = memoryview(x)
            if is_memoryview_format(fmt):
                mvlist = mv.tolist()
                self.assertEqual(mvlist, xlist)

            if t[2] > 0:
                # ndim > 0: test against suboffsets representation.
                y = ndarray_from_structure(items, fmt, t, flags=ND_PIL)
                ylist = y.tolist()
                self.assertEqual(xlist, ylist)

                mv = memoryview(y)
                if is_memoryview_format(fmt):
                    self.assertEqual(mv, y)
                    mvlist = mv.tolist()
                    self.assertEqual(mvlist, ylist)

            if numpy_array:
                shape = t[3]
                if 0 in shape:
                    continue # http://projects.scipy.org/numpy/ticket/1910
                z = numpy_array_from_structure(items, fmt, t)
                self.verify(x, obj=None,
                            itemsize=z.itemsize, fmt=fmt, readonly=0,
                            ndim=z.ndim, shape=z.shape, strides=z.strides,
                            lst=z.tolist())
def test_ndarray_random_invalid(self):
    # Randomized construction of INVALID structures: ndarray must raise
    # some exception for each of them.

    # exceptions during construction of invalid arrays
    for _ in range(ITERATIONS):
        for fmt in fmtdict['@']:
            itemsize = struct.calcsize(fmt)

            t = rand_structure(itemsize, False, maxdim=MAXDIM,
                               maxshape=MAXSHAPE)
            self.assertFalse(verify_structure(*t))
            items = randitems_from_structure(fmt, t)

            nderr = False
            try:
                x = ndarray_from_structure(items, fmt, t)
            except Exception as e:
                nderr = e.__class__
            self.assertTrue(nderr)

            if numpy_array:
                numpy_err = False
                try:
                    y = numpy_array_from_structure(items, fmt, t)
                except Exception as e:
                    numpy_err = e.__class__

                # The NumPy assertion is deliberately disabled pending
                # the referenced upstream NumPy ticket.
                if 0: # http://projects.scipy.org/numpy/ticket/1910
                    self.assertTrue(numpy_err)
def test_ndarray_random_slice_assign(self):
    # Randomized valid slice assignments between two arrays with
    # shape-aligned slices, cross-checked against the suboffsets
    # (ND_PIL) representation and, if available, against NumPy.

    # valid slice assignments
    for _ in range(ITERATIONS):
        for fmt in fmtdict['@']:
            itemsize = struct.calcsize(fmt)

            lshape, rshape, lslices, rslices = \
                rand_aligned_slices(maxdim=MAXDIM, maxshape=MAXSHAPE)
            tl = rand_structure(itemsize, True, shape=lshape)
            tr = rand_structure(itemsize, True, shape=rshape)
            self.assertTrue(verify_structure(*tl))
            self.assertTrue(verify_structure(*tr))
            litems = randitems_from_structure(fmt, tl)
            ritems = randitems_from_structure(fmt, tr)

            xl = ndarray_from_structure(litems, fmt, tl)
            xr = ndarray_from_structure(ritems, fmt, tr)
            xl[lslices] = xr[rslices]
            xllist = xl.tolist()
            xrlist = xr.tolist()

            ml = memoryview(xl)
            mr = memoryview(xr)
            self.assertEqual(ml.tolist(), xllist)
            self.assertEqual(mr.tolist(), xrlist)

            if tl[2] > 0 and tr[2] > 0:
                # ndim > 0: test against suboffsets representation.
                yl = ndarray_from_structure(litems, fmt, tl, flags=ND_PIL)
                yr = ndarray_from_structure(ritems, fmt, tr, flags=ND_PIL)
                yl[lslices] = yr[rslices]
                yllist = yl.tolist()
                yrlist = yr.tolist()
                self.assertEqual(xllist, yllist)
                self.assertEqual(xrlist, yrlist)

                ml = memoryview(yl)
                mr = memoryview(yr)
                self.assertEqual(ml.tolist(), yllist)
                self.assertEqual(mr.tolist(), yrlist)

            if numpy_array:
                if 0 in lshape or 0 in rshape:
                    continue # http://projects.scipy.org/numpy/ticket/1910

                zl = numpy_array_from_structure(litems, fmt, tl)
                zr = numpy_array_from_structure(ritems, fmt, tr)
                zl[lslices] = zr[rslices]

                if not is_overlapping(tl) and not is_overlapping(tr):
                    # Slice assignment of overlapping structures
                    # is undefined in NumPy.
                    self.verify(xl, obj=None,
                                itemsize=zl.itemsize, fmt=fmt, readonly=0,
                                ndim=zl.ndim, shape=zl.shape,
                                strides=zl.strides, lst=zl.tolist())

                    self.verify(xr, obj=None,
                                itemsize=zr.itemsize, fmt=fmt, readonly=0,
                                ndim=zr.ndim, shape=zr.shape,
                                strides=zr.strides, lst=zr.tolist())
def test_ndarray_re_export(self):
    """Re-exporting a PIL-style array keeps its flags and suboffsets."""
    src = ndarray([1,2,3,4,5,6,7,8,9,10,11,12], shape=[3,4], flags=ND_PIL)
    ex = ndarray(src)
    self.assertTrue(ex.flags & ND_PIL)
    self.assertIs(ex.obj, src)
    self.assertEqual(ex.suboffsets, (0, -1))
    # A suboffset-based array is contiguous in no sense.
    for contig in (ex.c_contiguous, ex.f_contiguous, ex.contiguous):
        self.assertFalse(contig)
def test_ndarray_zero_shape(self):
    """A zero anywhere in the shape yields a logically empty array;
    the position of the zero determines the nesting of empties."""
    for flags in (0, ND_PIL):
        nd = ndarray([1,2,3], shape=[0], flags=flags)
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        self.assertEqual(nd.tolist(), [])
        self.assertEqual(mv.tolist(), [])

        cases = (
            ([0,3,3], []),
            ([3,0,3], [[], [], []]),
            ([3,3,0], [[[], [], []], [[], [], []], [[], [], []]]),
        )
        for shape, expected in cases:
            nd = ndarray([1,2,3], shape=shape, flags=flags)
            self.assertEqual(nd.tolist(), expected)
def test_ndarray_zero_strides(self):
    """A zero stride repeats the single element across the dimension."""
    for flags in (0, ND_PIL):
        nd = ndarray([1], shape=[5], strides=[0], flags=flags)
        mv = memoryview(nd)
        self.assertEqual(mv, nd)
        repeated = [1, 1, 1, 1, 1]
        self.assertEqual(nd.tolist(), repeated)
        self.assertEqual(mv.tolist(), repeated)
def test_ndarray_offset(self):
    """An offset shifts the visible window into the underlying buffer."""
    nd = ndarray(list(range(20)), shape=[3], offset=7)
    self.assertEqual(nd.offset, 7)
    self.assertEqual(nd.tolist(), list(range(7, 10)))
def test_ndarray_memoryview_from_buffer(self):
    """memoryview_from_buffer() equals the exporter, plain or PIL-style."""
    for flags in (0, ND_PIL):
        nd = ndarray([0, 1, 2], shape=[3], flags=flags)
        view = nd.memoryview_from_buffer()
        self.assertEqual(view, nd)
def test_ndarray_get_pointer(self):
    """get_pointer() dereferences each element of a 1-D array."""
    for flags in (0, ND_PIL):
        nd = ndarray([0, 1, 2], shape=[3], flags=flags)
        for idx in range(3):
            self.assertEqual(nd[idx], get_pointer(nd, [idx]))
def test_ndarray_tolist_null_strides(self):
    """tolist() works for a consumer whose exporter supplied no strides."""
    ex = ndarray(list(range(20)), shape=[2,2,5])
    consumer = ndarray(ex, getbuf=PyBUF_ND|PyBUF_FORMAT)
    self.assertEqual(consumer.tolist(), ex.tolist())
    view = memoryview(ex)
    self.assertEqual(view.tolist(), ex.tolist())
def test_ndarray_cmp_contig(self):
    """cmp_contig(): compare contiguous representations of buffers."""
    # Different content: unequal.
    self.assertFalse(cmp_contig(b"123", b"456"))
    # Same items but different logical shape: unequal.
    x = ndarray(list(range(12)), shape=[3,4])
    y = ndarray(list(range(12)), shape=[4,3])
    self.assertFalse(cmp_contig(x, y))
    # Equal to a bytes object with the same contents, in either
    # argument position.
    single = ndarray([1], shape=[1], format="B")
    self.assertTrue(cmp_contig(single, b'\x01'))
    self.assertTrue(cmp_contig(b'\x01', single))
def test_ndarray_hash(self):
    # hash(exporter) equals the hash of its contiguous bytes
    # representation; writable buffers are unhashable.

    a = array.array('L', [1,2,3])
    nd = ndarray(a)
    # Writable exporter: unhashable.
    self.assertRaises(ValueError, hash, nd)

    # one-dimensional
    b = bytes(list(range(12)))

    nd = ndarray(list(range(12)), shape=[12])
    self.assertEqual(hash(nd), hash(b))

    # C-contiguous
    nd = ndarray(list(range(12)), shape=[3,4])
    self.assertEqual(hash(nd), hash(b))

    nd = ndarray(list(range(12)), shape=[3,2,2])
    self.assertEqual(hash(nd), hash(b))

    # Fortran contiguous
    b = bytes(transpose(list(range(12)), shape=[4,3]))
    nd = ndarray(list(range(12)), shape=[3,4], flags=ND_FORTRAN)
    self.assertEqual(hash(nd), hash(b))

    b = bytes(transpose(list(range(12)), shape=[2,3,2]))
    nd = ndarray(list(range(12)), shape=[2,3,2], flags=ND_FORTRAN)
    self.assertEqual(hash(nd), hash(b))

    # suboffsets
    b = bytes(list(range(12)))
    nd = ndarray(list(range(12)), shape=[2,2,3], flags=ND_PIL)
    self.assertEqual(hash(nd), hash(b))

    # non-byte formats
    nd = ndarray(list(range(12)), shape=[2,2,3], format='L')
    self.assertEqual(hash(nd), hash(nd.tobytes()))
def test_py_buffer_to_contiguous(self):
    # Exercise _testbuffer.py_buffer_to_contiguous(): convert a buffer
    # to a contiguous bytes object in 'C', 'F' (Fortran) or 'A' (any)
    # order, under a variety of getbuffer requests.

    # The requests are used in _testbuffer.c:py_buffer_to_contiguous
    # to generate buffers without full information for testing.
    requests = (
        # distinct flags
        PyBUF_INDIRECT, PyBUF_STRIDES, PyBUF_ND, PyBUF_SIMPLE,
        # compound requests
        PyBUF_FULL, PyBUF_FULL_RO,
        PyBUF_RECORDS, PyBUF_RECORDS_RO,
        PyBUF_STRIDED, PyBUF_STRIDED_RO,
        PyBUF_CONTIG, PyBUF_CONTIG_RO,
    )

    # no buffer interface
    self.assertRaises(TypeError, py_buffer_to_contiguous, {}, 'F',
                      PyBUF_FULL_RO)

    # scalar, read-only request
    nd = ndarray(9, shape=(), format="L", flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        for request in requests:
            b = py_buffer_to_contiguous(nd, order, request)
            self.assertEqual(b, nd.tobytes())

    # zeros in shape
    nd = ndarray([1], shape=[0], format="L", flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        for request in requests:
            b = py_buffer_to_contiguous(nd, order, request)
            self.assertEqual(b, b'')

    nd = ndarray(list(range(8)), shape=[2, 0, 7], format="L",
                 flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        for request in requests:
            b = py_buffer_to_contiguous(nd, order, request)
            self.assertEqual(b, b'')

    ### One-dimensional arrays are trivial, since Fortran and C order
    ### are the same.

    # one-dimensional
    for f in [0, ND_FORTRAN]:
        nd = ndarray([1], shape=[1], format="h", flags=f|ND_WRITABLE)
        ndbytes = nd.tobytes()
        for order in ['C', 'F', 'A']:
            for request in requests:
                b = py_buffer_to_contiguous(nd, order, request)
                self.assertEqual(b, ndbytes)

        nd = ndarray([1, 2, 3], shape=[3], format="b", flags=f|ND_WRITABLE)
        ndbytes = nd.tobytes()
        for order in ['C', 'F', 'A']:
            for request in requests:
                b = py_buffer_to_contiguous(nd, order, request)
                self.assertEqual(b, ndbytes)

    # one-dimensional, non-contiguous input
    nd = ndarray([1, 2, 3], shape=[2], strides=[2], flags=ND_WRITABLE)
    ndbytes = nd.tobytes()
    for order in ['C', 'F', 'A']:
        for request in [PyBUF_STRIDES, PyBUF_FULL]:
            b = py_buffer_to_contiguous(nd, order, request)
            self.assertEqual(b, ndbytes)

    nd = nd[::-1]
    ndbytes = nd.tobytes()
    for order in ['C', 'F', 'A']:
        for request in requests:
            try:
                b = py_buffer_to_contiguous(nd, order, request)
            except BufferError:
                # The exporter rejected this request; skip it.
                continue
            self.assertEqual(b, ndbytes)

    ###
    ### Multi-dimensional arrays:
    ###
    ### The goal here is to preserve the logical representation of the
    ### input array but change the physical representation if necessary.
    ###
    ### _testbuffer example:
    ### ====================
    ###
    ### C input array:
    ### --------------
    ### >>> nd = ndarray(list(range(12)), shape=[3, 4])
    ### >>> nd.tolist()
    ### [[0, 1, 2, 3],
    ###  [4, 5, 6, 7],
    ###  [8, 9, 10, 11]]
    ###
    ### Fortran output:
    ### ---------------
    ### >>> py_buffer_to_contiguous(nd, 'F', PyBUF_FULL_RO)
    ### >>> b'\x00\x04\x08\x01\x05\t\x02\x06\n\x03\x07\x0b'
    ###
    ### The return value corresponds to this input list for
    ### _testbuffer's ndarray:
    ### >>> nd = ndarray([0,4,8,1,5,9,2,6,10,3,7,11], shape=[3,4],
    ###                  flags=ND_FORTRAN)
    ### >>> nd.tolist()
    ### [[0, 1, 2, 3],
    ###  [4, 5, 6, 7],
    ###  [8, 9, 10, 11]]
    ###
    ### The logical array is the same, but the values in memory are now
    ### in Fortran order.
    ###
    ### NumPy example:
    ### ==============
    ### _testbuffer's ndarray takes lists to initialize the memory.
    ### Here's the same sequence in NumPy:
    ###
    ### C input:
    ### --------
    ### >>> nd = ndarray(buffer=bytearray(list(range(12))),
    ###                  shape=[3, 4], dtype='B')
    ### >>> nd
    ### array([[ 0,  1,  2,  3],
    ###        [ 4,  5,  6,  7],
    ###        [ 8,  9, 10, 11]], dtype=uint8)
    ###
    ### Fortran output:
    ### ---------------
    ### >>> fortran_buf = nd.tostring(order='F')
    ### >>> fortran_buf
    ### b'\x00\x04\x08\x01\x05\t\x02\x06\n\x03\x07\x0b'
    ###
    ### >>> nd = ndarray(buffer=fortran_buf, shape=[3, 4],
    ###                  dtype='B', order='F')
    ###
    ### >>> nd
    ### array([[ 0,  1,  2,  3],
    ###        [ 4,  5,  6,  7],
    ###        [ 8,  9, 10, 11]], dtype=uint8)
    ###

    # multi-dimensional, contiguous input
    lst = list(range(12))
    for f in [0, ND_FORTRAN]:
        nd = ndarray(lst, shape=[3, 4], flags=f|ND_WRITABLE)
        if numpy_array:
            na = numpy_array(buffer=bytearray(lst),
                             shape=[3, 4], dtype='B',
                             order='C' if f == 0 else 'F')

        # 'C' request
        if f == ND_FORTRAN: # 'F' to 'C'
            x = ndarray(transpose(lst, [4, 3]), shape=[3, 4],
                        flags=ND_WRITABLE)
            expected = x.tobytes()
        else:
            expected = nd.tobytes()
        for request in requests:
            try:
                b = py_buffer_to_contiguous(nd, 'C', request)
            except BufferError:
                continue

            self.assertEqual(b, expected)

            # Check that output can be used as the basis for constructing
            # a C array that is logically identical to the input array.
            y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
            self.assertEqual(memoryview(y), memoryview(nd))

            if numpy_array:
                self.assertEqual(b, na.tostring(order='C'))

        # 'F' request
        if f == 0: # 'C' to 'F'
            x = ndarray(transpose(lst, [3, 4]), shape=[4, 3],
                        flags=ND_WRITABLE)
        else:
            x = ndarray(lst, shape=[3, 4], flags=ND_WRITABLE)
        expected = x.tobytes()
        for request in [PyBUF_FULL, PyBUF_FULL_RO, PyBUF_INDIRECT,
                        PyBUF_STRIDES, PyBUF_ND]:
            try:
                b = py_buffer_to_contiguous(nd, 'F', request)
            except BufferError:
                continue
            self.assertEqual(b, expected)

            # Check that output can be used as the basis for constructing
            # a Fortran array that is logically identical to the input array.
            y = ndarray([v for v in b], shape=[3, 4], flags=ND_FORTRAN|ND_WRITABLE)
            self.assertEqual(memoryview(y), memoryview(nd))

            if numpy_array:
                self.assertEqual(b, na.tostring(order='F'))

        # 'A' request
        if f == ND_FORTRAN:
            x = ndarray(lst, shape=[3, 4], flags=ND_WRITABLE)
            expected = x.tobytes()
        else:
            expected = nd.tobytes()
        for request in [PyBUF_FULL, PyBUF_FULL_RO, PyBUF_INDIRECT,
                        PyBUF_STRIDES, PyBUF_ND]:
            try:
                b = py_buffer_to_contiguous(nd, 'A', request)
            except BufferError:
                continue

            self.assertEqual(b, expected)

            # Check that output can be used as the basis for constructing
            # an array with order=f that is logically identical to the input
            # array.
            y = ndarray([v for v in b], shape=[3, 4], flags=f|ND_WRITABLE)
            self.assertEqual(memoryview(y), memoryview(nd))

            if numpy_array:
                self.assertEqual(b, na.tostring(order='A'))

    # multi-dimensional, non-contiguous input
    nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE|ND_PIL)

    # 'C'
    b = py_buffer_to_contiguous(nd, 'C', PyBUF_FULL_RO)
    self.assertEqual(b, nd.tobytes())
    y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
    self.assertEqual(memoryview(y), memoryview(nd))

    # 'F'
    b = py_buffer_to_contiguous(nd, 'F', PyBUF_FULL_RO)
    x = ndarray(transpose(lst, [3, 4]), shape=[4, 3], flags=ND_WRITABLE)
    self.assertEqual(b, x.tobytes())
    y = ndarray([v for v in b], shape=[3, 4], flags=ND_FORTRAN|ND_WRITABLE)
    self.assertEqual(memoryview(y), memoryview(nd))

    # 'A'
    b = py_buffer_to_contiguous(nd, 'A', PyBUF_FULL_RO)
    self.assertEqual(b, nd.tobytes())
    y = ndarray([v for v in b], shape=[3, 4], flags=ND_WRITABLE)
    self.assertEqual(memoryview(y), memoryview(nd))
def test_memoryview_construction(self):
    """Exercise the memoryview construction paths: from a PEP-3118
    compliant exporter, from another memoryview, and via the C-level
    PyMemoryView_FromBuffer() (through ndarray.memoryview_from_buffer()),
    for C-contiguous, Fortran-contiguous and PIL-style (suboffset)
    layouts; finish with constructor error cases and the ndim > 64 limit.
    """
    # (items, shape) fixtures: scalar (ndim 0), 1-D, and 3-D.
    items_shape = [(9, []), ([1,2,3], [3]), (list(range(2*3*5)), [2,3,5])]

    # NumPy style, C-contiguous:
    for items, shape in items_shape:

        # From PEP-3118 compliant exporter:
        ex = ndarray(items, shape=shape)
        m = memoryview(ex)
        self.assertTrue(m.c_contiguous)
        self.assertTrue(m.contiguous)
        ndim = len(shape)
        strides = strides_from_shape(ndim, shape, 1, 'C')
        lst = carray(items, shape)
        self.verify(m, obj=ex,
                    itemsize=1, fmt='B', readonly=1,
                    ndim=ndim, shape=shape, strides=strides,
                    lst=lst)

        # From memoryview:
        m2 = memoryview(m)
        self.verify(m2, obj=ex,
                    itemsize=1, fmt='B', readonly=1,
                    ndim=ndim, shape=shape, strides=strides,
                    lst=lst)

        # PyMemoryView_FromBuffer(): no strides
        nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
        self.assertEqual(nd.strides, ())
        m = nd.memoryview_from_buffer()
        # The view must reconstruct the strides of the contiguous buffer.
        self.verify(m, obj=None,
                    itemsize=1, fmt='B', readonly=1,
                    ndim=ndim, shape=shape, strides=strides,
                    lst=lst)

        # PyMemoryView_FromBuffer(): no format, shape, strides
        nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
        self.assertEqual(nd.format, '')
        self.assertEqual(nd.shape, ())
        self.assertEqual(nd.strides, ())
        m = nd.memoryview_from_buffer()

        # With no shape information the view is a flat 1-D byte view.
        lst = [items] if ndim == 0 else items
        self.verify(m, obj=None,
                    itemsize=1, fmt='B', readonly=1,
                    ndim=1, shape=[ex.nbytes], strides=(1,),
                    lst=lst)

    # NumPy style, Fortran contiguous:
    for items, shape in items_shape:

        # From PEP-3118 compliant exporter:
        ex = ndarray(items, shape=shape, flags=ND_FORTRAN)
        m = memoryview(ex)
        self.assertTrue(m.f_contiguous)
        self.assertTrue(m.contiguous)
        ndim = len(shape)
        strides = strides_from_shape(ndim, shape, 1, 'F')
        lst = farray(items, shape)
        self.verify(m, obj=ex,
                    itemsize=1, fmt='B', readonly=1,
                    ndim=ndim, shape=shape, strides=strides,
                    lst=lst)

        # From memoryview:
        m2 = memoryview(m)
        self.verify(m2, obj=ex,
                    itemsize=1, fmt='B', readonly=1,
                    ndim=ndim, shape=shape, strides=strides,
                    lst=lst)

    # PIL style:
    for items, shape in items_shape[1:]:

        # From PEP-3118 compliant exporter:
        ex = ndarray(items, shape=shape, flags=ND_PIL)
        m = memoryview(ex)
        ndim = len(shape)
        lst = carray(items, shape)
        self.verify(m, obj=ex,
                    itemsize=1, fmt='B', readonly=1,
                    ndim=ndim, shape=shape, strides=ex.strides,
                    lst=lst)

        # From memoryview:
        m2 = memoryview(m)
        self.verify(m2, obj=ex,
                    itemsize=1, fmt='B', readonly=1,
                    ndim=ndim, shape=shape, strides=ex.strides,
                    lst=lst)

    # Invalid number of arguments:
    self.assertRaises(TypeError, memoryview, b'9', 'x')
    # Not a buffer provider:
    self.assertRaises(TypeError, memoryview, {})
    # Non-compliant buffer provider:
    ex = ndarray([1,2,3], shape=[3])
    nd = ndarray(ex, getbuf=PyBUF_SIMPLE)
    self.assertRaises(BufferError, memoryview, nd)
    nd = ndarray(ex, getbuf=PyBUF_CONTIG_RO|PyBUF_FORMAT)
    self.assertRaises(BufferError, memoryview, nd)

    # ndim > 64
    nd = ndarray([1]*128, shape=[1]*128, format='L')
    self.assertRaises(ValueError, memoryview, nd)
    self.assertRaises(ValueError, nd.memoryview_from_buffer)
    self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'C')
    self.assertRaises(ValueError, get_contiguous, nd, PyBUF_READ, 'F')
    self.assertRaises(ValueError, get_contiguous, nd[::-1], PyBUF_READ, 'C')
def test_memoryview_cast_zero_shape(self):
    """Casting a multidimensional buffer whose shape contains a zero
    must raise TypeError.

    Casts are undefined if buffer is multidimensional and shape
    contains zeros. These arrays are regarded as C-contiguous by
    Numpy and PyBuffer_GetContiguous(), so they are not caught by
    the test for C-contiguity in memory_cast().
    """
    items = [1,2,3]
    # Place the zero in each dimension in turn.  The original list
    # repeated [0,3,3] twice and never exercised a trailing zero.
    for shape in ([0,3,3], [3,0,3], [3,3,0]):
        ex = ndarray(items, shape=shape)
        self.assertTrue(ex.c_contiguous)
        msrc = memoryview(ex)
        self.assertRaises(TypeError, msrc.cast, 'c')

    # Monodimensional empty view can be cast (issue #19014).
    for fmt, _, _ in iter_format(1, 'memoryview'):
        msrc = memoryview(b'')
        m = msrc.cast(fmt)
        self.assertEqual(m.tobytes(), b'')
        self.assertEqual(m.tolist(), [])
def test_memoryview_struct_module(self):
    """memoryview item assignment must agree with struct.pack_into():
    the same value stored both ways reads back identically, and for each
    probe value either both mechanisms fail or both succeed with the
    same stored result.
    """
    # Helpers exercising the __int__ vs. __index__ conversion paths.
    class INT(object):
        def __init__(self, val):
            self.val = val
        def __int__(self):
            return self.val

    class IDX(object):
        def __init__(self, val):
            self.val = val
        def __index__(self):
            return self.val

    def f(): return 7

    # Probe values covering numbers, containers, singletons, bytes-like
    # objects, strings and callables.
    values = [INT(9), IDX(9),
              2.2+3j, Decimal("-21.1"), 12.2, Fraction(5, 2),
              [1,2,3], {4,5,6}, {7:8}, (), (9,),
              True, False, None, NotImplemented,
              b'a', b'abc', bytearray(b'a'), bytearray(b'abc'),
              'a', 'abc', r'a', r'abc',
              f, lambda x: x]

    for fmt, items, item in iter_format(10, 'memoryview'):
        ex = ndarray(items, shape=[10], format=fmt, flags=ND_WRITABLE)
        nd = ndarray(items, shape=[10], format=fmt, flags=ND_WRITABLE)
        m = memoryview(ex)

        # Store the canonical item both ways; results must match.
        struct.pack_into(fmt, nd, 0, item)
        m[0] = item
        self.assertEqual(m[0], nd[0])

        itemsize = struct.calcsize(fmt)
        # 'P' (void pointer) values are address-dependent; skip probing.
        if 'P' in fmt:
            continue

        for v in values:
            struct_err = None
            try:
                struct.pack_into(fmt, nd, itemsize, v)
            except struct.error:
                struct_err = struct.error

            mv_err = None
            try:
                m[1] = v
            except (TypeError, ValueError) as e:
                mv_err = e.__class__

            # Either both reject the value or both accept it and agree.
            if struct_err or mv_err:
                self.assertIsNot(struct_err, None)
                self.assertIsNot(mv_err, None)
            else:
                self.assertEqual(m[1], nd[1])
def test_memoryview_cast_zero_strides(self):
    """Casting a buffer whose strides contain zeros must raise TypeError.

    Casts are undefined if strides contains zeros. These arrays are
    (sometimes!) regarded as C-contiguous by Numpy, but not by
    PyBuffer_GetContiguous().
    """
    exporter = ndarray([1,2,3], shape=[3], strides=[0])
    self.assertFalse(exporter.c_contiguous)
    src_view = memoryview(exporter)
    self.assertRaises(TypeError, src_view.cast, 'c')
def test_memoryview_cast_invalid(self):
    """Every way memoryview.cast() can fail: bad source/destination
    formats, bad shape arguments, size mismatches, the ndim > 64 limit,
    and Py_ssize_t overflow in product(shape) * itemsize.
    """
    # invalid format
    for sfmt in NON_BYTE_FORMAT:
        sformat = '@' + sfmt if randrange(2) else sfmt
        ssize = struct.calcsize(sformat)
        for dfmt in NON_BYTE_FORMAT:
            dformat = '@' + dfmt if randrange(2) else dfmt
            dsize = struct.calcsize(dformat)
            ex = ndarray(list(range(32)), shape=[32//ssize], format=sformat)
            msrc = memoryview(ex)
            self.assertRaises(TypeError, msrc.cast, dfmt, [32//dsize])

    # NOTE(review): the [32//dsize] below reuses dsize left over from the
    # loop above; for these error cases the shape value is irrelevant.
    for sfmt, sitems, _ in iter_format(1):
        ex = ndarray(sitems, shape=[1], format=sfmt)
        msrc = memoryview(ex)
        for dfmt, _, _ in iter_format(1):
            if (not is_memoryview_format(sfmt) or
                not is_memoryview_format(dfmt)):
                self.assertRaises(ValueError, msrc.cast, dfmt,
                                  [32//dsize])
            else:
                if not is_byte_format(sfmt) and not is_byte_format(dfmt):
                    self.assertRaises(TypeError, msrc.cast, dfmt,
                                      [32//dsize])

    # invalid shape
    size_h = struct.calcsize('h')
    size_d = struct.calcsize('d')
    ex = ndarray(list(range(2*2*size_d)), shape=[2,2,size_d], format='h')
    msrc = memoryview(ex)
    self.assertRaises(TypeError, msrc.cast, shape=[2,2,size_h], format='d')

    ex = ndarray(list(range(120)), shape=[1,2,3,4,5])
    m = memoryview(ex)

    # incorrect number of args
    self.assertRaises(TypeError, m.cast)
    self.assertRaises(TypeError, m.cast, 1, 2, 3)

    # incorrect dest format type
    self.assertRaises(TypeError, m.cast, {})

    # incorrect dest format
    self.assertRaises(ValueError, m.cast, "X")
    self.assertRaises(ValueError, m.cast, "@X")
    self.assertRaises(ValueError, m.cast, "@XY")

    # dest format not implemented
    self.assertRaises(ValueError, m.cast, "=B")
    self.assertRaises(ValueError, m.cast, "!L")
    self.assertRaises(ValueError, m.cast, "<P")
    self.assertRaises(ValueError, m.cast, ">l")
    self.assertRaises(ValueError, m.cast, "BI")
    self.assertRaises(ValueError, m.cast, "xBI")

    # src format not implemented
    ex = ndarray([(1,2), (3,4)], shape=[2], format="II")
    m = memoryview(ex)
    self.assertRaises(NotImplementedError, m.__getitem__, 0)
    self.assertRaises(NotImplementedError, m.__setitem__, 0, 8)
    self.assertRaises(NotImplementedError, m.tolist)

    # incorrect shape type
    ex = ndarray(list(range(120)), shape=[1,2,3,4,5])
    m = memoryview(ex)
    self.assertRaises(TypeError, m.cast, "B", shape={})

    # incorrect shape elements
    ex = ndarray(list(range(120)), shape=[2*3*4*5])
    m = memoryview(ex)
    self.assertRaises(OverflowError, m.cast, "B", shape=[2**64])
    self.assertRaises(ValueError, m.cast, "B", shape=[-1])
    self.assertRaises(ValueError, m.cast, "B", shape=[2,3,4,5,6,7,-1])
    self.assertRaises(ValueError, m.cast, "B", shape=[2,3,4,5,6,7,0])
    self.assertRaises(TypeError, m.cast, "B", shape=[2,3,4,5,6,7,'x'])

    # N-D -> N-D cast (neither side is 1-D)
    ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3,5,7,11])
    m = memoryview(ex)
    self.assertRaises(TypeError, m.cast, "I", shape=[2,3,4,5])

    # cast with ndim > 64
    nd = ndarray(list(range(128)), shape=[128], format='I')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.cast, 'I', [1]*128)

    # view->len not a multiple of itemsize
    ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3*5*7*11])
    m = memoryview(ex)
    self.assertRaises(TypeError, m.cast, "I", shape=[2,3,4,5])

    # product(shape) * itemsize != buffer size
    ex = ndarray(list([9 for _ in range(3*5*7*11)]), shape=[3*5*7*11])
    m = memoryview(ex)
    self.assertRaises(TypeError, m.cast, "B", shape=[2,3,4,5])

    # product(shape) * itemsize overflow
    nd = ndarray(list(range(128)), shape=[128], format='I')
    m1 = memoryview(nd)
    nd = ndarray(list(range(128)), shape=[128], format='B')
    m2 = memoryview(nd)
    # Branch on the platform's Py_ssize_t width (64- vs. 32-bit).
    if sys.maxsize == 2**63-1:
        self.assertRaises(TypeError, m1.cast, 'B',
                          [7, 7, 73, 127, 337, 92737, 649657])
        self.assertRaises(ValueError, m1.cast, 'B',
                          [2**20, 2**20, 2**10, 2**10, 2**3])
        self.assertRaises(ValueError, m2.cast, 'I',
                          [2**20, 2**20, 2**10, 2**10, 2**1])
    else:
        self.assertRaises(TypeError, m1.cast, 'B',
                          [1, 2147483647])
        self.assertRaises(ValueError, m1.cast, 'B',
                          [2**10, 2**10, 2**5, 2**5, 2**1])
        self.assertRaises(ValueError, m2.cast, 'I',
                          [2**10, 2**10, 2**5, 2**3, 2**1])
def test_memoryview_cast(self):
    """Round-trip memoryview.cast(): any format -> byte view ('B', 'b',
    'c') -> back to the original format, plus ndim 0 <-> ndim 1 casts.
    """
    # For each byte-level format, a function computing the expected
    # item list from the exporter's raw bytes.
    bytespec = (
        ('B', lambda ex: list(ex.tobytes())),
        ('b', lambda ex: [x-256 if x > 127 else x for x in list(ex.tobytes())]),
        ('c', lambda ex: [bytes(chr(x), 'latin-1') for x in list(ex.tobytes())]),
    )

    def iter_roundtrip(ex, m, items, fmt):
        # Cast m to each byte format and back, verifying both views.
        srcsize = struct.calcsize(fmt)
        for bytefmt, to_bytelist in bytespec:

            m2 = m.cast(bytefmt)
            lst = to_bytelist(ex)
            self.verify(m2, obj=ex,
                        itemsize=1, fmt=bytefmt, readonly=0,
                        ndim=1, shape=[31*srcsize], strides=(1,),
                        lst=lst, cast=True)

            m3 = m2.cast(fmt)
            self.assertEqual(m3, ex)
            lst = ex.tolist()
            self.verify(m3, obj=ex,
                        itemsize=srcsize, fmt=fmt, readonly=0,
                        ndim=1, shape=[31], strides=(srcsize,),
                        lst=lst, cast=True)

    # cast from ndim = 0 to ndim = 1
    srcsize = struct.calcsize('I')
    ex = ndarray(9, shape=[], format='I')
    destitems, destshape = cast_items(ex, 'B', 1)
    m = memoryview(ex)
    m2 = m.cast('B')
    self.verify(m2, obj=ex,
                itemsize=1, fmt='B', readonly=1,
                ndim=1, shape=destshape, strides=(1,),
                lst=destitems, cast=True)

    # cast from ndim = 1 to ndim = 0
    destsize = struct.calcsize('I')
    ex = ndarray([9]*destsize, shape=[destsize], format='B')
    destitems, destshape = cast_items(ex, 'I', destsize, shape=[])
    m = memoryview(ex)
    m2 = m.cast('I', shape=[])
    self.verify(m2, obj=ex,
                itemsize=destsize, fmt='I', readonly=1,
                ndim=0, shape=(), strides=(),
                lst=destitems, cast=True)

    # array.array: roundtrip to/from bytes
    for fmt, items, _ in iter_format(31, 'array'):
        ex = array.array(fmt, items)
        m = memoryview(ex)
        iter_roundtrip(ex, m, items, fmt)

    # ndarray: roundtrip to/from bytes
    for fmt, items, _ in iter_format(31, 'memoryview'):
        ex = ndarray(items, shape=[31], format=fmt, flags=ND_WRITABLE)
        m = memoryview(ex)
        iter_roundtrip(ex, m, items, fmt)
def test_memoryview_cast_1D_ND(self):
    """Randomized 1D <-> ND casts, verified against cast_items().

    Cast between C-contiguous buffers. At least one buffer must
    be 1D, at least one format must be 'c', 'b' or 'B'.
    """
    for _tshape in gencastshapes():
        for char in fmtdict['@']:
            # Randomly prefix the target format with '@'.
            tfmt = ('', '@')[randrange(2)] + char
            tsize = struct.calcsize(tfmt)
            n = prod(_tshape) * tsize
            obj = 'memoryview' if is_byte_format(tfmt) else 'bytefmt'
            for fmt, items, _ in iter_format(n, obj):
                size = struct.calcsize(fmt)
                shape = [n] if n > 0 else []
                tshape = _tshape + [size]

                ex = ndarray(items, shape=shape, format=fmt)
                m = memoryview(ex)

                titems, tshape = cast_items(ex, tfmt, tsize, shape=tshape)

                # titems is None when cast_items() deems the cast invalid.
                if titems is None:
                    self.assertRaises(TypeError, m.cast, tfmt, tshape)
                    continue
                if titems == 'nan':
                    continue # NaNs in lists are a recipe for trouble.

                # 1D -> ND
                nd = ndarray(titems, shape=tshape, format=tfmt)

                m2 = m.cast(tfmt, shape=tshape)
                ndim = len(tshape)
                strides = nd.strides
                lst = nd.tolist()
                self.verify(m2, obj=ex,
                            itemsize=tsize, fmt=tfmt, readonly=1,
                            ndim=ndim, shape=tshape, strides=strides,
                            lst=lst, cast=True)

                # ND -> 1D (with and without an explicit shape argument)
                m3 = m2.cast(fmt)
                m4 = m2.cast(fmt, shape=shape)
                ndim = len(shape)
                strides = ex.strides
                lst = ex.tolist()

                self.verify(m3, obj=ex,
                            itemsize=size, fmt=fmt, readonly=1,
                            ndim=ndim, shape=shape, strides=strides,
                            lst=lst, cast=True)
                self.verify(m4, obj=ex,
                            itemsize=size, fmt=fmt, readonly=1,
                            ndim=ndim, shape=shape, strides=strides,
                            lst=lst, cast=True)
def test_memoryview_tolist(self):
    """Spot-check memoryview.tolist() against array.array and ndarray.

    Most tolist() coverage lives in self.verify() and related helpers.
    """
    arr = array.array('h', list(range(-6, 6)))
    view = memoryview(arr)
    self.assertEqual(view, arr)
    self.assertEqual(view.tolist(), arr.tolist())

    # A sliced view still matches an identically sliced array.
    arr = arr[2::3]
    view = view[2::3]
    self.assertEqual(view, arr)
    self.assertEqual(view.tolist(), arr.tolist())

    # Multi-dimensional exporter.
    exporter = ndarray(list(range(2*3*5*7*11)), shape=[11,2,7,3,5], format='L')
    view = memoryview(exporter)
    self.assertEqual(view.tolist(), exporter.tolist())

    # Formats for which item unpacking is not implemented.
    unsupported = [
        ndarray([(2, 5), (7, 11)], shape=[2], format='lh'),
        ndarray([b'12345'], shape=[1], format="s"),
        ndarray([b"a",b"b",b"c",b"d",b"e",b"f"], shape=[2,3], format='s'),
    ]
    for exporter in unsupported:
        view = memoryview(exporter)
        self.assertRaises(NotImplementedError, view.tolist)
def test_memoryview_repr(self):
m = memoryview(bytearray(9))
r = m.__repr__()
self.assertTrue(r.startswith("<memory"))
m.release()
r = m.__repr__()
self.assertTrue(r.startswith("<released"))
def test_memoryview_sequence(self):
    """Membership ('in') works on 1-D float views, including infinities;
    on an ndim = 0 view the 'in' operator raises TypeError.
    """
    for fmt in ('d', 'f'):
        inf = float(3e400)  # the literal 3e400 overflows to +infinity
        ex = array.array(fmt, [1.0, inf, 3.0])
        m = memoryview(ex)
        self.assertIn(1.0, m)
        self.assertIn(5e700, m)  # 5e700 is also +infinity, so it matches inf
        self.assertIn(3.0, m)

    # ndim = 0: membership testing is rejected.
    ex = ndarray(9.0, [], format='f')
    m = memoryview(ex)
    self.assertRaises(TypeError, eval, "9.0 in m", locals())
def test_memoryview_index(self):
    """Indexing a memoryview: ndim = 0 access via () and ..., integer
    range checks, invalid index types, and the unimplemented
    multidimensional sub-view case.
    """
    # ndim = 0: only m[()] and m[...] are valid, integers raise.
    ex = ndarray(12.5, shape=[], format='d')
    m = memoryview(ex)
    self.assertEqual(m[()], 12.5)
    self.assertEqual(m[...], m)
    self.assertEqual(m[...], ex)
    self.assertRaises(TypeError, m.__getitem__, 0)

    # Multi-item struct format: item access not implemented.
    ex = ndarray((1,2,3), shape=[], format='iii')
    m = memoryview(ex)
    self.assertRaises(NotImplementedError, m.__getitem__, ())

    # range
    ex = ndarray(list(range(7)), shape=[7], flags=ND_WRITABLE)
    m = memoryview(ex)

    self.assertRaises(IndexError, m.__getitem__, 2**64)
    self.assertRaises(TypeError, m.__getitem__, 2.0)
    self.assertRaises(TypeError, m.__getitem__, 0.0)

    # out of bounds
    self.assertRaises(IndexError, m.__getitem__, -8)
    self.assertRaises(IndexError, m.__getitem__, 8)

    # Not implemented: multidimensional sub-views
    ex = ndarray(list(range(12)), shape=[3,4], flags=ND_WRITABLE)
    m = memoryview(ex)

    self.assertRaises(NotImplementedError, m.__getitem__, 0)
    self.assertRaises(NotImplementedError, m.__setitem__, 0, 9)
    self.assertRaises(NotImplementedError, m.__getitem__, 0)
def test_memoryview_assign(self):
    """Item assignment on a memoryview: ndim = 0, read-only rejection,
    index range checks, pack_single() success for every native format,
    and pack_single() error cases (range overflow, bad types,
    unimplemented formats, multidimensional sub-views).
    """
    # ndim = 0
    ex = ndarray(12.5, shape=[], format='f', flags=ND_WRITABLE)
    m = memoryview(ex)
    m[()] = 22.5
    self.assertEqual(m[()], 22.5)
    m[...] = 23.5
    self.assertEqual(m[()], 23.5)
    self.assertRaises(TypeError, m.__setitem__, 0, 24.7)

    # read-only
    ex = ndarray(list(range(7)), shape=[7])
    m = memoryview(ex)
    self.assertRaises(TypeError, m.__setitem__, 2, 10)

    # range
    ex = ndarray(list(range(7)), shape=[7], flags=ND_WRITABLE)
    m = memoryview(ex)

    self.assertRaises(IndexError, m.__setitem__, 2**64, 9)
    self.assertRaises(TypeError, m.__setitem__, 2.0, 10)
    self.assertRaises(TypeError, m.__setitem__, 0.0, 11)

    # out of bounds
    self.assertRaises(IndexError, m.__setitem__, -8, 20)
    self.assertRaises(IndexError, m.__setitem__, 8, 25)

    # pack_single() success:
    for fmt in fmtdict['@']:
        if fmt == 'c' or fmt == '?':
            continue
        ex = ndarray([1,2,3], shape=[3], format=fmt, flags=ND_WRITABLE)
        m = memoryview(ex)
        i = randrange(-3, 3)
        m[i] = 8
        self.assertEqual(m[i], 8)
        self.assertEqual(m[i], ex[i])

    # 'c' and '?' take non-integer values; tested separately here.
    ex = ndarray([b'1', b'2', b'3'], shape=[3], format='c',
                 flags=ND_WRITABLE)
    m = memoryview(ex)
    m[2] = b'9'
    self.assertEqual(m[2], b'9')

    ex = ndarray([True, False, True], shape=[3], format='?',
                 flags=ND_WRITABLE)
    m = memoryview(ex)
    m[1] = True
    self.assertEqual(m[1], True)

    # pack_single() exceptions:
    nd = ndarray([b'x'], shape=[1], format='c', flags=ND_WRITABLE)
    m = memoryview(nd)
    self.assertRaises(TypeError, m.__setitem__, 0, 100)

    ex = ndarray(list(range(120)), shape=[1,2,3,4,5], flags=ND_WRITABLE)
    m1 = memoryview(ex)

    # fmtdict['@'] maps each format to its (lo, hi) value range.
    for fmt, _range in fmtdict['@'].items():
        if (fmt == '?'): # PyObject_IsTrue() accepts anything
            continue
        if fmt == 'c': # special case tested above
            continue

        m2 = m1.cast(fmt)
        lo, hi = _range
        if fmt == 'd' or fmt == 'f':
            lo, hi = -2**1024, 2**1024

        if fmt != 'P': # PyLong_AsVoidPtr() accepts negative numbers
            self.assertRaises(ValueError, m2.__setitem__, 0, lo-1)
            self.assertRaises(TypeError, m2.__setitem__, 0, "xyz")

        self.assertRaises(ValueError, m2.__setitem__, 0, hi)

    # invalid item
    m2 = m1.cast('c')
    self.assertRaises(ValueError, m2.__setitem__, 0, b'\xff\xff')

    # format not implemented
    ex = ndarray(list(range(1)), shape=[1], format="xL", flags=ND_WRITABLE)
    m = memoryview(ex)
    self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)

    ex = ndarray([b'12345'], shape=[1], format="s", flags=ND_WRITABLE)
    m = memoryview(ex)
    self.assertRaises(NotImplementedError, m.__setitem__, 0, 1)

    # Not implemented: multidimensional sub-views
    ex = ndarray(list(range(12)), shape=[3,4], flags=ND_WRITABLE)
    m = memoryview(ex)
    self.assertRaises(NotImplementedError, m.__setitem__, 0, [2, 3])
def test_memoryview_slice(self):
    """Slicing a memoryview: invalid slice keys, unimplemented
    multidimensional slicing, and non-contiguous slice assignment
    (plain strided and PIL-style/suboffset layouts).
    """
    ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
    m = memoryview(ex)

    # zero step
    self.assertRaises(ValueError, m.__getitem__, slice(0,2,0))
    self.assertRaises(ValueError, m.__setitem__, slice(0,2,0),
                      bytearray([1,2]))

    # invalid slice key
    self.assertRaises(TypeError, m.__getitem__, ())

    # multidimensional slices
    ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE)
    m = memoryview(ex)

    self.assertRaises(NotImplementedError, m.__getitem__,
                      (slice(0,2,1), slice(0,2,1)))
    self.assertRaises(NotImplementedError, m.__setitem__,
                      (slice(0,2,1), slice(0,2,1)), bytearray([1,2]))

    # invalid slice tuple
    self.assertRaises(TypeError, m.__getitem__, (slice(0,2,1), {}))
    self.assertRaises(TypeError, m.__setitem__, (slice(0,2,1), {}),
                      bytearray([1,2]))

    # rvalue is not an exporter
    self.assertRaises(TypeError, m.__setitem__, slice(0,1,1), [1])

    # non-contiguous slice assignment
    for flags in (0, ND_PIL):
        # ex1 walks its buffer backwards (negative stride from offset 11);
        # ex2 takes every other element of a 24-item buffer.
        ex1 = ndarray(list(range(12)), shape=[12], strides=[-1], offset=11,
                      flags=ND_WRITABLE|flags)
        ex2 = ndarray(list(range(24)), shape=[12], strides=[2], flags=flags)
        m1 = memoryview(ex1)
        m2 = memoryview(ex2)

        # m1 shares ex1's buffer, so assignments through m1 are visible
        # in ex1 as well, and the equality checks below hold.
        ex1[2:5] = ex1[2:5]
        m1[2:5] = m2[2:5]

        self.assertEqual(m1, ex1)
        self.assertEqual(m2, ex2)

        ex1[1:3][::-1] = ex2[0:2][::1]
        m1[1:3][::-1] = m2[0:2][::1]

        self.assertEqual(m1, ex1)
        self.assertEqual(m2, ex2)

        ex1[4:1:-2][::-1] = ex1[1:4:2][::1]
        m1[4:1:-2][::-1] = m1[1:4:2][::1]

        self.assertEqual(m1, ex1)
        self.assertEqual(m2, ex2)
def test_memoryview_array(self):
    """Compare memoryview behavior against array.array for item and
    slice assignment over all formats, lengths 1-4 and slice pairs;
    resizing slice assignments must raise ValueError on the view.
    """
    def cmptest(testcase, a, b, m, singleitem):
        # Item-level check: writing singleitem through the array and
        # through the view must leave them equal; restore afterwards.
        for i, _ in enumerate(a):
            ai = a[i]
            mi = m[i]
            testcase.assertEqual(ai, mi)
            a[i] = singleitem
            if singleitem != ai:
                testcase.assertNotEqual(a, m)
                testcase.assertNotEqual(a, b)
            else:
                testcase.assertEqual(a, m)
                testcase.assertEqual(a, b)
            m[i] = singleitem
            testcase.assertEqual(a, m)
            testcase.assertEqual(b, m)
            a[i] = ai
            m[i] = mi

    for n in range(1, 5):
        for fmt, items, singleitem in iter_format(n, 'array'):
            for lslice in genslices(n):
                for rslice in genslices(n):

                    # a is the reference; m views b's buffer.
                    a = array.array(fmt, items)
                    b = array.array(fmt, items)
                    m = memoryview(b)

                    self.assertEqual(m, a)
                    self.assertEqual(m.tolist(), a.tolist())
                    self.assertEqual(m.tobytes(), a.tobytes())
                    self.assertEqual(len(m), len(a))

                    cmptest(self, a, b, m, singleitem)

                    # Perform the slice assignment on the array,
                    # recording whether it resized or raised.
                    array_err = None
                    have_resize = None
                    try:
                        al = a[lslice]
                        ar = a[rslice]
                        a[lslice] = a[rslice]
                        have_resize = len(al) != len(ar)
                    except Exception as e:
                        array_err = e.__class__

                    # Same assignment on the view.
                    m_err = None
                    try:
                        m[lslice] = m[rslice]
                    except Exception as e:
                        m_err = e.__class__

                    if have_resize: # memoryview cannot change shape
                        self.assertIs(m_err, ValueError)
                    elif m_err or array_err:
                        self.assertIs(m_err, array_err)
                    else:
                        self.assertEqual(m, a)
                        self.assertEqual(m.tolist(), a.tolist())
                        self.assertEqual(m.tobytes(), a.tobytes())
                        cmptest(self, a, b, m, singleitem)
def test_memoryview_compare_special_cases(self):
    """Comparison corner cases: ordering operators, released views,
    non-buffer operands, NaNs, and formats the struct module cannot
    parse ('u', ctypes 'T{...}').
    """
    a = array.array('L', [1, 2, 3])
    b = array.array('L', [1, 2, 7])

    # Ordering comparisons raise:
    v = memoryview(a)
    w = memoryview(b)
    for attr in ('__lt__', '__le__', '__gt__', '__ge__'):
        self.assertIs(getattr(v, attr)(w), NotImplemented)
        self.assertIs(getattr(a, attr)(v), NotImplemented)

    # Released views compare equal to themselves:
    v = memoryview(a)
    v.release()
    self.assertEqual(v, v)
    self.assertNotEqual(v, a)
    self.assertNotEqual(a, v)

    # ...but not to a live view of the same object.
    v = memoryview(a)
    w = memoryview(a)
    w.release()
    self.assertNotEqual(v, w)
    self.assertNotEqual(w, v)

    # Operand does not implement the buffer protocol:
    v = memoryview(a)
    self.assertNotEqual(v, [1, 2, 3])

    # NaNs
    nd = ndarray([(0, 0)], shape=[1], format='l x d x', flags=ND_WRITABLE)
    nd[0] = (-1, float('nan'))
    self.assertNotEqual(memoryview(nd), nd)

    # Depends on issue #15625: the struct module does not understand 'u'.
    a = array.array('u', 'xyz')
    v = memoryview(a)
    self.assertNotEqual(a, v)
    self.assertNotEqual(v, a)

    # Some ctypes format strings are unknown to the struct module.
    if ctypes:
        # format: "T{>l:x:>l:y:}"
        class BEPoint(ctypes.BigEndianStructure):
            _fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
        point = BEPoint(100, 200)
        a = memoryview(point)
        b = memoryview(point)
        self.assertNotEqual(a, b)
        self.assertNotEqual(a, point)
        self.assertNotEqual(point, a)
        self.assertRaises(NotImplementedError, a.tolist)
def test_memoryview_compare_ndim_zero(self):
    """Equality of ndim = 0 views: views with compatible formats and the
    same value compare equal, diverge after mutation, and struct-module
    multi-item formats compare by unpacked values.
    """
    nd1 = ndarray(1729, shape=[], format='@L')
    nd2 = ndarray(1729, shape=[], format='L', flags=ND_WRITABLE)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, w)
    self.assertEqual(w, v)
    self.assertEqual(v, nd2)
    self.assertEqual(nd2, v)
    self.assertEqual(w, nd1)
    self.assertEqual(nd1, w)
    self.assertFalse(v.__ne__(w))
    self.assertFalse(w.__ne__(v))

    # Mutating one operand breaks every equality.
    w[()] = 1728
    self.assertNotEqual(v, w)
    self.assertNotEqual(w, v)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(nd2, v)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(nd1, w)
    self.assertFalse(v.__eq__(w))
    self.assertFalse(w.__eq__(v))

    # Same check with suboffset (PIL-style) 1-D arrays.
    nd = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE|ND_PIL)
    ex = ndarray(list(range(12)), shape=[12], flags=ND_WRITABLE|ND_PIL)
    m = memoryview(ex)

    self.assertEqual(m, nd)
    m[9] = 100
    self.assertNotEqual(m, nd)

    # struct module: equal
    nd1 = ndarray((1729, 1.2, b'12345'), shape=[], format='Lf5s')
    nd2 = ndarray((1729, 1.2, b'12345'), shape=[], format='hf5s',
                  flags=ND_WRITABLE)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, w)
    self.assertEqual(w, v)
    self.assertEqual(v, nd2)
    self.assertEqual(nd2, v)
    self.assertEqual(w, nd1)
    self.assertEqual(nd1, w)

    # struct module: not equal
    nd1 = ndarray((1729, 1.2, b'12345'), shape=[], format='Lf5s')
    nd2 = ndarray((-1729, 1.2, b'12345'), shape=[], format='hf5s',
                  flags=ND_WRITABLE)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertNotEqual(v, w)
    self.assertNotEqual(w, v)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(nd2, v)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(nd1, w)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
def test_memoryview_compare_ndim_one(self):
    """Equality of 1-D views: contiguous arrays differing in one value
    compare unequal; a 3-element array compares equal to every other
    element of a 5-element array, including reversed slices."""
    # Contiguous operands differing in the last element: each view
    # equals its own exporter and nothing else.
    contiguous_pairs = [
        # native format
        (ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h'),
         ndarray([-529, 576, -625, 676, 729], shape=[5], format='@h')),
        # struct module formats
        (ndarray([-529, 576, -625, 676, -729], shape=[5], format='<i'),
         ndarray([-529, 576, -625, 676, 729], shape=[5], format='>h')),
    ]
    for nd1, nd2 in contiguous_pairs:
        v = memoryview(nd1)
        w = memoryview(nd2)
        self.assertEqual(v, nd1)
        self.assertEqual(w, nd2)
        self.assertNotEqual(v, nd2)
        self.assertNotEqual(w, nd1)
        self.assertNotEqual(v, w)

    # Non-contiguous: nd1 holds every other element of nd2, so
    # strided/reversed slices of w line up with v.
    noncontig_pairs = [
        # plain strided
        (ndarray([-529, -625, -729], shape=[3], format='@h'),
         ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h')),
        # struct module formats
        (ndarray([-529, -625, -729], shape=[3], format='!h'),
         ndarray([-529, 576, -625, 676, -729], shape=[5], format='<l')),
        # suboffsets (PIL-style)
        (ndarray([-529, -625, -729], shape=[3], format='@h'),
         ndarray([-529, 576, -625, 676, -729], shape=[5], format='@h',
                 flags=ND_PIL)),
        # suboffsets, struct module formats
        (ndarray([-529, -625, -729], shape=[3], format='h 0c'),
         ndarray([-529, 576, -625, 676, -729], shape=[5], format='> h',
                 flags=ND_PIL)),
    ]
    for nd1, nd2 in noncontig_pairs:
        v = memoryview(nd1)
        w = memoryview(nd2)
        self.assertEqual(v, nd2[::2])
        self.assertEqual(w[::2], nd1)
        self.assertEqual(v, w[::2])
        self.assertEqual(v[::-1], w[::-2])
def test_memoryview_compare_zero_shape(self):
    """Arrays with a zero in the shape contain no items, so all such
    views compare equal regardless of backing values or format."""
    pairs = [
        # zeros in shape
        (ndarray([900, 961], shape=[0], format='@h'),
         ndarray([-900, -961], shape=[0], format='@h')),
        # zeros in shape, struct module formats
        (ndarray([900, 961], shape=[0], format='= h0c'),
         ndarray([-900, -961], shape=[0], format='@ i')),
    ]
    for nd1, nd2 in pairs:
        v = memoryview(nd1)
        w = memoryview(nd2)
        self.assertEqual(v, nd1)
        self.assertEqual(w, nd2)
        self.assertEqual(v, nd2)
        self.assertEqual(w, nd1)
        self.assertEqual(v, w)
def test_memoryview_compare_zero_strides(self):
    """A single item repeated via zero strides compares equal to a
    materialized array holding the same repeated values."""
    pairs = [
        # zero strides
        (ndarray([900, 900, 900, 900], shape=[4], format='@L'),
         ndarray([900], shape=[4], strides=[0], format='L')),
        # zero strides, struct module formats
        (ndarray([(900, 900)]*4, shape=[4], format='@ Li'),
         ndarray([(900, 900)], shape=[4], strides=[0], format='!L h')),
    ]
    for nd1, nd2 in pairs:
        v = memoryview(nd1)
        w = memoryview(nd2)
        self.assertEqual(v, nd1)
        self.assertEqual(w, nd2)
        self.assertEqual(v, nd2)
        self.assertEqual(w, nd1)
        self.assertEqual(v, w)
def test_memoryview_compare_random_formats(self):
    """A view always compares equal to its exporter, for random items in
    both single-character native formats and fully random formats,
    contiguous and after a negative-step slice, with and without
    suboffsets (ND_PIL)."""
    n = 10

    def check(items, fmt):
        # View == exporter must hold for the full array and for a
        # reversed, strided slice of it, in both memory layouts.
        for flags in (0, ND_PIL):
            nd = ndarray(items, shape=[n], format=fmt, flags=flags)
            self.assertEqual(memoryview(nd), nd)
            sliced = nd[::-3]
            self.assertEqual(memoryview(sliced), sliced)

    # random single character native formats
    for char in fmtdict['@m']:
        fmt, items, singleitem = randitems(n, 'memoryview', '@', char)
        check(items, fmt)

    # random formats
    for _ in range(100):
        fmt, items, singleitem = randitems(n)
        check(items, fmt)
def test_memoryview_compare_multidim_c(self):
    """C-contiguous multidimensional comparisons: differing values or
    differing shapes compare unequal; the same values in a different
    (signed vs. unsigned) format still compare equal."""
    unequal_pairs = [
        # different values
        (ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='@h'),
         ndarray(list(range(0, 30)), shape=[3, 2, 5], format='@h')),
        # different values, struct module formats
        (ndarray([(0, 1, 2)]*30, shape=[3, 2, 5], format='=f q xxL'),
         ndarray([(-1.2, 1, 2)]*30, shape=[3, 2, 5], format='< f 2Q')),
        # different shape
        (ndarray(list(range(30)), shape=[2, 3, 5], format='L'),
         ndarray(list(range(30)), shape=[3, 2, 5], format='L')),
        # different shape, struct module formats
        (ndarray([(0, 1, 2)]*21, shape=[3, 7], format='! b B xL'),
         ndarray([(0, 1, 2)]*21, shape=[7, 3], format='= Qx l xxL')),
    ]
    for nd1, nd2 in unequal_pairs:
        v = memoryview(nd1)
        w = memoryview(nd2)
        self.assertEqual(v, nd1)
        self.assertEqual(w, nd2)
        self.assertNotEqual(v, nd2)
        self.assertNotEqual(w, nd1)
        self.assertNotEqual(v, w)

    # different format, same values: everything compares equal
    nd1 = ndarray(list(range(30)), shape=[2, 3, 5], format='L')
    nd2 = ndarray(list(range(30)), shape=[2, 3, 5], format='l')
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)
def test_memoryview_compare_multidim_fortran(self):
    """Fortran-contiguous multidimensional comparisons: differing values
    or differing shapes compare unequal; the same values in a different
    integer format still compare equal."""
    unequal_pairs = [
        # different values
        (ndarray(list(range(-15, 15)), shape=[5, 2, 3], format='@h',
                 flags=ND_FORTRAN),
         ndarray(list(range(0, 30)), shape=[5, 2, 3], format='@h',
                 flags=ND_FORTRAN)),
        # different values, struct module formats
        (ndarray([(2**64-1, -1)]*6, shape=[2, 3], format='=Qq',
                 flags=ND_FORTRAN),
         ndarray([(-1, 2**64-1)]*6, shape=[2, 3], format='=qQ',
                 flags=ND_FORTRAN)),
        # different shape
        (ndarray(list(range(-15, 15)), shape=[2, 3, 5], format='l',
                 flags=ND_FORTRAN),
         ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='l',
                 flags=ND_FORTRAN)),
        # different shape, struct module formats
        (ndarray(list(range(-15, 15)), shape=[2, 3, 5], format='0ll',
                 flags=ND_FORTRAN),
         ndarray(list(range(-15, 15)), shape=[3, 2, 5], format='l',
                 flags=ND_FORTRAN)),
    ]
    for nd1, nd2 in unequal_pairs:
        v = memoryview(nd1)
        w = memoryview(nd2)
        self.assertEqual(v, nd1)
        self.assertEqual(w, nd2)
        self.assertNotEqual(v, nd2)
        self.assertNotEqual(w, nd1)
        self.assertNotEqual(v, w)

    # different format, same values: everything compares equal
    nd1 = ndarray(list(range(30)), shape=[5, 2, 3], format='@h',
                  flags=ND_FORTRAN)
    nd2 = ndarray(list(range(30)), shape=[5, 2, 3], format='@b',
                  flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)
def test_memoryview_compare_multidim_mixed(self):
    """Comparisons between mixed C/Fortran and non-contiguous exporters.

    Fix: in the two 'different shape' sections the freshly built array was
    assigned to ``nd2`` and then immediately overwritten by a slice of the
    stale ``ex2`` from an earlier section, so the new array was never used.
    The new array is now stored in ``ex2`` before slicing, which is what
    the section comments describe.  Assertion outcomes are unchanged
    (the shapes still differ, so the views remain unequal).
    """
    # mixed C/Fortran contiguous
    lst1 = list(range(-15, 15))
    lst2 = transpose(lst1, [3, 2, 5])
    nd1 = ndarray(lst1, shape=[3, 2, 5], format='@l')
    nd2 = ndarray(lst2, shape=[3, 2, 5], format='l', flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, w)

    # mixed C/Fortran contiguous, struct module
    lst1 = [(-3.3, -22, b'x')]*30
    lst1[5] = (-2.2, -22, b'x')
    lst2 = transpose(lst1, [3, 2, 5])
    nd1 = ndarray(lst1, shape=[3, 2, 5], format='d b c')
    nd2 = ndarray(lst2, shape=[3, 2, 5], format='d h c', flags=ND_FORTRAN)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, w)

    # different values, non-contiguous
    ex1 = ndarray(list(range(40)), shape=[5, 8], format='@I')
    nd1 = ex1[3:1:-1, ::-2]
    ex2 = ndarray(list(range(40)), shape=[5, 8], format='I')
    nd2 = ex2[1:3:1, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)

    # same values, non-contiguous, struct module
    ex1 = ndarray([(2**31-1, -2**31)]*22, shape=[11, 2], format='=ii')
    nd1 = ex1[3:1:-1, ::-2]
    ex2 = ndarray([(2**31-1, -2**31)]*22, shape=[11, 2], format='>ii')
    nd2 = ex2[1:3:1, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)

    # different shape
    ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='b')
    nd1 = ex1[1:3:, ::-2]
    ex2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')  # was dead-assigned to nd2
    nd2 = ex2[1:3:, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)

    # different shape, struct module
    ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='B')
    nd1 = ex1[1:3:, ::-2]
    ex2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')  # was dead-assigned to nd2
    nd2 = ex2[1:3:, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)

    # different format, struct module
    ex1 = ndarray([(2, b'123')]*30, shape=[5, 3, 2], format='b3s')
    nd1 = ex1[1:3:, ::-2]
    # NOTE(review): the next line is also dead (nd2 is rebuilt from the
    # stale ex2 below).  It is deliberately left unchanged: storing the
    # 'i3s' array in ex2 would make v and w hold equal unpacked values,
    # and the assertNotEqual checks below would then fail.  Confirm the
    # intended comparison against upstream before "fixing".
    nd2 = ndarray([(2, b'123')]*30, shape=[5, 3, 2], format='i3s')
    nd2 = ex2[1:3:, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
def test_memoryview_compare_multidim_zero_shape(self):
    """Empty arrays (a zero in the shape) only compare equal when the
    shapes themselves match; differing shapes are unequal even though
    both views contain no items."""
    # zeros in shape
    nd1 = ndarray(list(range(30)), shape=[0, 3, 2], format='i')
    nd2 = ndarray(list(range(30)), shape=[5, 0, 2], format='@i')
    v = memoryview(nd1)
    w = memoryview(nd2)

    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)

    # zeros in shape, struct module
    # NOTE(review): this half repeats the same native 'i'/'@i' formats as
    # above and does not actually use a struct-module-only format --
    # looks like a copy/paste oversight; confirm against upstream.
    nd1 = ndarray(list(range(30)), shape=[0, 3, 2], format='i')
    nd2 = ndarray(list(range(30)), shape=[5, 0, 2], format='@i')
    v = memoryview(nd1)
    w = memoryview(nd2)

    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)
def test_memoryview_compare_multidim_zero_strides(self):
    """A zero-strided (single item broadcast over the shape) exporter
    compares equal to a dense exporter holding the same repeated value."""

    def check_equal(dense, broadcast):
        # Mirrors the assertion cluster used throughout this test class.
        mv_dense = memoryview(dense)
        mv_bcast = memoryview(broadcast)
        self.assertEqual(mv_dense, dense)
        self.assertEqual(mv_bcast, broadcast)
        self.assertEqual(mv_dense, broadcast)
        self.assertEqual(mv_bcast, dense)
        self.assertEqual(mv_dense, mv_bcast)
        return mv_dense, mv_bcast

    # zero strides: one element seen at every position of a [4, 5, 4] array
    dense = ndarray([900]*80, shape=[4, 5, 4], format='@L')
    broadcast = ndarray([900], shape=[4, 5, 4], strides=[0, 0, 0], format='L')
    mv_dense, mv_bcast = check_equal(dense, broadcast)
    self.assertEqual(mv_dense.tolist(), mv_bcast.tolist())

    # zero strides, struct module formats
    dense = ndarray([(1, 2)]*10, shape=[2, 5], format='=lQ')
    broadcast = ndarray([(1, 2)], shape=[2, 5], strides=[0, 0], format='<lQ')
    check_equal(dense, broadcast)
def test_memoryview_compare_multidim_suboffsets(self):
    """Comparisons involving PIL-style (suboffset/indirect) exporters.

    Suboffset views must compare by logical content exactly like strided
    views: equal values compare equal regardless of byte order or
    indirection; differing values or shapes compare unequal.
    """
    # suboffsets
    ex1 = ndarray(list(range(40)), shape=[5, 8], format='@I')
    nd1 = ex1[3:1:-1, ::-2]
    ex2 = ndarray(list(range(40)), shape=[5, 8], format='I', flags=ND_PIL)
    nd2 = ex2[1:3:1, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)

    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)

    # suboffsets, struct module
    ex1 = ndarray([(2**64-1, -1)]*40, shape=[5, 8], format='=Qq',
                  flags=ND_WRITABLE)
    ex1[2][7] = (1, -2)
    nd1 = ex1[3:1:-1, ::-2]

    ex2 = ndarray([(2**64-1, -1)]*40, shape=[5, 8], format='>Qq',
                  flags=ND_PIL|ND_WRITABLE)
    ex2[2][7] = (1, -2)
    nd2 = ex2[1:3:1, ::-2]

    v = memoryview(nd1)
    w = memoryview(nd2)

    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)

    # suboffsets, different shape
    ex1 = ndarray(list(range(30)), shape=[2, 3, 5], format='b',
                  flags=ND_PIL)
    nd1 = ex1[1:3:, ::-2]
    # NOTE(review): the next line is dead -- nd2 is immediately rebuilt
    # from the stale ex2 of the previous section.  Probably 'ex2 =' was
    # intended; left unchanged here because altering it may change which
    # comparison is exercised.  Confirm against upstream test_buffer.py.
    nd2 = ndarray(list(range(30)), shape=[3, 2, 5], format='b')
    nd2 = ex2[1:3:, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)

    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)

    # suboffsets, different shape, struct module
    ex1 = ndarray([(2**8-1, -1)]*40, shape=[2, 3, 5], format='Bb',
                  flags=ND_PIL|ND_WRITABLE)
    nd1 = ex1[1:2:, ::-2]
    ex2 = ndarray([(2**8-1, -1)]*40, shape=[3, 2, 5], format='Bb')
    nd2 = ex2[1:2:, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)

    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)

    # suboffsets, different format
    ex1 = ndarray(list(range(30)), shape=[5, 3, 2], format='i', flags=ND_PIL)
    nd1 = ex1[1:3:, ::-2]
    ex2 = ndarray(list(range(30)), shape=[5, 3, 2], format='@I', flags=ND_PIL)
    nd2 = ex2[1:3:, ::-2]
    v = memoryview(nd1)
    w = memoryview(nd2)

    # Same values, signed vs unsigned format: still equal (value-based).
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, nd2)
    self.assertEqual(w, nd1)
    self.assertEqual(v, w)

    # suboffsets, different format, struct module
    ex1 = ndarray([(b'hello', b'', 1)]*27, shape=[3, 3, 3], format='5s0sP',
                  flags=ND_PIL|ND_WRITABLE)
    ex1[1][2][2] = (b'sushi', b'', 1)
    nd1 = ex1[1:3:, ::-2]

    ex2 = ndarray([(b'hello', b'', 1)]*27, shape=[3, 3, 3], format='5s0sP',
                  flags=ND_PIL|ND_WRITABLE)
    # NOTE(review): this writes ex1 a second time instead of ex2, so ex2
    # keeps its original values and the views below differ.  Changing it
    # to ex2 would make the buffers identical and flip the assertNotEqual
    # results, so it is left as-is; verify the intent against upstream.
    ex1[1][2][2] = (b'sushi', b'', 1)
    nd2 = ex2[1:3:, ::-2]

    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertNotEqual(v, nd2)
    self.assertNotEqual(w, nd1)
    self.assertNotEqual(v, w)

    # initialize mixed C/Fortran + suboffsets
    lst1 = list(range(-15, 15))
    lst2 = transpose(lst1, [3, 2, 5])
    nd1 = ndarray(lst1, shape=[3, 2, 5], format='@l', flags=ND_PIL)
    nd2 = ndarray(lst2, shape=[3, 2, 5], format='l', flags=ND_FORTRAN|ND_PIL)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, w)

    # initialize mixed C/Fortran + suboffsets, struct module
    lst1 = [(b'sashimi', b'sliced', 20.05)]*30
    lst1[11] = (b'ramen', b'spicy', 9.45)
    lst2 = transpose(lst1, [3, 2, 5])
    nd1 = ndarray(lst1, shape=[3, 2, 5], format='< 10p 9p d', flags=ND_PIL)
    nd2 = ndarray(lst2, shape=[3, 2, 5], format='> 10p 9p d',
                  flags=ND_FORTRAN|ND_PIL)
    v = memoryview(nd1)
    w = memoryview(nd2)
    self.assertEqual(v, nd1)
    self.assertEqual(w, nd2)
    self.assertEqual(v, w)
def test_memoryview_compare_not_equal(self):
    """Changing one item in an otherwise identical Fortran-ordered copy
    makes the views unequal, for every explicit byte order."""

    def check_unequal(src_c, src_f):
        # src_c: pristine C-ordered exporter; src_f: mutated Fortran copy.
        mc = memoryview(src_c)
        mf = memoryview(src_f)
        self.assertEqual(mc, src_c)
        self.assertEqual(mf, src_f)
        self.assertNotEqual(mc, mf)
        self.assertNotEqual(mc, src_f)
        self.assertNotEqual(mf, src_c)

    for byteorder in ['=', '<', '>', '!']:
        # single-field items
        src_c = ndarray([2**63]*120, shape=[3,5,2,2,2], format=byteorder+'Q')
        src_f = ndarray([2**63]*120, shape=[3,5,2,2,2], format=byteorder+'Q',
                        flags=ND_WRITABLE|ND_FORTRAN)
        src_f[2][3][1][1][1] = 1
        check_unequal(src_c, src_f)

        # multi-field (struct module) items
        src_c = ndarray([(2**63, 2**31, 2**15)]*120, shape=[3,5,2,2,2],
                        format=byteorder+'QLH')
        src_f = ndarray([(2**63, 2**31, 2**15)]*120, shape=[3,5,2,2,2],
                        format=byteorder+'QLH', flags=ND_WRITABLE|ND_FORTRAN)
        src_f[2][3][1][1][1] = (1, 1, 1)
        check_unequal(src_c, src_f)
def test_memoryview_check_released(self):
    """Every operation on a released memoryview must raise ValueError."""
    a = array.array('d', [1.1, 2.2, 3.3])

    m = memoryview(a)
    m.release()

    # PyMemoryView_FromObject()
    self.assertRaises(ValueError, memoryview, m)
    # memoryview.cast()
    self.assertRaises(ValueError, m.cast, 'c')
    # getbuffer()
    self.assertRaises(ValueError, ndarray, m)
    # memoryview.tolist()
    self.assertRaises(ValueError, m.tolist)
    # memoryview.tobytes()
    self.assertRaises(ValueError, m.tobytes)
    # sequence
    self.assertRaises(ValueError, eval, "1.0 in m", locals())
    # subscript
    self.assertRaises(ValueError, m.__getitem__, 0)
    # assignment
    self.assertRaises(ValueError, m.__setitem__, 0, 1)

    # All informational attributes must refuse as well.
    for attr in ('obj', 'nbytes', 'readonly', 'itemsize', 'format', 'ndim',
                 'shape', 'strides', 'suboffsets', 'c_contiguous',
                 'f_contiguous', 'contiguous'):
        self.assertRaises(ValueError, m.__getattribute__, attr)

    # richcompare
    b = array.array('d', [1.1, 2.2, 3.3])
    m1 = memoryview(a)
    m2 = memoryview(b)

    self.assertEqual(m1, m2)
    m1.release()
    # A released view compares unequal to everything except itself
    # (identity short-circuits the comparison).
    self.assertNotEqual(m1, m2)
    self.assertNotEqual(m1, a)
    self.assertEqual(m1, m1)
def test_memoryview_tobytes(self):
    """tobytes() matches the exporter's bytes for scalar, 1-D and n-D
    struct formats, and also for formats the struct module cannot parse."""
    # Many implicit tests are already in self.verify().

    t = (-529, 576, -625, 676, -729)

    nd = ndarray(t, shape=[5], format='@h')
    m = memoryview(nd)
    self.assertEqual(m, nd)
    self.assertEqual(m.tobytes(), nd.tobytes())

    nd = ndarray([t], shape=[1], format='>hQiLl')
    m = memoryview(nd)
    self.assertEqual(m, nd)
    self.assertEqual(m.tobytes(), nd.tobytes())

    nd = ndarray([t for _ in range(12)], shape=[2,2,3], format='=hQiLl')
    m = memoryview(nd)
    self.assertEqual(m, nd)
    self.assertEqual(m.tobytes(), nd.tobytes())

    nd = ndarray([t for _ in range(120)], shape=[5,2,2,3,2],
                 format='<hQiLl')
    m = memoryview(nd)
    self.assertEqual(m, nd)
    self.assertEqual(m.tobytes(), nd.tobytes())

    # Unknown formats are handled: tobytes() purely depends on itemsize.
    if ctypes:
        # format: "T{>l:x:>l:y:}"
        class BEPoint(ctypes.BigEndianStructure):
            _fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
        point = BEPoint(100, 200)
        a = memoryview(point)
        self.assertEqual(a.tobytes(), bytes(point))
def test_memoryview_get_contiguous(self):
    """Exercise PyMemoryView_GetContiguous() through the get_contiguous()
    helper: buffer-type (PyBUF_READ/PyBUF_WRITE) and order ('C'/'F'/'A')
    combinations against scalar, empty, 1-D and n-D exporters."""
    # Many implicit tests are already in self.verify().

    # no buffer interface
    self.assertRaises(TypeError, get_contiguous, {}, PyBUF_READ, 'F')
    # writable request to read-only object
    self.assertRaises(BufferError, get_contiguous, b'x', PyBUF_WRITE, 'C')
    # writable request to non-contiguous object
    nd = ndarray([1, 2, 3], shape=[2], strides=[2])
    self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'A')

    # scalar, read-only request from read-only exporter
    nd = ndarray(9, shape=(), format="L")
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(m, nd)
        self.assertEqual(m[()], 9)

    # scalar, read-only request from writable exporter
    nd = ndarray(9, shape=(), format="L", flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(m, nd)
        self.assertEqual(m[()], 9)

    # scalar, writable request
    for order in ['C', 'F', 'A']:
        nd[()] = 9
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(m, nd)
        self.assertEqual(m[()], 9)

        # Writing through the contiguous view updates the exporter.
        m[()] = 10
        self.assertEqual(m[()], 10)
        self.assertEqual(nd[()], 10)

    # zeros in shape
    nd = ndarray([1], shape=[0], format="L", flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertRaises(IndexError, m.__getitem__, 0)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), [])

    nd = ndarray(list(range(8)), shape=[2, 0, 7], format="L",
                 flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(ndarray(m).tolist(), [[], []])

    # one-dimensional
    nd = ndarray([1], shape=[1], format="h", flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), nd.tolist())

    nd = ndarray([1, 2, 3], shape=[3], format="b", flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), nd.tolist())

    # one-dimensional, non-contiguous
    nd = ndarray([1, 2, 3], shape=[2], strides=[2], flags=ND_WRITABLE)
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), nd.tolist())
        # PyBUF_READ yields a read-only result even from a writable source.
        self.assertRaises(TypeError, m.__setitem__, 1, 20)
        self.assertEqual(m[1], 3)
        self.assertEqual(nd[1], 3)

    nd = nd[::-1]
    for order in ['C', 'F', 'A']:
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(m, nd)
        self.assertEqual(m.tolist(), nd.tolist())
        self.assertRaises(TypeError, m.__setitem__, 1, 20)
        self.assertEqual(m[1], 1)
        self.assertEqual(nd[1], 1)

    # multi-dimensional, contiguous input
    nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE)
    for order in ['C', 'A']:
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(ndarray(m).tolist(), nd.tolist())

    # A C-contiguous buffer cannot satisfy a writable 'F' request.
    self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'F')
    m = get_contiguous(nd, PyBUF_READ, order)
    self.assertEqual(ndarray(m).tolist(), nd.tolist())

    nd = ndarray(list(range(12)), shape=[3, 4],
                 flags=ND_WRITABLE|ND_FORTRAN)
    for order in ['F', 'A']:
        m = get_contiguous(nd, PyBUF_WRITE, order)
        self.assertEqual(ndarray(m).tolist(), nd.tolist())

    self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE, 'C')
    m = get_contiguous(nd, PyBUF_READ, order)
    self.assertEqual(ndarray(m).tolist(), nd.tolist())

    # multi-dimensional, non-contiguous input
    nd = ndarray(list(range(12)), shape=[3, 4], flags=ND_WRITABLE|ND_PIL)
    for order in ['C', 'F', 'A']:
        self.assertRaises(BufferError, get_contiguous, nd, PyBUF_WRITE,
                          order)
        m = get_contiguous(nd, PyBUF_READ, order)
        self.assertEqual(ndarray(m).tolist(), nd.tolist())

    # flags
    nd = ndarray([1,2,3,4,5], shape=[3], strides=[2])
    m = get_contiguous(nd, PyBUF_READ, 'C')
    self.assertTrue(m.c_contiguous)
def test_memoryview_serializing(self):
    """A memoryview can seed io.BytesIO and readinto() round-trips the
    same bytes as tobytes()."""
    # C-contiguous
    size = struct.calcsize('i')
    a = array.array('i', [1,2,3,4,5])
    m = memoryview(a)
    buf = io.BytesIO(m)
    b = bytearray(5*size)
    buf.readinto(b)
    self.assertEqual(m.tobytes(), b)

    # C-contiguous, multi-dimensional
    size = struct.calcsize('L')
    nd = ndarray(list(range(12)), shape=[2,3,2], format="L")
    m = memoryview(nd)
    buf = io.BytesIO(m)
    b = bytearray(2*3*2*size)
    buf.readinto(b)
    self.assertEqual(m.tobytes(), b)

    # Fortran contiguous, multi-dimensional
    # NOTE(review): left disabled in the original -- presumably because
    # BytesIO requires a C-contiguous buffer; confirm before re-enabling.
    #size = struct.calcsize('L')
    #nd = ndarray(list(range(12)), shape=[2,3,2], format="L",
    #             flags=ND_FORTRAN)
    #m = memoryview(nd)
    #buf = io.BytesIO(m)
    #b = bytearray(2*3*2*size)
    #buf.readinto(b)
    #self.assertEqual(m.tobytes(), b)
def test_memoryview_hash(self):
    """hash(m) equals the hash of the underlying bytes for byte-format
    views (preserving the equality/hash invariant), and raises ValueError
    for non-byte formats."""
    # bytes exporter
    b = bytes(list(range(12)))
    m = memoryview(b)
    self.assertEqual(hash(b), hash(m))

    # C-contiguous
    mc = m.cast('c', shape=[3,4])
    self.assertEqual(hash(mc), hash(b))

    # non-contiguous
    mx = m[::-2]
    b = bytes(list(range(12))[::-2])
    self.assertEqual(hash(mx), hash(b))

    # Fortran contiguous
    nd = ndarray(list(range(30)), shape=[3,2,5], flags=ND_FORTRAN)
    m = memoryview(nd)
    self.assertEqual(hash(m), hash(nd))

    # multi-dimensional slice
    nd = ndarray(list(range(30)), shape=[3,2,5])
    x = nd[::2, ::, ::-1]
    m = memoryview(x)
    self.assertEqual(hash(m), hash(x))

    # multi-dimensional slice with suboffsets
    nd = ndarray(list(range(30)), shape=[2,5,3], flags=ND_PIL)
    x = nd[::2, ::, ::-1]
    m = memoryview(x)
    self.assertEqual(hash(m), hash(x))

    # equality-hash invariant: equal views must hash equal even when the
    # formats ('B' vs 'b') differ.
    x = ndarray(list(range(12)), shape=[12], format='B')
    a = memoryview(x)

    y = ndarray(list(range(12)), shape=[12], format='b')
    b = memoryview(y)

    self.assertEqual(a, b)
    self.assertEqual(hash(a), hash(b))

    # non-byte formats
    nd = ndarray(list(range(12)), shape=[2,2,3], format='L')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.__hash__)

    nd = ndarray(list(range(-6, 6)), shape=[2,2,3], format='h')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.__hash__)

    nd = ndarray(list(range(12)), shape=[2,2,3], format='= L')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.__hash__)

    nd = ndarray(list(range(-6, 6)), shape=[2,2,3], format='< h')
    m = memoryview(nd)
    self.assertRaises(ValueError, m.__hash__)
def test_memoryview_release(self):
    """release() semantics: a view with exported buffers refuses to
    release, chained views release independently, and implicit release in
    the context manager's __exit__() behaves the same way.

    Fix: the final check used the bare expression ``m[0] == ord(b'1')``,
    whose result was silently discarded; it is now a real assertion
    (m is still alive at that point, so the access must succeed).
    """
    # Create re-exporter from getbuffer(memoryview), then release the view.
    a = bytearray([1,2,3])
    m = memoryview(a)
    nd = ndarray(m) # re-exporter
    self.assertRaises(BufferError, m.release)
    del nd
    m.release()

    a = bytearray([1,2,3])
    m = memoryview(a)
    nd1 = ndarray(m, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    nd2 = ndarray(nd1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    self.assertIs(nd2.obj, m)
    self.assertRaises(BufferError, m.release)
    del nd1, nd2
    m.release()

    # chained views
    a = bytearray([1,2,3])
    m1 = memoryview(a)
    m2 = memoryview(m1)
    nd = ndarray(m2) # re-exporter
    m1.release()
    self.assertRaises(BufferError, m2.release)
    del nd
    m2.release()

    a = bytearray([1,2,3])
    m1 = memoryview(a)
    m2 = memoryview(m1)
    nd1 = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    nd2 = ndarray(nd1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    self.assertIs(nd2.obj, m2)
    m1.release()
    self.assertRaises(BufferError, m2.release)
    del nd1, nd2
    m2.release()

    # Allow changing layout while buffers are exported.
    nd = ndarray([1,2,3], shape=[3], flags=ND_VAREXPORT)
    m1 = memoryview(nd)
    nd.push([4,5,6,7,8], shape=[5]) # mutate nd
    m2 = memoryview(nd)

    x = memoryview(m1)
    self.assertEqual(x.tolist(), m1.tolist())

    y = memoryview(m2)
    self.assertEqual(y.tolist(), m2.tolist())
    self.assertEqual(y.tolist(), nd.tolist())
    m2.release()
    y.release()

    nd.pop() # pop the current view
    self.assertEqual(x.tolist(), nd.tolist())

    del nd
    m1.release()
    x.release()

    # If multiple memoryviews share the same managed buffer, implicit
    # release() in the context manager's __exit__() method should still
    # work.
    def catch22(b):
        with memoryview(b) as m2:
            pass

    x = bytearray(b'123')
    with memoryview(x) as m1:
        catch22(m1)
        self.assertEqual(m1[0], ord(b'1'))

    x = ndarray(list(range(12)), shape=[2,2,3], format='l')
    y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    self.assertIs(z.obj, x)
    with memoryview(z) as m:
        catch22(m)
        self.assertEqual(m[0:1].tolist(), [[[0, 1, 2], [3, 4, 5]]])

    # Test garbage collection.
    for flags in (0, ND_REDIRECT):
        x = bytearray(b'123')
        with memoryview(x) as m1:
            del x
            y = ndarray(m1, getbuf=PyBUF_FULL_RO, flags=flags)
            with memoryview(y) as m2:
                del y
                z = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=flags)
                with memoryview(z) as m3:
                    del z
                    catch22(m3)
                    catch22(m2)
                    catch22(m1)
                    self.assertEqual(m1[0], ord(b'1'))
                    self.assertEqual(m2[1], ord(b'2'))
                    self.assertEqual(m3[2], ord(b'3'))
        del m3
        del m2
        del m1

        x = bytearray(b'123')
        with memoryview(x) as m1:
            del x
            y = ndarray(m1, getbuf=PyBUF_FULL_RO, flags=flags)
            with memoryview(y) as m2:
                del y
                z = ndarray(m2, getbuf=PyBUF_FULL_RO, flags=flags)
                with memoryview(z) as m3:
                    del z
                    catch22(m1)
                    catch22(m2)
                    catch22(m3)
                    self.assertEqual(m1[0], ord(b'1'))
                    self.assertEqual(m2[1], ord(b'2'))
                    self.assertEqual(m3[2], ord(b'3'))
        del m1, m2, m3

    # memoryview.release() fails if the view has exported buffers.
    x = bytearray(b'123')
    with self.assertRaises(BufferError):
        with memoryview(x) as m:
            ex = ndarray(m)  # live export makes release() in __exit__ fail
            # was: `m[0] == ord(b'1')` -- a no-op comparison whose result
            # was discarded; assert it instead (m is still alive here).
            self.assertEqual(m[0], ord(b'1'))
def test_memoryview_redirect(self):
    """ND_REDIRECT re-exporters forward view.obj through the chain, so a
    memoryview of the last link still references the root exporter."""
    nd = ndarray([1.0 * x for x in range(12)], shape=[12], format='d')
    a = array.array('d', [1.0 * x for x in range(12)])

    for exporter in (nd, a):
        level1 = ndarray(exporter, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
        level2 = ndarray(level1, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
        m = memoryview(level2)

        # Every link, and the view itself, points at the root exporter.
        self.assertIs(level1.obj, exporter)
        self.assertIs(level2.obj, exporter)
        self.assertIs(m.obj, exporter)

        self.assertEqual(m, exporter)
        self.assertEqual(m, level1)
        self.assertEqual(m, level2)
        self.assertEqual(m[1:3], exporter[1:3])
        self.assertEqual(m[1:3], level1[1:3])
        self.assertEqual(m[1:3], level2[1:3])

        # The view stays valid after the intermediate links are gone.
        del level1, level2
        self.assertEqual(m[1:3], exporter[1:3])
def test_memoryview_from_static_exporter(self):
    """Views over a statically allocated exporter.

    In legacy_mode the exporter sets view.obj to NULL (seen as None),
    which breaks the ND_REDIRECT chain -- the last section documents
    that inferior behavior explicitly.
    """
    fmt = 'B'
    lst = [0,1,2,3,4,5,6,7,8,9,10,11]

    # exceptions
    self.assertRaises(TypeError, staticarray, 1, 2, 3)

    # view.obj==x
    x = staticarray()
    y = memoryview(x)
    self.verify(y, obj=x,
                itemsize=1, fmt=fmt, readonly=1,
                ndim=1, shape=[12], strides=[1],
                lst=lst)
    for i in range(12):
        self.assertEqual(y[i], i)
    del x
    del y

    x = staticarray()
    y = memoryview(x)
    del y
    del x

    x = staticarray()
    y = ndarray(x, getbuf=PyBUF_FULL_RO)
    z = ndarray(y, getbuf=PyBUF_FULL_RO)
    m = memoryview(z)
    # Without ND_REDIRECT each link references its immediate predecessor.
    self.assertIs(y.obj, x)
    self.assertIs(m.obj, z)
    self.verify(m, obj=z,
                itemsize=1, fmt=fmt, readonly=1,
                ndim=1, shape=[12], strides=[1],
                lst=lst)
    del x, y, z, m

    x = staticarray()
    y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    m = memoryview(z)
    # With ND_REDIRECT all links forward to the root exporter.
    self.assertIs(y.obj, x)
    self.assertIs(z.obj, x)
    self.assertIs(m.obj, x)
    self.verify(m, obj=x,
                itemsize=1, fmt=fmt, readonly=1,
                ndim=1, shape=[12], strides=[1],
                lst=lst)
    del x, y, z, m

    # view.obj==NULL
    x = staticarray(legacy_mode=True)
    y = memoryview(x)
    self.verify(y, obj=None,
                itemsize=1, fmt=fmt, readonly=1,
                ndim=1, shape=[12], strides=[1],
                lst=lst)
    for i in range(12):
        self.assertEqual(y[i], i)
    del x
    del y

    x = staticarray(legacy_mode=True)
    y = memoryview(x)
    del y
    del x

    x = staticarray(legacy_mode=True)
    y = ndarray(x, getbuf=PyBUF_FULL_RO)
    z = ndarray(y, getbuf=PyBUF_FULL_RO)
    m = memoryview(z)
    self.assertIs(y.obj, None)
    self.assertIs(m.obj, z)
    self.verify(m, obj=z,
                itemsize=1, fmt=fmt, readonly=1,
                ndim=1, shape=[12], strides=[1],
                lst=lst)
    del x, y, z, m

    x = staticarray(legacy_mode=True)
    y = ndarray(x, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    z = ndarray(y, getbuf=PyBUF_FULL_RO, flags=ND_REDIRECT)
    m = memoryview(z)
    # Clearly setting view.obj==NULL is inferior, since it
    # messes up the redirection chain:
    self.assertIs(y.obj, None)
    self.assertIs(z.obj, y)
    self.assertIs(m.obj, y)
    self.verify(m, obj=y,
                itemsize=1, fmt=fmt, readonly=1,
                ndim=1, shape=[12], strides=[1],
                lst=lst)
    del x, y, z, m
def test_memoryview_getbuffer_undefined(self):
    """A getbufferproc that misbehaves on failure must surface as
    BufferError from memoryview()."""
    # getbufferproc does not adhere to the new documentation
    bad_exporter = ndarray([1,2,3], [3],
                           flags=ND_GETBUF_FAIL|ND_GETBUF_UNDEFINED)
    with self.assertRaises(BufferError):
        memoryview(bad_exporter)
def test_issue_7385(self):
    """Regression test: a failing getbuffer must raise BufferError from
    memoryview() instead of crashing (issue 7385)."""
    failing = ndarray([1,2,3], shape=[3], flags=ND_GETBUF_FAIL)
    with self.assertRaises(BufferError):
        memoryview(failing)
# Run the buffer/memoryview test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
cetic/ansible | lib/ansible/modules/storage/netapp/na_cdot_user.py | 9 | 10243 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_user
short_description: useradmin configuration and management
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create or destroy users.
options:
state:
description:
- Whether the specified user should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the user to manage.
required: true
application:
description:
- Applications to grant access to.
required: true
choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet']
authentication_method:
description:
- Authentication method for the application.
- Not all authentication methods are valid for an application.
- Valid authentication methods for each application are as denoted in I(authentication_choices_description).
- password for console application
- password, domain, nsswitch, cert for http application.
- password, domain, nsswitch, cert for ontapi application.
- community for snmp application (when creating SNMPv1 and SNMPv2 users).
- usm and community for snmp application (when creating SNMPv3 users).
- password for sp application.
- password for rsh application.
- password for telnet application.
- password, publickey, domain, nsswitch for ssh application.
required: true
choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']
set_password:
description:
- Password for the user account.
- It is ignored for creating snmp users, but is required for creating non-snmp users.
- For an existing user, this value will be used as the new password.
default: None
role_name:
description:
- The name of the role. Required when C(state=present)
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: Create User
na_cdot_user:
state: present
name: SampleUser
application: ssh
authentication_method: password
set_password: apn1242183u1298u41
role_name: vsadmin
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
# True when the optional NetApp-Lib python package is importable; checked
# before any ZAPI connection is attempted.
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTUser(object):
    """Create, delete and update login accounts on a Data ONTAP vserver.

    Wraps the ZAPI calls security-login-get-iter, security-login-create,
    security-login-delete and security-login-modify-password.

    Fixes:
      * ``set_password`` is now declared ``no_log=True`` so the password is
        never echoed into Ansible logs/output.
      * ``change_password()`` resets the vserver tunnel in a ``finally``
        block; previously the tunnel was left pointing at the vserver when
        the modify call failed or reported "no change" (error 13114).
    """

    def __init__(self):
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            application=dict(required=True, type='str', choices=[
                'console', 'http', 'ontapi', 'rsh',
                'snmp', 'sp', 'ssh', 'telnet']),
            authentication_method=dict(required=True, type='str',
                                       choices=['community', 'password',
                                                'publickey', 'domain',
                                                'nsswitch', 'usm']),
            # no_log keeps the password out of logs and registered output.
            set_password=dict(required=False, type='str', default=None,
                              no_log=True),
            role_name=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['role_name'])
            ],
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.application = p['application']
        self.authentication_method = p['authentication_method']
        self.set_password = p['set_password']
        self.role_name = p['role_name']
        self.vserver = p['vserver']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_ontap_zapi(module=self.module)

    def get_user(self):
        """
        Checks if the user exists.

        A user is identified by the (name, application,
        authentication-method) triple on the configured vserver.

        :return:
            True if user found
            False if user is not found
        :rtype: bool
        """
        security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-account-info', **{'vserver': self.vserver,
                                              'user-name': self.name,
                                              'application': self.application,
                                              'authentication-method':
                                                  self.authentication_method})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        security_login_get_iter.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(security_login_get_iter,
                                                     enable_tunneling=False)
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                return True
            else:
                return False
        except netapp_utils.zapi.NaApiError as e:
            # Error 16034 denotes a user not being found.
            if to_native(e.code) == "16034":
                return False
            else:
                self.module.fail_json(msg='Error getting user %s: %s' % (self.name, to_native(e)),
                                      exception=traceback.format_exc())

    def create_user(self):
        """Create the login via security-login-create; the password is
        attached only when one was supplied (SNMP users need none)."""
        user_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-create', **{'vserver': self.vserver,
                                        'user-name': self.name,
                                        'application': self.application,
                                        'authentication-method':
                                            self.authentication_method,
                                        'role-name': self.role_name})
        if self.set_password is not None:
            user_create.add_new_child('password', self.set_password)

        try:
            self.server.invoke_successfully(user_create,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error creating user %s: %s' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def delete_user(self):
        """Remove the login via security-login-delete."""
        user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-delete', **{'vserver': self.vserver,
                                        'user-name': self.name,
                                        'application': self.application,
                                        'authentication-method':
                                            self.authentication_method})

        try:
            self.server.invoke_successfully(user_delete,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error removing user %s: %s' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def change_password(self):
        """
        Changes the password

        :return:
            True if password updated
            False if password is not updated (ZAPI error 13114: the new
            password is identical to the current one)
        :rtype: bool
        """
        self.server.set_vserver(self.vserver)
        modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-modify-password', **{
                'new-password': str(self.set_password),
                'user-name': self.name})
        try:
            self.server.invoke_successfully(modify_password,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            if to_native(e.code) == '13114':
                return False
            else:
                self.module.fail_json(msg='Error setting password for user %s: %s' % (self.name, to_native(e)),
                                      exception=traceback.format_exc())
        finally:
            # Always undo the vserver tunnel, even on the "no change" and
            # failure paths; previously it was only reset on success.
            self.server.set_vserver(None)
        return True

    def apply(self):
        """State machine: create/delete/update the user as needed and exit
        with the aggregated 'changed' flag."""
        property_changed = False
        password_changed = False
        user_exists = self.get_user()

        if user_exists:
            if self.state == 'absent':
                property_changed = True
            elif self.state == 'present':
                if self.set_password is not None:
                    # NOTE(review): the password is modified here even in
                    # check mode (the check_mode guard below only covers
                    # create/delete) -- confirm and fix separately.
                    password_changed = self.change_password()
        else:
            if self.state == 'present':
                # Check if anything needs to be updated
                property_changed = True

        if property_changed:
            if self.module.check_mode:
                pass
            else:
                if self.state == 'present':
                    if not user_exists:
                        self.create_user()
                    # Add ability to update parameters.
                elif self.state == 'absent':
                    self.delete_user()

        changed = property_changed or password_changed
        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the module object and run its state machine."""
    NetAppCDOTUser().apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
ajdawson/windspharm | examples/iris/rws_example.py | 1 | 2190 | """Compute Rossby wave source from the long-term mean flow.
This example uses the iris interface.
Additional requirements for this example:
* iris (http://scitools.org.uk/iris/)
* matplotlib (http://matplotlib.org/)
* cartopy (http://scitools.org.uk/cartopy/)
"""
import warnings
import cartopy.crs as ccrs
import iris
import iris.plot as iplt
from iris.coord_categorisation import add_month
import matplotlib as mpl
import matplotlib.pyplot as plt
from windspharm.iris import VectorWind
from windspharm.examples import example_data_path
mpl.rcParams['mathtext.default'] = 'regular'
# Read zonal and meridional wind components from file using the iris module.
# The components are in separate files. We catch warnings here because the
# files are not completely CF compliant.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
uwnd = iris.load_cube(example_data_path('uwnd_mean.nc'))
vwnd = iris.load_cube(example_data_path('vwnd_mean.nc'))
uwnd.coord('longitude').circular = True
vwnd.coord('longitude').circular = True
# Create a VectorWind instance to handle the computations.
w = VectorWind(uwnd, vwnd)
# Compute components of rossby wave source: absolute vorticity, divergence,
# irrotational (divergent) wind components, gradients of absolute vorticity.
eta = w.absolutevorticity()
div = w.divergence()
uchi, vchi = w.irrotationalcomponent()
etax, etay = w.gradient(eta)
etax.units = 'm**-1 s**-1'
etay.units = 'm**-1 s**-1'
# Combine the components to form the Rossby wave source term.
S = eta * -1. * div - (uchi * etax + vchi * etay)
S.coord('longitude').attributes['circular'] = True
# Pick out the field for December at 200 hPa.
time_constraint = iris.Constraint(month='Dec')
add_month(S, 'time')
S_dec = S.extract(time_constraint)
# Plot Rossby wave source.
clevs = [-30, -25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25, 30]
ax = plt.subplot(111, projection=ccrs.PlateCarree(central_longitude=180))
fill = iplt.contourf(S_dec * 1e11, clevs, cmap=plt.cm.RdBu_r, extend='both')
ax.coastlines()
ax.gridlines()
plt.colorbar(fill, orientation='horizontal')
plt.title('Rossby Wave Source ($10^{-11}$s$^{-1}$)', fontsize=16)
plt.show()
| mit |
Johnetordoff/waterbutler | tests/providers/github/test_provider.py | 1 | 45643 | import pytest
import io
import os
import copy
import json
import base64
import hashlib
from http import client
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.core.provider import build_url
from waterbutler.providers.github import GitHubProvider
from waterbutler.providers.github import settings as github_settings
from waterbutler.providers.github.provider import GitHubPath
from waterbutler.providers.github.metadata import GitHubRevision
from waterbutler.providers.github.metadata import GitHubFileTreeMetadata
from waterbutler.providers.github.metadata import GitHubFolderTreeMetadata
from waterbutler.providers.github.metadata import GitHubFileContentMetadata
from waterbutler.providers.github.metadata import GitHubFolderContentMetadata
@pytest.fixture
def auth():
    """Auth payload identifying the acting user (display name and email)."""
    return dict(name='cat', email='cat@cat.com')
@pytest.fixture
def credentials():
    """Provider credentials: the GitHub personal access token."""
    return dict(token='naps')
@pytest.fixture
def settings():
    """Provider settings naming the target repository (owner/repo)."""
    return dict(owner='cat', repo='food')
@pytest.fixture
def file_content():
    """Raw bytes used as the body of the test upload file."""
    return b'hungry'
@pytest.fixture
def file_like(file_content):
    """In-memory file-like object wrapping ``file_content``."""
    return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
    """WaterButler stream reader over the in-memory file."""
    return streams.FileStreamReader(file_like)
@pytest.fixture
def upload_response():
    """Canned GitHub contents-API response for a file create/update (PUT)."""
    return {
        "content": {
            "name": "hello.txt",
            "path": "notes/hello.txt",
            "sha": "95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
            "size": 9,
            "url": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
            "html_url": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt",
            "git_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
            "type": "file",
            "_links": {
                "self": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
                "git": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
                "html": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt"
            }
        },
        "commit": {
            "sha": "7638417db6d59f3c431d3e1f261cc637155684cd",
            "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/7638417db6d59f3c431d3e1f261cc637155684cd",
            "html_url": "https://github.com/octocat/Hello-World/git/commit/7638417db6d59f3c431d3e1f261cc637155684cd",
            "author": {
                "date": "2010-04-10T14:10:01-07:00",
                "name": "Scott Chacon",
                "email": "schacon@gmail.com"
            },
            "committer": {
                "date": "2010-04-10T14:10:01-07:00",
                "name": "Scott Chacon",
                "email": "schacon@gmail.com"
            },
            "message": "my commit message",
            "tree": {
                "url": "https://api.github.com/repos/octocat/Hello-World/git/trees/691272480426f78a0138979dd3ce63b77f706feb",
                "sha": "691272480426f78a0138979dd3ce63b77f706feb"
            },
            "parents": [
                {
                    "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/1acc419d4d6a9ce985db7be48c6349a0475975b5",
                    "html_url": "https://github.com/octocat/Hello-World/git/commit/1acc419d4d6a9ce985db7be48c6349a0475975b5",
                    "sha": "1acc419d4d6a9ce985db7be48c6349a0475975b5"
                }
            ]
        }
    }
@pytest.fixture
def create_folder_response():
    """Canned contents-API response for the ``.gitkeep`` PUT that implements
    folder creation (GitHub has no empty directories)."""
    return {
        "content": {
            "name": ".gitkeep",
            "path": "i/like/trains/.gitkeep",
            "sha": "95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
            "size": 9,
            "url": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
            "html_url": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt",
            "git_url": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
            "type": "file",
            "_links": {
                "self": "https://api.github.com/repos/octocat/Hello-World/contents/notes/hello.txt",
                "git": "https://api.github.com/repos/octocat/Hello-World/git/blobs/95b966ae1c166bd92f8ae7d1c313e738c731dfc3",
                "html": "https://github.com/octocat/Hello-World/blob/master/notes/hello.txt"
            }
        },
        "commit": {
            "sha": "7638417db6d59f3c431d3e1f261cc637155684cd",
            "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/7638417db6d59f3c431d3e1f261cc637155684cd",
            "html_url": "https://github.com/octocat/Hello-World/git/commit/7638417db6d59f3c431d3e1f261cc637155684cd",
            "author": {
                "date": "2010-04-10T14:10:01-07:00",
                "name": "Scott Chacon",
                "email": "schacon@gmail.com"
            },
            "committer": {
                "date": "2010-04-10T14:10:01-07:00",
                "name": "Scott Chacon",
                "email": "schacon@gmail.com"
            },
            "message": "my commit message",
            "tree": {
                "url": "https://api.github.com/repos/octocat/Hello-World/git/trees/691272480426f78a0138979dd3ce63b77f706feb",
                "sha": "691272480426f78a0138979dd3ce63b77f706feb"
            },
            "parents": [
                {
                    "url": "https://api.github.com/repos/octocat/Hello-World/git/commits/1acc419d4d6a9ce985db7be48c6349a0475975b5",
                    "html_url": "https://github.com/octocat/Hello-World/git/commit/1acc419d4d6a9ce985db7be48c6349a0475975b5",
                    "sha": "1acc419d4d6a9ce985db7be48c6349a0475975b5"
                }
            ]
        }
    }
@pytest.fixture
def repo_metadata():
    """Canned GitHub "get repository" response for octocat/Hello-World.

    Used to pre-populate the provider fixture's ``_repo`` cache; notably
    supplies ``default_branch`` ('master') and read-only ``permissions``.
    """
    return {
        'full_name': 'octocat/Hello-World',
        'permissions': {
            'push': False,
            'admin': False,
            'pull': True
        },
        'has_downloads': True,
        'notifications_url': 'https://api.github.com/repos/octocat/Hello-World/notifications{?since,all,participating}',
        'releases_url': 'https://api.github.com/repos/octocat/Hello-World/releases{/id}',
        'downloads_url': 'https://api.github.com/repos/octocat/Hello-World/downloads',
        'merges_url': 'https://api.github.com/repos/octocat/Hello-World/merges',
        'owner': {
            'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
            'organizations_url': 'https://api.github.com/users/octocat/orgs',
            'type': 'User',
            'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
            'url': 'https://api.github.com/users/octocat',
            'html_url': 'https://github.com/octocat',
            'received_events_url': 'https://api.github.com/users/octocat/received_events',
            'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
            'site_admin': False,
            'gravatar_id': '',
            'repos_url': 'https://api.github.com/users/octocat/repos',
            'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
            'id': 583231,
            'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
            'login': 'octocat',
            'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
            'followers_url': 'https://api.github.com/users/octocat/followers'
        },
        'html_url': 'https://github.com/octocat/Hello-World',
        'comments_url': 'https://api.github.com/repos/octocat/Hello-World/comments{/number}',
        'git_url': 'git://github.com/octocat/Hello-World.git',
        'ssh_url': 'git@github.com:octocat/Hello-World.git',
        'language': None,
        'pulls_url': 'https://api.github.com/repos/octocat/Hello-World/pulls{/number}',
        'subscribers_count': 1850,
        'forks_count': 1085,
        'watchers_count': 1407,
        'id': 1296269,
        'keys_url': 'https://api.github.com/repos/octocat/Hello-World/keys{/key_id}',
        'default_branch': 'master',
        'stargazers_count': 1407,
        'tags_url': 'https://api.github.com/repos/octocat/Hello-World/tags',
        'clone_url': 'https://github.com/octocat/Hello-World.git',
        'homepage': '',
        'forks_url': 'https://api.github.com/repos/octocat/Hello-World/forks',
        'branches_url': 'https://api.github.com/repos/octocat/Hello-World/branches{/branch}',
        'url': 'https://api.github.com/repos/octocat/Hello-World',
        'contents_url': 'https://api.github.com/repos/octocat/Hello-World/contents/{+path}',
        'hooks_url': 'https://api.github.com/repos/octocat/Hello-World/hooks',
        'git_tags_url': 'https://api.github.com/repos/octocat/Hello-World/git/tags{/sha}',
        'statuses_url': 'https://api.github.com/repos/octocat/Hello-World/statuses/{sha}',
        'trees_url': 'https://api.github.com/repos/octocat/Hello-World/git/trees{/sha}',
        'contributors_url': 'https://api.github.com/repos/octocat/Hello-World/contributors',
        'open_issues': 126,
        'has_pages': False,
        'pushed_at': '2014-06-11T21:51:23Z',
        'network_count': 1085,
        'commits_url': 'https://api.github.com/repos/octocat/Hello-World/commits{/sha}',
        'git_commits_url': 'https://api.github.com/repos/octocat/Hello-World/git/commits{/sha}',
        'svn_url': 'https://github.com/octocat/Hello-World',
        'forks': 1085,
        'fork': False,
        'subscription_url': 'https://api.github.com/repos/octocat/Hello-World/subscription',
        'archive_url': 'https://api.github.com/repos/octocat/Hello-World/{archive_format}{/ref}',
        'subscribers_url': 'https://api.github.com/repos/octocat/Hello-World/subscribers',
        'description': 'This your first repo!',
        'blobs_url': 'https://api.github.com/repos/octocat/Hello-World/git/blobs{/sha}',
        'teams_url': 'https://api.github.com/repos/octocat/Hello-World/teams',
        'compare_url': 'https://api.github.com/repos/octocat/Hello-World/compare/{base}...{head}',
        'issues_url': 'https://api.github.com/repos/octocat/Hello-World/issues{/number}',
        'stargazers_url': 'https://api.github.com/repos/octocat/Hello-World/stargazers',
        'private': False,
        'created_at': '2011-01-26T19:01:12Z',
        'issue_comment_url': 'https://api.github.com/repos/octocat/Hello-World/issues/comments/{number}',
        'has_issues': True,
        'milestones_url': 'https://api.github.com/repos/octocat/Hello-World/milestones{/number}',
        'issue_events_url': 'https://api.github.com/repos/octocat/Hello-World/issues/events{/number}',
        'languages_url': 'https://api.github.com/repos/octocat/Hello-World/languages',
        'name': 'Hello-World',
        'mirror_url': None,
        'has_wiki': True,
        'updated_at': '2014-12-12T16:45:49Z',
        'watchers': 1407,
        'open_issues_count': 126,
        'labels_url': 'https://api.github.com/repos/octocat/Hello-World/labels{/name}',
        'collaborators_url': 'https://api.github.com/repos/octocat/Hello-World/collaborators{/collaborator}',
        'assignees_url': 'https://api.github.com/repos/octocat/Hello-World/assignees{/user}',
        'size': 558,
        'git_refs_url': 'https://api.github.com/repos/octocat/Hello-World/git/refs{/sha}',
        'events_url': 'https://api.github.com/repos/octocat/Hello-World/events'
    }
@pytest.fixture
def branch_metadata():
    """Canned GitHub "get branch" response for master; the nested
    ``commit.commit.tree.sha`` is what validate_v1_path resolves trees from."""
    return {
        'commit': {
            'html_url': 'https://github.com/octocat/Hello-World/commit/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
            'url': 'https://api.github.com/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
            'committer': {
                'html_url': 'https://github.com/octocat',
                'login': 'octocat',
                'type': 'User',
                'gravatar_id': '',
                'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
                'received_events_url': 'https://api.github.com/users/octocat/received_events',
                'id': 583231,
                'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
                'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
                'organizations_url': 'https://api.github.com/users/octocat/orgs',
                'url': 'https://api.github.com/users/octocat',
                'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
                'followers_url': 'https://api.github.com/users/octocat/followers',
                'repos_url': 'https://api.github.com/users/octocat/repos',
                'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
                'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
                'site_admin': False
            },
            'parents': [{
                'html_url': 'https://github.com/octocat/Hello-World/commit/553c2077f0edc3d5dc5d17262f6aa498e69d6f8e',
                'url': 'https://api.github.com/repos/octocat/Hello-World/commits/553c2077f0edc3d5dc5d17262f6aa498e69d6f8e',
                'sha': '553c2077f0edc3d5dc5d17262f6aa498e69d6f8e'
            }, {
                'html_url': 'https://github.com/octocat/Hello-World/commit/762941318ee16e59dabbacb1b4049eec22f0d303',
                'url': 'https://api.github.com/repos/octocat/Hello-World/commits/762941318ee16e59dabbacb1b4049eec22f0d303',
                'sha': '762941318ee16e59dabbacb1b4049eec22f0d303'
            }],
            'sha': '7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
            'author': {
                'html_url': 'https://github.com/octocat',
                'login': 'octocat',
                'type': 'User',
                'gravatar_id': '',
                'avatar_url': 'https://avatars.githubusercontent.com/u/583231?v=3',
                'received_events_url': 'https://api.github.com/users/octocat/received_events',
                'id': 583231,
                'starred_url': 'https://api.github.com/users/octocat/starred{/owner}{/repo}',
                'subscriptions_url': 'https://api.github.com/users/octocat/subscriptions',
                'organizations_url': 'https://api.github.com/users/octocat/orgs',
                'url': 'https://api.github.com/users/octocat',
                'following_url': 'https://api.github.com/users/octocat/following{/other_user}',
                'followers_url': 'https://api.github.com/users/octocat/followers',
                'repos_url': 'https://api.github.com/users/octocat/repos',
                'events_url': 'https://api.github.com/users/octocat/events{/privacy}',
                'gists_url': 'https://api.github.com/users/octocat/gists{/gist_id}',
                'site_admin': False
            },
            'comments_url': 'https://api.github.com/repos/octocat/Hello-World/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d/comments',
            'commit': {
                'url': 'https://api.github.com/repos/octocat/Hello-World/git/commits/7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',
                'message': 'Merge pull request #6 from Spaceghost/patch-1\n\nNew line at end of file.',
                'committer': {
                    'email': 'octocat@nowhere.com',
                    'date': '2012-03-06T23:06:50Z',
                    'name': 'The Octocat'
                },
                'tree': {
                    'url': 'https://api.github.com/repos/octocat/Hello-World/git/trees/b4eecafa9be2f2006ce1b709d6857b07069b4608',
                    'sha': 'b4eecafa9be2f2006ce1b709d6857b07069b4608'
                },
                'comment_count': 51,
                'author': {
                    'email': 'octocat@nowhere.com',
                    'date': '2012-03-06T23:06:50Z',
                    'name': 'The Octocat'
                }
            }
        },
        '_links': {
            'html': 'https://github.com/octocat/Hello-World/tree/master',
            'self': 'https://api.github.com/repos/octocat/Hello-World/branches/master'
        },
        'name': 'master'
    }
@pytest.fixture
def content_repo_metadata_root():
    """Canned contents-API listing of the repository root: one empty file,
    one directory, and one non-empty file."""
    return [
        {
            'path': 'file.txt',
            'type': 'file',
            'html_url': 'https://github.com/icereval/test/blob/master/file.txt',
            'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
            'url': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
            'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
            '_links': {
                'git': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
                'self': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
                'html': 'https://github.com/icereval/test/blob/master/file.txt'
            },
            'name': 'file.txt',
            'size': 0,
            'download_url': 'https://raw.githubusercontent.com/icereval/test/master/file.txt'
        }, {
            'path': 'level1',
            'type': 'dir',
            'html_url': 'https://github.com/icereval/test/tree/master/level1',
            'git_url': 'https://api.github.com/repos/icereval/test/git/trees/bc1087ebfe8354a684bf9f8b75517784143dde86',
            'url': 'https://api.github.com/repos/icereval/test/contents/level1?ref=master',
            'sha': 'bc1087ebfe8354a684bf9f8b75517784143dde86',
            '_links': {
                'git': 'https://api.github.com/repos/icereval/test/git/trees/bc1087ebfe8354a684bf9f8b75517784143dde86',
                'self': 'https://api.github.com/repos/icereval/test/contents/level1?ref=master',
                'html': 'https://github.com/icereval/test/tree/master/level1'
            },
            'name': 'level1',
            'size': 0,
            'download_url': None
        }, {
            'path': 'test.rst',
            'type': 'file',
            'html_url': 'https://github.com/icereval/test/blob/master/test.rst',
            'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
            'url': 'https://api.github.com/repos/icereval/test/contents/test.rst?ref=master',
            'sha': 'ca39bcbf849231525ce9e775935fcb18ed477b5a',
            '_links': {
                'git': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
                'self': 'https://api.github.com/repos/icereval/test/contents/test.rst?ref=master',
                'html': 'https://github.com/icereval/test/blob/master/test.rst'
            },
            'name': 'test.rst',
            'size': 190,
            'download_url': 'https://raw.githubusercontent.com/icereval/test/master/test.rst'
        }
    ]
@pytest.fixture
def repo_tree_metadata_root():
    """Canned git "trees" API response (recursive listing of the root)."""
    return {
        'tree': [
            {
                'url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
                'size': 0,
                'type': 'blob',
                'path': 'file.txt',
                'mode': '100644',
                'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
            },
            {
                'type': 'tree',
                'url': 'https://api.github.com/repos/icereval/test/git/trees/05353097666f449344b7f69036c70a52dc504088',
                'path': 'level1',
                'mode': '040000',
                'sha': '05353097666f449344b7f69036c70a52dc504088'
            },
            {
                'url': 'https://api.github.com/repos/icereval/test/git/blobs/ca39bcbf849231525ce9e775935fcb18ed477b5a',
                'size': 190,
                'type': 'blob',
                'path': 'test.rst',
                'mode': '100644',
                'sha': 'ca39bcbf849231525ce9e775935fcb18ed477b5a'
            }
        ],
        'url': 'https://api.github.com/repos/icereval/test/git/trees/cd83e4a08261a54f1c4630fbb1de34d1e48f0c8a',
        'truncated': False,
        'sha': 'cd83e4a08261a54f1c4630fbb1de34d1e48f0c8a'
    }
@pytest.fixture
def content_repo_metadata_root_file_txt():
    """Canned contents-API response for a single empty file (file.txt)."""
    return {
        '_links': {
            'git': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
            'self': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
            'html': 'https://github.com/icereval/test/blob/master/file.txt'
        },
        'content': '',
        'url': 'https://api.github.com/repos/icereval/test/contents/file.txt?ref=master',
        'html_url': 'https://github.com/icereval/test/blob/master/file.txt',
        'download_url': 'https://raw.githubusercontent.com/icereval/test/master/file.txt',
        'name': 'file.txt',
        'type': 'file',
        'sha': 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
        'encoding': 'base64',
        'git_url': 'https://api.github.com/repos/icereval/test/git/blobs/e69de29bb2d1d6434b8b29ae775ad8c2e48c5391',
        'path': 'file.txt',
        'size': 0
    }
@pytest.fixture
def nested_tree_metadata():
    """Canned recursive git tree with nested folders (beta/ and beta/delta/)."""
    return {
        'tree': [
            {'path': 'alpha.txt', 'type': 'blob', 'mode': '100644', 'size': 11, 'url': 'https://api.github.com/repos/felliott/wb-testing/git/blobs/3e72bca321b45548d7a7cfd1e8570afec6e5f2f1', 'sha': '3e72bca321b45548d7a7cfd1e8570afec6e5f2f1'},
            {'path': 'beta', 'type': 'tree', 'mode': '040000', 'url': 'https://api.github.com/repos/felliott/wb-testing/git/trees/48cf869b1f09e4b0cfa765ce3c0812fb719973e9', 'sha': '48cf869b1f09e4b0cfa765ce3c0812fb719973e9'},
            {'path': 'beta/gamma.txt', 'type': 'blob', 'mode': '100644', 'size': 11, 'url': 'https://api.github.com/repos/felliott/wb-testing/git/blobs/f59573b4169cee7da926e6508961438952ba0aaf', 'sha': 'f59573b4169cee7da926e6508961438952ba0aaf'},
            {'path': 'beta/delta', 'type': 'tree', 'mode': '040000', 'url': 'https://api.github.com/repos/felliott/wb-testing/git/trees/bb0c11bb86d7fc4807f6c8dc2a2bb9513802bf33', 'sha': 'bb0c11bb86d7fc4807f6c8dc2a2bb9513802bf33'},
            {'path': 'beta/delta/epsilon.txt', 'type': 'blob', 'mode': '100644', 'size': 13, 'url': 'https://api.github.com/repos/felliott/wb-testing/git/blobs/44b20789279ae90266791ba07f87a3ab42264690', 'sha': '44b20789279ae90266791ba07f87a3ab42264690'},
        ],
        'truncated': False,
        'url': 'https://api.github.com/repos/felliott/wb-testing/git/trees/076cc413680157d4dea4c17831687873998a4928',
        'sha': '076cc413680157d4dea4c17831687873998a4928'
    }
@pytest.fixture
def provider(auth, credentials, settings, repo_metadata):
    """GitHubProvider wired with test credentials.

    ``_repo`` is pre-populated so tests do not need to mock the repo
    lookup; the default branch comes from the canned repo metadata.
    """
    provider = GitHubProvider(auth, credentials, settings)
    provider._repo = repo_metadata
    provider.default_branch = repo_metadata['default_branch']
    return provider
class TestHelpers:
    """Sanity checks for small provider helpers."""

    async def test_build_repo_url(self, provider, settings):
        """build_repo_url prefixes the segments with repos/<owner>/<repo>."""
        assert provider.build_repo_url('contents') == provider.build_url(
            'repos', settings['owner'], settings['repo'], 'contents')

    async def test_committer(self, auth, provider):
        """The committer dict is assembled from the auth name and email."""
        assert provider.committer == {'name': auth['name'], 'email': auth['email']}
class TestValidatePath:
    """Path validation: branch/ref propagation and file-vs-folder checks."""

    def test_child_gets_branch(self):
        """Children of a GitHubPath inherit the parent's branch identifier."""
        parent = GitHubPath('/', _ids=[('master', None)], folder=True)
        child_file = parent.child('childfile', folder=False)
        assert child_file.identifier[0] == 'master'
        child_folder = parent.child('childfolder', folder=True)
        assert child_folder.identifier[0] == 'master'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_file(self, provider, branch_metadata, repo_tree_metadata_root):
        """A blob path validates without a trailing slash; with one it 404s."""
        branch_url = provider.build_repo_url('branches', provider.default_branch)
        tree_url = provider.build_repo_url('git', 'trees',
                                           branch_metadata['commit']['commit']['tree']['sha'],
                                           recursive=1)
        aiohttpretty.register_json_uri('GET', branch_url, body=branch_metadata)
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)

        blob_path = 'file.txt'
        try:
            wb_path_v1 = await provider.validate_v1_path('/' + blob_path)
        except Exception as exc:
            pytest.fail(str(exc))

        # The folder form of a file path must be rejected.
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.validate_v1_path('/' + blob_path + '/')
        assert exc.value.code == client.NOT_FOUND

        # v0 validation of the same path yields an equal WaterButlerPath.
        wb_path_v0 = await provider.validate_path('/' + blob_path)
        assert wb_path_v1 == wb_path_v0

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_validate_v1_path_folder(self, provider, branch_metadata, repo_tree_metadata_root):
        """A tree path validates with a trailing slash; without one it 404s."""
        branch_url = provider.build_repo_url('branches', provider.default_branch)
        tree_url = provider.build_repo_url('git', 'trees',
                                           branch_metadata['commit']['commit']['tree']['sha'],
                                           recursive=1)
        aiohttpretty.register_json_uri('GET', branch_url, body=branch_metadata)
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)

        tree_path = 'level1'
        try:
            wb_path_v1 = await provider.validate_v1_path('/' + tree_path + '/')
        except Exception as exc:
            pytest.fail(str(exc))

        # The file form of a folder path must be rejected.
        with pytest.raises(exceptions.NotFoundError) as exc:
            await provider.validate_v1_path('/' + tree_path)
        assert exc.value.code == client.NOT_FOUND

        wb_path_v0 = await provider.validate_path('/' + tree_path + '/')
        assert wb_path_v1 == wb_path_v0

    @pytest.mark.asyncio
    async def test_reject_multiargs(self, provider):
        """Passing multiple values for ``ref`` is an invalid request."""
        with pytest.raises(exceptions.InvalidParameters) as exc:
            await provider.validate_v1_path('/foo', ref=['bar', 'baz'])
        assert exc.value.code == client.BAD_REQUEST
        with pytest.raises(exceptions.InvalidParameters) as exc:
            await provider.validate_path('/foo', ref=['bar', 'baz'])
        assert exc.value.code == client.BAD_REQUEST

    @pytest.mark.asyncio
    async def test_validate_path(self, provider):
        """With no branch/ref/fileSha, the default branch is used and the
        file sha part of the identifier is None."""
        path = await provider.validate_path('/this/is/my/path')
        assert path.is_dir is False
        assert path.is_file is True
        assert path.name == 'path'
        assert isinstance(path.identifier, tuple)
        assert path.identifier == (provider.default_branch, None)
        assert path.parts[0].identifier == (provider.default_branch, None)

    @pytest.mark.asyncio
    async def test_validate_path_passes_branch(self, provider):
        """An explicit ``branch`` kwarg overrides the default branch."""
        path = await provider.validate_path('/this/is/my/path', branch='NotMaster')
        assert path.is_dir is False
        assert path.is_file is True
        assert path.name == 'path'
        assert isinstance(path.identifier, tuple)
        assert path.identifier == ('NotMaster', None)
        assert path.parts[0].identifier == ('NotMaster', None)

    @pytest.mark.asyncio
    async def test_validate_path_passes_ref(self, provider):
        """An explicit ``ref`` kwarg behaves the same as ``branch``."""
        path = await provider.validate_path('/this/is/my/path', ref='NotMaster')
        assert path.is_dir is False
        assert path.is_file is True
        assert path.name == 'path'
        assert isinstance(path.identifier, tuple)
        assert path.identifier == ('NotMaster', None)
        assert path.parts[0].identifier == ('NotMaster', None)

    @pytest.mark.asyncio
    async def test_validate_path_passes_file_sha(self, provider):
        """``fileSha`` lands in the leaf identifier only, not the parents."""
        path = await provider.validate_path('/this/is/my/path', fileSha='Thisisasha')
        assert path.is_dir is False
        assert path.is_file is True
        assert path.name == 'path'
        assert isinstance(path.identifier, tuple)
        assert path.identifier == (provider.default_branch, 'Thisisasha')
        assert path.parts[0].identifier == (provider.default_branch, None)
class TestCRUD:
    """Download behavior for the GitHub provider.

    The download flow under test: query the latest commit touching the
    path (to obtain a tree sha), fetch that tree recursively to resolve
    the file's blob sha, then stream the blob.

    TODO: restore coverage for download-by-file-sha, download with a bad
    upstream status, upload (create and update), and delete (with and
    without an explicit branch). Skeletons for these lived here as
    commented-out code and were removed; see VCS history.
    """

    def _register_download_mocks(self, provider, path, ref, tree_metadata, commit_sha):
        """Register the commit, tree, and blob endpoints a download hits.

        ``ref`` is the tree sha reported by the mocked commit lookup;
        ``commit_sha`` is the ref/revision the provider is expected to
        query the commits endpoint with. The blob body is b'delicious'.
        """
        file_sha = tree_metadata['tree'][0]['sha']
        blob_url = provider.build_repo_url('git', 'blobs', file_sha)
        tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
        commit_url = provider.build_repo_url('commits', path=path.path.lstrip('/'),
                                             sha=commit_sha)
        aiohttpretty.register_uri('GET', blob_url, body=b'delicious')
        aiohttpretty.register_json_uri('GET', tree_url, body=tree_metadata)
        aiohttpretty.register_json_uri('GET', commit_url,
                                       body=[{'commit': {'tree': {'sha': ref}}}])

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_by_path(self, provider, repo_tree_metadata_root):
        """Download a file from the default branch."""
        ref = hashlib.sha1().hexdigest()
        path = await provider.validate_path('/file.txt')
        self._register_download_mocks(provider, path, ref, repo_tree_metadata_root,
                                      commit_sha=path.identifier[0])

        result = await provider.download(path)
        content = await result.read()

        assert content == b'delicious'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_by_path_ref_branch(self, provider, repo_tree_metadata_root):
        """Download a file from an explicitly requested branch."""
        ref = hashlib.sha1().hexdigest()
        path = await provider.validate_path('/file.txt', branch='other_branch')
        self._register_download_mocks(provider, path, ref, repo_tree_metadata_root,
                                      commit_sha=path.identifier[0])

        result = await provider.download(path)
        content = await result.read()

        assert content == b'delicious'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_download_by_path_revision(self, provider, repo_tree_metadata_root):
        """An explicit ``revision`` overrides the branch in the commit lookup."""
        ref = hashlib.sha1().hexdigest()
        path = await provider.validate_path('/file.txt', branch='other_branch')
        self._register_download_mocks(provider, path, ref, repo_tree_metadata_root,
                                      commit_sha='Just a test')

        result = await provider.download(path, revision='Just a test')
        content = await result.read()

        assert content == b'delicious'
class TestMetadata:
    """Metadata lookups for files and folders."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_file(self, provider, repo_metadata, repo_tree_metadata_root):
        """File metadata is assembled from the tree entry plus the latest
        commit touching the path."""
        ref = hashlib.sha1().hexdigest()
        path = await provider.validate_path('/file.txt')
        tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
        commit_url = provider.build_repo_url('commits', path=path.path.lstrip('/'), sha=path.identifier[0])
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
        aiohttpretty.register_json_uri('GET', commit_url, body=[{
            'commit': {
                'tree': {'sha': ref},
                'author': {'date': '1970-01-02T03:04:05Z'}
            },
        }])

        result = await provider.metadata(path)

        item = repo_tree_metadata_root['tree'][0]
        web_view = provider._web_view(path=path)
        assert result == GitHubFileTreeMetadata(item, web_view=web_view, commit={
            'tree': {'sha': ref}, 'author': {'date': '1970-01-02T03:04:05Z'}
        }, ref=path.identifier[0])

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_doesnt_exist(self, provider, repo_metadata, repo_tree_metadata_root):
        """An empty commit list for the path maps to NotFoundError."""
        ref = hashlib.sha1().hexdigest()
        path = await provider.validate_path('/file.txt')
        tree_url = provider.build_repo_url('git', 'trees', ref, recursive=1)
        commit_url = provider.build_repo_url('commits', path=path.path.lstrip('/'), sha=path.identifier[0])
        aiohttpretty.register_json_uri('GET', tree_url, body=repo_tree_metadata_root)
        aiohttpretty.register_json_uri('GET', commit_url, body=[])

        with pytest.raises(exceptions.NotFoundError):
            await provider.metadata(path)

    # TODO: Additional Tests
    # async def test_metadata_root_file_txt_branch(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
    # async def test_metadata_root_file_txt_commit_sha(self, provider, repo_metadata, branch_metadata, repo_metadata_root):

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_metadata_folder_root(self, provider, repo_metadata, content_repo_metadata_root):
        """Root listing yields folder/file metadata per contents-API entry."""
        path = await provider.validate_path('/')
        url = provider.build_repo_url('contents', path.path, ref=provider.default_branch)
        aiohttpretty.register_json_uri('GET', url, body=content_repo_metadata_root)

        result = await provider.metadata(path)

        ret = []
        for item in content_repo_metadata_root:
            if item['type'] == 'dir':
                ret.append(GitHubFolderContentMetadata(item, ref=provider.default_branch))
            else:
                ret.append(GitHubFileContentMetadata(item, web_view=item['html_url'], ref=provider.default_branch))
        assert result == ret

    # TODO: Additional Tests
    # async def test_metadata_non_root_folder(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
    # async def test_metadata_non_root_folder_branch(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
    # async def test_metadata_non_root_folder_commit_sha(self, provider, repo_metadata, branch_metadata, repo_metadata_root):
class TestCreateFolder:
    """create_folder() behaviour — implemented server-side as a PUT of a '.gitkeep' file."""

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_errors_out(self, provider, repo_metadata):
        # A 400 from GitHub surfaces as CreateFolderError carrying the same code.
        path = await provider.validate_path('/Imarealboy/')
        url = provider.build_repo_url('contents', path.child('.gitkeep').path)
        aiohttpretty.register_uri('PUT', url, status=400)
        with pytest.raises(exceptions.CreateFolderError) as e:
            await provider.create_folder(path)
        assert e.value.code == 400

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_must_be_folder(self, provider, repo_metadata):
        # A file path (no trailing slash) is rejected; note no HTTP mock is registered.
        path = await provider.validate_path('/Imarealboy')
        with pytest.raises(exceptions.CreateFolderError) as e:
            await provider.create_folder(path)

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_already_exists(self, provider, repo_metadata):
        # GitHub signals "already exists" via a 422 whose message mentions a missing "sha";
        # the provider translates that into a 409 FolderNamingConflict.
        path = await provider.validate_path('/Imarealboy/')
        url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
        aiohttpretty.register_json_uri('PUT', url, status=422, body={
            'message': 'Invalid request.\n\n"sha" wasn\'t supplied.'
        })
        with pytest.raises(exceptions.FolderNamingConflict) as e:
            await provider.create_folder(path)
        assert e.value.code == 409
        assert e.value.message == 'Cannot create folder "Imarealboy" because a file or folder already exists at path "/Imarealboy/"'

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_raises_other_422(self, provider, repo_metadata):
        # Any other 422 is passed through as CreateFolderError with the raw payload.
        path = await provider.validate_path('/Imarealboy/')
        url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
        aiohttpretty.register_json_uri('PUT', url, status=422, body={
            'message': 'github no likey'
        })
        with pytest.raises(exceptions.CreateFolderError) as e:
            await provider.create_folder(path)
        assert e.value.code == 422
        assert e.value.data == {'message': 'github no likey'}

    @pytest.mark.asyncio
    @pytest.mark.aiohttpretty
    async def test_returns_metadata(self, provider, repo_metadata, create_folder_response):
        # Success (201) yields folder metadata for the deepest new segment.
        path = await provider.validate_path('/i/like/trains/')
        url = provider.build_repo_url('contents', os.path.join(path.path, '.gitkeep'))
        aiohttpretty.register_json_uri('PUT', url, status=201, body=create_folder_response)
        metadata = await provider.create_folder(path)
        assert metadata.kind == 'folder'
        assert metadata.name == 'trains'
        assert metadata.path == '/i/like/trains/'
class TestUtilities:
    """Unit tests for the provider's internal git-tree manipulation helpers."""

    def test__path_exists_in_tree(self, provider, nested_tree_metadata):
        _ids = [('master', '')]
        # Entries present in the fixture tree are found (file and folder forms)...
        assert provider._path_exists_in_tree(nested_tree_metadata['tree'], GitHubPath('/alpha.txt', _ids=_ids))
        assert provider._path_exists_in_tree(nested_tree_metadata['tree'], GitHubPath('/beta/', _ids=_ids))
        # ...and absent entries are not.
        assert not provider._path_exists_in_tree(nested_tree_metadata['tree'], GitHubPath('/gaw-gai.txt', _ids=_ids))
        assert not provider._path_exists_in_tree(nested_tree_metadata['tree'], GitHubPath('/kaw-kai/', _ids=_ids))

    def test__remove_path_from_tree(self, provider, nested_tree_metadata):
        _ids = [('master', '')]
        # Removing a top-level file drops exactly that one entry.
        simple_file_tree = provider._remove_path_from_tree(nested_tree_metadata['tree'], GitHubPath('/alpha.txt', _ids=_ids))
        assert len(simple_file_tree) == (len(nested_tree_metadata['tree']) - 1)
        assert 'alpha.txt' not in [x['path'] for x in simple_file_tree]
        # Removing a folder drops the folder and everything beneath it.
        simple_folder_tree = provider._remove_path_from_tree(nested_tree_metadata['tree'], GitHubPath('/beta/', _ids=_ids))
        assert len(simple_folder_tree) == 1
        assert simple_folder_tree[0]['path'] == 'alpha.txt'
        nested_file_tree = provider._remove_path_from_tree(nested_tree_metadata['tree'], GitHubPath('/beta/gamma.txt', _ids=_ids))
        assert len(nested_file_tree) == (len(nested_tree_metadata['tree']) - 1)
        assert 'beta/gamma.txt' not in [x['path'] for x in nested_file_tree]
        nested_folder_tree = provider._remove_path_from_tree(nested_tree_metadata['tree'], GitHubPath('/beta/delta/', _ids=_ids))
        assert len(nested_folder_tree) == 3
        assert len([x for x in nested_folder_tree if x['path'].startswith('beta/delta')]) == 0
        # Paths not in the tree -- including prefixes of real names -- leave it untouched.
        missing_file_tree = provider._remove_path_from_tree(nested_tree_metadata['tree'], GitHubPath('/bet', _ids=_ids))
        assert missing_file_tree == nested_tree_metadata['tree']
        missing_folder_tree = provider._remove_path_from_tree(nested_tree_metadata['tree'], GitHubPath('/beta/gam/', _ids=_ids))
        # BUGFIX: previously asserted on missing_file_tree again, so this result was never checked.
        assert missing_folder_tree == nested_tree_metadata['tree']

    def test__reparent_blobs(self, provider, nested_tree_metadata):
        _ids = [('master', '')]
        # Renaming a single file rewrites its 'path' in place.
        file_rename_blobs = copy.deepcopy([x for x in nested_tree_metadata['tree'] if x['path'] == 'alpha.txt'])
        provider._reparent_blobs(file_rename_blobs, GitHubPath('/alpha.txt', _ids=_ids), GitHubPath('/zeta.txt', _ids=_ids))
        assert len(file_rename_blobs) == 1
        assert file_rename_blobs[0]['path'] == 'zeta.txt'
        # Renaming a folder rewrites the folder entry and every descendant's prefix.
        folder_rename_blobs = copy.deepcopy([x for x in nested_tree_metadata['tree'] if x['path'].startswith('beta')])
        provider._reparent_blobs(folder_rename_blobs, GitHubPath('/beta/', _ids=_ids), GitHubPath('/theta/', _ids=_ids))
        assert len(folder_rename_blobs) == 4  # beta/, gamma.txt, delta/, epsilon.txt
        assert len([x for x in folder_rename_blobs if x['path'].startswith('theta/')]) == 3  # gamma.txt, delta/, epsilon.txt
        assert len([x for x in folder_rename_blobs if x['path'] == 'theta']) == 1  # theta/

    def test__prune_subtrees(self, provider, nested_tree_metadata):
        # Pruning removes all 'tree' (folder) entries, keeping only blobs.
        pruned_tree = provider._prune_subtrees(nested_tree_metadata['tree'])
        assert len(pruned_tree) == 3  # alpha.txt, gamma.txt, epsilon.txt
        assert len([x for x in pruned_tree if x['type'] == 'tree']) == 0
| apache-2.0 |
tellesnobrega/sahara | sahara/plugins/cdh/deploy.py | 2 | 1826 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins import kerberos
def setup_kerberos_for_cluster(cluster, cloudera_utils):
    """Deploy and enable Kerberos on a CDH cluster.

    No-op when Kerberos security is not enabled for the cluster.

    :param cluster: sahara cluster object
    :param cloudera_utils: plugin's ClouderaUtils instance used for
        cluster-wide stop/start and config pushes
    """
    if kerberos.is_kerberos_security_enabled(cluster):
        manager = cloudera_utils.pu.get_manager(cluster)
        kerberos.deploy_infrastructure(cluster, manager)
        # NOTE(review): full stop -> push kerberos configs -> full start;
        # presumably the restart is required for the security configs to
        # take effect -- confirm against CM behaviour.
        cloudera_utils.full_cluster_stop(cluster)
        kerberos.prepare_policy_files(cluster)
        cloudera_utils.push_kerberos_configs(cluster)
        cloudera_utils.full_cluster_start(cluster)
        # Keytabs for HDFS nodes and the (single) Spark history server.
        kerberos.create_keytabs_for_map(
            cluster,
            {'hdfs': cloudera_utils.pu.get_hdfs_nodes(cluster),
             'spark': [cloudera_utils.pu.get_spark_historyserver(cluster)]})
def prepare_scaling_kerberized_cluster(cluster, cloudera_utils, instances):
    """Re-establish Kerberos client state after scaling a cluster.

    Does nothing unless Kerberos security is enabled for the cluster.
    """
    if not kerberos.is_kerberos_security_enabled(cluster):
        return

    # With an externally managed KDC the clients need no local server node.
    kdc_server = None
    if not kerberos.using_existing_kdc(cluster):
        kdc_server = cloudera_utils.pu.get_manager(cluster)

    kerberos.setup_clients(cluster, kdc_server)
    kerberos.prepare_policy_files(cluster)
    # manager can correctly handle updating configs
    cloudera_utils.push_kerberos_configs(cluster)
    kerberos.create_keytabs_for_map(
        cluster,
        {'hdfs': cloudera_utils.pu.get_hdfs_nodes(cluster, instances)})
| apache-2.0 |
jpush/jbox | Server/venv/lib/python3.5/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py | 1730 | 3405 | """A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
# Lazily populated cache of TreeBuilder classes, keyed by lowercased tree type.
treeBuilderCache = {}


def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Get a TreeBuilder class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive).
               Supported values are "dom", "etree" and "lxml".

    implementation - (applies to the "etree" and "dom" tree types) a module
                     implementing the tree type, e.g. xml.etree.ElementTree
                     or xml.etree.cElementTree.
    """
    kind = treeType.lower()
    if kind in treeBuilderCache:
        return treeBuilderCache[kind]

    if kind == "dom":
        from . import dom
        if implementation is None:
            # Sane default from the stdlib.
            from xml.dom import minidom
            implementation = minidom
        # NEVER cache here; caching is done in the dom submodule.
        return dom.getDomModule(implementation, **kwargs).TreeBuilder
    if kind == "etree":
        from . import etree
        if implementation is None:
            implementation = default_etree
        # NEVER cache here; caching is done in the etree submodule.
        return etree.getETreeModule(implementation, **kwargs).TreeBuilder
    if kind == "lxml":
        from . import etree_lxml
        treeBuilderCache[kind] = etree_lxml.TreeBuilder
        return treeBuilderCache[kind]

    raise ValueError("""Unrecognised treebuilder "%s" """ % kind)
| mit |
jruiperezv/ANALYSE | common/djangoapps/session_inactivity_timeout/middleware.py | 228 | 1981 | """
Middleware to auto-expire inactive sessions after N seconds, which is configurable in
settings.
To enable this feature, set in a settings.py:
SESSION_INACTIVITY_TIMEOUT_IN_SECS = 300
This was taken from StackOverflow (http://stackoverflow.com/questions/14830669/how-to-expire-django-session-in-5minutes)
"""
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib import auth
LAST_TOUCH_KEYNAME = 'SessionInactivityTimeout:last_touch'
class SessionInactivityTimeout(object):
    """
    Middleware class to keep track of activity on a given session.

    Logs the user out once the gap between two requests exceeds
    SESSION_INACTIVITY_TIMEOUT_IN_SECONDS (feature is off when the
    setting is absent or falsy).
    """
    def process_request(self, request):
        """
        Standard entry point for processing requests in Django
        """
        if not hasattr(request, "user") or not request.user.is_authenticated():
            # Can't log out if not logged in
            return

        timeout_in_seconds = getattr(settings, "SESSION_INACTIVITY_TIMEOUT_IN_SECONDS", None)

        # Do we have this feature enabled?
        if timeout_in_seconds:
            # what time is it now?
            utc_now = datetime.utcnow()

            # Get the last time user made a request to server, which is stored in session data
            # NOTE(review): a datetime object is stored in the session, so this
            # needs a session serializer that handles datetimes (e.g. pickle) --
            # confirm against the project's SESSION_SERIALIZER setting.
            last_touch = request.session.get(LAST_TOUCH_KEYNAME)

            # have we stored a 'last visited' in session? NOTE: first time access after login
            # this key will not be present in the session data
            if last_touch:
                # compute the delta since last time user came to the server
                time_since_last_activity = utc_now - last_touch

                # did we exceed the timeout limit?
                if time_since_last_activity > timedelta(seconds=timeout_in_seconds):
                    # yes? Then log the user out
                    del request.session[LAST_TOUCH_KEYNAME]
                    auth.logout(request)
                    return

            request.session[LAST_TOUCH_KEYNAME] = utc_now
| agpl-3.0 |
jdemel/gnuradio | gnuradio-runtime/python/gnuradio/gr/qa_tag_utils.py | 3 | 3161 | #!/usr/bin/env python
#
# Copyright 2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import print_function
from gnuradio import gr, gr_unittest
import pmt
class test_tag_utils (gr_unittest.TestCase):
    """QA tests for the gr tag <-> Python conversion and comparison helpers."""

    def setUp (self):
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def test_001(self):
        # tag_to_python unwraps the PMT key/value pairs into plain Python objects.
        t = gr.tag_t()
        t.offset = 10
        t.key = pmt.string_to_symbol('key')
        t.value = pmt.from_long(23)
        t.srcid = pmt.from_bool(False)
        pt = gr.tag_to_python(t)
        self.assertEqual(pt.key, 'key')
        self.assertEqual(pt.value, 23)
        self.assertEqual(pt.offset, 10)

    def test_002(self):
        # python_to_tag accepts the same data as a dict, a list, or a tuple,
        # in (offset, key, value, srcid) order.
        offset = 10
        key = pmt.string_to_symbol('key')
        value = pmt.from_long(23)
        srcid = pmt.from_bool(False)

        format_dict = {'offset': offset,
                       'key': key,
                       'value': value,
                       'srcid': srcid}
        format_list = [offset, key, value, srcid]
        format_tuple = (offset, key, value, srcid)

        t_dict = gr.python_to_tag(format_dict)
        t_list = gr.python_to_tag(format_list)
        t_tuple = gr.python_to_tag(format_tuple)

        self.assertTrue(pmt.equal(t_dict.key, key))
        self.assertTrue(pmt.equal(t_dict.value, value))
        self.assertEqual(t_dict.offset, offset)
        self.assertTrue(pmt.equal(t_list.key, key))
        self.assertTrue(pmt.equal(t_list.value, value))
        self.assertEqual(t_list.offset, offset)
        self.assertTrue(pmt.equal(t_tuple.key, key))
        self.assertTrue(pmt.equal(t_tuple.value, value))
        self.assertEqual(t_tuple.offset, offset)

    def test_003(self):
        # tag_t_offset_compare_key() yields a key function usable with
        # sorted/min/max, ordering tags by their offset.
        offsets = (6, 3, 8)
        key = pmt.string_to_symbol('key')
        srcid = pmt.string_to_symbol('qa_tag_utils')
        tags = []
        for k in offsets:
            t = gr.tag_t()
            t.offset = k
            t.key = key
            t.value = pmt.from_long(k)
            t.srcid = srcid
            tags.append(t)
        # Sorting by offset key must line the tags up with the sorted offsets.
        for k, t in zip(sorted(offsets),
                        sorted(tags, key=gr.tag_t_offset_compare_key())):
            self.assertEqual(t.offset, k)
            self.assertTrue(pmt.equal(t.key, key))
            self.assertTrue(pmt.equal(t.value, pmt.from_long(k)))
            self.assertTrue(pmt.equal(t.srcid, srcid))

        tmin = min(tags, key=gr.tag_t_offset_compare_key())
        self.assertEqual(tmin.offset, min(offsets))
        self.assertTrue(pmt.equal(tmin.key, key))
        self.assertTrue(pmt.equal(tmin.value, pmt.from_long(min(offsets))))
        self.assertTrue(pmt.equal(tmin.srcid, srcid))

        tmax = max(tags, key=gr.tag_t_offset_compare_key())
        self.assertEqual(tmax.offset, max(offsets))
        self.assertTrue(pmt.equal(tmax.key, key))
        self.assertTrue(pmt.equal(tmax.value, pmt.from_long(max(offsets))))
        self.assertTrue(pmt.equal(tmax.srcid, srcid))
if __name__ == '__main__':
    # Run the QA suite; the stray debug print('hi') left here has been removed.
    gr_unittest.run(test_tag_utils, "test_tag_utils.xml")
| gpl-3.0 |
andersonresende/django | tests/createsuperuser/tests.py | 234 | 1928 | from django.contrib.auth import models
from django.contrib.auth.management.commands import changepassword
from django.core.management import call_command
from django.test import TestCase
from django.utils.six import StringIO
class MultiDBChangepasswordManagementCommandTestCase(TestCase):
    """Multi-database behaviour of the `changepassword` management command."""

    multi_db = True

    def setUp(self):
        # Create the target user on the non-default 'other' database.
        self.user = models.User.objects.db_manager('other').create_user(username='joe', password='qwerty')

    def test_that_changepassword_command_with_database_option_uses_given_db(self):
        """
        Executing the changepassword management command with a database option
        should operate on the specified DB
        """
        self.assertTrue(self.user.check_password('qwerty'))
        command = changepassword.Command()
        # Bypass interactive password prompting.
        command._get_pass = lambda *args: 'not qwerty'

        out = StringIO()
        command.execute(username="joe", database='other', stdout=out)
        command_output = out.getvalue().strip()

        self.assertEqual(command_output, "Changing password for user 'joe'\nPassword changed successfully for user 'joe'")
        # The change must be visible on the 'other' database.
        self.assertTrue(models.User.objects.using('other').get(username="joe").check_password("not qwerty"))
class MultiDBCreatesuperuserTestCase(TestCase):
    """Multi-database behaviour of the `createsuperuser` management command."""

    multi_db = True

    def test_createsuperuser_command_with_database_option(self):
        " createsuperuser command should operate on specified DB"
        new_io = StringIO()

        call_command(
            "createsuperuser",
            interactive=False,
            username="joe",
            email="joe@somewhere.org",
            database='other',
            stdout=new_io
        )

        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, 'Superuser created successfully.')

        # The superuser must exist on the 'other' database.
        u = models.User.objects.using('other').get(username="joe")
        self.assertEqual(u.email, 'joe@somewhere.org')

        new_io.close()
ljwolf/pysal_core | libpysal/weights/_contW_lists.py | 1 | 3800 | from ..cg.shapes import Polygon
import itertools as it
from sys import version_info
import collections
# Contiguity criteria accepted as ContiguityWeightsLists' wttype argument:
# QUEEN -- neighbors share at least one vertex; ROOK -- neighbors share an edge.
QUEEN = 1
ROOK = 2

if version_info[0] == 2:
    # Python 2 compatibility: alias the lazy izip/xrange to the py3 names
    # used throughout this module.
    zip = it.izip
    range = xrange

__author__ = "Jay Laura jlaura@asu.edu"
def _get_verts(pgon):
    """Return the list of boundary points for *pgon*.

    PySAL Polygons expose their points directly via ``vertices``; any other
    geometry is assumed shapely-like and unpacked by _get_boundary_points.
    """
    return pgon.vertices if isinstance(pgon, Polygon) else _get_boundary_points(pgon)
def _get_boundary_points(pgon):
    """
    Recursively handle polygons vs. multipolygons to
    extract the boundary point set from each.

    Returns a flat list of (x, y) tuples.
    """
    if pgon.type.lower() == 'polygon':
        bounds = pgon.boundary
        if bounds.type.lower() == 'linestring':
            # Single ring: pair up the parallel x/y coordinate arrays.
            return list(map(tuple, zip(*bounds.coords.xy)))
        elif bounds.type.lower() == 'multilinestring':
            # Multiple rings (e.g. polygon with holes): concatenate every ring's points.
            return list(it.chain(*(zip(*bound.coords.xy)
                                   for bound in bounds)))
        else:
            raise TypeError('Input Polygon has unrecognized boundary type: {}'
                            ''.format(bounds.type))
    elif pgon.type.lower() == 'multipolygon':
        # Recurse into each member polygon and concatenate the results.
        return list(it.chain(*(_get_boundary_points(part)
                               for part in pgon)))
    else:
        raise TypeError('Input shape must be Polygon or Multipolygon and was '
                        'instead: {}'.format(pgon.type))
class ContiguityWeightsLists:
    """
    Contiguity for a collection of polygons using high performance
    list, set, and dict containers.

    After construction, ``self.w`` maps each polygon index to the set of
    indices of its contiguous neighbors.
    """
    def __init__(self, collection, wttype=1):
        """
        Arguments
        =========
        collection: PySAL PolygonCollection
        wttype: int
            1: Queen (shared vertex)
            2: Rook  (shared edge)
        """
        self.collection = list(collection)
        self.wttype = wttype
        self.jcontiguity()

    def jcontiguity(self):
        """Build the neighbor map ``self.w`` for the configured contiguity type."""
        numPoly = len(self.collection)
        w = {i: set() for i in range(numPoly)}

        # ``geoms`` holds the hashable features (vertices for Queen, sorted
        # edges for Rook); ``offsets`` holds the owning polygon id, aligned
        # element-for-element with ``geoms``.
        geoms = []
        offsets = []

        if self.wttype == QUEEN:
            for n in range(numPoly):
                verts = _get_verts(self.collection[n])
                offsets += [n] * len(verts)
                geoms += verts
        elif self.wttype == ROOK:
            for n in range(numPoly):
                verts = _get_verts(self.collection[n])
                # Each consecutive vertex pair is an edge; sort the endpoints
                # so an edge hashes identically regardless of traversal order.
                for v in range(len(verts) - 1):
                    geoms.append(tuple(sorted([verts[v], verts[v + 1]])))
                offsets += [n] * (len(verts) - 1)
        else:
            raise Exception('Weight type {} Not Understood!'.format(self.wttype))

        # Group polygon ids by shared feature (vertex or edge).
        items = collections.defaultdict(set)
        for i, item in enumerate(geoms):
            items[item].add(offsets[i])

        # Any feature owned by more than one polygon links those polygons.
        # BUGFIX: was ``items.iteritems()``, which raises AttributeError on
        # Python 3 even though this module explicitly supports py3 (see the
        # zip/range compatibility shim at the top of the file).
        shared_features = [owners for owners in items.values() if len(owners) > 1]
        for owners in shared_features:
            for v in owners:
                w[v] |= owners
                # discard() replaces the previous bare try/except around
                # remove(): drop self-neighborship without masking errors.
                w[v].discard(v)

        self.w = w
| bsd-3-clause |
GroestlCoin/encompass | lib/paymentrequest_pb2.py | 55 | 12540 | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='paymentrequest.proto',
package='payments',
serialized_pb='\n\x14paymentrequest.proto\x12\x08payments\"+\n\x06Output\x12\x11\n\x06\x61mount\x18\x01 \x01(\x04:\x01\x30\x12\x0e\n\x06script\x18\x02 \x02(\x0c\"\xa3\x01\n\x0ePaymentDetails\x12\x15\n\x07network\x18\x01 \x01(\t:\x04main\x12!\n\x07outputs\x18\x02 \x03(\x0b\x32\x10.payments.Output\x12\x0c\n\x04time\x18\x03 \x02(\x04\x12\x0f\n\x07\x65xpires\x18\x04 \x01(\x04\x12\x0c\n\x04memo\x18\x05 \x01(\t\x12\x13\n\x0bpayment_url\x18\x06 \x01(\t\x12\x15\n\rmerchant_data\x18\x07 \x01(\x0c\"\x95\x01\n\x0ePaymentRequest\x12\"\n\x17payment_details_version\x18\x01 \x01(\r:\x01\x31\x12\x16\n\x08pki_type\x18\x02 \x01(\t:\x04none\x12\x10\n\x08pki_data\x18\x03 \x01(\x0c\x12\"\n\x1aserialized_payment_details\x18\x04 \x02(\x0c\x12\x11\n\tsignature\x18\x05 \x01(\x0c\"\'\n\x10X509Certificates\x12\x13\n\x0b\x63\x65rtificate\x18\x01 \x03(\x0c\"i\n\x07Payment\x12\x15\n\rmerchant_data\x18\x01 \x01(\x0c\x12\x14\n\x0ctransactions\x18\x02 \x03(\x0c\x12#\n\trefund_to\x18\x03 \x03(\x0b\x32\x10.payments.Output\x12\x0c\n\x04memo\x18\x04 \x01(\t\">\n\nPaymentACK\x12\"\n\x07payment\x18\x01 \x02(\x0b\x32\x11.payments.Payment\x12\x0c\n\x04memo\x18\x02 \x01(\tB(\n\x1eorg.bitcoin.protocols.paymentsB\x06Protos')
_OUTPUT = descriptor.Descriptor(
name='Output',
full_name='payments.Output',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='amount', full_name='payments.Output.amount', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='script', full_name='payments.Output.script', index=1,
number=2, type=12, cpp_type=9, label=2,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=34,
serialized_end=77,
)
_PAYMENTDETAILS = descriptor.Descriptor(
name='PaymentDetails',
full_name='payments.PaymentDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='network', full_name='payments.PaymentDetails.network', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("main", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='outputs', full_name='payments.PaymentDetails.outputs', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='time', full_name='payments.PaymentDetails.time', index=2,
number=3, type=4, cpp_type=4, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='expires', full_name='payments.PaymentDetails.expires', index=3,
number=4, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='memo', full_name='payments.PaymentDetails.memo', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='payment_url', full_name='payments.PaymentDetails.payment_url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='merchant_data', full_name='payments.PaymentDetails.merchant_data', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=80,
serialized_end=243,
)
_PAYMENTREQUEST = descriptor.Descriptor(
name='PaymentRequest',
full_name='payments.PaymentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='payment_details_version', full_name='payments.PaymentRequest.payment_details_version', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pki_type', full_name='payments.PaymentRequest.pki_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=unicode("none", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='pki_data', full_name='payments.PaymentRequest.pki_data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='serialized_payment_details', full_name='payments.PaymentRequest.serialized_payment_details', index=3,
number=4, type=12, cpp_type=9, label=2,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='signature', full_name='payments.PaymentRequest.signature', index=4,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=246,
serialized_end=395,
)
_X509CERTIFICATES = descriptor.Descriptor(
name='X509Certificates',
full_name='payments.X509Certificates',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='certificate', full_name='payments.X509Certificates.certificate', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=397,
serialized_end=436,
)
_PAYMENT = descriptor.Descriptor(
name='Payment',
full_name='payments.Payment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='merchant_data', full_name='payments.Payment.merchant_data', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value="",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='transactions', full_name='payments.Payment.transactions', index=1,
number=2, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='refund_to', full_name='payments.Payment.refund_to', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='memo', full_name='payments.Payment.memo', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=438,
serialized_end=543,
)
_PAYMENTACK = descriptor.Descriptor(
name='PaymentACK',
full_name='payments.PaymentACK',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='payment', full_name='payments.PaymentACK.payment', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='memo', full_name='payments.PaymentACK.memo', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=545,
serialized_end=607,
)
_PAYMENTDETAILS.fields_by_name['outputs'].message_type = _OUTPUT
_PAYMENT.fields_by_name['refund_to'].message_type = _OUTPUT
_PAYMENTACK.fields_by_name['payment'].message_type = _PAYMENT
DESCRIPTOR.message_types_by_name['Output'] = _OUTPUT
DESCRIPTOR.message_types_by_name['PaymentDetails'] = _PAYMENTDETAILS
DESCRIPTOR.message_types_by_name['PaymentRequest'] = _PAYMENTREQUEST
DESCRIPTOR.message_types_by_name['X509Certificates'] = _X509CERTIFICATES
DESCRIPTOR.message_types_by_name['Payment'] = _PAYMENT
DESCRIPTOR.message_types_by_name['PaymentACK'] = _PAYMENTACK
# Generated protobuf message classes (protoc "python_generic" output).  Each
# class is wired to the matching descriptor defined above via the
# GeneratedProtocolMessageType metaclass.  Do not edit by hand -- regenerate
# from paymentrequest.proto instead.
class Output(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _OUTPUT

    # @@protoc_insertion_point(class_scope:payments.Output)

class PaymentDetails(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PAYMENTDETAILS

    # @@protoc_insertion_point(class_scope:payments.PaymentDetails)

class PaymentRequest(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PAYMENTREQUEST

    # @@protoc_insertion_point(class_scope:payments.PaymentRequest)

class X509Certificates(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _X509CERTIFICATES

    # @@protoc_insertion_point(class_scope:payments.X509Certificates)

class Payment(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PAYMENT

    # @@protoc_insertion_point(class_scope:payments.Payment)

class PaymentACK(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PAYMENTACK

    # @@protoc_insertion_point(class_scope:payments.PaymentACK)
| gpl-3.0 |
liorvh/raspberry_pwn | src/pentest/sqlmap/plugins/generic/fingerprint.py | 7 | 1726 | #!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import readInput
from lib.core.data import logger
from lib.core.enums import OS
from lib.core.exception import SqlmapUndefinedMethod
class Fingerprint:
    """
    This class defines generic fingerprint functionalities for plugins.

    Concrete DBMS plugins must override getFingerprint/checkDbms/checkDbmsOs.
    """

    def __init__(self, dbms):
        Backend.forceDbms(dbms)

    def getFingerprint(self):
        raise SqlmapUndefinedMethod("'getFingerprint' method must be defined "
                                    "into the specific DBMS plugin")

    def checkDbms(self):
        raise SqlmapUndefinedMethod("'checkDbms' method must be defined "
                                    "into the specific DBMS plugin")

    def checkDbmsOs(self, detailed=False):
        raise SqlmapUndefinedMethod("'checkDbmsOs' method must be defined "
                                    "into the specific DBMS plugin")

    def forceDbmsEnum(self):
        # Intentional no-op; subclasses may override.
        pass

    def userChooseDbmsOs(self):
        """Prompt the user for the back-end OS when fingerprinting failed."""
        logger.warn("for some reason sqlmap was unable to fingerprint "
                    "the back-end DBMS operating system")

        prompt = "do you want to provide the OS? [(W)indows/(l)inux]"
        while True:
            choice = readInput(prompt, default="W")
            initial = choice[0].lower()
            if initial == "w":
                Backend.setOs(OS.WINDOWS)
                return
            if initial == "l":
                Backend.setOs(OS.LINUX)
                return
            logger.warn("invalid value")
| gpl-3.0 |
vuntz/glance | glance/common/crypt.py | 8 | 2645 | #!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for URL-safe encrypting/decrypting
"""
import base64
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Random import random
import six
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
def urlsafe_encrypt(key, plaintext, blocksize=16):
    """
    Encrypts plaintext. Resulting ciphertext will contain URL-safe characters.
    If plaintext is Unicode, encode it to UTF-8 before encryption.

    :param key: AES secret key
    :param plaintext: Input text to be encrypted
    :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16)
    :returns : Resulting ciphertext
    """
    def _pad(data):
        """Pad *data* out to a multiple of blocksize with random bytes."""
        pad_length = blocksize - len(data) % blocksize
        rng = random.StrongRandom()
        # A NUL byte delimits the payload from the (non-zero) random padding.
        filler = b''.join(six.int2byte(rng.randint(1, 0xFF))
                          for _ in range(pad_length - 1))
        return data + b'\0' + filler

    if isinstance(plaintext, six.text_type):
        plaintext = plaintext.encode('utf-8')

    # Random 16-byte initialization vector for CBC mode; it is prepended to
    # the ciphertext so decryption can recover it.
    init_vector = Random.get_random_bytes(16)
    cipher = AES.new(key, AES.MODE_CBC, init_vector)
    encrypted = cipher.encrypt(_pad(six.binary_type(plaintext)))
    return base64.urlsafe_b64encode(init_vector + encrypted)
def urlsafe_decrypt(key, ciphertext):
    """
    Decrypts URL-safe base64 encoded ciphertext.
    On Python 3, the result is decoded from UTF-8.

    :param key: AES secret key
    :param ciphertext: The encrypted text to decrypt
    :returns : Resulting plaintext
    """
    raw = base64.urlsafe_b64decode(six.binary_type(ciphertext))
    # The first 16 bytes of the decoded blob are the CBC initialization vector.
    cipher = AES.new(key, AES.MODE_CBC, raw[:16])
    padded = cipher.decrypt(raw[16:])
    # Everything after the last NUL byte is random padding added by encrypt.
    text = padded[:padded.rfind(b'\0')]
    return text.decode('utf-8') if six.PY3 else text
| apache-2.0 |
madj4ck/ansible | plugins/inventory/vagrant.py | 37 | 3660 | #!/usr/bin/env python
"""
Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
returns it under the host group 'vagrant'
Example Vagrant configuration using this script:
config.vm.provision :ansible do |ansible|
ansible.playbook = "./provision/your_playbook.yml"
ansible.inventory_file = "./provision/inventory/vagrant.py"
ansible.verbose = true
end
"""
# Copyright (C) 2013 Mark Mandel <mark@compoundtheory.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the spacewalk.py inventory script for giving me the basic structure
# of this.
#
import sys
import subprocess
import re
import string
from optparse import OptionParser
try:
import json
except:
import simplejson as json
# Options
#------------------------------
# Command-line interface: Ansible invokes this script with either --list
# (whole inventory) or --host <name> (per-host variables).
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
                  help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
parser.add_option('--host', default=None, dest="host",
                  help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
# get all the ssh configs for all boxes in an array of dictionaries.
def get_ssh_config():
    """Return one ssh-config dict per currently running Vagrant box."""
    return [get_a_ssh_config(box) for box in list_running_boxes()]
#list all the running boxes
def list_running_boxes():
output = subprocess.check_output(["vagrant", "status"]).split('\n')
boxes = []
for line in output:
matcher = re.search("([^\s]+)[\s]+running \(.+", line)
if matcher:
boxes.append(matcher.group(1))
return boxes
#get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
output = subprocess.check_output(["vagrant", "ssh-config", box_name]).split('\n')
config = {}
for line in output:
if line.strip() != '':
matcher = re.search("( )?([a-zA-Z]+) (.*)", line)
config[matcher.group(2)] = matcher.group(3)
return config
# List out servers that vagrant has running
#------------------------------
# NOTE: this script is Python 2 only (print statements below).
if options.list:
    # Build the single 'vagrant' inventory group from every box's HostName.
    ssh_config = get_ssh_config()
    hosts = { 'vagrant': []}
    for data in ssh_config:
        hosts['vagrant'].append(data['HostName'])
    print json.dumps(hosts)
    sys.exit(0)
# Get out the host details
#------------------------------
elif options.host:
    result = {}
    ssh_config = get_ssh_config()
    # Find the ssh config whose HostName matches the requested host.
    details = filter(lambda x: (x['HostName'] == options.host), ssh_config)
    if len(details) > 0:
        #pass through the port, in case it's non standard.
        result = details[0]
        result['ansible_ssh_port'] = result['Port']
    print json.dumps(result)
    sys.exit(0)
# Print out help
#------------------------------
else:
    parser.print_help()
    sys.exit(0)
caperren/Archives | OSU Coursework/ROB 421 - Applied Robotics/software/windows_packages/denso_slave/Framework/arm_control_receiver.py | 1 | 9085 | #####################################
# Imports
#####################################
# Python native imports
from PyQt5 import QtCore, QtWidgets
import pythoncom
import win32com.client
from time import time
import socket
import json
#####################################
# Global Variables
#####################################
# Target loop frequency (Hz) for the receiver threads.
THREAD_HERTZ = 100
# Pre-recorded Cartesian poses (x, y, z, rx, ry, rz) of the arm.
P0 = (216.1302490234375, -9.575998306274414, 572.6145629882812, 63.89561462402344, 8.09478759765625, 83.43250274658203)
P1 = (251.22869873046875, -9.575998306274414, 572.6145629882812, 63.89561462402344, 8.09478759765625, 83.43250274658203)
P2 = (216.1302490234375, 0.10808953642845154, 606.7885131835938, 63.89561462402344, 8.09478759765625, 83.43250274658203)
# Pre-recorded joint-space pose (six joint angles, degrees -- TODO confirm units).
J0 = (-2.4949951171875, -68.55029296875, 161.4649658203125, 0.2345581203699112, -40.739683151245117, 60.7391586303711)
# Sentinel meaning "no valid reading yet" in the status sender's data.
BAD_VAL = -1000000
# TCP port the control receiver listens on.
TCP_PORT = 9877
#####################################
# Controller Class Definition
#####################################
class RAWControlReceiver(QtCore.QThread):
    """Thread that accepts a TCP client and turns '#####'-delimited JSON
    messages from it into new_message__signal emissions.

    NOTE: Python 2 only (print statements, `except Exception, e` syntax).
    """
    # Emitted once per successfully parsed JSON command message.
    new_message__signal = QtCore.pyqtSignal(dict)
    def __init__(self):
        super(RAWControlReceiver, self).__init__()
        # ########## Thread Flags ##########
        self.run_thread_flag = True
        # ########## Class Variables ##########
        self.wait_time = 1.0 / THREAD_HERTZ
        self.control_tcp_server = None
        self.client_connection = None
        self.client_address = None
        # Buffer of bytes received so far but not yet split into messages.
        self.current_message = ""
        # Counters for the once-per-second throughput printout.
        self.num_messages = 0
        self.last_time = time()
    def run(self):
        """Thread main loop: set up the server, then poll for messages."""
        self.initialize_tcp_server()
        while self.run_thread_flag:
            self.check_for_new_command_message()
            # self.msleep(2)
    def initialize_tcp_server(self):
        """Bind and listen on TCP_PORT on all interfaces."""
        self.control_tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.control_tcp_server.bind(('', TCP_PORT))
        self.control_tcp_server.listen(5)
    def check_for_new_command_message(self):
        """Read a few bytes, split the buffer on the '#####' delimiter, and
        emit each complete JSON payload as a dict."""
        try:
            # recv raises before a client is connected; handled below by
            # blocking in accept() for the next client.
            self.current_message += self.client_connection.recv(8)
            found_pound = self.current_message.find("#####")
            if found_pound != -1:
                split_message = str(self.current_message[:found_pound])
                # Drop the consumed payload plus the 5-byte delimiter.
                self.current_message = self.current_message[found_pound + 5:]
                try:
                    json_message = json.loads(split_message)
                    self.num_messages += 1
                    if time() - self.last_time > 1:
                        print "Num commands received:", self.num_messages
                        self.num_messages = 0
                        self.last_time = time()
                    self.new_message__signal.emit(json_message)
                except Exception, e:
                    print e, "could not parse"
        except Exception, e:
            print e, "other"
            # No usable connection: block until the next client connects.
            self.client_connection, self.client_address = self.control_tcp_server.accept()
#####################################
# Controller Class Definition
#####################################
class ArmControlReceiver(QtCore.QThread):
    """Thread that drains queued JSON command messages and drives a DENSO
    robot arm through its CAO COM interface.

    NOTE: Python 2 / Windows only (print statements, pythoncom/win32com).
    """
    def __init__(self, shared_objects):
        super(ArmControlReceiver, self).__init__()
        # ########## Reference to class init variables ##########
        self.shared_objects = shared_objects
        self.status_sender_class = self.shared_objects["threaded_classes"]["Arm Status Sender"]
        # ########## Get the settings instance ##########
        self.settings = QtCore.QSettings()
        # ########## TCP message receiver thread ##########
        self.message_handler = RAWControlReceiver()
        self.message_handler.start()
        # ########## Thread Flags ##########
        self.run_thread_flag = True
        # ########## Class Variables ##########
        self.wait_time = 1.0 / THREAD_HERTZ
        self.control_tcp_server = None
        self.client_connection = None
        self.client_address = None
        self.cao_engine = None
        self.controller = None
        self.arm = None
        # Dispatch table: command name -> handler. "charge_tank_psi" and
        # "fire_tank" are unimplemented placeholders (0, not callable) --
        # sending them would raise TypeError in process_command_queue_item.
        self.CONTROL_COMMANDS = {
            "enable_motors": self.enable_motors,
            "change_robot_speed": self.change_robot_speed,
            "move_position_abs": self.move_arm_position_absolute,
            "move_position_rel": self.move_arm_position_relative,
            "move_joint_abs": self.move_joints_absolute,
            "move_joint_rel": self.move_joints_relative,
            "charge_tank_psi": 0,
            "fire_tank": 0
        }
        self.current_message = ""
        self.command_queue = []
        self.num_commands = 0
        self.last_commands_time = time()
    def run(self):
        """Thread main loop: connect to the arm, then process one queued
        command per tick at roughly THREAD_HERTZ."""
        self.initialize_cao_engine()
        while self.run_thread_flag:
            start_time = time()
            # self.add_item_to_command_queue({"move_joint_rel": (10, 0, 0, 0, 0, 0)})
            # self.add_item_to_command_queue({"move_joint_rel": (-10, 0, 0, 0, 0, 0)})
            self.process_command_queue_item()
            if time() - self.last_commands_time > 1:
                print "Num commands processed:", self.num_commands
                self.num_commands = 0
                self.last_commands_time = time()
            time_diff = time() - start_time
            self.msleep(max(int(self.wait_time - time_diff), 0))
    def initialize_cao_engine(self):
        """Create the CAO COM engine and connect to the arm controller at
        the hard-coded address 192.168.1.10."""
        pythoncom.CoInitialize()
        self.cao_engine = win32com.client.Dispatch("CAO.CaoEngine")
        self.controller = self.cao_engine.Workspaces(0).AddController("RC", "CaoProv.DENSO.NetwoRC", "", "conn=eth:192.168.1.10")
        self.arm = self.controller.AddRobot("Arm1", "")
    def on_new_message__signal(self, message):
        # Slot for RAWControlReceiver.new_message__signal.
        self.command_queue.append(message)
    def process_command_queue_item(self):
        """Pop the oldest queued message and invoke its handler."""
        if self.command_queue:
            # Each message is a single-entry dict: {command_name: payload}.
            key = list(self.command_queue[0].keys())[0]
            data = self.command_queue[0][key]
            del self.command_queue[0]
            command_to_run = self.CONTROL_COMMANDS.get(key)
            command_to_run(data)
            self.num_commands += 1
    def add_item_to_command_queue(self, item):
        self.command_queue.append(item)
    def enable_motors(self, should_enable):
        """Turn the arm's motors on (and take control) or off (and release)."""
        try:
            if should_enable:
                self.arm.Execute("Motor", 1)
                self.arm.Execute("TakeArm", 0)
            else:
                self.arm.Execute("Motor", 0)
                self.arm.Execute("GiveArm", 0)
        except:
            print("Arm not able to change to state", "on." if should_enable else "off.")
    def change_robot_speed(self, speed):
        # Sets external speed/accel/decel all to the same value.
        self.arm.Execute("ExtSpeed", (speed, speed, speed))
    def move_arm_position_absolute(self, position):
        """Move to an absolute Cartesian pose (x, y, z, rx, ry, rz);
        silently ignored if the motors are not enabled."""
        try:
            if self.status_sender_class.statuses["motor_enabled"]:
                self.arm.Move(1, "@P " + str(tuple(position)), "NEXT")
        except:
            pass
    def move_arm_position_relative(self, position_offsets):
        """Offset the current Cartesian pose; no-op while the pose is still
        the BAD_VAL sentinel or every offset is zero."""
        current_position = self.status_sender_class.position
        if current_position["rz"] == BAD_VAL or len(position_offsets) == position_offsets.count(0):
            return
        new_position = (
            current_position["x"] + position_offsets[0],
            current_position["y"] + position_offsets[1],
            current_position["z"] + position_offsets[2],
            current_position["rx"] + position_offsets[3],
            current_position["ry"] + position_offsets[4],
            current_position["rz"] + position_offsets[5],
        )
        # print "here"
        self.move_arm_position_absolute(new_position)
    def move_joints_absolute(self, joint_positions):
        """Move to absolute joint angles; silently ignored if the motors
        are not enabled."""
        try:
            if self.status_sender_class.statuses["motor_enabled"]:
                self.arm.Move(1, "J" + str(tuple(joint_positions)), "NEXT")
        except:
            pass
    def move_joints_relative(self, joint_position_offsets):
        """Offset the current joint angles; note the status array is
        1-indexed (indices 1..6 hold the joints)."""
        current_position = self.status_sender_class.joints
        if current_position[6] == BAD_VAL or len(joint_position_offsets) == joint_position_offsets.count(0):
            return
        new_joint_positions = (
            current_position[1] + joint_position_offsets[0],
            current_position[2] + joint_position_offsets[1],
            current_position[3] + joint_position_offsets[2],
            current_position[4] + joint_position_offsets[3],
            current_position[5] + joint_position_offsets[4],
            current_position[6] + joint_position_offsets[5],
        )
        self.move_joints_absolute(new_joint_positions)
    def connect_signals_and_slots(self):
        self.message_handler.new_message__signal.connect(self.on_new_message__signal)
    def setup_signals(self, start_signal, signals_and_slots_signal, kill_signal):
        # Hook this thread's lifecycle into the application-wide signals.
        start_signal.connect(self.start)
        signals_and_slots_signal.connect(self.connect_signals_and_slots)
        kill_signal.connect(self.on_kill_threads_requested__slot)
    def on_kill_threads_requested__slot(self):
        # Stop the receiver thread first and wait for it before stopping self.
        self.message_handler.run_thread_flag = False
        self.message_handler.wait()
        self.run_thread_flag = False
| gpl-3.0 |
chainer/chainer | tests/chainer_tests/functions_tests/connection_tests/test_convolution_2d.py | 3 | 14148 | import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
@testing.parameterize(*(testing.product({
    'contiguous': ['C', None],
    'cover_all': [True, False],
    'x_dtype': [numpy.float32],
    'W_dtype': [numpy.float32],
    'dilate': [1],
    'groups': [1, 2],
    'nobias': [True, False],
}) + testing.product({
    'contiguous': [None],
    'cover_all': [False],
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'dilate': [1],
    'groups': [1, 2],
    'nobias': [True, False],
})))
@backend.inject_backend_tests(
    None,
    # ChainerX tests
    testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0'],
    })
    # CPU tests
    + testing.product({
        'use_cuda': [False],
        'use_ideep': ['never', 'always'],
    })
    # GPU tests
    + testing.product([
        [{'use_cuda': True}],
        # Without cuDNN
        testing.product({
            'use_cudnn': ['never'],
        })
        # With cuDNN
        + testing.product({
            'use_cudnn': ['always'],
            'cudnn_deterministic': [True, False],
            'autotune': [True, False],
        })]))
class TestConvolution2DFunction(testing.FunctionTestCase):
    # Parameterized forward/backward/double-backward consistency tests for
    # F.convolution_2d across backends, dtypes and group counts.
    def setUp(self):
        """Configure shapes, tolerances and the old-numpy fp16 workaround."""
        self.batches = 2
        self.in_channels_a_group = 3
        self.out_channels_a_group = 2
        self.in_channels = self.in_channels_a_group * self.groups
        self.out_channels = self.out_channels_a_group * self.groups
        self.kh, self.kw = (3, 3)
        self.stride = 2
        # "Same"-style padding scaled by the dilation factor.
        self.pad = (
            int(self.kh / 2) * self.dilate, int(self.kw / 2) * self.dilate)
        self.check_forward_options.update({
            'atol': 5e-4, 'rtol': 5e-3
        })
        self.check_backward_options.update({
            'atol': 5e-4, 'rtol': 5e-3
        })
        self.check_double_backward_options.update({
            'atol': 5e-4, 'rtol': 5e-3
        })
        self.old_numpy_fp16 = False
        if numpy.float16 in (self.x_dtype, self.W_dtype):
            # Old numpy versions have a bug in the fp16 conversion
            # that happens on the matrix multiplication for the grouped
            # convolution, outputs will be zeroed but computations
            # will be performed in order to detect other issues
            old_numpy = numpy.lib.NumpyVersion(numpy.__version__) < '1.17.0'
            self.old_numpy_fp16 = (old_numpy
                                   and self.groups == 2
                                   and self.x_dtype == self.W_dtype)
            # fp16 needs looser tolerances.
            self.check_forward_options.update({
                'atol': 1e-3, 'rtol': 1e-2
            })
            self.check_backward_options.update({
                'atol': 1e-3, 'rtol': 1e-3
            })
            self.check_double_backward_options.update({
                'atol': 1e-2, 'rtol': 1e-2
            })
    def before_test(self, test_name):
        """Relax tolerances further when running on cuDNN older than v6."""
        # cuDNN 5 and 5.1 results suffer from precision issues
        using_old_cudnn = (self.backend_config.xp is cuda.cupy
                           and self.backend_config.use_cudnn == 'always'
                           and cuda.cuda.cudnn.getVersion() < 6000)
        if using_old_cudnn:
            self.check_backward_options.update({
                'atol': 1e-3, 'rtol': 1e-3})
            self.check_double_backward_options.update({
                'atol': 1e-2, 'rtol': 1e-2})
    def generate_inputs(self):
        """Return (x, W) or (x, W, b) depending on the nobias parameter."""
        W = numpy.random.normal(
            0, numpy.sqrt(1. / (self.kh * self.kw * self.in_channels_a_group)),
            (self.out_channels, self.in_channels_a_group, self.kh, self.kw)
        ).astype(self.W_dtype)
        x = numpy.random.uniform(
            -1, 1, (self.batches, self.in_channels, 4, 3)).astype(self.x_dtype)
        if self.nobias:
            return x, W
        else:
            b = numpy.random.uniform(
                -1, 1, self.out_channels).astype(self.x_dtype)
            return x, W, b
    def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.convolution_2d itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        with chainer.using_config('use_ideep', 'never'):
            y_expected = F.convolution_2d(
                x, W, b, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all, dilate=self.dilate,
                groups=self.groups)
        if self.old_numpy_fp16:
            # Zero the output so comparisons still pass on buggy numpy.
            return y_expected.array*0,
        return y_expected.array,
    def forward(self, inputs, device):
        """Run the convolution under test on the given backend device."""
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        out = F.convolution_2d(
            x, W, b, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all, dilate=self.dilate,
            groups=self.groups)
        if self.old_numpy_fp16:
            return out*0,
        return out,
@testing.parameterize(*(testing.product({
    'use_cudnn': ['always', 'auto', 'never'],
    'cudnn_deterministic': [False, True],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'dilate': [1],
    'groups': [1, 2],
}) + testing.product({
    'use_cudnn': ['always', 'auto', 'never'],
    'cudnn_deterministic': [False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'dilate': [2],
    'groups': [1, 2],
})))
@attr.cudnn
class TestConvolution2DCudnnCall(unittest.TestCase):
    # Verifies that the cuDNN kernels are invoked exactly when the current
    # configuration (use_cudnn mode, cuDNN version, dilation, groups) says
    # they should be.
    def setUp(self):
        batches = 2
        in_channels_a_group = 3
        out_channels_a_group = 2
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        kh, kw = (3, 3)
        self.stride = 2
        self.pad = (int(kh / 2) * self.dilate, int(kw / 2) * self.dilate)
        self.x = cuda.cupy.random.uniform(
            -1, 1, (batches, in_channels, 4, 3)).astype(self.dtype)
        self.W = cuda.cupy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
            (out_channels, in_channels_a_group, kh, kw)).astype(self.dtype)
        self.gy = cuda.cupy.random.uniform(
            -1, 1, (batches, out_channels, 2, 2)).astype(self.dtype)
        with chainer.using_config('use_cudnn', self.use_cudnn):
            self.should_call_cudnn = chainer.should_use_cudnn('>=auto')
        # Dilated convolution needs cuDNN >= 6, grouped needs >= 7.
        if self.dilate > 1 and cuda.cuda.cudnn.getVersion() < 6000:
            self.should_call_cudnn = False
        if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
            self.should_call_cudnn = False
    def forward(self):
        x = chainer.Variable(self.x)
        W = chainer.Variable(self.W)
        return F.convolution_2d(x, W, None, stride=self.stride, pad=self.pad,
                                dilate=self.dilate, groups=self.groups)
    def test_call_cudnn_forward(self):
        """Forward pass calls cupy.cudnn.convolution_forward iff expected."""
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                with testing.patch('cupy.cudnn.convolution_forward') as func:
                    self.forward()
                self.assertEqual(func.called, self.should_call_cudnn)
    def test_call_cudnn_backward(self):
        """Backward pass calls convolution_backward_data iff expected."""
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                y = self.forward()
            y.grad = self.gy
            name = 'cupy.cudnn.convolution_backward_data'
            with testing.patch(name) as func:
                y.backward()
                self.assertEqual(func.called, self.should_call_cudnn)
@testing.parameterize(*testing.product({
    'c_contiguous': [True, False],
    'nobias': [True, False],
    'groups': [1, 2],
}))
@attr.gpu
@attr.cudnn
class TestConvolution2DFunctionCudnnDeterministic(unittest.TestCase):
    # With cudnn_deterministic enabled, two identical runs must yield
    # bit-identical gradients and outputs.
    def setUp(self):
        self.stride = 2
        self.pad = 1
        batch_sz = 2
        in_channels_a_group = 64
        out_channels_a_group = 64
        in_channels = in_channels_a_group * self.groups
        out_channels = out_channels_a_group * self.groups
        kh, kw = (3, 3)
        in_h, in_w = (32, 128)
        out_h, out_w = (16, 64)
        # should be same types for cudnn test
        x_dtype = numpy.float32
        W_dtype = numpy.float32
        self.W = numpy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
            (out_channels, in_channels_a_group, kh, kw)).astype(W_dtype)
        self.b = numpy.random.uniform(-1, 1, out_channels).astype(x_dtype)
        self.x = numpy.random.uniform(
            -1, 1, (batch_sz, in_channels, in_h, in_w)).astype(x_dtype)
        self.gy = numpy.random.uniform(
            -1, 1, (batch_sz, out_channels, out_h, out_w)).astype(x_dtype)
        self.should_call_cudnn = True
        # Grouped convolution requires cuDNN >= 7.
        if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
            self.should_call_cudnn = False
    def test_called(self):
        """convolution_backward_filter is called exactly when supported."""
        with testing.patch(
                'cupy.cudnn.convolution_backward_filter', autospec=True) as f:
            # cuDNN version >= v3 supports `cudnn_deterministic` option
            self._run()
            # in Convolution2DFunction.backward_gpu()
            assert f.called == self.should_call_cudnn
    def test_cudnn_deterministic(self):
        """Two identical runs produce identical grads and outputs."""
        x1, W1, b1, y1 = self._run()
        x2, W2, b2, y2 = self._run()
        cuda.cupy.testing.assert_array_equal(x1.grad, x2.grad)
        cuda.cupy.testing.assert_array_equal(y1.data, y2.data)
        cuda.cupy.testing.assert_array_equal(W1.grad, W2.grad)
    def _contiguous(self, x_data, W_data, b_data, gy_data):
        """Optionally convert the arrays to non-C-contiguous layouts."""
        if not self.c_contiguous:
            x_data = numpy.asfortranarray(x_data)
            W_data = numpy.asfortranarray(W_data)
            gy_data = numpy.asfortranarray(gy_data)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(gy_data.flags.c_contiguous)
            # Strided view makes the 1-D bias non-contiguous too.
            b = numpy.empty((len(b_data) * 2,), dtype=self.b.dtype)
            b[::2] = b_data
            b_data = b[::2]
            self.assertFalse(b_data.flags.c_contiguous)
        return x_data, W_data, b_data, gy_data
    def _run(self):
        """One full forward+backward pass under cudnn_deterministic."""
        with chainer.using_config('use_cudnn', 'always'):
            with chainer.using_config('cudnn_deterministic', True):
                # verify data continuity and move to gpu
                x_data, W_data, b_data, gy_data = tuple(
                    cuda.to_gpu(data) for data in self._contiguous(
                        self.x, self.W, self.b, self.gy))
                x, W, b, y = self._run_forward(x_data, W_data, b_data)
                y.grad = gy_data
                y.backward()
                return x, W, b, y
    def _run_forward(self, x_data, W_data, b_data):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        b = None if self.nobias else chainer.Variable(b_data)
        y = F.convolution_2d(x, W, b, stride=self.stride, pad=self.pad,
                             cover_all=False, groups=self.groups)
        return x, W, b, y
class TestConvolution2DBackwardNoncontiguousGradOutputs(unittest.TestCase):
    # NumPy raises an error when the inputs of a dot operation are not
    # contiguous; this test ensures that issue is handled correctly
    # (https://github.com/chainer/chainer/issues/2744).  It relies on
    # backward() of F.sum producing a non-contiguous gradient array.
    def test_1(self):
        # A single output channel is essential to reproduce the issue.
        batches, channels_in, channels_out = 2, 3, 1
        x = numpy.ones((batches, channels_in, 10, 10), numpy.float32)
        w = numpy.ones((channels_out, channels_in, 3, 3), numpy.float32)
        y = F.convolution_2d(x, chainer.Variable(w))
        # Must not raise despite the non-contiguous grad from F.sum.
        F.sum(y).backward()
class TestConvolution2DInvalidDilation(unittest.TestCase):
    # A dilation factor of 0 is invalid and must raise ValueError on every
    # backend (plain CPU, iDeep, GPU, cuDNN).
    n_batches = 2
    in_channels = 3
    out_channels = 2
    dilate = 0
    x_shape = (n_batches, in_channels, 10, 10)
    w_shape = (out_channels, in_channels, 3, 3)
    def check_invalid_dilation(self, x_data, w_data):
        x = chainer.Variable(x_data)
        w = chainer.Variable(w_data)
        F.convolution_2d(x, w, dilate=self.dilate)
    def test_invalid_dilation_cpu(self):
        x = numpy.ones(self.x_shape, numpy.float32)
        w = numpy.ones(self.w_shape, numpy.float32)
        with self.assertRaises(ValueError):
            with chainer.using_config('use_ideep', 'never'):
                self.check_invalid_dilation(x, w)
    @attr.ideep
    def test_invalid_dilation_cpu_ideep(self):
        x = numpy.ones(self.x_shape, numpy.float32)
        w = numpy.ones(self.w_shape, numpy.float32)
        with self.assertRaises(ValueError):
            with chainer.using_config('use_ideep', 'always'):
                self.check_invalid_dilation(x, w)
    @attr.gpu
    def test_invalid_dilation_gpu(self):
        x = cuda.cupy.ones(self.x_shape, numpy.float32)
        w = cuda.cupy.ones(self.w_shape, numpy.float32)
        with self.assertRaises(ValueError):
            with chainer.using_config('use_cudnn', 'never'):
                self.check_invalid_dilation(x, w)
    @attr.cudnn
    def test_invalid_dilation_gpu_cudnn(self):
        x = cuda.cupy.ones(self.x_shape, numpy.float32)
        w = cuda.cupy.ones(self.w_shape, numpy.float32)
        with self.assertRaises(ValueError):
            with chainer.using_config('use_cudnn', 'always'):
                self.check_invalid_dilation(x, w)
| mit |
restran/web-proxy | settings.py | 2 | 2822 | # -*- coding: utf-8 -*-
# Created on 2014/11/13
from __future__ import unicode_literals
__author__ = 'restran'
import logging
class BackendSite():
    """
    Configuration record for one proxied back-end site.
    """
    # Whether content substitution/filtering is applied to this site's
    # responses.
    enable_filter = True

    def __init__(self, name, url, netloc, filter_rules):
        # Routing key identifying this site.
        self.name = name
        # Full URL, e.g. http://192.168.10.2:9090
        self.url = url
        # Network location, e.g. 192.168.10.2:9090
        self.netloc = netloc
        # Content substitution rules applied to proxied responses.
        self.filter_rules = filter_rules
class Config(object):
    """Base configuration shared by every environment."""
    # Seconds until an access token expires (24 hours).
    token_expires_seconds = 3600 * 24
    # Whether to clear the old site's cookies when the target site changes.
    is_to_clear_old_cookies = True
    # Maximum concurrent requests for the shared AsyncHTTPClient.  A slow
    # back-end can otherwise hold connections and stall other proxied
    # sites, so this is kept well above the library default of 10.
    async_http_client_max_clients = 500
class Development(Config):
    """Development environment settings."""
    DEBUG = True
    # Messages below this level are ignored.  Levels, highest first:
    # CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET.
    LOGGING_LEVEL = logging.DEBUG
    local_netloc = '127.0.0.1:9999'
    local_protocol = 'http'
    local_port = 9999
class Testing(Config):
    """Test environment settings."""
    TESTING = True
    DEBUG = True
    # Messages below DEBUG are ignored.
    LOGGING_LEVEL = logging.DEBUG
    local_protocol = 'http'
    local_netloc = '127.0.0.1:9999'
    local_port = 9999
class Production(Config):
    """
    Production environment settings.
    """
    DEBUG = False
    # NOTE(review): mutating logging.Logger.level changes the default level
    # of every logger process-wide; kept for backward compatibility.
    logging.Logger.level = logging.WARNING
    # Bug fix: LOGGING_LEVEL was commented out, but the module-level
    # logging.basicConfig(level=config.LOGGING_LEVEL) call requires it, so
    # selecting Production raised AttributeError.  WARNING matches the
    # production level set on the line above.
    LOGGING_LEVEL = logging.WARNING
    local_protocol = 'http'
    local_netloc = '127.0.0.1:9000'
    local_port = 9000
# Use the development environment configuration by default.
# config = Production()
config = Development()
logging.basicConfig(
    level=config.LOGGING_LEVEL,
    format='%(asctime)s %(levelname)s [%(module)s] %(message)s',
)
from subs_filter import SubsFilterRules
# Back-end sites that the proxy forwards requests to, keyed by site name.
# TODO: access list -- restrict which links under each back-end site may
# be visited.
forward_list = {
    "baidu": BackendSite('baidu', 'http://www.baidu.com', 'www.baidu.com', []),
    "douban": BackendSite('douban', 'http://www.douban.com', 'www.douban.com', [
        SubsFilterRules('.', r'http://www\.douban\.com', '/.site.douban'),
        SubsFilterRules('.', r'http://img3\.douban\.com', '/.site.img3.douban'),
        SubsFilterRules('.', r'http://img5\.douban\.com', '/.site.img5.douban'),
    ]),
    "img3.douban": BackendSite('douban', 'http://img3.douban.com', 'img3.douban.com', []),
    "img5.douban": BackendSite('douban', 'http://img5.douban.com', 'img5.douban.com', []),
}
| gpl-2.0 |
ivaano/zato | code/zato-server/test/zato/server/pickup/test__init__.py | 6 | 1916 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import os
from tempfile import NamedTemporaryFile
from unittest import TestCase
from uuid import uuid4
# Bunch
from bunch import Bunch
# Mock
from mock import Mock
# Zato
from zato.common import util
from zato.common.test import rand_int
from zato.server.pickup import BasePickupEventProcessor
class BasePickupEventProcessorTest(TestCase):
    def test_delete_after_pick_up(self):
        """hot_deploy must remove the picked-up file when, and only when,
        delete_after_pick_up is enabled in the server's hot-deploy config."""
        package_id = rand_int()
        support_values = (True, False)
        for delete_after_pick_up in support_values:
            # Stub standing in for both notify_new_package and
            # odb.hot_deploy; always reports the same package id.
            def dummy(*ignored, **ignored_kwargs):
                return package_id
            # Minimal fake parallel server exposing only the attributes
            # BasePickupEventProcessor touches.
            server = Bunch()
            server.parallel_server = Bunch()
            server.parallel_server.id = rand_int()
            server.parallel_server.hot_deploy_config = Bunch()
            server.parallel_server.hot_deploy_config.delete_after_pick_up = delete_after_pick_up
            server.parallel_server.odb = Bunch()
            server.parallel_server.notify_new_package = dummy
            server.parallel_server.odb.hot_deploy = dummy
            file_name = '{}.py'.format(uuid4().hex)
            processor = BasePickupEventProcessor(uuid4().hex, server)
            # Patch the module-level os.remove wrapper so file deletion can
            # be observed without touching the filesystem.
            _os_remove = Mock()
            util._os_remove = _os_remove
            with NamedTemporaryFile(prefix='zato-test-', suffix=file_name) as tf:
                tf.flush()
                ret = processor.hot_deploy(os.path.abspath(tf.name), tf.name)
                self.assertEquals(ret, package_id)
            if delete_after_pick_up:
                _os_remove.assert_called_with(tf.name)
            else:
                self.assertFalse(_os_remove.called)
| gpl-3.0 |
lisael/pg-django | tests/regressiontests/logging_tests/tests.py | 25 | 8053 | from __future__ import with_statement
import copy
from django.conf import compat_patch_logging_config
from django.core import mail
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils.log import CallbackFilter, RequireDebugFalse, getLogger
# logging config prior to using filter with mail_admins
# mail_admins handler with no "filters" key, as generated before #16288;
# used to exercise the backward-compat patching shim below.
OLD_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
class PatchLoggingConfigTest(TestCase):
    """
    Tests for backward-compat shim for #16288. These tests should be removed in
    Django 1.6 when that shim and DeprecationWarning are removed.
    """
    def test_filter_added(self):
        """
        Test that debug-false filter is added to mail_admins handler if it has
        no filters.
        """
        config = copy.deepcopy(OLD_LOGGING)
        compat_patch_logging_config(config)
        self.assertEqual(
            config["handlers"]["mail_admins"]["filters"],
            ['require_debug_false'])
    def test_filter_configuration(self):
        """
        Test that the auto-added require_debug_false filter is an instance of
        `RequireDebugFalse` filter class.
        """
        config = copy.deepcopy(OLD_LOGGING)
        compat_patch_logging_config(config)
        flt = config["filters"]["require_debug_false"]
        self.assertEqual(flt["()"], "django.utils.log.RequireDebugFalse")
    def test_require_debug_false_filter(self):
        """
        Test the RequireDebugFalse filter class.
        """
        # The filter passes records only when DEBUG is off.
        filter_ = RequireDebugFalse()
        with self.settings(DEBUG=True):
            self.assertEqual(filter_.filter("record is not used"), False)
        with self.settings(DEBUG=False):
            self.assertEqual(filter_.filter("record is not used"), True)
    def test_no_patch_if_filters_key_exists(self):
        """
        Test that the logging configuration is not modified if the mail_admins
        handler already has a "filters" key.
        """
        config = copy.deepcopy(OLD_LOGGING)
        config["handlers"]["mail_admins"]["filters"] = []
        new_config = copy.deepcopy(config)
        compat_patch_logging_config(new_config)
        self.assertEqual(config, new_config)
    def test_no_patch_if_no_mail_admins_handler(self):
        """
        Test that the logging configuration is not modified if the mail_admins
        handler is not present.
        """
        config = copy.deepcopy(OLD_LOGGING)
        config["handlers"].pop("mail_admins")
        new_config = copy.deepcopy(config)
        compat_patch_logging_config(new_config)
        self.assertEqual(config, new_config)
class CallbackFilterTest(TestCase):
    def test_sense(self):
        # The filter's verdict is exactly whatever its callback returns.
        reject_all = CallbackFilter(lambda record: False)
        accept_all = CallbackFilter(lambda record: True)
        self.assertEqual(reject_all.filter("record"), False)
        self.assertEqual(accept_all.filter("record"), True)

    def test_passes_on_record(self):
        # The record being filtered is handed to the callback unchanged.
        seen = []

        def _callback(record):
            seen.append(record)
            return True

        CallbackFilter(_callback).filter("a record")
        self.assertEqual(seen, ["a record"])
class AdminEmailHandlerTest(TestCase):
    """Tests for AdminEmailHandler's email-subject composition.

    All tests log through the 'django.request' logger and inspect
    django.core.mail.outbox for the message the handler produced.
    """

    def get_admin_email_handler(self, logger):
        # Inspired from regressiontests/views/views.py: send_log()
        # ensuring the AdminEmailHandler does not get filtered out
        # even with DEBUG=True.
        admin_email_handler = [
            h for h in logger.handlers
            if h.__class__.__name__ == "AdminEmailHandler"
        ][0]
        return admin_email_handler

    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
    )
    def test_accepts_args(self):
        """
        Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
        setting are used to compose the email subject.
        Refs #16736.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        logger = getLogger('django.request')
        admin_email_handler = self.get_admin_email_handler(logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            # %-style args must be interpolated into the subject line.
            logger.error(message, token1, token2)
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters

    @override_settings(
        ADMINS=(('whatever admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
        INTERNAL_IPS=('127.0.0.1',),
    )
    def test_accepts_args_and_request(self):
        """
        Ensure that the subject is also handled if being
        passed a request object.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        logger = getLogger('django.request')
        admin_email_handler = self.get_admin_email_handler(logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            rf = RequestFactory()
            request = rf.get('/')
            # An internal-IP request should be flagged in the subject.
            logger.error(message, token1, token2,
                extra={
                    'status_code': 403,
                    'request': request,
                }
            )
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_subject_accepts_newlines(self):
        """
        Ensure that newlines in email reports' subjects are escaped to avoid
        AdminErrorHandler to fail.
        Refs #17281.
        """
        message = u'Message \r\n with newlines'
        expected_subject = u'ERROR: Message \\r\\n with newlines'
        self.assertEqual(len(mail.outbox), 0)
        logger = getLogger('django.request')
        logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        # Raw CR/LF must never survive into the subject header.
        self.assertFalse('\n' in mail.outbox[0].subject)
        self.assertFalse('\r' in mail.outbox[0].subject)
        self.assertEqual(mail.outbox[0].subject, expected_subject)

    @override_settings(
        ADMINS=(('admin', 'admin@example.com'),),
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_truncate_subject(self):
        """
        RFC 2822's hard limit is 998 characters per line.
        So, minus "Subject: ", the actual subject must be no longer than 989
        characters.
        Refs #17281.
        """
        message = 'a' * 1000
        # "ERROR: " prefix (7 chars) + 982 'a's == 989 characters total.
        expected_subject = 'ERROR: aa' + 'a' * 980
        self.assertEqual(len(mail.outbox), 0)
        logger = getLogger('django.request')
        logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, expected_subject)
| bsd-3-clause |
hainn8x/gnuradio | gr-filter/examples/gr_filtdes_live_upd.py | 47 | 3111 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio.filter import filter_design
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from gnuradio import blocks
except ImportError:
sys.stderr.write("Error: Program requires gr-blocks.\n")
sys.exit(1)
try:
from gnuradio import channels
except ImportError:
sys.stderr.write("Error: Program requires gr-channels.\n")
sys.exit(1)
class my_top_block(gr.top_block):
    """GNU Radio flow graph demonstrating live filter-design updates.

    Two complex sine sources are summed, passed through a noisy channel,
    throttled, run through an FFT filter (whose taps can be replaced at
    runtime via update_filter), and displayed on a Qt frequency sink.
    """

    def __init__(self):
        gr.top_block.__init__(self)
        # Sample rate and the two test-tone frequencies, in Hz.
        Rs = 8000
        f1 = 1000
        f2 = 2000
        # FFT/display size in samples.
        npts = 2048
        self.qapp = QtGui.QApplication(sys.argv)
        # Start with a single unity tap (pass-through) until the filter
        # design tool supplies a real design.
        self.filt_taps = [1,]
        src1 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
        src2 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
        src = blocks.add_cc()
        channel = channels.channel_model(0.01)
        self.filt = filter.fft_filter_ccc(1, self.filt_taps)
        # Throttle keeps the (otherwise unthrottled) graph from spinning
        # at full CPU speed.
        thr = blocks.throttle(gr.sizeof_gr_complex, 100*npts)
        self.snk1 = qtgui.freq_sink_c(npts, filter.firdes.WIN_BLACKMAN_hARRIS,
                                      0, Rs,
                                      "Complex Freq Example", 1)
        self.connect(src1, (src,0))
        self.connect(src2, (src,1))
        self.connect(src, channel, thr, self.filt, (self.snk1, 0))
        # Get the reference pointer to the SpectrumDisplayForm QWidget
        pyQt = self.snk1.pyqwidget()
        # Wrap the pointer as a PyQt SIP object
        # This can now be manipulated as a PyQt4.QtGui.QWidget
        pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)
        pyWin.show()

    def update_filter(self, filtobj):
        # Callback handed to filter_design.launch(); invoked whenever the
        # user completes a new filter design. Swaps the new taps in live.
        print "Filter type:", filtobj.get_restype()
        print "Filter params", filtobj.get_params()
        self.filt.set_taps(filtobj.get_taps())
if __name__ == "__main__":
    # Build and start the flow graph, then launch the filter design tool;
    # tb.update_filter is called each time a new filter is designed.
    tb = my_top_block();
    tb.start()
    mw = filter_design.launch(sys.argv, tb.update_filter)
    mw.show()
    # Hand control to the Qt event loop; stop the graph when it exits.
    tb.qapp.exec_()
    tb.stop()
| gpl-3.0 |
DataDog/brod | brod/base.py | 1 | 32420 | import binascii
import logging
import struct
import time
import sys, traceback
from cStringIO import StringIO
from collections import namedtuple
from datetime import datetime
from functools import partial
# Names exported by `from brod.base import *`.
__all__ = [
    'KafkaError',
    'ConnectionFailure',
    'OffsetOutOfRange',
    'InvalidMessageCode',
    'WrongPartitionCode',
    'InvalidFetchSizeCode',
    'UnknownError',
    'InvalidOffset',
    'PRODUCE_REQUEST',
    'FETCH_REQUEST',
    'OFFSETS_REQUEST',
    'LATEST_OFFSET',
    'EARLIEST_OFFSET',
    'Lengths',
    'ConsumerStats'
]

# Wire-format toggle used when computing the next fetch offset (see
# MessageSet.next_offset: the 0.7 header is assumed one byte longer,
# presumably for the compression field -- TODO confirm against broker).
VERSION_0_7 = False

# Exception hierarchy: every client error derives from KafkaError.
class KafkaError(Exception): pass
class ConnectionFailure(KafkaError): pass
class OffsetOutOfRange(KafkaError): pass
class InvalidMessageCode(KafkaError): pass
class WrongPartitionCode(KafkaError): pass
class InvalidFetchSizeCode(KafkaError): pass
class UnknownError(KafkaError): pass
class InvalidOffset(KafkaError): pass

# Maps broker error codes (first two bytes of a response) to exceptions;
# unmapped codes fall back to UnknownError in _read_response().
error_codes = {
    1: OffsetOutOfRange,
    2: InvalidMessageCode,
    3: WrongPartitionCode,
    4: InvalidFetchSizeCode,
}

# Request-type IDs written into the request header.
PRODUCE_REQUEST = 0
FETCH_REQUEST = 1
MULTIFETCH_REQUEST = 2
MULTIPRODUCE_REQUEST = 3
OFFSETS_REQUEST = 4

# Magic byte written at the start of each produced message.
MAGIC_BYTE = 0

# Sentinel time values for offsets(): newest / oldest available offset.
LATEST_OFFSET = -1
EARLIEST_OFFSET = -2

# Module-wide logger.
kafka_log = logging.getLogger('brod')
class Lengths(object):
    """Byte widths of the fields in the Kafka wire protocol, used when
    packing requests and parsing responses."""
    ERROR_CODE = 2          # response error code
    RESPONSE_SIZE = 4       # length prefix of every response
    REQUEST_TYPE = 2        # request-type ID (PRODUCE_REQUEST, ...)
    TOPIC_LENGTH = 2        # length prefix of the topic string
    PARTITION = 4
    OFFSET = 8
    OFFSET_COUNT = 4        # count field in an offsets response
    MAX_NUM_OFFSETS = 4     # max-offsets field in an offsets request
    MAX_REQUEST_SIZE = 4    # max-fetch-size field in a fetch request
    TIME_VAL = 8            # time field in an offsets request
    MESSAGE_LENGTH = 4      # per-message length prefix
    MAGIC = 1               # per-message magic byte
    COMPRESSION = 1         # compression byte (present when magic == 1)
    CHECKSUM = 4            # per-message CRC32
    # Fixed per-message header: length prefix + magic + checksum.
    MESSAGE_HEADER = MESSAGE_LENGTH + MAGIC + CHECKSUM
class BrokerPartition(namedtuple('BrokerPartition',
                                 'broker_id partition creator host port topic')):
    """One (broker, partition) pair for a topic, as registered in ZooKeeper."""

    @property
    def id(self):
        # e.g. broker 3, partition 1 -> "3-1"
        return "{0.broker_id}-{0.partition}".format(self)

    @classmethod
    def from_zk(cls, broker_id, broker_string, topic, num_parts):
        """Generate a list of BrokerPartition objects based on various values
        taken from ZooKeeper.

        broker_id is this broker's ID to ZooKeeper. It's a simple integer, set
        as the "brokerid" param in Kafka's server config file. You can find a
        list of them by asking for the children of /brokers/ids in ZooKeeper.

        broker_string is found in ZooKeeper at /brokers/ids/{broker_id}.
        Its format is assumed to be "creator:host:port", though the creator
        can have the host embedded in it because of the version of UUID that
        Kafka uses.

        num_parts is the number of partitions for that broker and is located
        at /brokers/topics/{topic}/{broker_id}.
        """
        creator, host, port = broker_string.split(":")
        partitions = []
        for part_num in range(int(num_parts)):
            partitions.append(BrokerPartition(broker_id=int(broker_id),
                                              partition=part_num,
                                              creator=creator,
                                              host=host,
                                              port=int(port),
                                              topic=topic))
        return partitions
class ConsumerStats(namedtuple('ConsumerStats',
                               'fetches bytes messages max_fetch')):
    """Running totals for a consumer: fetch calls made, bytes and messages
    read, and the largest fetch size used."""

    def _human_bytes(self, bytes):
        # Render a byte count using the largest binary unit it reaches.
        amount = float(bytes)
        for threshold, unit in ((1024 ** 4, 'TB'), (1024 ** 3, 'GB'),
                                (1024 ** 2, 'MB'), (1024, 'KB')):
            if amount >= threshold:
                return '%.2f%s' % (amount / threshold, unit)
        return '%.2fB' % amount

    def __str__(self):
        return ("ConsumerStats: fetches={0}, bytes={1}, messages={2}, max_fetch={3}"
                .format(self.fetches, self._human_bytes(self.bytes),
                        self.messages, self.max_fetch))
class FetchResult(object):
    """The result of a MULTIFETCH request: an ordered collection of
    message sets, one per broker/topic/partition queried.

    It can contain an arbitrary number of message sets, which it'll
    eventually be able to query more intelligently than this. :-P
    This should eventually move to base and be returned in a multifetch().
    """

    def __init__(self, message_sets):
        # Copy defensively so later mutation of the caller's list
        # cannot change what this result reports.
        self._message_sets = list(message_sets)

    def __iter__(self):
        return iter(self._message_sets)

    def __len__(self):
        return len(self._message_sets)

    def __getitem__(self, i):
        return self._message_sets[i]

    @property
    def broker_partitions(self):
        # One BrokerPartition per contained message set.
        return [ms.broker_partition for ms in self._message_sets]

    @property
    def num_messages(self):
        # Total message count across every message set.
        return sum(map(len, self._message_sets))

    @property
    def num_bytes(self):
        # Total payload bytes across every message set.
        return sum(ms.size for ms in self._message_sets)
class MessageSet(object):
    """A collection of messages and offsets returned from a request made to
    a single broker/topic/partition. Allows you to iterate via (offset, msg)
    tuples and grab origin information.

    ZK info might not be available if this came from a regular multifetch.
    This should be moved to base.
    """

    def __init__(self, broker_partition, start_offset, offsets_msgs):
        """
        broker_partition -- BrokerPartition this data was fetched from
        start_offset     -- the offset the fetch request started at
        offsets_msgs     -- list of (offset, message) tuples, in offset order
        """
        self._broker_partition = broker_partition
        self._start_offset = start_offset
        # Defensive copy so the caller can't mutate our contents later.
        self._offsets_msgs = offsets_msgs[:]

    ################## Where did I come from? ##################
    @property
    def broker_partition(self):
        return self._broker_partition

    @property
    def topic(self):
        return self.broker_partition.topic

    ################## What do I have inside? ##################
    @property
    def offsets(self):
        return [offset for offset, msg in self]

    @property
    def messages(self):
        return [msg for offset, msg in self]

    @property
    def start_offset(self):
        # Offset of the first message actually read; None when empty.
        return self.offsets[0] if self else None

    @property
    def end_offset(self):
        # Offset of the last message actually read; None when empty.
        return self.offsets[-1] if self else None

    @property
    def next_offset(self):
        """The offset to use for the next fetch after this set."""
        # FIXME FIXME FIXME: This calcuation should be done at a much deeper
        # level, or else this won't work with compressed messages, or be able
        # to detect the difference between 0.6 and 0.7 headers
        if not self:
            return self._start_offset  # We didn't read anything
        MESSAGE_HEADER_SIZE = 10 if VERSION_0_7 else 9
        last_offset, last_msg = self._offsets_msgs[-1]
        next_offset = last_offset + len(last_msg) + MESSAGE_HEADER_SIZE
        return next_offset

    @property
    def size(self):
        # Total payload bytes (message headers not included).
        return sum(len(msg) for msg in self.messages)

    def __iter__(self):
        return iter(self._offsets_msgs)

    def __len__(self):
        return len(self._offsets_msgs)

    def __cmp__(self, other):
        # Order by broker partition first, then by contents.
        bp_cmp = cmp(self.broker_partition, other.broker_partition)
        if bp_cmp:
            return bp_cmp
        else:
            # Bug fix: this previously read `other.offsets_msgs`, an
            # attribute that doesn't exist (it's `_offsets_msgs`), so any
            # comparison between sets with equal broker partitions raised
            # AttributeError.
            return cmp(self._offsets_msgs, other._offsets_msgs)

    def __unicode__(self):
        return "Broker Partition: {0}\nContents: {1}".format(self.broker_partition, self._offsets_msgs)

    ################## Parse from binary ##################
    @classmethod
    def parse(cls, data_buff):
        # Placeholder: parsing a raw message set from a buffer is not
        # implemented yet (see BaseKafka._parse_message_set for the logic
        # this would eventually absorb).
        pass
class BaseKafka(object):
    """Abstract Kafka client speaking the 0.6/0.7 wire protocol.

    Subclasses provide the transport by implementing _connect(),
    _disconnect(), _read() and _write(); this class handles request
    encoding and response decoding. The callback-passing style exists so
    a non-blocking (e.g. Tornado) subclass can chain the steps
    asynchronously, while a blocking subclass simply invokes each
    callback inline and returns the final value.
    """
    MAX_RETRY = 3
    DEFAULT_MAX_SIZE = 1024 * 1024  # default fetch size: 1 MiB

    def __init__(self, host=None, port=None, max_size=None,
                 include_corrupt=False):
        self.host = host or 'localhost'
        self.port = port or 9092
        self.max_size = max_size or self.DEFAULT_MAX_SIZE
        self.include_corrupt = include_corrupt

    # Public API
    def produce(self, topic, messages, partition=None, callback=None):
        """Send one or more messages to topic/partition. `messages` may be
        a single str/unicode or a list of them; unicode is UTF-8 encoded."""
        # Clean up the input parameters
        partition = partition or 0
        topic = topic.encode('utf-8')
        if isinstance(messages, unicode):
            messages = [messages.encode('utf-8')]
        elif isinstance(messages, str):
            messages = [messages]
        # Encode the request
        request = self._produce_request(topic, messages, partition)
        # Send the request
        return self._write(request, callback)

    def fetch(self, topic, offset, partition=None, max_size=None,
              callback=None, include_corrupt=False, min_size=None,
              fetch_step=None):
        """ Fetch messages from a kafka queue

        This will sequentially read and return all available messages
        starting at the specified offset and adding up to max_size bytes.

        Params:
            topic:      kafka topic to read from
            offset:     offset of the first message requested
            partition:  topic partition to read from (optional)
            max_size:   maximum size to read from the queue,
                        in bytes (optional)
            min_size:   minimum size to read from the queue. if min_size and
                        fetch_step are defined, then we'll fetch sizes from
                        min_size to max_size until we have a result.
            fetch_step: the step increase for each fetch to the queue. only
                        applies if both a min_size and max_size are set.

        Returns:
            a list: [(offset, message), ]
        """
        # NOTE(review): if min_size >= max_size the xrange below is empty,
        # the loop never runs, and `result` is unbound at the final
        # return -- looks like a latent NameError; confirm intended usage.
        if min_size and max_size and fetch_step:
            fetch_sizes = xrange(min_size, max_size, fetch_step)
        else:
            fetch_sizes = [max_size or self.max_size]
        # Clean up the input parameters
        topic = topic.encode('utf-8')
        partition = partition or 0
        # Try progressively larger fetch sizes until something comes back.
        for fetch_size in fetch_sizes:
            # Encode the request
            fetch_request_size, fetch_request = self._fetch_request(topic,
                offset, partition, fetch_size)
            # Send the request. The logic for handling the response
            # is in _read_fetch_response().
            try:
                result = self._write(
                    fetch_request_size,
                    partial(self._wrote_request_size,
                            fetch_request,
                            partial(self._read_fetch_response,
                                    callback,
                                    offset,
                                    include_corrupt
                                   )))
            except IOError as io_err:
                kafka_log.exception(io_err)
                raise ConnectionFailure("Fetch failure because of: {0}".format(io_err))
            if result:
                return result
        return result

    def offsets(self, topic, time_val, max_offsets, partition=None, callback=None):
        """Ask the broker for up to max_offsets offsets for topic/partition
        at time_val (or the LATEST_OFFSET / EARLIEST_OFFSET sentinels)."""
        # Clean up the input parameters
        partition = partition or 0
        # Encode the request
        request_size, request = self._offsets_request(topic, time_val,
                                                      max_offsets, partition)
        # Send the request. The logic for handling the response
        # is in _read_offset_response().
        return self._write(request_size,
            partial(self._wrote_request_size, request,
                partial(self._read_offset_response, callback)))

    def earliest_offset(self, topic, partition):
        """Return the first offset we have a message for."""
        return self.offsets(topic, EARLIEST_OFFSET, max_offsets=1, partition=partition)[0]

    def latest_offset(self, topic, partition):
        """Return the latest offset we can request. Note that this is the offset
        *after* the last known message in the queue. The offset this method
        returns will not have a message in it at the time you call it, but it's
        where the next message *will* be placed, whenever it arrives."""
        return self.offsets(topic, LATEST_OFFSET, max_offsets=1, partition=partition)[0]

    # Helper methods
    @staticmethod
    def compute_checksum(value):
        # CRC32, as used for the per-message checksum field.
        return binascii.crc32(value)

    # Private methods

    # Response decoding methods
    def _read_fetch_response(self, callback, start_offset, include_corrupt,
                             message_buffer):
        # Decode a fetch response buffer into a list of (offset, message)
        # tuples; an empty buffer yields an empty list.
        if message_buffer:
            messages = list(self._parse_message_set(
                start_offset, message_buffer, include_corrupt)
            )
        else:
            messages = []
        if callback:
            return callback(messages)
        else:
            return messages

    def _parse_message_set(self, start_offset, message_buffer,
                           include_corrupt=False):
        """Generator over the messages in a raw fetch-response buffer.

        Yields (offset, payload) tuples, or (offset, payload, corrupt)
        when include_corrupt is set. Stops silently at a truncated final
        message -- that is expected whenever the fetch size cuts a
        message in half.
        """
        offset = start_offset
        try:
            has_more = True
            while has_more:
                # Current message's absolute offset: where the buffer
                # started plus how far we've read (minus the error-code
                # header that was already consumed).
                offset = start_offset + message_buffer.tell() - Lengths.ERROR_CODE
                # Parse the message length (uint:4)
                raw_message_length = message_buffer.read(Lengths.MESSAGE_LENGTH)
                if raw_message_length == '':
                    break
                elif len(raw_message_length) < Lengths.MESSAGE_LENGTH:
                    kafka_log.error('Unexpected end of message set. Expected {0} bytes for message length, only read {1}'.format(Lengths.MESSAGE_LENGTH, len(raw_message_length)))
                    break
                message_length = struct.unpack('>I',
                                               raw_message_length)[0]
                # Parse the magic byte (int:1)
                raw_magic = message_buffer.read(Lengths.MAGIC)
                if len(raw_magic) < Lengths.MAGIC:
                    kafka_log.error('Unexpected end of message set. Expected {0} bytes for magic byte, only read{1}'.format(Lengths.MAGIC, len(raw_magic)))
                    break
                magic = struct.unpack('>B', raw_magic)[0]
                if magic == 1:
                    # Magic 1 messages carry an extra compression byte.
                    compression = message_buffer.read(Lengths.COMPRESSION)
                    # We don't do anything with this at the moment.
                # Parse the checksum (int:4)
                raw_checksum = message_buffer.read(Lengths.CHECKSUM)
                if len(raw_checksum) < Lengths.CHECKSUM:
                    kafka_log.error('Unexpected end of message set. Expected {0} bytes for checksum, only read {1}'.format(Lengths.CHECKSUM, len(raw_checksum)))
                    break
                checksum = struct.unpack('>i', raw_checksum)[0]
                # Parse the payload (variable length string)
                payload_length = message_length - Lengths.MAGIC - Lengths.CHECKSUM
                payload = message_buffer.read(payload_length)
                if len(payload) < payload_length and not self.include_corrupt:
                    # This is not an error - this happens everytime we reach
                    # the end of the read buffer without having parsed a complete msg
                    # kafka_log.error('Unexpected end of message set. Expected {0} bytes for payload, only read {1}'.format(payload_length, len(payload)))
                    break
                actual_checksum = self.compute_checksum(payload)
                if magic != MAGIC_BYTE:
                    kafka_log.error('Unexpected magic byte: {0} (expecting {1})'.format(magic, MAGIC_BYTE))
                    corrupt = True
                elif checksum != actual_checksum:
                    kafka_log.error('Checksum failure at offset {0}'.format(offset))
                    corrupt = True
                else:
                    corrupt = False
                if include_corrupt:
                    # kafka_log.debug('message {0}: (offset: {1}, {2} bytes, corrupt: {3})'.format(payload, offset, message_length, corrupt))
                    yield offset, payload, corrupt
                else:
                    # kafka_log.debug('message {0}: (offset: {1}, {2} bytes)'.format(payload, offset, message_length))
                    yield offset, payload
        except:
            # NOTE(review): bare except swallows everything (including
            # GeneratorExit/KeyboardInterrupt on py2) and only logs --
            # consider narrowing; left as-is in this documentation pass.
            kafka_log.error("Unexpected error:{0}".format(sys.exc_info()[0]))
        finally:
            message_buffer.close()

    def _read_offset_response(self, callback, data):
        """Decode an offsets response into a list of ints (long offsets)."""
        # The number of offsets received (uint:4)
        raw_offset_count = data.read(Lengths.OFFSET_COUNT)
        offset_count = struct.unpack('>L', raw_offset_count)[0]
        offsets = []
        has_more = True
        for i in range(offset_count):
            # Each offset is an unsigned 8-byte big-endian integer.
            raw_offset = data.read(Lengths.OFFSET)
            offset = struct.unpack('>Q', raw_offset)[0]
            offsets.append(offset)
        #assert data.getvalue() == '', 'Some leftover data in offset response buffer: {0}'.format(data.getvalue())
        kafka_log.debug('Received {0} offsets: {1}'.format(offset_count, len(offsets)))
        if callback:
            return callback(offsets)
        else:
            return offsets

    # Request encoding methods
    def _produce_request(self, topic, messages, partition):
        """Build a complete, length-prefixed produce request."""
        message_set_buffer = StringIO()
        for message in messages:
            # Per-message framing: <<int:1, int:4, str>>
            # (magic byte, CRC32 checksum, payload)
            encoded_message = struct.pack('>Bi{0}s'.format(len(message)),
                                          MAGIC_BYTE,
                                          self.compute_checksum(message),
                                          message
                                         )
            message_size = len(encoded_message)
            bin_format = '>i{0}s'.format(message_size)
            message_set_buffer.write(struct.pack(bin_format, message_size,
                                                 encoded_message))
        message_set = message_set_buffer.getvalue()
        # create the request <<unit:4, uint:2, uint:2, str, uint:4, uint:4, str>>>
        request = (
            PRODUCE_REQUEST,
            len(topic),
            topic,
            partition,
            len(message_set),
            message_set
        )
        data = struct.pack('>HH{0}sII{1}s'.format(len(topic), len(message_set)),
                           *request
                          )
        request_size = len(data)
        bin_format = '<<uint:4, uint:2, uint:2, str:{0}, uint:4, uint:4, str:{1}>>'.format(len(topic), len(message_set))
        kafka_log.debug('produce request: {0} in format {1} ({2} bytes)'.format(request, bin_format, request_size))
        # Prepend the 4-byte request-size header.
        return struct.pack('>I{0}s'.format(request_size), request_size, data)

    def _fetch_request(self, topic, offset, partition, max_size):
        """Build a fetch request; returns (packed_size_header, packed_body)."""
        # Build fetch request request
        topic_length = len(topic)
        request_size = sum([
            Lengths.REQUEST_TYPE,
            Lengths.TOPIC_LENGTH,  # length of the topic length
            topic_length,
            Lengths.PARTITION,
            Lengths.OFFSET,
            Lengths.MAX_REQUEST_SIZE
        ])
        request = (
            FETCH_REQUEST,
            topic_length,
            topic,
            partition,
            offset,
            max_size
        )
        # Send the fetch request
        bin_format = '<<uint:4, uint:2, uint:2, str:{0}, uint:4, uint:8, uint:4>>'.format(topic_length)
        # kafka_log.info('fetch request: {0} in format {1} ({2} bytes)'.format(request, bin_format, request_size))
        bin_request_size = struct.pack('>I', request_size)
        bin_request = struct.pack('>HH%dsIQI' % topic_length, *request)
        return bin_request_size, bin_request

    def _offsets_request(self, topic, time_val, max_offsets, partition):
        """Build an offsets request; returns (packed_size_header, packed_body).
        Note time_val is packed signed ('q') so the LATEST/EARLIEST
        sentinels (-1/-2) survive."""
        offsets_request_size = sum([
            Lengths.REQUEST_TYPE,
            Lengths.TOPIC_LENGTH,
            len(topic),
            Lengths.PARTITION,
            Lengths.TIME_VAL,
            Lengths.MAX_NUM_OFFSETS,
        ])
        offsets_request = (
            OFFSETS_REQUEST,
            len(topic),
            topic,
            partition,
            time_val,
            max_offsets
        )
        bin_format = '<<uint:4, uint:2, uint:2, str:{0}, uint:4, int:8, uint:4>>'.format(len(topic))
        # kafka_log.debug('Fetching offsets for {0}-{1}, time: {2}, max_offsets: {3} in format {5} ({4} bytes)'.format(topic, partition, time_val, max_offsets, offsets_request_size, bin_format))
        bin_request_size = struct.pack('>I', offsets_request_size)
        bin_request = struct.pack('>HH{0}sIqI'.format(len(topic)),
                                  *offsets_request)
        return bin_request_size, bin_request

    # Request/response protocol
    # The following chain of small methods implements:
    #   write size -> write body -> read response size -> read body
    #   -> check error code -> hand buffer to the caller's parser.
    def _wrote_request_size(self, request, callback):
        return self._write(request, partial(self._wrote_request, callback))

    def _wrote_request(self, callback):
        # Read the first 4 bytes, which is the response size (unsigned int)
        return self._read(Lengths.RESPONSE_SIZE,
            partial(self._read_response_size, callback))

    def _read_response_size(self, callback, raw_buf_length):
        buf_length = struct.unpack('>I', raw_buf_length)[0]
        # kafka_log.info('response: {0} bytes'.format(buf_length))
        return self._read(buf_length,
            partial(self._read_response, callback))

    def _read_response(self, callback, data):
        # Check if there is a non zero error code (2 byte unsigned int):
        response_buffer = StringIO(data)
        raw_error_code = response_buffer.read(Lengths.ERROR_CODE)
        error_code = struct.unpack('>H', raw_error_code)[0]
        if error_code != 0:
            # Translate the broker error code into our exception hierarchy.
            raise error_codes.get(error_code, UnknownError)('Code: {0}'.format(error_code))
        else:
            return callback(response_buffer)

    # Socket management methods
    # Transport hooks: concrete subclasses must implement these.
    def _connect(self):
        raise NotImplementedError()

    def _disconnect(self):
        raise NotImplementedError()

    def _reconnect(self):
        # Drop and re-establish the connection.
        self._disconnect()
        self._connect()

    def _read(self, length, callback=None):
        raise NotImplementedError()

    def _write(self, data, callback=None, retries=MAX_RETRY):
        raise NotImplementedError()

    def topic(self, topic, partition=None):
        """Return a Partition object that knows how to iterate through messages
        in a topic/partition."""
        return Partition(self, topic, partition)

    def partition(self, topic, partition=None):
        """Return a Partition object that knows how to iterate through messages
        in a topic/partition."""
        return Partition(self, topic, partition)
# By David Ormsbee (dave@datadog.com):
class Partition(object):
    """This is deprecated, and should be rolled up into the higher level
    Consumers.

    A higher level abstraction over the Kafka object to make dealing with
    Partitions a little easier. Currently only serves to read from a topic.
    This class has not been properly tested with the non-blocking KafkaTornado.
    """
    # Immutable snapshot of polling progress, yielded on every iteration
    # of poll().
    PollingStatus = namedtuple('PollingStatus',
                               'start_offset next_offset last_offset_read ' +
                               'messages_read bytes_read num_fetches ' +
                               'polling_start_time seconds_slept')

    def __init__(self, kafka, topic, partition=None):
        # kafka is a BaseKafka subclass instance; topic/partition name the
        # stream this object reads.
        self._kafka = kafka
        self._topic = topic
        self._partition = partition

    def earliest_offset(self):
        """Return the first offset we have a message for."""
        return self._kafka.offsets(self._topic, EARLIEST_OFFSET, max_offsets=1,
                                   partition=self._partition)[0]

    def latest_offset(self):
        """Return the latest offset we can request. Note that this is the offset
        *after* the last known message in the queue. The offset this method
        returns will not have a message in it at the time you call it, but it's
        where the next message *will* be placed, whenever it arrives."""
        return self._kafka.offsets(self._topic, LATEST_OFFSET, max_offsets=1,
                                   partition=self._partition)[0]

    # FIXME DO: Put callback in
    # Partition should have it's own fetch() with the basic stuff pre-filled
    def poll(self,
             offset=None,
             end_offset=None,
             poll_interval=1,
             min_size=None,
             max_size=None,
             fetch_step=None,
             include_corrupt=False,
             retry_limit=3):
        """Poll and iterate through messages from a Kafka queue.

        Params (all optional):
            offset:        Offset of the first message requested.
            end_offset:    Offset of the last message requested. We will return
                           the message that corresponds to end_offset, and then
                           stop.
            poll_interval: How many seconds to pause between polling
            min_size:      minimum size to read from the queue
            max_size:      maximum size to read from the queue, in bytes
            fetch_step:    the step to increase the fetch size from min to max
            include_corrupt:

        This is a generator that will yield (status, messages) pairs, where
        status is a Partition.PollingStatus showing the work done to date by
        this Partition, and messages is a list of strs representing all
        available messages at this time for the topic and partition this
        Partition was initialized with.

        By default, the generator will pause for 1 second between polling for
        more messages.

        Example:
            dog_queue = Kafka().partition('good_dogs')
            for status, messages in dog_queue.poll(offset, poll_interval=5):
                for message in messages:
                    dog, bark = parse_barking(message)
                    print "{0} barked: {1}!".format(dog, bark)
                print "Count of barks received: {0}".format(status.messages_read)
                print "Total barking received: {0}".format(status.bytes_read)

        Note that this method assumes we can increment the offset by knowing
        the last read offset, the last read message size, and the header size.
        This will change if compression ever gets implemented and the header
        format changes: https://issues.apache.org/jira/browse/KAFKA-79
        """
        # Kafka msg headers are 9 bytes: 4=len(msg), 1=magic val, 4=CRC
        MESSAGE_HEADER_SIZE = 9
        # Init for first run
        # NOTE(review): when offset is None, start_offset is taken from
        # latest_offset() but the local `offset` is never reassigned, so the
        # first fetch_messages(offset) call passes None -- looks like a
        # latent bug; confirm callers always pass an explicit offset.
        first_loop = True
        start_offset = self.latest_offset() if offset is None else offset
        last_offset_read = None  # The offset of the last message we returned
        messages_read = 0  # How many messages have we read from the stream?
        bytes_read = 0     # Total number of bytes read from the stream?
        num_fetches = 0    # Number of times we've called fetch()
        seconds_slept = 0
        polling_start_time = datetime.now()
        # Try fetching with a set of different max sizes until we return a
        # set of messages.
        if min_size and max_size and fetch_step:
            fetch_sizes = range(min_size, max_size, fetch_step)
        else:
            fetch_sizes = [max_size]
        # Shorthand fetch call alias with everything filled in except offset
        # The return from a call to fetch is list of (offset, msg) tuples that
        # look like: [(0, 'Rusty'), (14, 'Patty'), (28, 'Jack'), (41, 'Clyde')]
        fetch_messages = partial(self._kafka.fetch,
                                 self._topic,
                                 partition=self._partition,
                                 min_size=min_size,
                                 max_size=max_size,
                                 fetch_step=fetch_step,
                                 callback=None,
                                 include_corrupt=include_corrupt)
        retry_attempts = 0
        while True:
            # Stop once we've read past the requested end of the stream.
            if end_offset is not None and offset > end_offset:
                break
            try:
                msg_batch = fetch_messages(offset)
                retry_attempts = 0  # resets after every successful fetch
            except (ConnectionFailure, IOError) as ex:
                # Transient failure: back off and retry, up to retry_limit
                # consecutive attempts (unlimited when retry_limit is None).
                if retry_limit is not None and retry_attempts > retry_limit:
                    kafka_log.exception(ex)
                    raise
                else:
                    time.sleep(poll_interval)
                    retry_attempts += 1
                    # kafka_log.exception(ex)
                    kafka_log.error("Retry #{0} for fetch of topic {1}, offset {2}"
                                    .format(retry_attempts, self._topic, offset))
                    continue
            except OffsetOutOfRange:
                # Catching and re-raising this with more helpful info.
                raise OffsetOutOfRange(("Offset {offset} is out of range for " +
                                        "topic {topic}, partition {partition} " +
                                        "(earliest: {earliest}, latest: {latest})")
                                       .format(offset=offset,
                                               topic=self._topic,
                                               partition=self._partition,
                                               earliest=self.earliest_offset(),
                                               latest=self.latest_offset()))
            # Filter out the messages that are past our end_offset
            if end_offset is not None:
                msg_batch = [(msg_offset, msg) for msg_offset, msg in msg_batch
                             if msg_offset <= end_offset]
            # For the first loop only, if nothing came back from the batch, make
            # sure that the offset we're asking for is a valid one. Right
            # now, Kafka.fetch() will just silently return an empty list if an
            # invalid-but-in-plausible-range offset is requested. We assume that
            # if we get past the first loop, we're ok, because we don't want to
            # constantly call earliest/latest_offset() (they're network calls)
            if first_loop and not msg_batch:
                # If we're not at the latest available offset, then a call to
                # fetch should return us something if it's valid. We have to
                # make another fetch here because there's a chance
                # latest_offset() could have moved since the last fetch.
                if self.earliest_offset() <= offset < self.latest_offset() and \
                   not fetch_messages(offset):
                    raise InvalidOffset("No message at offset {0}".format(offset))
            first_loop = False
            # Our typical processing...
            messages = [msg for msg_offset, msg in msg_batch]
            messages_read += len(messages)
            bytes_read += sum(len(msg) for msg in messages)
            num_fetches += 1
            if msg_batch:
                # Advance past the last message: its offset plus its payload
                # plus the fixed per-message header.
                last_offset_read, last_message_read = msg_batch[-1]
                offset = last_offset_read + len(last_message_read) + \
                         MESSAGE_HEADER_SIZE
            status = Partition.PollingStatus(start_offset=start_offset,
                                             next_offset=offset,
                                             last_offset_read=last_offset_read,
                                             messages_read=messages_read,
                                             bytes_read=bytes_read,
                                             num_fetches=num_fetches,
                                             polling_start_time=polling_start_time,
                                             seconds_slept=seconds_slept)
            yield status, messages  # messages is a list of strs
            # We keep grabbing as often as we can until we run out, after which
            # we start sleeping between calls until we see more.
            if poll_interval and not messages:
                time.sleep(poll_interval)
                seconds_slept += poll_interval
| mit |
Johannes-Sahlmann/linearfit | distribute_setup.py | 34 | 17319 | #!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import time
import fnmatch
import tempfile
import tarfile
import optparse
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
    import subprocess

    def _python_cmd(*args):
        # Run the current interpreter with *args; True iff exit status 0.
        args = (sys.executable,) + args
        return subprocess.call(args) == 0

except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        # Fallback for interpreters without the subprocess module.
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
# Distribute version to bootstrap and where to download it from.
DEFAULT_VERSION = "0.6.34"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Version advertised by the fake setuptools egg-info written when an
# existing setuptools install is patched over.
SETUPTOOLS_FAKED_VERSION = "0.6c11"

# Minimal PKG-INFO content for the faked setuptools metadata.
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball, install_args=()):
    """Unpack the distribute tarball into a temp dir and run its
    `setup.py install` there. Returns 2 on installation failure; the temp
    dir is removed either way."""
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        # Always restore the working directory and clean up the temp tree.
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* in a temp dir and build a distribute egg into *to_dir*.

    Raises IOError if the expected *egg* file is missing afterwards.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    """Download/build the distribute egg if missing, then import from it.

    The egg is prepended to sys.path so the subsequent ``import setuptools``
    resolves to the freshly built distribution.
    """
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure a distribute release >= *version* is importable.

    Tries pkg_resources first and falls back to downloading and building an
    egg into *to_dir*.  When *no_fake* is false, a plain setuptools install
    is patched first, and fake setuptools metadata is (re)created on exit.
    Exits the process with status 2 on an unresolvable version conflict.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            # _distribute exists only in distribute's pkg_resources; plain
            # setuptools falls through to the download path below.
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>=" + version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                # Too late to swap the already-imported module in-process.
                sys.stderr.write(
                    "The required version of distribute (>=%s) is not available,\n"
                    "and can't be installed while this script is running. Please\n"
                    "install a more recent version first, using\n"
                    "'easy_install -U distribute'."
                    "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
    except pkg_resources.DistributionNotFound:
        return _do_download(version, download_base, to_dir,
                            download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Fetch the distribute source tarball and return its absolute path.

    `version` selects the release; the file is expected to live under
    `download_base` (which must end with '/') and is written into `to_dir`.
    A previously downloaded tarball is reused.  `delay` is accepted for
    interface compatibility but is not used by this implementation.
    """
    to_dir = os.path.abspath(to_dir)
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    source = target = None
    if not os.path.exists(saveto):
        # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            source = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            payload = source.read()
            target = open(saveto, "wb")
            target.write(payload)
        finally:
            if source:
                source.close()
            if target:
                target.close()
    return os.path.realpath(saveto)
def _no_sandbox(function):
    """Decorator: run *function* with setuptools' DirectorySandbox disabled.

    DirectorySandbox._violation is swapped for a no-op while the wrapped
    filesystem operations run, and restored afterwards.  If setuptools is
    not importable there is nothing to patch.
    """
    def __no_sandbox(*args, **kw):
        try:
            from setuptools.sandbox import DirectorySandbox
            # _old doubles as the "already patched" marker so nested calls
            # do not clobber the saved handler.
            if not hasattr(DirectorySandbox, '_old'):
                def violation(*args):
                    pass
                DirectorySandbox._old = DirectorySandbox._violation
                DirectorySandbox._violation = violation
                patched = True
            else:
                patched = False
        except ImportError:
            patched = False
        try:
            return function(*args, **kw)
        finally:
            if patched:
                DirectorySandbox._violation = DirectorySandbox._old
                del DirectorySandbox._old
    return __no_sandbox
def _patch_file(path, content):
    """Back up the file at *path*, then overwrite it with *content*.

    Returns False when the file already holds *content*, True after patching.
    """
    f = open(path)
    existing_content = f.read()
    f.close()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # Keep a timestamped backup of the original before overwriting.
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True
# Patching must bypass setuptools' install-time sandbox.
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
    """Return True if the file at *path* holds exactly *content*."""
    with open(path) as handle:
        on_disk = handle.read()
    return on_disk == content
def _rename_path(path):
    """Rename *path* to a timestamped '<path>.OLD.<time>' backup name."""
    new_name = path + '.OLD.%s' % time.time()
    log.warn('Renaming %s to %s', path, new_name)
    os.rename(path, new_name)
    return new_name
def _remove_flat_installation(placeholder):
    """Neutralize a flat (non-egg) setuptools install under *placeholder*.

    The setuptools*.egg-info entry is replaced with the fake distribute
    metadata and the real setuptools files are renamed out of the way.

    Returns True when the installation was patched, False otherwise.
    """
    if not os.path.isdir(placeholder):
        # Fixed log-message typo ('Unkown').
        log.warn('Unknown installation at %s', placeholder)
        return False
    found = False
    # 'filename' instead of 'file' to avoid shadowing the builtin.
    for filename in os.listdir(placeholder):
        if fnmatch.fnmatch(filename, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        # Explicit False for consistency with the other failure paths
        # (previously a bare return; callers only truth-test the result).
        return False
    log.warn('Moving elements out of the way...')
    pkg_info = os.path.join(placeholder, filename)
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
    """Post-install hook: write fake setuptools metadata into purelib."""
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
    """Write fake setuptools egg-info plus a .pth file into *placeholder*.

    Makes the faked setuptools version visible to pkg_resources without a
    real install.  Silently skips when the directory is missing, the file
    already exists, or it is not writable.
    """
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
        (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    try:
        f = open(pkg_info, 'w')
    except EnvironmentError:
        log.warn("Don't have permissions to write %s, skipping", pkg_info)
        return
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()
# Writing into site-packages must bypass setuptools' sandbox.
_create_fake_setuptools_pkg_info = _no_sandbox(
    _create_fake_setuptools_pkg_info
)
def _patch_egg_dir(path):
    """Replace the egg directory at *path* with a faked-metadata shell.

    Returns False when the egg already carries the fake PKG-INFO,
    True after patching.
    """
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    # Move the real egg aside and rebuild a bare EGG-INFO/PKG-INFO shell.
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
    """Pre-install hook: fake out any existing setuptools install."""
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
    """Return True if *location* falls under the requested install target.

    Inspects the 'install' command-line arguments after ``install`` for
    --root/--prefix (either '--opt=value' or '--opt value' form) and for
    --user.  With no constraining option, everything is considered in scope.
    """
    if 'install' not in sys.argv:
        return True
    args = sys.argv[sys.argv.index('install') + 1:]
    for index, arg in enumerate(args):
        for option in ('--root', '--prefix'):
            if arg.startswith('%s=' % option):
                # Strip '<option>=' to get the target directory.  (The
                # previous code split on the literal 'root=', which broke
                # '--prefix=...' handling entirely.)
                top_dir = arg.split('=', 1)[-1]
                return location.startswith(top_dir)
            elif arg == option:
                # Value passed as a separate token; guard against the
                # option being the final argument (was an IndexError).
                if len(args) > index + 1:
                    top_dir = args[index + 1]
                    return location.startswith(top_dir)
        if arg == '--user' and USER_SITE is not None:
            return location.startswith(USER_SITE)
    return True
def _fake_setuptools():
    """Find an installed setuptools and replace its metadata with a fake.

    Handles both egg and flat installations; when patching succeeded the
    current command is relaunched so the new metadata takes effect.
    """
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        # replacement=False is a distribute extension; older APIs raise
        # TypeError and are handled in the fallback below.
        setuptools_dist = ws.find(
            pkg_resources.Requirement.parse('setuptools', replacement=False)
        )
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(
            pkg_resources.Requirement.parse('setuptools')
        )
    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return
    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patching complete.')
    _relaunch()
def _relaunch():
    """Re-run the current command in a fresh interpreter and exit with it."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip invokes setup.py via '-c'; restore a real script name so the
    # relaunched command behaves (pip marker to avoid a relaunch bug).
    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
    _cmd2 = ['-c', 'install', '--record']
    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    # NOTE(review): relies on the module-level `import subprocess`, which is
    # absent on the Python 2.3 fallback path above — confirm if 2.3 matters.
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    Backport of TarFile.extractall for old Pythons; *self* is the
    TarFile instance.
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448  # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories so children are fixed up before parents.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
    """Translate parsed command-line *options* into ``setup.py install`` args.

    Returns ['--user'] when a user-site install was requested (raising
    SystemExit(1) on interpreters older than 2.6), else an empty list.
    """
    if not options.user_install:
        return []
    if sys.version_info < (2, 6):
        log.warn("--user requires Python 2.6 or later")
        raise SystemExit(1)
    return ['--user']
def _parse_args():
    """
    Parse the command line for options

    Returns the optparse options object (--user, --download-base);
    positional arguments are discarded.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the distribute package')
    options, args = parser.parse_args()
    # positional arguments are ignored
    return options
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall.

    Downloads the requested distribute *version* and installs it, honouring
    the --user and --download-base command-line options.
    """
    options = _parse_args()
    # Forward *version*: previously the parameter was silently ignored and
    # DEFAULT_VERSION was always downloaded.
    tarball = download_setuptools(version=version,
                                  download_base=options.download_base)
    return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
    sys.exit(main())
| lgpl-3.0 |
satori99/three.js | utils/exporters/blender/addons/io_three/exporter/object.py | 19 | 6762 | from .. import constants, logger
from . import base_classes, api
class Object(base_classes.BaseNode):
    """Class that wraps an object node for export to the three.js format."""
    def __init__(self, node, parent=None, type=None):
        """Wrap *node*, or set up the scene root when *node* is falsy.

        :param node: Blender object to wrap (falsy for the scene root)
        :param parent: optional parent Object
        :param type: optional node type (the name shadows the builtin but
            is kept for backward compatibility with existing callers)
        """
        logger.debug("Object().__init__(%s)", node)
        base_classes.BaseNode.__init__(self, node, parent=parent, type=type)
        if self.node:
            self._node_setup()
        else:
            self._root_setup()
    @property
    def data(self):
        """
        :return: returns the data block of the node
        """
        return api.data(self.node)
    def _init_camera(self):
        """Initialize camera attributes (clip planes plus per-type extras)."""
        logger.debug("Object()._init_camera()")
        self[constants.FAR] = api.camera.far(self.data)
        self[constants.NEAR] = api.camera.near(self.data)
        if self[constants.TYPE] == constants.PERSPECTIVE_CAMERA:
            self[constants.ASPECT] = api.camera.aspect(self.data)
            self[constants.FOV] = api.camera.fov(self.data)
        elif self[constants.TYPE] == constants.ORTHOGRAPHIC_CAMERA:
            self[constants.LEFT] = api.camera.left(self.data)
            self[constants.RIGHT] = api.camera.right(self.data)
            self[constants.TOP] = api.camera.top(self.data)
            self[constants.BOTTOM] = api.camera.bottom(self.data)
    #@TODO: need more light attributes. Some may have to come from
    #       custom blender attributes.
    def _init_light(self):
        """Initialize light attributes (color/intensity plus per-type extras)."""
        logger.debug("Object()._init_light()")
        self[constants.COLOR] = api.light.color(self.data)
        self[constants.INTENSITY] = api.light.intensity(self.data)
        # Commented out because Blender's distance is not a cutoff value.
        #if self[constants.TYPE] != constants.DIRECTIONAL_LIGHT:
        #    self[constants.DISTANCE] = api.light.distance(self.data)
        self[constants.DISTANCE] = 0
        light_type = self[constants.TYPE]
        # TODO (abelnation): handle Area lights
        if light_type == constants.SPOT_LIGHT:
            self[constants.ANGLE] = api.light.angle(self.data)
            self[constants.DECAY] = api.light.falloff(self.data)
        elif light_type == constants.POINT_LIGHT:
            self[constants.DECAY] = api.light.falloff(self.data)
    def _init_mesh(self):
        """Link this node to the UUID of its already-parsed geometry."""
        logger.debug("Object()._init_mesh()")
        mesh = api.object.mesh(self.node, self.options)
        node = self.scene.geometry(mesh)
        if node:
            self[constants.GEOMETRY] = node[constants.UUID]
        else:
            msg = "Could not find Geometry() node for %s"
            logger.error(msg, self.node)
    def _node_setup(self):
        """Parse common node attributes of all objects."""
        logger.debug("Object()._node_setup()")
        self[constants.NAME] = api.object.name(self.node)
        transform = api.object.matrix(self.node, self.options)
        # Flatten the 4x4 transform in column-major order, as expected by
        # the three.js JSON format.
        self[constants.MATRIX] = [transform[row][col]
                                  for col in range(4)
                                  for row in range(4)]
        self[constants.VISIBLE] = api.object.visible(self.node)
        self[constants.TYPE] = api.object.node_type(self.node)
        if self.options.get(constants.MATERIALS):
            logger.info("Parsing materials for %s", self.node)
            # manthrax: multimaterial handling starts here
            material_names = api.object.material(self.node)
            if material_names:
                logger.info("Got material names for this object:%s",
                            str(material_names))
                material_uuids = [self.scene.material(objname)[constants.UUID]
                                  for objname in material_names]
                if len(material_uuids) == 0:
                    # No materials: don't export a material entry at all.
                    material_uuids = None
                elif len(material_uuids) == 1:
                    # A single material is exported as a lone UUID, not a list.
                    material_uuids = material_uuids[0]
                # else export array of material uuids
                self[constants.MATERIAL] = material_uuids
                logger.info("Materials:%s", str(self[constants.MATERIAL]))
            else:
                logger.info("%s has no materials", self.node)
            # manthrax: end multimaterial
        # TODO (abelnation): handle Area lights
        casts_shadow = (constants.MESH,
                        constants.DIRECTIONAL_LIGHT,
                        constants.SPOT_LIGHT)
        if self[constants.TYPE] in casts_shadow:
            logger.info("Querying shadow casting for %s", self.node)
            self[constants.CAST_SHADOW] = \
                api.object.cast_shadow(self.node)
        if self[constants.TYPE] == constants.MESH:
            logger.info("Querying shadow receive for %s", self.node)
            self[constants.RECEIVE_SHADOW] = \
                api.object.receive_shadow(self.node)
        camera = (constants.PERSPECTIVE_CAMERA,
                  constants.ORTHOGRAPHIC_CAMERA)
        # TODO (abelnation): handle Area lights
        lights = (constants.AMBIENT_LIGHT,
                  constants.DIRECTIONAL_LIGHT,
                  constants.POINT_LIGHT,
                  constants.SPOT_LIGHT, constants.HEMISPHERE_LIGHT)
        if self[constants.TYPE] == constants.MESH:
            self._init_mesh()
        elif self[constants.TYPE] in camera:
            self._init_camera()
        elif self[constants.TYPE] in lights:
            self._init_light()
        no_anim = (None, False, constants.OFF)
        if self.options.get(constants.KEYFRAMES) not in no_anim:
            logger.info("Export Transform Animation for %s", self.node)
            if self._scene:
                # only when exporting scene
                tracks = api.object.animated_xform(self.node, self.options)
                merge = self._scene[constants.ANIMATION][0][constants.KEYFRAMES]
                for track in tracks:
                    merge.append(track)
        if self.options.get(constants.HIERARCHY, False):
            for child in api.object.children(self.node, self.scene.valid_types):
                if not self.get(constants.CHILDREN):
                    self[constants.CHILDREN] = [Object(child, parent=self)]
                else:
                    self[constants.CHILDREN].append(Object(child, parent=self))
        if self.options.get(constants.CUSTOM_PROPERTIES, False):
            self[constants.USER_DATA] = api.object.custom_properties(self.node)
    def _root_setup(self):
        """Applies to a root/scene object."""
        logger.debug("Object()._root_setup()")
        # Root node carries the identity matrix (column-major 4x4).
        self[constants.MATRIX] = [1, 0, 0, 0, 0, 1, 0, 0, 0, 0,
                                  1, 0, 0, 0, 0, 1]
| mit |
mdrumond/tensorflow | tensorflow/python/layers/core.py | 6 | 16103 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
class Dense(base.Layer):
  """Densely-connected layer class.
  This layer implements the operation:
  `outputs = activation(matmul(inputs, kernel) + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).
  Note: if the input to the layer has a rank greater than 2, then it is
  flattened prior to the initial matrix multiply by `kernel`.
  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
      If `None` (default), weights are initialized using the default
      initializer used by `tf.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: An optional projection function to be applied to the
      kernel after being updated by an `Optimizer` (e.g. used to implement
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    bias_constraint: An optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such cases.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.
  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the kernel matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the kernel matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    kernel_constraint: Constraint function for the kernel matrix.
    bias_constraint: Constraint function for the bias.
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """
  def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name,
                                activity_regularizer=activity_regularizer,
                                **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.kernel_constraint = kernel_constraint
    self.bias_constraint = bias_constraint
    self.input_spec = base.InputSpec(min_ndim=2)
  def build(self, input_shape):
    """Create `kernel` (and optionally `bias`) once the input dim is known."""
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape[-1].value is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    # Pin the last axis so later calls are checked against the built size.
    self.input_spec = base.InputSpec(min_ndim=2,
                                     axes={-1: input_shape[-1].value})
    self.kernel = self.add_variable('kernel',
                                    shape=[input_shape[-1].value, self.units],
                                    initializer=self.kernel_initializer,
                                    regularizer=self.kernel_regularizer,
                                    constraint=self.kernel_constraint,
                                    dtype=self.dtype,
                                    trainable=True)
    if self.use_bias:
      self.bias = self.add_variable('bias',
                                    shape=[self.units,],
                                    initializer=self.bias_initializer,
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint,
                                    dtype=self.dtype,
                                    trainable=True)
    else:
      self.bias = None
    self.built = True
  def call(self, inputs):
    """Apply kernel (via matmul/tensordot), bias and activation to `inputs`."""
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    shape = inputs.get_shape().as_list()
    if len(shape) > 2:
      # Broadcasting is required for the inputs.
      outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
                                                             [0]])
      # Reshape the output back to the original ndim of the input.
      if context.in_graph_mode():
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs
  def _compute_output_shape(self, input_shape):
    """Output shape: the input shape with its last dimension set to `units`."""
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)
def dense(
    inputs, units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=init_ops.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    trainable=True,
    name=None,
    reuse=None):
  """Functional interface for the densely-connected layer.

  Computes `activation(matmul(inputs, kernel) + bias)` where `kernel` is a
  weight matrix created for the layer and `bias` is added only when
  `use_bias` is True.  Inputs of rank greater than 2 are flattened before
  the matrix multiply.

  All arguments mirror the `Dense` layer class: `units` sets the output
  dimensionality; `activation`, `use_bias`, the initializer, regularizer
  and constraint pairs, `trainable` and `name` are forwarded unchanged.
  Set `reuse=True` to share the weights of a previous layer with the same
  name.

  Returns:
    Output tensor.
  """
  dense_layer = Dense(units,
                      activation=activation,
                      use_bias=use_bias,
                      kernel_initializer=kernel_initializer,
                      bias_initializer=bias_initializer,
                      kernel_regularizer=kernel_regularizer,
                      bias_regularizer=bias_regularizer,
                      activity_regularizer=activity_regularizer,
                      kernel_constraint=kernel_constraint,
                      bias_constraint=bias_constraint,
                      trainable=trainable,
                      name=name,
                      dtype=inputs.dtype.base_dtype,
                      _scope=name,
                      _reuse=reuse)
  return dense_layer.apply(inputs)
class Dropout(base.Layer):
  """Applies Dropout to the input.
  Dropout consists in randomly setting a fraction `rate` of input units to 0
  at each update during training time, which helps prevent overfitting.
  The units that are kept are scaled by `1 / (1 - rate)`, so that their
  sum is unchanged at training time and inference time.
  Arguments:
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed}.
      for behavior.
    name: The name of the layer (string).
  """
  def __init__(self, rate=0.5,
               noise_shape=None,
               seed=None,
               name=None,
               **kwargs):
    super(Dropout, self).__init__(name=name, **kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed
  def _get_noise_shape(self, _):
    # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
    # which will override `self.noise_shape`, and allows for custom noise
    # shapes with dynamically sized inputs.
    return self.noise_shape
  def call(self, inputs, training=False):
    """Drop units at training time; return `inputs` unchanged otherwise."""
    def dropped_inputs():
      # nn.dropout takes a keep probability, hence `1 - rate`.
      return nn.dropout(inputs, 1 - self.rate,
                        noise_shape=self._get_noise_shape(inputs),
                        seed=self.seed)
    return utils.smart_cond(training,
                            dropped_inputs,
                            lambda: array_ops.identity(inputs))
def dropout(inputs,
            rate=0.5,
            noise_shape=None,
            seed=None,
            training=False,
            name=None):
  """Functional interface for the Dropout layer.

  During training, randomly zeroes a fraction `rate` of the input units and
  scales the kept units by `1 / (1 - rate)` so the expected sum is
  unchanged; at inference the input is returned untouched.  `training` may
  be a Python boolean or a scalar boolean tensor (e.g. a placeholder).
  `noise_shape` shapes the binary dropout mask and `seed` fixes the
  randomness (see `tf.set_random_seed`).

  Returns:
    Output tensor.
  """
  drop_layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
  return drop_layer.apply(inputs, training=training)
class Flatten(base.Layer):
  """Flattens an input tensor while preserving the batch axis (axis 0).
  Examples:
  ```
  x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
  y = Flatten()(x)
  # now `y` has shape `(None, 16)`
  x = tf.placeholder(shape=(None, 3, None), dtype='float32')
  y = Flatten()(x)
  # now `y` has shape `(None, None)`
  ```
  """
  def __init__(self, **kwargs):
    super(Flatten, self).__init__(**kwargs)
    self.input_spec = base.InputSpec(min_ndim=2)
  def call(self, inputs):
    # Collapse every axis after the batch axis into a single dimension.
    outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
    if context.in_graph_mode():
      outputs.set_shape(self._compute_output_shape(inputs.get_shape()))
    return outputs
  def _compute_output_shape(self, input_shape):
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    output_shape = [input_shape[0]]
    # The flattened size is static only when every non-batch dim is known.
    if all(input_shape[1:]):
      output_shape += [np.prod(input_shape[1:])]
    else:
      output_shape += [None]
    return tensor_shape.TensorShape(output_shape)
def flatten(inputs, name=None):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  Arguments:
    inputs: Tensor input.
    name: The name of the layer (string).

  Returns:
    Reshaped tensor of shape `(batch, product_of_remaining_dims)`; any
    unknown non-batch dimension makes the flattened dimension unknown.
  """
  flatten_layer = Flatten(name=name)
  return flatten_layer.apply(inputs)
# Aliases
# Backwards-compatible names mirroring other frameworks' terminology.
FullyConnected = Dense
fully_connected = dense
| apache-2.0 |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/keras/python/keras/regularizers_test.py | 55 | 2781 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.keras.python.keras import testing_utils
from tensorflow.python.platform import test
DATA_DIM = 5
NUM_CLASSES = 2
def get_data():
  """Builds a small random classification dataset with one-hot labels.

  Returns:
    `(x_train, y_train), (x_test, y_test)` with 10 samples each, inputs of
    shape `(10, DATA_DIM)` and labels one-hot encoded over NUM_CLASSES.
  """
  (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
      train_samples=10,
      test_samples=10,
      input_shape=(DATA_DIM,),
      num_classes=NUM_CLASSES)
  y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)
  y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)
  return (x_train, y_train), (x_test, y_test)
def create_model(kernel_regularizer=None, activity_regularizer=None):
  """Returns a Sequential model with a single Dense(NUM_CLASSES) layer
  carrying the given kernel/activity regularizers."""
  dense = keras.layers.Dense(NUM_CLASSES,
                             kernel_regularizer=kernel_regularizer,
                             activity_regularizer=activity_regularizer,
                             input_shape=(DATA_DIM,))
  return keras.models.Sequential([dense])
class KerasRegularizersTest(test.TestCase):
  """Checks that kernel/activity regularizers register exactly one loss
  term on the model and that such models can still be fit."""

  def test_kernel_regularization(self):
    with self.test_session():
      (x_train, y_train), _ = get_data()
      for reg in [keras.regularizers.l1(),
                  keras.regularizers.l2(),
                  keras.regularizers.l1_l2()]:
        model = create_model(kernel_regularizer=reg)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        # The single Dense layer's kernel regularizer contributes one loss.
        assert len(model.losses) == 1
        model.fit(x_train, y_train, batch_size=10,
                  epochs=1, verbose=0)

  def test_activity_regularization(self):
    with self.test_session():
      (x_train, y_train), _ = get_data()
      for reg in [keras.regularizers.l1(), keras.regularizers.l2()]:
        model = create_model(activity_regularizer=reg)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        # The activity regularizer likewise contributes exactly one loss.
        assert len(model.losses) == 1
        model.fit(x_train, y_train, batch_size=10,
                  epochs=1, verbose=0)
if __name__ == '__main__':
test.main()
| mit |
gkulkarni/JetMorphology | fitjet_3d.py | 1 | 5370 | """
File: fitjet_3d.py
Fits a geometric model to mock jet data. Uses image subtraction;
otherwise same as fitjet.py
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
import scipy.optimize as op
import emcee
import triangle
import sys
# These mock data are produced by jet3d.py.
a2 = np.fromfile('mockdata_3d_nc100.dat',dtype=np.float32)
def I(theta):
    """Renders the model jet image for parameter vector `theta` and returns
    it as a flattened 100x100 float32 histogram (length 10000).

    theta = (a, b, i, l, alpha, beta, gamma):
      a, beta   - control the axial profile z(u);
      b, alpha  - control the helix radius in x(u), y(u);
      i, l      - rotation angles fed to the rotation matrices ri()/rl();
      gamma     - scales z before it is normalised into the per-point
                  inclination array `iarr`.

    NOTE(review): this is Python 2 code (`xrange` below).  The parameter `a`
    is shadowed by the histogram array `a`, and the parameter `i` is
    shadowed by the final loop index — both are only needed before the
    shadowing occurs, but renaming would make this much safer.
    """
    a, b, i, l, alpha, beta, gamma = theta
    # Parametric coordinate along the spiral.
    u = np.linspace(0.0, 20.0*np.pi, 1000)
    def z(u):
        return (a/(2.0*np.pi)) * u * (u/(2.0*np.pi))**beta
    zv = z(u)
    def x(u):
        return (z(u)**-alpha) * (b/(2.0*np.pi)) * u * np.cos(u)
    def y(u):
        return (z(u)**-alpha) * (b/(2.0*np.pi)) * u * np.sin(u)
    xv = x(u)
    yv = y(u)
    # Rotation about the y axis by angle i ...
    def ri(i):
        return np.matrix([[np.cos(i), 0.0, np.sin(i)],[0.0, 1.0, 0.0],[-np.sin(i), 0.0, np.cos(i)]])
    # ... and about the z axis by angle l.
    def rl(l):
        return np.matrix([[np.cos(l), -np.sin(l), 0.0],[np.sin(l), np.cos(l), 0.0],[0.0, 0.0, 1.0]])
    # Per-point inclination: proportional to z, normalised to [0, pi/2].
    zvarr = zv*gamma
    iarr = zvarr/zvarr.max()
    iarr *= np.pi/2.0
    c = np.dstack((xv, yv, zv))
    c = np.squeeze(c)
    d = np.zeros((1000,3))
    lm = rl(l)
    # Rotate every curve point by its own ri(iarr[n]) followed by rl(l).
    for n in range(1000):
        d[n] = c[n]*ri(iarr[n])*lm
    xv = d[:,0]
    yv = d[:,1]
    # Drop NaNs (z**-alpha is undefined at u = 0).
    xv = xv[~np.isnan(xv)]
    yv = yv[~np.isnan(yv)]
    # Bin the projected (x, y) points onto an nc x nc image; `a` now shadows
    # the model parameter of the same name.
    nc = 100
    a = np.zeros((nc,nc),dtype=np.float32)
    zl = xv.min() - 5.0
    zu = xv.max() + 5.0
    yl = yv.min() - 5.0
    yu = yv.max() + 5.0
    lz = zu - zl
    ly = yu - yl
    dz = lz/nc
    dy = -ly/nc # Because "y" coordinate increases in opposite direction to "y" array index of a (or a2).
    def zloc(cood):
        return int((cood-zl)/dz) + 1
    def yloc(cood):
        return int((cood-yl)/dy) + 1
    for i in xrange(xv.size):
        zpos = zloc(xv[i])
        ypos = yloc(yv[i])
        a[ypos, zpos] += 1.0
    return a.flatten()
def neglnlike(theta, intensity, intensity_err):
    """Negative Gaussian log-likelihood of the model image I(theta)
    against the observed `intensity` with per-pixel errors."""
    model = I(theta)
    weights = 1.0/intensity_err**2
    return 0.5*np.sum((intensity-model)**2*weights - np.log(weights))
# Assume a constant measurement error of 0.1 per pixel.
a2_err = np.zeros_like(a2)
a2_err += 0.1
# Starting point (a, b, i, l, alpha, beta, gamma) for the optimiser.
theta_guess = (0.1, 10.0, 2.0, 3.0, 0.2, 2.0, 0.5)
# Maximum-likelihood fit; Nelder-Mead avoids needing gradients of I().
# NOTE(review): bare `print` statements — this file is Python 2.
result = op.minimize(neglnlike, theta_guess, args=(a2, a2_err), method='Nelder-Mead')
print result.x
print result.success
def lnprior(theta):
    """Flat (top-hat) log-prior: 0.0 when every parameter lies strictly
    inside its allowed interval, -inf otherwise."""
    a, b, i, l, alpha, beta, gamma = theta
    bounds = ((0.05, a, 0.15),
              (8.0, b, 12.0),
              (1.0, i, 3.0),
              (2.0, l, 4),
              (0.1, alpha, 0.3),
              (1.0, beta, 3.0),
              (0.3, gamma, 0.7))
    for lo, value, hi in bounds:
        if not lo < value < hi:
            return -np.inf
    return 0.0
def lnprob(theta, intensity, intensity_err):
    """Log-posterior for emcee: prior plus log-likelihood (-neglnlike)."""
    prior = lnprior(theta)
    if np.isfinite(prior):
        return prior - neglnlike(theta, intensity, intensity_err)
    return -np.inf
# Sample the 7-parameter posterior with 100 walkers, each started in a
# small Gaussian ball around the maximum-likelihood point.
ndim, nwalkers = 7, 100
pos = [result.x + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(a2, a2_err))
sampler.run_mcmc(pos, 500)
# Discard the first 100 steps of every walker as burn-in, then flatten
# to shape (nwalkers * 400, ndim).
samples = sampler.chain[:, 100:, :].reshape((-1, ndim))
plot_chain = True
if plot_chain:
    # One trace panel per model parameter, walkers overplotted, with the
    # maximum-likelihood value as a horizontal reference line.
    #
    # BUGFIX: the previous copy-pasted panels plotted chain index 3
    # (lambda_0) and result.x[3] in the alpha, beta and gamma panels as
    # well; each panel now shows its own parameter (indices 0-6).
    mpl.rcParams['font.size'] = '10'
    nplots = 7
    fig = plt.figure(figsize=(12, 6), dpi=100)
    labels = (r'$A$', '$B$', r'$i_0$', r'$\lambda_0$',
              r'$\alpha$', r'$\beta$', r'$\gamma$')
    for iparam, label in enumerate(labels):
        ax = fig.add_subplot(nplots, 1, iparam + 1)
        for iwalker in range(nwalkers):
            ax.plot(sampler.chain[iwalker, :, iparam], c='k', alpha=0.1)
        ax.axhline(result.x[iparam], c='#CC9966', dashes=[7, 2], lw=2)
        ax.set_ylabel(label)
        if iparam < nplots - 1:
            # Only the bottom panel keeps its x tick labels.
            ax.set_xticklabels('')
    ax.set_xlabel('step')
    plt.savefig('chains.pdf', bbox_inches='tight')

# Corner plot of the posterior samples, with the MLE point marked.
mpl.rcParams['font.size'] = '14'
fig = triangle.corner(samples,
                      labels=['$A$', '$B$', '$i_0$', r'$\lambda_0$',
                              r'$\alpha$', r'$\beta$', r'$\gamma$'],
                      truths=result.x)
fig.savefig("triangle.pdf")
| mit |
premanandchandrasekar/boto | boto/dynamodb/condition.py | 185 | 3881 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.dynamodb.types import dynamize_value
class Condition(object):
    """
    Base class for DynamoDB comparison conditions.

    Mostly a marker type (so callers can test `isinstance(x, Condition)`),
    plus structural equality based on the serialised form.
    """

    def __eq__(self, other):
        if isinstance(other, Condition):
            return self.to_dict() == other.to_dict()
        # BUGFIX: previously this fell off the end and implicitly returned
        # None for non-Condition operands; NotImplemented lets Python fall
        # back to its default comparison instead.
        return NotImplemented

    def __ne__(self, other):
        # Needed for consistent `!=` under Python 2, which does not derive
        # __ne__ from __eq__.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result


class ConditionNoArgs(Condition):
    """
    Abstract class for Conditions that require no arguments, such
    as NULL or NOT_NULL.
    """

    def __repr__(self):
        return '%s' % self.__class__.__name__

    def to_dict(self):
        return {'ComparisonOperator': self.__class__.__name__}


class ConditionOneArg(Condition):
    """
    Abstract class for Conditions that require a single argument
    such as EQ or NE.
    """

    def __init__(self, v1):
        self.v1 = v1

    def __repr__(self):
        return '%s:%s' % (self.__class__.__name__, self.v1)

    def to_dict(self):
        return {'AttributeValueList': [dynamize_value(self.v1)],
                'ComparisonOperator': self.__class__.__name__}


class ConditionTwoArgs(Condition):
    """
    Abstract class for Conditions that require two arguments.
    The only example of this currently is BETWEEN.
    """

    def __init__(self, v1, v2):
        self.v1 = v1
        self.v2 = v2

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__.__name__, self.v1, self.v2)

    def to_dict(self):
        values = (self.v1, self.v2)
        return {'AttributeValueList': [dynamize_value(v) for v in values],
                'ComparisonOperator': self.__class__.__name__}


class ConditionSeveralArgs(Condition):
    """
    Abstract class for conditions that require several arguments (ex: IN).
    """

    def __init__(self, values):
        self.values = values

    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__,
                                 ', '.join(self.values))

    def to_dict(self):
        return {'AttributeValueList': [dynamize_value(v) for v in self.values],
                'ComparisonOperator': self.__class__.__name__}


# Concrete comparison operators; the class name doubles as the DynamoDB
# ComparisonOperator string.
class EQ(ConditionOneArg):
    pass


class NE(ConditionOneArg):
    pass


class LE(ConditionOneArg):
    pass


class LT(ConditionOneArg):
    pass


class GE(ConditionOneArg):
    pass


class GT(ConditionOneArg):
    pass


class NULL(ConditionNoArgs):
    pass


class NOT_NULL(ConditionNoArgs):
    pass


class CONTAINS(ConditionOneArg):
    pass


class NOT_CONTAINS(ConditionOneArg):
    pass


# BUGFIX: BEGINS_WITH was defined twice (identically); the duplicate
# definition has been removed.
class BEGINS_WITH(ConditionOneArg):
    pass


class IN(ConditionSeveralArgs):
    pass


class BETWEEN(ConditionTwoArgs):
    pass
| mit |
dsfsdgsbngfggb/odoo | openerp/report/render/rml2pdf/customfonts.py | 261 | 3493 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 P. Christeas, Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP SA. (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from reportlab import rl_config
import logging
import glob
import os
# .apidoc title: TTF Font Table
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro (although most Linux/Unix ones)
should have the same filenames, only need the code below).
Due to an awful configuration that ships with reportlab at many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
CustomTTFonts = []
# Search path for TTF files, in addition of rl_config.TTFSearchPath
# BUGFIX: '/usr/local/share/fonts' previously had no trailing comma, so
# Python's implicit string concatenation fused it with the next entry into
# the bogus path '/usr/local/share/fonts/usr/share/fonts/TTF/*'.
TTFSearchPath = [
            '/usr/share/fonts/truetype', # SuSE
            '/usr/share/fonts/dejavu', '/usr/share/fonts/liberation', # Fedora, RHEL
            '/usr/share/fonts/truetype/*', # Ubuntu
            '/usr/local/share/fonts',
            '/usr/share/fonts/TTF/*', # Mandriva/Mageia
            '/usr/share/fonts/TTF', # Arch Linux
            '/usr/lib/openoffice/share/fonts/truetype/',
            '~/.fonts',
            '~/.local/share/fonts',

            # mac os X - from
            # http://developer.apple.com/technotes/tn/tn2024.html
            '~/Library/Fonts',
            '/Library/Fonts',
            '/Network/Library/Fonts',
            '/System/Library/Fonts',

            # windows
            'c:/winnt/fonts',
            'c:/windows/fonts'
]
def list_all_sysfonts():
    """
    Return a list of TrueType font files (*.ttf/*.ttc) found on this system.
    """
    # Search both our own path list and reportlab's, deduplicated; reportlab's
    # TTFOpenFile is not very good at this, so we glob ourselves.
    found = []
    for dirname in set(TTFSearchPath + rl_config.TTFSearchPath):
        pattern = os.path.join(os.path.expanduser(dirname), '*.[Tt][Tt][FfCc]')
        found.extend(glob.glob(pattern))
    return found
def SetCustomFonts(rmldoc):
    """ Map some font names to the corresponding TTF fonts

        The ttf font may not even have the same name, as in
        Times -> Liberation Serif.

        This function is called once per report, so it should
        avoid system-wide processing (cache it, instead).
    """
    for family, font, filename, mode in CustomTTFonts:
        # Only register mappings whose file is an absolute, existing path.
        if not os.path.isabs(filename):
            continue
        if not os.path.exists(filename):
            continue
        rmldoc.setTTFontMapping(family, font, filename, mode)
    return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
neocogent/electrum | electrum/json_db.py | 2 | 28777 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ast
import copy
import functools
import json
import os
import threading
from collections import defaultdict
from typing import Dict, Optional
from . import util, bitcoin
from .util import profiler, WalletFileException, multisig_type, TxMinedInfo
from .keystore import bip44_derivation
from .transaction import Transaction
from .logging import Logger
# seed_version is now used for the version of the wallet file
OLD_SEED_VERSION = 4 # electrum versions < 2.0
NEW_SEED_VERSION = 11 # electrum versions >= 2.0
FINAL_SEED_VERSION = 18 # electrum >= 2.7 will set this to prevent
# old versions from overwriting new format
class JsonDBJsonEncoder(util.MyEncoder):
    """JSON encoder that additionally serialises Transaction objects
    via str(); everything else defers to util.MyEncoder."""
    def default(self, obj):
        if isinstance(obj, Transaction):
            return str(obj)
        return super().default(obj)
class JsonDB(Logger):
    def __init__(self, raw, *, manual_upgrades):
        """Create a wallet DB.

        raw: JSON text of an existing wallet file; falsy creates a fresh DB
             at FINAL_SEED_VERSION.
        manual_upgrades: if True, old-format files are not upgraded
             automatically on load (the caller must invoke upgrade()).
        """
        Logger.__init__(self)
        self.lock = threading.RLock()  # guards self.data / self._modified
        self.data = {}
        self._modified = False
        self.manual_upgrades = manual_upgrades
        self._called_after_upgrade_tasks = False
        if raw: # loading existing db
            self.load_data(raw)
        else: # creating new db
            self.put('seed_version', FINAL_SEED_VERSION)
            self._after_upgrade_tasks()
def set_modified(self, b):
with self.lock:
self._modified = b
def modified(self):
return self._modified
def modifier(func):
def wrapper(self, *args, **kwargs):
with self.lock:
self._modified = True
return func(self, *args, **kwargs)
return wrapper
def locked(func):
def wrapper(self, *args, **kwargs):
with self.lock:
return func(self, *args, **kwargs)
return wrapper
@locked
def get(self, key, default=None):
v = self.data.get(key)
if v is None:
v = default
else:
v = copy.deepcopy(v)
return v
@modifier
def put(self, key, value):
try:
json.dumps(key, cls=JsonDBJsonEncoder)
json.dumps(value, cls=JsonDBJsonEncoder)
except:
self.logger.info(f"json error: cannot save {repr(key)} ({repr(value)})")
return False
if value is not None:
if self.data.get(key) != value:
self.data[key] = copy.deepcopy(value)
return True
elif key in self.data:
self.data.pop(key)
return True
return False
def commit(self):
pass
@locked
def dump(self):
return json.dumps(self.data, indent=4, sort_keys=True, cls=JsonDBJsonEncoder)
    def load_data(self, s):
        """Parse wallet file text `s` into self.data and trigger any
        required split/upgrade handling.

        Falls back to ast.literal_eval for pre-JSON (repr-style) wallet
        files, keeping only JSON-serialisable entries from them.
        """
        try:
            self.data = json.loads(s)
        except:
            # Legacy wallet files were Python-repr dicts, not JSON.
            try:
                d = ast.literal_eval(s)
                # NOTE(review): `labels` is computed but never used here.
                labels = d.get('labels', {})
            except Exception as e:
                # NOTE(review): `e` is unused; the original cause is dropped.
                raise IOError("Cannot read wallet file")
            self.data = {}
            for key, value in d.items():
                try:
                    json.dumps(key)
                    json.dumps(value)
                except:
                    # Skip entries that cannot survive a JSON round-trip.
                    self.logger.info(f'Failed to convert label to json format: {key}')
                    continue
                self.data[key] = value
        if not isinstance(self.data, dict):
            raise WalletFileException("Malformed wallet file (not dict)")
        # Multi-account wallets must be split into separate files first.
        if not self.manual_upgrades and self.requires_split():
            raise WalletFileException("This wallet has multiple accounts and must be split")
        if not self.requires_upgrade():
            self._after_upgrade_tasks()
        elif not self.manual_upgrades:
            self.upgrade()
def requires_split(self):
d = self.get('accounts', {})
return len(d) > 1
def split_accounts(self):
result = []
# backward compatibility with old wallets
d = self.get('accounts', {})
if len(d) < 2:
return
wallet_type = self.get('wallet_type')
if wallet_type == 'old':
assert len(d) == 2
data1 = copy.deepcopy(self.data)
data1['accounts'] = {'0': d['0']}
data1['suffix'] = 'deterministic'
data2 = copy.deepcopy(self.data)
data2['accounts'] = {'/x': d['/x']}
data2['seed'] = None
data2['seed_version'] = None
data2['master_public_key'] = None
data2['wallet_type'] = 'imported'
data2['suffix'] = 'imported'
result = [data1, data2]
elif wallet_type in ['bip44', 'trezor', 'keepkey', 'ledger', 'btchip', 'digitalbitbox', 'safe_t']:
mpk = self.get('master_public_keys')
for k in d.keys():
i = int(k)
x = d[k]
if x.get("pending"):
continue
xpub = mpk["x/%d'"%i]
new_data = copy.deepcopy(self.data)
# save account, derivation and xpub at index 0
new_data['accounts'] = {'0': x}
new_data['master_public_keys'] = {"x/0'": xpub}
new_data['derivation'] = bip44_derivation(k)
new_data['suffix'] = k
result.append(new_data)
else:
raise WalletFileException("This wallet has multiple accounts and must be split")
return result
def requires_upgrade(self):
return self.get_seed_version() < FINAL_SEED_VERSION
@profiler
def upgrade(self):
self.logger.info('upgrading wallet format')
if self._called_after_upgrade_tasks:
# we need strict ordering between upgrade() and after_upgrade_tasks()
raise Exception("'after_upgrade_tasks' must NOT be called before 'upgrade'")
self._convert_imported()
self._convert_wallet_type()
self._convert_account()
self._convert_version_13_b()
self._convert_version_14()
self._convert_version_15()
self._convert_version_16()
self._convert_version_17()
self._convert_version_18()
self.put('seed_version', FINAL_SEED_VERSION) # just to be sure
self._after_upgrade_tasks()
def _after_upgrade_tasks(self):
self._called_after_upgrade_tasks = True
self._load_transactions()
def _convert_wallet_type(self):
if not self._is_upgrade_method_needed(0, 13):
return
wallet_type = self.get('wallet_type')
if wallet_type == 'btchip': wallet_type = 'ledger'
if self.get('keystore') or self.get('x1/') or wallet_type=='imported':
return False
assert not self.requires_split()
seed_version = self.get_seed_version()
seed = self.get('seed')
xpubs = self.get('master_public_keys')
xprvs = self.get('master_private_keys', {})
mpk = self.get('master_public_key')
keypairs = self.get('keypairs')
key_type = self.get('key_type')
if seed_version == OLD_SEED_VERSION or wallet_type == 'old':
d = {
'type': 'old',
'seed': seed,
'mpk': mpk,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif key_type == 'imported':
d = {
'type': 'imported',
'keypairs': keypairs,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['xpub', 'standard']:
xpub = xpubs["x/"]
xprv = xprvs.get("x/")
d = {
'type': 'bip32',
'xpub': xpub,
'xprv': xprv,
'seed': seed,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['bip44']:
xpub = xpubs["x/0'"]
xprv = xprvs.get("x/0'")
d = {
'type': 'bip32',
'xpub': xpub,
'xprv': xprv,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif wallet_type in ['trezor', 'keepkey', 'ledger', 'digitalbitbox', 'safe_t']:
xpub = xpubs["x/0'"]
derivation = self.get('derivation', bip44_derivation(0))
d = {
'type': 'hardware',
'hw_type': wallet_type,
'xpub': xpub,
'derivation': derivation,
}
self.put('wallet_type', 'standard')
self.put('keystore', d)
elif (wallet_type == '2fa') or multisig_type(wallet_type):
for key in xpubs.keys():
d = {
'type': 'bip32',
'xpub': xpubs[key],
'xprv': xprvs.get(key),
}
if key == 'x1/' and seed:
d['seed'] = seed
self.put(key, d)
else:
raise WalletFileException('Unable to tell wallet type. Is this even a wallet file?')
# remove junk
self.put('master_public_key', None)
self.put('master_public_keys', None)
self.put('master_private_keys', None)
self.put('derivation', None)
self.put('seed', None)
self.put('keypairs', None)
self.put('key_type', None)
def _convert_version_13_b(self):
# version 13 is ambiguous, and has an earlier and a later structure
if not self._is_upgrade_method_needed(0, 13):
return
if self.get('wallet_type') == 'standard':
if self.get('keystore').get('type') == 'imported':
pubkeys = self.get('keystore').get('keypairs').keys()
d = {'change': []}
receiving_addresses = []
for pubkey in pubkeys:
addr = bitcoin.pubkey_to_address('p2pkh', pubkey)
receiving_addresses.append(addr)
d['receiving'] = receiving_addresses
self.put('addresses', d)
self.put('pubkeys', None)
self.put('seed_version', 13)
def _convert_version_14(self):
# convert imported wallets for 3.0
if not self._is_upgrade_method_needed(13, 13):
return
if self.get('wallet_type') =='imported':
addresses = self.get('addresses')
if type(addresses) is list:
addresses = dict([(x, None) for x in addresses])
self.put('addresses', addresses)
elif self.get('wallet_type') == 'standard':
if self.get('keystore').get('type')=='imported':
addresses = set(self.get('addresses').get('receiving'))
pubkeys = self.get('keystore').get('keypairs').keys()
assert len(addresses) == len(pubkeys)
d = {}
for pubkey in pubkeys:
addr = bitcoin.pubkey_to_address('p2pkh', pubkey)
assert addr in addresses
d[addr] = {
'pubkey': pubkey,
'redeem_script': None,
'type': 'p2pkh'
}
self.put('addresses', d)
self.put('pubkeys', None)
self.put('wallet_type', 'imported')
self.put('seed_version', 14)
def _convert_version_15(self):
if not self._is_upgrade_method_needed(14, 14):
return
if self.get('seed_type') == 'segwit':
# should not get here; get_seed_version should have caught this
raise Exception('unsupported derivation (development segwit, v14)')
self.put('seed_version', 15)
def _convert_version_16(self):
# fixes issue #3193 for Imported_Wallets with addresses
# also, previous versions allowed importing any garbage as an address
# which we now try to remove, see pr #3191
if not self._is_upgrade_method_needed(15, 15):
return
def remove_address(addr):
def remove_from_dict(dict_name):
d = self.get(dict_name, None)
if d is not None:
d.pop(addr, None)
self.put(dict_name, d)
def remove_from_list(list_name):
lst = self.get(list_name, None)
if lst is not None:
s = set(lst)
s -= {addr}
self.put(list_name, list(s))
# note: we don't remove 'addr' from self.get('addresses')
remove_from_dict('addr_history')
remove_from_dict('labels')
remove_from_dict('payment_requests')
remove_from_list('frozen_addresses')
if self.get('wallet_type') == 'imported':
addresses = self.get('addresses')
assert isinstance(addresses, dict)
addresses_new = dict()
for address, details in addresses.items():
if not bitcoin.is_address(address):
remove_address(address)
continue
if details is None:
addresses_new[address] = {}
else:
addresses_new[address] = details
self.put('addresses', addresses_new)
self.put('seed_version', 16)
def _convert_version_17(self):
# delete pruned_txo; construct spent_outpoints
if not self._is_upgrade_method_needed(16, 16):
return
self.put('pruned_txo', None)
transactions = self.get('transactions', {}) # txid -> raw_tx
spent_outpoints = defaultdict(dict)
for txid, raw_tx in transactions.items():
tx = Transaction(raw_tx)
for txin in tx.inputs():
if txin['type'] == 'coinbase':
continue
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
spent_outpoints[prevout_hash][str(prevout_n)] = txid
self.put('spent_outpoints', spent_outpoints)
self.put('seed_version', 17)
def _convert_version_18(self):
# delete verified_tx3 as its structure changed
if not self._is_upgrade_method_needed(17, 17):
return
self.put('verified_tx3', None)
self.put('seed_version', 18)
# def _convert_version_19(self):
# TODO for "next" upgrade:
# - move "pw_hash_version" from keystore to storage
# pass
def _convert_imported(self):
if not self._is_upgrade_method_needed(0, 13):
return
# '/x' is the internal ID for imported accounts
d = self.get('accounts', {}).get('/x', {}).get('imported',{})
if not d:
return False
addresses = []
keypairs = {}
for addr, v in d.items():
pubkey, privkey = v
if privkey:
keypairs[pubkey] = privkey
else:
addresses.append(addr)
if addresses and keypairs:
raise WalletFileException('mixed addresses and privkeys')
elif addresses:
self.put('addresses', addresses)
self.put('accounts', None)
elif keypairs:
self.put('wallet_type', 'standard')
self.put('key_type', 'imported')
self.put('keypairs', keypairs)
self.put('accounts', None)
else:
raise WalletFileException('no addresses or privkeys')
def _convert_account(self):
if not self._is_upgrade_method_needed(0, 13):
return
self.put('accounts', None)
def _is_upgrade_method_needed(self, min_version, max_version):
assert min_version <= max_version
cur_version = self.get_seed_version()
if cur_version > max_version:
return False
elif cur_version < min_version:
raise WalletFileException(
'storage upgrade: unexpected version {} (should be {}-{})'
.format(cur_version, min_version, max_version))
else:
return True
    @locked
    def get_seed_version(self):
        """Return the storage-format version of this wallet file.

        Raises WalletFileException for files newer than FINAL_SEED_VERSION,
        and delegates to _raise_unsupported_version for known-bad old
        formats (development segwit v14, or pre-2.0 oddities).
        """
        seed_version = self.get('seed_version')
        if not seed_version:
            # Very old files did not store a version; a 128-char hex master
            # public key identifies the pre-2.0 format.
            seed_version = OLD_SEED_VERSION if len(self.get('master_public_key','')) == 128 else NEW_SEED_VERSION
        if seed_version > FINAL_SEED_VERSION:
            raise WalletFileException('This version of Electrum is too old to open this wallet.\n'
                                      '(highest supported storage version: {}, version of this file: {})'
                                      .format(FINAL_SEED_VERSION, seed_version))
        if seed_version==14 and self.get('seed_type') == 'segwit':
            # should not get here normally; v14 segwit is unsupported
            self._raise_unsupported_version(seed_version)
        if seed_version >=12:
            return seed_version
        if seed_version not in [OLD_SEED_VERSION, NEW_SEED_VERSION]:
            self._raise_unsupported_version(seed_version)
        return seed_version
def _raise_unsupported_version(self, seed_version):
msg = "Your wallet has an unsupported seed version."
if seed_version in [5, 7, 8, 9, 10, 14]:
msg += "\n\nTo open this wallet, try 'git checkout seed_v%d'"%seed_version
if seed_version == 6:
# version 1.9.8 created v6 wallets when an incorrect seed was entered in the restore dialog
msg += '\n\nThis file was created because of a bug in version 1.9.8.'
if self.get('master_public_keys') is None and self.get('master_private_keys') is None and self.get('imported_keys') is None:
# pbkdf2 (at that time an additional dependency) was not included with the binaries, and wallet creation aborted.
msg += "\nIt does not contain any keys, and can safely be removed."
else:
# creation was complete if electrum was run from source
msg += "\nPlease open this file with Electrum 1.9.8, and move your coins to a new wallet."
raise WalletFileException(msg)
@locked
def get_txi(self, tx_hash):
return list(self.txi.get(tx_hash, {}).keys())
@locked
def get_txo(self, tx_hash):
return list(self.txo.get(tx_hash, {}).keys())
@locked
def get_txi_addr(self, tx_hash, address):
return self.txi.get(tx_hash, {}).get(address, [])
@locked
def get_txo_addr(self, tx_hash, address):
return self.txo.get(tx_hash, {}).get(address, [])
@modifier
def add_txi_addr(self, tx_hash, addr, ser, v):
if tx_hash not in self.txi:
self.txi[tx_hash] = {}
d = self.txi[tx_hash]
if addr not in d:
# note that as this is a set, we can ignore "duplicates"
d[addr] = set()
d[addr].add((ser, v))
@modifier
def add_txo_addr(self, tx_hash, addr, n, v, is_coinbase):
if tx_hash not in self.txo:
self.txo[tx_hash] = {}
d = self.txo[tx_hash]
if addr not in d:
# note that as this is a set, we can ignore "duplicates"
d[addr] = set()
d[addr].add((n, v, is_coinbase))
@locked
def list_txi(self):
return list(self.txi.keys())
@locked
def list_txo(self):
return list(self.txo.keys())
@modifier
def remove_txi(self, tx_hash):
self.txi.pop(tx_hash, None)
@modifier
def remove_txo(self, tx_hash):
self.txo.pop(tx_hash, None)
@locked
def list_spent_outpoints(self):
return [(h, n)
for h in self.spent_outpoints.keys()
for n in self.get_spent_outpoints(h)
]
@locked
def get_spent_outpoints(self, prevout_hash):
return list(self.spent_outpoints.get(prevout_hash, {}).keys())
@locked
def get_spent_outpoint(self, prevout_hash, prevout_n):
prevout_n = str(prevout_n)
return self.spent_outpoints.get(prevout_hash, {}).get(prevout_n)
@modifier
def remove_spent_outpoint(self, prevout_hash, prevout_n):
prevout_n = str(prevout_n)
self.spent_outpoints[prevout_hash].pop(prevout_n, None)
if not self.spent_outpoints[prevout_hash]:
self.spent_outpoints.pop(prevout_hash)
@modifier
def set_spent_outpoint(self, prevout_hash, prevout_n, tx_hash):
prevout_n = str(prevout_n)
if prevout_hash not in self.spent_outpoints:
self.spent_outpoints[prevout_hash] = {}
self.spent_outpoints[prevout_hash][prevout_n] = tx_hash
@modifier
def add_transaction(self, tx_hash: str, tx: Transaction) -> None:
assert isinstance(tx, Transaction)
self.transactions[tx_hash] = tx
@modifier
def remove_transaction(self, tx_hash) -> Optional[Transaction]:
return self.transactions.pop(tx_hash, None)
@locked
def get_transaction(self, tx_hash: str) -> Optional[Transaction]:
return self.transactions.get(tx_hash)
@locked
def list_transactions(self):
return list(self.transactions.keys())
@locked
def get_history(self):
return list(self.history.keys())
def is_addr_in_history(self, addr):
# does not mean history is non-empty!
return addr in self.history
@locked
def get_addr_history(self, addr):
return self.history.get(addr, [])
@modifier
def set_addr_history(self, addr, hist):
self.history[addr] = hist
@modifier
def remove_addr_history(self, addr):
self.history.pop(addr, None)
@locked
def list_verified_tx(self):
return list(self.verified_tx.keys())
@locked
def get_verified_tx(self, txid):
if txid not in self.verified_tx:
return None
height, timestamp, txpos, header_hash = self.verified_tx[txid]
return TxMinedInfo(height=height,
conf=None,
timestamp=timestamp,
txpos=txpos,
header_hash=header_hash)
@modifier
def add_verified_tx(self, txid, info):
self.verified_tx[txid] = (info.height, info.timestamp, info.txpos, info.header_hash)
@modifier
def remove_verified_tx(self, txid):
self.verified_tx.pop(txid, None)
def is_in_verified_tx(self, txid):
return txid in self.verified_tx
@modifier
def update_tx_fees(self, d):
return self.tx_fees.update(d)
@locked
def get_tx_fee(self, txid):
return self.tx_fees.get(txid)
@modifier
def remove_tx_fee(self, txid):
self.tx_fees.pop(txid, None)
@locked
def get_data_ref(self, name):
# Warning: interacts un-intuitively with 'put': certain parts
# of 'data' will have pointers saved as separate variables.
if name not in self.data:
self.data[name] = {}
return self.data[name]
@locked
def num_change_addresses(self):
return len(self.change_addresses)
@locked
def num_receiving_addresses(self):
return len(self.receiving_addresses)
@locked
def get_change_addresses(self, *, slice_start=None, slice_stop=None):
# note: slicing makes a shallow copy
return self.change_addresses[slice_start:slice_stop]
@locked
def get_receiving_addresses(self, *, slice_start=None, slice_stop=None):
    # Return a slice of the receiving chain; callers may mutate the result
    # freely since...
    # note: slicing makes a shallow copy
    return self.receiving_addresses[slice_start:slice_stop]
@modifier
def add_change_address(self, addr):
    """Append *addr* to the change chain and index it for reverse lookup."""
    next_index = len(self.change_addresses)
    self._addr_to_addr_index[addr] = (True, next_index)
    self.change_addresses.append(addr)
@modifier
def add_receiving_address(self, addr):
    """Append *addr* to the receiving chain and index it for reverse lookup."""
    next_index = len(self.receiving_addresses)
    self._addr_to_addr_index[addr] = (False, next_index)
    self.receiving_addresses.append(addr)
@locked
def get_address_index(self, address):
    # Reverse lookup: address -> (is_change, position within its chain),
    # or None for addresses this wallet does not own.
    return self._addr_to_addr_index.get(address)
@modifier
def add_imported_address(self, addr, d):
    # 'd' is the per-address metadata dict (keys not visible from here --
    # presumably pubkey/privkey-related; confirm against callers).
    self.imported_addresses[addr] = d
@modifier
def remove_imported_address(self, addr):
    """Delete an imported address; raises KeyError if it is not present."""
    del self.imported_addresses[addr]
@locked
def has_imported_address(self, addr):
    # Membership test only; does not validate the address string.
    return addr in self.imported_addresses
@locked
def get_imported_addresses(self):
    """Return all imported addresses, sorted, as a new list."""
    return sorted(self.imported_addresses)
@locked
def get_imported_address(self, addr):
    # Metadata dict for an imported address, or None if not imported.
    return self.imported_addresses.get(addr)
def load_addresses(self, wallet_type):
    """ called from Abstract_Wallet.__init__ """
    # Imported wallets keep a flat address -> metadata mapping under the
    # 'addresses' key; bind it directly.
    if wallet_type == 'imported':
        self.imported_addresses = self.get_data_ref('addresses')
    else:
        # Deterministic wallets keep two chains under 'addresses':
        # 'receiving' and 'change'. Ensure both lists exist before
        # binding references to them.
        self.get_data_ref('addresses')
        for name in ['receiving', 'change']:
            if name not in self.data['addresses']:
                self.data['addresses'][name] = []
        self.change_addresses = self.data['addresses']['change']
        self.receiving_addresses = self.data['addresses']['receiving']
        # Rebuild the in-memory reverse index (not persisted).
        self._addr_to_addr_index = {}  # key: address, value: (is_change, index)
        for i, addr in enumerate(self.receiving_addresses):
            self._addr_to_addr_index[addr] = (False, i)
        for i, addr in enumerate(self.change_addresses):
            self._addr_to_addr_index[addr] = (True, i)
@profiler
def _load_transactions(self):
    """Bind the tx-related maps to self.data and normalize their contents.

    Raw JSON storage gives us hex strings and lists; convert them to
    Transaction objects and sets of tuples, then garbage-collect entries
    that no longer reference a stored transaction. Order matters: the
    maps must all be bound before the cleanup passes below run.
    """
    # references in self.data
    self.txi = self.get_data_ref('txi')  # txid -> address -> list of (prev_outpoint, value)
    self.txo = self.get_data_ref('txo')  # txid -> address -> list of (output_index, value, is_coinbase)
    self.transactions = self.get_data_ref('transactions')  # type: Dict[str, Transaction]
    self.spent_outpoints = self.get_data_ref('spent_outpoints')
    self.history = self.get_data_ref('addr_history')  # address -> list of (txid, height)
    self.verified_tx = self.get_data_ref('verified_tx3')  # txid -> (height, timestamp, txpos, header_hash)
    self.tx_fees = self.get_data_ref('tx_fees')
    # convert raw hex transactions to Transaction objects
    for tx_hash, raw_tx in self.transactions.items():
        self.transactions[tx_hash] = Transaction(raw_tx)
    # convert list to set
    # (JSON cannot store sets or tuples, so they round-trip as lists)
    for t in self.txi, self.txo:
        for d in t.values():
            for addr, lst in d.items():
                d[addr] = set([tuple(x) for x in lst])
    # remove unreferenced tx
    # (iterate over a snapshot of the keys because we pop while looping)
    for tx_hash in list(self.transactions.keys()):
        if not self.get_txi(tx_hash) and not self.get_txo(tx_hash):
            self.logger.info(f"removing unreferenced tx: {tx_hash}")
            self.transactions.pop(tx_hash)
    # remove unreferenced outpoints
    # (only the inner dicts are mutated, so iterating the outer keys is safe)
    for prevout_hash in self.spent_outpoints.keys():
        d = self.spent_outpoints[prevout_hash]
        for prevout_n, spending_txid in list(d.items()):
            if spending_txid not in self.transactions:
                self.logger.info("removing unreferenced spent outpoint")
                d.pop(prevout_n)
@modifier
def clear_history(self):
    """Wipe all per-address / per-tx state (txi/txo, spends, history, fees)."""
    maps_to_clear = (self.txi, self.txo, self.spent_outpoints,
                     self.transactions, self.history,
                     self.verified_tx, self.tx_fees)
    for mapping in maps_to_clear:
        mapping.clear()
| mit |
txemagon/1984 | modules/Telegram-bot-python/telegram/callbackgame.py | 3 | 1054 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram CallbackGame."""
from telegram import TelegramObject
class CallbackGame(TelegramObject):
    """A placeholder, currently holds no information.

    Use BotFather to set up your game; the Bot API defines no fields on
    this object, so the class body is intentionally empty.
    """
| gpl-3.0 |
RaspberryPiFi/CloudCode | handlers/api.py | 1 | 5836 | """api.py: handles API requests"""
__author__ = "Tom Hanson"
__copyright__ = "Copyright 2014"
__credits__ = ["Tom Hanson"]
__license__ = "GPL"
__maintainer__ = "Tom Hanson"
__email__ = "tom@aporcupine.com"
import json
from google.appengine.api import memcache
from google.appengine.ext import ndb
import models
from modules import customframework
class SystemEnrollHandler(customframework.RequestHandler):
    """Provides functions to handle system enroll requests"""
    url = '/api/system_enroll'
    page_title = 'enroll'

    def post(self):
        """Creates a group entry with registration code and returns the ID"""
        # Input validation: the body must be JSON with a 6-character
        # 'registration_code'; anything else (bad JSON, missing key,
        # wrong length) is answered with a 400.
        try:
            request = json.loads(self.request.body)
            registration_code = request['registration_code']
            if len(registration_code) != 6:
                raise ValueError
        except Exception:
            self.error(400)
            self.response.write('6 digit registration_code in a JSON object required')
            return
        # A registration code may be in use by at most one group: 409 on reuse.
        q = models.Group.query(models.Group.registration_code == registration_code)
        if q.fetch():
            self.error(409)
        else:
            # Create the group plus its first (master) device and return
            # both datastore ids to the caller.
            new_group = models.Group(registration_code=registration_code).put()
            new_device = models.Device(parent=new_group).put()
            response_dict = {'group_id': str(new_group.id()),
                             'device_id': str(new_device.id())}
            json_string = json.dumps(response_dict)
            self.response.headers['Content-Type'] = "application/json"
            self.response.write(json_string)
class CheckEnrollHandler(customframework.RequestHandler):
    """Provides functions to handle requests to check enroll status"""
    url = '/api/system_enroll_status'
    page_title = 'check enroll status'

    def post(self):
        """Returns the registered status for ID provided"""
        # Body must be JSON containing an integer-convertible 'group_id'.
        try:
            request = json.loads(self.request.body)
            group_id = int(request['group_id'])
        except Exception:
            self.error(400)
            self.response.write('group_id in a JSON object must be supplied!')
            return
        group = models.Group.get_by_id(group_id)
        if group:
            # Reply with the group's current registration flag.
            json_string = json.dumps({'registered': group.registered})
            self.response.headers['Content-Type'] = "application/json"
            self.response.write(json_string)
        else:
            self.error(400)
            self.response.write('Group with id provided does not exist')
class DeviceEnrollHandler(customframework.RequestHandler):
    """Provides functions to handle device enroll requests"""
    url = '/api/device_enroll'
    page_title = 'enroll device'

    def post(self):
        """Creates a device entry and returns its id"""
        # Body must be JSON containing an integer-convertible 'group_id'.
        try:
            request = json.loads(self.request.body)
            group_id = int(request['group_id'])
        except Exception:
            self.error(400)
            self.response.write('group_id in a JSON object must be supplied!')
            return
        group = models.Group.get_by_id(group_id)
        if group:
            # New devices are stored as children of their group entity.
            new_device = models.Device(parent=group.key).put()
            json_string = json.dumps({'device_id': str(new_device.id())})
            self.response.headers['Content-Type'] = "application/json"
            self.response.write(json_string)
        else:
            self.error(400)
            self.response.write('Group with id provided does not exist')
class GroupUpdateHandler(customframework.RequestHandler):
    """Provides functions to handle group update requests"""
    url = '/api/group/update'
    page_title = 'group update'

    def post(self):
        """Communicates any actions to be taken to the master device"""
        # Body must be JSON containing an integer-convertible 'group_id';
        # it is also expected to carry a 'statuses' payload (unvalidated --
        # see the TODO below).
        try:
            request = json.loads(self.request.body)
            group_id = int(request['group_id'])
        except Exception:
            self.error(400)
            self.response.write('group_id in a JSON object must be supplied!')
            return
        #TODO: Use channels to send updates to the users
        #TODO: Sanitise this data
        # Cache the reported device statuses for 30 seconds so web clients
        # can poll them without touching the datastore.
        memcache.set('%s_statuses' % group_id, request['statuses'], 30)
        # Hand any queued actions to the master device, then clear the queue
        # so each action is delivered at most once.
        actions = memcache.get('actions_%s' % group_id)
        if actions:
            memcache.set('actions_%s' % group_id, [])
            json_string = json.dumps({'actions': actions})
        else:
            json_string = json.dumps({})
        self.response.headers['Content-Type'] = "application/json"
        self.response.write(json_string)
class LibraryUpdateHandler(customframework.RequestHandler):
    """Provides functions to handle library update requests"""
    url = '/api/group/library'
    page_title = 'Library Update'

    def post(self):
        """Gets an update from the master device.

        Expects a JSON body with 'group_id', 'artists', 'albums' and
        'songs'; replaces the group's stored AudioData and invalidates the
        memcache entries derived from it.
        """
        # Validate the request like every sibling handler does, so a
        # malformed body yields a 400 instead of an unhandled 500.
        try:
            request = json.loads(self.request.body)
            group_key = ndb.Key(models.Group, int(request['group_id']))
            artists = request['artists']
            albums = request['albums']
            songs = request['songs']
        except Exception:
            self.error(400)
            self.response.write('group_id, artists, albums and songs in a '
                                'JSON object must be supplied!')
            return
        db_artists = []
        for artist in artists:
            db_artists.append(models.Artist(name=artist['name']))
        db_albums = []
        for album in albums:
            db_albums.append(models.Album(name=album['name'],
                                          artist_name=album['artist']))
        db_songs = []
        for song in songs:
            db_songs.append(models.Song(name=song['name'],
                                        artist_name=song['artist'],
                                        album_name=song['album'],
                                        length=song['length'],
                                        path=song['path'],
                                        source=song['source'],
                                        track=song['track']))
        # Reuse the group's existing AudioData row if there is one.
        query = models.AudioData.query(models.AudioData.group_key == group_key)
        results = query.fetch(1)
        if results:
            audio_data = results[0]
        else:
            audio_data = models.AudioData(group_key=group_key)
        # Drop cached views of the old library before storing the new one.
        memcache.delete_multi(['_albums', '_artists', '_songs'],
                              key_prefix=group_key.urlsafe())
        audio_data.artists = db_artists
        audio_data.albums = db_albums
        audio_data.songs = db_songs
        audio_data.put()
| gpl-2.0 |
xiaoyaozi5566/GEM5_DRAMSim2 | src/arch/x86/isa/insts/x87/control/__init__.py | 91 | 2495 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Category submodules, each of which contributes a 'microcode' string.
categories = ["initialize",
              "wait_for_exceptions",
              "clear_exceptions",
              "save_and_restore_x87_control_word",
              "save_x87_status_word",
              "save_and_restore_x87_environment"]

microcode = '''
# X86 microcode
'''

# Concatenate the microcode from every category submodule.
# __import__(name) performs the same (Python-2 implicit-relative) import
# as the original `exec "import %s as cat" % category` did, without
# resorting to exec.
for category in categories:
    cat = __import__(category)
    microcode += cat.microcode
| bsd-3-clause |
mupi/timtec | core/templatetags/usergroup.py | 8 | 1920 | # https://djangosnippets.org/snippets/2566/
from django import template
from django.template import resolve_variable, NodeList
from django.contrib.auth.models import Group
register = template.Library()
@register.tag()
def ifusergroup(parser, token):
    """ Check to see if the currently logged in user belongs to a specific
    group. Requires the Django authentication contrib app and middleware.
    Usage: {% ifusergroup Admins %} ... {% endifusergroup %}, or
           {% ifusergroup Admins|Group1|"Group 2" %} ... {% endifusergroup %}, or
           {% ifusergroup Admins %} ... {% else %} ... {% endifusergroup %}
    """
    # The tag takes exactly one argument; multiple groups are packed into
    # that single argument with '|' separators.
    try:
        _, group = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError("Tag 'ifusergroup' requires 1 argument.")
    # Collect the "true" branch up to {% else %} or {% endifusergroup %}.
    nodelist_true = parser.parse(('else', 'endifusergroup'))
    token = parser.next_token()
    if token.contents == 'else':
        # An else branch exists: parse it and discard the closing tag token.
        nodelist_false = parser.parse(('endifusergroup',))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    return GroupCheckNode(group, nodelist_true, nodelist_false)
class GroupCheckNode(template.Node):
    """Template node that renders one of two branches depending on whether
    the current user belongs to any of the named groups."""

    def __init__(self, group, nodelist_true, nodelist_false):
        self.group = group  # raw tag argument, e.g. 'Admins|"Group 2"'
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false

    def render(self, context):
        # NOTE(review): resolve_variable is deprecated in modern Django;
        # kept because this module targets the old template API.
        user = resolve_variable('user', context)
        # NOTE(review): is_authenticated is *called* here, which is the
        # pre-Django-1.10 API -- confirm against the project's Django version.
        if not user.is_authenticated():
            return self.nodelist_false.render(context)
        for group in self.group.split("|"):
            # Strip surrounding double quotes from quoted group names.
            group = group[1:-1] if group.startswith('"') and group.endswith('"') else group
            try:
                if Group.objects.get(name=group) in user.groups.all():
                    return self.nodelist_true.render(context)
            except Group.DoesNotExist:
                # Unknown group names simply do not match.
                pass
        return self.nodelist_false.render(context)
| agpl-3.0 |
hhatto/autopep8 | test/suite/E12.py | 13 | 6766 | #: E121
print "E121", (
"dent")
#: E122
print "E122", (
"dent")
#: E123
my_list = [
1, 2, 3,
4, 5, 6,
]
#: E124
print "E124", ("visual",
"indent_two"
)
#: E124
print "E124", ("visual",
"indent_five"
)
#: E124
a = (123,
)
#: E129
if (row < 0 or self.moduleCount <= row or
col < 0 or self.moduleCount <= col):
raise Exception("%s,%s - %s" % (row, col, self.moduleCount))
#: E126
print "E126", (
"dent")
#: E126
print "E126", (
"dent")
#: E127
print "E127", ("over-",
"over-indent")
#: E128
print "E128", ("visual",
"hanging")
#: E128
print "E128", ("under-",
"under-indent")
#:
#: E126
my_list = [
1, 2, 3,
4, 5, 6,
]
#: E121
result = {
'key1': 'value',
'key2': 'value',
}
#: E126 E126
rv.update(dict.fromkeys((
'qualif_nr', 'reasonComment_en', 'reasonComment_fr',
'reasonComment_de', 'reasonComment_it'),
'?'),
"foo")
#: E126
abricot = 3 + \
4 + \
5 + 6
#: E131
print "hello", (
"there",
# "john",
"dude")
#: E126
part = set_mimetype((
a.get('mime_type', 'text')),
'default')
#:
#: E122
if True:
result = some_function_that_takes_arguments(
'a', 'b', 'c',
'd', 'e', 'f',
)
#: E122
if some_very_very_very_long_variable_name or var \
or another_very_long_variable_name:
raise Exception()
#: E122
if some_very_very_very_long_variable_name or var[0] \
or another_very_long_variable_name:
raise Exception()
#: E122
if True:
if some_very_very_very_long_variable_name or var \
or another_very_long_variable_name:
raise Exception()
#: E122
if True:
if some_very_very_very_long_variable_name or var[0] \
or another_very_long_variable_name:
raise Exception()
#: E122
dictionary = [
"is": {
"nested": yes(),
},
]
#: E122
setup('',
scripts=[''],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
])
#:
#: E123 W291
print "E123", (
"bad", "hanging", "close"
)
#
#: E123 E123 E123
result = {
'foo': [
'bar', {
'baz': 'frop',
}
]
}
#: E123
result = some_function_that_takes_arguments(
'a', 'b', 'c',
'd', 'e', 'f',
)
#: E124
my_list = [1, 2, 3,
4, 5, 6,
]
#: E124
my_list = [1, 2, 3,
4, 5, 6,
]
#: E124
result = some_function_that_takes_arguments('a', 'b', 'c',
'd', 'e', 'f',
)
#: E124
fooff(aaaa,
cca(
vvv,
dadd
), fff,
)
#: E124
fooff(aaaa,
ccaaa(
vvv,
dadd
),
fff,
)
#: E124
d = dict('foo',
help="exclude files or directories which match these "
"comma separated patterns (default: %s)" % DEFAULT_EXCLUDE
)
#: E124 E128 E128
if line_removed:
self.event(cr, uid,
name="Removing the option for contract",
description="contract line has been removed",
)
#:
#: E125
if foo is None and bar is "frop" and \
blah == 'yeah':
blah = 'yeahnah'
#: E125
# Further indentation required as indentation is not distinguishable
def long_function_name(
var_one, var_two, var_three,
var_four):
print(var_one)
#
#: E125
def qualify_by_address(
self, cr, uid, ids, context=None,
params_to_check=frozenset(QUALIF_BY_ADDRESS_PARAM)):
""" This gets called by the web server """
#: E129
if (a == 2 or
b == "abc def ghi"
"jkl mno"):
return True
#:
#: E126
my_list = [
1, 2, 3,
4, 5, 6,
]
#: E126
abris = 3 + \
4 + \
5 + 6
#: E126
fixed = re.sub(r'\t+', ' ', target[c::-1], 1)[::-1] + \
target[c + 1:]
#: E126 E126
rv.update(dict.fromkeys((
'qualif_nr', 'reasonComment_en', 'reasonComment_fr',
'reasonComment_de', 'reasonComment_it'),
'?'),
"foo")
#: E126
eat_a_dict_a_day({
"foo": "bar",
})
#: E126
if (
x == (
3
) or
y == 4):
pass
#: E126
if (
x == (
3
) or
x == (
3
) or
y == 4):
pass
#: E131
troublesome_hash = {
"hash": "value",
"long": "the quick brown fox jumps over the lazy dog before doing a "
"somersault",
}
#:
#: E128
# Arguments on first line forbidden when not using vertical alignment
foo = long_function_name(var_one, var_two,
var_three, var_four)
#
#: E128
print('l.%s\t%s\t%s\t%r' %
(token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
#: E128
def qualify_by_address(self, cr, uid, ids, context=None,
params_to_check=frozenset(QUALIF_BY_ADDRESS_PARAM)):
""" This gets called by the web server """
#:
#: E128
foo(1, 2, 3,
4, 5, 6)
#: E128
foo(1, 2, 3,
4, 5, 6)
#: E128
foo(1, 2, 3,
4, 5, 6)
#: E128
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E127
foo(1, 2, 3,
4, 5, 6)
#: E128 E128
if line_removed:
self.event(cr, uid,
name="Removing the option for contract",
description="contract line has been removed",
)
#: E124 E127 E127
if line_removed:
self.event(cr, uid,
name="Removing the option for contract",
description="contract line has been removed",
)
#: E127
rv.update(d=('a', 'b', 'c'),
e=42)
#
#: E127
rv.update(d=('a' + 'b', 'c'),
e=42, f=42
+ 42)
#: E127
input1 = {'a': {'calc': 1 + 2}, 'b': 1
+ 42}
#: E128
rv.update(d=('a' + 'b', 'c'),
e=42, f=(42
+ 42))
#: E123
if True:
def example_issue254():
return [node.copy(
(
replacement
# First, look at all the node's current children.
for child in node.children
# Replace them.
for replacement in replace(child)
),
dict(name=token.undefined)
)]
#: E125:2:5 E125:8:5
if ("""
"""):
pass
for foo in """
abc
123
""".strip().split():
print(foo)
#: E122:6:5 E122:7:5 E122:8:1
print dedent(
'''
mkdir -p ./{build}/
mv ./build/ ./{build}/%(revision)s/
'''.format(
build='build',
# more stuff
)
)
#: E701:1:8 E122:2:1 E203:4:8 E128:5:1
if True:\
print(True)
print(a
, end=' ')
#:
| mit |
LeartS/odoo | addons/sale/res_partner.py | 94 | 1964 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
class res_partner(osv.osv):
    _inherit = 'res.partner'

    def _sale_order_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field getter: number of sale orders per partner.

        Returns {partner_id: count}; every id defaults to 0 so partners
        whose orders cannot be read still get a value.
        """
        res = dict.fromkeys(ids, 0)
        # The current user may not have access rights for sale orders.
        # Catch Exception (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate.
        try:
            for partner in self.browse(cr, uid, ids, context):
                res[partner.id] = len(partner.sale_order_ids)
        except Exception:
            pass
        return res

    def copy(self, cr, uid, record_id, default=None, context=None):
        """Duplicate a partner without carrying over its sale orders."""
        if default is None:
            default = {}
        default.update({'sale_order_ids': []})
        return super(res_partner, self).copy(cr, uid, record_id, default, context)

    _columns = {
        'sale_order_count': fields.function(_sale_order_count, string='# of Sales Order', type='integer'),
        'sale_order_ids': fields.one2many('sale.order', 'partner_id', 'Sales Order')
    }
| agpl-3.0 |
intel-ctrlsys/actsys | oobrestserver/oobrestserver/test/test_auth_server.py | 1 | 2251 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Intel Corp.
#
import os
import sys
import base64
import uuid
from cherrypy.test import helper
from oobrestserver.Application import Application
from oobrestserver.Authenticator import Authenticator
class TestServer(helper.CPWebCase):
    # Shared Application under test; built once in setup_server().
    app = None

    @staticmethod
    def setup_server():
        """Build the app with a small device tree, enable HTTP basic auth
        with one known user, and mount it (invoked once by CPWebCase)."""
        # Make the package root importable regardless of the cwd used.
        test_path = os.path.dirname(os.path.realpath(__file__))
        app_path = os.path.join(test_path, '..')
        if app_path not in sys.path:
            sys.path.append(app_path)
        config = {
            "node1": {
                "FooString": {
                    "#obj": ["oob_rest_default_providers.StringDevice", "Foo"]
                },
                "HelloDevice": {
                    "#obj": ["oob_rest_default_providers.HelloSensor"]
                },
                "folder": {
                    "InsideString": {
                        "#obj": ["oob_rest_default_providers.StringDevice", "Inside"]
                    }
                }
            }
        }
        TestServer.app = Application(config)
        # Write a one-user credentials file, point the app at it, then
        # delete the file (the app has already loaded its contents).
        auth = Authenticator()
        filename = 'temp_auth_file_'+str(uuid.uuid4())
        auth.add_user('test_user', 'Test_Pass_01')
        auth.save(filename)
        TestServer.app.enable_auth(filename)
        TestServer.app.mount()
        os.remove(os.path.abspath(filename))

    def test_auth_file_created(self):
        # enable_auth() must create the credentials file when it is missing.
        my_app = Application({})
        filename = 'temp_auth_file_'+str(uuid.uuid4())
        self.assertFalse(os.path.exists(os.path.abspath(filename)))
        my_app.enable_auth(filename)
        self.assertTrue(os.path.exists(os.path.abspath(filename)))
        os.remove(os.path.abspath(filename))

    # NOTE(review): no @classmethod decorator here; relies on how the test
    # runner invokes teardown_class -- confirm this is intentional.
    def teardown_class(cls):
        super(TestServer, cls).teardown_class()

    def test_no_auth(self):
        # Requests without credentials must be rejected.
        self.getPage('/api/node1/FooString/string/')
        self.assertStatus('401 Unauthorized')

    def test_auth(self):
        # Requests carrying valid basic-auth credentials must succeed.
        b64_value = base64.b64encode('test_user:Test_Pass_01'.encode('utf-8'))
        self.getPage('/api/node1/FooString/string/',
                     headers=[('Authorization',
                               'Basic %s' % b64_value.decode('utf-8'))])
        self.assertStatus('200 OK')
| apache-2.0 |
Svjard/presto-admin | prestoadmin/workers.py | 1 | 3726 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for the presto worker`'s configuration.
Loads and validates the workers.json file and creates the files needed
to deploy on the presto cluster
"""
import copy
import logging
import urlparse
from fabric.api import env
from prestoadmin import config
from prestoadmin.presto_conf import validate_presto_conf, get_presto_conf, \
REQUIRED_FILES
from prestoadmin.util import constants
from prestoadmin.util.exception import ConfigurationError
import prestoadmin.util.fabricapi as util
# Baseline worker configuration. get_conf() deep-copies this and overlays
# whatever the user supplied; build_defaults() additionally fills in the
# coordinator's discovery.uri.
DEFAULT_PROPERTIES = {"node.properties":
                      {"node.environment": "presto",
                       "node.data-dir": "/var/lib/presto/data",
                       "plugin.config-dir": "/etc/presto/catalog",
                       "plugin.dir": "/usr/lib/presto/lib/plugin"},
                      "jvm.config": ["-server",
                                     "-Xmx1G",
                                     "-XX:-UseBiasedLocking",
                                     "-XX:+UseG1GC",
                                     "-XX:+ExplicitGCInvokesConcurrent",
                                     "-XX:+HeapDumpOnOutOfMemoryError",
                                     "-XX:+UseGCOverheadLimit",
                                     "-XX:OnOutOfMemoryError=kill -9 %p"],
                      "config.properties": {"coordinator": "false",
                                            "http-server.http.port": "8080",
                                            "task.max-memory": "1GB"}
                      }
_LOGGER = logging.getLogger(__name__)
def get_conf():
    """Load the workers config, fill in defaults, validate, and return it."""
    conf = _get_conf()
    # Log which of the required config files will fall back to defaults.
    for name in REQUIRED_FILES:
        if name not in conf:
            _LOGGER.debug("Workers configuration for %s not found. "
                          "Default configuration will be deployed", name)
    defaults = build_defaults()
    config.fill_defaults(conf, defaults)
    validate(conf)
    return conf
def _get_conf():
    # Raw (unvalidated, defaults not applied) workers configuration.
    return get_presto_conf(constants.WORKERS_DIR)
def build_defaults():
    """Return DEFAULT_PROPERTIES plus a discovery.uri aimed at the coordinator."""
    conf = copy.deepcopy(DEFAULT_PROPERTIES)
    coordinator = util.get_coordinator_role()[0]
    discovery_uri = "http://%s:8080" % coordinator
    conf["config.properties"]["discovery.uri"] = discovery_uri
    return conf
def islocalhost(hostname):
    """Whether *hostname* is a loopback name/address (IPv4 or IPv6)."""
    loopback_names = ("localhost", "127.0.0.1", "::1")
    return hostname in loopback_names
def validate(conf):
    """Check that *conf* is a legal worker configuration and return it.

    Raises ConfigurationError on any violation.
    """
    validate_presto_conf(conf)
    # Workers must never be configured as the coordinator.
    if conf["config.properties"]["coordinator"] != "false":
        raise ConfigurationError("Coordinator must be false in the "
                                 "worker's config.properties")
    uri = urlparse.urlparse(conf["config.properties"]["discovery.uri"])
    # A localhost discovery.uri only makes sense on a single-node cluster;
    # on a multi-node cluster it would point every worker at itself.
    if islocalhost(uri.hostname) and len(env.roledefs['all']) > 1:
        raise ConfigurationError(
            "discovery.uri should not be localhost in a "
            "multi-node cluster, but found " + urlparse.urlunparse(uri) +
            ". You may have encountered this error by "
            "choosing a coordinator that is localhost and a worker that "
            "is not. The default discovery-uri is "
            "http://<coordinator>:8080")
    return conf
| apache-2.0 |
tedder/ansible | lib/ansible/plugins/callback/full_skip.py | 74 | 2498 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: full_skip
type: stdout
short_description: suppresses tasks if all hosts skipped
description:
- Use this plugin when you dont care about any output for tasks that were completly skipped
version_added: "2.4"
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.11'
alternative: "'default' callback plugin with 'display_skipped_hosts = no' option"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuation
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
    '''
    Stdout callback identical to 'default', except that a task in which
    every host was skipped produces no output at all: the task banner is
    buffered in self.outlines and only flushed once a non-skip result
    (ok/failed) arrives.
    '''

    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'full_skip'

    def _drop_buffered_lines(self):
        # Forget the buffered task banner so it is never printed.
        self.outlines = []

    def v2_runner_on_skipped(self, result):
        self._drop_buffered_lines()

    def v2_playbook_item_on_skipped(self, result):
        self._drop_buffered_lines()

    def v2_runner_item_on_skipped(self, result):
        self._drop_buffered_lines()

    def v2_runner_on_failed(self, result, ignore_errors=False):
        # A failure means the task ran somewhere: flush the banner first.
        self.display()
        super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)

    def v2_playbook_on_task_start(self, task, is_conditional):
        # Buffer (rather than print) the task header; display() emits it
        # only if some host produces a non-skipped result.
        self.outlines = ["TASK [%s]" % task.get_name().strip()]
        if self._display.verbosity >= 2:
            path = task.get_path()
            if path:
                self.outlines.append("task path: %s" % path)

    def v2_playbook_item_on_ok(self, result):
        self.display()
        super(CallbackModule, self).v2_playbook_item_on_ok(result)

    def v2_runner_on_ok(self, result):
        self.display()
        super(CallbackModule, self).v2_runner_on_ok(result)

    def display(self):
        """Flush the buffered task header, if any, and clear the buffer."""
        if not self.outlines:
            return
        banner_line = self.outlines[0]
        self._display.banner(banner_line)
        for extra_line in self.outlines[1:]:
            self._display.display(extra_line)
        self.outlines = []
| gpl-3.0 |
dkubiak789/OpenUpgrade | addons/l10n_be_coda/l10n_be_coda.py | 63 | 3843 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class account_bank_statement(osv.osv):
    _inherit = 'account.bank.statement'
    _columns = {
        # Free-form notes carried over from the imported CODA file.
        'coda_note': fields.text('CODA Notes'),
    }
class account_bank_statement_line(osv.osv):
    _inherit = 'account.bank.statement.line'
    _columns = {
        'coda_account_number': fields.char('Account Number', help="The Counter Party Account Number")
    }

    def _create_bank_account_if_missing(self, cr, uid, acc_number, partner_id, context=None):
        """Create a res.partner.bank for (acc_number, partner_id) unless a
        bank account with that number already exists.

        Best effort: if the base 'bank_normal' account type cannot be
        resolved (get_object_reference raises ValueError), the account is
        silently not created -- same behaviour as the pre-refactoring code.
        """
        bank_pool = self.pool.get('res.partner.bank')
        if bank_pool.search(cr, uid, [('acc_number', '=', acc_number)]):
            return
        try:
            type_model, type_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'bank_normal')
            type_record = self.pool.get('res.partner.bank.type').browse(cr, uid, type_id, context=context)
            bank_pool.create(cr, uid, {'acc_number': acc_number,
                                       'partner_id': partner_id,
                                       'state': type_record.code}, context=context)
        except ValueError:
            pass

    def create(self, cr, uid, data, context=None):
        """Create a statement line.

        When both partner_id and coda_account_number are set, also create
        the counter-party bank account if it does not exist yet.
        """
        if data.get('partner_id') and data.get('coda_account_number'):
            self._create_bank_account_if_missing(
                cr, uid, data['coda_account_number'], data['partner_id'],
                context=context)
        return super(account_bank_statement_line, self).create(cr, uid, data, context=context)

    def write(self, cr, uid, ids, vals, context=None):
        """Write *vals*; when a partner is being set, create missing bank
        accounts for every line that carries a CODA account number.

        Mirrors create() above. Always returns True.
        """
        super(account_bank_statement_line, self).write(cr, uid, ids, vals, context)
        if 'partner_id' in vals:
            for line in self.browse(cr, uid, ids, context=context):
                if line.coda_account_number:
                    self._create_bank_account_if_missing(
                        cr, uid, line.coda_account_number, vals['partner_id'],
                        context=context)
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
BPI-SINOVOIP/BPI-Mainline-kernel | toolchains/gcc-linaro-7.3.1-2018.05-x86_64_arm-linux-gnueabihf/share/gdb/system-gdbinit/elinos.py | 10 | 3080 | # Copyright (C) 2011-2018 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Configure GDB using the ELinOS environment."""
import os
import glob
import gdb
def warn(msg):
    """Print a warning message prefixed with 'warning: '.

    The parenthesized form works both as a Python 2 print statement
    (with a parenthesized expression) and as a Python 3 function call,
    so this file can be loaded by either flavor of GDB's embedded Python.
    """
    print("warning: %s" % msg)
def get_elinos_environment():
    """Return the ELinOS environment as a dictionary.

    The returned dictionary contains:

      * 'project': path to the ELinOS project;
      * 'cdk': path to the ELinOS CDK;
      * 'target': the ELinOS target name (Eg. 'i486-linux');
      * 'xenomai': list of Xenomai install prefixes (possibly empty when
        the project does not include Xenomai).

    A warning is printed for each missing piece of the environment, and
    the corresponding value is set to None.
    """
    env = {}
    for key in ("project", "cdk", "target"):
        var_name = "ELINOS_%s" % key.upper()
        value = os.environ.get(var_name)
        if value is None:
            warn("%s not set" % var_name)
        env[key] = value

    project = env["project"]
    if project is None:
        env["xenomai"] = []
    else:
        env["xenomai"] = glob.glob(project + "/xenomai-[0-9.]*")
    return env
def elinos_init():
    """Initialize the debugger environment for ELinOS.

    Tell GDB where the ELinOS libraries live on the host.  This assumes a
    properly set up ELinOS environment; when environment variables are
    missing, a warning names the libraries that may not be loaded.
    """
    env = get_elinos_environment()

    search_dirs = []

    # System libraries: both the CDK and the target name are required to
    # build the sysroot path.
    if env["cdk"] is None or env["target"] is None:
        warn("ELinOS system libraries will not be loaded")
    else:
        prefix = "%s/%s" % (env["cdk"], env["target"])
        search_dirs.append("%s/%s" % (prefix, "lib"))
        gdb.execute("set solib-absolute-prefix %s" % prefix)

    # Xenomai libraries are optional, so only emit a lighter warning when
    # they cannot be located.
    if env["project"] is None:
        warn("Xenomai libraries may not be loaded")
    else:
        for prefix_dir in env["xenomai"]:
            search_dirs.append(
                "%s/%s" % (prefix_dir, "xenomai-build/usr/realtime/lib"))

    if search_dirs:
        gdb.execute("set solib-search-path %s" % ":".join(search_dirs))
# Configure the debugger right away when this file is executed as a
# script (e.g. sourced through GDB's system-gdbinit mechanism).
if __name__ == "__main__":
    elinos_init()
| gpl-2.0 |
chand3040/cloud_that | openedx/core/djangoapps/credit/migrations/0007_auto__add_field_creditprovider_enable_integration__chg_field_creditpro.py | 84 | 11967 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Apply the migration: drop the unique constraint on provider_url,
        # add the enable_integration boolean flag, and shrink provider_url
        # to the default URLField length (200).
        # Removing unique constraint on 'CreditProvider', fields ['provider_url']
        db.delete_unique('credit_creditprovider', ['provider_url'])
        # Adding field 'CreditProvider.enable_integration'
        db.add_column('credit_creditprovider', 'enable_integration',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Changing field 'CreditProvider.provider_url'
        db.alter_column('credit_creditprovider', 'provider_url', self.gf('django.db.models.fields.URLField')(max_length=200))
    def backwards(self, orm):
        # Revert the migration: remove enable_integration and restore
        # provider_url to max_length=255 with its unique constraint.
        # Deleting field 'CreditProvider.enable_integration'
        db.delete_column('credit_creditprovider', 'enable_integration')
        # Changing field 'CreditProvider.provider_url'
        db.alter_column('credit_creditprovider', 'provider_url', self.gf('django.db.models.fields.URLField')(max_length=255, unique=True))
        # Adding unique constraint on 'CreditProvider', fields ['provider_url']
        db.create_unique('credit_creditprovider', ['provider_url'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'credit.creditcourse': {
'Meta': {'object_name': 'CreditCourse'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'providers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['credit.CreditProvider']", 'symmetrical': 'False'})
},
'credit.crediteligibility': {
'Meta': {'unique_together': "(('username', 'course'),)", 'object_name': 'CreditEligibility'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'eligibilities'", 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'eligibilities'", 'to': "orm['credit.CreditProvider']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'credit.creditprovider': {
'Meta': {'object_name': 'CreditProvider'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'eligibility_duration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '31556970'}),
'enable_integration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'provider_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'provider_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'})
},
'credit.creditrequest': {
'Meta': {'unique_together': "(('username', 'course', 'provider'),)", 'object_name': 'CreditRequest'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requests'", 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'parameters': ('jsonfield.fields.JSONField', [], {}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requests'", 'to': "orm['credit.CreditProvider']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '255'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'credit.creditrequirement': {
'Meta': {'unique_together': "(('namespace', 'name', 'course'),)", 'object_name': 'CreditRequirement'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'course': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credit_requirements'", 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'criteria': ('jsonfield.fields.JSONField', [], {}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'namespace': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'credit.creditrequirementstatus': {
'Meta': {'object_name': 'CreditRequirementStatus'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'reason': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'requirement': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['credit.CreditRequirement']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'credit.historicalcreditrequest': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalCreditRequest'},
'course': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.DO_NOTHING', 'to': "orm['credit.CreditCourse']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'parameters': ('jsonfield.fields.JSONField', [], {}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.DO_NOTHING', 'to': "orm['credit.CreditProvider']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '255'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
}
}
complete_apps = ['credit']
| agpl-3.0 |
Luthaf/Chemharp-python | chemfiles/trajectory.py | 2 | 7109 | # -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from ctypes import c_uint64, c_char_p
import sys
from .utils import CxxPointer, _call_with_growing_buffer
from .frame import Frame, Topology
from .misc import ChemfilesError
# Python 2 compatibility
if sys.hexversion >= 0x03000000:
unicode_string = str
bytes_string = bytes
else:
unicode_string = unicode # noqa
bytes_string = str
class BaseTrajectory(CxxPointer):
    """Shared implementation for file-backed and in-memory trajectories.

    Wraps a ``chfl_trajectory`` C pointer and guards every operation
    against use after :py:func:`close`.
    """

    def __init__(self, ptr):
        # Track the closed state ourselves: the underlying C pointer must
        # not be used after chfl_trajectory_close has been called.
        self.__closed = False
        super(BaseTrajectory, self).__init__(ptr, is_const=False)

    def __check_opened(self):
        # Raise a Python error instead of crashing in C when the pointer
        # is stale.
        if self.__closed:
            raise ChemfilesError("Can not use a closed Trajectory")

    def __del__(self):
        # Flush and release the C-side resources if the user did not close
        # the trajectory explicitly.
        if not self.__closed:
            self.close()

    def __enter__(self):
        self.__check_opened()
        return self

    def __exit__(self, *args):
        self.close()

    def __iter__(self):
        # Yield every frame, from the first step to the last one.
        self.__check_opened()
        for step in range(self.nsteps):
            yield self.read_step(step)

    def read(self):
        """
        Read the next step of this :py:class:`Trajectory` and return the
        corresponding :py:class:`Frame`.
        """
        self.__check_opened()
        frame = Frame()
        self.ffi.chfl_trajectory_read(self.mut_ptr, frame.mut_ptr)
        return frame

    def read_step(self, step):
        """
        Read a specific ``step`` in this :py:class:`Trajectory` and return the
        corresponding :py:class:`Frame`.
        """
        self.__check_opened()
        frame = Frame()
        self.ffi.chfl_trajectory_read_step(self.mut_ptr, c_uint64(step), frame.mut_ptr)
        return frame

    def write(self, frame):
        """Write a :py:class:`Frame` to this :py:class:`Trajectory`."""
        self.__check_opened()
        self.ffi.chfl_trajectory_write(self.mut_ptr, frame.ptr)

    def set_topology(self, topology, format=""):
        """
        Set the :py:class:`Topology` associated with this :py:class:`Trajectory`.
        The new topology will be used when reading and writing the files,
        replacing any topology in the frames or files.

        If the ``topology`` parameter is a :py:class:`Topology` instance, it is
        used directly. If the ``topology`` parameter is a string, the first
        :py:class:`Frame` of the corresponding file is read, and the topology of
        this frame is used.

        When reading from a file, if ``format`` is not the empty string, it is
        used as the file format instead of guessing it from the file extension.
        """
        self.__check_opened()
        if isinstance(topology, Topology):
            self.ffi.chfl_trajectory_set_topology(self.mut_ptr, topology.ptr)
        else:
            # `topology` is a path: the C library reads the first frame of
            # that file and uses its topology.
            self.ffi.chfl_trajectory_topology_file(
                self.mut_ptr, topology.encode("utf8"), format.encode("utf8")
            )

    def set_cell(self, cell):
        """
        Set the :py:class:`UnitCell` associated with this :py:class:`Trajectory`
        to a copy of ``cell``.

        This :py:class:`UnitCell` will be used when reading and writing the
        files, replacing any unit cell in the frames or files.
        """
        self.__check_opened()
        self.ffi.chfl_trajectory_set_cell(self.mut_ptr, cell.ptr)

    @property
    def nsteps(self):
        """Get the current number of steps in this :py:class:`Trajectory`."""
        self.__check_opened()
        nsteps = c_uint64()
        self.ffi.chfl_trajectory_nsteps(self.mut_ptr, nsteps)
        return nsteps.value

    @property
    def path(self):
        """Get the path used to open this :py:class:`Trajectory`."""
        self.__check_opened()
        # The C API writes into a caller-provided buffer; retry with a
        # growing buffer until the whole path fits.
        return _call_with_growing_buffer(
            lambda buffer, size: self.ffi.chfl_trajectory_path(self.ptr, buffer, size),
            initial=256,
        )

    def close(self):
        """
        Close this :py:class:`Trajectory` and write any buffered content to the
        file.
        """
        self.__check_opened()
        self.__closed = True
        self.ffi.chfl_trajectory_close(self.ptr)
class Trajectory(BaseTrajectory):
    """
    A :py:class:`Trajectory` represent a physical file from which we can read
    :py:class:`Frame`.
    """

    def __init__(self, path, mode="r", format=""):
        """
        Open the file at the given ``path`` using the given ``mode`` and
        optional file ``format``.

        Valid modes are ``'r'`` for read, ``'w'`` for write and ``'a'`` for
        append.

        The ``format`` parameter is needed when the file format does not match
        the extension, or when there is not standard extension for this format.
        If `format` is an empty string, the format will be guessed from the
        file extension.
        """
        ptr = self.ffi.chfl_trajectory_with_format(
            path.encode("utf8"), mode.encode("utf8"), format.encode("utf8")
        )
        # Store mode and format for __repr__
        self.__mode = mode
        self.__format = format
        super(Trajectory, self).__init__(ptr)

    def __repr__(self):
        return "Trajectory('{}', '{}', '{}')".format(
            self.path, self.__mode, self.__format
        )
class MemoryTrajectory(BaseTrajectory):
    """
    A :py:class:`MemoryTrajectory` allow to read/write in-memory data as though
    it was a formatted file.
    """

    def __init__(self, data="", mode="r", format=""):
        """
        The ``format`` parameter is always required.

        When reading (``mode`` is ``'r'``), the ``data`` parameter will be used
        as the formatted file.

        When writing (``mode`` is ``'w'``), the ``data`` parameter is ignored.
        To get the memory buffer containing everything already written, use the
        :py:func:`buffer` function.
        """
        if not format:
            raise ChemfilesError(
                "'format' is required when creating a MemoryTrajectory"
            )

        # Store mode and format for __repr__. The previous implementation
        # never assigned these attributes, so repr() raised AttributeError
        # (the names are mangled to _MemoryTrajectory__mode/__format).
        self.__mode = mode
        self.__format = format

        if mode == "r":
            if isinstance(data, unicode_string):
                data = data.encode("utf8")
            elif not isinstance(data, bytes_string):
                raise ChemfilesError("the 'data' parameter must be a string")

            ptr = self.ffi.chfl_trajectory_memory_reader(
                data, len(data), format.encode("utf8")
            )
        elif mode == "w":
            ptr = self.ffi.chfl_trajectory_memory_writer(format.encode("utf8"))
        else:
            raise ChemfilesError(
                "invalid mode '{}' passed to MemoryTrajectory".format(mode)
            )
        super(MemoryTrajectory, self).__init__(ptr)

    def __repr__(self):
        # Fixed: the format string was missing the opening quote around the
        # first placeholder ("MemoryTrajectory({}', ...").
        return "MemoryTrajectory('{}', '{}')".format(self.__mode, self.__format)

    def buffer(self):
        """
        Get the data written to this in-memory trajectory. This is not valid to
        call when reading in-memory data.
        """
        buffer = c_char_p()
        size = c_uint64()
        self.ffi.chfl_trajectory_memory_buffer(self.ptr, buffer, size)
        return buffer.value
| mpl-2.0 |
mikewiebe-ansible/ansible | lib/ansible/modules/network/skydive/skydive_edge.py | 38 | 4950 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: skydive_edge
version_added: "2.8"
author:
- "Sumit Jaiswal (@sjaiswal)"
short_description: Module to add edges to Skydive topology
description:
- This module handles setting up edges between two nodes based on the
relationship type to the Skydive topology.
requirements:
- skydive-client
extends_documentation_fragment: skydive
options:
parent_node:
description:
- To defined the first node of the link, it can be either an ID or
a gremlin expression
required: true
child_node:
description:
- To defined the second node of the link, it can be either an ID or
a gremlin expression
required: true
relation_type:
description:
- To define relation type of the node I(ownership, layer2, layer3).
required: true
host:
description:
- To define the host of the node.
default: ""
required: False
metadata:
description:
- To define metadata for the edge.
required: false
state:
description:
- State of the Skydive Edge. If value is I(present) new edge
will be created else if it is I(absent) it will be deleted.
default: present
choices:
- present
- absent
"""
EXAMPLES = """
- name: create tor
skydive_node:
name: 'TOR'
node_type: "fabric"
seed: TOR
metadata:
Model: Cisco xxxx
provider:
endpoint: localhost:8082
username: admin
password: admin
register: tor_result
- name: create port 1
skydive_node:
name: 'PORT1'
node_type: 'fabric'
seed: PORT1
provider:
endpoint: localhost:8082
username: admin
password: admin
register: port1_result
- name: create port 2
skydive_node:
name: 'PORT2'
node_type: 'fabric'
seed: PORT2
provider:
endpoint: localhost:8082
username: admin
password: admin
register: port2_result
- name: link node tor and port 1
skydive_edge:
parent_node: "{{ tor_result.UUID }}"
child_node: "{{ port1_result.UUID }}"
relation_type: ownership
state: present
provider:
endpoint: localhost:8082
username: admin
password: admin
- name: link node tor and port 2
skydive_edge:
parent_node: "{{ tor_result.UUID }}"
child_node: "{{ port2_result.UUID }}"
relation_type: ownership
state: present
provider:
endpoint: localhost:8082
username: admin
password: admin
- name: update link node tor and port 1 relation
skydive_edge:
parent_node: "{{ tor_result.UUID }}"
child_node: "{{ port2_result.UUID }}"
relation_type: layer2
    state: present
provider:
endpoint: localhost:8082
username: admin
password: admin
- name: Unlink tor and port 2
skydive_edge:
parent_node: "{{ tor_result.UUID }}"
child_node: "{{ port2_result.UUID }}"
relation_type: ownership
state: absent
provider:
endpoint: localhost:8082
username: admin
password: admin
- name: link tor and port 2 via Gremlin expression
skydive_edge:
parent_node: G.V().Has('Name', 'TOR')
child_node: G.V().Has('Name', 'PORT2')
relation_type: ownership
state: present
provider:
endpoint: localhost:8082
username: admin
password: admin
- name: Unlink tor and port 2 via Gremlin expression
skydive_edge:
parent_node: G.V().Has('Name', 'TOR')
child_node: G.V().Has('Name', 'PORT2')
relation_type: ownership
state: absent
provider:
endpoint: localhost:8082
username: admin
password: admin
"""
RETURN = """ # """
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.skydive.api import skydive_edge
def main():
    """Main entry point for module execution.

    Builds the Ansible argument spec for the skydive_edge module and
    delegates the actual work to the skydive_edge API wrapper.
    """
    edge_arguments = dict(
        relation_type=dict(type='str', required=True),
        parent_node=dict(type='str', required=True),
        child_node=dict(type='str', required=True),
        host=dict(type='str', default=""),
        metadata=dict(type='dict', default=dict()),
    )

    # Common module options, extended with the edge arguments and the
    # skydive provider connection options.
    spec = dict(
        provider=dict(required=False),
        state=dict(default='present', choices=['present', 'absent']),
    )
    spec.update(edge_arguments)
    spec.update(skydive_edge.provider_spec)

    module = AnsibleModule(argument_spec=spec,
                           supports_check_mode=True)

    module.exit_json(**skydive_edge(module).run())
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
ThomasBollmeier/GObjectCreator2 | src/gobjcreator2/output/sections.py | 1 | 2533 | #
# Copyright 2011 Thomas Bollmeier
#
# This file is part of GObjectCreator2.
#
# GObjectCreator2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GObjectCreator2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GObjectCreator2. If not, see <http://www.gnu.org/licenses/>.
#
# coding=UTF-8
class Sections(object):
    """
    Abstract class to be inherited by all classes that implement
    user editable code sections.

    Subclasses must implement :py:meth:`_check_for_section_begin` and
    :py:meth:`_is_section_end` to define how a section is delimited in
    their target file format.
    """

    def __init__(self):
        # Guard against direct instantiation of the abstract base class.
        if self.__class__ is Sections:
            raise NotImplementedError

        self._sections = {}

    def get_section_content(self, section_name):
        """Return the list of content lines of the named section.

        Raises KeyError when no section of that name was parsed.
        """
        return self._sections[section_name]

    def _check_for_section_begin(self, line):
        """
        returns boolean flag indicating section begin, <name of section>, <first line or None>
        """
        raise NotImplementedError

    def _is_section_end(self, line):
        """
        returns True if section end is reached
        """
        raise NotImplementedError

    def _parse(self, file_path):
        """Scan ``file_path`` and store the content of every user section.

        Silently does nothing when the file cannot be opened (e.g. on the
        first generation run, when the output file does not exist yet).
        """
        self._sections = {}

        try:
            input_file = open(file_path, "r")
        except IOError:
            return

        try:
            lines = input_file.readlines()
        finally:
            # Always release the file handle, even if readlines() fails.
            input_file.close()

        in_user_section = False
        section_name = ""

        for line in lines:
            # Strip the trailing line break only. The previous code did
            # line[:-1] unconditionally, which chopped the last character
            # of the final line in files not ending with a newline.
            if line.endswith("\n"):
                line = line[:-1]
            if not in_user_section:
                section_begin, section_name, first_line = \
                    self._check_for_section_begin(line)
                if section_begin:
                    in_user_section = True
                    content = []
                    if first_line is not None:
                        content.append(first_line)
            else:
                if not self._is_section_end(line):
                    content.append(line)
                else:
                    self._sections[section_name] = content
                    in_user_section = False
                    section_name = ""
| gpl-3.0 |
stanbellcom/webapp_politik | sites/all/modules/annotator/lib/store-plugin/pyenv/lib/python2.7/site-packages/jinja2/lexer.py | 178 | 26111 | # -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache, next
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')
# we use the unicode identifier rule if this python version is able
# to handle unicode identifiers, otherwise the standard ASCII one.
try:
compile('föö', '<unknown>', 'eval')
except SyntaxError:
name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
else:
from jinja2 import _stringdefs
name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
_stringdefs.xid_continue))
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
# internal the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
# bind operators to token types
operators = {
'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON
}
reverse_operators = dict([(v, k) for k, v in operators.iteritems()])
assert len(operators) == len(reverse_operators), 'operators dropped'
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
sorted(operators, key=lambda x: -len(x))))
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
TOKEN_COMMENT_END, TOKEN_WHITESPACE,
TOKEN_WHITESPACE, TOKEN_LINECOMMENT_BEGIN,
TOKEN_LINECOMMENT_END, TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
    """Return a human readable description of an internal token type."""
    # Operators describe themselves by their literal form.
    if token_type in reverse_operators:
        return reverse_operators[token_type]

    descriptions = {
        TOKEN_COMMENT_BEGIN:        'begin of comment',
        TOKEN_COMMENT_END:          'end of comment',
        TOKEN_COMMENT:              'comment',
        TOKEN_LINECOMMENT:          'comment',
        TOKEN_BLOCK_BEGIN:          'begin of statement block',
        TOKEN_BLOCK_END:            'end of statement block',
        TOKEN_VARIABLE_BEGIN:       'begin of print statement',
        TOKEN_VARIABLE_END:         'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN:  'begin of line statement',
        TOKEN_LINESTATEMENT_END:    'end of line statement',
        TOKEN_DATA:                 'template data / text',
        TOKEN_EOF:                  'end of template',
    }
    # Fall back to the raw type name for anything without a description.
    return descriptions.get(token_type, token_type)
def describe_token(token):
    """Returns a description of the token."""
    # Name tokens describe themselves by their value; everything else is
    # described by its type.
    if token.type != 'name':
        return _describe_token_type(token.type)
    return token.value
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ':' not in expr:
        # Plain token type with no embedded value.
        return _describe_token_type(expr)
    type, value = expr.split(':', 1)
    if type == 'name':
        return value
    return _describe_token_type(type)
def count_newlines(value):
    """Count the number of newline characters in the string.  This is
    useful for extensions that filter a stream.
    """
    # newline_re matches \r\n, \r and \n; counting the matches counts
    # logical newlines without double-counting \r\n pairs.
    return sum(1 for _ in newline_re.finditer(value))
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    escape = re.escape
    # Collect (delimiter length, rule name, escaped pattern) triples; the
    # length is used below to sort longer delimiters first.
    rules = []
    for kind, delimiter in (
        ('comment', environment.comment_start_string),
        ('block', environment.block_start_string),
        ('variable', environment.variable_start_string),
    ):
        rules.append((len(delimiter), kind, escape(delimiter)))

    prefix = environment.line_statement_prefix
    if prefix is not None:
        rules.append((len(prefix), 'linestatement',
                      r'^\s*' + escape(prefix)))

    prefix = environment.line_comment_prefix
    if prefix is not None:
        rules.append((len(prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' + escape(prefix)))

    # Longest delimiters first so e.g. '{%-' style markers are preferred
    # over shorter prefixes; drop the length before returning.
    rules.sort(reverse=True)
    return [rule[1:] for rule in rules]
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        # Raise lazily: the failure is stored in the lexer rule table and
        # only triggered when the offending input is actually matched.
        raise self.error_class(self.message, lineno, filename)
class Token(tuple):
    """Token class."""
    __slots__ = ()
    # Expose the three tuple fields as named read-only properties.
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        # The type is interned (Python 2 builtin) so that token type
        # comparisons can use fast identity checks.
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        # Operators print as their literal form ('+', '==', ...), names as
        # their value, and everything else as the type name.
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
class TokenStreamIterator(object):
    """Iterator adapter over a :class:`TokenStream`.

    Yields tokens from the stream until the eof token is reached, at
    which point the stream is closed and iteration stops.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol.
        current = self.stream.current
        if current.type is not TOKEN_EOF:
            next(self.stream)
            return current
        self.stream.close()
        raise StopIteration()
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\s. The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead. The current active token is stored as :attr:`current`.
    """
    def __init__(self, generator, name, filename):
        # Bound ``next`` method of the underlying generator (Python 2
        # iterator protocol).
        self._next = iter(generator).next
        # Tokens pushed back via :meth:`push`; consumed before _next.
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, '')
        # Advance once so ``current`` holds the first real token.
        next(self)
    def __iter__(self):
        return TokenStreamIterator(self)
    def __nonzero__(self):
        # Python 2 truth protocol: truthy while tokens remain.
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    eos = property(lambda x: not x, doc="Are we at the end of the stream?")
    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)
    def look(self):
        """Look at the next token without consuming it."""
        # Advance, remember, then restore: push the peeked token back
        # and reinstate the previous current token.
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result
    def skip(self, n=1):
        """Skip n tokens ahead."""
        for x in xrange(n):
            next(self)
    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)
    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None
    def next(self):
        """Go one token ahead and return the old one"""
        rv = self.current
        if self._pushed:
            # Pushed-back tokens take precedence over the generator.
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = self._next()
            except StopIteration:
                # Underlying generator exhausted: switch to eof state.
                self.close()
        return rv
    def close(self):
        """Close the stream; further reads yield only the eof token."""
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._next = None
        self.closed = True
    def expect(self, expr):
        """Expect a given token type and return it. This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.

        Raises `TemplateSyntaxError` if the current token does not match.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        # Return the matched token while advancing past it.
        try:
            return self.current
        finally:
            next(self)
def get_lexer(environment):
    """Return a lexer for *environment*, reusing a cached instance when
    every lexing-relevant environment setting matches.
    """
    # All settings that influence tokenization form the cache key.
    key = (
        environment.block_start_string,
        environment.block_end_string,
        environment.variable_start_string,
        environment.variable_end_string,
        environment.comment_start_string,
        environment.comment_end_string,
        environment.line_statement_prefix,
        environment.line_comment_prefix,
        environment.trim_blocks,
        environment.newline_sequence,
    )
    lexer = _lexer_cache.get(key)
    if lexer is not None:
        return lexer
    lexer = Lexer(environment)
    _lexer_cache[key] = lexer
    return lexer
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.
    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """
    def __init__(self, environment):
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape
        # lexing rules for tags: each entry is (regex, token(s), new_state)
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]
        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)
        # block suffix if trimming is enabled: optionally eat one newline
        block_suffix_re = environment.trim_blocks and '\\n?' or ''
        self.newline_sequence = environment.newline_sequence
        # global lexing rules, keyed by lexer state; '#pop' leaves the
        # state, '#bygroup' derives the state from the matched group name
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        e(environment.block_start_string),
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, r)
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c('(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c('\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    e(environment.block_start_string),
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }
    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)
    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream.
        """
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)
    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
            elif token == 'string':
                # try to unescape string
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception, e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
                # if we can express it as bytestring (ascii only)
                # we do that for support of semi broken APIs
                # as datetime.datetime.strftime. On python 3 this
                # call becomes a noop thanks to 2to3
                try:
                    value = str(value)
                except UnicodeError:
                    pass
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)
    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator. Use this method if you just want to tokenize a template.
        """
        # Normalize all line endings to '\n' before scanning.
        source = '\n'.join(unicode(source).splitlines())
        pos = 0
        lineno = 1
        # Stack of lexer states; rules for the top state are active.
        stack = ['root']
        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'
        statetokens = self.rules[stack[-1]]
        source_length = len(source)
        # Open brackets seen inside an expression; block/variable end
        # tags are ignored while this is non-empty.
        balancing_stack = []
        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue
                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue
                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in m.groupdict().iteritems():
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')
                # strings as token just are yielded as is.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')
                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()
                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in m.groupdict().iteritems():
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
| gpl-2.0 |
Centre-Alt-Rendiment-Esportiu/att | old_project/Python/win_libs/serial/sermsdos.py | 159 | 5848 | # sermsdos.py
#
# History:
#
# 3rd September 2002 Dave Haynes
# 1. First defined
#
# Although this code should run under the latest versions of
# Python, on DOS-based platforms such as Windows 95 and 98,
# it has been specifically written to be compatible with
# PyDOS, available at:
# http://www.python.org/ftp/python/wpy/dos.html
#
# PyDOS is a stripped-down version of Python 1.5.2 for
# DOS machines. Therefore, in making changes to this file,
# please respect Python 1.5.2 syntax. In addition, please
# limit the width of this file to 60 characters.
#
# Note also that the modules in PyDOS contain fewer members
# than other versions, so we are restricted to using the
# following:
#
# In module os:
# -------------
# environ, chdir, getcwd, getpid, umask, fdopen, close,
# dup, dup2, fstat, lseek, open, read, write, O_RDONLY,
# O_WRONLY, O_RDWR, O_APPEND, O_CREAT, O_EXCL, O_TRUNC,
# access, F_OK, R_OK, W_OK, X_OK, chmod, listdir, mkdir,
# remove, rename, renames, rmdir, stat, unlink, utime,
# execl, execle, execlp, execlpe, execvp, execvpe, _exit,
# system.
#
# In module os.path:
# ------------------
# curdir, pardir, sep, altsep, pathsep, defpath, linesep.
#
import os
import sys
import string
import serial.serialutil
# Mapping of supported baud rates to the two-character codes understood
# by the DOS ``MODE`` command.
BAUD_RATES = {
    110: "11",
    150: "15",
    300: "30",
    600: "60",
    1200: "12",
    2400: "24",
    4800: "48",
    9600: "96",
    19200: "19"}
# Parity, stop-bit and byte-size constants, mirroring the portable
# serial module's values.
(PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK,
PARITY_SPACE) = (0, 1, 2, 3, 4)
(STOPBITS_ONE, STOPBITS_ONEANDAHALF,
STOPBITS_TWO) = (1, 1.5, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)
# DOS ``MODE`` RETRY codes: Error / Busy / retry (P) / Ready / None.
(RETURN_ERROR, RETURN_BUSY, RETURN_RETRY, RETURN_READY,
RETURN_NONE) = ('E', 'B', 'P', 'R', 'N')
# Raised by operations that require an open port.
portNotOpenError = ValueError('port not open')
def device(portnum):
    """Return the DOS device name for the zero-based port number *portnum*."""
    return 'COM{0}'.format(portnum + 1)
class Serial(serial.serialutil.FileLike):
    """DOS serial port, configured via the ``MODE`` shell command.

    port: number of device; numbering starts at
    zero. if everything fails, the user can
    specify a device string, note that this
    isn't portable any more
    baudrate: baud rate
    bytesize: number of databits
    parity: enable parity checking
    stopbits: number of stopbits
    timeout: set a timeout (None for waiting forever)
    xonxoff: enable software flow control
    rtscts: enable RTS/CTS flow control
    retry: DOS retry mode

    Note: ``timeout``, ``xonxoff`` and ``rtscts`` are accepted for
    interface compatibility but are not supported by the ``MODE``
    based implementation.
    """
    def __init__(self,
                 port,
                 baudrate = 9600,
                 bytesize = EIGHTBITS,
                 parity = PARITY_NONE,
                 stopbits = STOPBITS_ONE,
                 timeout = None,
                 xonxoff = 0,
                 rtscts = 0,
                 retry = RETURN_RETRY
                 ):
        if type(port) == type(''):
            # strings are taken directly
            self.portstr = port
        else:
            # BUG FIX: device() already maps the zero-based number to
            # COM1..., so the old ``device(port+1)`` addressed the
            # wrong (next) port.
            self.portstr = device(port)
        self.baud = BAUD_RATES[baudrate]
        self.bytesize = str(bytesize)
        if parity == PARITY_NONE:
            self.parity = 'N'
        elif parity == PARITY_EVEN:
            self.parity = 'E'
        elif parity == PARITY_ODD:
            self.parity = 'O'
        elif parity == PARITY_MARK:
            self.parity = 'M'
        elif parity == PARITY_SPACE:
            self.parity = 'S'
        self.stop = str(stopbits)
        self.retry = retry
        # MODE writes its status report into this scratch file.
        self.filename = "sermsdos.tmp"
        self._config(self.portstr, self.baud, self.parity,
                     self.bytesize, self.stop, self.retry, self.filename)
    def __del__(self):
        self.close()
    def close(self):
        # Nothing to release: every read/write opens and closes the
        # device handle itself.
        pass
    def _config(self, port, baud, parity, data, stop, retry,
                filename):
        """Configure the port by shelling out to the DOS MODE command."""
        comString = string.join(("MODE ", port, ":"
                                 , " BAUD= ", baud, " PARITY= ", parity
                                 , " DATA= ", data, " STOP= ", stop, " RETRY= ",
                                 retry, " > ", filename ), '')
        os.system(comString)
    def setBaudrate(self, baudrate):
        """Change the baud rate and reconfigure the port."""
        # Keep self.baud in sync so __repr__ and later reconfigurations
        # reflect the new rate (the old code left it stale).
        self.baud = BAUD_RATES[baudrate]
        self._config(self.portstr, self.baud,
                     self.parity, self.bytesize, self.stop, self.retry,
                     self.filename)
    def inWaiting(self):
        """returns the number of bytes waiting to be read"""
        raise NotImplementedError
    def read(self, num = 1):
        """Read num bytes from serial port"""
        handle = os.open(self.portstr,
                         os.O_RDONLY | os.O_BINARY)
        rv = os.read(handle, num)
        os.close(handle)
        return rv
    def write(self, s):
        """Write string to serial port"""
        handle = os.open(self.portstr,
                         os.O_WRONLY | os.O_BINARY)
        rv = os.write(handle, s)
        os.close(handle)
        return rv
    def flushInput(self):
        raise NotImplementedError
    def flushOutput(self):
        raise NotImplementedError
    def sendBreak(self):
        raise NotImplementedError
    def setRTS(self,level=1):
        """Set terminal status line"""
        raise NotImplementedError
    def setDTR(self,level=1):
        """Set terminal status line"""
        raise NotImplementedError
    def getCTS(self):
        """Read terminal status line"""
        raise NotImplementedError
    def getDSR(self):
        """Read terminal status line"""
        raise NotImplementedError
    def getRI(self):
        """Read terminal status line"""
        raise NotImplementedError
    def getCD(self):
        """Read terminal status line"""
        raise NotImplementedError
    def __repr__(self):
        return string.join(( "<Serial>: ", self.portstr
                             , self.baud, self.parity, self.bytesize, self.stop,
                             self.retry , self.filename), ' ')
if __name__ == '__main__':
    # Smoke test: open COM1 and print its configuration.
    s = Serial(0)
    # BUG FIX: ``sys.stdio`` does not exist; the demo line goes to stdout.
    sys.stdout.write('%s %s\n' % (__name__, s))
| gpl-3.0 |
daviddao/luminosity | sklearn-server/flask/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py | 478 | 3098 | from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
    """Lock file by creating a directory.

    ``mkdir`` is atomic on POSIX and Windows, so the existence of the
    directory is the lock; a per-owner marker file inside it records
    who holds it.
    """
    def __init__(self, path, threaded=True, timeout=None):
        """
        >>> lock = MkdirLockFile('somefile')
        >>> lock = MkdirLockFile('somefile', threaded=False)
        """
        LockBase.__init__(self, path, threaded, timeout)
        # Lock file itself is a directory.  Place the unique file name into
        # it.
        self.unique_name = os.path.join(self.lock_file,
                                        "%s.%s%s" % (self.hostname,
                                                     self.tname,
                                                     self.pid))
    def acquire(self, timeout=None):
        """Acquire the lock, waiting up to *timeout* seconds.

        ``timeout=None`` falls back to the instance default; a timeout
        of 0 (or negative) raises `AlreadyLocked` immediately when the
        lock is held by someone else.
        """
        # BUG FIX: the old and-or idiom (``timeout is not None and
        # timeout or self.timeout``) silently replaced an explicit
        # timeout of 0 with the instance default.
        if timeout is None:
            timeout = self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        # Poll interval: a tenth of the timeout, or 100ms when waiting
        # forever.
        if timeout is None:
            wait = 0.1
        else:
            wait = max(0, timeout / 10)
        while True:
            try:
                os.mkdir(self.lock_file)
            except OSError:
                err = sys.exc_info()[1]
                if err.errno == errno.EEXIST:
                    # Already locked.
                    if os.path.exists(self.unique_name):
                        # Already locked by me.
                        return
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            # Someone else has the lock.
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(wait)
                else:
                    # Couldn't create the lock for some other reason
                    raise LockFailed("failed to create %s" % self.lock_file)
            else:
                # Directory created: drop our marker file inside it.
                open(self.unique_name, "wb").close()
                return
    def release(self):
        """Release the lock; raises if we do not hold it."""
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.rmdir(self.lock_file)
    def is_locked(self):
        """Return True if anyone (including us) holds the lock."""
        return os.path.exists(self.lock_file)
    def i_am_locking(self):
        """Return True if this instance holds the lock."""
        return (self.is_locked() and
                os.path.exists(self.unique_name))
    def break_lock(self):
        """Forcibly remove the lock, regardless of owner."""
        if os.path.exists(self.lock_file):
            for name in os.listdir(self.lock_file):
                os.unlink(os.path.join(self.lock_file, name))
            os.rmdir(self.lock_file)
| bsd-3-clause |
donutmonger/youtube-dl | youtube_dl/extractor/teamcoco.py | 114 | 6633 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import base64
import binascii
import re
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
qualities,
determine_ext,
)
from ..compat import compat_ord
class TeamcocoIE(InfoExtractor):
    # Extractor for teamcoco.com video pages.  The page embeds the video
    # metadata as a JSON blob split into shuffled base64 fragments which
    # must be reassembled by brute force (see _real_extract).
    _VALID_URL = r'http://teamcoco\.com/video/(?P<video_id>[0-9]+)?/?(?P<display_id>.*)'
    _TESTS = [
        {
            'url': 'http://teamcoco.com/video/80187/conan-becomes-a-mary-kay-beauty-consultant',
            'md5': '3f7746aa0dc86de18df7539903d399ea',
            'info_dict': {
                'id': '80187',
                'ext': 'mp4',
                'title': 'Conan Becomes A Mary Kay Beauty Consultant',
                'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
                'duration': 504,
                'age_limit': 0,
            }
        }, {
            'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
            'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
            'info_dict': {
                'id': '19705',
                'ext': 'mp4',
                'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
                'title': 'Louis C.K. Interview Pt. 1 11/3/11',
                'duration': 288,
                'age_limit': 0,
            }
        }, {
            'url': 'http://teamcoco.com/video/timothy-olyphant-drinking-whiskey',
            'info_dict': {
                'id': '88748',
                'ext': 'mp4',
                'title': 'Timothy Olyphant Raises A Toast To “Justified”',
                'description': 'md5:15501f23f020e793aeca761205e42c24',
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            }
        }, {
            'url': 'http://teamcoco.com/video/full-episode-mon-6-1-joel-mchale-jake-tapper-and-musical-guest-courtney-barnett?playlist=x;eyJ0eXBlIjoidGFnIiwiaWQiOjl9',
            'info_dict': {
                'id': '89341',
                'ext': 'mp4',
                'title': 'Full Episode - Mon. 6/1 - Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
                'description': 'Guests: Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            }
        }
    ]
    # Fallback patterns for locating the numeric video id in the page.
    _VIDEO_ID_REGEXES = (
        r'"eVar42"\s*:\s*(\d+)',
        r'Ginger\.TeamCoco\.openInApp\("video",\s*"([^"]+)"',
        r'"id_not"\s*:\s*(\d+)'
    )
    def _real_extract(self, url):
        """Extract video info from a teamcoco.com URL."""
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')
        webpage, urlh = self._download_webpage_handle(url, display_id)
        if 'src=expired' in urlh.geturl():
            raise ExtractorError('This video is expired.', expected=True)
        video_id = mobj.group('video_id')
        if not video_id:
            # The URL had no numeric id; dig it out of the page markup.
            video_id = self._html_search_regex(
                self._VIDEO_ID_REGEXES, webpage, 'video id')
        data = None
        # The page JS concatenates base64 fragments (in scrambled order)
        # before decoding; grab the fragments from the preload script.
        preload_codes = self._html_search_regex(
            r'(function.+)setTimeout\(function\(\)\{playlist',
            webpage, 'preload codes')
        base64_fragments = re.findall(r'"([a-zA-z0-9+/=]+)"', preload_codes)
        # NOTE(review): raises ValueError if 'init' is absent — assumes
        # the preload script always contains this marker string.
        base64_fragments.remove('init')
        def _check_sequence(cur_fragments):
            # Try every rotation of the fragment list; the correct one
            # base64-decodes to a JSON object (starts with '{').
            if not cur_fragments:
                return
            for i in range(len(cur_fragments)):
                cur_sequence = (''.join(cur_fragments[i:] + cur_fragments[:i])).encode('ascii')
                try:
                    raw_data = base64.b64decode(cur_sequence)
                    if compat_ord(raw_data[0]) == compat_ord('{'):
                        return json.loads(raw_data.decode('utf-8'))
                except (TypeError, binascii.Error, UnicodeDecodeError, ValueError):
                    continue
        def _check_data():
            # Additionally try dropping a contiguous run of fragments
            # (some are decoys), then rotate the remainder.
            for i in range(len(base64_fragments) + 1):
                for j in range(i, len(base64_fragments) + 1):
                    data = _check_sequence(base64_fragments[:i] + base64_fragments[j:])
                    if data:
                        return data
        self.to_screen('Try to compute possible data sequence. This may take some time.')
        data = _check_data()
        if not data:
            raise ExtractorError(
                'Preload information could not be extracted', expected=True)
        formats = []
        get_quality = qualities(['500k', '480p', '1000k', '720p', '1080p'])
        for filed in data['files']:
            if determine_ext(filed['url']) == 'm3u8':
                # compat_urllib_parse.urljoin does not work here
                if filed['url'].startswith('/'):
                    m3u8_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + filed['url']
                else:
                    m3u8_url = filed['url']
                m3u8_formats = self._extract_m3u8_formats(
                    m3u8_url, video_id, ext='mp4')
                for m3u8_format in m3u8_formats:
                    if m3u8_format not in formats:
                        formats.append(m3u8_format)
            elif determine_ext(filed['url']) == 'f4m':
                # TODO Correct f4m extraction
                continue
            else:
                if filed['url'].startswith('/mp4:protected/'):
                    # TODO Correct extraction for these files
                    continue
                m_format = re.search(r'(\d+(k|p))\.mp4', filed['url'])
                if m_format is not None:
                    format_id = m_format.group(1)
                else:
                    format_id = filed['bitrate']
                tbr = (
                    int(filed['bitrate'])
                    if filed['bitrate'].isdigit()
                    else None)
                formats.append({
                    'url': filed['url'],
                    'ext': 'mp4',
                    'tbr': tbr,
                    'format_id': format_id,
                    'quality': get_quality(format_id),
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'display_id': display_id,
            'formats': formats,
            'title': data['title'],
            'thumbnail': data.get('thumb', {}).get('href'),
            'description': data.get('teaser'),
            'duration': data.get('duration'),
            'age_limit': self._family_friendly_search(webpage),
        }
| unlicense |
skoslowski/gnuradio | gnuradio-runtime/examples/network/audio_source.py | 3 | 1864 | #!/usr/bin/env python
#
# Copyright 2006,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from argparse import ArgumentParser
import sys
try:
from gnuradio import audio
except ImportError:
sys.stderr.write("Failed to import gnuradio.audio. Make sure gr-audio component is installed.\n")
sys.exit(1)
class audio_source(gr.top_block):
    """GNU Radio top block that streams sound-card samples to a UDP sink."""

    def __init__(self, host, port, pkt_size, sample_rate, eof):
        gr.top_block.__init__(self, "audio_source")
        # Capture float samples from the default audio device and ship
        # them out as UDP datagrams of the requested packet size.
        src = audio.source(sample_rate)
        dst = blocks.udp_sink(gr.sizeof_float, host, port, pkt_size, eof=eof)
        self.audio = src
        self.sink = dst
        self.connect(src, dst)
if __name__ == '__main__':
    # Command-line interface: where to send the audio and how it is
    # sampled.
    parser = ArgumentParser()
    parser.add_argument("--host", default="127.0.0.1",
                        help="Remote host name (domain name or IP address")
    parser.add_argument("--port", type=int, default=65500,
                        help="port number to connect to")
    parser.add_argument("--packet-size", type=int, default=1472,
                        help="packet size.")
    parser.add_argument("-r", "--sample-rate", type=int, default=32000,
                        help="audio signal sample rate [default=%(default)r]")
    parser.add_argument("--no-eof", action="store_true", default=False,
                        help="don't send EOF on disconnect")
    options = parser.parse_args()
    # Build the flow graph and run it until interrupted.
    flow_graph = audio_source(options.host, options.port,
                              options.packet_size, options.sample_rate,
                              not options.no_eof)
    try:
        flow_graph.run()
    except KeyboardInterrupt:
        # Ctrl-C exits cleanly.
        pass
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.