| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
shikhar413/openmc | openmc/lib/filter.py | 1 | 16037 |
from collections.abc import Mapping
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, \
create_string_buffer, c_size_t
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError
from . import _dll
from .core import _FortranObjectWithID
from .error import _error_handler
from .material import Material
from .mesh import _get_mesh
__all__ = [
'Filter', 'AzimuthalFilter', 'CellFilter', 'CellbornFilter', 'CellfromFilter',
'CellInstanceFilter', 'DistribcellFilter', 'DelayedGroupFilter', 'EnergyFilter',
'EnergyoutFilter', 'EnergyFunctionFilter', 'LegendreFilter', 'MaterialFilter',
'MeshFilter', 'MeshSurfaceFilter', 'MuFilter', 'ParticleFilter', 'PolarFilter',
'SphericalHarmonicsFilter', 'SpatialLegendreFilter', 'SurfaceFilter',
'UniverseFilter', 'ZernikeFilter', 'ZernikeRadialFilter', 'filters'
]
# Tally functions
_dll.openmc_cell_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32)]
_dll.openmc_cell_filter_get_bins.restype = c_int
_dll.openmc_cell_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_double)), POINTER(c_size_t)]
_dll.openmc_energy_filter_get_bins.restype = c_int
_dll.openmc_energy_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_double)]
_dll.openmc_energy_filter_set_bins.restype = c_int
_dll.openmc_energy_filter_set_bins.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.restype = c_int
_dll.openmc_energyfunc_filter_set_data.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.argtypes = [
c_int32, c_size_t, POINTER(c_double), POINTER(c_double)]
_dll.openmc_energyfunc_filter_get_energy.restype = c_int
_dll.openmc_energyfunc_filter_get_energy.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_energy.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_energyfunc_filter_get_y.restype = c_int
_dll.openmc_energyfunc_filter_get_y.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_y.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_filter_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_filter_get_id.restype = c_int
_dll.openmc_filter_get_id.errcheck = _error_handler
_dll.openmc_filter_get_type.argtypes = [c_int32, c_char_p]
_dll.openmc_filter_get_type.restype = c_int
_dll.openmc_filter_get_type.errcheck = _error_handler
_dll.openmc_filter_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_filter_set_id.restype = c_int
_dll.openmc_filter_set_id.errcheck = _error_handler
_dll.openmc_get_filter_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_filter_index.restype = c_int
_dll.openmc_get_filter_index.errcheck = _error_handler
_dll.openmc_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_legendre_filter_get_order.restype = c_int
_dll.openmc_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_legendre_filter_set_order.restype = c_int
_dll.openmc_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_material_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_size_t)]
_dll.openmc_material_filter_get_bins.restype = c_int
_dll.openmc_material_filter_get_bins.errcheck = _error_handler
_dll.openmc_material_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_int32)]
_dll.openmc_material_filter_set_bins.restype = c_int
_dll.openmc_material_filter_set_bins.errcheck = _error_handler
_dll.openmc_mesh_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_mesh_filter_get_mesh.restype = c_int
_dll.openmc_mesh_filter_get_mesh.errcheck = _error_handler
_dll.openmc_mesh_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_mesh_filter_set_mesh.restype = c_int
_dll.openmc_mesh_filter_set_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_meshsurface_filter_get_mesh.restype = c_int
_dll.openmc_meshsurface_filter_get_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_meshsurface_filter_set_mesh.restype = c_int
_dll.openmc_meshsurface_filter_set_mesh.errcheck = _error_handler
_dll.openmc_new_filter.argtypes = [c_char_p, POINTER(c_int32)]
_dll.openmc_new_filter.restype = c_int
_dll.openmc_new_filter.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_spatial_legendre_filter_get_order.restype = c_int
_dll.openmc_spatial_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_spatial_legendre_filter_set_order.restype = c_int
_dll.openmc_spatial_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_sphharm_filter_get_order.restype = c_int
_dll.openmc_sphharm_filter_get_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_sphharm_filter_set_order.restype = c_int
_dll.openmc_sphharm_filter_set_order.errcheck = _error_handler
_dll.openmc_zernike_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_zernike_filter_get_order.restype = c_int
_dll.openmc_zernike_filter_get_order.errcheck = _error_handler
_dll.openmc_zernike_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_zernike_filter_set_order.restype = c_int
_dll.openmc_zernike_filter_set_order.errcheck = _error_handler
_dll.tally_filters_size.restype = c_size_t
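# Note on the class below: Filter instances are cached in a WeakValueDictionary
# keyed by the C-side index, so constructing a Filter for an index that already
# has a live Python wrapper returns that same object instead of a duplicate.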
class Filter(_FortranObjectWithID):
__instances = WeakValueDictionary()
def __new__(cls, obj=None, uid=None, new=True, index=None):
mapping = filters
if index is None:
if new:
# Determine ID to assign
if uid is None:
uid = max(mapping, default=0) + 1
else:
if uid in mapping:
raise AllocationError('A filter with ID={} has already '
'been allocated.'.format(uid))
# Set the filter type -- note that the filter_type attribute
# only exists on subclasses!
index = c_int32()
_dll.openmc_new_filter(cls.filter_type.encode(), index)
index = index.value
else:
index = mapping[uid]._index
if index not in cls.__instances:
instance = super().__new__(cls)
instance._index = index
if uid is not None:
instance.id = uid
cls.__instances[index] = instance
return cls.__instances[index]
@property
def id(self):
filter_id = c_int32()
_dll.openmc_filter_get_id(self._index, filter_id)
return filter_id.value
@id.setter
def id(self, filter_id):
_dll.openmc_filter_set_id(self._index, filter_id)
class EnergyFilter(Filter):
filter_type = 'energy'
def __init__(self, bins=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if bins is not None:
self.bins = bins
@property
def bins(self):
energies = POINTER(c_double)()
n = c_size_t()
_dll.openmc_energy_filter_get_bins(self._index, energies, n)
return as_array(energies, (n.value,))
@bins.setter
def bins(self, bins):
# Get numpy array as a double*
energies = np.asarray(bins)
energies_p = energies.ctypes.data_as(POINTER(c_double))
_dll.openmc_energy_filter_set_bins(
self._index, len(energies), energies_p)
class EnergyoutFilter(EnergyFilter):
filter_type = 'energyout'
class AzimuthalFilter(Filter):
filter_type = 'azimuthal'
class CellFilter(Filter):
filter_type = 'cell'
@property
def bins(self):
cells = POINTER(c_int32)()
n = c_int32()
_dll.openmc_cell_filter_get_bins(self._index, cells, n)
return as_array(cells, (n.value,))
class CellbornFilter(Filter):
filter_type = 'cellborn'
class CellfromFilter(Filter):
filter_type = 'cellfrom'
class CellInstanceFilter(Filter):
filter_type = 'cellinstance'
class DelayedGroupFilter(Filter):
filter_type = 'delayedgroup'
class DistribcellFilter(Filter):
filter_type = 'distribcell'
class EnergyFunctionFilter(Filter):
filter_type = 'energyfunction'
def __new__(cls, energy=None, y=None, uid=None, new=True, index=None):
return super().__new__(cls, uid=uid, new=new, index=index)
def __init__(self, energy=None, y=None, uid=None, new=True, index=None):
if (energy is None) != (y is None):
raise AttributeError("Need both energy and y or neither")
super().__init__(uid, new, index)
if energy is not None:
self.set_data(energy, y)
def set_data(self, energy, y):
"""Set the interpolation information for the filter
Parameters
----------
energy : numpy.ndarray
Independent variable for the interpolation
y : numpy.ndarray
Dependent variable for the interpolation
"""
energy_array = np.asarray(energy)
y_array = np.asarray(y)
energy_p = energy_array.ctypes.data_as(POINTER(c_double))
y_p = y_array.ctypes.data_as(POINTER(c_double))
_dll.openmc_energyfunc_filter_set_data(
self._index, len(energy_array), energy_p, y_p)
@property
def energy(self):
return self._get_attr(_dll.openmc_energyfunc_filter_get_energy)
@property
def y(self):
return self._get_attr(_dll.openmc_energyfunc_filter_get_y)
def _get_attr(self, cfunc):
array_p = POINTER(c_double)()
n = c_size_t()
cfunc(self._index, n, array_p)
return as_array(array_p, (n.value, ))
class LegendreFilter(Filter):
filter_type = 'legendre'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_legendre_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_legendre_filter_set_order(self._index, order)
class MaterialFilter(Filter):
filter_type = 'material'
def __init__(self, bins=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if bins is not None:
self.bins = bins
@property
def bins(self):
materials = POINTER(c_int32)()
n = c_size_t()
_dll.openmc_material_filter_get_bins(self._index, materials, n)
return [Material(index=materials[i]) for i in range(n.value)]
@bins.setter
def bins(self, materials):
# Get material indices as int32_t[]
n = len(materials)
bins = (c_int32*n)(*(m._index for m in materials))
_dll.openmc_material_filter_set_bins(self._index, n, bins)
class MeshFilter(Filter):
filter_type = 'mesh'
def __init__(self, mesh=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if mesh is not None:
self.mesh = mesh
@property
def mesh(self):
index_mesh = c_int32()
_dll.openmc_mesh_filter_get_mesh(self._index, index_mesh)
return _get_mesh(index_mesh.value)
@mesh.setter
def mesh(self, mesh):
_dll.openmc_mesh_filter_set_mesh(self._index, mesh._index)
class MeshSurfaceFilter(Filter):
filter_type = 'meshsurface'
def __init__(self, mesh=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if mesh is not None:
self.mesh = mesh
@property
def mesh(self):
index_mesh = c_int32()
_dll.openmc_meshsurface_filter_get_mesh(self._index, index_mesh)
return _get_mesh(index_mesh.value)
@mesh.setter
def mesh(self, mesh):
_dll.openmc_meshsurface_filter_set_mesh(self._index, mesh._index)
class MuFilter(Filter):
filter_type = 'mu'
class ParticleFilter(Filter):
filter_type = 'particle'
class PolarFilter(Filter):
filter_type = 'polar'
class SphericalHarmonicsFilter(Filter):
filter_type = 'sphericalharmonics'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_sphharm_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_sphharm_filter_set_order(self._index, order)
class SpatialLegendreFilter(Filter):
filter_type = 'spatiallegendre'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_spatial_legendre_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_spatial_legendre_filter_set_order(self._index, order)
class SurfaceFilter(Filter):
filter_type = 'surface'
class UniverseFilter(Filter):
filter_type = 'universe'
class ZernikeFilter(Filter):
filter_type = 'zernike'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_zernike_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_zernike_filter_set_order(self._index, order)
class ZernikeRadialFilter(ZernikeFilter):
filter_type = 'zernikeradial'
_FILTER_TYPE_MAP = {
'azimuthal': AzimuthalFilter,
'cell': CellFilter,
'cellborn': CellbornFilter,
'cellfrom': CellfromFilter,
'cellinstance': CellInstanceFilter,
'delayedgroup': DelayedGroupFilter,
'distribcell': DistribcellFilter,
'energy': EnergyFilter,
'energyout': EnergyoutFilter,
'energyfunction': EnergyFunctionFilter,
'legendre': LegendreFilter,
'material': MaterialFilter,
'mesh': MeshFilter,
'meshsurface': MeshSurfaceFilter,
'mu': MuFilter,
'particle': ParticleFilter,
'polar': PolarFilter,
'sphericalharmonics': SphericalHarmonicsFilter,
'spatiallegendre': SpatialLegendreFilter,
'surface': SurfaceFilter,
'universe': UniverseFilter,
'zernike': ZernikeFilter,
'zernikeradial': ZernikeRadialFilter
}
def _get_filter(index):
filter_type = create_string_buffer(20)
_dll.openmc_filter_get_type(index, filter_type)
filter_type = filter_type.value.decode()
return _FILTER_TYPE_MAP[filter_type](index=index)
class _FilterMapping(Mapping):
def __getitem__(self, key):
index = c_int32()
try:
_dll.openmc_get_filter_index(key, index)
except (AllocationError, InvalidIDError) as e:
# __contains__ expects a KeyError to work correctly
raise KeyError(str(e))
return _get_filter(index.value)
def __iter__(self):
for i in range(len(self)):
yield _get_filter(i).id
def __len__(self):
return _dll.tally_filters_size()
def __repr__(self):
return repr(dict(self))
filters = _FilterMapping()
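# A minimal usage sketch (illustrative only -- it assumes a model has been set
# up and the shared library initialized, e.g. via openmc.lib.init(); the bin
# edges below are arbitrary):
if __name__ == '__main__':
    f = EnergyFilter([0.0, 0.625, 2.0e7])  # three edges -> two energy bins
    print(f.id, f.bins)                    # the new filter is visible in `filters`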
| mit |
GhostThrone/django | tests/model_fields/test_durationfield.py | 296 | 2724 |
import datetime
import json
from django import forms
from django.core import exceptions, serializers
from django.db import models
from django.test import SimpleTestCase, TestCase
from .models import DurationModel, NullDurationModel
class TestSaveLoad(TestCase):
def test_simple_roundtrip(self):
duration = datetime.timedelta(days=123, seconds=123, microseconds=123)
DurationModel.objects.create(field=duration)
loaded = DurationModel.objects.get()
self.assertEqual(loaded.field, duration)
def test_create_empty(self):
NullDurationModel.objects.create()
loaded = NullDurationModel.objects.get()
self.assertEqual(loaded.field, None)
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
DurationModel.objects.create(field=datetime.timedelta(days=1)),
DurationModel.objects.create(field=datetime.timedelta(seconds=1)),
DurationModel.objects.create(field=datetime.timedelta(seconds=-1)),
]
def test_exact(self):
self.assertSequenceEqual(
DurationModel.objects.filter(field=datetime.timedelta(days=1)),
[self.objs[0]]
)
def test_gt(self):
self.assertSequenceEqual(
DurationModel.objects.filter(field__gt=datetime.timedelta(days=0)),
[self.objs[0], self.objs[1]]
)
class TestSerialization(SimpleTestCase):
test_data = '[{"fields": {"field": "1 01:00:00"}, "model": "model_fields.durationmodel", "pk": null}]'
def test_dumping(self):
instance = DurationModel(field=datetime.timedelta(days=1, hours=1))
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, datetime.timedelta(days=1, hours=1))
class TestValidation(SimpleTestCase):
def test_invalid_string(self):
field = models.DurationField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('not a datetime', None)
self.assertEqual(cm.exception.code, 'invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
"'not a datetime' value has an invalid format. "
"It must be in [DD] [HH:[MM:]]ss[.uuuuuu] format."
)
class TestFormField(SimpleTestCase):
# Tests for forms.DurationField are in the forms_tests app.
def test_formfield(self):
field = models.DurationField()
self.assertIsInstance(field.formfield(), forms.DurationField)
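# Standalone illustration (not part of the suite) of the duration string
# format exercised above -- "[DD] [HH:[MM:]]ss[.uuuuuu]" -- using the parser
# Django applies when loading serialized values:
if __name__ == '__main__':
    from django.utils.dateparse import parse_duration
    assert parse_duration('1 01:00:00') == datetime.timedelta(days=1, hours=1)
    assert parse_duration('01:00:00') == datetime.timedelta(hours=1)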
| bsd-3-clause |
liuyisiyisi/django-cms | cms/signals/__init__.py | 23 | 5477 |
# -*- coding: utf-8 -*-
from cms.signals.apphook import debug_server_restart
from cms.signals.page import pre_save_page, post_save_page, pre_delete_page, post_delete_page, post_moved_page
from cms.signals.permissions import post_save_user, post_save_user_group, pre_save_user, pre_delete_user, pre_save_group, pre_delete_group, pre_save_pagepermission, pre_delete_pagepermission, pre_save_globalpagepermission, pre_delete_globalpagepermission
from cms.signals.placeholder import pre_delete_placeholder_ref, post_delete_placeholder_ref
from cms.signals.plugins import post_delete_plugins, pre_save_plugins, pre_delete_plugins
from cms.signals.reversion_signals import post_revision
from cms.signals.title import pre_save_title, post_save_title, pre_delete_title, post_delete_title
from cms.utils.compat.dj import is_installed
from cms.utils.conf import get_cms_setting
from django.db.models import signals
from django.dispatch import Signal
from cms.models import Page, Title, CMSPlugin, PagePermission, GlobalPagePermission, PageUser, PageUserGroup, PlaceholderReference
from django.conf import settings
from django.contrib.auth.models import User, Group
#################### Our own signals ###################
# fired after page location is changed - is moved from one node to other
page_moved = Signal(providing_args=["instance"])
# fired after page gets published - copied to public model - there may be more
# than one instances published before this signal gets called
post_publish = Signal(providing_args=["instance", "language"])
post_unpublish = Signal(providing_args=["instance", "language"])
# fired if a public page with an apphook is added or changed
urls_need_reloading = Signal(providing_args=[])
if settings.DEBUG:
urls_need_reloading.connect(debug_server_restart)
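# Illustrative only: third-party code can subscribe to the custom signals above
# like any Django signal. The receiver below is a hypothetical example; real
# code would also register it, e.g.
# post_publish.connect(sample_post_publish_handler, dispatch_uid='myapp_post_publish')
def sample_post_publish_handler(sender, instance, language, **kwargs):
    # `instance` is the page that was just published in `language`; a typical
    # handler would invalidate caches or notify an external system here.
    pass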
######################### plugins #######################
signals.pre_delete.connect(pre_delete_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_delete_plugin')
signals.post_delete.connect(post_delete_plugins, sender=CMSPlugin, dispatch_uid='cms_post_delete_plugin')
signals.pre_save.connect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
########################## page #########################
signals.pre_save.connect(pre_save_page, sender=Page, dispatch_uid='cms_pre_save_page')
signals.post_save.connect(post_save_page, sender=Page, dispatch_uid='cms_post_save_page')
signals.pre_delete.connect(pre_delete_page, sender=Page, dispatch_uid='cms_pre_delete_page')
signals.post_delete.connect(post_delete_page, sender=Page, dispatch_uid='cms_post_delete_page')
page_moved.connect(post_moved_page, sender=Page, dispatch_uid='cms_post_move_page')
######################### title #########################
signals.pre_save.connect(pre_save_title, sender=Title, dispatch_uid='cms_pre_save_page')
signals.post_save.connect(post_save_title, sender=Title, dispatch_uid='cms_post_save_page')
signals.pre_delete.connect(pre_delete_title, sender=Title, dispatch_uid='cms_pre_delete_page')
signals.post_delete.connect(post_delete_title, sender=Title, dispatch_uid='cms_post_delete_page')
###################### placeholder #######################
signals.pre_delete.connect(pre_delete_placeholder_ref, sender=PlaceholderReference,
dispatch_uid='cms_pre_delete_placeholder_ref')
signals.post_delete.connect(post_delete_placeholder_ref, sender=PlaceholderReference,
dispatch_uid='cms_post_delete_placeholder_ref')
###################### permissions #######################
if get_cms_setting('PERMISSION'):
# only if permissions are in use
signals.pre_save.connect(pre_save_user, sender=User, dispatch_uid='cms_pre_save_user')
signals.post_save.connect(post_save_user, sender=User, dispatch_uid='cms_post_save_user')
signals.pre_delete.connect(pre_delete_user, sender=User, dispatch_uid='cms_pre_delete_user')
signals.pre_save.connect(pre_save_user, sender=PageUser, dispatch_uid='cms_pre_save_pageuser')
signals.pre_delete.connect(pre_delete_user, sender=PageUser, dispatch_uid='cms_pre_delete_pageuser')
signals.pre_save.connect(pre_save_group, sender=Group, dispatch_uid='cms_pre_save_group')
signals.post_save.connect(post_save_user_group, sender=Group, dispatch_uid='cms_post_save_group')
signals.pre_delete.connect(pre_delete_group, sender=Group, dispatch_uid='cms_pre_delete_group')
signals.pre_save.connect(pre_save_group, sender=PageUserGroup, dispatch_uid='cms_pre_save_pageusergroup')
signals.pre_delete.connect(pre_delete_group, sender=PageUserGroup, dispatch_uid='cms_pre_delete_pageusergroup')
signals.pre_save.connect(pre_save_pagepermission, sender=PagePermission, dispatch_uid='cms_pre_save_pagepermission')
signals.pre_delete.connect(pre_delete_pagepermission, sender=PagePermission,
dispatch_uid='cms_pre_delete_pagepermission')
signals.pre_save.connect(pre_save_globalpagepermission, sender=GlobalPagePermission,
dispatch_uid='cms_pre_save_globalpagepermission')
signals.pre_delete.connect(pre_delete_globalpagepermission, sender=GlobalPagePermission,
dispatch_uid='cms_pre_delete_globalpagepermission')
###################### reversion #########################
if is_installed('reversion'):
from reversion.models import post_revision_commit
post_revision_commit.connect(post_revision, dispatch_uid='cms_post_revision')
| bsd-3-clause |
jkandasa/integration_tests | scripts/repo_gen.py | 7 | 1286 |
import click
import requests
template = """[update-{0}]
name=update-url-{0}
baseurl={1}
enabled=1
gpgcheck=0\n\n"""
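# For reference, rendering the template with (0, "http://example.com/repo")
# (a made-up URL) produces:
#
# [update-0]
# name=update-url-0
# baseurl=http://example.com/repo
# enabled=1
# gpgcheck=0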
def process_url(url):
'''Pulls urls from a network file'''
repo = requests.get(url)
urls = repo.text.split("\n")
ret_urls = []
for url in urls:
url = url[url.find("http"):].strip()
ret_urls.append(url)
return ret_urls
def build_file(urls):
    '''Builds an update.repo file based on the urls given'''
file_string = ""
c = 0
for url in urls:
if url:
file_string = "{}{}".format(file_string, template.format(c, url))
c += 1
return file_string
@click.command(help="Assist in generating update repo file")
@click.option('--url', default=None, help='Specify a URL for downloading repo data')
@click.option('--filename', default=None, help='Specify a local file containing repo data')
@click.option('--output', default="update.repo", help="output filename")
def main(output, url, filename):
"""Assist in generating update repo file"""
if url:
urls = process_url(url)
output_data = build_file(urls)
elif filename:
        print("Can't do this right now")
        return
with open(output, 'w') as f:
f.write(output_data)
if __name__ == "__main__":
main()
| gpl-2.0 |
Lujeni/ansible | lib/ansible/modules/files/synchronize.py | 31 | 22698 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012-2013, Timothy Appnel <tim@appnel.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: synchronize
version_added: "1.4"
short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy
description:
- C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy.
- It is run and originates on the local host where Ansible is being run.
- Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of
boilerplate options and host facts.
- This module is not intended to provide access to the full power of rsync, but does make the most common
invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case.
options:
src:
description:
- Path on the source host that will be synchronized to the destination.
- The path can be absolute or relative.
type: str
required: true
dest:
description:
- Path on the destination host that will be synchronized from the source.
- The path can be absolute or relative.
type: str
required: true
dest_port:
description:
- Port number for ssh on the destination host.
- Prior to Ansible 2.0, the ansible_ssh_port inventory var took precedence over this value.
- This parameter defaults to the value of C(ansible_ssh_port) or C(ansible_port),
the C(remote_port) config setting or the value from ssh client configuration
if none of the former have been set.
type: int
version_added: "1.5"
mode:
description:
- Specify the direction of the synchronization.
- In push mode the localhost or delegate is the source.
- In pull mode the remote host in context is the source.
type: str
choices: [ pull, push ]
default: push
archive:
description:
- Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D.
type: bool
default: yes
checksum:
description:
- Skip based on checksum, rather than mod-time & size; note that the "archive" option is still enabled by default - the "checksum" option will
not disable it.
type: bool
default: no
version_added: "1.6"
compress:
description:
- Compress file data during the transfer.
- In most cases, leave this enabled unless it causes problems.
type: bool
default: yes
version_added: "1.7"
existing_only:
description:
- Skip creating new files on receiver.
type: bool
default: no
version_added: "1.5"
delete:
description:
- Delete files in C(dest) that don't exist (after transfer, not before) in the C(src) path.
- This option requires C(recursive=yes).
- This option ignores excluded files and behaves like the rsync opt --delete-excluded.
type: bool
default: no
dirs:
description:
- Transfer directories without recursing.
type: bool
default: no
recursive:
description:
- Recurse into directories.
- This parameter defaults to the value of the archive option.
type: bool
links:
description:
- Copy symlinks as symlinks.
- This parameter defaults to the value of the archive option.
type: bool
copy_links:
description:
- Copy symlinks as the item that they point to (the referent), rather than as the symlink itself.
type: bool
default: no
perms:
description:
- Preserve permissions.
- This parameter defaults to the value of the archive option.
type: bool
times:
description:
- Preserve modification times.
- This parameter defaults to the value of the archive option.
type: bool
owner:
description:
- Preserve owner (super user only).
- This parameter defaults to the value of the archive option.
type: bool
group:
description:
- Preserve group.
- This parameter defaults to the value of the archive option.
type: bool
rsync_path:
description:
- Specify the rsync command to run on the remote host. See C(--rsync-path) on the rsync man page.
- To specify the rsync command to run on the local host, you need to set this your task var C(ansible_rsync_path).
type: str
rsync_timeout:
description:
- Specify a C(--timeout) for the rsync command in seconds.
type: int
default: 0
set_remote_user:
description:
- Put user@ for the remote paths.
- If you have a custom ssh config to define the remote user for a host
that does not match the inventory user, you should set this parameter to C(no).
type: bool
default: yes
use_ssh_args:
description:
- Use the ssh_args specified in ansible.cfg.
type: bool
default: no
version_added: "2.0"
rsync_opts:
description:
- Specify additional rsync options by passing in an array.
- Note that an empty string in C(rsync_opts) will end up transferring the current working directory.
type: list
default:
version_added: "1.6"
partial:
description:
- Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster.
type: bool
default: no
version_added: "2.0"
verify_host:
description:
- Verify destination host key.
type: bool
default: no
version_added: "2.0"
private_key:
description:
- Specify the private key to use for SSH-based rsync connections (e.g. C(~/.ssh/id_rsa)).
type: path
version_added: "1.6"
link_dest:
description:
- Add a destination to hard link against during the rsync.
type: list
default:
version_added: "2.5"
notes:
- rsync must be installed on both the local and remote host.
- For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host
`synchronize is connecting to`.
- The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one
remote machine.
- >
The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a
delegate_to host when delegate_to is used).
- The user and permissions for the synchronize `dest` are those of the `remote_user` on the destination host or the `become_user` if `become=yes` is active.
- In Ansible 2.0 a bug in the synchronize module made become occur on the "local host". This was fixed in Ansible 2.0.1.
- Currently, synchronize is limited to elevating permissions via passwordless sudo. This is because rsync itself is connecting to the remote machine
and rsync doesn't give us a way to pass sudo credentials in.
- Currently there are only a few connection types which support synchronize (ssh, paramiko, local, and docker) because a sync strategy has been
determined for those connection types. Note that the connection for these must not need a password as rsync itself is making the connection and
rsync does not provide us a way to pass a password to the connection.
- Expect that dest=~/x will be ~<remote_user>/x even if using sudo.
- Inspect the verbose output to validate the destination user/host/path are what was expected.
- To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory.
- rsync daemon must be up and running with correct permission when using rsync protocol in source or destination path.
- The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process
encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly.
- link_dest is subject to the same limitations as the underlying rsync daemon. Hard links are only preserved if the relative subtrees
of the source and destination are the same. Attempts to hardlink into a directory that is a subdirectory of the source will be prevented.
seealso:
- module: copy
- module: win_robocopy
author:
- Timothy Appnel (@tima)
'''
EXAMPLES = '''
- name: Synchronization of src on the control machine to dest on the remote hosts
synchronize:
src: some/relative/path
dest: /some/absolute/path
- name: Synchronization using rsync protocol (push)
synchronize:
src: some/relative/path/
dest: rsync://somehost.com/path/
- name: Synchronization using rsync protocol (pull)
synchronize:
mode: pull
src: rsync://somehost.com/path/
dest: /some/absolute/path/
- name: Synchronization using rsync protocol on delegate host (push)
synchronize:
src: /some/absolute/path/
dest: rsync://somehost.com/path/
delegate_to: delegate.host
- name: Synchronization using rsync protocol on delegate host (pull)
synchronize:
mode: pull
src: rsync://somehost.com/path/
dest: /some/absolute/path/
delegate_to: delegate.host
- name: Synchronization without any --archive options enabled
synchronize:
src: some/relative/path
dest: /some/absolute/path
archive: no
- name: Synchronization with --archive options enabled except for --recursive
synchronize:
src: some/relative/path
dest: /some/absolute/path
recursive: no
- name: Synchronization with --archive options enabled except for --times, with --checksum option enabled
synchronize:
src: some/relative/path
dest: /some/absolute/path
checksum: yes
times: no
- name: Synchronization without --archive options enabled except use --links
synchronize:
src: some/relative/path
dest: /some/absolute/path
archive: no
links: yes
- name: Synchronization of two paths both on the control machine
synchronize:
src: some/relative/path
dest: /some/absolute/path
delegate_to: localhost
- name: Synchronization of src on the inventory host to the dest on the localhost in pull mode
synchronize:
mode: pull
src: some/relative/path
dest: /some/absolute/path
- name: Synchronization of src on delegate host to dest on the current inventory host.
synchronize:
src: /first/absolute/path
dest: /second/absolute/path
delegate_to: delegate.host
- name: Synchronize two directories on one remote host.
synchronize:
src: /first/absolute/path
dest: /second/absolute/path
delegate_to: "{{ inventory_hostname }}"
- name: Synchronize and delete files in dest on the remote host that are not found in src of localhost.
synchronize:
src: some/relative/path
dest: /some/absolute/path
delete: yes
recursive: yes
# This specific command is granted su privileges on the destination
- name: Synchronize using an alternate rsync command
synchronize:
src: some/relative/path
dest: /some/absolute/path
rsync_path: su -c rsync
# Example .rsync-filter file in the source directory
# - var # exclude any path whose last part is 'var'
# - /var # exclude any path starting with 'var' starting at the source directory
# + /var/conf # include /var/conf even though it was previously excluded
- name: Synchronize passing in extra rsync options
synchronize:
src: /tmp/helloworld
dest: /var/www/helloworld
rsync_opts:
- "--no-motd"
- "--exclude=.git"
# Hardlink files if they didn't change
- name: Use hardlinks when synchronizing filesystems
synchronize:
src: /tmp/path_a/foo.txt
dest: /tmp/path_b/foo.txt
link_dest: /tmp/path_a/
# Specify the rsync binary to use on remote host and on local host
- hosts: groupofhosts
vars:
ansible_rsync_path: /usr/gnu/bin/rsync
tasks:
- name: copy /tmp/localpath/ to remote location /tmp/remotepath
synchronize:
src: /tmp/localpath/
dest: /tmp/remotepath
rsync_path: /usr/gnu/bin/rsync
'''
import os
import errno
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.six.moves import shlex_quote
client_addr = None
def substitute_controller(path):
global client_addr
if not client_addr:
ssh_env_string = os.environ.get('SSH_CLIENT', None)
try:
client_addr, _ = ssh_env_string.split(None, 1)
except AttributeError:
ssh_env_string = os.environ.get('SSH_CONNECTION', None)
try:
client_addr, _ = ssh_env_string.split(None, 1)
except AttributeError:
pass
if not client_addr:
raise ValueError
if path.startswith('localhost:'):
path = path.replace('localhost', client_addr, 1)
return path
def is_rsh_needed(source, dest):
if source.startswith('rsync://') or dest.startswith('rsync://'):
return False
if ':' in source or ':' in dest:
return True
return False
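# Quick illustration (comments only, values made up) of how is_rsh_needed()
# classifies paths:
#   is_rsh_needed('src/', 'host:/dst')        -> True   (remote shell required)
#   is_rsh_needed('src/', 'rsync://host/dst') -> False  (rsync daemon protocol)
#   is_rsh_needed('src/', '/dst')             -> False  (both paths local)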
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(type='str', required=True),
dest=dict(type='str', required=True),
dest_port=dict(type='int'),
delete=dict(type='bool', default=False),
private_key=dict(type='path'),
rsync_path=dict(type='str'),
_local_rsync_path=dict(type='path', default='rsync'),
_local_rsync_password=dict(type='str', no_log=True),
_substitute_controller=dict(type='bool', default=False),
archive=dict(type='bool', default=True),
checksum=dict(type='bool', default=False),
compress=dict(type='bool', default=True),
existing_only=dict(type='bool', default=False),
dirs=dict(type='bool', default=False),
recursive=dict(type='bool'),
links=dict(type='bool'),
copy_links=dict(type='bool', default=False),
perms=dict(type='bool'),
times=dict(type='bool'),
owner=dict(type='bool'),
group=dict(type='bool'),
set_remote_user=dict(type='bool', default=True),
rsync_timeout=dict(type='int', default=0),
rsync_opts=dict(type='list', default=[]),
ssh_args=dict(type='str'),
partial=dict(type='bool', default=False),
verify_host=dict(type='bool', default=False),
mode=dict(type='str', default='push', choices=['pull', 'push']),
link_dest=dict(type='list')
),
supports_check_mode=True,
)
if module.params['_substitute_controller']:
try:
source = substitute_controller(module.params['src'])
dest = substitute_controller(module.params['dest'])
except ValueError:
module.fail_json(msg='Could not determine controller hostname for rsync to send to')
else:
source = module.params['src']
dest = module.params['dest']
dest_port = module.params['dest_port']
delete = module.params['delete']
private_key = module.params['private_key']
rsync_path = module.params['rsync_path']
rsync = module.params.get('_local_rsync_path', 'rsync')
rsync_password = module.params.get('_local_rsync_password')
    rsync_timeout = module.params['rsync_timeout']
archive = module.params['archive']
checksum = module.params['checksum']
compress = module.params['compress']
existing_only = module.params['existing_only']
dirs = module.params['dirs']
partial = module.params['partial']
# the default of these params depends on the value of archive
recursive = module.params['recursive']
links = module.params['links']
copy_links = module.params['copy_links']
perms = module.params['perms']
times = module.params['times']
owner = module.params['owner']
group = module.params['group']
rsync_opts = module.params['rsync_opts']
ssh_args = module.params['ssh_args']
verify_host = module.params['verify_host']
link_dest = module.params['link_dest']
if '/' not in rsync:
rsync = module.get_bin_path(rsync, required=True)
cmd = [rsync, '--delay-updates', '-F']
_sshpass_pipe = None
if rsync_password:
try:
module.run_command(["sshpass"])
except OSError:
module.fail_json(
msg="to use rsync connection with passwords, you must install the sshpass program"
)
_sshpass_pipe = os.pipe()
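        # sshpass reads the password from the file descriptor given to -d; we
        # pass it the read end of this pipe and write the password into the
        # write end just before rsync runs (see _write_password_to_pipe below).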
cmd = ['sshpass', '-d' + to_native(_sshpass_pipe[0], errors='surrogate_or_strict')] + cmd
if compress:
cmd.append('--compress')
if rsync_timeout:
cmd.append('--timeout=%s' % rsync_timeout)
if module.check_mode:
cmd.append('--dry-run')
if delete:
cmd.append('--delete-after')
if existing_only:
cmd.append('--existing')
if checksum:
cmd.append('--checksum')
if copy_links:
cmd.append('--copy-links')
if archive:
cmd.append('--archive')
if recursive is False:
cmd.append('--no-recursive')
if links is False:
cmd.append('--no-links')
if perms is False:
cmd.append('--no-perms')
if times is False:
cmd.append('--no-times')
if owner is False:
cmd.append('--no-owner')
if group is False:
cmd.append('--no-group')
else:
if recursive is True:
cmd.append('--recursive')
if links is True:
cmd.append('--links')
if perms is True:
cmd.append('--perms')
if times is True:
cmd.append('--times')
if owner is True:
cmd.append('--owner')
if group is True:
cmd.append('--group')
if dirs:
cmd.append('--dirs')
if source.startswith('rsync://') and dest.startswith('rsync://'):
module.fail_json(msg='either src or dest must be a localhost', rc=1)
if is_rsh_needed(source, dest):
# https://github.com/ansible/ansible/issues/15907
has_rsh = False
for rsync_opt in rsync_opts:
if '--rsh' in rsync_opt:
has_rsh = True
break
# if the user has not supplied an --rsh option go ahead and add ours
if not has_rsh:
ssh_cmd = [module.get_bin_path('ssh', required=True), '-S', 'none']
if private_key is not None:
ssh_cmd.extend(['-i', private_key])
# If the user specified a port value
# Note: The action plugin takes care of setting this to a port from
# inventory if the user didn't specify an explicit dest_port
if dest_port is not None:
ssh_cmd.extend(['-o', 'Port=%s' % dest_port])
if not verify_host:
ssh_cmd.extend(['-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null'])
ssh_cmd_str = ' '.join(shlex_quote(arg) for arg in ssh_cmd)
if ssh_args:
ssh_cmd_str += ' %s' % ssh_args
cmd.append('--rsh=%s' % ssh_cmd_str)
if rsync_path:
cmd.append('--rsync-path=%s' % rsync_path)
if rsync_opts:
if '' in rsync_opts:
module.warn('The empty string is present in rsync_opts which will cause rsync to'
' transfer the current working directory. If this is intended, use "."'
' instead to get rid of this warning. If this is unintended, check for'
' problems in your playbook leading to empty string in rsync_opts.')
cmd.extend(rsync_opts)
if partial:
cmd.append('--partial')
if link_dest:
cmd.append('-H')
# verbose required because rsync does not believe that adding a
# hardlink is actually a change
cmd.append('-vv')
for x in link_dest:
link_path = os.path.abspath(os.path.expanduser(x))
destination_path = os.path.abspath(os.path.dirname(dest))
if destination_path.find(link_path) == 0:
module.fail_json(msg='Hardlinking into a subdirectory of the source would cause recursion. %s and %s' % (destination_path, dest))
cmd.append('--link-dest=%s' % link_path)
changed_marker = '<<CHANGED>>'
cmd.append('--out-format=' + changed_marker + '%i %n%L')
# expand the paths
if '@' not in source:
source = os.path.expanduser(source)
if '@' not in dest:
dest = os.path.expanduser(dest)
cmd.append(source)
cmd.append(dest)
cmdstr = ' '.join(cmd)
# If we are using password authentication, write the password into the pipe
if rsync_password:
def _write_password_to_pipe(proc):
os.close(_sshpass_pipe[0])
try:
os.write(_sshpass_pipe[1], to_bytes(rsync_password) + b'\n')
except OSError as exc:
# Ignore broken pipe errors if the sshpass process has exited.
if exc.errno != errno.EPIPE or proc.poll() is None:
raise
(rc, out, err) = module.run_command(
cmd, pass_fds=_sshpass_pipe,
before_communicate_callback=_write_password_to_pipe)
else:
(rc, out, err) = module.run_command(cmd)
if rc:
return module.fail_json(msg=err, rc=rc, cmd=cmdstr)
if link_dest:
# a leading period indicates no change
changed = (changed_marker + '.') not in out
else:
changed = changed_marker in out
out_clean = out.replace(changed_marker, '')
out_lines = out_clean.split('\n')
while '' in out_lines:
out_lines.remove('')
if module._diff:
diff = {'prepared': out_clean}
return module.exit_json(changed=changed, msg=out_clean,
rc=rc, cmd=cmdstr, stdout_lines=out_lines,
diff=diff)
return module.exit_json(changed=changed, msg=out_clean,
rc=rc, cmd=cmdstr, stdout_lines=out_lines)
if __name__ == '__main__':
main()
| gpl-3.0 |
maohongyuan/kbengine | kbe/res/scripts/common/Lib/lib2to3/fixes/fix_throw.py | 203 | 1582 |
"""Fixer for generator.throw(E, V, T).
g.throw(E) -> g.throw(E)
g.throw(E, V) -> g.throw(E(V))
g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
g.throw("foo"[, V[, T]]) will warn about string exceptions."""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ArgList, Attr, is_tuple
class FixThrow(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< any trailer< '.' 'throw' >
trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
>
|
power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
"""
def transform(self, node, results):
syms = self.syms
exc = results["exc"].clone()
if exc.type is token.STRING:
self.cannot_convert(node, "Python 3 does not support string exceptions")
return
# Leave "g.throw(E)" alone
val = results.get("val")
if val is None:
return
val = val.clone()
if is_tuple(val):
args = [c.clone() for c in val.children[1:-1]]
else:
val.prefix = ""
args = [val]
throw_args = results["args"]
if "tb" in results:
tb = results["tb"].clone()
tb.prefix = ""
e = Call(exc, args)
with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
throw_args.replace(pytree.Node(syms.power, with_tb))
else:
throw_args.replace(Call(exc, args))
| lgpl-3.0 |
gvanburen/agrmin_multi | main.py | 1 | 1723 |
#!/usr/bin/python3
from markdown import Markdown
from slugify import slugify
import glob
global post_paths
index_links = ['<ul>']
links = ['<ul>']
def main():
build_posts()
build_links()
build_pages()
write_index()
def build_posts():
global post_paths
post_paths = glob.glob("posts/*.md")
for idx, post in enumerate(post_paths):
post_paths[idx] = post.replace("posts/", "")
def build_links():
for post_path in post_paths:
html, title, slug = get_post_details(post_path)
index_links.append('<li><a href=pages/{slug}.html>{title}</a></li>'.format(**locals()))
links.append('<li><a href=../pages/{slug}.html>{title}</a></li>'.format(**locals()))
index_links.append('</ul>')
links.append('</ul>')
def build_pages():
for post_path in post_paths:
html, title, slug = get_post_details(post_path)
html_title = '<h2 id="{slug}">{title}</h2>'.format(**locals())
with open('static/header.html') as header, open('static/footer.html') as footer, open('pages/{}.html'.format(slug), 'w+') as p:
p.write('\n'.join([header.read()] + links + [html_title] + [html] + [footer.read()]))
def get_post_details(post_path):
md = Markdown(extensions=['markdown.extensions.meta'])
with open('posts/{}'.format(post_path)) as f:
html = md.convert(f.read())
title = md.Meta['title'][0]
slug = slugify(title)
return html, title, slug
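# For reference, get_post_details() relies on the markdown "meta" extension's
# header syntax; a minimal posts/hello.md (hypothetical) would look like:
#
#   Title: Hello World
#
#   Body of the post in Markdown...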
def write_index():
with open('static/header.html') as header, open('static/footer.html') as footer, open('index.html', 'w') as index:
index.write('\n'.join([header.read()] + index_links + [footer.read()]))
if __name__ == "__main__":
main()
| mit |
franosincic/edx-platform | common/test/acceptance/pages/studio/asset_index.py | 53 | 2865 |
"""
The Files and Uploads page for a course in Studio
"""
import urllib
import os
from opaque_keys.edx.locator import CourseLocator
from . import BASE_URL
from .course_page import CoursePage
from bok_choy.javascript import wait_for_js, requirejs
@requirejs('js/views/assets')
class AssetIndexPage(CoursePage):
"""
The Files and Uploads page for a course in Studio
"""
url_path = "assets"
type_filter_element = '#js-asset-type-col'
@property
def url(self):
"""
Construct a URL to the page within the course.
"""
# TODO - is there a better way to make this agnostic to the underlying default module store?
default_store = os.environ.get('DEFAULT_STORE', 'draft')
course_key = CourseLocator(
self.course_info['course_org'],
self.course_info['course_num'],
self.course_info['course_run'],
deprecated=(default_store == 'draft')
)
url = "/".join([BASE_URL, self.url_path, urllib.quote_plus(unicode(course_key))])
        return url if url[-1] == '/' else url + '/'
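    # For example (hypothetical course values): with the default draft store,
    # org='edX', num='101', run='2014', the property above quotes the
    # deprecated key 'edX/101/2014' into <BASE_URL>/assets/edX%2F101%2F2014/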
@wait_for_js
def is_browser_on_page(self):
return all([
self.q(css='body.view-uploads').present,
self.q(css='.page-header').present,
not self.q(css='div.ui-loading').visible,
])
@wait_for_js
def type_filter_on_page(self):
"""
Checks that type filter is in table header.
"""
return self.q(css=self.type_filter_element).present
@wait_for_js
def type_filter_header_label_visible(self):
"""
Checks type filter label is added and visible in the pagination header.
"""
return self.q(css='span.filter-column').visible
@wait_for_js
def click_type_filter(self):
"""
Clicks type filter menu.
"""
self.q(css=".filterable-column .nav-item").click()
@wait_for_js
def select_type_filter(self, filter_number):
"""
Selects Type filter from dropdown which filters the results.
Returns False if no filter.
"""
self.wait_for_ajax()
if self.q(css=".filterable-column .nav-item").is_present():
if not self.q(css=self.type_filter_element + " .wrapper-nav-sub").visible:
self.q(css=".filterable-column > .nav-item").first.click()
self.wait_for_element_visibility(
self.type_filter_element + " .wrapper-nav-sub", "Type Filter promise satisfied.")
self.q(css=self.type_filter_element + " .column-filter-link").nth(filter_number).click()
self.wait_for_ajax()
return True
return False
def return_results_set(self):
"""
Returns the asset set from the page
"""
return self.q(css="#asset-table-body tr").results
| agpl-3.0 |
ujjwalwahi/odoo | addons/mrp_operations/report/mrp_code_barcode.py | 381 | 1511 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class code_barcode(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(code_barcode, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fsb4000/bitcoin | qa/rpc-tests/nulldummy.py | 62 | 6534 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
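    # Replace the dummy (first) element of the scriptSig -- which NULLDUMMY
    # requires to be an empty push -- with OP_1 (0x51), yielding a transaction
    # that violates NULLDUMMY but is still valid under the pre-fork rules.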
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
'''
This test is meant to exercise NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
'''
class NULLDUMMYTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = True
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-walletprematurewitness']])
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
NetworkThread().start() # Start up network handling in another thread
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
print ("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.tx_submit(self.nodes[0], test1txs[0])
test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.tx_submit(self.nodes[0], test1txs[1])
test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
txid3 = self.tx_submit(self.nodes[0], test1txs[2])
self.block_submit(self.nodes[0], test1txs, False, True)
print ("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 48)
trueDummy(test2tx)
txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)
print ("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
print ("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = self.create_transaction(self.nodes[0], txid4, self.address, 47)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR)
self.block_submit(self.nodes[0], [test4tx])
print ("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
self.tx_submit(self.nodes[0], test5tx, NULLDUMMY_ERROR)
self.block_submit(self.nodes[0], [test5tx], True)
print ("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.tx_submit(self.nodes[0], i)
self.block_submit(self.nodes[0], test6txs, True, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def tx_submit(self, node, tx, msg = ""):
tx.rehash()
try:
node.sendrawtransaction(bytes_to_hex_str(tx.serialize_with_witness()), True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], msg)
else:
assert_equal('', msg)
return tx.hash
def block_submit(self, node, txs, witness = False, accept = False):
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| mit |
Juniper/contrail-dev-neutron | neutron/plugins/bigswitch/plugin.py | 3 | 48990 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com, Big Switch Networks, Inc.
"""
Neutron REST Proxy Plug-in for Big Switch and FloodLight Controllers.
NeutronRestProxy provides a generic neutron plugin that translates all plugin
function calls to equivalent authenticated REST calls to a set of redundant
external network controllers. It also keeps persistent store for all neutron
state to allow for re-sync of the external controller(s), if required.
The local state on the plugin also allows for local response and fast-fail
semantics where it can be determined based on the local persistent store.
Network controller specific code is decoupled from this plugin and expected
to reside on the controller itself (via the REST interface).
This allows for:
- independent authentication and redundancy schemes between neutron and the
network controller
- independent upgrade/development cycles between neutron and the controller
as it limits the proxy code upgrade requirement to neutron release cycle
and the controller specific code upgrade requirement to controller code
- ability to sync the controller with neutron for independent recovery/reset
External REST API used by proxy is the same API as defined for neutron (JSON
subset) with some additional parameters (gateway on network-create and macaddr
on port-attach) on an additional PUT to do a bulk dump of all persistent data.
"""
import copy
import httplib
import re
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc as sqlexc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api import extensions as neutron_extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron import context as qcontext
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import securitygroups_db as sg_db
from neutron.db import securitygroups_rpc_base as sg_rpc_base
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.plugins.bigswitch import config as pl_config
from neutron.plugins.bigswitch.db import porttracker_db
from neutron.plugins.bigswitch import extensions
from neutron.plugins.bigswitch import routerrule_db
from neutron.plugins.bigswitch import servermanager
from neutron.plugins.bigswitch.version import version_string_with_vcs
LOG = logging.getLogger(__name__)
SYNTAX_ERROR_MESSAGE = _('Syntax error in server config file, aborting plugin')
METADATA_SERVER_IP = '169.254.169.254'
class AgentNotifierApi(rpc.proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_port_update = topics.get_topic_name(
topic, topics.PORT, topics.UPDATE)
def port_update(self, context, port):
self.fanout_cast(context,
self.make_msg('port_update',
port=port),
topic=self.topic_port_update)
class RestProxyCallbacks(sg_rpc_base.SecurityGroupServerRpcCallbackMixin,
dhcp_rpc_base.DhcpRpcCallbackMixin):
RPC_API_VERSION = '1.1'
def create_rpc_dispatcher(self):
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
def get_port_from_device(self, device):
port_id = re.sub(r"^tap", "", device)
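# The tap device name embeds only a truncated port UUID (Linux interface
# names are length-limited), so the lookup below can only match on an id
# prefix rather than the full UUID.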
port = self.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_port_and_sgs(self, port_id):
"""Get port from database with security group info."""
LOG.debug(_("get_port_and_sgs() called for port_id %s"), port_id)
session = db.get_session()
sg_binding_port = sg_db.SecurityGroupPortBinding.port_id
with session.begin(subtransactions=True):
query = session.query(
models_v2.Port,
sg_db.SecurityGroupPortBinding.security_group_id
)
query = query.outerjoin(sg_db.SecurityGroupPortBinding,
models_v2.Port.id == sg_binding_port)
query = query.filter(models_v2.Port.id.startswith(port_id))
port_and_sgs = query.all()
if not port_and_sgs:
return
port = port_and_sgs[0][0]
plugin = manager.NeutronManager.get_plugin()
port_dict = plugin._make_port_dict(port)
port_dict['security_groups'] = [
sg_id for port_, sg_id in port_and_sgs if sg_id]
port_dict['security_group_rules'] = []
port_dict['security_group_source_groups'] = []
port_dict['fixed_ips'] = [ip['ip_address']
for ip in port['fixed_ips']]
return port_dict
class NeutronRestProxyV2Base(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
routerrule_db.RouterRule_db_mixin):
supported_extension_aliases = ["binding"]
servers = None
def __init__(self, server_timeout=None):
super(NeutronRestProxyV2Base, self).__init__()
# This base class is not intended to be instantiated directly.
# Extending class should set ServerPool.
if not self.servers:
LOG.warning(_("ServerPool not set!"))
def _get_all_data(self, get_ports=True, get_floating_ips=True,
get_routers=True):
admin_context = qcontext.get_admin_context()
networks = []
all_networks = self.get_networks(admin_context) or []
for net in all_networks:
mapped_network = self._get_mapped_network_with_subnets(net)
flips_n_ports = {}
if get_floating_ips:
flips_n_ports = self._get_network_with_floatingips(
mapped_network)
if get_ports:
ports = []
net_filter = {'network_id': [net.get('id')]}
net_ports = self.get_ports(admin_context,
filters=net_filter) or []
for port in net_ports:
mapped_port = self._map_state_and_status(port)
mapped_port['attachment'] = {
'id': port.get('device_id'),
'mac': port.get('mac_address'),
}
mapped_port = self._extend_port_dict_binding(admin_context,
mapped_port)
ports.append(mapped_port)
flips_n_ports['ports'] = ports
if flips_n_ports:
networks.append(flips_n_ports)
data = {'networks': networks}
if get_routers:
routers = []
all_routers = self.get_routers(admin_context) or []
for router in all_routers:
interfaces = []
mapped_router = self._map_state_and_status(router)
router_filter = {
'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
'device_id': [router.get('id')]
}
router_ports = self.get_ports(admin_context,
filters=router_filter) or []
for port in router_ports:
net_id = port.get('network_id')
subnet_id = port['fixed_ips'][0]['subnet_id']
intf_details = self._get_router_intf_details(admin_context,
net_id,
subnet_id)
interfaces.append(intf_details)
mapped_router['interfaces'] = interfaces
routers.append(mapped_router)
data.update({'routers': routers})
return data
def _send_all_data(self, send_ports=True, send_floating_ips=True,
send_routers=True, timeout=None,
triggered_by_tenant=None):
"""Pushes all data to network ctrl (networks/ports, ports/attachments).
This gives the controller an option to re-sync its persistent store
with neutron's current view of that data.
"""
data = self._get_all_data(send_ports, send_floating_ips, send_routers)
data['triggered_by_tenant'] = triggered_by_tenant
errstr = _("Unable to update remote topology: %s")
return self.servers.rest_action('PUT', servermanager.TOPOLOGY_PATH,
data, errstr, timeout=timeout)
def _get_network_with_floatingips(self, network, context=None):
if context is None:
context = qcontext.get_admin_context()
net_id = network['id']
net_filter = {'floating_network_id': [net_id]}
fl_ips = self.get_floatingips(context,
filters=net_filter) or []
network['floatingips'] = fl_ips
return network
def _get_all_subnets_json_for_network(self, net_id, context=None):
if context is None:
context = qcontext.get_admin_context()
# start a sub-transaction to avoid breaking parent transactions
with context.session.begin(subtransactions=True):
subnets = self._get_subnets_by_network(context,
net_id)
subnets_details = []
if subnets:
for subnet in subnets:
subnet_dict = self._make_subnet_dict(subnet)
mapped_subnet = self._map_state_and_status(subnet_dict)
subnets_details.append(mapped_subnet)
return subnets_details
def _get_mapped_network_with_subnets(self, network, context=None):
# if context is not provided, admin context is used
if context is None:
context = qcontext.get_admin_context()
network = self._map_state_and_status(network)
subnets = self._get_all_subnets_json_for_network(network['id'],
context)
network['subnets'] = subnets
for subnet in (subnets or []):
if subnet['gateway_ip']:
# FIX: For backward compatibility with wire protocol
network['gateway'] = subnet['gateway_ip']
break
else:
network['gateway'] = ''
network[external_net.EXTERNAL] = self._network_is_external(
context, network['id'])
# include ML2 segmentation types
network['segmentation_types'] = getattr(self, "segmentation_types", "")
return network
def _send_create_network(self, network, context=None):
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
self.servers.rest_create_network(tenant_id, mapped_network)
def _send_update_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
mapped_network = self._get_mapped_network_with_subnets(network,
context)
net_fl_ips = self._get_network_with_floatingips(mapped_network,
context)
self.servers.rest_update_network(tenant_id, net_id, net_fl_ips)
def _send_delete_network(self, network, context=None):
net_id = network['id']
tenant_id = network['tenant_id']
self.servers.rest_delete_network(tenant_id, net_id)
def _map_state_and_status(self, resource):
resource = copy.copy(resource)
resource['state'] = ('UP' if resource.pop('admin_state_up',
True) else 'DOWN')
resource.pop('status', None)
return resource
def _warn_on_state_status(self, resource):
if resource.get('admin_state_up', True) is False:
LOG.warning(_("Setting admin_state_up=False is not supported "
"in this plugin version. Ignoring setting for "
"resource: %s"), resource)
if 'status' in resource:
if resource['status'] != const.NET_STATUS_ACTIVE:
LOG.warning(_("Operational status is internally set by the "
"plugin. Ignoring setting status=%s."),
resource['status'])
def _get_router_intf_details(self, context, intf_id, subnet_id):
# we will use the network id as interface's id
net_id = intf_id
network = self.get_network(context, net_id)
subnet = self.get_subnet(context, subnet_id)
mapped_network = self._get_mapped_network_with_subnets(network)
mapped_subnet = self._map_state_and_status(subnet)
data = {
'id': intf_id,
"network": mapped_network,
"subnet": mapped_subnet
}
return data
def _extend_port_dict_binding(self, context, port):
cfg_vif_type = cfg.CONF.NOVA.vif_type.lower()
if cfg_vif_type not in (portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_IVS):
LOG.warning(_("Unrecognized vif_type in configuration "
"[%s]. Defaulting to ovs."),
cfg_vif_type)
cfg_vif_type = portbindings.VIF_TYPE_OVS
hostid = porttracker_db.get_port_hostid(context, port['id'])
if hostid:
port[portbindings.HOST_ID] = hostid
override = self._check_hostvif_override(hostid)
if override:
cfg_vif_type = override
port[portbindings.VIF_TYPE] = cfg_vif_type
port[portbindings.VIF_DETAILS] = {
# TODO(rkukura): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}
return port
def _check_hostvif_override(self, hostid):
for v in cfg.CONF.NOVA.vif_types:
if hostid in getattr(cfg.CONF.NOVA, "node_override_vif_" + v, []):
return v
return False
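# Hypothetical configuration illustrating the override lookup above
# (section and host names are examples only):
#   [NOVA]
#   vif_types = ovs,ivs
#   node_override_vif_ivs = compute-node-7
# A port whose host id is 'compute-node-7' would then be bound with the
# 'ivs' vif_type instead of the configured default.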
def _get_port_net_tenantid(self, context, port):
net = super(NeutronRestProxyV2Base,
self).get_network(context, port["network_id"])
return net['tenant_id']
def async_port_create(self, tenant_id, net_id, port):
try:
self.servers.rest_create_port(tenant_id, net_id, port)
except servermanager.RemoteRestError as e:
# 404 should never be received on a port create unless
# there are inconsistencies between the data in neutron
# and the data in the backend.
# Run a sync to get it consistent.
if (cfg.CONF.RESTPROXY.auto_sync_on_failure and
e.status == httplib.NOT_FOUND and
servermanager.NXNETWORK in e.reason):
LOG.error(_("Iconsistency with backend controller "
"triggering full synchronization."))
# args depend on if we are operating in ML2 driver
# or as the full plugin
topoargs = self.servers.get_topo_function_args
self._send_all_data(
send_ports=topoargs['get_ports'],
send_floating_ips=topoargs['get_floating_ips'],
send_routers=topoargs['get_routers'],
triggered_by_tenant=tenant_id
)
# If the full sync worked, the port will be created
# on the controller so it can be safely marked as active
else:
# Any errors that don't result in a successful auto-sync
# require that the port be placed into the error state.
LOG.error(
_("NeutronRestProxyV2: Unable to create port: %s"), e)
try:
self._set_port_status(port['id'], const.PORT_STATUS_ERROR)
except exceptions.PortNotFound:
# If port is already gone from DB and there was an error
# creating on the backend, everything is already consistent
pass
return
new_status = (const.PORT_STATUS_ACTIVE if port['state'] == 'UP'
else const.PORT_STATUS_DOWN)
try:
self._set_port_status(port['id'], new_status)
except exceptions.PortNotFound:
# This port was deleted before the create made it to the controller
# so it now needs to be deleted since the normal delete request
# would have deleted a non-existent port.
self.servers.rest_delete_port(tenant_id, net_id, port['id'])
def _set_port_status(self, port_id, status):
session = db.get_session()
try:
port = session.query(models_v2.Port).filter_by(id=port_id).one()
port['status'] = status
session.flush()
except sqlexc.NoResultFound:
raise exceptions.PortNotFound(port_id=port_id)
class NeutronRestProxyV2(NeutronRestProxyV2Base,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
sg_rpc_base.SecurityGroupServerRpcMixin):
_supported_extension_aliases = ["external-net", "router", "binding",
"router_rules", "extra_dhcp_opt", "quotas",
"dhcp_agent_scheduler", "agent",
"security-group", "allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
self._aliases = aliases
return self._aliases
def __init__(self, server_timeout=None):
super(NeutronRestProxyV2, self).__init__()
LOG.info(_('NeutronRestProxy: Starting plugin. Version=%s'),
version_string_with_vcs())
pl_config.register_config()
self.evpool = eventlet.GreenPool(cfg.CONF.RESTPROXY.thread_pool_size)
# Include the BigSwitch Extensions path in the api_extensions
neutron_extensions.append_api_extensions_path(extensions.__path__)
self.add_meta_server_route = cfg.CONF.RESTPROXY.add_meta_server_route
# init network ctrl connections
self.servers = servermanager.ServerPool(server_timeout)
self.servers.get_topo_function = self._get_all_data
self.servers.get_topo_function_args = {'get_ports': True,
'get_floating_ips': True,
'get_routers': True}
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
# setup rpc for security and DHCP agents
self._setup_rpc()
if cfg.CONF.RESTPROXY.sync_data:
self._send_all_data()
LOG.debug(_("NeutronRestProxyV2: initialization done"))
def _setup_rpc(self):
self.conn = rpc.create_connection(new=True)
self.topic = topics.PLUGIN
self.notifier = AgentNotifierApi(topics.AGENT)
# init dhcp agent support
self._dhcp_agent_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
self._dhcp_agent_notifier
)
self.callbacks = RestProxyCallbacks()
self.dispatcher = self.callbacks.create_rpc_dispatcher()
self.conn.create_consumer(self.topic, self.dispatcher,
fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def create_network(self, context, network):
"""Create a network.
Network represents an L2 network segment which can have a set of
subnets and ports associated with it.
:param context: neutron api request context
:param network: dictionary describing the network
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can specify
a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_network() called"))
self._warn_on_state_status(network['network'])
with context.session.begin(subtransactions=True):
self._ensure_default_security_group(
context,
network['network']["tenant_id"]
)
# create network in DB
new_net = super(NeutronRestProxyV2, self).create_network(context,
network)
self._process_l3_create(context, new_net, network['network'])
# create network on the network controller
self._send_create_network(new_net, context)
# return created network
return new_net
def update_network(self, context, net_id, network):
"""Updates the properties of a particular Virtual Network.
:param context: neutron api request context
:param net_id: uuid of the network to update
:param network: dictionary describing the updates
:returns: a sequence of mappings with the following signature:
{
"id": UUID representing the network.
"name": Human-readable name identifying the network.
"tenant_id": Owner of network. NOTE: only admin user can
specify a tenant_id other than its own.
"admin_state_up": Sets admin state of network.
if down, network does not forward packets.
"status": Indicates whether network is currently operational
(values are "ACTIVE", "DOWN", "BUILD", and "ERROR")
"subnets": Subnets associated with this network.
}
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2.update_network() called"))
self._warn_on_state_status(network['network'])
session = context.session
with session.begin(subtransactions=True):
new_net = super(NeutronRestProxyV2, self).update_network(
context, net_id, network)
self._process_l3_update(context, new_net, network['network'])
# update network on network controller
self._send_update_network(new_net, context)
return new_net
def delete_network(self, context, net_id):
"""Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
:returns: None
:raises: exceptions.NetworkInUse
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_network() called"))
# Validate args
orig_net = super(NeutronRestProxyV2, self).get_network(context, net_id)
filter = {'network_id': [net_id]}
ports = self.get_ports(context, filters=filter)
# check if there are any tenant owned ports in-use
auto_delete_port_owners = db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS
only_auto_del = all(p['device_owner'] in auto_delete_port_owners
for p in ports)
if not only_auto_del:
raise exceptions.NetworkInUse(net_id=net_id)
with context.session.begin(subtransactions=True):
ret_val = super(NeutronRestProxyV2, self).delete_network(context,
net_id)
self._send_delete_network(orig_net, context)
return ret_val
def create_port(self, context, port):
"""Create a port, which is a connection point of a device
(e.g., a VM NIC) to attach to a L2 Neutron network.
:param context: neutron api request context
:param port: dictionary describing the port
:returns:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": Sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID"s and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.NetworkNotFound
:raises: exceptions.StateInvalid
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: create_port() called"))
# Update DB in new session so exceptions rollback changes
with context.session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
# set port status to pending. updated after rest call completes
port['port']['status'] = const.PORT_STATUS_BUILD
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
new_port = super(NeutronRestProxyV2, self).create_port(context,
port)
self._process_port_create_security_group(context, new_port, sgids)
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
new_port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, new_port,
port['port'].get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, new_port,
dhcp_opts)
new_port = self._extend_port_dict_binding(context, new_port)
net = super(NeutronRestProxyV2,
self).get_network(context, new_port["network_id"])
if self.add_meta_server_route:
if new_port['device_owner'] == const.DEVICE_OWNER_DHCP:
destination = METADATA_SERVER_IP + '/32'
self._add_host_route(context, destination, new_port)
# create on network ctrl
mapped_port = self._map_state_and_status(new_port)
self.evpool.spawn_n(self.async_port_create, net["tenant_id"],
new_port["network_id"], mapped_port)
self.notify_security_groups_member_updated(context, new_port)
return new_port
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
port = super(NeutronRestProxyV2, self).get_port(context, id,
fields)
self._extend_port_dict_binding(context, port)
return self._fields(port, fields)
def get_ports(self, context, filters=None, fields=None):
with context.session.begin(subtransactions=True):
ports = super(NeutronRestProxyV2, self).get_ports(context, filters,
fields)
for port in ports:
self._extend_port_dict_binding(context, port)
return [self._fields(port, fields) for port in ports]
def update_port(self, context, port_id, port):
"""Update values of a port.
:param context: neutron api request context
:param id: UUID representing the port to update.
:param port: dictionary with keys indicating fields to update.
:returns: a mapping sequence with the following signature:
{
"id": uuid represeting the port.
"network_id": uuid of network.
"tenant_id": tenant_id
"mac_address": mac address to use on this port.
"admin_state_up": sets admin state of port. if down, port
does not forward packets.
"status": dicates whether port is currently operational
(limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
"fixed_ips": list of subnet ID's and IP addresses to be used on
this port
"device_id": identifies the device (e.g., virtual server) using
this port.
}
:raises: exceptions.StateInvalid
:raises: exceptions.PortNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: update_port() called"))
self._warn_on_state_status(port['port'])
# Validate Args
orig_port = super(NeutronRestProxyV2, self).get_port(context, port_id)
with context.session.begin(subtransactions=True):
# Update DB
new_port = super(NeutronRestProxyV2,
self).update_port(context, port_id, port)
ctrl_update_required = False
if addr_pair.ADDRESS_PAIRS in port['port']:
ctrl_update_required |= (
self.update_address_pairs_on_port(context, port_id, port,
orig_port, new_port))
if 'fixed_ips' in port['port']:
self._check_fixed_ips_and_address_pairs_no_overlap(
context, new_port)
self._update_extra_dhcp_opts_on_port(context, port_id, port,
new_port)
old_host_id = porttracker_db.get_port_hostid(context,
orig_port['id'])
if (portbindings.HOST_ID in port['port']
and 'id' in new_port):
host_id = port['port'][portbindings.HOST_ID]
porttracker_db.put_port_hostid(context, new_port['id'],
host_id)
if old_host_id != host_id:
ctrl_update_required = True
if (new_port.get("device_id") != orig_port.get("device_id") and
orig_port.get("device_id")):
ctrl_update_required = True
if ctrl_update_required:
# tenant_id must come from network in case network is shared
net_tenant_id = self._get_port_net_tenantid(context, new_port)
new_port = self._extend_port_dict_binding(context, new_port)
mapped_port = self._map_state_and_status(new_port)
self.servers.rest_update_port(net_tenant_id,
new_port["network_id"],
mapped_port)
agent_update_required = self.update_security_group_on_port(
context, port_id, port, orig_port, new_port)
agent_update_required |= self.is_security_group_member_updated(
context, orig_port, new_port)
# return new_port
return new_port
def delete_port(self, context, port_id, l3_port_check=True):
"""Delete a port.
:param context: neutron api request context
:param id: UUID representing the port to delete.
:raises: exceptions.PortInUse
:raises: exceptions.PortNotFound
:raises: exceptions.NetworkNotFound
:raises: RemoteRestError
"""
LOG.debug(_("NeutronRestProxyV2: delete_port() called"))
# if needed, check to see if this is a port owned by
# an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, port_id)
with context.session.begin(subtransactions=True):
self.disassociate_floatingips(context, port_id)
self._delete_port_security_group_bindings(context, port_id)
super(NeutronRestProxyV2, self).delete_port(context, port_id)
def _delete_port(self, context, port_id):
port = super(NeutronRestProxyV2, self).get_port(context, port_id)
# Tenant ID must come from network in case the network is shared
tenant_id = self._get_port_net_tenantid(context, port)
# Delete from DB
ret_val = super(NeutronRestProxyV2,
self)._delete_port(context, port_id)
self.servers.rest_delete_port(tenant_id, port['network_id'], port_id)
return ret_val
def create_subnet(self, context, subnet):
LOG.debug(_("NeutronRestProxyV2: create_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# create subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).create_subnet(context, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
def update_subnet(self, context, id, subnet):
LOG.debug(_("NeutronRestProxyV2: update_subnet() called"))
self._warn_on_state_status(subnet['subnet'])
with context.session.begin(subtransactions=True):
# update subnet in DB
new_subnet = super(NeutronRestProxyV2,
self).update_subnet(context, id, subnet)
net_id = new_subnet['network_id']
orig_net = super(NeutronRestProxyV2,
self).get_network(context, net_id)
# update network on network controller
self._send_update_network(orig_net, context)
return new_subnet
def delete_subnet(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_subnet() called"))
orig_subnet = super(NeutronRestProxyV2, self).get_subnet(context, id)
net_id = orig_subnet['network_id']
with context.session.begin(subtransactions=True):
# delete subnet in DB
super(NeutronRestProxyV2, self).delete_subnet(context, id)
orig_net = super(NeutronRestProxyV2, self).get_network(context,
net_id)
# update network on network controller - exception will rollback
self._send_update_network(orig_net, context)
def _get_tenant_default_router_rules(self, tenant):
rules = cfg.CONF.ROUTER.tenant_default_router_rule
defaultset = []
tenantset = []
for rule in rules:
items = rule.split(':')
if len(items) == 5:
(tenantid, source, destination, action, nexthops) = items
elif len(items) == 4:
(tenantid, source, destination, action) = items
nexthops = ''
else:
continue
parsedrule = {'source': source,
'destination': destination, 'action': action,
'nexthops': nexthops.split(',')}
if parsedrule['nexthops'][0] == '':
parsedrule['nexthops'] = []
if tenantid == '*':
defaultset.append(parsedrule)
if tenantid == tenant:
tenantset.append(parsedrule)
if tenantset:
return tenantset
return defaultset
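# Example (hypothetical) values for ROUTER.tenant_default_router_rule as
# parsed above; each entry is 'tenant:source:destination:action[:nexthops]':
#   tenant_default_router_rule = ['*:any:any:permit',
#                                 'tenantA:10.0.0.0/8:any:permit:1.1.1.1,2.2.2.2']
# The '*' rule is the fallback for all tenants; a tenant-specific set, when
# present, replaces the fallback entirely.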
def create_router(self, context, router):
LOG.debug(_("NeutronRestProxyV2: create_router() called"))
self._warn_on_state_status(router['router'])
tenant_id = self._get_tenant_id_for_create(context, router["router"])
# set default router rules
rules = self._get_tenant_default_router_rules(tenant_id)
router['router']['router_rules'] = rules
with context.session.begin(subtransactions=True):
# create router in DB
new_router = super(NeutronRestProxyV2, self).create_router(context,
router)
mapped_router = self._map_state_and_status(new_router)
self.servers.rest_create_router(tenant_id, mapped_router)
# return created router
return new_router
def update_router(self, context, router_id, router):
LOG.debug(_("NeutronRestProxyV2.update_router() called"))
self._warn_on_state_status(router['router'])
orig_router = super(NeutronRestProxyV2, self).get_router(context,
router_id)
tenant_id = orig_router["tenant_id"]
with context.session.begin(subtransactions=True):
new_router = super(NeutronRestProxyV2,
self).update_router(context, router_id, router)
router = self._map_state_and_status(new_router)
# update router on network controller
self.servers.rest_update_router(tenant_id, router, router_id)
# return updated router
return new_router
def delete_router(self, context, router_id):
LOG.debug(_("NeutronRestProxyV2: delete_router() called"))
with context.session.begin(subtransactions=True):
orig_router = self._get_router(context, router_id)
tenant_id = orig_router["tenant_id"]
# Ensure that the router is not used
router_filter = {'router_id': [router_id]}
fips = self.get_floatingips_count(context.elevated(),
filters=router_filter)
if fips:
raise l3.RouterInUse(router_id=router_id)
device_owner = l3_db.DEVICE_OWNER_ROUTER_INTF
device_filter = {'device_id': [router_id],
'device_owner': [device_owner]}
ports = self.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
ret_val = super(NeutronRestProxyV2,
self).delete_router(context, router_id)
# delete from network ctrl
self.servers.rest_delete_router(tenant_id, router_id)
return ret_val
def add_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: add_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
with context.session.begin(subtransactions=True):
# create interface in DB
new_intf_info = super(NeutronRestProxyV2,
self).add_router_interface(context,
router_id,
interface_info)
port = self._get_port(context, new_intf_info['port_id'])
net_id = port['network_id']
subnet_id = new_intf_info['subnet_id']
# we will use the port's network id as interface's id
interface_id = net_id
intf_details = self._get_router_intf_details(context,
interface_id,
subnet_id)
# create interface on the network controller
self.servers.rest_add_router_interface(tenant_id, router_id,
intf_details)
return new_intf_info
def remove_router_interface(self, context, router_id, interface_info):
LOG.debug(_("NeutronRestProxyV2: remove_router_interface() called"))
# Validate args
router = self._get_router(context, router_id)
tenant_id = router['tenant_id']
# we will first get the interface identifier before deleting in the DB
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
port = self._get_port(context, interface_info['port_id'])
interface_id = port['network_id']
elif 'subnet_id' in interface_info:
subnet = self._get_subnet(context, interface_info['subnet_id'])
interface_id = subnet['network_id']
else:
msg = _("Either subnet_id or port_id must be specified")
raise exceptions.BadRequest(resource='router', msg=msg)
with context.session.begin(subtransactions=True):
# remove router interface in DB
del_ret = super(NeutronRestProxyV2,
self).remove_router_interface(context,
router_id,
interface_info)
# remove the interface on the network controller
self.servers.rest_remove_router_interface(tenant_id, router_id,
interface_id)
return del_ret
def create_floatingip(self, context, floatingip):
LOG.debug(_("NeutronRestProxyV2: create_floatingip() called"))
with context.session.begin(subtransactions=True):
# create floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).create_floatingip(context, floatingip)
# create floatingip on the network controller
try:
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_create_floatingip(
new_fl_ip['tenant_id'], new_fl_ip)
else:
self._send_floatingip_update(context)
except servermanager.RemoteRestError as e:
with excutils.save_and_reraise_exception():
LOG.error(
_("NeutronRestProxyV2: Unable to create remote "
"floating IP: %s"), e)
# return created floating IP
return new_fl_ip
def update_floatingip(self, context, id, floatingip):
LOG.debug(_("NeutronRestProxyV2: update_floatingip() called"))
with context.session.begin(subtransactions=True):
# update floatingip in DB
new_fl_ip = super(NeutronRestProxyV2,
self).update_floatingip(context, id, floatingip)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_update_floatingip(new_fl_ip['tenant_id'],
new_fl_ip, id)
else:
self._send_floatingip_update(context)
return new_fl_ip
def delete_floatingip(self, context, id):
LOG.debug(_("NeutronRestProxyV2: delete_floatingip() called"))
with context.session.begin(subtransactions=True):
# delete floating IP in DB
old_fip = super(NeutronRestProxyV2, self).get_floatingip(context,
id)
super(NeutronRestProxyV2, self).delete_floatingip(context, id)
# update network on network controller
if 'floatingip' in self.servers.get_capabilities():
self.servers.rest_delete_floatingip(old_fip['tenant_id'], id)
else:
self._send_floatingip_update(context)
def disassociate_floatingips(self, context, port_id):
LOG.debug(_("NeutronRestProxyV2: diassociate_floatingips() called"))
super(NeutronRestProxyV2, self).disassociate_floatingips(context,
port_id)
self._send_floatingip_update(context)
def _send_floatingip_update(self, context):
try:
ext_net_id = self.get_external_network_id(context)
if ext_net_id:
# Use the elevated state of the context for the ext_net query
admin_context = context.elevated()
ext_net = super(NeutronRestProxyV2,
self).get_network(admin_context, ext_net_id)
# update external network on network controller
self._send_update_network(ext_net, admin_context)
except exceptions.TooManyExternalNetworks:
# get_external_network_id can raise errors when multiple external
# networks are detected, which isn't supported by the Plugin
LOG.error(_("NeutronRestProxyV2: too many external networks"))
def _add_host_route(self, context, destination, port):
subnet = {}
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
nexthop = fixed_ip['ip_address']
subnet['host_routes'] = [{'destination': destination,
'nexthop': nexthop}]
updated_subnet = self.update_subnet(context,
subnet_id,
{'subnet': subnet})
payload = {'subnet': updated_subnet}
self._dhcp_agent_notifier.notify(context, payload,
'subnet.update.end')
LOG.debug(_("Adding host route: "))
LOG.debug(_("Destination:%(dst)s nexthop:%(next)s"),
{'dst': destination, 'next': nexthop})
| apache-2.0 |
bosha/torhandlers | torhandlers/rest/base.py | 1 | 1395 | # -*- coding: utf-8 -*-
__author__ = 'Alex Bo'
__email__ = 'bosha@the-bosha.ru'
import json
from torhandlers.base import GenericHandler, ContextMixin, HTTPError
class JSONResponseMixin:
def render_json(self, context, **kwargs):
data = self.get_json_data(context=context, **kwargs)
self.write(data)
self.set_header('Content-Type', 'application/json')
self.finish()
def get_json_data(self, context, **kwargs):
raise NotImplementedError(
"get_json_data should be implemented in %s" % (self.__class__.__name__)
)
def render_json_error(self, status_code=500, errors=None, **kwargs):
context = {
'success': False,
'errors': errors
}
if len(kwargs) > 0:
context.update(kwargs)
self.set_status(status_code)
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(context))
self.finish()
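# Hypothetical subclass sketch (names are illustrative only) showing the
# get_json_data() contract expected by render_json() above:
#
#   class ItemListHandler(JSONTemplateHandler):
#       def get_json_data(self, context, **kwargs):
#           return json.dumps({'success': True,
#                              'items': context.get('items', [])})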
class JSONGenericHandler(GenericHandler):
def initialize(self):
super().initialize()
if self.request_ajax() is False:
raise HTTPError(403)
class JSONTemplateHandler(JSONResponseMixin, ContextMixin, JSONGenericHandler):
def get(self, *args, **kwargs):
super().get(*args, **kwargs)
context = self.get_context_data(**kwargs)
return self.render_json(context=context)
| mit |
yongshengwang/hue | desktop/core/ext-py/pyopenssl/doc/tools/sgmlconv/esistools.py | 37 | 9087 | """Miscellaneous utility functions useful for dealing with ESIS streams."""
import re
import string
import xml.dom.pulldom
import xml.sax
import xml.sax.handler
import xml.sax.xmlreader
_data_match = re.compile(r"[^\\][^\\]*").match
def decode(s):
r = ''
while s:
m = _data_match(s)
if m:
r = r + m.group()
s = s[m.end():]
elif s[1] == "\\":
r = r + "\\"
s = s[2:]
elif s[1] == "n":
r = r + "\n"
s = s[2:]
elif s[1] == "%":
s = s[2:]
n, s = s.split(";", 1)
r = r + unichr(int(n))
else:
raise ValueError, "can't handle " + `s`
return r
_charmap = {}
for c in map(chr, range(256)):
_charmap[c] = c
_charmap["\n"] = r"\n"
_charmap["\\"] = r"\\"
del c
_null_join = ''.join
def encode(s):
return _null_join(map(_charmap.get, s))
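# Illustrative round-trip (hypothetical values, not part of the original
# module): encode("a\nb") yields the escaped form r"a\nb", and
# decode(r"a\nb") restores the real newline; literal backslashes survive,
# e.g. decode(r"a\\b") == "a\\b".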
class ESISReader(xml.sax.xmlreader.XMLReader):
"""SAX Reader which reads from an ESIS stream.
No verification of the document structure is performed by the
reader; a general verifier could be used as the target
ContentHandler instance.
"""
_decl_handler = None
_lexical_handler = None
_public_id = None
_system_id = None
_buffer = ""
_is_empty = 0
_lineno = 0
_started = 0
def __init__(self, contentHandler=None, errorHandler=None):
xml.sax.xmlreader.XMLReader.__init__(self)
self._attrs = {}
self._attributes = Attributes(self._attrs)
self._locator = Locator()
self._empties = {}
if contentHandler:
self.setContentHandler(contentHandler)
if errorHandler:
self.setErrorHandler(errorHandler)
def get_empties(self):
return self._empties.keys()
#
# XMLReader interface
#
def parse(self, source):
raise RuntimeError
self._locator._public_id = source.getPublicId()
self._locator._system_id = source.getSystemId()
fp = source.getByteStream()
handler = self.getContentHandler()
if handler:
handler.startDocument()
lineno = 0
while 1:
token, data = self._get_token(fp)
if token is None:
break
lineno = lineno + 1
self._locator._lineno = lineno
self._handle_token(token, data)
handler = self.getContentHandler()
if handler:
handler.endDocument()
def feed(self, data):
if not self._started:
handler = self.getContentHandler()
if handler:
handler.startDocument()
self._started = 1
data = self._buffer + data
self._buffer = None
lines = data.split("\n")
if lines:
for line in lines[:-1]:
self._lineno = self._lineno + 1
self._locator._lineno = self._lineno
if not line:
e = xml.sax.SAXParseException(
"ESIS input line contains no token type mark",
None, self._locator)
self.getErrorHandler().error(e)
else:
self._handle_token(line[0], line[1:])
self._buffer = lines[-1]
else:
self._buffer = ""
def close(self):
handler = self.getContentHandler()
if handler:
handler.endDocument()
self._buffer = ""
def _get_token(self, fp):
try:
line = fp.readline()
except IOError, e:
e = xml.sax.SAXException("I/O error reading input stream", e)
self.getErrorHandler().fatalError(e)
return
if not line:
return None, None
if line[-1] == "\n":
line = line[:-1]
if not line:
e = xml.sax.SAXParseException(
"ESIS input line contains no token type mark",
None, self._locator)
self.getErrorHandler().error(e)
return
return line[0], line[1:]
def _handle_token(self, token, data):
handler = self.getContentHandler()
if token == '-':
if data and handler:
handler.characters(decode(data))
elif token == ')':
if handler:
handler.endElement(decode(data))
elif token == '(':
if self._is_empty:
self._empties[data] = 1
if handler:
handler.startElement(data, self._attributes)
self._attrs.clear()
self._is_empty = 0
elif token == 'A':
name, value = data.split(' ', 1)
if value != "IMPLIED":
type, value = value.split(' ', 1)
self._attrs[name] = (decode(value), type)
elif token == '&':
# entity reference in SAX?
pass
elif token == '?':
if handler:
if ' ' in data:
target, data = string.split(data, None, 1)
else:
target, data = data, ""
handler.processingInstruction(target, decode(data))
elif token == 'N':
handler = self.getDTDHandler()
if handler:
handler.notationDecl(data, self._public_id, self._system_id)
self._public_id = None
self._system_id = None
elif token == 'p':
self._public_id = decode(data)
elif token == 's':
self._system_id = decode(data)
elif token == 'e':
self._is_empty = 1
elif token == 'C':
pass
else:
e = xml.sax.SAXParseException("unknown ESIS token in event stream",
None, self._locator)
self.getErrorHandler().error(e)
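# ESIS event-line legend, as dispatched above: '-' character data, '(' start
# tag, ')' end tag, 'A' attribute, '?' processing instruction, '&' entity
# reference (ignored), 'N' notation declaration, 'p'/'s' public/system id,
# 'e' empty-element flag, 'C' document complete (ignored).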
def setContentHandler(self, handler):
old = self.getContentHandler()
if old:
old.setDocumentLocator(None)
if handler:
handler.setDocumentLocator(self._locator)
xml.sax.xmlreader.XMLReader.setContentHandler(self, handler)
def getProperty(self, property):
if property == xml.sax.handler.property_lexical_handler:
return self._lexical_handler
elif property == xml.sax.handler.property_declaration_handler:
return self._decl_handler
else:
raise xml.sax.SAXNotRecognizedException("unknown property %s"
% `property`)
def setProperty(self, property, value):
if property == xml.sax.handler.property_lexical_handler:
if self._lexical_handler:
self._lexical_handler.setDocumentLocator(None)
if value:
value.setDocumentLocator(self._locator)
self._lexical_handler = value
elif property == xml.sax.handler.property_declaration_handler:
if self._decl_handler:
self._decl_handler.setDocumentLocator(None)
if value:
value.setDocumentLocator(self._locator)
self._decl_handler = value
else:
raise xml.sax.SAXNotRecognizedException()
def getFeature(self, feature):
if feature == xml.sax.handler.feature_namespaces:
return 1
else:
return xml.sax.xmlreader.XMLReader.getFeature(self, feature)
def setFeature(self, feature, enabled):
if feature == xml.sax.handler.feature_namespaces:
pass
else:
xml.sax.xmlreader.XMLReader.setFeature(self, feature, enabled)
class Attributes(xml.sax.xmlreader.AttributesImpl):
# self._attrs has the form {name: (value, type)}
def getType(self, name):
return self._attrs[name][1]
def getValue(self, name):
return self._attrs[name][0]
def getValueByQName(self, name):
return self._attrs[name][0]
def __getitem__(self, name):
return self._attrs[name][0]
def get(self, name, default=None):
if self._attrs.has_key(name):
return self._attrs[name][0]
return default
def items(self):
L = []
for name, (value, type) in self._attrs.items():
L.append((name, value))
return L
def values(self):
L = []
for value, type in self._attrs.values():
L.append(value)
return L
class Locator(xml.sax.xmlreader.Locator):
_lineno = -1
_public_id = None
_system_id = None
def getLineNumber(self):
return self._lineno
def getPublicId(self):
return self._public_id
def getSystemId(self):
return self._system_id
def parse(stream_or_string, parser=None):
if type(stream_or_string) in [type(""), type(u"")]:
stream = open(stream_or_string)
else:
stream = stream_or_string
if not parser:
parser = ESISReader()
return xml.dom.pulldom.DOMEventStream(stream, parser, (2 ** 14) - 20)
| apache-2.0 |
jneves/python-social-auth | social/apps/tornado_app/utils.py | 76 | 1546 | import warnings
from functools import wraps
from social.utils import setting_name
from social.strategies.utils import get_strategy
from social.backends.utils import get_backend
DEFAULTS = {
'STORAGE': 'social.apps.tornado_app.models.TornadoStorage',
'STRATEGY': 'social.strategies.tornado_strategy.TornadoStrategy'
}
def get_helper(request_handler, name):
return request_handler.settings.get(setting_name(name),
DEFAULTS.get(name, None))
def load_strategy(request_handler):
strategy = get_helper(request_handler, 'STRATEGY')
storage = get_helper(request_handler, 'STORAGE')
return get_strategy(strategy, storage, request_handler)
def load_backend(request_handler, strategy, name, redirect_uri):
backends = get_helper(request_handler, 'AUTHENTICATION_BACKENDS')
Backend = get_backend(backends, name)
return Backend(strategy, redirect_uri)
def psa(redirect_uri=None):
def decorator(func):
@wraps(func)
def wrapper(self, backend, *args, **kwargs):
uri = redirect_uri
if uri and not uri.startswith('/'):
uri = self.reverse_url(uri, backend)
self.strategy = load_strategy(self)
self.backend = load_backend(self, self.strategy, backend, uri)
return func(self, backend, *args, **kwargs)
return wrapper
return decorator
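# Hypothetical usage sketch (handler class, URL name and helper are
# illustrative only):
#
#   class CompleteHandler(tornado.web.RequestHandler):
#       @psa('complete')
#       def get(self, backend):
#           # self.strategy and self.backend were set by the decorator
#           do_complete(self.backend, login=lambda *a, **kw: None)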
def strategy(*args, **kwargs):
warnings.warn('@strategy decorator is deprecated, use @psa instead')
return psa(*args, **kwargs)
| bsd-3-clause |
iot-alex/mosquitto | test/broker/02-subscribe-qos0.py | 18 | 1209 | #!/usr/bin/env python
# Test whether a SUBSCRIBE to a topic with QoS 0 results in the correct SUBACK packet.
import subprocess
import socket
import time
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
mid = 53
keepalive = 60
connect_packet = mosq_test.gen_connect("subscribe-qos0-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
subscribe_packet = mosq_test.gen_subscribe(mid, "qos0/test", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
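# For a QoS 0 subscription the broker must echo the same mid in its SUBACK
# and grant QoS 0, which is exactly the packet generated above.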
broker = subprocess.Popen(['../../src/mosquitto', '-p', '1888'], stderr=subprocess.PIPE)
try:
time.sleep(0.5)
sock = mosq_test.do_client_connect(connect_packet, connack_packet)
sock.send(subscribe_packet)
if mosq_test.expect_packet(sock, "suback", suback_packet):
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
if rc:
(stdo, stde) = broker.communicate()
print(stde)
exit(rc)
| bsd-3-clause |
tillahoffmann/tensorflow | tensorflow/python/training/checkpoint_utils.py | 27 | 13107 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver
__all__ = [
"load_checkpoint", "load_variable", "list_variables", "init_from_checkpoint"
]
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return pywrap_tensorflow.NewCheckpointReader(filename)
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
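# Illustrative usage (checkpoint path is hypothetical):
#   for name, shape in list_variables('/tmp/model.ckpt'):
#       print(name, shape)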
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Initializes current variables with tensors loaded from given checkpoint.
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
checkpoint's root (e.g. no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.variable_scope('new_scope_1'):
var1 = tf.get_variable('var1', shape=[20, 2],
initializer=tf.zeros_initializer())
with tf.variable_scope('new_scope_2'):
var2 = tf.get_variable('var2', shape=[50, 4],
initializer=tf.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.get_variable(name='var3', shape=[100, 100],
initializer=tf.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1/'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph.
"""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in six.iteritems(assignment_map):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
is_var = lambda x: isinstance(x, variables.Variable)
if is_var(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(is_var(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if is_var(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.info("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in scope_variables:
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.info("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return saver.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer"):
"""Overrides given variable's initialization op.
Sets variable initializer to assign op that initializes variable from tensor's
value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
restore_op = io_ops.restore_v2(
ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
variable._initializer_op = state_ops.assign(variable, restore_op) # pylint:disable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
| apache-2.0 |
jbenden/ansible | lib/ansible/modules/cloud/smartos/imgadm.py | 9 | 9841 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, 2017 Jasper Lievisse Adriaanse <j@jasper.la>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: imgadm
short_description: Manage SmartOS images
description:
- Manage SmartOS virtual machine images through imgadm(1M)
version_added: "2.3"
author: Jasper Lievisse Adriaanse (@jasperla)
options:
force:
required: false
choices: [ yes, no ]
description:
- Force a given operation (where supported by imgadm(1M)).
pool:
required: false
default: zones
description:
- zpool to import to or delete images from.
source:
required: false
description:
- URI for the image source.
state:
required: true
choices: [ present, absent, deleted, imported, updated, vacuumed ]
description:
- State the object operated on should be in. C(imported) is an alias
for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
and C(uuid) to C(*), it will remove all unused images.
type:
required: false
choices: [ imgapi, docker, dsapi ]
default: imgapi
description:
- Type for image sources.
uuid:
required: false
description:
- Image UUID. Can either be a full UUID or C(*) for all images.
requirements:
- python >= 2.6
'''
EXAMPLES = '''
- name: Import an image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: imported
- name: Delete an image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: deleted
- name: Update all images
imgadm:
uuid: '*'
state: updated
- name: Update a single image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: updated
- name: Add a source
imgadm:
source: 'https://datasets.project-fifo.net'
state: present
- name: Add a Docker source
imgadm:
source: 'https://docker.io'
type: docker
state: present
- name: Remove a source
imgadm:
source: 'https://docker.io'
state: absent
'''
RETURN = '''
source:
description: Source that is managed.
returned: When not managing an image.
type: string
sample: https://datasets.project-fifo.net
uuid:
description: UUID for an image operated on.
returned: When not managing an image source.
type: string
sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
state:
description: State of the target, after execution.
returned: success
type: string
sample: 'present'
'''
import re
from ansible.module_utils.basic import AnsibleModule
# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a
# -E option to return any errors in JSON, the generated JSON does not play well
# with the JSON parsers of Python. The returned message contains '\n' as part of
# the stacktrace, which breaks the parsers.
class Imgadm(object):
def __init__(self, module):
self.module = module
self.params = module.params
self.cmd = module.get_bin_path('imgadm', required=True)
self.changed = False
self.uuid = module.params['uuid']
# Since there are a number of (natural) aliases, prevent having to look
# them up every time we operate on `state`.
if self.params['state'] in ['present', 'imported', 'updated']:
self.present = True
else:
self.present = False
# Perform basic UUID validation upfront.
if self.uuid and self.uuid != '*':
if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
# Helper method to massage stderr
def errmsg(self, stderr):
match = re.match('^imgadm .*?: error \(\w+\): (.*): .*', stderr)
if match:
return match.groups()[0]
else:
return 'Unexpected failure'
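    # Illustrative stderr -> message extraction (the sample input is made up):
    #   errmsg('imgadm import: error (ActiveImageNotFound): no active image: details')
    #   -> 'no active image'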
def update_images(self):
if self.uuid == '*':
cmd = '{0} update'.format(self.cmd)
else:
cmd = '{0} update {1}'.format(self.cmd, self.uuid)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
# There is no feedback from imgadm(1M) to determine if anything
# was actually changed. So treat this as an 'always-changes' operation.
# Note that 'imgadm -v' produces unparseable JSON...
self.changed = True
def manage_sources(self):
force = self.params['force']
source = self.params['source']
imgtype = self.params['type']
cmd = '{0} sources'.format(self.cmd)
if force:
cmd += ' -f'
if self.present:
cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
# Check the various responses.
# Note that trying to add a source with the wrong type is handled
# above as it results in a non-zero status.
regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
if re.match(regex, stdout):
self.changed = False
regex = 'Added "%s" image source "%s"' % (imgtype, source)
if re.match(regex, stdout):
self.changed = True
else:
# Type is ignored by imgadm(1M) here
cmd += ' -d %s' % (source)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
regex = 'Do not have image source "%s", no change' % (source)
if re.match(regex, stdout):
self.changed = False
regex = 'Deleted ".*" image source "%s"' % (source)
if re.match(regex, stdout):
self.changed = True
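    # The commands assembled above end up looking like (the URL is illustrative):
    #   imgadm sources -a https://images.example.com -t imgapi   (state: present)
    #   imgadm sources -d https://images.example.com             (state: absent)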
def manage_images(self):
pool = self.params['pool']
state = self.params['state']
if state == 'vacuumed':
# Unconditionally pass '--force', otherwise we're prompted with 'y/N'
cmd = '{0} vacuum -f'.format(self.cmd)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
else:
if stdout == '':
self.changed = False
else:
self.changed = True
if self.present:
cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
(rc, stdout, stderr) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
regex = 'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
if re.match(regex, stdout):
self.changed = False
regex = '.*ActiveImageNotFound.*'
if re.match(regex, stderr):
self.changed = False
regex = 'Imported image {0}.*'.format(self.uuid)
if re.match(regex, stdout.splitlines()[-1]):
self.changed = True
else:
cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
(rc, stdout, stderr) = self.module.run_command(cmd)
regex = '.*ImageNotInstalled.*'
if re.match(regex, stderr):
# Even if the 'rc' was non-zero (3), we handled the situation
# in order to determine if there was a change.
self.changed = False
regex = 'Deleted image {0}'.format(self.uuid)
if re.match(regex, stdout):
self.changed = True
def main():
module = AnsibleModule(
argument_spec=dict(
force=dict(default=None, type='bool'),
pool=dict(default='zones'),
source=dict(default=None),
state=dict(default=None, required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
uuid=dict(default=None)
),
# This module relies largely on imgadm(1M) to enforce idempotency, which does not
# provide a "noop" (or equivalent) mode to do a dry-run.
supports_check_mode=False,
)
imgadm = Imgadm(module)
uuid = module.params['uuid']
source = module.params['source']
state = module.params['state']
result = {'state': state}
# Either manage sources or images.
if source:
result['source'] = source
imgadm.manage_sources()
else:
result['uuid'] = uuid
if state == 'updated':
imgadm.update_images()
else:
# Make sure we operate on a single image for the following actions
if (uuid == '*') and (state != 'vacuumed'):
module.fail_json(msg='Can only specify uuid as "*" when updating or vacuuming image(s)')
imgadm.manage_images()
result['changed'] = imgadm.changed
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
kadashu/satori | satori-rules/plugin/libs/gevent/util.py | 12 | 1805 | # Copyright (c) 2009 Denis Bilenko. See LICENSE for details.
"""
Low-level utilities.
"""
from __future__ import absolute_import
import functools
__all__ = ['wrap_errors']
class wrap_errors(object):
"""
Helper to make function return an exception, rather than raise it.
Because every exception that is unhandled by greenlet will be logged,
it is desirable to prevent non-error exceptions from leaving a greenlet.
This can be done with a simple ``try/except`` construct::
def wrapped_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except (TypeError, ValueError, AttributeError) as ex:
return ex
This class provides a shortcut to write that in one line::
wrapped_func = wrap_errors((TypeError, ValueError, AttributeError), func)
It also preserves ``__str__`` and ``__repr__`` of the original function.
"""
# QQQ could also support using wrap_errors as a decorator
def __init__(self, errors, func):
"""
Calling this makes a new function from *func*, such that it catches *errors* (an
:exc:`BaseException` subclass, or a tuple of :exc:`BaseException` subclasses) and
returns the exception as a value.
"""
self.__errors = errors
self.__func = func
# Set __doc__, __wrapped__, etc, especially useful on Python 3.
functools.update_wrapper(self, func)
def __call__(self, *args, **kwargs):
func = self.__func
try:
return func(*args, **kwargs)
except self.__errors as ex:
return ex
def __str__(self):
return str(self.__func)
def __repr__(self):
return repr(self.__func)
def __getattr__(self, name):
return getattr(self.__func, name)
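# A minimal usage sketch (the spawn target and inputs are illustrative):
#
#   import gevent
#   safe_int = wrap_errors((ValueError, TypeError), int)
#   result = gevent.spawn(safe_int, "not a number").get()
#   assert isinstance(result, ValueError)  # returned, not raised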
| apache-2.0 |
xiangel/hue | desktop/core/src/desktop/lib/test_export_csvxls.py | 19 | 3248 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tablib
from desktop.lib.export_csvxls import MAX_XLS_ROWS, MAX_XLS_COLS, create_generator, make_response
from nose.tools import assert_equal
def content_generator(header, data):
yield header, data
def test_export_csv():
headers = ["x", "y"]
data = [ ["1", "2"], ["3", "4"], ["5,6", "7"], [None, None] ]
# Check CSV
generator = create_generator(content_generator(headers, data), "csv")
response = make_response(generator, "csv", "foo")
assert_equal("application/csv", response["content-type"])
content = ''.join(response.streaming_content)
assert_equal('x,y\r\n1,2\r\n3,4\r\n"5,6",7\r\nNULL,NULL\r\n', content)
assert_equal("attachment; filename=foo.csv", response["content-disposition"])
def test_export_xls():
headers = ["x", "y"]
data = [ ["1", "2"], ["3", "4"], ["5,6", "7"], [None, None] ]
dataset = tablib.Dataset(headers=headers)
for row in data:
dataset.append([cell if cell is not None else "NULL" for cell in row])
# Check XLS
generator = create_generator(content_generator(headers, data), "xls")
response = make_response(generator, "xls", "foo")
assert_equal("application/xls", response["content-type"])
content = ''.join(response.streaming_content)
assert_equal(dataset.xls, content)
assert_equal("attachment; filename=foo.xls", response["content-disposition"])
def test_export_xls_truncate_rows():
headers = ["a"]
data = [["1"]] * (MAX_XLS_ROWS + 1)
dataset = tablib.Dataset(headers=headers)
dataset.extend(data[:MAX_XLS_ROWS])
# Check XLS
generator = create_generator(content_generator(headers, data), "xls")
response = make_response(generator, "xls", "foo")
assert_equal("application/xls", response["content-type"])
content = ''.join(response.streaming_content)
assert_equal(dataset.xls, content)
assert_equal("attachment; filename=foo.xls", response["content-disposition"])
def test_export_xls_truncate_cols():
headers = ["a"] * (MAX_XLS_COLS + 1)
data = [["1"] * (MAX_XLS_COLS + 1)]
dataset = tablib.Dataset(headers=headers[:MAX_XLS_COLS])
dataset.extend([data[0][:MAX_XLS_COLS]])
# Check XLS
generator = create_generator(content_generator(headers, data), "xls")
response = make_response(generator, "xls", "foo")
assert_equal("application/xls", response["content-type"])
content = ''.join(response.streaming_content)
assert_equal(dataset.xls, content)
assert_equal("attachment; filename=foo.xls", response["content-disposition"])
| apache-2.0 |
tls-dna/k-router | app_ui/hexcanvas.py | 1 | 1599 | from kivy.uix.floatlayout import FloatLayout
from kivy.properties import NumericProperty, ObjectProperty, BoundedNumericProperty, ListProperty
from .node import Node
from math import sqrt
class HexCanvas(FloatLayout):
last_node = ObjectProperty(None, allownone=True)
grid = ObjectProperty([])
row_count = BoundedNumericProperty(11, min=0, max=11)
column_count = BoundedNumericProperty(22, min=0, max=22)
vvhelix_id = NumericProperty(0)
scaffold_path = ListProperty([])
"""docstring for NanoCanvas"""
def __init__(self, **kwargs):
#super(HexCanvas, self).__init__(**kwargs)
super().__init__(**kwargs)
self.__construct()
def __construct(self):
x_start, y_start = 30, 30
a = 60
x_offset = a / 2
y_offset = a * sqrt(3) / 2
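        # Rows are spaced a*sqrt(3)/2 apart and every other row is shifted
        # right by a/2, producing a triangular lattice of hexagon centres
        # whose neighbour distance is exactly a.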
y = y_start
for j in range(self.row_count):
row = []
if j % 2 != 0:
offset = x_offset
else:
offset = 0
x = x_start + offset
for i in range(self.column_count):
node = Node(pos=(x, y), grid_id=(j, i))
row.append(node)
self.add_widget(node)
x += a
y += y_offset
self.grid.append(row)
def clean(self):
# TODO remove vhelixes and other stuff !!!
self.last_node = None
# for row in self.grid:
# for node in row:
# del node
self.grid = []
self.vvhelix_id = 0
self.scaffold_path = []
self.__construct()
| mit |
tszym/ansible | lib/ansible/modules/cloud/ovirt/ovirt_templates.py | 27 | 13962 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_templates
short_description: Module to manage virtual machine templates in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage virtual machine templates in oVirt/RHV."
options:
name:
description:
- "Name of the template to manage."
required: true
state:
description:
- "Should the template be present/absent/exported/imported/registered.
When C(state) is R(registered) and the unregistered template's name
belongs to an already registered in engine template then we fail
to register the unregistered template."
choices: ['present', 'absent', 'exported', 'imported', 'registered']
default: present
vm:
description:
- "Name of the VM, which will be used to create template."
description:
description:
- "Description of the template."
cpu_profile:
description:
- "CPU profile to be set to template."
cluster:
description:
- "Name of the cluster, where template should be created/imported."
exclusive:
description:
- "When C(state) is I(exported) this parameter indicates if the existing templates with the
same name should be overwritten."
export_domain:
description:
- "When C(state) is I(exported) or I(imported) this parameter specifies the name of the
export storage domain."
image_provider:
description:
- "When C(state) is I(imported) this parameter specifies the name of the image provider to be used."
image_disk:
description:
- "When C(state) is I(imported) and C(image_provider) is used this parameter specifies the name of disk
to be imported as template."
storage_domain:
description:
- "When C(state) is I(imported) this parameter specifies the name of the destination data storage domain.
When C(state) is C(registered) this parameter specifies the name of the data storage domain of the unregistered template."
clone_permissions:
description:
- "If I(True) then the permissions of the VM (only the direct ones, not the inherited ones)
will be copied to the created template."
- "This parameter is used only when C(state) I(present)."
default: False
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create template from vm
- ovirt_templates:
cluster: Default
name: mytemplate
vm: rhel7
cpu_profile: Default
description: Test
# Import template
- ovirt_templates:
state: imported
name: mytemplate
export_domain: myexport
storage_domain: mystorage
cluster: mycluster
# Remove template
- ovirt_templates:
state: absent
name: mytemplate
# Register template
- ovirt_templates:
state: registered
name: mytemplate
storage_domain: mystorage
cluster: mycluster
'''
RETURN = '''
id:
description: ID of the template which is managed
returned: On success if template is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
template:
description: "Dictionary of all the template attributes. Template attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/template."
returned: On success if template is found.
type: dict
'''
import time
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_dict_of_struct,
get_link_name,
get_id_by_name,
ovirt_full_argument_spec,
search_by_attributes,
search_by_name,
)
class TemplatesModule(BaseModule):
def build_entity(self):
return otypes.Template(
name=self._module.params['name'],
cluster=otypes.Cluster(
name=self._module.params['cluster']
) if self._module.params['cluster'] else None,
vm=otypes.Vm(
name=self._module.params['vm']
) if self._module.params['vm'] else None,
description=self._module.params['description'],
cpu_profile=otypes.CpuProfile(
id=search_by_name(
self._connection.system_service().cpu_profiles_service(),
self._module.params['cpu_profile'],
).id
) if self._module.params['cpu_profile'] else None,
)
def update_check(self, entity):
return (
equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('cpu_profile'), get_link_name(self._connection, entity.cpu_profile))
)
def _get_export_domain_service(self):
provider_name = self._module.params['export_domain'] or self._module.params['image_provider']
export_sds_service = self._connection.system_service().storage_domains_service()
export_sd = search_by_name(export_sds_service, provider_name)
if export_sd is None:
raise ValueError(
"Export storage domain/Image Provider '%s' wasn't found." % provider_name
)
return export_sds_service.service(export_sd.id)
def post_export_action(self, entity):
self._service = self._get_export_domain_service().templates_service()
def post_import_action(self, entity):
self._service = self._connection.system_service().templates_service()
def wait_for_import(module, templates_service):
    """Poll for the named template; return None if wait is disabled or the timeout expires."""
if module.params['wait']:
start = time.time()
timeout = module.params['timeout']
poll_interval = module.params['poll_interval']
while time.time() < start + timeout:
template = search_by_name(templates_service, module.params['name'])
if template:
return template
time.sleep(poll_interval)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent', 'exported', 'imported', 'registered'],
default='present',
),
name=dict(default=None, required=True),
vm=dict(default=None),
description=dict(default=None),
cluster=dict(default=None),
cpu_profile=dict(default=None),
disks=dict(default=[], type='list'),
clone_permissions=dict(type='bool'),
export_domain=dict(default=None),
storage_domain=dict(default=None),
exclusive=dict(type='bool'),
image_provider=dict(default=None),
image_disk=dict(default=None),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
templates_service = connection.system_service().templates_service()
templates_module = TemplatesModule(
connection=connection,
module=module,
service=templates_service,
)
state = module.params['state']
if state == 'present':
ret = templates_module.create(
result_state=otypes.TemplateStatus.OK,
clone_permissions=module.params['clone_permissions'],
)
elif state == 'absent':
ret = templates_module.remove()
elif state == 'exported':
template = templates_module.search_entity()
export_service = templates_module._get_export_domain_service()
export_template = search_by_attributes(export_service.templates_service(), id=template.id)
ret = templates_module.action(
entity=template,
action='export',
action_condition=lambda t: export_template is None,
wait_condition=lambda t: t is not None,
post_action=templates_module.post_export_action,
storage_domain=otypes.StorageDomain(id=export_service.get().id),
exclusive=module.params['exclusive'],
)
elif state == 'imported':
template = templates_module.search_entity()
if template:
ret = templates_module.create(
result_state=otypes.TemplateStatus.OK,
)
else:
kwargs = {}
if module.params['image_provider']:
kwargs.update(
disk=otypes.Disk(
name=module.params['image_disk']
),
template=otypes.Template(
name=module.params['name'],
),
import_as_template=True,
)
if module.params['image_disk']:
# We need to refresh storage domain to get list of images:
templates_module._get_export_domain_service().images_service().list()
glance_service = connection.system_service().openstack_image_providers_service()
image_provider = search_by_name(glance_service, module.params['image_provider'])
images_service = glance_service.service(image_provider.id).images_service()
else:
images_service = templates_module._get_export_domain_service().templates_service()
template_name = module.params['image_disk'] or module.params['name']
entity = search_by_name(images_service, template_name)
if entity is None:
raise Exception("Image/template '%s' was not found." % template_name)
images_service.service(entity.id).import_(
storage_domain=otypes.StorageDomain(
name=module.params['storage_domain']
) if module.params['storage_domain'] else None,
cluster=otypes.Cluster(
name=module.params['cluster']
) if module.params['cluster'] else None,
**kwargs
)
template = wait_for_import(module, templates_service)
ret = {
'changed': True,
'id': template.id,
'template': get_dict_of_struct(template),
}
elif state == 'registered':
storage_domains_service = connection.system_service().storage_domains_service()
# Find the storage domain with unregistered template:
sd_id = get_id_by_name(storage_domains_service, module.params['storage_domain'])
storage_domain_service = storage_domains_service.storage_domain_service(sd_id)
templates_service = storage_domain_service.templates_service()
# Find the unregistered template we want to register:
templates = templates_service.list(unregistered=True)
template = next(
(t for t in templates if t.name == module.params['name']),
None
)
changed = False
if template is None:
# Test if template is registered:
template = templates_module.search_entity()
if template is None:
raise ValueError(
"Template with name '%s' wasn't found." % module.params['name']
)
else:
changed = True
template_service = templates_service.template_service(template.id)
# Register the template into the system:
template_service.register(
cluster=otypes.Cluster(
name=module.params['cluster']
) if module.params['cluster'] else None,
template=otypes.Template(
name=module.params['name'],
),
)
if module.params['wait']:
template = wait_for_import(module, templates_service)
ret = {
'changed': changed,
'id': template.id,
'template': get_dict_of_struct(template)
}
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
emsrc/daeso-dutch | lib/daeso_nl/ga/exp/merge.py | 1 | 3889 | """
Merging step in graph alignment experiment
"""
import copy
import logging as log
from daeso.pgc.corpus import ParallelGraphCorpus, LOAD_NONE
from daeso.utils.opsys import makedirs
from daeso.utils.report import header
from daeso_nl.ga.corpusinst import CorpusInst
from daeso_nl.ga.merger import Merger
__all__ = [
"merge",
"merge_files",
"merge_corpus"
]
def merge(setting):
"""
Merge data
@param setting: Setting instance specifying the experimental setting
"""
if setting.merge:
log.info("\n" + header("MERGE STEP"))
makedirs(setting.pred_dir)
if setting.develop:
pred_fns = setting.make_pred_fns(setting.dev_true_fns)
merge_files(
setting.dev_inst_fns,
setting.dev_true_fns,
pred_fns,
merger=setting.merger,
descriptor=setting.descriptor,
n=setting.n,
binary=setting.binary)
if setting.validate:
pred_fns = setting.make_pred_fns(setting.val_true_fns)
merge_files(
setting.val_inst_fns,
setting.val_true_fns,
pred_fns,
merger=setting.merger,
descriptor=setting.descriptor,
n=setting.n,
binary=setting.binary)
def merge_files(inst_fns, true_fns, pred_fns, merger=Merger(),
descriptor=None, n=None, binary=False):
"""
Merge corpus instance files
@param inst_fns: list of corpus instance filenames
@param true_fns: list of corpus filenames containing the true alignments
@param pred_fns: list of predicted corpus filenames to be created
@param merger: instance of Merger class for merging instances into a graph
pair
@keyword descriptor: a Descriptor instance, required if corpus instances
are loaded in text format
@keyword n: limit merging to the first n files
@keyword binary: corpus instances in binary rather than text format
"""
assert isinstance(merger, Merger)
assert len(inst_fns) == len(true_fns) > 0
for inst_fname, true_fname, pred_fname in zip(inst_fns,
true_fns,
pred_fns)[:n]:
corpus_inst = CorpusInst()
if binary:
corpus_inst.loadbin(inst_fname)
else:
corpus_inst.loadtxt(inst_fname, descriptor.dtype)
true_corpus = ParallelGraphCorpus(inf=true_fname,
graph_loading=LOAD_NONE)
pred_corpus = merge_corpus(corpus_inst, true_corpus, merger)
log.info("saving predictd corpus {0}".format(inst_fname))
pred_corpus.write(pred_fname)
def merge_corpus(corpus_inst, true_corpus, merger):
"""
Merge matched relations from corpus instances into a new predicted corpus
derived from a true corpus.
@param corpus_inst: CorpusInst object containing the instances for a
corpus of aligned parallel graphs
@param true_corpus: a ParallelGraphCorpus instance containing the true
alignments; remains untouched
@param merger: instance of Merger class for merging instances into a
graph pair
@return: a new ParallelGraphCorpus instance containing the predicted
alignments
"""
# This copies the graph pairs as well, so the true corpus remains
# untouched. However, it might be cheaper to just create new graph pairs?
pred_corpus = copy.deepcopy(true_corpus)
for graph_inst, graph_pair in zip(corpus_inst, pred_corpus):
graph_pair.clear()
merger.merge(graph_inst, graph_pair)
return pred_corpus
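# A hedged end-to-end sketch of the functions above (file names and the
# text/binary choice are illustrative):
#
#   inst = CorpusInst()
#   inst.loadtxt("dev.inst", descriptor.dtype)
#   true_corpus = ParallelGraphCorpus(inf="dev_true.pgc",
#                                     graph_loading=LOAD_NONE)
#   pred_corpus = merge_corpus(inst, true_corpus, Merger())
#   pred_corpus.write("dev_pred.pgc")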
| gpl-3.0 |
AeroNotix/django-timetracker | vcs/activities.py | 1 | 30533 | import os
import imp
from django.conf import settings
from django.db import IntegrityError
from timetracker.vcs.models import Activity
def serialize_activityentry(entry):
'''Serializes a single ActivityEntry into a JSON-serializable dict.'''
return { # pragma: no cover
"id": entry.id,
"date": str(entry.creation_date),
"text": entry.activity.groupdetail,
"amount": int(entry.amount)
}
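# Illustrative return value (field values are made up):
#   {"id": 7, "date": "2013-05-01", "text": "Posting invoices", "amount": 12}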
def defaultplugins(acc=None):
'''Returns the plugins in the default location for either all accounts
or a single account.'''
return listplugins(settings.PLUGIN_DIRECTORY, acc=acc) # pragma: no cover
def pluginbyname(name, acc=None):
'''Returns the named plugin for either all accounts or a single
account.'''
return defaultplugins(acc=acc).get(name) # pragma: no cover
def listplugins(directory, acc=None): # pragma: no cover
'''Iterates through the passed-in directory, looking for raw Python
modules to import. When a module is imported we look up several
module-level attributes on it, deliberately without any error
checking.
We skip the error checking because a malformed plugin then fails
loudly and early, which is exactly the warning we want.
A more efficient version of this would be to load the module by
name whenever the user requests a specific report. However, this
presents a serious security risk, so instead of loading the report
processor the user asks for, we find the available reports, *then*
filter that list by what the user is allowed to access, and *then*
give them the requested report if it is in the filtered list.
Otherwise, we could end up with a directory traversal attack. This
is somewhat mitigated by the fact that imp.load_module requires an
explicit load path, but I much prefer the filtered approach over a
whimsical dynamic lookup method.
:param directory: :class:`str`, the name of the directory to
search for plugins.
:return: List of dictionaries containing both the module and the
module-level attributes for that module.
'''
plugins = {}
for f in os.listdir(directory):
# ignore irrelevant files and compiled python modules.
if f == "__init__.py" or f == "example.py" or f.endswith(".pyc"):
continue
g = f.replace(".py", "")
info = imp.find_module(g, [directory])
# dynamically import our module and extract the callback along
# with the attributes.
m = imp.load_module(g, *info)
if acc and acc not in m.ACCOUNTS:
continue
plugins[m.PLUGIN_NAME] = {
"name": m.PLUGIN_NAME,
"accounts": m.ACCOUNTS,
"callback": getattr(m, m.CALLBACK),
"module": m,
}
return plugins
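# A hedged sketch of a conforming plugin module (the attribute names are the
# ones the loader above requires; the account codes and callback body are
# illustrative):
#
#   # <PLUGIN_DIRECTORY>/volume_report.py
#   PLUGIN_NAME = "Volume Report"
#   ACCOUNTS = ["BGAP", "BKAP"]
#   CALLBACK = "generate"
#
#   def generate(data):
#       """Produce the report for the given data."""
#       return ...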
def createuseractivities():
activities = [
Activity(None, "CZAR", "Autorun", "Autorun", "# of autorun transaction launched", True, 1.52),
Activity(None, "CZAR", "Bellin update", "Foreign Exchange", "# of uploads", True, 2.04),
Activity(None, "CZAR", "Bellin update", "Treasury", "# of uploads", True, 0.98),
Activity(None, "CZAR", "Bellin update-cash flow", "Cash flow", "# of updates", True, 0.42),
Activity(None, "CZAR", "Call", "Call", "# of minutes", True, 1.00),
Activity(None, "CZAR", "Cash flow report", "Cash flow report", "# of minutes", True, 1.00),
Activity(None, "CZAR", "Clearing ", "Clearing ", "# of clearings ", True, 3.37),
Activity(None, "CZAR", "Clearing", "Cash on way (with report)", "# of clearings", True, 1.40),
Activity(None, "CZAR", "Clearing", "Leasing (with report)", "# of clearings", True, 2.82),
Activity(None, "CZAR", "Clearing", "Rest liability (with report)", "# of times process was done", True, 3.17),
Activity(None, "CZAR", "Daily tracker", "Daily tracker", "# of minutes", True, 1.00),
Activity(None, "CZAR", "Discount Report", "Discount Report_with email", "# of reports", True, 16.76),
Activity(None, "CZAR", "Download Bank Statement from Bank Website", "Download Bank Statement from Bank Website", "# of statements", True, 2.41),
Activity(None, "CZAR", "Emails sent", "Emails sent", "# of emails sent", True, 2.03),
Activity(None, "CZAR", "Exchange rate reevaluation for open receivables and payables", "Exchange rate reevaluation for open receivables and payables", "# of reports", True, 13.81),
Activity(None, "CZAR", "Exchange table rates differences", "Exchange table rates differences with mail", "# of reports", True, 26.45),
Activity(None, "CZAR", "GL to treasury reconciliation FSL10N", "GL to treasury reconciliation FSL10N", "# of reconciliations", True, 0.17),
Activity(None, "CZAR", "Investigation of accounts", "Investigation of accounts", "# of minutes", True, 1.00),
Activity(None, "CZAR", "Investments report", "Investments report", "# of minutes", True, 1.00),
Activity(None, "CZAR", "Open payables & receivables report", "Open payables & receivables report_with email", "# of reports", True, 32.86),
Activity(None, "CZAR", "Post payments", "Dailmler lotus", "# of times process was done", True, 11.34),
Activity(None, "CZAR", "Post payments", "Dailmler", "# of times process was done", True, 8.61),
Activity(None, "CZAR", "Post payments", "Post payments", "# of payments", True, 1.85),
Activity(None, "CZAR", "Prepayment reports", "Prepayment check accounts with reports", "# of accounts checked", True, 5.08),
Activity(None, "CZAR", "Prepayment reports", "Prepayment check accounts", "# of accounts checked", True, 0.87),
Activity(None, "CZAR", "Reevaluation", "Bank and cash", "# of times process was done", True, 3.33),
Activity(None, "CZAR", "Reevaluation", "Cash pooling", "# of times process was done", True, 8.01),
Activity(None, "CZAR", "Reevaluation", "Loan Behr Stuttgart with report", "# of times process was done", True, 7.43),
Activity(None, "CZAR", "Reevaluation", "Prepayment", "# of times process was done", True, 3.33),
Activity(None, "CZAR", "Research remittances", "Research remittances", "# of remittances searched", True, 2.56),
Activity(None, "CZAR", "Set off agreement", "Set off agreement", "# of minutes", True, 1.00),
Activity(None, "CZAR", "Special Client Request", "Special Client Request", "# of minutes", True, 1.00),
Activity(None, "CZAR", "Bellin update", "Treasury", "# of uploads", True, 0.98),
Activity(None, "CZAP", "Blocked invoices", "Blocked invoices", "# of invoices", True, 0.49),
Activity(None, "CZAP", "CRC mail", "Mail links deletion", "# of links/emails", True, 0.18),
Activity(None, "CZAP", "CRC mail", "Mail", "# of emails", True, 9.21),
Activity(None, "CZAP", "Call", "Call", "# of minutes", True, 1.00),
Activity(None, "CZAP", "Clearing f-44 and tracker", "Clearing f-44 and tracker", "# of clearings", True, 2.20),
Activity(None, "CZAP", "Consignment", "Add reference", "# of customers", True, 0.31),
Activity(None, "CZAP", "Consignment", "Add reference&check tax", "# of customers", True, 0.47),
Activity(None, "CZAP", "Consignment", "BASF", "# of minutes", True, 1.00),
Activity(None, "CZAP", "Consignment", "Booking from email with template", "# of consi booked", True, 10.00),
Activity(None, "CZAP", "Consignment", "Booking from email_verification of vendor", "# of vendors verified", True, 10.25),
Activity(None, "CZAP", "Consignment", "Check tax", "# of customers", True, 0.16),
Activity(None, "CZAP", "Consignment", "Consignment printing docs", "# of consigments printed", True, 0.98),
Activity(None, "CZAP", "Consignment", "Consignment without printing", "# of consigments posted", True, 2.01),
Activity(None, "CZAP", "Deleting invoices", "Deleting invoices", "# of invoices", True, 1.38),
Activity(None, "CZAP", "Invoice correction booking", "Invoice correction booking", "# of invoices", True, 4.14),
Activity(None, "CZAP", "Parked Invoices", "Parked Invoices", "# of invoices", True, 2.87),
Activity(None, "CZAP", "Posting invoices", "Posting invoices", "# of invoices", True, 3.48),
Activity(None, "CZAP", "Prepayment invoices posting", "Prepayment invoices posting", "# of invoices", True, 10.10),
Activity(None, "CZAP", "QC", "QC", "# of invoices", True, 3.37),
Activity(None, "CZAP", "Reminder letters", "Verification of invoice", "# of invoices", True, 6.58),
Activity(None, "CZAP", "Reminder letters", "Verification of letter_send email", "# of reminders", True, 2.47),
Activity(None, "CZAP", "Reports", "Unresolved parked invoices and debit notes", "# of reports", True, 46.08),
Activity(None, "CZAP", "Reports", "Volume reports for TL", "# of reports", True, 1.76),
Activity(None, "CZAP", "Reports", "Volume reports with email_to client", "# of reports", True, 2.97),
Activity(None, "CZAP", "Reversal", "Reversal", "# of reversals", True, 3.30),
Activity(None, "CZAP", "Smartfix", "Smartfix", "# of invoices", True, 2.06),
Activity(None, "CZAP", "Special Client Request", "Special Client Request", "# of minutes", True, 1.00),
Activity(None, "CZAP", "Speedi", "Speedi", "# of invoices in Speedi", True, 3.56),
Activity(None, "CZAP", "Telecommunication cost booking", "Telecommunication cost booking", "# of line items posted", True, 0.72),
Activity(None, "ALL", "Avoidable Losses", "No Load", "enter time in min", True, 1, "COUAL"),
Activity(None, "ALL", "Avoidable Losses", "System Downtime / System issues", "enter time in min", True, 1, "COUAL"),
Activity(None, "ALL", "Avoidable Losses", "Technical maintenance", "enter time in min", True, 1, "COUAL"),
Activity(None, "ALL", "Non Transaction Time", "Celebration", "enter time in min", True, 1, "COUTT"),
Activity(None, "ALL", "Non Transaction Time", "Documentation/ ISO Audits", "enter time in min", True, 1, "QPR"),
Activity(None, "ALL", "Non Transaction Time", "Escalations", "enter time in min", True, 1, "QEF"),
Activity(None, "ALL", "Non Transaction Time", "Green Star", "enter time in min", True, 1, "QAPP"),
Activity(None, "ALL", "Non Transaction Time", "Helping Others", "enter time in min", True, 1, "QIF"),
Activity(None, "ALL", "Non Transaction Time", "IE tool updation", "enter time in min", True, 1, "QAPP"),
Activity(None, "ALL", "Non Transaction Time", "Meetings", "enter time in min", True, 1, "QAPP"),
Activity(None, "ALL", "Non Transaction Time", "Projects", "enter time in min", True, 1, "QPR"),
Activity(None, "ALL", "Non Transaction Time", "Special/Management Requests", "enter time in min", True, 1, "COUTT"),
Activity(None, "ALL", "Non Transaction Time", "Training", "enter time in min", True, 1, "QPR"),
Activity(None, "ALL", "Unavoidable losses", "Absenteeism", "enter time in min", True, 1, "COUUL"),
Activity(None, "BGAP", "Blocked invoices_with email", "Blocked invoices_with email", "# of invoices", True, 5.1, "QEF"),
Activity(None, "BGAP", "Blocked invoices_without email", "Blocked invoices_without email", "# of invoices", True, 2.69, "QEF"),
Activity(None, "BGAP", "Check for DFU duplicates -paper doc", "Check for DFU duplicates -paper doc", "# of invoices checked", True, 0.63, "QIF"),
Activity(None, "BGAP", "Consignment", "Consignment direct posting SAP", "# of consignements posted", True, 0.65, "PVA"),
Activity(None, "BGAP", "Consignment", "Consignment_incoming mail with price discrepancies", "# of consignements posted", True, 5.4, "QEF"),
Activity(None, "BGAP", "Consignment", "Consignment_preparing forms_price discrepancies", "# of consi formulars prepared", True, 13.47, "QEF"),
Activity(None, "BGAP", "Consignment", "Consignment_price discrepancy_issue received from vendor via email", "# of consi formulars prepared", True, 12.34, "QEF"),
Activity(None, "BGAP", "CRC", "Call", "# of minutes", True, 1, "PVA"),
Activity(None, "BGAP", "CRC", "Mail", "# of emails", True, 7.32, "PVA"),
Activity(None, "BGAP", "Dunning Letters", "Dunning Letters", "# of invoices checked", True, 5.84, "QEF"),
Activity(None, "BGAP", "Emails with no answer", "Emails with no answer", "# of emails", True, 0.24, "PNVA"),
Activity(None, "BGAP", "IC", "FI invoices with many positions", "# of line items", True, 0.32, "PVA"),
Activity(None, "BGAP", "IC", "FI_WF kick off - IC", "# invoices", True, 1.89, "PVA"),
Activity(None, "BGAP", "IC", "Netting table update", "# of minutes", True, 1, "PVE"),
Activity(None, "BGAP", "IC", "Netting/parked inv. Tracker update_without posting", "# of invoices updated", True, 0.31, "PVE"),
Activity(None, "BGAP", "Parked invoices", "Parked invoices_with mail", "# of invoices", True, 4.91, "QEF"),
Activity(None, "BGAP", "Parked invoices", "Parked invoices_without mail", "# of invoices", True, 3.06, "QEF"),
Activity(None, "BGAP", "Posting invoices", "Posting invoices", "# of invoices", True, 2.42, "PVA"),
Activity(None, "BGAP", "Printing documents", "Printing documents", "# of docs", True, 1.65, "PNVA"),
Activity(None, "BGAP", "QC", "QC", "# of invoices", True, 1.92, "QPR"),
Activity(None, "BGAP", "Reversal", "Reversal_correction&booking", "# of invoices corrected", True, 11.35, "QIF"),
Activity(None, "BGAP", "Reversal tracking", "Reversal tracking", "# of invoices tracked", True, 3.15, "QAPP"),
Activity(None, "BGAP", "RTV", "RTV_Recheck", "# of invoices", True, 1.12, "QPR"),
Activity(None, "BGAP", "RTV", "RTV_SAP", "# of invoices", True, 4.79, "QPR"),
Activity(None, "BGAP", "Smartfix", "Consignment verification", "# of invoices", True, 0.61, "PVA"),
Activity(None, "BGAP", "Smartfix", "SAP-batch verification", "# of batches checked", True, 0.54, "PVA"),
Activity(None, "BGAP", "Smartfix", "SAP-invoice verification+note", "# of invoices", True, 1.32, "QPR"),
Activity(None, "BGAP", "Smartfix", "Smartfix", "# of invoices", True, 0.9, "QEF"),
Activity(None, "BGAP", "Special Client Request", "Special Client Request", "# of minutes", True, 1, "PVE"),
Activity(None, "BGAP", "Speedi", "Speedi_check/change invoice", "# of invoices", True, 2.34, "PVE"),
Activity(None, "BGAP", "Speedi", "Speedi_DFU", "# of invoices", True, 0.22, "PVA"),
Activity(None, "BGAP", "Speedi", "Speedi_DFU_check additional information-MIRO", "# of invoices", True, 2.11, "PVE"),
Activity(None, "BGAP", "Speedi", "Speedi_DFU_check order number/details", "# of invoices", True, 0.58, "PVE"),
Activity(None, "BGAP", "Speedi", "Speedi_DFU_left items_need to be clarified with client", "# of emails", True, 3.05, "QEF"),
Activity(None, "BGAP", "Trackers", "Parked/blocked report update", "# of minutes", True, 1, "QPR"),
Activity(None, "BGAP", "Trackers", "QC report update", "# of minutes", True, 1, "PVE"),
Activity(None, "BGAP", "Trackers", "Reminder report update", "# of minutes", True, 1, "PVE"),
Activity(None, "BGAP", "Vendor account maintanence", "Vendor account maintanence", "# of minutes", True, 1, "PNVA"),
Activity(None, "BKAP", "Blocked invoices", "Blocked invoices_with email", "# of invoices", True, 4.17, "QEF"),
Activity(None, "BKAP", "Blocked invoices", "Blocked invoices_without email", "# of invoices", True, 2.23, "QEF"),
Activity(None, "BKAP", "Consignment", "Consignment_preparing forms_price discrepancies", "# of consi formulars prepared", True, 13.47, "QEF"),
Activity(None, "BKAP", "Consignment posting", "Consignment posting", "# of invoices", True, 7.04, "PVA"),
Activity(None, "BKAP", "CRC", "Call", "# of minutes", True, 1, "PVA"),
Activity(None, "BKAP", "CRC", "Mail", "# of emails", True, 7.32, "PVA"),
Activity(None, "BKAP", "Dunning Letters", "Dunning Letters", "# of invoices checked", True, 5.84, "QEF"),
Activity(None, "BKAP", "Emails with no answer", "Emails with no answer", "# of emails", True, 0.24, "PNVA"),
Activity(None, "BKAP", "Parked invoices", "Parked invoices_with email", "# of invoices", True, 4.47, "QEF"),
Activity(None, "BKAP", "Parked invoices", "Parked invoices_without email", "# of invoices", True, 1.13, "QEF"),
Activity(None, "BKAP", "Posting invoices", "Posting invoices", "# of invoices", True, 2.42, "PVA"),
Activity(None, "BKAP", "Printing documents", "Printing documents", "# of docs", True, 1.65, "PNVA"),
Activity(None, "BKAP", "QC", "QC", "# of invoices", True, 1.92, "QPR"),
Activity(None, "BKAP", "Reversal", "Reversal_correction&booking", "# of invoices corrected", True, 11.35, "QIF"),
Activity(None, "BKAP", "Reversal tracking", "Reversal tracking", "# of invoices tracked", True, 3.15, "QAPP"),
Activity(None, "BKAP", "RTV", "RTV_Recheck", "# of invoices", True, 1.12, "QPR"),
Activity(None, "BKAP", "RTV", "RTV_SAP", "# of invoices", True, 4.79, "QPR"),
Activity(None, "BKAP", "Smartfix", "Smartfix", "# of invoices", True, 1.42, "PVA"),
Activity(None, "BKAP", "Special Client Request", "Special Client Request", "# of minutes", True, 1, "PVE"),
Activity(None, "BKAP", "Speedi", "Speedi_check/change invoice", "# of invoices", True, 2.34, "PVA"),
Activity(None, "BKAP", "Speedi", "Speedi_DFU", "# of invoices", True, 0.22, "PVE"),
Activity(None, "BKAP", "Speedi", "Speedi_DFU_change order number", "# of invoices", True, 1.16, "QEF"),
Activity(None, "BKAP", "Speedi", "Speedi_DFU_left items_need to be clarified with client", "# of emails", True, 3.62, "PVE"),
Activity(None, "BKAP", "Vendor account maintanence", "Vendor account maintanence", "# of minutes", True, 1, "PNVA"),
Activity(None, "BGAR", "AR List", "AR List", "# of AR lists done", True, 5.64, "PVA"),
Activity(None, "BGAR", "AR List", "AR List check", "# of AR lists done", True, 3, "QPR"),
Activity(None, "BGAR", "Autorun", "Autorun", "# of payments cleared manually", True, 0.19, "PVA"),
Activity(None, "BGAR", "Call", "Call", "# of minutes", True, 1, "PVA"),
Activity(None, "BGAR", "CiR List", "Check CiR Lists", "# of lists checked", True, 3.81, "PVA"),
Activity(None, "BGAR", "CiR List", "Check CiR Lists - big lists", "# of lists checked", True, 0.0, "PVA"),
Activity(None, "BGAR", "CiR List", "Check CiR Lists - small lists", "# of lists checked", True, 0.0, "PVA"),
Activity(None, "BGAR", "CiR List", "CiR List_big amounts", "# of amounts ", True, 1.35, "PVA"),
Activity(None, "BGAR", "CiR List", "CiR List_differences-mail", "# of emails", True, 8.24, "QEF"),
Activity(None, "BGAR", "CiR List", "CiR Lists", "# of lists", True, 13.1, "PVA"),
Activity(None, "BGAR", "CiR List", "Investigation of differences", "# of minutes", True, 1, "PVA"),
Activity(None, "BGAR", "Download Bank Statement from Bellin", "Download Bank Statement from Bellin", "# of attachments", True, 1.03, "PVE"),
Activity(None, "BGAR", "Emails sent", "Emails sent", "# of minutes", True, 1, "PVA"),
Activity(None, "BGAR", "GL to treasury reconciliation FSL10N", "GL to treasury reconciliation FSL10N", "# of reports", True, 8.76, "PVE"),
Activity(None, "BGAR", "Load Mappen into SAP", "Load Mappen into SAP", "# of mapp loadings", True, 2.94, "PVA"),
Activity(None, "BGAR", "Load Mappen into SAP", "Load Mappen into SAP - Kreditoren", "# of payments", True, 0.0, "PVA"),
Activity(None, "BGAR", "Load Mappen into SAP", "Load Mappen into SAP - Leasing", "# of payments", True, 0.0, "PVA"),
Activity(None, "BGAR", "Mapp loading/end of mapp", "Mapp loading/end of mapp", "# of mapps", True, 0.29, "PVA"),
Activity(None, "BGAR", "Netting investigation", "Netting investigation", "# of minutes", True, 1, "PVE"),
Activity(None, "BGAR", "Payment advice_extract page_describe", "Payment advice_extract page_describe", "# of payment advices described", True, 1.45, "PVE"),
Activity(None, "BGAR", "Post non standard payments-daimler etc", "Post non standard payments-daimler etc", "# of minutes", True, 1, "PVA"),
Activity(None, "BGAR", "Post payments", "Post payments", "# of payments ", True, 0.57, "PVA"),
Activity(None, "BGAR", "Post payments", "Post payments differences", "# of positions posted", True, 1.33, "PVA"),
Activity(None, "BGAR", "Post payments", "Post payments_reposting", "# of payments", True, 1.52, "PVA"),
Activity(None, "BGAR", "Post payments_Netting (check tracker)", "Post payments_Netting (check tracker)", "# of payments ", True, 1.47, "PVA"),
Activity(None, "BGAR", "Reminders (sending out) part I", "Reminders (sending out) part I", "# of times process was done", True, 25, "PVA"),
Activity(None, "BGAR", "Reminders (sending out) part II", "Reminders (sending out) part II", "# of documents", True, 3.42, "PVE"),
Activity(None, "BGAR", "Research remittances", "Research remittances_email/fax", "# of remittances", True, 0.55, "PVE"),
Activity(None, "BGAR", "Research remittances", "Research remittances_portal", "# of remittances", True, 1.95, "PVE"),
Activity(None, "BGAR", "Research remittances", "Research remittances_speedi", "# of remittances", True, 1.09, "PVE"),
Activity(None, "BGAR", "Save documents on Quick Place", "Save documents on Quick Place", "# of documents", True, 1.41, "PNVA"),
Activity(None, "BGAR", "Tracker", "Sharepoint tracker", "# of line items", True, 0.59, "QAPP"),
Activity(None, "BKAR", "Add posting/clearing number on pdf file", "Add posting/clearing number on pdf file", "# of payments described", True, 0.55, "PVE"),
Activity(None, "BKAR", "AR Aging report ", "AR Aging report ", "# of positions checked", True, 3.59, "PVA"),
Activity(None, "BKAR", "AR List", "AR List ckeck", "# of AR lists done", True, 3, "QPR"),
Activity(None, "BKAR", "Autorun", "Autorun", "# of payments cleared", True, 0.19, "PVA"),
Activity(None, "BKAR", "Call", "Call", "# of minutes", True, 1, "PVE"),
Activity(None, "BKAR", "Cashflowdata in Bellin (abgleich)", "Cashflowdata in Bellin (abgleich)", "# of positions changed", True, 0.82, "PVA"),
Activity(None, "BKAR", "Download Bank Statement from Bellin", "Download Bank Statement from Bellin", "# of attachments", True, 1.03, "PVE"),
Activity(None, "BKAR", "Emails sent", "Emails sent", "# of minutes", True, 1, "PVE"),
Activity(None, "BKAR", "GL to treasury reconciliation FSL10N", "GL to treasury reconciliation FSL10N", "# of reports", True, 8.76, "PVE"),
Activity(None, "BKAR", "Load Mappen into SAP", "Load Mappen into SAP", "# of mapp loadings", True, 2.94, "PVA"),
Activity(None, "BKAR", "Load Mappen into SAP", "Load Mappen into SAP - Kreditoren", "# of payments", True, 0.0, "PVA"),
Activity(None, "BKAR", "Load Mappen into SAP", "Load Mappen into SAP - Leasing", "# of payments", True, 0.0, "PVA"),
Activity(None, "BKAR", "Mapp loading/end of mapp", "Mapp loading/end of mapp", "# of mapps", True, 0.29, "PVA"),
Activity(None, "BKAR", "Netting investigation", "Netting investigation", "# of minutes", True, 1, "PVE"),
Activity(None, "BKAR", "Payment advice-describing", "Payment advice-describing", "# of payment advices described", True, 1.52, "PVE"),
Activity(None, "BKAR", "Post payments", "Post payments", "# of payments posted", True, 1.49, "PVA"),
Activity(None, "BKAR", "Post payments", "Post payments differences", "# of positions posted", True, 1.33, "PVA"),
Activity(None, "BKAR", "Post payments_Netting (check tracker)", "Post payments_Netting (check tracker)", "# of payments ", True, 1.47, "PVA"),
Activity(None, "BKAR", "Reminders (sending out) part I", "Reminders (sending out) part I", "# of times process was done", True, 25, "PVA"),
Activity(None, "BKAR", "Reminders (sending out) part II", "Reminders (sending out) part II", "# of documents", True, 3.42, "PVE"),
Activity(None, "BKAR", "Remittance advices_mail/fax", "Remittance advices_mail/fax", "# of remittances", True, 0.55, "PVE"),
Activity(None, "BKAR", "Research remittances_portal", "Research remittances_portal", "# of remittances", True, 1.95, "PVE"),
Activity(None, "BKAR", "Research remittances_speedi", "Research remittances_speedi", "# of remittances", True, 1.09, "PVE"),
Activity(None, "BKAR", "Save documents on Quick place", "Save documents on Quick place", "# of documents", True, 1.41, "PNVA"),
Activity(None, "BKAR", "Tracker", "Sharepoint tracker", "# of line items", True, 0.59, "QAPP"),
Activity(None, "BGSC", "Archiving", "Archiving", "# of documents", True, 0.21, "PNVA"),
Activity(None, "BGSC", "Export invoices via BGSC tool on client sharepiont", "Export invoices via BGSC tool on client sharepiont", "# of times process was done", True, 3.12, "PNVA"),
Activity(None, "BGSC", "Mahnungs tracking", "Mahnungs tracking", "# of mahnungs", True, 0.96, "QPR"),
Activity(None, "BGSC", "Printing", "Printing deckblats", "# of times process was done", True, 2, "PVE"),
Activity(None, "BGSC", "Printing", "Printing invoices from e-mail", "# of invoices", True, 0.52, "PVE"),
Activity(None, "BGSC", "Repatriation", "Repatriation", "# of minutes", True, 1, "PNVA"),
Activity(None, "BGSC", "Repatriation", "Repatriation - fracht", "# of minutes", True, 1, "PNVA"),
Activity(None, "BGSC", "RTV tracking", "RTV tracking", "# of rtv's", True, 0.98, "QPR"),
Activity(None, "BGSC", "BGSC", "BGSC regular documents", "# of pages", True, 0.14, "PVA"),
Activity(None, "BGSC", "BGSC", "Scannning attachments", "# of attachments", True, 0.14, "PVA"),
Activity(None, "BGSC", "Sorting documents", "Sorting documents", "# of documents", True, 0.06, "PVE"),
Activity(None, "BGSC", "Sticking barcodes", "Sticking barcodes", "# of documents", True, 0.12, "PVE"),
Activity(None, "BGTE", "Activate employees", "Activate employees", "# of employees activated", True, 0.88, "PVA"),
Activity(None, "BGTE", "Add an employee to a assistant", "Add an employee to a assistant", "# of employees added", True, 0.88, "PVA"),
Activity(None, "BGTE", "Archiving", "Archiving", "# of docs", True, 0.2, "PNVA"),
Activity(None, "BGTE", "Archiving - prepare new binder", "Archiving - prepare new binder", "# of times process was done", True, 7.03, "PNVA"),
Activity(None, "BGTE", "Archiving wrong trips", "Archiving wrong trips", "# of trips", True, 0.45, "PNVA"),
Activity(None, "BGTE", "Call", "Call", "# of minutes", True, 1, "PVA"),
Activity(None, "BGTE", "Correcting trip after getting an e-mail", "Correcting trip after getting an e-mail", "# of docs", True, 3.54, "QEF"),
Activity(None, "BGTE", "Cost center checking Reisenstrage for posted trips", "Cost center checking Reisenstrage for posted trips", "# of trips", True, 3.34, "QIF"),
Activity(None, "BGTE", "Emails with no answer", "Emails with no answer", "# of emails", True, 0.24, "PNVA"),
Activity(None, "BGTE", "Open/ log on into Wintrip", "Open/ log on into Wintrip", "# of times process was done", True, 1.36, "PVE"),
Activity(None, "BGTE", "Payment in cash", "Payment in cash", "# of docs", True, 2.63, "PNVA"),
Activity(None, "BGTE", "Posting trips", "Posting trips", "# of docs", True, 0.46, "PVA"),
Activity(None, "BGTE", "Processing trips", "Check KM on internet", "# of destinations", True, 0.89, "PVA"),
Activity(None, "BGTE", "Processing trips", "Processing big trips", "# of trips", True, 0.0, "PVA"),
Activity(None, "BGTE", "Processing trips", "Processing trips", "# of trips", True, 1.78, "PVA"),
Activity(None, "BGTE", "Processing trips", "Processing trips with KM", "# of trips", True, 2.32, "PVA"),
Activity(None, "BGTE", "Repatriation (1xmonth)", "Repatriation (1xmonth)", "# of boxes", True, 17.23, "PNVA"),
Activity(None, "BGTE", "Reset the password", "Reset the password", "# of passwords", True, 0.67, "PVA"),
Activity(None, "BGTE", "Reverse trip", "Reverse trip", "# of reversals", True, 1.96, "QEF"),
Activity(None, "BGTE", "Searching for an email", "Searching for an email", "# of emails", True, 0.0, ""),
Activity(None, "BGTE", "Sorting Trips after entering into the system (Wintrip,SAP,Archiv)", "Sorting Trips after entering into the system (Wintrip,SAP,Archiv)", "# of trips", True, 1.84, "PNVA"),
Activity(None, "BGTE", "Sorting Trips before enter into the system (by PersNo)", "Sorting Trips before enter into the system (by PersNo)", "# of trips", True, 0.2, "PVE"),
Activity(None, "BGTE", "Special Client Request", "Special Client Request", "# of minutes", True, 1, "PVE"),
Activity(None, "BGTE", "Trackers", "Mail tracker", "# of line items/emails", True, 0.32, "QAPP"),
Activity(None, "BGTE", "Trackers", "Packet tracker", "# of times process was done", True, 0.32, "QAPP"),
Activity(None, "BGTE", "Trackers", "Volume tracker", "# of times process was done", True, 1.57, "QAPP"),
Activity(None, "BGTE", "Trip with error_check bills and write email", "Trip with error_check bills and write email", "# of trips", True, 2.61, "QEF"),
Activity(None, "BGTE", "Unpack post", "Unpack post", "# of trips", True, 0.47, "PVE"),
Activity(None, "BGTE", "Writing emails", "Writing emails", "# of emails", True, 1.74, "PVE"),
Activity(None, "BGTE", "Writing emails", "Writing emails with attachment scanned", "# of emails", True, 2.66, "PVE"),
Activity(None, "BGTE", "Writing emails", "Writing non standard emails", "# of emails", True, 0.0, "PVE"),
]
for activity in activities:
try:
activity.save()
except IntegrityError as e:
pass
| bsd-3-clause |
xnox/linaro-gcc-4.7-src | libstdc++-v3/python/libstdcxx/v6/printers.py | 56 | 32610 | # Pretty-printers for libstc++.
# Copyright (C) 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import itertools
import re
# Try to use the new-style pretty-printing if available.
_use_gdb_pp = True
try:
import gdb.printing
except ImportError:
_use_gdb_pp = False
# Starting with the type ORIG, search for the member type NAME. This
# handles searching upward through superclasses. This is needed to
# work around http://sourceware.org/bugzilla/show_bug.cgi?id=13615.
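# For example, find_type(some_list_value.type, '_Node') resolves the nested
# _Node type of a std::list even when it is declared in a base class (the
# value name here is illustrative; see the printers below for real callers).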
def find_type(orig, name):
typ = orig.strip_typedefs()
while True:
search = str(typ) + '::' + name
try:
return gdb.lookup_type(search)
except RuntimeError:
pass
# The type was not found, so try the superclass. We only need
# to check the first superclass, so we don't bother with
# anything fancier here.
field = typ.fields()[0]
if not field.is_base_class:
raise ValueError, "Cannot find type %s::%s" % (str(orig), name)
typ = field.type
class SharedPointerPrinter:
"Print a shared_ptr or weak_ptr"
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
state = 'empty'
refcounts = self.val['_M_refcount']['_M_pi']
if refcounts != 0:
usecount = refcounts['_M_use_count']
weakcount = refcounts['_M_weak_count']
if usecount == 0:
state = 'expired, weak %d' % weakcount
else:
state = 'count %d, weak %d' % (usecount, weakcount - 1)
return '%s (%s) %s' % (self.typename, state, self.val['_M_ptr'])
class UniquePointerPrinter:
"Print a unique_ptr"
def __init__ (self, typename, val):
self.val = val
def to_string (self):
return self.val['_M_t']
class StdListPrinter:
"Print a std::list"
class _iterator:
def __init__(self, nodetype, head):
self.nodetype = nodetype
self.base = head['_M_next']
self.head = head.address
self.count = 0
def __iter__(self):
return self
def next(self):
if self.base == self.head:
raise StopIteration
elt = self.base.cast(self.nodetype).dereference()
self.base = elt['_M_next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, elt['_M_data'])
def __init__(self, typename, val):
self.typename = typename
self.val = val
def children(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self._iterator(nodetype, self.val['_M_impl']['_M_node'])
def to_string(self):
if self.val['_M_impl']['_M_node'].address == self.val['_M_impl']['_M_node']['_M_next']:
return 'empty %s' % (self.typename)
return '%s' % (self.typename)
class StdListIteratorPrinter:
"Print std::list::iterator"
def __init__(self, typename, val):
self.val = val
self.typename = typename
def to_string(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self.val['_M_node'].cast(nodetype).dereference()['_M_data']
class StdSlistPrinter:
"Print a __gnu_cxx::slist"
class _iterator:
def __init__(self, nodetype, head):
self.nodetype = nodetype
self.base = head['_M_head']['_M_next']
self.count = 0
def __iter__(self):
return self
def next(self):
if self.base == 0:
raise StopIteration
elt = self.base.cast(self.nodetype).dereference()
self.base = elt['_M_next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, elt['_M_data'])
def __init__(self, typename, val):
self.val = val
def children(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self._iterator(nodetype, self.val)
def to_string(self):
if self.val['_M_head']['_M_next'] == 0:
return 'empty __gnu_cxx::slist'
return '__gnu_cxx::slist'
class StdSlistIteratorPrinter:
"Print __gnu_cxx::slist::iterator"
def __init__(self, typename, val):
self.val = val
def to_string(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self.val['_M_node'].cast(nodetype).dereference()['_M_data']
class StdVectorPrinter:
"Print a std::vector"
class _iterator:
def __init__ (self, start, finish, bitvec):
self.bitvec = bitvec
if bitvec:
self.item = start['_M_p']
self.so = start['_M_offset']
self.finish = finish['_M_p']
self.fo = finish['_M_offset']
itype = self.item.dereference().type
self.isize = 8 * itype.sizeof
else:
self.item = start
self.finish = finish
self.count = 0
def __iter__(self):
return self
def next(self):
count = self.count
self.count = self.count + 1
if self.bitvec:
if self.item == self.finish and self.so >= self.fo:
raise StopIteration
elt = self.item.dereference()
if elt & (1 << self.so):
obit = 1
else:
obit = 0
self.so = self.so + 1
if self.so >= self.isize:
self.item = self.item + 1
self.so = 0
return ('[%d]' % count, obit)
else:
if self.item == self.finish:
raise StopIteration
elt = self.item.dereference()
self.item = self.item + 1
return ('[%d]' % count, elt)
def __init__(self, typename, val):
self.typename = typename
self.val = val
self.is_bool = val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL
def children(self):
return self._iterator(self.val['_M_impl']['_M_start'],
self.val['_M_impl']['_M_finish'],
self.is_bool)
def to_string(self):
start = self.val['_M_impl']['_M_start']
finish = self.val['_M_impl']['_M_finish']
end = self.val['_M_impl']['_M_end_of_storage']
if self.is_bool:
start = self.val['_M_impl']['_M_start']['_M_p']
so = self.val['_M_impl']['_M_start']['_M_offset']
finish = self.val['_M_impl']['_M_finish']['_M_p']
fo = self.val['_M_impl']['_M_finish']['_M_offset']
itype = start.dereference().type
bl = 8 * itype.sizeof
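# vector<bool> packs its elements into words of bl bits: count the bits
# left in the first word, the full words in between, and the bits used
# in the last word.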
length = (bl - so) + bl * ((finish - start) - 1) + fo
capacity = bl * (end - start)
return ('%s<bool> of length %d, capacity %d'
% (self.typename, int (length), int (capacity)))
else:
return ('%s of length %d, capacity %d'
% (self.typename, int (finish - start), int (end - start)))
def display_hint(self):
return 'array'
class StdVectorIteratorPrinter:
"Print std::vector::iterator"
def __init__(self, typename, val):
self.val = val
def to_string(self):
return self.val['_M_current'].dereference()
class StdTuplePrinter:
"Print a std::tuple"
class _iterator:
def __init__ (self, head):
self.head = head
# Set the base class as the initial head of the
# tuple.
nodes = self.head.type.fields ()
if len (nodes) == 1:
# Set the actual head to the first pair.
self.head = self.head.cast (nodes[0].type)
elif len (nodes) != 0:
raise ValueError, "Top of tuple tree does not consist of a single node."
self.count = 0
def __iter__ (self):
return self
def next (self):
nodes = self.head.type.fields ()
# Check for further recursions in the inheritance tree.
if len (nodes) == 0:
raise StopIteration
# Check that this iteration has an expected structure.
if len (nodes) != 2:
raise ValueError, "Cannot parse more than 2 nodes in a tuple tree."
# - Left node is the next recursion parent.
# - Right node is the actual class contained in the tuple.
# Process right node.
impl = self.head.cast (nodes[1].type)
# Process left node and set it as head.
self.head = self.head.cast (nodes[0].type)
self.count = self.count + 1
# Finally, check the implementation. If it is
# wrapped in _M_head_impl return that, otherwise return
# the value "as is".
fields = impl.type.fields ()
if len (fields) < 1 or fields[0].name != "_M_head_impl":
return ('[%d]' % self.count, impl)
else:
return ('[%d]' % self.count, impl['_M_head_impl'])
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def children (self):
return self._iterator (self.val)
def to_string (self):
if len (self.val.type.fields ()) == 0:
return 'empty %s' % (self.typename)
return '%s containing' % (self.typename)
class StdStackOrQueuePrinter:
"Print a std::stack or std::queue"
def __init__ (self, typename, val):
self.typename = typename
self.visualizer = gdb.default_visualizer(val['c'])
def children (self):
return self.visualizer.children()
def to_string (self):
return '%s wrapping: %s' % (self.typename,
self.visualizer.to_string())
def display_hint (self):
if hasattr (self.visualizer, 'display_hint'):
return self.visualizer.display_hint ()
return None
class RbtreeIterator:
def __init__(self, rbtree):
self.size = rbtree['_M_t']['_M_impl']['_M_node_count']
self.node = rbtree['_M_t']['_M_impl']['_M_header']['_M_left']
self.count = 0
def __iter__(self):
return self
def __len__(self):
return int (self.size)
def next(self):
if self.count == self.size:
raise StopIteration
result = self.node
self.count = self.count + 1
if self.count < self.size:
# Compute the next node.
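# Classic in-order successor: go to the leftmost node of the right
# subtree if there is one, otherwise climb until we arrive from a
# left child.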
node = self.node
if node.dereference()['_M_right']:
node = node.dereference()['_M_right']
while node.dereference()['_M_left']:
node = node.dereference()['_M_left']
else:
parent = node.dereference()['_M_parent']
while node == parent.dereference()['_M_right']:
node = parent
parent = parent.dereference()['_M_parent']
if node.dereference()['_M_right'] != parent:
node = parent
self.node = node
return result
# This is a pretty printer for std::_Rb_tree_iterator (which is
# std::map::iterator), and has nothing to do with the RbtreeIterator
# class above.
class StdRbtreeIteratorPrinter:
"Print std::map::iterator"
def __init__ (self, typename, val):
self.val = val
def to_string (self):
typename = str(self.val.type.strip_typedefs()) + '::_Link_type'
nodetype = gdb.lookup_type(typename).strip_typedefs()
return self.val.cast(nodetype).dereference()['_M_value_field']
class StdDebugIteratorPrinter:
"Print a debug enabled version of an iterator"
def __init__ (self, typename, val):
self.val = val
# Just strip away the encapsulating __gnu_debug::_Safe_iterator
# and return the wrapped iterator value.
def to_string (self):
itype = self.val.type.template_argument(0)
return self.val['_M_current'].cast(itype)
class StdMapPrinter:
"Print a std::map or std::multimap"
# Turn an RbtreeIterator into a pretty-print iterator.
class _iter:
def __init__(self, rbiter, type):
self.rbiter = rbiter
self.count = 0
self.type = type
def __iter__(self):
return self
def next(self):
if self.count % 2 == 0:
n = self.rbiter.next()
n = n.cast(self.type).dereference()['_M_value_field']
self.pair = n
item = n['first']
else:
item = self.pair['second']
result = ('[%d]' % self.count, item)
self.count = self.count + 1
return result
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
return '%s with %d elements' % (self.typename,
len (RbtreeIterator (self.val)))
def children (self):
rep_type = find_type(self.val.type, '_Rep_type')
node = find_type(rep_type, '_Link_type')
node = node.strip_typedefs()
return self._iter (RbtreeIterator (self.val), node)
def display_hint (self):
return 'map'
class StdSetPrinter:
"Print a std::set or std::multiset"
# Turn an RbtreeIterator into a pretty-print iterator.
class _iter:
def __init__(self, rbiter, type):
self.rbiter = rbiter
self.count = 0
self.type = type
def __iter__(self):
return self
def next(self):
item = self.rbiter.next()
item = item.cast(self.type).dereference()['_M_value_field']
# FIXME: this is weird ... what to do?
# Maybe a 'set' display hint?
result = ('[%d]' % self.count, item)
self.count = self.count + 1
return result
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
return '%s with %d elements' % (self.typename,
len (RbtreeIterator (self.val)))
def children (self):
rep_type = find_type(self.val.type, '_Rep_type')
node = find_type(rep_type, '_Link_type')
node = node.strip_typedefs()
return self._iter (RbtreeIterator (self.val), node)
class StdBitsetPrinter:
"Print a std::bitset"
def __init__(self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
# If template_argument handled values, we could print the
# size. Or we could use a regexp on the type.
return '%s' % (self.typename)
def children (self):
words = self.val['_M_w']
wtype = words.type
# The _M_w member can be either an unsigned long, or an
# array. This depends on the template specialization used.
# If it is a single long, convert to a single element list.
if wtype.code == gdb.TYPE_CODE_ARRAY:
tsize = wtype.target ().sizeof
else:
words = [words]
tsize = wtype.sizeof
nwords = wtype.sizeof / tsize
result = []
byte = 0
while byte < nwords:
w = words[byte]
bit = 0
while w != 0:
if (w & 1) != 0:
# Another spot where we could use 'set'?
result.append(('[%d]' % (byte * tsize * 8 + bit), 1))
bit = bit + 1
w = w >> 1
byte = byte + 1
return result
class StdDequePrinter:
"Print a std::deque"
class _iter:
def __init__(self, node, start, end, last, buffer_size):
self.node = node
self.p = start
self.end = end
self.last = last
self.buffer_size = buffer_size
self.count = 0
def __iter__(self):
return self
def next(self):
if self.p == self.last:
raise StopIteration
result = ('[%d]' % self.count, self.p.dereference())
self.count = self.count + 1
# Advance the 'cur' pointer.
self.p = self.p + 1
if self.p == self.end:
# If we got to the end of this bucket, move to the
# next bucket.
self.node = self.node + 1
self.p = self.node[0]
self.end = self.p + self.buffer_size
return result
def __init__(self, typename, val):
self.typename = typename
self.val = val
self.elttype = val.type.template_argument(0)
size = self.elttype.sizeof
if size < 512:
self.buffer_size = int (512 / size)
else:
self.buffer_size = 1
def to_string(self):
start = self.val['_M_impl']['_M_start']
end = self.val['_M_impl']['_M_finish']
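# A deque keeps its elements in fixed-size buffers: the size is the
# number of full middle buffers times the buffer size, plus the
# occupied parts of the first and last buffers.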
delta_n = end['_M_node'] - start['_M_node'] - 1
delta_s = start['_M_last'] - start['_M_cur']
delta_e = end['_M_cur'] - end['_M_first']
size = self.buffer_size * delta_n + delta_s + delta_e
return '%s with %d elements' % (self.typename, long (size))
def children(self):
start = self.val['_M_impl']['_M_start']
end = self.val['_M_impl']['_M_finish']
return self._iter(start['_M_node'], start['_M_cur'], start['_M_last'],
end['_M_cur'], self.buffer_size)
def display_hint (self):
return 'array'
class StdDequeIteratorPrinter:
"Print std::deque::iterator"
def __init__(self, typename, val):
self.val = val
def to_string(self):
return self.val['_M_cur'].dereference()
class StdStringPrinter:
"Print a std::basic_string of some kind"
def __init__(self, typename, val):
self.val = val
def to_string(self):
# Make sure &string works, too.
type = self.val.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Calculate the length of the string so that to_string returns
# the string according to length, not according to first null
# encountered.
ptr = self.val ['_M_dataplus']['_M_p']
realtype = type.unqualified ().strip_typedefs ()
reptype = gdb.lookup_type (str (realtype) + '::_Rep').pointer ()
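# The _Rep bookkeeping header is allocated immediately before the
# character data, so step back one _Rep from the data pointer.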
header = ptr.cast(reptype) - 1
len = header.dereference ()['_M_length']
if hasattr(ptr, "lazy_string"):
return ptr.lazy_string (length = len)
return ptr.string (length = len)
def display_hint (self):
return 'string'
class Tr1HashtableIterator:
def __init__ (self, hash):
self.node = hash['_M_before_begin']['_M_nxt']
self.node_type = find_type(hash.type, '_Node').pointer()
def __iter__ (self):
return self
def next (self):
if self.node == 0:
raise StopIteration
node = self.node.cast(self.node_type)
result = node.dereference()['_M_v']
self.node = node.dereference()['_M_nxt']
return result
class Tr1UnorderedSetPrinter:
"Print a tr1::unordered_set"
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
return '%s with %d elements' % (self.typename, self.val['_M_element_count'])
@staticmethod
def format_count (i):
return '[%d]' % i
def children (self):
counter = itertools.imap (self.format_count, itertools.count())
return itertools.izip (counter, Tr1HashtableIterator (self.val))
class Tr1UnorderedMapPrinter:
"Print a tr1::unordered_map"
def __init__ (self, typename, val):
self.typename = typename
self.val = val
def to_string (self):
return '%s with %d elements' % (self.typename, self.val['_M_element_count'])
@staticmethod
def flatten (list):
for elt in list:
for i in elt:
yield i
@staticmethod
def format_one (elt):
return (elt['first'], elt['second'])
@staticmethod
def format_count (i):
return '[%d]' % i
def children (self):
counter = itertools.imap (self.format_count, itertools.count())
# Map over the hash table and flatten the result.
data = self.flatten (itertools.imap (self.format_one, Tr1HashtableIterator (self.val)))
# Zip the two iterators together.
return itertools.izip (counter, data)
def display_hint (self):
return 'map'
class StdForwardListPrinter:
"Print a std::forward_list"
class _iterator:
def __init__(self, nodetype, head):
self.nodetype = nodetype
self.base = head['_M_next']
self.count = 0
def __iter__(self):
return self
def next(self):
if self.base == 0:
raise StopIteration
elt = self.base.cast(self.nodetype).dereference()
self.base = elt['_M_next']
count = self.count
self.count = self.count + 1
return ('[%d]' % count, elt['_M_value'])
def __init__(self, typename, val):
self.val = val
self.typename = typename
def children(self):
nodetype = find_type(self.val.type, '_Node')
nodetype = nodetype.strip_typedefs().pointer()
return self._iterator(nodetype, self.val['_M_impl']['_M_head'])
def to_string(self):
if self.val['_M_impl']['_M_head']['_M_next'] == 0:
return 'empty %s' % (self.typename)
return '%s' % (self.typename)
# A "regular expression" printer which conforms to the
# "SubPrettyPrinter" protocol from gdb.printing.
class RxPrinter(object):
def __init__(self, name, function):
super(RxPrinter, self).__init__()
self.name = name
self.function = function
self.enabled = True
def invoke(self, value):
if not self.enabled:
return None
return self.function(self.name, value)
# A pretty-printer that conforms to the "PrettyPrinter" protocol from
# gdb.printing. It can also be used directly as an old-style printer.
class Printer(object):
def __init__(self, name):
super(Printer, self).__init__()
self.name = name
self.subprinters = []
self.lookup = {}
self.enabled = True
self.compiled_rx = re.compile('^([a-zA-Z0-9_:]+)<.*>$')
def add(self, name, function):
# A small sanity check.
# FIXME
if not self.compiled_rx.match(name + '<>'):
raise ValueError, 'libstdc++ programming error: "%s" does not match' % name
printer = RxPrinter(name, function)
self.subprinters.append(printer)
self.lookup[name] = printer
# Add a name using _GLIBCXX_BEGIN_NAMESPACE_VERSION.
def add_version(self, base, name, function):
self.add(base + name, function)
self.add(base + '__7::' + name, function)
# Add a name using _GLIBCXX_BEGIN_NAMESPACE_CONTAINER.
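# For example, add_container('std::', 'vector', f) registers the printer
# under std::vector, std::__7::vector, std::__cxx1998::vector and
# std::__cxx1998::__7::vector.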
def add_container(self, base, name, function):
self.add_version(base, name, function)
self.add_version(base + '__cxx1998::', name, function)
@staticmethod
def get_basic_type(type):
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Get the unqualified type, stripped of typedefs.
type = type.unqualified ().strip_typedefs ()
return type.tag
def __call__(self, val):
typename = self.get_basic_type(val.type)
if not typename:
return None
# All the types we match are template types, so we can use a
# dictionary.
match = self.compiled_rx.match(typename)
if not match:
return None
basename = match.group(1)
if basename in self.lookup:
return self.lookup[basename].invoke(val)
# Cannot find a pretty printer. Return None.
return None
libstdcxx_printer = None
def register_libstdcxx_printers (obj):
"Register libstdc++ pretty-printers with objfile Obj."
global _use_gdb_pp
global libstdcxx_printer
if _use_gdb_pp:
gdb.printing.register_pretty_printer(obj, libstdcxx_printer)
else:
if obj is None:
obj = gdb
obj.pretty_printers.append(libstdcxx_printer)
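# A minimal usage sketch (paths are illustrative): with this module on
# sys.path as libstdcxx.v6.printers, a .gdbinit would typically contain:
#
#   python
#   from libstdcxx.v6.printers import register_libstdcxx_printers
#   register_libstdcxx_printers(None)
#   end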
def build_libstdcxx_dictionary ():
global libstdcxx_printer
libstdcxx_printer = Printer("libstdc++-v6")
# For _GLIBCXX_BEGIN_NAMESPACE_VERSION.
vers = '(__7::)?'
# For _GLIBCXX_BEGIN_NAMESPACE_CONTAINER.
container = '(__cxx1998::' + vers + ')?'
# libstdc++ objects requiring pretty-printing.
# In order from:
# http://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen/a01847.html
libstdcxx_printer.add_version('std::', 'basic_string', StdStringPrinter)
libstdcxx_printer.add_container('std::', 'bitset', StdBitsetPrinter)
libstdcxx_printer.add_container('std::', 'deque', StdDequePrinter)
libstdcxx_printer.add_container('std::', 'list', StdListPrinter)
libstdcxx_printer.add_container('std::', 'map', StdMapPrinter)
libstdcxx_printer.add_container('std::', 'multimap', StdMapPrinter)
libstdcxx_printer.add_container('std::', 'multiset', StdSetPrinter)
libstdcxx_printer.add_version('std::', 'priority_queue',
StdStackOrQueuePrinter)
libstdcxx_printer.add_version('std::', 'queue', StdStackOrQueuePrinter)
libstdcxx_printer.add_version('std::', 'tuple', StdTuplePrinter)
libstdcxx_printer.add_container('std::', 'set', StdSetPrinter)
libstdcxx_printer.add_version('std::', 'stack', StdStackOrQueuePrinter)
libstdcxx_printer.add_version('std::', 'unique_ptr', UniquePointerPrinter)
libstdcxx_printer.add_container('std::', 'vector', StdVectorPrinter)
# vector<bool>
# Printer registrations for classes compiled with -D_GLIBCXX_DEBUG.
libstdcxx_printer.add('std::__debug::bitset', StdBitsetPrinter)
libstdcxx_printer.add('std::__debug::deque', StdDequePrinter)
libstdcxx_printer.add('std::__debug::list', StdListPrinter)
libstdcxx_printer.add('std::__debug::map', StdMapPrinter)
libstdcxx_printer.add('std::__debug::multimap', StdMapPrinter)
libstdcxx_printer.add('std::__debug::multiset', StdSetPrinter)
libstdcxx_printer.add('std::__debug::priority_queue',
StdStackOrQueuePrinter)
libstdcxx_printer.add('std::__debug::queue', StdStackOrQueuePrinter)
libstdcxx_printer.add('std::__debug::set', StdSetPrinter)
libstdcxx_printer.add('std::__debug::stack', StdStackOrQueuePrinter)
libstdcxx_printer.add('std::__debug::unique_ptr', UniquePointerPrinter)
libstdcxx_printer.add('std::__debug::vector', StdVectorPrinter)
# These are the TR1 and C++0x printers.
# For array - the default GDB pretty-printer seems reasonable.
libstdcxx_printer.add_version('std::', 'shared_ptr', SharedPointerPrinter)
libstdcxx_printer.add_version('std::', 'weak_ptr', SharedPointerPrinter)
libstdcxx_printer.add_container('std::', 'unordered_map',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add_container('std::', 'unordered_set',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add_container('std::', 'unordered_multimap',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add_container('std::', 'unordered_multiset',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add_container('std::', 'forward_list',
StdForwardListPrinter)
libstdcxx_printer.add_version('std::tr1::', 'shared_ptr', SharedPointerPrinter)
libstdcxx_printer.add_version('std::tr1::', 'weak_ptr', SharedPointerPrinter)
libstdcxx_printer.add_version('std::tr1::', 'unordered_map',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add_version('std::tr1::', 'unordered_set',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add_version('std::tr1::', 'unordered_multimap',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add_version('std::tr1::', 'unordered_multiset',
Tr1UnorderedSetPrinter)
# These are the C++0x printer registrations for -D_GLIBCXX_DEBUG cases.
# The tr1 namespace printers do not seem to have any debug
# equivalents, so do not register them.
libstdcxx_printer.add('std::__debug::unordered_map',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add('std::__debug::unordered_set',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add('std::__debug::unordered_multimap',
Tr1UnorderedMapPrinter)
libstdcxx_printer.add('std::__debug::unordered_multiset',
Tr1UnorderedSetPrinter)
libstdcxx_printer.add('std::__debug::forward_list',
StdForwardListPrinter)
# Extensions.
libstdcxx_printer.add_version('__gnu_cxx::', 'slist', StdSlistPrinter)
if True:
# These shouldn't be necessary, if GDB "print *i" worked.
# But it often doesn't, so here they are.
libstdcxx_printer.add_container('std::', '_List_iterator',
StdListIteratorPrinter)
libstdcxx_printer.add_container('std::', '_List_const_iterator',
StdListIteratorPrinter)
libstdcxx_printer.add_version('std::', '_Rb_tree_iterator',
StdRbtreeIteratorPrinter)
libstdcxx_printer.add_version('std::', '_Rb_tree_const_iterator',
StdRbtreeIteratorPrinter)
libstdcxx_printer.add_container('std::', '_Deque_iterator',
StdDequeIteratorPrinter)
libstdcxx_printer.add_container('std::', '_Deque_const_iterator',
StdDequeIteratorPrinter)
libstdcxx_printer.add_version('__gnu_cxx::', '__normal_iterator',
StdVectorIteratorPrinter)
libstdcxx_printer.add_version('__gnu_cxx::', '_Slist_iterator',
StdSlistIteratorPrinter)
# Debug (compiled with -D_GLIBCXX_DEBUG) printer
# registrations. The Rb_tree debug iterator when unwrapped
# from the encapsulating __gnu_debug::_Safe_iterator does not
# have the __norm namespace. Just use the existing printer
# registration for that.
libstdcxx_printer.add('__gnu_debug::_Safe_iterator',
StdDebugIteratorPrinter)
libstdcxx_printer.add('std::__norm::_List_iterator',
StdListIteratorPrinter)
libstdcxx_printer.add('std::__norm::_List_const_iterator',
StdListIteratorPrinter)
libstdcxx_printer.add('std::__norm::_Deque_const_iterator',
StdDequeIteratorPrinter)
libstdcxx_printer.add('std::__norm::_Deque_iterator',
StdDequeIteratorPrinter)
build_libstdcxx_dictionary ()
| gpl-2.0 |
tlatzko/spmcluster | .tox/docs/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py | 436 | 5992 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
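# Walk back to the first top-level sibling so comments and processing
# instructions outside the root element are included as children.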
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), "Text nodes have no children"
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
| bsd-2-clause |
hgl888/chromium-crosswalk | tools/check_grd_for_unused_strings.py | 49 | 6501 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Without any args, this simply loads the IDs out of a bunch of the Chrome GRD
files, and then checks the subset of the code that loads the strings to try
and figure out what isn't in use any more.
You can give paths to GRD files and source directories to control what is
checked instead.
"""
import os
import re
import sys
import xml.sax
# Extra messages along the way
# 1 - Print ids that are found in sources but not in the found id set
# 2 - Files that aren't processed (don't match the source name regex)
DEBUG = 0
class GrdIDExtractor(xml.sax.handler.ContentHandler):
"""Extracts the IDs from messages in GRIT files"""
def __init__(self):
self.id_set_ = set()
def startElement(self, name, attrs):
if name == 'message':
self.id_set_.add(attrs['name'])
def allIDs(self):
"""Return all the IDs found"""
return self.id_set_.copy()
def CheckForUnusedGrdIDsInSources(grd_files, src_dirs):
"""Will collect the message ids out of the given GRD files and then scan
the source directories to try and figure out what ids are not currently
being used by any source.
grd_files:
A list of GRD files to collect the ids from.
src_dirs:
A list of directories to walk looking for source files.
"""
# Collect all the ids into a large map
all_ids = set()
file_id_map = {}
for y in grd_files:
handler = GrdIDExtractor()
xml.sax.parse(y, handler)
files_ids = handler.allIDs()
file_id_map[y] = files_ids
all_ids |= files_ids
# The regex that will be used to check sources
id_regex = re.compile('IDS_[A-Z0-9_]+')
# Make sure the regex matches every id found.
got_err = False
for x in all_ids:
match = id_regex.search(x)
if match is None:
print 'ERROR: "%s" did not match our regex' % (x)
got_err = True
if not match.group(0) is x:
print 'ERROR: "%s" did not fully match our regex' % (x)
got_err = True
if got_err:
return 1
# The regex for deciding what is a source file
src_regex = re.compile(r'\.(([chm])|(mm)|(cc)|(cp)|(cpp)|(xib)|(py))$')
ids_left = all_ids.copy()
# Scanning time.
for src_dir in src_dirs:
for root, dirs, files in os.walk(src_dir):
# Remove svn directories from recursion
if '.svn' in dirs:
dirs.remove('.svn')
for file in files:
if src_regex.search(file.lower()):
full_path = os.path.join(root, file)
src_file_contents = open(full_path).read()
for match in sorted(set(id_regex.findall(src_file_contents))):
if match in ids_left:
ids_left.remove(match)
if DEBUG:
if not match in all_ids:
print '%s had "%s", which was not in the found IDs' % \
(full_path, match)
elif DEBUG > 1:
full_path = os.path.join(root, file)
print 'Skipping %s.' % (full_path)
# Anything left?
if len(ids_left) > 0:
print 'The following ids are in GRD files, but *appear* to be unused:'
for file_path, file_ids in file_id_map.iteritems():
missing = ids_left.intersection(file_ids)
if len(missing) > 0:
print ' %s:' % (file_path)
print '\n'.join(' %s' % (x) for x in sorted(missing))
return 0
def main():
# script lives in src/tools
tools_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
src_dir = os.path.dirname(tools_dir)
# Collect the args into the right buckets
src_dirs = []
grd_files = []
for arg in sys.argv[1:]:
if arg.lower().endswith('.grd'):
grd_files.append(arg)
else:
src_dirs.append(arg)
# If no GRD files were given, default them:
if len(grd_files) == 0:
ash_base_dir = os.path.join(src_dir, 'ash')
chrome_dir = os.path.join(src_dir, 'chrome')
chrome_app_dir = os.path.join(chrome_dir, 'app')
chrome_app_res_dir = os.path.join(chrome_app_dir, 'resources')
device_base_dir = os.path.join(src_dir, 'device')
ui_dir = os.path.join(src_dir, 'ui')
ui_strings_dir = os.path.join(ui_dir, 'strings')
ui_chromeos_dir = os.path.join(ui_dir, 'chromeos')
grd_files = [
os.path.join(ash_base_dir, 'ash_strings.grd'),
os.path.join(ash_base_dir, 'resources', 'ash_resources.grd'),
os.path.join(chrome_app_dir, 'chromium_strings.grd'),
os.path.join(chrome_app_dir, 'generated_resources.grd'),
os.path.join(chrome_app_dir, 'google_chrome_strings.grd'),
os.path.join(chrome_app_res_dir, 'locale_settings.grd'),
os.path.join(chrome_app_res_dir, 'locale_settings_chromiumos.grd'),
os.path.join(chrome_app_res_dir, 'locale_settings_google_chromeos.grd'),
os.path.join(chrome_app_res_dir, 'locale_settings_linux.grd'),
os.path.join(chrome_app_res_dir, 'locale_settings_mac.grd'),
os.path.join(chrome_app_res_dir, 'locale_settings_win.grd'),
os.path.join(chrome_app_dir, 'theme', 'theme_resources.grd'),
os.path.join(chrome_dir, 'browser', 'browser_resources.grd'),
os.path.join(chrome_dir, 'common', 'common_resources.grd'),
os.path.join(chrome_dir, 'renderer', 'resources',
'renderer_resources.grd'),
os.path.join(device_base_dir, 'bluetooth', 'bluetooth_strings.grd'),
os.path.join(src_dir, 'extensions', 'extensions_strings.grd'),
os.path.join(src_dir, 'ui', 'resources', 'ui_resources.grd'),
os.path.join(src_dir, 'ui', 'webui', 'resources', 'webui_resources.grd'),
os.path.join(ui_strings_dir, 'app_locale_settings.grd'),
os.path.join(ui_strings_dir, 'ui_strings.grd'),
os.path.join(ui_chromeos_dir, 'ui_chromeos_strings.grd'),
]
# If no source directories were given, default them:
if len(src_dirs) == 0:
src_dirs = [
os.path.join(src_dir, 'app'),
os.path.join(src_dir, 'ash'),
os.path.join(src_dir, 'chrome'),
os.path.join(src_dir, 'components'),
os.path.join(src_dir, 'content'),
os.path.join(src_dir, 'device'),
os.path.join(src_dir, 'extensions'),
os.path.join(src_dir, 'ui'),
# nsNSSCertHelper.cpp has a bunch of ids
os.path.join(src_dir, 'third_party', 'mozilla_security_manager'),
os.path.join(chrome_dir, 'installer'),
]
return CheckForUnusedGrdIDsInSources(grd_files, src_dirs)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
pyQode/pyqode.python | test/test_backend/test_workers.py | 1 | 4597 | """
Test all workers in pyqode.python.backend.workers.
"""
import sys
import jedi
from pyqode.core.modes import CheckerMessages
from pyqode.core.share import Definition
try:
from future.builtins import str, open
except:
pass
from pyqode.python.backend import workers
from pyqode.python.widgets import code_edit
def test_calltips():
data = {
'code': "open(",
'line': 0,
'column': len('open('),
'path': None
}
results = workers.calltips(data)
assert len(results) == 6
def test_calltips_with_closing_paren():
data = {
'code': "open()",
'line': 0,
'column': len('open()'),
'path': None
}
results = workers.calltips(data)
assert len(results) == 0
def test_goto_assignments():
data = {
'code': "foo = 10;print(foo)",
'line': 0,
'column': len('foo = 10;print(foo)') - 1,
'path': None
}
results = workers.goto_assignments(data)
# todo: restore assertion once jedi#571 is resolved
# assert len(results) == 1
# definition = results[0]
# module, line, column, full_name = definition
# assert line == 0
# assert column == 0
data = {
'code': "foo = 10;print(foo)",
'line': 0,
'column': len('foo = 10;print(foo)'),
'path': None
}
results = workers.goto_assignments(data)
assert len(results) == 0
def test_extract_def():
code = """
import pyqode.python.widgets
import PyQt5.QtWidgets as QtWidgets
app = QtWidgets.QApplication([])
editor = pyqode.python.widgets.PyCodeEdit()
editor.file.open(__file__)
editor.show()
app.exec()
"""
for definition in jedi.names(code):
result = workers._extract_def(definition, "")
assert result
def test_defined_names():
filename = __file__
with open(filename, 'r', encoding='utf-8') as file:
code = file.read()
results = workers.defined_names({'code': code, 'path': filename})
assert len(results)
definitions = []
for i, definition in enumerate(results):
d = Definition.from_dict(definition)
definitions.append(d)
if i:
assert d != definitions[i-1]
# now that the code changed, defined_names should return
# (True, [xxx, yyy, ...])
code += "\ndef foo():\n print('bar')"
results = workers.defined_names({'code': code, 'path': filename})
assert len(results)
def test_quick_doc():
data = {
'code': "open",
'line': 0,
'column': 1,
'path': None
}
results = workers.quick_doc(data)
assert len(results) == 1
assert isinstance(results[0], str)
def test_run_pep8():
messages = workers.run_pep8(
{'code': 'print("foo")\n', 'path': None,
'max_line_length': 79, 'ignore_rules': []})
assert len(messages) == 0
messages = workers.run_pep8(
{'code': 'print("foo"); print("bar")\n', 'path': None,
'max_line_length': 79, 'ignore_rules': []})
assert len(messages) == 1
assert messages[0][2] == 0
def test_run_pyflakes():
messages = workers.run_pyflakes(
{'code': None, 'path': __file__, 'encoding': 'utf-8',
'max_line_length': 79, 'ignore_rules': []})
assert len(messages) == 0
# OK
messages = workers.run_pyflakes(
{'code': 'print("foo")\n', 'path': __file__, 'encoding': 'utf-8'})
assert len(messages) == 0
# Syntax error
messages = workers.run_pyflakes(
{'code': 'print("foo\n', 'path': __file__,
'encoding': 'utf-8'})
assert len(messages) == 1
assert messages[0][2] == 0
assert messages[0][1] == CheckerMessages.ERROR
# unused import
messages = workers.run_pyflakes(
{'code': 'import sys; print("foo");\n', 'path': __file__,
'encoding': 'utf-8'})
assert len(messages) == 1
msg, status, line = messages[0]
assert "'sys' imported but unused" in msg
assert line == 0
def test_completions():
provider = workers.JediCompletionProvider()
completions = provider.complete('import ', 0, len('import '), None,
'utf-8', '')
assert len(completions) > 10
def test_make_icon():
class Name:
@property
def string(self):
return 'Foo'
assert workers.icon_from_typename(Name(), 'PARAM') is not None
assert workers.icon_from_typename(Name(), 'FOO') is None
assert workers.icon_from_typename('_protected', 'PARAM') is not None
assert workers.icon_from_typename('__private', 'PARAM') is not None
| mit |
jerbob92/CouchPotatoServer | libs/requests/packages/charade/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| gpl-3.0 |
thinkopensolutions/geraldo | site/newsite/site-geraldo/appengine_django/management/commands/flush.py | 60 | 1127 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Overrides the default Django flush command.
"""
help = 'Clears the current datastore and loads the initial fixture data.'
def run_from_argv(self, argv):
from django.db import connection
connection.flush()
from django.core.management import call_command
call_command('loaddata', 'initial_data')
def handle(self, *args, **kwargs):
self.run_from_argv(None)
| lgpl-3.0 |
alessandrocamilli/l10n-italy | l10n_it_vat_registries/wizard/print_registro_iva.py | 7 | 4516 | # -*- coding: utf-8 -*-
#
#
# Copyright (C) 2011 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright (C) 2014-2015 Agile Business Group
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, fields, api, _
from openerp.exceptions import Warning as UserError
class WizardRegistroIva(models.TransientModel):
@api.model
def _get_period(self):
ctx = dict(self._context or {}, account_period_prefer_normal=True)
period_ids = self.env[
'account.period'].with_context(context=ctx).find()
return period_ids
_name = "wizard.registro.iva"
_rec_name = "type"
period_ids = fields.Many2many(
'account.period',
'registro_iva_periods_rel',
'period_id',
'registro_id',
string='Periods',
default=_get_period,
help='Select periods you want to retrieve documents from',
required=True)
type = fields.Selection([
('customer', 'Customer Invoices'),
('supplier', 'Supplier Invoices'),
('corrispettivi', 'Corrispettivi'),
], 'Layout', required=True,
default='customer')
tax_registry_id = fields.Many2one('account.tax.registry', 'VAT registry')
journal_ids = fields.Many2many(
'account.journal',
'registro_iva_journals_rel',
'journal_id',
'registro_id',
string='Journals',
help='Select journals you want to retrieve documents from',
required=True)
tax_sign = fields.Float(
string='Tax amount sign',
default=1.0,
help="Use -1 you have negative tax \
amounts and you want to print them prositive")
message = fields.Char(string='Message', size=64, readonly=True)
only_totals = fields.Boolean(
string='Prints only totals')
fiscal_page_base = fields.Integer('Last printed page', required=True)
@api.onchange('tax_registry_id')
def on_change_vat_registry(self):
self.journal_ids = self.tax_registry_id.journal_ids
self.type = self.tax_registry_id.type
if self.type:
if self.type == 'supplier':
self.tax_sign = -1
else:
self.tax_sign = 1
def print_registro(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids[0], context=context)
move_obj = self.pool['account.move']
move_ids = move_obj.search(cr, uid, [
('journal_id', 'in', [j.id for j in wizard.journal_ids]),
('period_id', 'in', [p.id for p in wizard.period_ids]),
('state', '=', 'posted'),
], order='date, name')
if not move_ids:
raise UserError(_('No documents found in the current selection'))
datas = {}
datas_form = {}
datas_form['period_ids'] = [p.id for p in wizard.period_ids]
datas_form['journal_ids'] = [j.id for j in wizard.journal_ids]
datas_form['tax_sign'] = wizard.tax_sign
datas_form['fiscal_page_base'] = wizard.fiscal_page_base
datas_form['registry_type'] = wizard.type
if wizard.tax_registry_id:
datas_form['tax_registry_name'] = wizard.tax_registry_id.name
else:
datas_form['tax_registry_name'] = ''
datas_form['only_totals'] = wizard.only_totals
report_name = 'l10n_it_vat_registries.report_registro_iva'
datas = {
'ids': move_ids,
'model': 'account.move',
'form': datas_form
}
return self.pool['report'].get_action(
cr, uid, [], report_name, data=datas, context=context)
@api.onchange('type')
def on_type_changed(self):
if self.type:
if self.type == 'supplier':
self.tax_sign = -1
else:
self.tax_sign = 1
| agpl-3.0 |
benvermaercke/pyqtgraph | pyqtgraph/widgets/LayoutWidget.py | 49 | 3425 | from ..Qt import QtGui, QtCore
__all__ = ['LayoutWidget']
class LayoutWidget(QtGui.QWidget):
"""
Convenience class used for laying out QWidgets in a grid.
(It's just a little less effort to use than QGridLayout)
"""
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.layout = QtGui.QGridLayout()
self.setLayout(self.layout)
self.items = {}
self.rows = {}
self.currentRow = 0
self.currentCol = 0
def nextRow(self):
"""Advance to next row for automatic widget placement"""
self.currentRow += 1
self.currentCol = 0
def nextColumn(self, colspan=1):
"""Advance to next column, while returning the current column number
(generally only for internal use--called by addWidget)"""
self.currentCol += colspan
return self.currentCol-colspan
def nextCol(self, *args, **kargs):
"""Alias of nextColumn"""
return self.nextColumn(*args, **kargs)
def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs):
"""
Create a QLabel with *text* and place it in the next available cell (or in the cell specified)
All extra keyword arguments are passed to QLabel().
Returns the created widget.
"""
text = QtGui.QLabel(text, **kargs)
self.addItem(text, row, col, rowspan, colspan)
return text
def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs):
"""
Create an empty LayoutWidget and place it in the next available cell (or in the cell specified)
All extra keyword arguments are passed to :func:`LayoutWidget.__init__ <pyqtgraph.LayoutWidget.__init__>`
Returns the created widget.
"""
layout = LayoutWidget(**kargs)
self.addItem(layout, row, col, rowspan, colspan)
return layout
def addWidget(self, item, row=None, col=None, rowspan=1, colspan=1):
"""
Add a widget to the layout and place it in the next available cell (or in the cell specified).
"""
if row == 'next':
self.nextRow()
row = self.currentRow
elif row is None:
row = self.currentRow
if col is None:
col = self.nextCol(colspan)
if row not in self.rows:
self.rows[row] = {}
self.rows[row][col] = item
self.items[item] = (row, col)
self.layout.addWidget(item, row, col, rowspan, colspan)
def getWidget(self, row, col):
"""Return the widget in (*row*, *col*)"""
return self.row[row][col]
#def itemIndex(self, item):
#for i in range(self.layout.count()):
#if self.layout.itemAt(i).graphicsItem() is item:
#return i
#raise Exception("Could not determine index of item " + str(item))
#def removeItem(self, item):
#"""Remove *item* from the layout."""
#ind = self.itemIndex(item)
#self.layout.removeAt(ind)
#self.scene().removeItem(item)
#r,c = self.items[item]
#del self.items[item]
#del self.rows[r][c]
#self.update()
#def clear(self):
#items = []
#for i in list(self.items.keys()):
#self.removeItem(i)
| mit |
tinchoss/Python_Android | python/src/Lib/ctypes/test/test_as_parameter.py | 66 | 6600 | import unittest
from ctypes import *
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
try:
CALLBACK_FUNCTYPE = WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
CALLBACK_FUNCTYPE = CFUNCTYPE
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class BasicWrapTestCase(unittest.TestCase):
def wrap(self, param):
return param
def test_wchar_parm(self):
try:
c_wchar
except NameError:
return
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(self.wrap(1), self.wrap(u"x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
self.failUnlessEqual(result, 139)
self.failUnless(type(result), int)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.failUnlessEqual(pointer(v).contents.value, 42)
result = f(self.wrap(pointer(v)))
self.failUnlessEqual(type(result), POINTER(c_int))
self.failUnlessEqual(result.contents.value, 42)
# This one works, too.
result = f(self.wrap(pointer(v)))
self.failUnlessEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(self.wrap(p))
self.failUnlessEqual(result.contents.value, 99)
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(self.wrap(2**18), self.wrap(cb))
self.failUnlessEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.failUnlessEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.failUnlessEqual(result, -18)
result = f(self.wrap(-10), self.wrap(cb))
self.failUnlessEqual(result, -18)
AnotherCallback = CALLBACK_FUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, self.wrap(-10), self.wrap(cb))
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.failUnlessEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.failUnlessEqual(result, -18)
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.failUnless(isinstance(value, (int, long)))
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.failUnlessEqual(13577625587, int(f(self.wrap(1000000000000), self.wrap(cb))))
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.failUnlessEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(self.wrap(ptin), byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.failUnlessEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(self.wrap(inp))
self.failUnlessEqual((s2h.x, s2h.y), (99*2, 88*3))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(self.wrap(inp))
self.failUnlessEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
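# ctypes falls back to an object's _as_parameter_ attribute when it
# converts call arguments, so wrapping every argument in these tests
# exercises that protocol for both a plain attribute and a property.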
class AsParamWrapper(object):
def __init__(self, param):
self._as_parameter_ = param
class AsParamWrapperTestCase(BasicWrapTestCase):
wrap = AsParamWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamPropertyWrapper(object):
def __init__(self, param):
self._param = param
def getParameter(self):
return self._param
_as_parameter_ = property(getParameter)
class AsParamPropertyWrapperTestCase(BasicWrapTestCase):
wrap = AsParamPropertyWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
chand3040/sree_odoo | openerp/addons/mass_mailing/wizard/test_mailing.py | 148 | 1994 | # -*- coding: utf-8 -*-
from openerp import tools
from openerp.osv import osv, fields
class TestMassMailing(osv.TransientModel):
_name = 'mail.mass_mailing.test'
_description = 'Sample Mail Wizard'
_columns = {
'email_to': fields.char('Recipients', required=True,
help='Comma-separated list of email addresses.'),
'mass_mailing_id': fields.many2one('mail.mass_mailing', 'Mailing', required=True),
}
_defaults = {
'email_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
}
def send_mail_test(self, cr, uid, ids, context=None):
Mail = self.pool['mail.mail']
for wizard in self.browse(cr, uid, ids, context=context):
mailing = wizard.mass_mailing_id
test_emails = tools.email_split(wizard.email_to)
mail_ids = []
for test_mail in test_emails:
mail_values = {
'email_from': mailing.email_from,
'reply_to': mailing.reply_to,
'email_to': test_mail,
'subject': mailing.name,
'body_html': '',
'notification': True,
'mailing_id': mailing.id,
}
mail_mail_obj = Mail.browse(cr, uid, Mail.create(cr, uid, mail_values, context=context), context=context)
unsubscribe_url = Mail._get_unsubscribe_url(cr, uid, mail_mail_obj, test_mail, context=context)
body = tools.append_content_to_html(mailing.body_html, unsubscribe_url, plaintext=False, container_tag='p')
# Write back the body with the unsubscribe link appended, not the raw template.
Mail.write(cr, uid, mail_mail_obj.id, {'body_html': body}, context=context)
mail_ids.append(mail_mail_obj.id)
Mail.send(cr, uid, mail_ids, context=context)
self.pool['mail.mass_mailing'].write(cr, uid, [mailing.id], {'state': 'test'}, context=context)
return True
| agpl-3.0 |
camptocamp/odoo | addons/l10n_be/wizard/l10n_be_partner_vat_listing.py | 34 | 14765 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Corrections & modifications by Noviat nv/sa, (http://www.noviat.be):
# - VAT listing based upon year instead of fiscal year
# - sql query adapted to select only 'tax-out' move lines
# - extra button to print readable PDF report
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.tools.translate import _
from openerp.osv import fields, osv
from openerp.report import report_sxw
class vat_listing_clients(osv.osv_memory):
_name = 'vat.listing.clients'
_columns = {
'name': fields.char('Client Name', size=32),
'vat': fields.char('VAT', size=64),
'turnover': fields.float('Base Amount'),
'vat_amount': fields.float('VAT Amount'),
}
class partner_vat(osv.osv_memory):
""" Vat Listing """
_name = "partner.vat"
def get_partner(self, cr, uid, ids, context=None):
obj_period = self.pool.get('account.period')
obj_partner = self.pool.get('res.partner')
obj_vat_lclient = self.pool.get('vat.listing.clients')
obj_model_data = self.pool.get('ir.model.data')
obj_module = self.pool.get('ir.module.module')
data = self.read(cr, uid, ids)[0]
year = data['year']
date_start = year + '-01-01'
date_stop = year + '-12-31'
if context.get('company_id', False):
company_id = context['company_id']
else:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
period_ids = obj_period.search(cr, uid, [('date_start' ,'>=', date_start), ('date_stop','<=',date_stop), ('company_id','=',company_id)])
if not period_ids:
raise osv.except_osv(_('Insufficient Data!'), _('No data for the selected year.'))
partners = []
partner_ids = obj_partner.search(cr, uid, [('vat_subjected', '!=', False), ('vat','ilike','BE%')], context=context)
if not partner_ids:
raise osv.except_osv(_('Error'),_('No Belgian contact with a VAT number in your database.'))
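# The tax-code filters below refer to the Belgian VAT return grids
# (an assumption based on the standard l10n_be chart of taxes):
# 00/01/02/03 cover turnover at the different VAT rates, 45/49 are
# co-contractor operations and credit-note corrections, and 54/64 are
# VAT due and VAT on credit notes respectively.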
cr.execute("""SELECT sub1.partner_id, sub1.name, sub1.vat, sub1.turnover, sub2.vat_amount
FROM (SELECT l.partner_id, p.name, p.vat, SUM(CASE WHEN c.code ='49' THEN -l.tax_amount ELSE l.tax_amount END) as turnover
FROM account_move_line l
LEFT JOIN res_partner p ON l.partner_id = p.id
LEFT JOIN account_tax_code c ON l.tax_code_id = c.id
WHERE c.code IN ('00','01','02','03','45','49')
AND l.partner_id IN %s
AND l.period_id IN %s
GROUP BY l.partner_id, p.name, p.vat) AS sub1
LEFT JOIN (SELECT l2.partner_id, SUM(CASE WHEN c2.code ='64' THEN -l2.tax_amount ELSE l2.tax_amount END) as vat_amount
FROM account_move_line l2
LEFT JOIN account_tax_code c2 ON l2.tax_code_id = c2.id
WHERE c2.code IN ('54','64')
AND l2.partner_id IN %s
AND l2.period_id IN %s
GROUP BY l2.partner_id) AS sub2 ON sub1.partner_id = sub2.partner_id
""",(tuple(partner_ids),tuple(period_ids),tuple(partner_ids),tuple(period_ids)))
for record in cr.dictfetchall():
record['vat'] = record['vat'].replace(' ','').upper()
if record['turnover'] >= data['limit_amount']:
id_client = obj_vat_lclient.create(cr, uid, record, context=context)
partners.append(id_client)
if not partners:
raise osv.except_osv(_('Insufficient Data!'), _('No data found for the selected year.'))
context.update({'partner_ids': partners, 'year': data['year'], 'limit_amount': data['limit_amount']})
model_data_ids = obj_model_data.search(cr, uid, [('model','=','ir.ui.view'), ('name','=','view_vat_listing')])
resource_id = obj_model_data.read(cr, uid, model_data_ids, fields=['res_id'])[0]['res_id']
return {
'name': _('Vat Listing'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'partner.vat.list',
'views': [(resource_id,'form')],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
}
_columns = {
'year': fields.char('Year', size=4, required=True),
'limit_amount': fields.integer('Limit Amount', required=True),
}
_defaults={
'year': lambda *a: str(int(time.strftime('%Y'))-1),
'limit_amount': 250,
}
class partner_vat_list(osv.osv_memory):
""" Partner Vat Listing """
_name = "partner.vat.list"
_columns = {
'partner_ids': fields.many2many('vat.listing.clients', 'vat_partner_rel', 'vat_id', 'partner_id', 'Clients', help='You can remove clients/partners which you do not want to show in xml file'),
'name': fields.char('File Name', size=32),
'file_save' : fields.binary('Save File', readonly=True),
'comments': fields.text('Comments'),
}
def _get_partners(self, cr, uid, context=None):
return context.get('partner_ids', [])
_defaults={
'partner_ids': _get_partners,
}
def _get_datas(self, cr, uid, ids, context=None):
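# Entries read back from the transient many2many can be either plain
# ids or (0, 0, values) command triples; both shapes are normalised
# into plain dicts here before the running totals are computed.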
obj_vat_lclient = self.pool.get('vat.listing.clients')
datas = []
data = self.read(cr, uid, ids)[0]
for partner in data['partner_ids']:
if isinstance(partner, list) and partner:
datas.append(partner[2])
else:
client_data = obj_vat_lclient.read(cr, uid, partner, context=context)
datas.append(client_data)
client_datas = []
seq = 0
sum_tax = 0.00
sum_turnover = 0.00
amount_data = {}
for line in datas:
if not line:
continue
seq += 1
sum_tax += line['vat_amount']
sum_turnover += line['turnover']
vat = line['vat'].replace(' ','').upper()
amount_data ={
'seq': str(seq),
'vat': vat,
'only_vat': vat[2:],
'turnover': '%.2f' %line['turnover'],
'vat_amount': '%.2f' %line['vat_amount'],
'sum_tax': '%.2f' %sum_tax,
'sum_turnover': '%.2f' %sum_turnover,
'partner_name': line['name'],
}
client_datas += [amount_data]
return client_datas
def create_xml(self, cr, uid, ids, context=None):
obj_sequence = self.pool.get('ir.sequence')
obj_users = self.pool.get('res.users')
obj_partner = self.pool.get('res.partner')
obj_model_data = self.pool.get('ir.model.data')
seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
obj_cmpny = obj_users.browse(cr, uid, uid, context=context).company_id
company_vat = obj_cmpny.partner_id.vat
if not company_vat:
raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with the company.'))
company_vat = company_vat.replace(' ','').upper()
SenderId = company_vat[2:]
issued_by = company_vat[:2]
seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
dnum = company_vat[2:] + seq_declarantnum[-4:]
street = city = country = phone = email = zip = ''
addr = obj_partner.address_get(cr, uid, [obj_cmpny.partner_id.id], ['invoice'])
if addr.get('invoice',False):
ads = obj_partner.browse(cr, uid, [addr['invoice']], context=context)[0]
phone = ads.phone and ads.phone.replace(' ','') or ''
email = ads.email or ''
name = ads.name or ''
city = ads.city or ''
zip = obj_partner.browse(cr, uid, ads.id, context=context).zip or ''
if not city:
city = ''
if ads.street:
street = ads.street
if ads.street2:
street += ' ' + ads.street2
if ads.country_id:
country = ads.country_id.code
data = self.read(cr, uid, ids)[0]
comp_name = obj_cmpny.name
if not email:
raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
if not phone:
raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
annual_listing_data = {
'issued_by': issued_by,
'company_vat': company_vat,
'comp_name': comp_name,
'street': street,
'zip': zip,
'city': city,
'country': country,
'email': email,
'phone': phone,
'SenderId': SenderId,
'period': context['year'],
'comments': data['comments'] or ''
}
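# The generated document follows the Belgian ClientListingConsignment
# XML schema: a Representative block for the sender, a Declarant block
# for the company, then one Client element per partner kept by
# _get_datas(), wrapped in a ClientListing envelope carrying the totals.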
data_file = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:ClientListingConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/ClientListingConsignment" ClientListingsNbr="1">
<ns2:Representative>
<RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(SenderId)s</RepresentativeID>
<Name>%(comp_name)s</Name>
<Street>%(street)s</Street>
<PostCode>%(zip)s</PostCode>
<City>%(city)s</City>"""
if annual_listing_data['country']:
data_file +="\n\t\t<CountryCode>%(country)s</CountryCode>"
data_file += """
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Representative>"""
data_file = data_file % annual_listing_data
data_comp = """
<ns2:Declarant>
<VATNumber>%(SenderId)s</VATNumber>
<Name>%(comp_name)s</Name>
<Street>%(street)s</Street>
<PostCode>%(zip)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Declarant>
<ns2:Period>%(period)s</ns2:Period>
""" % annual_listing_data
# Turnover and Farmer tags are not included
client_datas = self._get_datas(cr, uid, ids, context=context)
if not client_datas:
raise osv.except_osv(_('Insufficient Data!'),_('No data available for the client.'))
data_client_info = ''
for amount_data in client_datas:
data_client_info += """
<ns2:Client SequenceNumber="%(seq)s">
<ns2:CompanyVATNumber issuedBy="BE">%(only_vat)s</ns2:CompanyVATNumber>
<ns2:TurnOver>%(turnover)s</ns2:TurnOver>
<ns2:VATAmount>%(vat_amount)s</ns2:VATAmount>
</ns2:Client>""" % amount_data
amount_data_begin = client_datas[-1]
amount_data_begin.update({'dnum':dnum})
data_begin = """
<ns2:ClientListing SequenceNumber="1" ClientsNbr="%(seq)s" DeclarantReference="%(dnum)s"
TurnOverSum="%(sum_turnover)s" VATAmountSum="%(sum_tax)s">
""" % amount_data_begin
data_end = """
<ns2:Comment>%(comments)s</ns2:Comment>
</ns2:ClientListing>
</ns2:ClientListingConsignment>
""" % annual_listing_data
data_file += data_begin + data_comp + data_client_info + data_end
file_save = base64.encodestring(data_file.encode('utf8'))
self.write(cr, uid, ids, {'file_save':file_save, 'name':'vat_list.xml'}, context=context)
model_data_ids = obj_model_data.search(cr, uid, [('model','=','ir.ui.view'), ('name','=','view_vat_listing_result')])
resource_id = obj_model_data.read(cr, uid, model_data_ids, fields=['res_id'])[0]['res_id']
return {
'name': _('XML File has been Created'),
'res_id': ids[0],
'view_type': 'form',
'view_mode': 'form',
'res_model': 'partner.vat.list',
'views': [(resource_id,'form')],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
}
def print_vatlist(self, cr, uid, ids, context=None):
if context is None:
context = {}
datas = {'ids': []}
datas['model'] = 'res.company'
datas['year'] = context['year']
datas['limit_amount'] = context['limit_amount']
datas['client_datas'] = self._get_datas(cr, uid, ids, context=context)
if not datas['client_datas']:
raise osv.except_osv(_('Error!'), _('No record to print.'))
return self.pool['report'].get_action(
cr, uid, [], 'l10n_be.report_l10nvatpartnerlisting', data=datas, context=context
)
class partner_vat_listing_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(partner_vat_listing_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
})
def set_context(self, objects, data, ids, report_type=None):
client_datas = data['client_datas']
self.localcontext.update( {
'year': data['year'],
'sum_turnover': client_datas[-1]['sum_turnover'],
'sum_tax': client_datas[-1]['sum_tax'],
'client_list': client_datas,
})
super(partner_vat_listing_print, self).set_context(objects, data, ids)
class wrapped_vat_listing_print(osv.AbstractModel):
_name = 'report.l10n_be.report_l10nvatpartnerlisting'
_inherit = 'report.abstract_report'
_template = 'l10n_be.report_l10nvatpartnerlisting'
_wrapped_report_class = partner_vat_listing_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ramirocf/gem5-pipe | src/arch/x86/isa/insts/simd128/floating_point/data_conversion/convert_floating_point_to_gpr_integer.py | 91 | 4079 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
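# Note on the encodings below: CVTSS2SI/CVTSD2SI convert using the
# rounding mode currently selected in MXCSR, which is why their cvtf2i
# microops OR an extra bit into the Scalar ext field ('| 4'), while the
# truncating CVTT* variants omit it and round toward zero. (This
# reading of the ext bit is inferred from the surrounding code.)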
microcode = '''
def macroop CVTSS2SI_R_XMM {
cvtf2i ufp1, xmmlm, srcSize=4, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSS2SI_R_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=4, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSS2SI_R_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=4, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSD2SI_R_XMM {
cvtf2i ufp1, xmmlm, srcSize=8, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSD2SI_R_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTSD2SI_R_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext = Scalar + "| 4"
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSS2SI_R_XMM {
cvtf2i ufp1, xmmlm, srcSize=4, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSS2SI_R_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=4, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSS2SI_R_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=4, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSD2SI_R_XMM {
cvtf2i ufp1, xmmlm, srcSize=8, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSD2SI_R_M {
ldfp ufp1, seg, sib, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
def macroop CVTTSD2SI_R_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
cvtf2i ufp1, ufp1, srcSize=8, destSize=dsz, ext=Scalar
mov2int reg, ufp1, size=dsz
};
'''
| bsd-3-clause |
rickmendes/ansible-modules-extras | windows/win_regedit.py | 44 | 4326 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Keech <akeech@chathamfinancial.com>, Josh Ludwig <jludwig@chathamfinancial.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_regedit
version_added: "2.0"
short_description: Add, Edit, or Remove Registry Keys and Values
description:
- Add, Edit, or Remove Registry Keys and Values using ItemProperties Cmdlets
options:
key:
description:
- Name of Registry Key
required: true
default: null
aliases: []
value:
description:
- Name of Registry Value
required: true
default: null
aliases: []
data:
description:
- Registry Value Data. Binary data should be expressed as a yaml byte array or as comma separated hex values. An easy way to generate this is to run C(regedit.exe) and use the I(Export) option to save the registry values to a file. In the exported file binary values will look like C(hex:be,ef,be,ef). The C(hex:) prefix is optional.
required: false
default: null
aliases: []
datatype:
description:
- Registry Value Data Type
required: false
choices:
- binary
- dword
- expandstring
- multistring
- string
- qword
default: string
aliases: []
state:
description:
- State of Registry Value
required: false
choices:
- present
- absent
default: present
aliases: []
author: "Adam Keech (@smadam813), Josh Ludwig (@joshludwig)"
'''
EXAMPLES = '''
# Creates Registry Key called MyCompany.
win_regedit:
key: HKCU:\Software\MyCompany
# Creates Registry Key called MyCompany,
# a value within MyCompany Key called "hello", and
# data for the value "hello" containing "world".
win_regedit:
key: HKCU:\Software\MyCompany
value: hello
data: world
# Creates Registry Key called MyCompany,
# a value within MyCompany Key called "hello", and
# data for the value "hello" containing "1337" as type "dword".
win_regedit:
key: HKCU:\Software\MyCompany
value: hello
data: 1337
datatype: dword
# Creates Registry Key called MyCompany,
# a value within MyCompany Key called "hello", and
# binary data for the value "hello" as type "binary"
# data expressed as comma separated list
win_regedit:
key: HKCU:\Software\MyCompany
value: hello
data: hex:be,ef,be,ef,be,ef,be,ef,be,ef
datatype: binary
# Creates Registry Key called MyCompany,
# a value within MyCompany Key called "hello", and
# binary data for the value "hello" as type "binary"
# data expressed as yaml array of bytes
win_regedit:
key: HKCU:\Software\MyCompany
value: hello
data: [0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef,0xbe,0xef]
datatype: binary
# Delete Registry Key MyCompany
# NOTE: Not specifying a value will delete the root key which means
# all values will be deleted
win_regedit:
key: HKCU:\Software\MyCompany
state: absent
# Delete Registry Value "hello" from MyCompany Key
win_regedit:
key: HKCU:\Software\MyCompany
value: hello
state: absent
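# Hypothetical example: creates a value "path" of type "expandstring",
# so environment variables are expanded when the value is read.
win_regedit:
key: HKCU:\Software\MyCompany
value: path
data: '%USERPROFILE%\bin'
datatype: expandstring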
# Ensure registry paths containing spaces are quoted.
# Creates Registry Key called 'My Company'.
win_regedit:
key: 'HKCU:\Software\My Company'
'''
RETURN = '''
data_changed:
description: whether this invocation changed the data in the registry value
returned: success
type: boolean
sample: False
data_type_changed:
description: whether this invocation changed the datatype of the registry value
returned: success
type: boolean
sample: True
'''
| gpl-3.0 |
photoninger/ansible | lib/ansible/modules/network/f5/bigip_vcmp_guest.py | 23 | 23915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_vcmp_guest
short_description: Manages vCMP guests on a BIG-IP
description:
- Manages vCMP guests on a BIG-IP. This functionality only exists on
actual hardware and must be enabled by provisioning C(vcmp) with the
C(bigip_provision) module.
version_added: "2.5"
options:
name:
description:
- The name of the vCMP guest to manage.
required: True
vlans:
description:
- VLANs that the guest uses to communicate with other guests, the host, and with
the external network. The available VLANs in the list are those that are
currently configured on the vCMP host.
- The order of these VLANs is not important; in fact, it's ignored. This module will
order the VLANs for you automatically. Therefore, if you deliberately re-order them
in subsequent tasks, you will find that this module will B(not) register a change.
initial_image:
description:
- Specifies the base software release ISO image file for installing the TMOS
hypervisor instance and any licensed BIG-IP modules onto the guest's virtual
disk. When creating a new guest, this parameter is required.
mgmt_network:
description:
- Specifies the method by which the management address is used in the vCMP guest.
- When C(bridged), specifies that the guest can communicate with the vCMP host's
management network.
- When C(isolated), specifies that the guest is isolated from the vCMP host's
management network. In this case, the only way that a guest can communicate
with the vCMP host is through the console port or through a self IP address
on the guest that allows traffic through port 22.
- When C(host only), prevents the guest from installing images and hotfixes other
than those provided by the hypervisor.
- If the guest setting is C(isolated) or C(host only), the C(mgmt_address) does
not apply.
- Concerning mode changing, changing C(bridged) to C(isolated) causes the vCMP
host to remove all of the guest's management interfaces from its bridged
management network. This immediately disconnects the guest's VMs from the
physical management network. Changing C(isolated) to C(bridged) causes the
vCMP host to dynamically add the guest's management interfaces to the bridged
management network. This immediately connects all of the guest's VMs to the
physical management network. Changing this property while the guest is in the
C(configured) or C(provisioned) state has no immediate effect.
choices:
- bridged
- isolated
- host only
delete_virtual_disk:
description:
- When C(state) is C(absent), will additionally delete the virtual disk associated
with the vCMP guest. By default, this value is C(no).
default: no
mgmt_address:
description:
- Specifies the IP address, and subnet or subnet mask that you use to access
the guest when you want to manage a module running within the guest. This
parameter is required if the C(mgmt_network) parameter is C(bridged).
- When creating a new guest, if you do not specify a network or network mask,
a default of C(/24) (C(255.255.255.0)) will be assumed.
mgmt_route:
description:
- Specifies the gateway address for the C(mgmt_address).
- If this value is not specified when creating a new guest, it is set to C(none).
- The value C(none) can be used during an update to remove this value.
state:
description:
- The state of the vCMP guest on the system. Each state implies the actions of
all states before it.
- When C(configured), guarantees that the vCMP guest exists with the provided
attributes. Additionally, ensures that the vCMP guest is turned off.
- When C(disabled), behaves the same as C(configured); the name of this state
is just a more understandable convenience for the user.
- When C(provisioned), will ensure that the guest is created and installed.
This state will not start the guest; use C(deployed) for that. This state
is one step beyond C(present) as C(present) will not install the guest;
it only sets up the configuration for it to be installed.
- When C(present), ensures the guest is properly provisioned and starts
the guest so that it is in a running state.
- When C(absent), removes the vCMP from the system.
default: "present"
choices:
- configured
- disabled
- provisioned
- present
- absent
cores_per_slot:
description:
- Specifies the number of cores that the system allocates to the guest.
- Each core represents a portion of CPU and memory. Therefore, the amount of
memory allocated per core is directly tied to the amount of CPU. This amount
of memory varies per hardware platform type.
- The number you can specify depends on the type of hardware you have.
- In the event of a reboot, the system persists the guest to the same slot on
which it ran prior to the reboot.
partition:
description:
- Device partition to manage resources on.
default: Common
notes:
- This module can take a lot of time to deploy vCMP guests. This is an intrinsic
limitation of the vCMP system because it is booting real VMs on the BIG-IP
device. This boot time is very similar in length to the time it takes to
boot VMs on any other virtualization platform; public or private.
- When BIG-IP starts, the VMs are booted sequentially; not in parallel. This
means that it is not unusual for a vCMP host with many guests to take a
long time (60+ minutes) to reboot and bring all the guests online. The
BIG-IP chassis will be available before all vCMP guests are online.
requirements:
- netaddr
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a vCMP guest
bigip_vcmp_guest:
name: foo
password: secret
server: lb.mydomain.com
state: present
user: admin
mgmt_network: bridged
mgmt_address: 10.20.30.40/24
delegate_to: localhost
- name: Create a vCMP guest with specific VLANs
bigip_vcmp_guest:
name: foo
password: secret
server: lb.mydomain.com
state: present
user: admin
mgmt_network: bridged
mgmt_address: 10.20.30.40/24
vlans:
- vlan1
- vlan2
delegate_to: localhost
- name: Remove vCMP guest and disk
bigip_vcmp_guest:
name: guest1
state: absent
delete_virtual_disk: yes
register: result
'''
RETURN = r'''
vlans:
description: The VLANs assigned to the vCMP guest, in their full path format.
returned: changed
type: list
sample: ['/Common/vlan1', '/Common/vlan2']
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from collections import namedtuple
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
try:
from netaddr import IPAddress, AddrFormatError, IPNetwork
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
try:
from f5.utils.responses.handlers import Stats
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'managementGw': 'mgmt_route',
'managementNetwork': 'mgmt_network',
'managementIp': 'mgmt_address',
'initialImage': 'initial_image',
'virtualDisk': 'virtual_disk',
'coresPerSlot': 'cores_per_slot'
}
api_attributes = [
'vlans', 'managementNetwork', 'managementIp', 'initialImage', 'managementGw',
'state'
]
returnables = [
'vlans', 'mgmt_network', 'mgmt_address', 'initial_image', 'mgmt_route',
'name'
]
updatables = [
'vlans', 'mgmt_network', 'mgmt_address', 'initial_image', 'mgmt_route',
'state'
]
def _fqdn_name(self, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(self.partition, value)
return value
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
@property
def mgmt_route(self):
if self._values['mgmt_route'] is None:
return None
elif self._values['mgmt_route'] == 'none':
return 'none'
try:
result = IPAddress(self._values['mgmt_route'])
return str(result)
except AddrFormatError:
raise F5ModuleError(
"The specified 'mgmt_route' is not a valid IP address"
)
@property
def mgmt_address(self):
if self._values['mgmt_address'] is None:
return None
try:
addr = IPNetwork(self._values['mgmt_address'])
result = '{0}/{1}'.format(addr.ip, addr.prefixlen)
return result
except AddrFormatError:
raise F5ModuleError(
"The specified 'mgmt_address' is not a valid IP address"
)
@property
def mgmt_tuple(self):
result = None
Destination = namedtuple('Destination', ['ip', 'subnet'])
try:
parts = self._values['mgmt_address'].split('/')
if len(parts) == 2:
result = Destination(ip=parts[0], subnet=parts[1])
elif len(parts) < 2:
result = Destination(ip=parts[0], subnet=None)
else:
raise F5ModuleError(
"The provided mgmt_address is malformed."
)
except ValueError:
result = Destination(ip=None, subnet=None)
return result
@property
def state(self):
if self._values['state'] == 'present':
return 'deployed'
elif self._values['state'] in ['configured', 'disabled']:
return 'configured'
return self._values['state']
@property
def vlans(self):
if self._values['vlans'] is None:
return None
result = [self._fqdn_name(x) for x in self._values['vlans']]
result.sort()
return result
@property
def initial_image(self):
if self._values['initial_image'] is None:
return None
if self.initial_image_exists(self._values['initial_image']):
return self._values['initial_image']
raise F5ModuleError(
"The specified 'initial_image' does not exist on the remote device"
)
def initial_image_exists(self, image):
collection = self.client.api.tm.sys.software.images.get_collection()
for resource in collection:
if resource.name.startswith(image):
return True
return False
class Changes(Parameters):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
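# Computes the per-parameter diff between the desired ('want') and the
# current ('have') configuration: a property defined on this class
# overrides the plain attribute comparison in __default().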
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def mgmt_address(self):
want = self.want.mgmt_tuple
if want.subnet is None:
raise F5ModuleError(
"A subnet must be specified when changing the mgmt_address"
)
if self.want.mgmt_address != self.have.mgmt_address:
return self.want.mgmt_address
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = Parameters(client=self.client, params=self.module.params)
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = Parameters(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state in ['configured', 'provisioned', 'deployed']:
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _fqdn_name(self, value):
if value is not None and not value.startswith('/'):
return '/{0}/{1}'.format(self.partition, value)
return value
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
result = self.client.api.tm.vcmp.guests.guest.exists(
name=self.want.name
)
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
if self.want.state == 'provisioned':
self.provision()
elif self.want.state == 'deployed':
self.deploy()
elif self.want.state == 'configured':
self.configure()
return True
def remove(self):
if self.module.check_mode:
return True
if self.want.delete_virtual_disk:
self.have = self.read_current_from_device()
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
if self.want.delete_virtual_disk:
self.remove_virtual_disk()
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
if self.want.mgmt_tuple.subnet is None:
self.want.update(dict(
mgmt_address='{0}/255.255.255.0'.format(self.want.mgmt_tuple.ip)
))
self.create_on_device()
if self.want.state == 'provisioned':
self.provision()
elif self.want.state == 'deployed':
self.deploy()
elif self.want.state == 'configured':
self.configure()
return True
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.vcmp.guests.guest.create(
name=self.want.name,
**params
)
def update_on_device(self):
params = self.changes.api_params()
resource = self.client.api.tm.vcmp.guests.guest.load(
name=self.want.name
)
resource.modify(**params)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
resource = self.client.api.tm.vcmp.guests.guest.load(
name=self.want.name
)
if resource:
resource.delete()
def read_current_from_device(self):
resource = self.client.api.tm.vcmp.guests.guest.load(
name=self.want.name
)
result = resource.attrs
return Parameters(params=result)
def remove_virtual_disk(self):
if self.virtual_disk_exists():
return self.remove_virtual_disk_from_device()
return False
def virtual_disk_exists(self):
collection = self.client.api.tm.vcmp.virtual_disks.get_collection()
for resource in collection:
check = '{0}/'.format(self.have.virtual_disk)
if resource.name.startswith(check):
return True
return False
def remove_virtual_disk_from_device(self):
collection = self.client.api.tm.vcmp.virtual_disks.get_collection()
for resource in collection:
check = '{0}/'.format(self.have.virtual_disk)
if resource.name.startswith(check):
resource.delete()
return True
return False
def is_configured(self):
"""Checks to see if guest is disabled
A disabled guest is fully disabled once their Stats go offline.
Until that point they are still in the process of disabling.
:return:
"""
try:
res = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name)
Stats(res.stats.load())
return False
except iControlUnexpectedHTTPError as ex:
if 'Object not found - ' in str(ex):
return True
raise
def is_provisioned(self):
try:
res = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name)
stats = Stats(res.stats.load())
if stats.stat['requestedState']['description'] == 'provisioned':
if stats.stat['vmStatus']['description'] == 'stopped':
return True
except iControlUnexpectedHTTPError:
pass
return False
def is_deployed(self):
try:
res = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name)
stats = Stats(res.stats.load())
if stats.stat['requestedState']['description'] == 'deployed':
if stats.stat['vmStatus']['description'] == 'running':
return True
except iControlUnexpectedHTTPError:
pass
return False
def configure(self):
if self.is_configured():
return False
self.configure_on_device()
self.wait_for_configured()
return True
def configure_on_device(self):
resource = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name)
resource.modify(state='configured')
def wait_for_configured(self):
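# Poll until is_configured() has reported success three times, one
# second apart; the counter never resets on failure and there is no
# overall timeout, which matches the other wait_for_* helpers below.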
nops = 0
while nops < 3:
if self.is_configured():
nops += 1
time.sleep(1)
def provision(self):
if self.is_provisioned():
return False
self.provision_on_device()
self.wait_for_provisioned()
def provision_on_device(self):
resource = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name)
resource.modify(state='provisioned')
def wait_for_provisioned(self):
nops = 0
while nops < 3:
if self.is_provisioned():
nops += 1
time.sleep(1)
def deploy(self):
if self.is_deployed():
return False
self.deploy_on_device()
self.wait_for_deployed()
def deploy_on_device(self):
resource = self.client.api.tm.vcmp.guests.guest.load(name=self.want.name)
resource.modify(state='deployed')
def wait_for_deployed(self):
nops = 0
while nops < 3:
if self.is_deployed():
nops += 1
time.sleep(1)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
vlans=dict(type='list'),
mgmt_network=dict(choices=['bridged', 'isolated', 'host only']),
mgmt_address=dict(),
mgmt_route=dict(),
initial_image=dict(),
state=dict(
default='present',
choices=['configured', 'disabled', 'provisioned', 'absent', 'present']
),
delete_virtual_disk=dict(
type='bool', default='no'
),
cores_per_slot=dict(type='int'),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_if = [
['mgmt_network', 'bridged', ['mgmt_address']]
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
if not HAS_NETADDR:
module.fail_json(msg="The python netaddr module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
varunr047/homefile | homeassistant/components/switch/enocean.py | 8 | 2530 | """
Support for EnOcean switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.enocean/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_ID)
from homeassistant.components import enocean
from homeassistant.helpers.entity import ToggleEntity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'EnOcean Switch'
DEPENDENCIES = ['enocean']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the EnOcean switch platform."""
dev_id = config.get(CONF_ID)
devname = config.get(CONF_NAME)
add_devices([EnOceanSwitch(dev_id, devname)])
class EnOceanSwitch(enocean.EnOceanDevice, ToggleEntity):
"""Representation of an EnOcean switch device."""
def __init__(self, dev_id, devname):
"""Initialize the EnOcean switch device."""
enocean.EnOceanDevice.__init__(self)
self.dev_id = dev_id
self._devname = devname
self._light = None
self._on_state = False
self._on_state2 = False
self.stype = "switch"
@property
def is_on(self):
"""Return whether the switch is on or off."""
return self._on_state
@property
def name(self):
"""Return the device name."""
return self._devname
def turn_on(self, **kwargs):
"""Turn on the switch."""
optional = [0x03, ]
optional.extend(self.dev_id)
optional.extend([0xff, 0x00])
self.send_command(data=[0xD2, 0x01, 0x00, 0x64, 0x00,
0x00, 0x00, 0x00, 0x00], optional=optional,
packet_type=0x01)
self._on_state = True
def turn_off(self, **kwargs):
"""Turn off the switch."""
optional = [0x03, ]
optional.extend(self.dev_id)
optional.extend([0xff, 0x00])
self.send_command(data=[0xD2, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00], optional=optional,
packet_type=0x01)
self._on_state = False
def value_changed(self, val):
"""Update the internal state of the switch."""
self._on_state = val
self.update_ha_state()
| mit |
xpansa/project-service | sale_order_project/models/sale.py | 19 | 2400 | # -*- coding: utf-8 -*-
###############################################################################
#
# Module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com).
# Copyright (C) 2010-2013 Akretion LDTA (<http://www.akretion.com>)
# @author Sébastien BEAU <sebastien.beau@akretion.com>
# @author Benoît GUILLOT <benoit.guillot@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import api, fields, models
from datetime import date
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.one
@api.depends('project_id')
def _compute_related_project_id(self):
self.related_project_id = (
self.project_id.use_tasks and
self.env['project.project'].search(
[('analytic_account_id', '=', self.project_id.id)],
limit=1)[:1])
related_project_id = fields.Many2one(
comodel_name='project.project', string='Project',
compute='_compute_related_project_id')
@api.model
def _prepare_project_vals(self, order):
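# Name sketch: "<partner> - <year> - <order reference>", built from
# the sale order so the generated project is easy to trace back.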
name = u" %s - %s - %s" % (
order.partner_id.name,
date.today().year,
order.name)
return {
'user_id': order.user_id.id,
'name': name,
'partner_id': order.partner_id.id,
}
@api.multi
def action_create_project(self):
project_obj = self.env['project.project']
for order in self:
vals = self._prepare_project_vals(order)
project = project_obj.create(vals)
order.write({
'project_id': project.analytic_account_id.id
})
return True
| agpl-3.0 |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/email/mime/application.py | 414 | 1256 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Keith Dart
# Contact: email-sig@python.org
"""Class representing application/* type MIME documents."""
__all__ = ["MIMEApplication"]
from email import encoders
from email.mime.nonmultipart import MIMENonMultipart
class MIMEApplication(MIMENonMultipart):
"""Class for generating application/* MIME documents."""
def __init__(self, _data, _subtype='octet-stream',
_encoder=encoders.encode_base64, **_params):
"""Create an application/* type MIME document.
_data is a string containing the raw application data.
_subtype is the MIME content type subtype, defaulting to
'octet-stream'.
_encoder is a function which will perform the actual encoding for
transport of the application data, defaulting to base64 encoding.
Any additional keyword arguments are passed to the base class
constructor, which turns them into parameters on the Content-Type
header.
"""
if _subtype is None:
raise TypeError('Invalid application MIME subtype')
MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
self.set_payload(_data)
_encoder(self)
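# Minimal usage sketch (not part of the original module):
#
# from email.mime.application import MIMEApplication
# part = MIMEApplication(open('report.pdf', 'rb').read(), _subtype='pdf')
# part.add_header('Content-Disposition', 'attachment', filename='report.pdf')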
| apache-2.0 |
MonicaHsu/truvaluation | venv/lib/python2.7/site-packages/werkzeug/debug/__init__.py | 310 | 7800 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import json
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
:param request_key: The key that points to the request object in this
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
"""
# this class is public
__module__ = 'werkzeug'
def __init__(self, app, evalex=False, request_key='werkzeug.request',
console_path='/console', console_init_func=None,
show_hidden_frames=False, lodgeit_url=None):
if lodgeit_url is not None:
from warnings import warn
warn(DeprecationWarning('Werkzeug now pastes into gists.'))
if not console_init_func:
console_init_func = dict
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, 'close'):
app_iter.close()
except Exception:
if hasattr(app_iter, 'close'):
app_iter.close()
traceback = get_current_traceback(skip=1, show_hidden_frames=
self.show_hidden_frames,
ignore_system_exceptions=True)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response('500 INTERNAL SERVER ERROR', [
('Content-Type', 'text/html; charset=utf-8'),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
('X-XSS-Protection', '0'),
])
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ['wsgi.errors'].write(
'Debugging middleware caught exception in streamed '
'response at a point where response headers were already '
'sent.\n')
else:
yield traceback.render_full(evalex=self.evalex,
secret=self.secret) \
.encode('utf-8', 'replace')
traceback.log(environ['wsgi.errors'])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype='text/html')
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
self.frames[0] = _ConsoleFrame(self.console_init_func())
return Response(render_console_html(secret=self.secret),
mimetype='text/html')
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype='application/json')
def get_source(self, request, frame):
"""Render the source viewer."""
return Response(frame.render_source(), mimetype='text/html')
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), 'shared', basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] \
or 'application/octet-stream'
f = open(filename, 'rb')
try:
return Response(f.read(), mimetype=mimetype)
finally:
f.close()
return Response('Not Found', status=404)
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
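# Every debugger endpoint is guarded by a per-process random secret
# (self.secret), so only pages rendered by this instance can drive
# the console, source viewer or paste actions.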
if request.args.get('__debugger__') == 'yes':
cmd = request.args.get('cmd')
arg = request.args.get('f')
secret = request.args.get('s')
traceback = self.tracebacks.get(request.args.get('tb', type=int))
frame = self.frames.get(request.args.get('frm', type=int))
if cmd == 'resource' and arg:
response = self.get_resource(request, arg)
elif cmd == 'paste' and traceback is not None and \
secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == 'source' and frame and self.secret == secret:
response = self.get_source(request, frame)
elif self.evalex and cmd is not None and frame is not None and \
self.secret == secret:
response = self.execute_command(request, cmd, frame)
elif self.evalex and self.console_path is not None and \
request.path == self.console_path:
response = self.display_console(request)
return response(environ, start_response)
| mit |
Kiddinglife/geco-protocol-stack | thirdparty/googlemock/scripts/generator/cpp/gmock_class_test.py | 3 | 11804 | #!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
"""Helper class that adds assert methods."""
def StripLeadingWhitespace(self, lines):
"""Strip leading whitespace in each line in 'lines'."""
return '\n'.join([s.lstrip() for s in lines.split('\n')])
def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
"""Specialized assert that ignores the indent level."""
self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
def GenerateMethodSource(self, cpp_source):
"""Convert C++ source to Google Mock output source lines."""
method_source_lines = []
# <test> is a pseudo-filename, it is not read or written.
builder = ast.BuilderFromSource(cpp_source, '<test>')
ast_list = list(builder.Generate())
gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
return '\n'.join(method_source_lines)
def testSimpleMethod(self):
source = """
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo();
Foo(int x);
Foo(const Foo& f);
Foo(Foo&& f);
~Foo();
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testVirtualDestructor(self):
source = """
class Foo {
public:
virtual ~Foo();
virtual int Bar() = 0;
};
"""
# The destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testExplicitlyDefaultedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = default;
Foo(const Foo& f) = default;
Foo(Foo&& f) = default;
~Foo() = default;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testExplicitlyDeletedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = delete;
Foo(const Foo& f) = delete;
Foo(Foo&& f) = delete;
~Foo() = delete;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleOverrideMethod(self):
source = """
class Foo {
public:
int Bar() override;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstMethod(self):
source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
self.GenerateMethodSource(source))
def testExplicitVoid(self):
source = """
class Foo {
public:
virtual int Bar(void);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint(void));',
self.GenerateMethodSource(source))
def testStrangeNewlineInParameter(self):
source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvoid(int a));',
self.GenerateMethodSource(source))
def testDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testMultipleDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testRemovesCommentsWhenDefaultsArePresent(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testDoubleSlashCommentsInParameterListAreRemoved(self):
source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
self.GenerateMethodSource(source))
def testCStyleCommentsInParameterListAreNotRemoved(self):
# NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
# comments. Also note that C style comments after the last parameter
# are still elided.
source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
self.GenerateMethodSource(source))
def testArgsOfTemplateTypes(self):
source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\n'
'int(const vector<int>& v, map<int, string>* output));',
self.GenerateMethodSource(source))
def testReturnTypeWithOneTemplateArg(self):
source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
self.GenerateMethodSource(source))
def testReturnTypeWithManyTemplateArgs(self):
source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
# Comparing the comment text is brittle - we'll think of something
# better in case this gets annoying, but for now let's keep it simple.
self.assertEqualIgnoreLeadingWhitespace(
'// The following line won\'t really compile, as the return\n'
'// type has multiple template arguments. To fix it, use a\n'
'// typedef for the return type.\n'
'MOCK_METHOD0(Bar,\nmap<int, string>());',
self.GenerateMethodSource(source))
def testSimpleMethodInTemplatedClass(self):
source = """
template<class T>
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0_T(Bar,\nint());',
self.GenerateMethodSource(source))
def testPointerArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C*);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C*));',
self.GenerateMethodSource(source))
def testReferenceArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C&);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C&));',
self.GenerateMethodSource(source))
def testArrayArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C[]);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C[]));',
self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
def GenerateMocks(self, cpp_source):
"""Convert C++ source to complete Google Mock output source."""
# <test> is a pseudo-filename; it is not read or written.
filename = '<test>'
builder = ast.BuilderFromSource(cpp_source, filename)
ast_list = list(builder.Generate())
lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
return '\n'.join(lines)
def testNamespaces(self):
source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {
class Test {
public:
virtual void Foo();
};
} // namespace Baz
} // namespace Foo
"""
expected = """\
namespace Foo {
namespace Baz {
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
} // namespace Baz
} // namespace Foo
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testClassWithStorageSpecifierMacro(self):
source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplatedForwardDeclaration(self):
source = """
template <class T> class Forward; // Forward declaration should be ignored.
class Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplatedClass(self):
source = """
template <typename S, typename T>
class Test {
public:
virtual void Foo();
};
"""
expected = """\
template <typename T0, typename T1>
class MockTest : public Test<T0, T1> {
public:
MOCK_METHOD0_T(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplateInATemplateTypedef(self):
source = """
class Test {
public:
typedef std::vector<std::list<int>> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplateInATemplateTypedefWithComma(self):
source = """
class Test {
public:
typedef std::function<void(
const vector<std::list<int>>&, int> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
rajiteh/taiga-back | taiga/events/apps.py | 21 | 1478 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from django.apps import AppConfig
from django.db.models import signals
from . import signal_handlers as handlers
def connect_events_signals():
signals.post_save.connect(handlers.on_save_any_model, dispatch_uid="events_change")
signals.post_delete.connect(handlers.on_delete_any_model, dispatch_uid="events_delete")
def disconnect_events_signals():
signals.post_save.disconnect(dispatch_uid="events_change")
signals.post_delete.disconnect(dispatch_uid="events_delete")
class EventsAppConfig(AppConfig):
name = "taiga.events"
verbose_name = "Events App Config"
def ready(self):
connect_events_signals()
| agpl-3.0 |
mvcsantos/QGIS | python/ext-libs/pygments/lexer.py | 265 | 26921 | # -*- coding: utf-8 -*-
"""
pygments.lexer
~~~~~~~~~~~~~~
Base lexer classes.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re, itertools
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
make_analysator
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this']
_encoding_map = [('\xef\xbb\xbf', 'utf-8'),
('\xff\xfe\0\0', 'utf-32'),
('\0\0\xfe\xff', 'utf-32be'),
('\xff\xfe', 'utf-16'),
('\xfe\xff', 'utf-16be')]
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
"""
This metaclass automagically converts ``analyse_text`` methods into
static methods which always return float values.
"""
def __new__(cls, name, bases, d):
if 'analyse_text' in d:
d['analyse_text'] = make_analysator(d['analyse_text'])
return type.__new__(cls, name, bases, d)
class Lexer(object):
"""
Lexer for a specific language.
Basic options recognized:
``stripnl``
Strip leading and trailing newlines from the input (default: True).
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
``ensurenl``
Make sure that the input ends with a newline (default: True). This
is required for some lexers that consume input linewise.
*New in Pygments 1.3.*
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
If given, must be an encoding name. This encoding will be used to
convert the input string to Unicode, if it is not already a Unicode
string (default: ``'latin1'``).
Can also be ``'guess'`` to use a simple UTF-8 / Latin1 detection, or
``'chardet'`` to use the chardet library, if it is installed.
"""
#: Name of the lexer
name = None
#: Shortcuts for the lexer
aliases = []
#: File name globs
filenames = []
#: Secondary file name globs
alias_filenames = []
#: MIME types
mimetypes = []
#: Priority, should multiple lexers match and no content is provided
priority = 0
__metaclass__ = LexerMeta
def __init__(self, **options):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'latin1')
# self.encoding = options.get('inencoding', None) or self.encoding
self.filters = []
for filter_ in get_list_opt(options, 'filters', ()):
self.add_filter(filter_)
def __repr__(self):
if self.options:
return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
self.options)
else:
return '<pygments.lexers.%s>' % self.__class__.__name__
def add_filter(self, filter_, **options):
"""
Add a new stream filter to this lexer.
"""
if not isinstance(filter_, Filter):
filter_ = get_filter_by_name(filter_, **options)
self.filters.append(filter_)
def analyse_text(text):
"""
Has to return a float between ``0`` and ``1`` that indicates
if a lexer wants to highlight this text. Used by ``guess_lexer``.
If this method returns ``0`` it won't highlight it in any case, if
it returns ``1`` highlighting with this lexer is guaranteed.
The `LexerMeta` metaclass automatically wraps this function so
that it works like a static method (no ``self`` or ``cls``
parameter) and the return value is automatically converted to
`float`. If the return value is an object that is boolean `False`
it's the same as if the return value was ``0.0``.
"""
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
if not isinstance(text, unicode):
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
except UnicodeDecodeError:
text = text.decode('latin1')
elif self.encoding == 'chardet':
try:
import chardet
except ImportError:
raise ImportError('To enable chardet encoding guessing, '
'please install the chardet library '
'from http://chardet.feedparser.org/')
# check for BOM first
decoded = None
for bom, encoding in _encoding_map:
if text.startswith(bom):
decoded = unicode(text[len(bom):], encoding,
errors='replace')
break
# no BOM found, so use chardet
if decoded is None:
enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = unicode(text, enc.get('encoding') or 'utf-8',
errors='replace')
text = decoded
else:
text = text.decode(self.encoding)
else:
if text.startswith(u'\ufeff'):
text = text[len(u'\ufeff'):]
# text now *is* a unicode string
text = text.replace('\r\n', '\n')
text = text.replace('\r', '\n')
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, text):
"""
Return an iterable of (tokentype, value) pairs.
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
raise NotImplementedError
class DelegatingLexer(Lexer):
"""
This lexer takes two lexers as arguments: a root lexer and
a language lexer. First everything is scanned using the language
lexer, afterwards all ``Other`` tokens are lexed using the root
lexer.
The lexers from the ``template`` lexer package use this base lexer.
"""
def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
self.root_lexer = _root_lexer(**options)
self.language_lexer = _language_lexer(**options)
self.needle = _needle
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buffered = ''
insertions = []
lng_buffer = []
for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
if t is self.needle:
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
lng_buffer = []
buffered += v
else:
lng_buffer.append((i, t, v))
if lng_buffer:
insertions.append((len(buffered), lng_buffer))
return do_insertions(insertions,
self.root_lexer.get_tokens_unprocessed(buffered))
#-------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
class include(str):
"""
Indicates that a state should include rules from another state.
"""
pass
class _inherit(object):
"""
Indicates that a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit()
class combined(tuple):
"""
Indicates a state combined from multiple states.
"""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
class _PseudoMatch(object):
"""
A pseudo match object constructed from a string.
"""
def __init__(self, start, text):
self._text = text
self._start = start
def start(self, arg=None):
return self._start
def end(self, arg=None):
return self._start + len(self._text)
def group(self, arg=None):
if arg:
raise IndexError('No such group')
return self._text
def groups(self):
return (self._text,)
def groupdict(self):
return {}
def bygroups(*args):
"""
Callback that yields multiple actions for each group in the match.
"""
def callback(lexer, match, ctx=None):
for i, action in enumerate(args):
if action is None:
continue
elif type(action) is _TokenType:
data = match.group(i + 1)
if data:
yield match.start(i + 1), action, data
else:
data = match.group(i + 1)
if data is not None:
if ctx:
ctx.pos = match.start(i + 1)
for item in action(lexer, _PseudoMatch(match.start(i + 1),
data), ctx):
if item:
yield item
if ctx:
ctx.pos = match.end()
return callback
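# Illustrative sketch (not part of this module): a ``tokens`` rule using
# ``bygroups`` might look like the following; the pattern is made up and
# the token types come from pygments.token.
#
# from pygments.token import Keyword, Name, Text
#
# (r'(def)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
#  bygroups(Keyword, Text, Name.Function)),
#
# Each captured group is emitted with the action in the same position.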
class _This(object):
"""
Special singleton used for indicating the caller class.
Used by ``using``.
"""
this = _This()
def using(_other, **kwargs):
"""
Callback that processes the match with a different lexer.
The keyword arguments are forwarded to the lexer, except `state` which
is handled separately.
`state` specifies the state that the new lexer will start in, and can
be an enumerable such as ('root', 'inline', 'string') or a simple
string which is assumed to be on top of the root state.
Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
"""
gt_kwargs = {}
if 'state' in kwargs:
s = kwargs.pop('state')
if isinstance(s, (list, tuple)):
gt_kwargs['stack'] = s
else:
gt_kwargs['stack'] = ('root', s)
if _other is this:
def callback(lexer, match, ctx=None):
# if keyword arguments are given the callback
# function has to create a new lexer instance
if kwargs:
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = lexer.__class__(**kwargs)
else:
lx = lexer
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
else:
def callback(lexer, match, ctx=None):
# XXX: cache that somehow
kwargs.update(lexer.options)
lx = _other(**kwargs)
s = match.start()
for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
yield i + s, t, v
if ctx:
ctx.pos = match.end()
return callback
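# Illustrative sketch (not part of this module): ``using`` delegates the
# matched text to another lexer; the lexer class named here is hypothetical.
#
# (r'<script>.*?</script>', using(SomeJavascriptLexer)),
#
# With ``this`` the current lexer is re-entered, optionally in a given state:
#
# (r'\{.*?\}', using(this, state='root')),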
class RegexLexerMeta(LexerMeta):
"""
Metaclass for RegexLexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_regex(cls, regex, rflags):
"""Preprocess the regular expression component of a token definition."""
return re.compile(regex, rflags).match
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
"""Preprocess the state transition action of a token definition."""
if isinstance(new_state, str):
# an existing state
if new_state == '#pop':
return -1
elif new_state in unprocessed:
return (new_state,)
elif new_state == '#push':
return new_state
elif new_state[:5] == '#pop:':
return -int(new_state[5:])
else:
assert False, 'unknown new state %r' % new_state
elif isinstance(new_state, combined):
# combine a new state from existing ones
tmp_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in new_state:
assert istate != new_state, 'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[tmp_state] = itokens
return (tmp_state,)
elif isinstance(new_state, tuple):
# push more than one state
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
# processed already
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags)
except Exception, err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens
def process_tokendef(cls, name, tokendefs=None):
"""Preprocess a dictionary of token definitions."""
processed = cls._all_tokens[name] = {}
tokendefs = tokendefs or cls.tokens[name]
for state in tokendefs.keys():
cls._process_state(tokendefs, processed, state)
return processed
def get_tokendefs(cls):
"""
Merge tokens from superclasses in MRO order, returning a single tokendef
dictionary.
Any state that is not defined by a subclass will be inherited
automatically. States that *are* defined by subclasses will, by
default, override that state in the superclass. If a subclass wishes to
inherit definitions from a superclass, it can use the special value
"inherit", which will cause the superclass' state definition to be
included at that point in the state.
"""
tokens = {}
inheritable = {}
for c in itertools.chain((cls,), cls.__mro__):
toks = c.__dict__.get('tokens', {})
for state, items in toks.iteritems():
curitems = tokens.get(state)
if curitems is None:
tokens[state] = items
try:
inherit_ndx = items.index(inherit)
except ValueError:
continue
inheritable[state] = inherit_ndx
continue
inherit_ndx = inheritable.pop(state, None)
if inherit_ndx is None:
continue
# Replace the "inherit" value with the items
curitems[inherit_ndx:inherit_ndx+1] = items
try:
new_inh_ndx = items.index(inherit)
except ValueError:
pass
else:
inheritable[state] = inherit_ndx + new_inh_ndx
return tokens
def __call__(cls, *args, **kwds):
"""Instantiate cls after preprocessing its token definitions."""
if '_tokens' not in cls.__dict__:
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
provide a list of states and regular expressions.
"""
__metaclass__ = RegexLexerMeta
#: Flags for compiling the regular expressions.
#: Defaults to MULTILINE.
flags = re.MULTILINE
#: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
#:
#: The initial state is 'root'.
#: ``new_state`` can be omitted to signify no state transition.
#: If it is a string, the state is pushed on the stack and changed.
#: If it is a tuple of strings, all states are pushed on the stack and
#: the current state will be the topmost.
#: It can also be ``combined('state1', 'state2', ...)``
#: to signify a new, anonymous state combined from the rules of two
#: or more existing ones.
#: Furthermore, it can be '#pop' to signify going back one step in
#: the state stack, or '#push' to push the current state on the stack
#: again.
#:
#: The tuple can also be replaced with ``include('state')``, in which
#: case the rules from the state named by the string are included in the
#: current one.
tokens = {}
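# Illustrative sketch (not part of this module): a minimal subclass with a
# hypothetical grammar could define its state machine like this:
#
# from pygments.token import String, Text
#
# class QuotedLexer(RegexLexer):
#     tokens = {
#         'root': [
#             (r'[^"]+', Text),
#             (r'"', String, 'string'),   # push the 'string' state
#         ],
#         'string': [
#             (r'[^"]+', String),
#             (r'"', String, '#pop'),     # return to 'root'
#         ],
#     }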
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
if type(action) is _TokenType:
yield pos, action, m.group()
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
else:
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
statestack = ['root']
statetokens = tokendefs['root']
yield pos, Text, u'\n'
pos += 1
continue
yield pos, Error, text[pos]
pos += 1
except IndexError:
break
class LexerContext(object):
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return 'LexerContext(%r, %r, %r)' % (
self.text, self.pos, self.stack)
class ExtendedRegexLexer(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
for item in action(self, m, ctx):
yield item
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# pop
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, u'\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = insertions.next()
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the position of the first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = insertions.next()
except StopIteration:
insleft = False
break # not strictly necessary
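# Illustrative sketch (not part of this module): console-style lexers use
# do_insertions to splice prompt tokens into a sublexer's output, roughly:
#
# from pygments.token import Generic
#
# insertions = [(0, [(0, Generic.Prompt, u'>>> ')])]
# for pos, token, value in do_insertions(
#         insertions, pylexer.get_tokens_unprocessed(code)):
#     ...
#
# where ``pylexer`` and ``code`` are assumed to exist in the caller.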
| gpl-2.0 |
1st/django | django/db/utils.py | 50 | 11023 | import inspect
import os
import pkgutil
import warnings
from importlib import import_module
from threading import local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils._os import npath, upath
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception if six.PY3 else StandardError): # NOQA: StandardError undefined on PY3
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper(object):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
dj_exc_value.__cause__ = exc_value
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
six.reraise(dj_exc_type, dj_exc_value, traceback)
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
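# Illustrative sketch (not part of Django): the wrapper is used both as a
# context manager and as a decorator; ``connection`` and ``cursor`` are
# assumed to exist in the caller.
#
# wrapper = DatabaseErrorWrapper(connection)
# with wrapper:
#     cursor.execute(sql)       # backend errors re-raised as Django's
#
# execute = wrapper(cursor.execute)   # decorator form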
def load_backend(backend_name):
# Look for a fully qualified database backend name
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
try:
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([npath(backend_dir)])
if ispkg and name not in {'base', 'dummy'}]
except EnvironmentError:
builtin_backends = []
if backend_name not in ['django.db.backends.%s' % b for b in
builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
error_msg = ("%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX "
"is one of:\n %s\nError was: %s" %
(backend_name, ", ".join(backend_reprs), e_user))
raise ImproperlyConfigured(error_msg)
else:
# If there's some other error, this must be an error in Django
raise
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
self._connections = local()
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if self._databases[DEFAULT_DB_ALIAS] == {}:
self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
return self._databases
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
where no settings is provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', None)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Makes sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
test_settings.setdefault(key, None)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
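# Illustrative sketch (not part of Django): a single handler is normally
# created at import time, and indexing it lazily builds and caches one
# backend wrapper per thread and alias:
#
# connections = ConnectionHandler()
# conn = connections['default']   # loads backend on first access
# conn.cursor()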
class ConnectionRouter(object):
def __init__(self, routers=None):
"""
If routers is not specified, will default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, six.string_types):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
if six.PY3:
sig = inspect.signature(router.allow_migrate)
has_deprecated_signature = not any(
p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
)
else:
argspec = inspect.getargspec(router.allow_migrate)
has_deprecated_signature = len(argspec.args) == 3 and not argspec.keywords
if has_deprecated_signature:
warnings.warn(
"The signature of allow_migrate has changed from "
"allow_migrate(self, db, model) to "
"allow_migrate(self, db, app_label, model_name=None, **hints). "
"Support for the old signature will be removed in Django 1.10.",
RemovedInDjango110Warning)
model = hints.get('model')
allow = None if model is None else method(db, model)
else:
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""
Return app models allowed to be synchronized on provided db.
"""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
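# Illustrative sketch (not part of Django): a router participating in the
# scheme above implements only the methods it needs; the class and alias
# names are hypothetical, and the allow_migrate signature matches the
# non-deprecated form checked above.
#
# class ReplicaRouter(object):
#     def db_for_read(self, model, **hints):
#         return 'replica'
#
#     def db_for_write(self, model, **hints):
#         return 'default'
#
#     def allow_migrate(self, db, app_label, model_name=None, **hints):
#         return db == 'default'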
| bsd-3-clause |
hwjworld/xiaodun-platform | lms/djangoapps/analytics/basic.py | 30 | 3759 | """
Student and course analytics.
Serve miscellaneous course and student data
"""
from django.contrib.auth.models import User
import xmodule.graders as xmgraders
STUDENT_FEATURES = ('username', 'first_name', 'last_name', 'is_staff', 'email')
PROFILE_FEATURES = ('name', 'language', 'location', 'year_of_birth', 'gender',
'level_of_education', 'mailing_address', 'goals')
AVAILABLE_FEATURES = STUDENT_FEATURES + PROFILE_FEATURES
def enrolled_students_features(course_id, features):
"""
Return list of student features as dictionaries.
enrolled_students_features(course_id, ['username, first_name'])
would return [
{'username': 'username1', 'first_name': 'firstname1'}
{'username': 'username2', 'first_name': 'firstname2'}
{'username': 'username3', 'first_name': 'firstname3'}
]
"""
students = User.objects.filter(
courseenrollment__course_id=course_id,
courseenrollment__is_active=1,
).order_by('username').select_related('profile')
def extract_student(student, features):
""" convert student to dictionary """
student_features = [x for x in STUDENT_FEATURES if x in features]
profile_features = [x for x in PROFILE_FEATURES if x in features]
student_dict = dict((feature, getattr(student, feature))
for feature in student_features)
profile = student.profile
if profile is not None:
profile_dict = dict((feature, getattr(profile, feature))
for feature in profile_features)
student_dict.update(profile_dict)
return student_dict
return [extract_student(student, features) for student in students]
def dump_grading_context(course):
"""
Render information about course grading context
(e.g. which problems are graded in what assignments)
Useful for debugging grading_policy.json and policy.json
Returns HTML string
"""
hbar = "{}\n".format("-" * 77)
msg = hbar
msg += "Course grader:\n"
msg += '%s\n' % course.grader.__class__
graders = {}
if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):
msg += '\n'
msg += "Graded sections:\n"
for subgrader, category, weight in course.grader.sections:
msg += " subgrader=%s, type=%s, category=%s, weight=%s\n"\
% (subgrader.__class__, subgrader.type, category, weight)
subgrader.index = 1
graders[subgrader.type] = subgrader
msg += hbar
msg += "Listing grading context for course %s\n" % course.id
gcontext = course.grading_context
msg += "graded sections:\n"
msg += '%s\n' % gcontext['graded_sections'].keys()
for (gsomething, gsvals) in gcontext['graded_sections'].items():
msg += "--> Section %s:\n" % (gsomething)
for sec in gsvals:
sdesc = sec['section_descriptor']
frmat = getattr(sdesc, 'format', None)
aname = ''
if frmat in graders:
gform = graders[frmat]
aname = '%s %02d' % (gform.short_label, gform.index)
gform.index += 1
elif sdesc.display_name in graders:
gform = graders[sdesc.display_name]
aname = '%s' % gform.short_label
notes = ''
if getattr(sdesc, 'score_by_attempt', False):
notes = ', score by attempt!'
msg += " %s (format=%s, Assignment=%s%s)\n"\
% (sdesc.display_name, frmat, aname, notes)
msg += "all descriptors:\n"
msg += "length=%d\n" % len(gcontext['all_descriptors'])
msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')
return msg
| agpl-3.0 |
mihaip/NewsBlur | utils/PyRSS2Gen.py | 58 | 14316 | """PyRSS2Gen - A Python library for generating RSS 2.0 feeds."""
__name__ = "PyRSS2Gen"
__version__ = (1, 0, 0)
__author__ = "Andrew Dalke <dalke@dalkescientific.com>"
_generator_name = __name__ + "-" + ".".join(map(str, __version__))
import datetime
# Could make this the base class; will need to add 'publish'
class WriteXmlMixin:
def write_xml(self, outfile, encoding = "iso-8859-1"):
from xml.sax import saxutils
handler = saxutils.XMLGenerator(outfile, encoding)
handler.startDocument()
self.publish(handler)
handler.endDocument()
def to_xml(self, encoding = "iso-8859-1"):
try:
import cStringIO as StringIO
except ImportError:
import StringIO
f = StringIO.StringIO()
self.write_xml(f, encoding)
return f.getvalue()
def _element(handler, name, obj, d = {}):
if isinstance(obj, basestring) or obj is None:
# special-case handling to make the API easier
# to use for the common case.
handler.startElement(name, d)
if obj is not None:
handler.characters(obj)
handler.endElement(name)
else:
# It better know how to emit the correct XML.
obj.publish(handler)
def _opt_element(handler, name, obj):
if obj is None:
return
_element(handler, name, obj)
def _format_date(dt):
"""convert a datetime into an RFC 822 formatted date
Input date must be in GMT.
"""
# Looks like:
# Sat, 07 Sep 2002 00:00:01 GMT
# Can't use strftime because that's locale dependent
#
# Isn't there a standard way to do this for Python? The
# rfc822 and email.Utils modules assume a timestamp. The
# following is based on the rfc822 module.
return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()],
dt.day,
["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"][dt.month-1],
dt.year, dt.hour, dt.minute, dt.second)
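# Illustrative check (not part of this module), using the sample date from
# the comment above:
#
# _format_date(datetime.datetime(2002, 9, 7, 0, 0, 1))
# # -> 'Sat, 07 Sep 2002 00:00:01 GMT'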
##
# A couple simple wrapper objects for the fields which
# take a simple value other than a string.
class IntElement:
"""implements the 'publish' API for integers
Takes the tag name and the integer value to publish.
(Could be used for anything which uses str() to be published
to text for XML.)
"""
element_attrs = {}
def __init__(self, name, val):
self.name = name
self.val = val
def publish(self, handler):
handler.startElement(self.name, self.element_attrs)
handler.characters(str(self.val))
handler.endElement(self.name)
class DateElement:
"""implements the 'publish' API for a datetime.datetime
Takes the tag name and the datetime to publish.
Converts the datetime to an RFC 2822 timestamp (4-digit year).
"""
def __init__(self, name, dt):
self.name = name
self.dt = dt
def publish(self, handler):
_element(handler, self.name, _format_date(self.dt))
####
class Category:
"""Publish a category element"""
def __init__(self, category, domain = None):
self.category = category
self.domain = domain
def publish(self, handler):
d = {}
if self.domain is not None:
d["domain"] = self.domain
_element(handler, "category", self.category, d)
class Cloud:
"""Publish a cloud"""
def __init__(self, domain, port, path,
registerProcedure, protocol):
self.domain = domain
self.port = port
self.path = path
self.registerProcedure = registerProcedure
self.protocol = protocol
def publish(self, handler):
_element(handler, "cloud", None, {
"domain": self.domain,
"port": str(self.port),
"path": self.path,
"registerProcedure": self.registerProcedure,
"protocol": self.protocol})
class Image:
"""Publish a channel Image"""
element_attrs = {}
def __init__(self, url, title, link,
width = None, height = None, description = None):
self.url = url
self.title = title
self.link = link
self.width = width
self.height = height
self.description = description
def publish(self, handler):
handler.startElement("image", self.element_attrs)
_element(handler, "url", self.url)
_element(handler, "title", self.title)
_element(handler, "link", self.link)
width = self.width
if isinstance(width, int):
width = IntElement("width", width)
_opt_element(handler, "width", width)
height = self.height
if isinstance(height, int):
height = IntElement("height", height)
_opt_element(handler, "height", height)
_opt_element(handler, "description", self.description)
handler.endElement("image")
class Guid:
"""Publish a guid
Defaults to being a permalink, which is the assumption if it's
omitted. Hence strings are always permalinks.
"""
def __init__(self, guid, isPermaLink = 1):
self.guid = guid
self.isPermaLink = isPermaLink
def publish(self, handler):
d = {}
if self.isPermaLink:
d["isPermaLink"] = "true"
else:
d["isPermaLink"] = "false"
_element(handler, "guid", self.guid, d)
class TextInput:
"""Publish a textInput
Apparently this is rarely used.
"""
element_attrs = {}
def __init__(self, title, description, name, link):
self.title = title
self.description = description
self.name = name
self.link = link
def publish(self, handler):
handler.startElement("textInput", self.element_attrs)
_element(handler, "title", self.title)
_element(handler, "description", self.description)
_element(handler, "name", self.name)
_element(handler, "link", self.link)
handler.endElement("textInput")
class Enclosure:
"""Publish an enclosure"""
def __init__(self, url, length, type):
self.url = url
self.length = length
self.type = type
def publish(self, handler):
_element(handler, "enclosure", None,
{"url": self.url,
"length": str(self.length),
"type": self.type,
})
class Source:
"""Publish the item's original source, used by aggregators"""
def __init__(self, name, url):
self.name = name
self.url = url
def publish(self, handler):
_element(handler, "source", self.name, {"url": self.url})
class SkipHours:
"""Publish the skipHours
This takes a list of hours, as integers.
"""
element_attrs = {}
def __init__(self, hours):
self.hours = hours
def publish(self, handler):
if self.hours:
handler.startElement("skipHours", self.element_attrs)
for hour in self.hours:
_element(handler, "hour", str(hour))
handler.endElement("skipHours")
class SkipDays:
"""Publish the skipDays
This takes a list of days as strings.
"""
element_attrs = {}
def __init__(self, days):
self.days = days
def publish(self, handler):
if self.days:
handler.startElement("skipDays", self.element_attrs)
for day in self.days:
_element(handler, "day", day)
handler.endElement("skipDays")
class RSS2(WriteXmlMixin):
"""The main RSS class.
Stores the channel attributes, with the "category" elements under
".categories" and the RSS items under ".items".
"""
rss_attrs = {"version": "2.0"}
element_attrs = {}
def __init__(self,
title,
link,
description,
language = None,
copyright = None,
managingEditor = None,
webMaster = None,
pubDate = None, # a datetime, *in* *GMT*
lastBuildDate = None, # a datetime
categories = None, # list of strings or Category
generator = _generator_name,
docs = "http://blogs.law.harvard.edu/tech/rss",
cloud = None, # a Cloud
ttl = None, # integer number of minutes
image = None, # an Image
rating = None, # a string; I don't know how it's used
textInput = None, # a TextInput
skipHours = None, # a SkipHours with a list of integers
skipDays = None, # a SkipDays with a list of strings
items = None, # list of RSSItems
):
self.title = title
self.link = link
self.description = description
self.language = language
self.copyright = copyright
self.managingEditor = managingEditor
self.webMaster = webMaster
self.pubDate = pubDate
self.lastBuildDate = lastBuildDate
if categories is None:
categories = []
self.categories = categories
self.generator = generator
self.docs = docs
self.cloud = cloud
self.ttl = ttl
self.image = image
self.rating = rating
self.textInput = textInput
self.skipHours = skipHours
self.skipDays = skipDays
if items is None:
items = []
self.items = items
def publish(self, handler):
handler.startElement("rss", self.rss_attrs)
handler.startElement("channel", self.element_attrs)
_element(handler, "title", self.title)
_element(handler, "link", self.link)
_element(handler, "description", self.description)
self.publish_extensions(handler)
_opt_element(handler, "language", self.language)
_opt_element(handler, "copyright", self.copyright)
_opt_element(handler, "managingEditor", self.managingEditor)
_opt_element(handler, "webMaster", self.webMaster)
pubDate = self.pubDate
if isinstance(pubDate, datetime.datetime):
pubDate = DateElement("pubDate", pubDate)
_opt_element(handler, "pubDate", pubDate)
lastBuildDate = self.lastBuildDate
if isinstance(lastBuildDate, datetime.datetime):
lastBuildDate = DateElement("lastBuildDate", lastBuildDate)
_opt_element(handler, "lastBuildDate", lastBuildDate)
for category in self.categories:
if isinstance(category, basestring):
category = Category(category)
category.publish(handler)
_opt_element(handler, "generator", self.generator)
_opt_element(handler, "docs", self.docs)
if self.cloud is not None:
self.cloud.publish(handler)
ttl = self.ttl
if isinstance(self.ttl, int):
ttl = IntElement("ttl", ttl)
_opt_element(handler, "tt", ttl)
if self.image is not None:
self.image.publish(handler)
_opt_element(handler, "rating", self.rating)
if self.textInput is not None:
self.textInput.publish(handler)
if self.skipHours is not None:
self.skipHours.publish(handler)
if self.skipDays is not None:
self.skipDays.publish(handler)
for item in self.items:
item.publish(handler)
handler.endElement("channel")
handler.endElement("rss")
def publish_extensions(self, handler):
# Derived classes can hook into this to insert
# output after the three required fields.
pass
class RSSItem(WriteXmlMixin):
"""Publish an RSS Item"""
element_attrs = {}
def __init__(self,
title = None, # string
link = None, # url as string
description = None, # string
author = None, # email address as string
categories = None, # list of string or Category
comments = None, # url as string
enclosure = None, # an Enclosure
guid = None, # a unique string
pubDate = None, # a datetime
source = None, # a Source
):
if title is None and description is None:
raise TypeError(
"must define at least one of 'title' or 'description'")
self.title = title
self.link = link
self.description = description
self.author = author
if categories is None:
categories = []
self.categories = categories
self.comments = comments
self.enclosure = enclosure
self.guid = guid
self.pubDate = pubDate
self.source = source
# It sure does get tedious typing these names three times...
def publish(self, handler):
handler.startElement("item", self.element_attrs)
_opt_element(handler, "title", self.title)
_opt_element(handler, "link", self.link)
self.publish_extensions(handler)
_opt_element(handler, "description", self.description)
_opt_element(handler, "author", self.author)
for category in self.categories:
if isinstance(category, basestring):
category = Category(category)
category.publish(handler)
_opt_element(handler, "comments", self.comments)
if self.enclosure is not None:
self.enclosure.publish(handler)
_opt_element(handler, "guid", self.guid)
pubDate = self.pubDate
if isinstance(pubDate, datetime.datetime):
pubDate = DateElement("pubDate", pubDate)
_opt_element(handler, "pubDate", pubDate)
if self.source is not None:
self.source.publish(handler)
handler.endElement("item")
def publish_extensions(self, handler):
# Derived classes can hook into this to insert
# output after the title and link elements
pass
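# Illustrative usage sketch (not part of this module; all values made up):
#
# rss = RSS2(
#     title="Example feed",
#     link="http://example.com/",
#     description="A demonstration feed.",
#     lastBuildDate=datetime.datetime.utcnow(),
#     items=[RSSItem(
#         title="First post",
#         link="http://example.com/posts/1",
#         pubDate=datetime.datetime.utcnow())])
# rss.write_xml(open("feed.xml", "w"))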
| mit |
sarielsaz/sarielsaz | test/functional/preciousblock.py | 1 | 5078 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the preciousblock RPC."""
from test_framework.test_framework import SarielsazTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_chain,
sync_blocks,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
blocks_to_copy = []
blockhash = node_src.getbestblockhash()
while True:
try:
assert(len(node_dest.getblock(blockhash, False)) > 0)
break
except:
blocks_to_copy.append(blockhash)
blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
blocks_to_copy.reverse()
for blockhash in blocks_to_copy:
blockdata = node_src.getblock(blockhash, False)
assert(node_dest.submitblock(blockdata) in (None, 'inconclusive'))
def node_sync_via_rpc(nodes):
for node_src in nodes:
for node_dest in nodes:
if node_src is node_dest:
continue
unidirectional_node_sync_via_rpc(node_src, node_dest)
class PreciousTest(SarielsazTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Ensure submitblock can in principle reorg to a competing chain")
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getblockcount(), 1)
hashZ = self.nodes[1].generate(2)[-1]
assert_equal(self.nodes[1].getblockcount(), 2)
node_sync_via_rpc(self.nodes[0:3])
assert_equal(self.nodes[0].getbestblockhash(), hashZ)
self.log.info("Mine blocks A-B-C on Node 0")
hashC = self.nodes[0].generate(3)[-1]
assert_equal(self.nodes[0].getblockcount(), 5)
self.log.info("Mine competing blocks E-F-G on Node 1")
hashG = self.nodes[1].generate(3)[-1]
assert_equal(self.nodes[1].getblockcount(), 5)
assert(hashC != hashG)
self.log.info("Connect nodes and check no reorg occurs")
# Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
node_sync_via_rpc(self.nodes[0:2])
connect_nodes_bi(self.nodes,0,1)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block C again")
self.nodes[0].preciousblock(hashC)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block C")
self.nodes[1].preciousblock(hashC)
sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block G again")
self.nodes[1].preciousblock(hashG)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G again")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node1 prefer block C again")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getblockcount(), 6)
sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Node1 should not be able to prefer block C anymore")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Mine competing blocks I-J-K-L on Node 2")
self.nodes[2].generate(4)
assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
self.log.info("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[1:3])
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
assert_equal(self.nodes[0].getbestblockhash(), hashH)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashL)
self.log.info("Make Node1 prefer block L")
self.nodes[1].preciousblock(hashL)
assert_equal(self.nodes[1].getbestblockhash(), hashL)
self.log.info("Make Node2 prefer block H")
self.nodes[2].preciousblock(hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
PreciousTest().main()
| mit |
shakamunyi/neutron-vrrp | neutron/db/migration/alembic_migrations/versions/2032abe8edac_lbaas_add_status_des.py | 8 | 1599 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""LBaaS add status description
Revision ID: 2032abe8edac
Revises: 477a4488d3f4
Create Date: 2013-06-24 06:51:47.308545
"""
# revision identifiers, used by Alembic.
revision = '2032abe8edac'
down_revision = '477a4488d3f4'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin',
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
ENTITIES = ['vips', 'pools', 'members', 'healthmonitors']
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
for entity in ENTITIES:
op.add_column(entity, sa.Column('status_description', sa.String(255)))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
for entity in ENTITIES:
op.drop_column(entity, 'status_description')
| apache-2.0 |
philroche/django-cached-hitcount | cached_hitcount/migrations/0001_initial.py | 2 | 3072 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Hit'
db.create_table(u'cached_hitcount_hit', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('hits', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.utcnow)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_hit', to=orm['contenttypes.ContentType'])),
('object_pk', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal(u'cached_hitcount', ['Hit'])
# Adding model 'BlacklistIP'
db.create_table(u'cached_hitcount_blacklistip', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ip', self.gf('django.db.models.fields.CharField')(unique=True, max_length=40)),
))
db.send_create_signal(u'cached_hitcount', ['BlacklistIP'])
def backwards(self, orm):
# Deleting model 'Hit'
db.delete_table(u'cached_hitcount_hit')
# Deleting model 'BlacklistIP'
db.delete_table(u'cached_hitcount_blacklistip')
models = {
u'cached_hitcount.blacklistip': {
'Meta': {'object_name': 'BlacklistIP'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'cached_hitcount.hit': {
'Meta': {'ordering': "('-hits',)", 'object_name': 'Hit'},
'added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_hit'", 'to': u"orm['contenttypes.ContentType']"}),
'hits': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cached_hitcount']
| gpl-3.0 |
AyoubZahid/odoo | addons/hr_timesheet_sheet/wizard/hr_timesheet_current.py | 46 | 1421 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class hr_timesheet_current_open(osv.osv_memory):
_name = 'hr.timesheet.current.open'
_description = 'hr.timesheet.current.open'
def open_timesheet(self, cr, uid, ids, context=None):
ts = self.pool.get('hr_timesheet_sheet.sheet')
if context is None:
context = {}
view_type = 'form,tree'
ids = ts.search(cr, uid, [('user_id','=',uid),('state','in',('draft','new')),('date_from','<=',time.strftime('%Y-%m-%d')), ('date_to','>=',time.strftime('%Y-%m-%d'))], context=context)
if len(ids) > 1:
view_type = 'tree,form'
domain = "[('id','in',["+','.join(map(str, ids))+"]),('user_id', '=', uid)]"
elif len(ids)==1:
domain = "[('user_id', '=', uid)]"
else:
domain = "[('user_id', '=', uid)]"
value = {
'domain': domain,
'name': _('Open Timesheet'),
'view_type': 'form',
'view_mode': view_type,
'res_model': 'hr_timesheet_sheet.sheet',
'view_id': False,
'type': 'ir.actions.act_window'
}
if len(ids) == 1:
value['res_id'] = ids[0]
return value
| gpl-3.0 |
thomashuang/white | white/orm/post.py | 4 | 5020 | #!/usr/bin/env python
# 2015 Copyright (C) White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .base import BaseMapper
from white.ext import db
from white.model import Post
class PostMapper(BaseMapper):
table = 'posts'
model = Post
def get_published_posts(self, page=1, perpage=10, category=None):
q = db.select(self.table).fields('title', 'slug', 'description', 'html', 'css', 'js',
'category', 'status', 'allow_comment', 'author', 'updated', 'created', 'pid')
if category:
q.condition('category', category)
results = (q.limit(perpage).offset((page - 1) * perpage).condition('status', 'published')
.order_by('created', 'DESC').execute())
return [self.load(data, self.model) for data in results]
def find_by_slug(self, slug):
data = db.select(self.table).fields('title', 'slug', 'description', 'html', 'css', 'js',
'category', 'status', 'allow_comment', 'author', 'updated', 'created', 'pid').condition('slug', slug).execute()
if data:
return self.load(data[0], self.model)
def search(self, key, page=1, perpage=10):
q = db.select(self.table).fields('title', 'slug', 'description', 'html', 'css', 'js',
'category', 'status', 'allow_comment', 'author', 'updated', 'created', 'pid')
_or = db.or_()
_or.condition('slug', '%%%s%%' % key, 'LIKE').condition('title', '%%%s%%' % key, 'LIKE')
q.condition(_or)
results = (q.condition('status', 'published').limit(perpage).offset((page - 1) * perpage)
.order_by('created', 'DESC').execute())
return [self.load(data, self.model) for data in results]
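# Note on the LIKE pattern used above (added for clarity): with an
# illustrative key 'rose', '%%%s%%' % key yields '%rose%', i.e. a substring
# match against both slug and title, OR-ed together via db.or_().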
def serach_count(self, key):
q = db.select(self.table).fields(db.expr('count(*)',
'total')).condition('status', 'published')
_or = db.or_()
_or.condition('slug', '%%%s%%' % key, 'LIKE').condition('title', '%%%s%%' % key, 'LIKE')
q.condition(_or)
return q.execute()[0][0]
def find(self, pid):
data = db.select(self.table).fields('title', 'slug', 'description', 'html', 'css', 'js',
'category', 'status', 'allow_comment', 'author', 'updated', 'created', 'pid').condition('pid', pid).execute()
if data:
return self.load(data[0], self.model)
def count(self, status=None):
q = db.select(self.table).fields(db.expr('COUNT(*)'))
if status:
q.condition('status', status)
return q.execute()[0][0]
def create(self, post):
row = []
for _ in ('title', 'slug', 'description', 'updated', 'created', 'html', 'css', 'js',
'category', 'status', 'allow_comment', 'author'):
row.append(getattr(post, _))
return db.insert(self.table).fields('title', 'slug', 'description', 'updated', 'created', 'html', 'css', 'js',
'category', 'status', 'allow_comment', 'author').values(row).execute()
def paginate(self, page=1, perpage=10, category=None, status=None):
"""Paginate the posts"""
q = db.select(self.table).fields('title', 'slug', 'description', 'html', 'css', 'js',
'category', 'status', 'allow_comment', 'author', 'updated', 'created', 'pid')
if category:
q.condition('category', category)
if status:
q.condition('status', status)
results = (q.limit(perpage).offset((page - 1) * perpage)
.order_by('created', 'DESC').execute())
return [self.load(data, self.model) for data in results]
def reset_post_category(self, category_id):
return db.update(self.table).set('category', 1).condition('category', category_id).execute()
def save(self, page):
q = db.update(self.table)
data = dict((_, getattr(page, _)) for _ in ('title', 'slug', 'description', 'html', 'css', 'js',
'category', 'status', 'allow_comment', 'updated'))
q.mset(data)
return q.condition('pid', page.pid).execute()
def delete(self, page_id):
return db.delete(self.table).condition('pid', page_id).execute()
def category_count(self, category_id):
return db.select(self.table).fields(db.expr('count(*)',
'total')).condition('category', category_id).condition('status', 'published').execute()[0][0]
| gpl-2.0 |
umair-gujjar/chrome-extensions-1 | crx-appengine-packager/pyasn1/codec/ber/encoder.py | 5 | 9633 | # BER encoder
import string
from pyasn1.type import base, tag, univ, char, useful
from pyasn1.codec.ber import eoo
from pyasn1 import error
class Error(Exception): pass
class AbstractItemEncoder:
supportIndefLenMode = 1
def _encodeTag(self, t, isConstructed):
v = t[0]|t[1]
if isConstructed:
v = v|tag.tagFormatConstructed
if t[2] < 31:
return chr(v|t[2])
else:
longTag = t[2]
s = chr(longTag&0x7f)
longTag = longTag >> 7
while longTag:
s = chr(0x80|(longTag&0x7f)) + s
longTag = longTag >> 7
return chr(v|0x1F) + s
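# Worked example of the tag octets (added for clarity, not original code):
# a context-class constructed tag with number 5 fits the short form,
# 0x80 | 0x20 | 5 = 0xa5. Tag numbers >= 31 switch to the long form: the
# first octet ends in 0x1f and the number follows in base-128, with the
# high bit set on every octet except the last.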
def _encodeLength(self, length, defMode):
if not defMode and self.supportIndefLenMode:
return '\x80'
if length < 0x80:
return chr(length)
else:
substrate = ''
while length:
substrate = chr(length&0xff) + substrate
length = length >> 8
if len(substrate) > 126:
raise Error('Length octets overflow (%d)' % len(substrate))
return chr(0x80 | len(substrate)) + substrate
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
raise Error('Not implemented')
def _encodeEndOfOctets(self, encodeFun, defMode):
if defMode or not self.supportIndefLenMode:
return ''
else:
return encodeFun(eoo.endOfOctets, defMode)
def encode(self, encodeFun, value, defMode, maxChunkSize):
substrate, isConstructed = self._encodeValue(
encodeFun, value, defMode, maxChunkSize
)
tagSet = value.getTagSet()
if tagSet:
if not isConstructed: # primitive form implies definite mode
defMode = 1
return self._encodeTag(
tagSet[-1], isConstructed
) + self._encodeLength(
len(substrate), defMode
) + substrate + self._encodeEndOfOctets(encodeFun, defMode)
else:
return substrate # untagged value
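# A minimal standalone sketch of the definite-length branch of _encodeLength
# above, with two sample values (added for illustration; not part of the
# original module):
def _demo_encode_length(length):
    if length < 0x80:
        return chr(length)            # short form: one octet
    substrate = ''
    while length:                     # big-endian length octets
        substrate = chr(length & 0xff) + substrate
        length = length >> 8
    return chr(0x80 | len(substrate)) + substrate
# _demo_encode_length(38)  -> '\x26'          (short form)
# _demo_encode_length(300) -> '\x82\x01\x2c'  (long form, two length octets)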
class EndOfOctetsEncoder(AbstractItemEncoder):
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return '', 0
class ExplicitlyTaggedItemEncoder(AbstractItemEncoder):
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
if isinstance(value, base.AbstractConstructedAsn1Item):
value = value.clone(tagSet=value.getTagSet()[:-1],
cloneValueFlag=1)
else:
value = value.clone(tagSet=value.getTagSet()[:-1])
return encodeFun(value, defMode, maxChunkSize), 1
explicitlyTaggedItemEncoder = ExplicitlyTaggedItemEncoder()
class IntegerEncoder(AbstractItemEncoder):
supportIndefLenMode = 0
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
octets = []
value = long(value) # to save on ops on asn1 type
while 1:
octets.insert(0, value & 0xff)
if value == 0 or value == -1:
break
value = value >> 8
if value == 0 and octets[0] & 0x80:
octets.insert(0, 0)
while len(octets) > 1 and \
(octets[0] == 0 and octets[1] & 0x80 == 0 or \
octets[0] == 0xff and octets[1] & 0x80 != 0):
del octets[0]
return string.join(map(chr, octets), ''), 0
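# Value-octet examples for the integer encoder above (added; the tag and
# length octets are prepended later by encode()):
#   300 -> '\x01\x2c'    -1 -> '\xff'    128 -> '\x00\x80'
# The leading zero on 128 keeps the sign bit clear; the trim loop strips any
# other redundant leading 0x00/0xff octets.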
class BitStringEncoder(AbstractItemEncoder):
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
if not maxChunkSize or len(value) <= maxChunkSize*8:
r = {}; l = len(value); p = 0; j = 7
while p < l:
i, j = divmod(p, 8)
r[i] = r.get(i,0) | value[p]<<(7-j)
p = p + 1
keys = r.keys(); keys.sort()
return chr(7-j) + string.join(
map(lambda k,r=r: chr(r[k]), keys),''
), 0
else:
pos = 0; substrate = ''
while 1:
# count in octets
v = value.clone(value[pos*8:pos*8+maxChunkSize*8])
if not v:
break
substrate = substrate + encodeFun(v, defMode, maxChunkSize)
pos = pos + maxChunkSize
return substrate, 1
class OctetStringEncoder(AbstractItemEncoder):
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
if not maxChunkSize or len(value) <= maxChunkSize:
return str(value), 0
else:
pos = 0; substrate = ''
while 1:
v = value.clone(value[pos:pos+maxChunkSize])
if not v:
break
substrate = substrate + encodeFun(v, defMode, maxChunkSize)
pos = pos + maxChunkSize
return substrate, 1
class NullEncoder(AbstractItemEncoder):
supportIndefLenMode = 0
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
return '', 0
class ObjectIdentifierEncoder(AbstractItemEncoder):
supportIndefLenMode = 0
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
oid = tuple(value)
if len(oid) < 2:
raise error.PyAsn1Error('Short OID %s' % value)
# Build the first twos
index = 0
subid = oid[index] * 40
subid = subid + oid[index+1]
if subid < 0 or subid > 0xff:
raise error.PyAsn1Error(
'Initial sub-ID overflow %s in OID %s' % (oid[index:], value)
)
octets = [ chr(subid) ]
index = index + 2
# Cycle through subids
for subid in oid[index:]:
if subid > -1 and subid < 128:
# Optimize for the common case
octets.append(chr(subid & 0x7f))
elif subid < 0 or subid > 0xFFFFFFFFL:
raise error.PyAsn1Error(
'SubId overflow %s in %s' % (subid, value)
)
else:
# Pack large Sub-Object IDs
res = [ chr(subid & 0x7f) ]
subid = subid >> 7
while subid > 0:
res.insert(0, chr(0x80 | (subid & 0x7f)))
subid = subid >> 7
# Convert packed Sub-Object ID to string and add packed
# it to resulted Object ID
octets.append(string.join(res, ''))
return string.join(octets, ''), 0
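# Worked example for the OID encoder above (added for clarity): 1.3.6.1.2.1
# encodes to the value octets 2b 06 01 02 01 -- the first two arcs collapse
# into one octet (40*1 + 3 = 0x2b), and any arc >= 128, e.g. 840, is packed
# base-128 with continuation bits: 840 = 6*128 + 72 -> 0x86 0x48.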
class SequenceOfEncoder(AbstractItemEncoder):
def _encodeValue(self, encodeFun, value, defMode, maxChunkSize):
if hasattr(value, 'setDefaultComponents'):
value.setDefaultComponents()
value.verifySizeSpec()
substrate = ''; idx = len(value)
while idx > 0:
idx = idx - 1
if value[idx] is None: # Optional component
continue
if hasattr(value, 'getDefaultComponentByPosition'):
if value.getDefaultComponentByPosition(idx) == value[idx]:
continue
substrate = encodeFun(
value[idx], defMode, maxChunkSize
) + substrate
return substrate, 1
codecMap = {
eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
univ.Boolean.tagSet: IntegerEncoder(),
univ.Integer.tagSet: IntegerEncoder(),
univ.BitString.tagSet: BitStringEncoder(),
univ.OctetString.tagSet: OctetStringEncoder(),
univ.Null.tagSet: NullEncoder(),
univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
univ.Enumerated.tagSet: IntegerEncoder(),
# Sequence & Set have same tags as SequenceOf & SetOf
univ.SequenceOf.tagSet: SequenceOfEncoder(),
univ.SetOf.tagSet: SequenceOfEncoder(),
univ.Choice.tagSet: SequenceOfEncoder(),
# character string types
char.UTF8String.tagSet: OctetStringEncoder(),
char.NumericString.tagSet: OctetStringEncoder(),
char.PrintableString.tagSet: OctetStringEncoder(),
char.TeletexString.tagSet: OctetStringEncoder(),
char.VideotexString.tagSet: OctetStringEncoder(),
char.IA5String.tagSet: OctetStringEncoder(),
char.GraphicString.tagSet: OctetStringEncoder(),
char.VisibleString.tagSet: OctetStringEncoder(),
char.GeneralString.tagSet: OctetStringEncoder(),
char.UniversalString.tagSet: OctetStringEncoder(),
char.BMPString.tagSet: OctetStringEncoder(),
# useful types
useful.GeneralizedTime.tagSet: OctetStringEncoder(),
useful.UTCTime.tagSet: OctetStringEncoder()
}
class Encoder:
def __init__(self, _codecMap):
self.__codecMap = _codecMap
self.__emptyTagSet = tag.TagSet()
def __call__(self, value, defMode=1, maxChunkSize=0):
tagSet = value.getTagSet()
if len(tagSet) > 1:
concreteEncoder = explicitlyTaggedItemEncoder
else:
concreteEncoder = self.__codecMap.get(tagSet)
if not concreteEncoder:
# XXX
baseTagSet = tagSet.getBaseTag()
if baseTagSet:
concreteEncoder = self.__codecMap.get(
tag.TagSet(baseTagSet, baseTagSet)
)
else:
concreteEncoder = self.__codecMap.get(
self.__emptyTagSet
)
if concreteEncoder:
return concreteEncoder.encode(
self, value, defMode, maxChunkSize
)
else:
raise Error('No encoder for %s' % value)
encode = Encoder(codecMap)
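# Usage sketch (added; assumes the pyasn1 univ types imported above):
#   encode(univ.Integer(12))       -> '\x02\x01\x0c'  (tag, length, value)
#   encode(univ.OctetString('hi')) -> '\x04\x02hi'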
| bsd-3-clause |
google-coral/pycoral | examples/imprinting_learning.py | 1 | 8019 | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A demo for on-device imprinting (transfer learning) of a classification model.
Here are the steps:
1) Download the data set for transfer learning:
```
wget https://dl.google.com/coral/sample_data/imprinting_data_script.tar.gz
tar zxf imprinting_data_script.tar.gz
./imprinting_data_script/download_imprinting_test_data.sh ./
```
This downloads 10 categories, 20 images for each category, saving it into
a directory named `open_image_v4_subset`.
2) Get model files:
```
bash examples/install_requirements.sh imprinting_learning.py
```
3) Start training the new classification model:
```
python3 examples/imprinting_learning.py \
--model_path test_data/mobilenet_v1_1.0_224_l2norm_quant_edgetpu.tflite \
--data open_image_v4_subset \
--output ${HOME}/my_model.tflite
```
4) Run an inference with the new model:
```
python3 examples/classify_image.py \
--model ${HOME}/my_model.tflite \
--label ${HOME}/my_model.txt \
--input test_data/cat.bmp
```
For more information, see
https://coral.ai/docs/edgetpu/retrain-classification-ondevice/
"""
import argparse
import os
import numpy as np
from PIL import Image
from pycoral.adapters import classify
from pycoral.adapters import common
from pycoral.learn.imprinting.engine import ImprintingEngine
from pycoral.utils.edgetpu import make_interpreter
def _read_data(path, test_ratio):
"""Parses data from given directory, split them into two sets.
Args:
path: string, path of the data set. Images are stored in sub-directory named
by category.
test_ratio: float in (0,1), ratio of data used for testing.
Returns:
(train_set, test_set), A tuple of two dicts. Keys are the categories and
values are lists of image file names.
"""
train_set = {}
test_set = {}
for category in os.listdir(path):
category_dir = os.path.join(path, category)
if os.path.isdir(category_dir):
images = [
f for f in os.listdir(category_dir)
if os.path.isfile(os.path.join(category_dir, f))
]
if images:
k = max(int(test_ratio * len(images)), 1)
test_set[category] = images[:k]
assert test_set[category], 'No images to test [{}]'.format(category)
train_set[category] = images[k:]
assert train_set[category], 'No images to train [{}]'.format(category)
return train_set, test_set
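# Illustration of the expected layout and split (added; file names are made
# up): with open_image_v4_subset/cat/img_00.jpg ... img_19.jpg and
# test_ratio=0.25, k = max(int(0.25 * 20), 1) = 5, so the first 5 images per
# category (in listing order) land in test_set and the remaining 15 in
# train_set.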
def _prepare_images(image_list, directory, shape):
"""Reads images and converts them to numpy array with given shape.
Args:
image_list: a list of strings storing file names.
directory: string, path of directory storing input images.
shape: a 2-D tuple represents the shape of required input tensor.
Returns:
A numpy.array stacking the converted images.
"""
ret = []
for filename in image_list:
with Image.open(os.path.join(directory, filename)) as img:
img = img.convert('RGB')
img = img.resize(shape, Image.NEAREST)
ret.append(np.asarray(img))
return np.array(ret)
def _save_labels(labels, model_path):
"""Output labels as a txt file.
Args:
labels: {int : string}, map between label id and label.
model_path: string, path of the model.
"""
label_file_name = model_path.replace('.tflite', '.txt')
with open(label_file_name, 'w') as f:
for label_id, label in labels.items():
f.write(str(label_id) + ' ' + label + '\n')
print('Labels file saved as :', label_file_name)
def _parse_args():
"""Parses args, set default values if it's not passed.
Returns:
Object with attributes. Each attribute represents an argument.
"""
print('---------------------- Args ----------------------')
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path', help='Path to the model file.', required=True)
parser.add_argument(
'--data',
help=('Path to the training set; images are stored '
'under sub-directories named by category.'),
required=True)
parser.add_argument('--output', help='Name of the trained model.')
parser.add_argument(
'--test_ratio',
type=float,
help='Float in (0,1): fraction of the data held out for testing.')
parser.add_argument(
'--keep_classes',
action='store_true',
help='Whether to keep base model classes.')
args = parser.parse_args()
if not args.output:
model_name = os.path.basename(args.model_path)
args.output = model_name.replace('.tflite', '_retrained.tflite')
print('Output path :', args.output)
# By default, choose 25% data for test.
if not args.test_ratio:
args.test_ratio = 0.25
assert args.test_ratio > 0
assert args.test_ratio < 1.0
print('Ratio of test images: {:.0%}'.format(args.test_ratio))
return args
def main():
args = _parse_args()
engine = ImprintingEngine(args.model_path, keep_classes=args.keep_classes)
extractor = make_interpreter(engine.serialize_extractor_model(), device=':0')
extractor.allocate_tensors()
shape = common.input_size(extractor)
print('--------------- Parsing data set -----------------')
print('Dataset path:', args.data)
train_set, test_set = _read_data(args.data, args.test_ratio)
print('Image list successfully parsed! Category Num = ', len(train_set))
print('---------------- Processing training data ----------------')
print('This process may take more than 30 seconds.')
train_input = []
labels_map = {}
for class_id, (category, image_list) in enumerate(train_set.items()):
print('Processing category:', category)
train_input.append(
_prepare_images(image_list, os.path.join(args.data, category), shape))
labels_map[class_id] = category
print('---------------- Start training -----------------')
num_classes = engine.num_classes
for class_id, tensors in enumerate(train_input):
for tensor in tensors:
common.set_input(extractor, tensor)
extractor.invoke()
embedding = classify.get_scores(extractor)
engine.train(embedding, class_id=num_classes + class_id)
print('---------------- Training finished! -----------------')
with open(args.output, 'wb') as f:
f.write(engine.serialize_model())
print('Model saved as : ', args.output)
_save_labels(labels_map, args.output)
print('------------------ Start evaluating ------------------')
interpreter = make_interpreter(args.output)
interpreter.allocate_tensors()
size = common.input_size(interpreter)
top_k = 5
correct = [0] * top_k
wrong = [0] * top_k
for category, image_list in test_set.items():
print('Evaluating category [', category, ']')
for img_name in image_list:
img = Image.open(os.path.join(args.data, category,
img_name)).resize(size, Image.NEAREST)
common.set_input(interpreter, img)
interpreter.invoke()
candidates = classify.get_classes(interpreter, top_k, score_threshold=0.1)
recognized = False
for i in range(top_k):
if i < len(candidates) and labels_map[candidates[i].id] == category:
recognized = True
if recognized:
correct[i] = correct[i] + 1
else:
wrong[i] = wrong[i] + 1
print('---------------- Evaluation result -----------------')
for i in range(top_k):
print('Top {} : {:.0%}'.format(i + 1, correct[i] / (correct[i] + wrong[i])))
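# Note on the tally above (added for clarity): once the correct label shows
# up at rank i, every rank >= i counts as correct, so the printed figures
# are cumulative top-1 through top-5 accuracies.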
if __name__ == '__main__':
main()
| apache-2.0 |
jandom/rdkit | rdkit/Dbase/UnitTestDbConnect.py | 1 | 5762 | # $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for database connection objects
"""
from rdkit import RDConfig
import unittest, os, sys, tempfile, shutil
from rdkit.Dbase.DbConnection import DbConnect
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
#sys.stderr.write('\n%s: \n'%self.shortDescription())
#sys.stderr.flush()
self.baseDir = os.path.join(RDConfig.RDCodeDir, 'Dbase', 'test_data')
self.dbName = RDConfig.RDTestDatabase
self.colHeads = ('int_col', 'floatCol', 'strCol')
self.colTypes = ('integer', 'float', 'string')
if RDConfig.useSqlLite:
tmpf, tempName = tempfile.mkstemp(suffix='sqlt')
self.tempDbName = tempName
shutil.copyfile(self.dbName, self.tempDbName)
else:
self.tempDbName = '::RDTests'
def tearDown(self):
if RDConfig.useSqlLite and os.path.exists(self.tempDbName):
try:
os.unlink(self.tempDbName)
except:
import traceback
traceback.print_exc()
def testAddTable(self):
""" tests AddTable and GetTableNames functionalities """
newTblName = 'NEW_TABLE'
conn = DbConnect(self.tempDbName)
try:
conn.GetCursor().execute('drop table %s' % (newTblName))
except Exception:
pass
conn.Commit()
conn.AddTable(newTblName, 'id int')
names = [x.strip() for x in conn.GetTableNames()]
assert newTblName in names, 'name (%s) not found in %s' % (newTblName, str(names))
conn.GetCursor().execute('drop table %s' % (newTblName))
def testCursor(self):
""" tests GetCursor and GetTableNames functionalities """
viewName = 'TEST_VIEW'
conn = DbConnect(self.tempDbName)
curs = conn.GetCursor()
assert curs
try:
curs.execute('drop view %s' % (viewName))
except Exception:
pass
try:
curs.execute('create view %s as select val,id from ten_elements' % (viewName))
except Exception:
import traceback
traceback.print_exc()
assert 0
conn.Commit()
names = [x.strip() for x in conn.GetTableNames(includeViews=0)]
assert viewName not in names, 'improper view found'
names = [x.strip() for x in conn.GetTableNames(includeViews=1)]
assert viewName in names, 'improper view found in %s' % (str(names))
try:
curs.execute('drop view %s' % (viewName))
except Exception:
assert 0, 'drop table failed'
def testGetData1(self):
""" basic functionality
"""
conn = DbConnect(self.dbName, 'ten_elements')
d = conn.GetData(randomAccess=1)
assert len(d) == 10
assert tuple(d[0]) == (0, 11)
assert tuple(d[2]) == (4, 31)
self.assertRaises(IndexError, lambda: d[11])
def testGetData2(self):
""" using removeDups
"""
conn = DbConnect(self.dbName, 'ten_elements_dups')
d = conn.GetData(randomAccess=1, removeDups=1)
assert tuple(d[0]) == (0, 11)
assert tuple(d[2]) == (4, 31)
assert len(d) == 10
self.assertRaises(IndexError, lambda: d[11])
def testGetData3(self):
""" without removeDups
"""
conn = DbConnect(self.dbName, 'ten_elements_dups')
d = conn.GetData(randomAccess=1, removeDups=-1)
assert tuple(d[0]) == (0, 11)
assert tuple(d[2]) == (2, 21)
assert len(d) == 20
self.assertRaises(IndexError, lambda: d[21])
# repeat that test to make sure the table argument works
conn = DbConnect(self.dbName, 'ten_elements')
d = conn.GetData(table='ten_elements_dups', randomAccess=1, removeDups=-1)
assert tuple(d[0]) == (0, 11)
assert tuple(d[2]) == (2, 21)
assert len(d) == 20
self.assertRaises(IndexError, lambda: d[21])
def testGetData4(self):
""" non random access
"""
conn = DbConnect(self.dbName, 'ten_elements')
d = conn.GetData(randomAccess=0)
self.assertRaises(TypeError, lambda: len(d))
rs = []
for thing in d:
rs.append(thing)
assert len(rs) == 10
assert tuple(rs[0]) == (0, 11)
assert tuple(rs[2]) == (4, 31)
def testGetData5(self):
""" using a RandomAccessDbResultSet with a Transform
"""
fn = lambda x: (x[0], x[1] * 2)
conn = DbConnect(self.dbName, 'ten_elements')
d = conn.GetData(randomAccess=1, transform=fn)
assert tuple(d[0]) == (0, 22), str(d[0])
assert tuple(d[2]) == (4, 62)
assert len(d) == 10
self.assertRaises(IndexError, lambda: d[11])
def testGetData6(self):
""" using a DbResultSet with a Transform
"""
fn = lambda x: (x[0], x[1] * 2)
conn = DbConnect(self.dbName, 'ten_elements')
d = conn.GetData(randomAccess=0, transform=fn)
self.assertRaises(TypeError, lambda: len(d))
rs = []
for thing in d:
rs.append(thing)
assert len(rs) == 10
assert tuple(rs[0]) == (0, 22)
assert tuple(rs[2]) == (4, 62)
def testInsertData(self):
""" tests InsertData and InsertColumnData functionalities """
newTblName = 'NEW_TABLE'
conn = DbConnect(self.tempDbName)
try:
conn.GetCursor().execute('drop table %s' % (newTblName))
except Exception:
pass
conn.Commit()
conn.AddTable(newTblName, 'id int,val1 int, val2 int')
for i in range(10):
conn.InsertData(newTblName, (i, i + 1, 2 * i))
conn.Commit()
d = conn.GetData(table=newTblName)
assert len(d) == 10
d = None
try:
conn.GetCursor().execute('drop table %s' % (newTblName))
except Exception:
assert 0, 'drop table failed'
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
ronggong/jingjuSingingPhraseMatching | phoneticSimilarity/scoreManip.py | 1 | 7588 | '''
* Copyright (C) 2017 Music Technology Group - Universitat Pompeu Fabra
*
* This file is part of jingjuSingingPhraseMatching
*
* pypYIN is free software: you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License as published by the Free
* Software Foundation (FSF), either version 3 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the Affero GNU General Public License
* version 3 along with this program. If not, see http://www.gnu.org/licenses/
*
* If you have any problems with this Python version of the code, please contact: Rong Gong
* rong.gong@upf.edu
*
*
* If you want to cite this code, please use this article:
*
'''
from general.pinyinMap import *
from general.phonemeMap import *
from general.parameters import list_N_frames
from general.utilsFunctions import hz2cents
from general.filePath import class_name
from tailsMFCCTrain import loadMFCCTrain
from targetAudioProcessing import gmmModelLoad
import matplotlib.pyplot as plt
import numpy as np
import pinyin
import json
import pickle
from sklearn import preprocessing
def scoreMSynthesize(dict_score_info,N_frame):
'''
synthesize the score matrix: one row per final, one column per frame
:param dict_score_info: score dict for one phrase, containing a 'notes' list
:param N_frame: number of frames of the synthesized matrix
:return: binary matrix of shape (len(finals), N_frame)
'''
syl_finals,syl_durations,_ = retrieveSylInfo(dict_score_info)
# print syl_finals,syl_durations
syl_durations = (np.array(syl_durations)/np.sum(syl_durations))*N_frame
scoreM = np.zeros((len(finals),N_frame))
counter_frame=0
for ii in range(len(syl_finals)-1):
index_final = finals.index(syl_finals[ii])
scoreM[index_final,counter_frame:counter_frame+syl_durations[ii]] = 1.0
counter_frame += syl_durations[ii]
index_final = finals.index(syl_finals[-1])
scoreM[index_final,counter_frame:] = 1
return scoreM
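# Tiny illustration (added; values are made up): two syllables with note
# durations [1.0, 3.0] and N_frame=8 are rescaled to [2, 6] frames, so the
# first final's row holds ones over frames 0-1 and the second's over 2-7.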
def scoreMSynthesizePho(dict_score_info,N_frame):
'''
synthesize the score matrix at the phoneme level: one row per phoneme class
:param dict_score_info: score dict for one phrase, containing a 'notes' list
:param N_frame: number of frames of the synthesized matrix
:return: binary matrix of shape (len(dic_pho_map), N_frame)
'''
syl_finals,_,_ = retrieveSylInfo(dict_score_info)
# print syl_finals,syl_durations
list_pho = []
for sf in syl_finals:
pho_final = dic_final_2_sampa[sf]
for pho in pho_final:
pho_map = dic_pho_map[pho]
if pho_map == u'H':
pho_map = u'y'
elif pho_map == u'9':
pho_map = u'S'
elif pho_map == u'yn':
pho_map = u'in'
list_pho.append(pho_map)
pho_dict = dic_pho_map.values()
pho_durations = (np.array([1.0]*len(list_pho))/len(list_pho))*N_frame
scoreM = np.zeros((len(pho_dict),N_frame))
counter_frame=0
for ii in range(len(list_pho)-1):
index_pho = pho_dict.index(list_pho[ii])
scoreM[index_pho,counter_frame:counter_frame+pho_durations[ii]] = 1.0
counter_frame += pho_durations[ii]
index_pho = pho_dict.index(list_pho[-1])
scoreM[index_pho,counter_frame:] = 1
return scoreM
def mfccSynthesizeFromGMM(dict_score_info,dim_mfcc,N_frame):
'''
sample the mfcc array from GMM model
:param dict_score_info: score dict for one phrase
:param dim_mfcc: dimensionality of the MFCC feature vectors
:param N_frame: number of frames to synthesize
:return: sampled MFCC array of shape (N_frame, dim_mfcc)
'''
gmmModels = gmmModelLoad()
syl_finals,syl_durations,_ = retrieveSylInfo(dict_score_info)
# print syl_finals,syl_durations
syl_durations = (np.array(syl_durations)/np.sum(syl_durations))*N_frame
mfcc_synthesized = np.zeros((N_frame,dim_mfcc))
counter_frame = 0
for ii in range(len(syl_finals)-1):
final_ii = syl_finals[ii]
dur = int(float(syl_durations[ii]))
if final_ii == 'v':
final_ii = 'u'
elif final_ii == 've':
final_ii = 'ue'
X,y = gmmModels[final_ii].sample(n_samples=dur)
mfcc_synthesized[counter_frame:counter_frame+dur,:] = X
counter_frame += dur
X,y = gmmModels[syl_finals[-1]].sample(n_samples=mfcc_synthesized.shape[0]-counter_frame)
mfcc_synthesized[counter_frame:,:] = X
return mfcc_synthesized
def mfccSynthesizeFromData(dict_score_info,dic_syllable_feature,N_frame):
'''
synthesize mfcc feature matrix for a singing candidate phrase
:param dict_score_info:
:param N_frame:
:return:
'''
syl_finals,syl_durations,syl_cents = retrieveSylInfo(dict_score_info)
syl_durations = (np.array(syl_durations)/np.sum(syl_durations))*N_frame
mfcc_synthesized = np.array([])
for ii in range(len(syl_finals)):
final_ii = syl_finals[ii]
if final_ii == 'v':
final_ii = 'u'
elif final_ii == 've':
final_ii = 'ue'
list_final_ii = dic_syllable_feature[final_ii]
if len(list_final_ii):
list_dur = np.array([dic_final_element_ii['N_frame'] for dic_final_element_ii in list_final_ii])
index_final_chosen = np.argmin((np.abs(list_dur-syl_durations[ii])))
mfcc = list_final_ii[index_final_chosen]['mfcc']
if not len(mfcc_synthesized):
mfcc_synthesized = mfcc
else:
mfcc_synthesized = np.vstack((mfcc_synthesized,mfcc))
else:
print('no candidate final found for final %s' % final_ii)
# mfcc_synthesized = preprocessing.StandardScaler().fit_transform(mfcc_synthesized)
return mfcc_synthesized
def retrieveSylInfo(dict_score_info):
'''
collect pinyin and pinyin durations from score
:param dict_score_info:
:return:
'''
syl_finals = []
syl_durations =[]
syl_cents = []
for dict_note in dict_score_info['notes']:
lyric = dict_note['lyric']
dur = dict_note['quarterLength']
cents = hz2cents(dict_note['freq'])
# print lyric
# print dict_note['quarterLength']
if lyric and dur:
py = pinyin.get(lyric, format="strip", delimiter=" ")
py_split = py.split()
if len(py_split) > 1:
if py_split[0] in non_pinyin:
py = py_split[1]
else:
py = py_split[0]
# print py
final = dic_pinyin_2_initial_final_map[py]['final']
syl_finals.append(final)
syl_durations.append(dur)
syl_cents.append(cents)
elif len(syl_durations):
syl_durations[-1] += dur
return syl_finals,syl_durations,syl_cents
def plotMFCC(mfcc):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.pcolormesh(mfcc)
plt.show()
def processSyllableMFCCTemplates(N_frame):
'''
generate mfcc synthesized templates according to N_frame
N_frame: template syllable frame length
:return:
'''
dic_mfcc_synthesized = {}
for key in dict_score:
print key
mfcc_synthesized = mfccSynthesizeFromData(dict_score[key],dic_syllable_feature_train,N_frame)
dic_mfcc_synthesized[key] = mfcc_synthesized
output = open('syllable_mfcc_templates/dic_mfcc_synthesized_'+str(N_frame)+'.pkl', 'wb')
pickle.dump(dic_mfcc_synthesized, output)
output.close()
if __name__ == '__main__':
dict_score = json.load(open('../melodicSimilarity/scores.json'))
dic_syllable_feature_train = loadMFCCTrain('dic_syllable_feature_train_'+class_name+'.pkl')
for N_frame in list_N_frames:
processSyllableMFCCTemplates(N_frame)
| agpl-3.0 |
vidbina/ansible | lib/ansible/plugins/action/synchronize.py | 26 | 10167 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
from ansible.plugins.action import ActionBase
from ansible.plugins import connection_loader
from ansible.utils.boolean import boolean
from ansible import constants as C
class ActionModule(ActionBase):
def _get_absolute_path(self, path):
original_path = path
if self._task._role is not None:
path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
else:
path = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', path)
if original_path and original_path[-1] == '/' and path[-1] != '/':
# make sure the dwim'd path ends in a trailing "/"
# if the original path did
path += '/'
return path
def _process_origin(self, host, path, user):
if host not in C.LOCALHOST:
if user:
return '%s@[%s]:%s' % (user, host, path)
else:
return '[%s]:%s' % (host, path)
if ':' not in path and not path.startswith('/'):
path = self._get_absolute_path(path=path)
return path
def _process_remote(self, host, path, user):
transport = self._play_context.connection
if host not in C.LOCALHOST or transport != "local":
if user:
return '%s@[%s]:%s' % (user, host, path)
else:
return '[%s]:%s' % (host, path)
if ':' not in path and not path.startswith('/'):
path = self._get_absolute_path(path=path)
return path
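# Examples of the rsync path strings produced above (added; host, user and
# path values are illustrative):
#   _process_remote('web1', '/var/www', 'deploy') -> 'deploy@[web1]:/var/www'
#   _process_remote('web1', '/var/www', None)     -> '[web1]:/var/www'
# Local paths skip the host prefix and are dwim'd relative to the role/play.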
def _override_module_replaced_vars(self, task_vars):
""" Some vars are substituted into the modules. Have to make sure
that those are correct for localhost when synchronize creates its own
connection to localhost."""
# Clear the current definition of these variables as they came from the
# connection to the remote host
if 'ansible_syslog_facility' in task_vars:
del task_vars['ansible_syslog_facility']
for key in task_vars.keys():
if key.startswith("ansible_") and key.endswith("_interpreter"):
del task_vars[key]
# Add the definitions from localhost
for host in C.LOCALHOST:
if host in task_vars['hostvars']:
localhost = task_vars['hostvars'][host]
break
if 'ansible_syslog_facility' in localhost:
task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility']
for key in localhost:
if key.startswith("ansible_") and key.endswith("_interpreter"):
task_vars[key] = localhost[key]
def run(self, tmp=None, task_vars=dict()):
''' generates params and passes them on to the rsync module '''
original_transport = task_vars.get('ansible_connection') or self._play_context.connection
remote_transport = False
if original_transport != 'local':
remote_transport = True
try:
delegate_to = self._task.delegate_to
except (AttributeError, KeyError):
delegate_to = None
use_ssh_args = self._task.args.pop('use_ssh_args', None)
# Parameter name needed by the ansible module
self._task.args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync'
# rsync thinks that one end of the connection is localhost and the
# other is the host we're running the task for (Note: We use
# ansible's delegate_to mechanism to determine which host rsync is
# running on so localhost could be a non-controller machine if
# delegate_to is used)
src_host = '127.0.0.1'
dest_host = task_vars.get('ansible_ssh_host') or task_vars.get('inventory_hostname')
dest_is_local = dest_host in C.LOCALHOST
# CHECK FOR NON-DEFAULT SSH PORT
if self._task.args.get('dest_port', None) is None:
inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT
if inv_port is not None:
self._task.args['dest_port'] = inv_port
# Set use_delegate if we are going to run rsync on a delegated host
# instead of localhost
use_delegate = False
if dest_host == delegate_to:
# edge case: explicit delegate and dest_host are the same
# so we run rsync on the remote machine targeting its localhost
# (itself)
dest_host = '127.0.0.1'
use_delegate = True
elif delegate_to is not None and remote_transport:
# If we're delegating to a remote host then we need to use the
# delegate_to settings
use_delegate = True
# Delegate to localhost as the source of the rsync unless we've been
# told (via delegate_to) that a different host is the source of the
# rsync
transport_overridden = False
if not use_delegate and remote_transport:
# Create a connection to localhost to run rsync on
new_stdin = self._connection._new_stdin
new_connection = connection_loader.get('local', self._play_context, new_stdin)
self._connection = new_connection
transport_overridden = True
self._override_module_replaced_vars(task_vars)
# COMPARE DELEGATE, HOST AND TRANSPORT
between_multiple_hosts = False
if dest_host != src_host and remote_transport:
# We're not copying two filesystem trees on the same host so we
# need to correctly format the paths for rsync (like
# user@host:path/to/tree
between_multiple_hosts = True
# SWITCH SRC AND DEST HOST PER MODE
if self._task.args.get('mode', 'push') == 'pull':
(dest_host, src_host) = (src_host, dest_host)
# MUNGE SRC AND DEST PER REMOTE_HOST INFO
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
if between_multiple_hosts or use_delegate:
# Private key handling
# both the delegate and non-delegate cases currently resolve the key the same way
private_key = task_vars.get('ansible_ssh_private_key_file') or self._play_context.private_key_file
if private_key is not None:
private_key = os.path.expanduser(private_key)
self._task.args['private_key'] = private_key
# Src and dest rsync "path" handling
# Determine if we need a user@
user = None
if boolean(self._task.args.get('set_remote_user', 'yes')):
if use_delegate:
user = task_vars.get('ansible_delegated_vars', dict()).get('ansible_ssh_user', None)
if not user:
user = C.DEFAULT_REMOTE_USER
else:
user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
# use the mode to define src and dest's url
if self._task.args.get('mode', 'push') == 'pull':
# src is a remote path: <user>@<host>, dest is a local path
src = self._process_remote(src_host, src, user)
dest = self._process_origin(dest_host, dest, user)
else:
# src is a local path, dest is a remote path: <user>@<host>
src = self._process_origin(src_host, src, user)
dest = self._process_remote(dest_host, dest, user)
else:
# Still need to munge paths (to account for roles) even if we aren't
# copying files between hosts
if not src.startswith('/'):
src = self._get_absolute_path(path=src)
if not dest.startswith('/'):
dest = self._get_absolute_path(path=dest)
self._task.args['src'] = src
self._task.args['dest'] = dest
# Allow custom rsync path argument
rsync_path = self._task.args.get('rsync_path', None)
# If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument
if not rsync_path and transport_overridden and self._play_context.become and self._play_context.become_method == 'sudo' and not dest_is_local:
rsync_path = 'sudo rsync'
# make sure rsync path is quoted.
if rsync_path:
self._task.args['rsync_path'] = '"%s"' % rsync_path
if use_ssh_args:
self._task.args['ssh_args'] = C.ANSIBLE_SSH_ARGS
# run the module and store the result
result = self._execute_module('synchronize', task_vars=task_vars)
if 'SyntaxError' in result.get('msg', ''):
# Emit a warning about using python3 because synchronize is
# somewhat unique in running on localhost
result['traceback'] = result['msg']
result['msg'] = 'SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this'
return result
| gpl-3.0 |
dayo7116/three.js | utils/converters/fbx/convert_to_threejs.py | 213 | 77684 | # @author zfedoran / http://github.com/zfedoran
import os
import sys
import math
import operator
import re
import json
import types
import shutil
# #####################################################
# Globals
# #####################################################
option_triangulate = True
option_textures = True
option_copy_textures = True
option_prefix = True
option_geometry = False
option_forced_y_up = False
option_default_camera = False
option_default_light = False
option_pretty_print = False
converter = None
inputFolder = ""
outputFolder = ""
# #####################################################
# Pretty Printing Hacks
# #####################################################
# Force an array to be printed fully on a single line
class NoIndent(object):
def __init__(self, value, separator = ','):
self.separator = separator
self.value = value
def encode(self):
if not self.value:
return None
return '[ %s ]' % self.separator.join(str(f) for f in self.value)
# Force an array into chunks rather than printing each element on a new line
class ChunkedIndent(object):
def __init__(self, value, chunk_size = 15, force_rounding = False):
self.value = value
self.size = chunk_size
self.force_rounding = force_rounding
def encode(self):
# Turn the flat array into an array of arrays where each subarray is of
# length chunk_size. Then string concat the values in the chunked
# arrays, delimited with ', ' (rounding the values when force_rounding is
# set), and finally prepend '{CHUNK}' so that we can find the strings with
# regex later
if not self.value:
return None
if self.force_rounding:
return ['{CHUNK}%s' % ', '.join(str(round(f, 6)) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
else:
return ['{CHUNK}%s' % ', '.join(str(f) for f in self.value[i:i+self.size]) for i in range(0, len(self.value), self.size)]
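# Example of the chunking above (added for clarity):
#   ChunkedIndent([1, 2, 3, 4, 5], chunk_size=2).encode()
#   -> ['{CHUNK}1, 2', '{CHUNK}3, 4', '{CHUNK}5']
# executeRegexHacks() later strips the quotes and '{CHUNK}' markers from the
# JSON output so each chunk prints as a bare line of numbers.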
# This custom encoder looks for instances of NoIndent or ChunkedIndent.
# When it finds one, it delegates to that object's encode() method instead
# of the default JSON encoding.
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, NoIndent) or isinstance(obj, ChunkedIndent):
return obj.encode()
else:
return json.JSONEncoder.default(self, obj)
def executeRegexHacks(output_string):
# turn strings of arrays into arrays (remove the double quotes)
output_string = re.sub(':\s*\"(\[.*\])\"', r': \1', output_string)
output_string = re.sub('(\n\s*)\"(\[.*\])\"', r'\1\2', output_string)
output_string = re.sub('(\n\s*)\"{CHUNK}(.*)\"', r'\1\2', output_string)
# replace '0metadata' with metadata
output_string = re.sub('0metadata', r'metadata', output_string)
# replace 'zchildren' with children
output_string = re.sub('zchildren', r'children', output_string)
# add an extra newline after '"children": {'
output_string = re.sub('(children.*{\s*\n)', r'\1\n', output_string)
# add an extra newline after '},'
output_string = re.sub('},\s*\n', r'},\n\n', output_string)
# add an extra newline after '\n\s*],'
output_string = re.sub('(\n\s*)],\s*\n', r'\1],\n\n', output_string)
return output_string
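# Before/after sketch of the first hack (added; values are illustrative):
#   '"position": "[ 1, 2, 3 ]"'  becomes  '"position": [ 1, 2, 3 ]'
# i.e. the quotes wrapping a stringified array are removed so it parses as a
# real JSON array.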
# #####################################################
# Object Serializers
# #####################################################
# FbxVector2 is not JSON serializable
def serializeVector2(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5))
if option_pretty_print:
return NoIndent([v[0], v[1]], ', ')
else:
return [v[0], v[1]]
# FbxVector3 is not JSON serializable
def serializeVector3(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2]], ', ')
else:
return [v[0], v[1], v[2]]
# FbxVector4 is not JSON serializable
def serializeVector4(v, round_vector = False):
# JSON does not support NaN or Inf
if math.isnan(v[0]) or math.isinf(v[0]):
v[0] = 0
if math.isnan(v[1]) or math.isinf(v[1]):
v[1] = 0
if math.isnan(v[2]) or math.isinf(v[2]):
v[2] = 0
if math.isnan(v[3]) or math.isinf(v[3]):
v[3] = 0
if round_vector or option_pretty_print:
v = (round(v[0], 5), round(v[1], 5), round(v[2], 5), round(v[3], 5))
if option_pretty_print:
return NoIndent([v[0], v[1], v[2], v[3]], ', ')
else:
return [v[0], v[1], v[2], v[3]]
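# Why the NaN/Inf scrubbing above matters (added note): Python's json module
# emits the non-standard tokens NaN/Infinity by default --
# json.dumps(float('nan')) -> 'NaN' -- which strict JSON parsers reject, so
# offending components are clamped to 0 before serialization.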
# #####################################################
# Helpers
# #####################################################
def getRadians(v):
return ((v[0]*math.pi)/180, (v[1]*math.pi)/180, (v[2]*math.pi)/180)
def getHex(c):
color = (int(c[0]*255) << 16) + (int(c[1]*255) << 8) + int(c[2]*255)
return int(color)
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
def generate_uvs(uv_layers):
layers = []
for uvs in uv_layers:
tmp = []
for uv in uvs:
tmp.append(uv[0])
tmp.append(uv[1])
if option_pretty_print:
layer = ChunkedIndent(tmp)
else:
layer = tmp
layers.append(layer)
return layers
# #####################################################
# Object Name Helpers
# #####################################################
def hasUniqueName(o, class_id):
scene = o.GetScene()
object_name = o.GetName()
object_id = o.GetUniqueID()
object_count = scene.GetSrcObjectCount(class_id)
for i in range(object_count):
other = scene.GetSrcObject(class_id, i)
other_id = other.GetUniqueID()
other_name = other.GetName()
if other_id == object_id:
continue
if other_name == object_name:
return False
return True
def getObjectName(o, force_prefix = False):
if not o:
return ""
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxNode.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Object_%s_" % object_id
return prefix + object_name
def getMaterialName(o, force_prefix = False):
object_name = o.GetName()
object_id = o.GetUniqueID()
if not force_prefix:
force_prefix = not hasUniqueName(o, FbxSurfaceMaterial.ClassId)
prefix = ""
if option_prefix or force_prefix:
prefix = "Material_%s_" % object_id
return prefix + object_name
def getTextureName(t, force_prefix = False):
if type(t) is FbxFileTexture:
texture_file = t.GetFileName()
texture_id = os.path.splitext(os.path.basename(texture_file))[0]
else:
texture_id = t.GetName()
if texture_id == "_empty_":
texture_id = ""
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % t.GetUniqueID()
if len(texture_id) == 0:
prefix = prefix[0:len(prefix)-1]
return prefix + texture_id
def getMtlTextureName(texture_name, texture_id, force_prefix = False):
texture_name = os.path.splitext(texture_name)[0]
prefix = ""
if option_prefix or force_prefix:
prefix = "Texture_%s_" % texture_id
return prefix + texture_name
def getPrefixedName(o, prefix):
return (prefix + '_%s_') % o.GetUniqueID() + o.GetName()
# #####################################################
# Triangulation
# #####################################################
def triangulate_node_hierarchy(node):
node_attribute = node.GetNodeAttribute();
if node_attribute:
if node_attribute.GetAttributeType() == FbxNodeAttribute.eMesh or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbs or \
node_attribute.GetAttributeType() == FbxNodeAttribute.eNurbsSurface or \
node_attribute.GetAttributeType() == FbxNodeAttribute.ePatch:
converter.TriangulateInPlace(node);
child_count = node.GetChildCount()
for i in range(child_count):
triangulate_node_hierarchy(node.GetChild(i))
def triangulate_scene(scene):
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
triangulate_node_hierarchy(node.GetChild(i))
# #####################################################
# Generate Material Object
# #####################################################
def generate_texture_bindings(material_property, material_params):
# FBX to Three.js texture types
binding_types = {
"DiffuseColor": "map",
"DiffuseFactor": "diffuseFactor",
"EmissiveColor": "emissiveMap",
"EmissiveFactor": "emissiveFactor",
"AmbientColor": "lightMap", # "ambientMap",
"AmbientFactor": "ambientFactor",
"SpecularColor": "specularMap",
"SpecularFactor": "specularFactor",
"ShininessExponent": "shininessExponent",
"NormalMap": "normalMap",
"Bump": "bumpMap",
"TransparentColor": "transparentMap",
"TransparencyFactor": "transparentFactor",
"ReflectionColor": "reflectionMap",
"ReflectionFactor": "reflectionFactor",
"DisplacementColor": "displacementMap",
"VectorDisplacementColor": "vectorDisplacementMap"
}
if material_property.IsValid():
#Here we have to check if it's layeredtextures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
else:
# no layered texture simply get on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_id = getTextureName(texture, True)
material_params[binding_types[str(material_property.GetName())]] = texture_id
def generate_material_object(material):
#Get the implementation to see if it's a hardware shader.
implementation = GetImplementation(material, "ImplementationHLSL")
implementation_type = "HLSL"
if not implementation:
implementation = GetImplementation(material, "ImplementationCGFX")
implementation_type = "CGFX"
output = None
material_params = None
material_type = None
if implementation:
print("Shader materials are not supported")
elif material.GetClassId().Is(FbxSurfaceLambert.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
opacity = opacity
transparent = False
reflectivity = 1
material_type = 'MeshBasicMaterial'
# material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
elif material.GetClassId().Is(FbxSurfacePhong.ClassId):
ambient = getHex(material.Ambient.Get())
diffuse = getHex(material.Diffuse.Get())
emissive = getHex(material.Emissive.Get())
specular = getHex(material.Specular.Get())
opacity = 1.0 - material.TransparencyFactor.Get()
opacity = 1.0 if opacity == 0 else opacity
opacity = opacity
shininess = material.Shininess.Get()
transparent = False
reflectivity = 1
bumpScale = 1
material_type = 'MeshPhongMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'specular' : specular,
'shininess' : shininess,
'bumpScale' : bumpScale,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
else:
print "Unknown type of Material", getMaterialName(material)
# default to Lambert Material if the current Material type cannot be handeled
if not material_type:
ambient = getHex((0,0,0))
diffuse = getHex((0.5,0.5,0.5))
emissive = getHex((0,0,0))
opacity = 1
transparent = False
reflectivity = 1
material_type = 'MeshLambertMaterial'
material_params = {
'color' : diffuse,
'ambient' : ambient,
'emissive' : emissive,
'reflectivity' : reflectivity,
'transparent' : transparent,
'opacity' : opacity
}
if option_textures:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
generate_texture_bindings(material_property, material_params)
material_params['wireframe'] = False
material_params['wireframeLinewidth'] = 1
output = {
'type' : material_type,
'parameters' : material_params
}
return output
def generate_proxy_material_object(node, material_names):
material_type = 'MeshFaceMaterial'
material_params = {
'materials' : material_names
}
output = {
'type' : material_type,
'parameters' : material_params
}
return output
# #####################################################
# Find Scene Materials
# #####################################################
def extract_materials_from_node(node, material_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
node = None
if mesh:
node = mesh.GetNode()
if node:
material_count = node.GetMaterialCount()
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append(getMaterialName(material))
if material_count > 1:
proxy_material = generate_proxy_material_object(node, material_names)
proxy_name = getMaterialName(node, True)
material_dict[proxy_name] = proxy_material
def generate_materials_from_hierarchy(node, material_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_materials_from_node(node, material_dict)
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
def generate_material_dict(scene):
material_dict = {}
# generate all materials for this scene
material_count = scene.GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for i in range(material_count):
material = scene.GetSrcObject(FbxSurfaceMaterial.ClassId, i)
material_object = generate_material_object(material)
material_name = getMaterialName(material)
material_dict[material_name] = material_object
# generate material proxies
# Three.js does not support meshes with multiple materials; however, it does
# support materials with multiple submaterials
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_materials_from_hierarchy(node.GetChild(i), material_dict)
return material_dict
# #####################################################
# Generate Texture Object
# #####################################################
def generate_texture_object(texture):
#TODO: extract more texture properties
wrap_u = texture.GetWrapModeU()
wrap_v = texture.GetWrapModeV()
offset = texture.GetUVTranslation()
if type(texture) is FbxFileTexture:
url = texture.GetFileName()
else:
url = getTextureName( texture )
#url = replace_inFolder2OutFolder( url )
#print( url )
index = url.rfind( '/' )
if index == -1:
index = url.rfind( '\\' )
filename = url[ index+1 : len(url) ]
output = {
'url': filename,
'fullpath': url,
'repeat': serializeVector2( (1,1) ),
'offset': serializeVector2( texture.GetUVTranslation() ),
'magFilter': 'LinearFilter',
'minFilter': 'LinearMipMapLinearFilter',
'anisotropy': True
}
return output
# #####################################################
# Replace Texture input path to output
# #####################################################
def replace_inFolder2OutFolder(url):
folderIndex = url.find(inputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(inputFolder): ]
url = outputFolder + url
return url
# #####################################################
# Replace Texture output path to input
# #####################################################
def replace_OutFolder2inFolder(url):
folderIndex = url.find(outputFolder)
if folderIndex != -1:
url = url[ folderIndex+len(outputFolder): ]
url = inputFolder + url
return url
# #####################################################
# Find Scene Textures
# #####################################################
def extract_material_textures(material_property, texture_dict):
if material_property.IsValid():
#Here we have to check if it's layeredtextures, or just textures:
layered_texture_count = material_property.GetSrcObjectCount(FbxLayeredTexture.ClassId)
if layered_texture_count > 0:
for j in range(layered_texture_count):
layered_texture = material_property.GetSrcObject(FbxLayeredTexture.ClassId, j)
texture_count = layered_texture.GetSrcObjectCount(FbxTexture.ClassId)
for k in range(texture_count):
texture = layered_texture.GetSrcObject(FbxTexture.ClassId,k)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
else:
# no layered texture simply get on the property
texture_count = material_property.GetSrcObjectCount(FbxTexture.ClassId)
for j in range(texture_count):
texture = material_property.GetSrcObject(FbxTexture.ClassId,j)
if texture:
texture_object = generate_texture_object(texture)
texture_name = getTextureName( texture, True )
texture_dict[texture_name] = texture_object
def extract_textures_from_node(node, texture_dict):
name = node.GetName()
mesh = node.GetNodeAttribute()
#for all materials attached to this mesh
material_count = mesh.GetNode().GetSrcObjectCount(FbxSurfaceMaterial.ClassId)
for material_index in range(material_count):
material = mesh.GetNode().GetSrcObject(FbxSurfaceMaterial.ClassId, material_index)
#go through all the possible textures types
if material:
texture_count = FbxLayerElement.sTypeTextureCount()
for texture_index in range(texture_count):
material_property = material.FindProperty(FbxLayerElement.sTextureChannelNames(texture_index))
extract_material_textures(material_property, texture_dict)
def generate_textures_from_hierarchy(node, texture_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
extract_textures_from_node(node, texture_dict)
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
def generate_texture_dict(scene):
if not option_textures:
return {}
texture_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_textures_from_hierarchy(node.GetChild(i), texture_dict)
return texture_dict
# #####################################################
# Extract Fbx SDK Mesh Data
# #####################################################
def extract_fbx_vertex_positions(mesh):
control_points_count = mesh.GetControlPointsCount()
control_points = mesh.GetControlPoints()
positions = []
for i in range(control_points_count):
tmp = control_points[i]
tmp = [tmp[0], tmp[1], tmp[2]]
positions.append(tmp)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
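    # Note (a sketch-level reading, not verified against the FBX SDK docs):
    # FbxMatrix.MultNormalize below applies the affine transform and divides by
    # w, so positions come back in global space when option_geometry is set, or
    # with only the geometric offset applied otherwise.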
if transform:
for i in range(len(positions)):
v = positions[i]
position = FbxVector4(v[0], v[1], v[2])
position = transform.MultNormalize(position)
positions[i] = [position[0], position[1], position[2]]
return positions
def extract_fbx_vertex_normals(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
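    #
    # Illustrative sketch (hypothetical values, not read from any file): with
    # eByPolygonVertex and eIndexToDirect, each polygon-vertex slot stores an
    # index into the direct array, so six polygon vertices can share four
    # unique normals:
    #   index array:  [0, 1, 2, 0, 2, 3]
    #   direct array: [n0, n1, n2, n3]
    # With eDirect, the slot position itself indexes the direct array.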
layered_normal_indices = []
layered_normal_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_normals = mesh.GetLayer(l).GetNormals()
if not mesh_normals:
continue
normals_array = mesh_normals.GetDirectArray()
normals_count = normals_array.GetCount()
if normals_count == 0:
continue
normal_indices = []
normal_values = []
# values
for i in range(normals_count):
normal = normals_array.GetAt(i)
normal = [normal[0], normal[1], normal[2]]
normal_values.append(normal)
node = mesh.GetNode()
if node:
t = node.GeometricTranslation.Get()
t = FbxVector4(t[0], t[1], t[2], 1)
r = node.GeometricRotation.Get()
r = FbxVector4(r[0], r[1], r[2], 1)
s = node.GeometricScaling.Get()
s = FbxVector4(s[0], s[1], s[2], 1)
hasGeometricTransform = False
if t[0] != 0 or t[1] != 0 or t[2] != 0 or \
r[0] != 0 or r[1] != 0 or r[2] != 0 or \
s[0] != 1 or s[1] != 1 or s[2] != 1:
hasGeometricTransform = True
if hasGeometricTransform:
geo_transform = FbxMatrix(t,r,s)
else:
geo_transform = FbxMatrix()
transform = None
if option_geometry:
# FbxMeshes are local to their node, we need the vertices in global space
# when scene nodes are not exported
transform = node.EvaluateGlobalTransform()
transform = FbxMatrix(transform) * geo_transform
elif hasGeometricTransform:
transform = geo_transform
if transform:
t = FbxVector4(0,0,0,1)
transform.SetRow(3, t)
for i in range(len(normal_values)):
n = normal_values[i]
normal = FbxVector4(n[0], n[1], n[2])
normal = transform.MultNormalize(normal)
normal.Normalize()
normal = [normal[0], normal[1], normal[2]]
normal_values[i] = normal
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_normals = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
# mapping mode is by control points. The mesh should be smooth and soft.
# we can get normals by retrieving each control point
if mesh_normals.GetMappingMode() == FbxLayerElement.eByControlPoint:
# reference mode is direct, the normal index is same as vertex index.
# get normals by the index of control vertex
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(control_point_index)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(control_point_index)
poly_normals.append(index)
# mapping mode is by polygon-vertex.
# we can get normals by retrieving polygon-vertex.
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_normals.GetReferenceMode() == FbxLayerElement.eDirect:
poly_normals.append(vertexId)
elif mesh_normals.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_normals.GetIndexArray().GetAt(vertexId)
poly_normals.append(index)
elif mesh_normals.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_normals.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_normals.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported normal mapping mode for polygon vertex")
vertexId += 1
normal_indices.append(poly_normals)
layered_normal_values.append(normal_values)
layered_normal_indices.append(normal_indices)
normal_values = []
normal_indices = []
# Three.js only supports one layer of normals
if len(layered_normal_values) > 0:
normal_values = layered_normal_values[0]
normal_indices = layered_normal_indices[0]
return normal_values, normal_indices
def extract_fbx_vertex_colors(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_color_indices = []
layered_color_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_colors = mesh.GetLayer(l).GetVertexColors()
if not mesh_colors:
continue
colors_array = mesh_colors.GetDirectArray()
colors_count = colors_array.GetCount()
if colors_count == 0:
continue
color_indices = []
color_values = []
# values
for i in range(colors_count):
color = colors_array.GetAt(i)
color = [color.mRed, color.mGreen, color.mBlue, color.mAlpha]
color_values.append(color)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_colors = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_colors.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(control_point_index)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(control_point_index)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
if mesh_colors.GetReferenceMode() == FbxLayerElement.eDirect:
poly_colors.append(vertexId)
elif mesh_colors.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_colors.GetIndexArray().GetAt(vertexId)
poly_colors.append(index)
elif mesh_colors.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_colors.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_colors.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported color mapping mode for polygon vertex")
vertexId += 1
color_indices.append(poly_colors)
layered_color_indices.append( color_indices )
layered_color_values.append( color_values )
color_values = []
color_indices = []
# Three.js only supports one layer of colors
if len(layered_color_values) > 0:
color_values = layered_color_values[0]
color_indices = layered_color_indices[0]
'''
# The Fbx SDK defaults mesh.Color to (0.8, 0.8, 0.8)
# This causes most models to receive incorrect vertex colors
if len(color_values) == 0:
color = mesh.Color.Get()
color_values = [[color[0], color[1], color[2]]]
color_indices = []
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
color_indices.append([0] * poly_size)
'''
return color_values, color_indices
def extract_fbx_vertex_uvs(mesh):
# eNone The mapping is undetermined.
# eByControlPoint There will be one mapping coordinate for each surface control point/vertex.
# eByPolygonVertex There will be one mapping coordinate for each vertex, for every polygon of which it is a part. This means that a vertex will have as many mapping coordinates as polygons of which it is a part.
# eByPolygon There can be only one mapping coordinate for the whole polygon.
# eByEdge There will be one mapping coordinate for each unique edge in the mesh. This is meant to be used with smoothing layer elements.
# eAllSame There can be only one mapping coordinate for the whole surface.
layered_uv_indices = []
layered_uv_values = []
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
for l in range(mesh.GetLayerCount()):
mesh_uvs = mesh.GetLayer(l).GetUVs()
if not mesh_uvs:
continue
uvs_array = mesh_uvs.GetDirectArray()
uvs_count = uvs_array.GetCount()
if uvs_count == 0:
continue
uv_indices = []
uv_values = []
# values
for i in range(uvs_count):
uv = uvs_array.GetAt(i)
uv = [uv[0], uv[1]]
uv_values.append(uv)
# indices
vertexId = 0
for p in range(poly_count):
poly_size = mesh.GetPolygonSize(p)
poly_uvs = []
for v in range(poly_size):
control_point_index = mesh.GetPolygonVertex(p, v)
if mesh_uvs.GetMappingMode() == FbxLayerElement.eByControlPoint:
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect:
poly_uvs.append(control_point_index)
elif mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
index = mesh_uvs.GetIndexArray().GetAt(control_point_index)
poly_uvs.append(index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygonVertex:
uv_texture_index = mesh_uvs.GetIndexArray().GetAt(vertexId)
if mesh_uvs.GetReferenceMode() == FbxLayerElement.eDirect or \
mesh_uvs.GetReferenceMode() == FbxLayerElement.eIndexToDirect:
poly_uvs.append(uv_texture_index)
elif mesh_uvs.GetMappingMode() == FbxLayerElement.eByPolygon or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eAllSame or \
mesh_uvs.GetMappingMode() == FbxLayerElement.eNone:
print("unsupported uv mapping mode for polygon vertex")
vertexId += 1
uv_indices.append(poly_uvs)
layered_uv_values.append(uv_values)
layered_uv_indices.append(uv_indices)
return layered_uv_values, layered_uv_indices
# #####################################################
# Process Mesh Geometry
# #####################################################
def generate_normal_key(normal):
return (round(normal[0], 6), round(normal[1], 6), round(normal[2], 6))
def generate_color_key(color):
return getHex(color)
def generate_uv_key(uv):
return (round(uv[0], 6), round(uv[1], 6))
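# The rounded tuples serve as dictionary keys, so values differing only by
# floating-point noise collapse into one entry. For example (hypothetical
# inputs, assuming 6 decimals is enough precision for the source data):
#   generate_normal_key([0.0000004, 1.0, 0.0]) == generate_normal_key([0.0, 1.0, 0.0])  # True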
def append_non_duplicate_uvs(source_uvs, dest_uvs, counts):
source_layer_count = len(source_uvs)
for layer_index in range(source_layer_count):
dest_layer_count = len(dest_uvs)
if dest_layer_count <= layer_index:
dest_uv_layer = {}
count = 0
dest_uvs.append(dest_uv_layer)
counts.append(count)
else:
dest_uv_layer = dest_uvs[layer_index]
count = counts[layer_index]
source_uv_layer = source_uvs[layer_index]
for uv in source_uv_layer:
key = generate_uv_key(uv)
if key not in dest_uv_layer:
dest_uv_layer[key] = count
count += 1
counts[layer_index] = count
return counts
def generate_unique_normals_dictionary(mesh_list):
normals_dictionary = {}
nnormals = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
node = mesh.GetNode()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
if len(normal_values) > 0:
for normal in normal_values:
key = generate_normal_key(normal)
if key not in normals_dictionary:
normals_dictionary[key] = nnormals
nnormals += 1
return normals_dictionary
def generate_unique_colors_dictionary(mesh_list):
colors_dictionary = {}
ncolors = 0
# Merge meshes, remove duplicate data
for mesh in mesh_list:
color_values, color_indices = extract_fbx_vertex_colors(mesh)
if len(color_values) > 0:
for color in color_values:
key = generate_color_key(color)
if key not in colors_dictionary:
colors_dictionary[key] = ncolors
ncolors += 1
return colors_dictionary
def generate_unique_uvs_dictionary_layers(mesh_list):
uvs_dictionary_layers = []
nuvs_list = []
# Merge meshes, remove duplicate data
for mesh in mesh_list:
uv_values, uv_indices = extract_fbx_vertex_uvs(mesh)
if len(uv_values) > 0:
nuvs_list = append_non_duplicate_uvs(uv_values, uvs_dictionary_layers, nuvs_list)
return uvs_dictionary_layers
def generate_normals_from_dictionary(normals_dictionary):
normal_values = []
for key, index in sorted(normals_dictionary.items(), key = operator.itemgetter(1)):
normal_values.append(key)
return normal_values
def generate_colors_from_dictionary(colors_dictionary):
color_values = []
for key, index in sorted(colors_dictionary.items(), key = operator.itemgetter(1)):
color_values.append(key)
return color_values
def generate_uvs_from_dictionary_layers(uvs_dictionary_layers):
uv_values = []
for uvs_dictionary in uvs_dictionary_layers:
uv_values_layer = []
for key, index in sorted(uvs_dictionary.items(), key = operator.itemgetter(1)):
uv_values_layer.append(key)
uv_values.append(uv_values_layer)
return uv_values
def generate_normal_indices_for_poly(poly_index, mesh_normal_values, mesh_normal_indices, normals_to_indices):
if len(mesh_normal_indices) <= 0:
return []
poly_normal_indices = mesh_normal_indices[poly_index]
poly_size = len(poly_normal_indices)
output_poly_normal_indices = []
for v in range(poly_size):
normal_index = poly_normal_indices[v]
normal_value = mesh_normal_values[normal_index]
key = generate_normal_key(normal_value)
output_index = normals_to_indices[key]
output_poly_normal_indices.append(output_index)
return output_poly_normal_indices
def generate_color_indices_for_poly(poly_index, mesh_color_values, mesh_color_indices, colors_to_indices):
if len(mesh_color_indices) <= 0:
return []
poly_color_indices = mesh_color_indices[poly_index]
poly_size = len(poly_color_indices)
output_poly_color_indices = []
for v in range(poly_size):
color_index = poly_color_indices[v]
color_value = mesh_color_values[color_index]
key = generate_color_key(color_value)
output_index = colors_to_indices[key]
output_poly_color_indices.append(output_index)
return output_poly_color_indices
def generate_uv_indices_for_poly(poly_index, mesh_uv_values, mesh_uv_indices, uvs_to_indices):
if len(mesh_uv_indices) <= 0:
return []
poly_uv_indices = mesh_uv_indices[poly_index]
poly_size = len(poly_uv_indices)
output_poly_uv_indices = []
for v in range(poly_size):
uv_index = poly_uv_indices[v]
uv_value = mesh_uv_values[uv_index]
key = generate_uv_key(uv_value)
output_index = uvs_to_indices[key]
output_poly_uv_indices.append(output_index)
return output_poly_uv_indices
def process_mesh_vertices(mesh_list):
vertex_offset = 0
vertex_offset_list = [0]
vertices = []
for mesh in mesh_list:
node = mesh.GetNode()
mesh_vertices = extract_fbx_vertex_positions(mesh)
vertices.extend(mesh_vertices[:])
vertex_offset += len(mesh_vertices)
vertex_offset_list.append(vertex_offset)
return vertices, vertex_offset_list
def process_mesh_materials(mesh_list):
material_offset = 0
material_offset_list = [0]
materials_list = []
#TODO: remove duplicate mesh references
for mesh in mesh_list:
node = mesh.GetNode()
material_count = node.GetMaterialCount()
if material_count > 0:
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
materials_list.append( material )
material_offset += material_count
material_offset_list.append(material_offset)
return materials_list, material_offset_list
def process_mesh_polygons(mesh_list, normals_to_indices, colors_to_indices, uvs_to_indices_list, vertex_offset_list, material_offset_list):
faces = []
for mesh_index in range(len(mesh_list)):
mesh = mesh_list[mesh_index]
flipWindingOrder = False
node = mesh.GetNode()
if node:
local_scale = node.EvaluateLocalScaling()
if local_scale[0] < 0 or local_scale[1] < 0 or local_scale[2] < 0:
flipWindingOrder = True
poly_count = mesh.GetPolygonCount()
control_points = mesh.GetControlPoints()
normal_values, normal_indices = extract_fbx_vertex_normals(mesh)
color_values, color_indices = extract_fbx_vertex_colors(mesh)
uv_values_layers, uv_indices_layers = extract_fbx_vertex_uvs(mesh)
for poly_index in range(poly_count):
poly_size = mesh.GetPolygonSize(poly_index)
face_normals = generate_normal_indices_for_poly(poly_index, normal_values, normal_indices, normals_to_indices)
face_colors = generate_color_indices_for_poly(poly_index, color_values, color_indices, colors_to_indices)
face_uv_layers = []
for l in range(len(uv_indices_layers)):
uv_values = uv_values_layers[l]
uv_indices = uv_indices_layers[l]
face_uv_indices = generate_uv_indices_for_poly(poly_index, uv_values, uv_indices, uvs_to_indices_list[l])
face_uv_layers.append(face_uv_indices)
face_vertices = []
for vertex_index in range(poly_size):
control_point_index = mesh.GetPolygonVertex(poly_index, vertex_index)
face_vertices.append(control_point_index)
#TODO: assign a default material to any mesh without one
if len(material_offset_list) <= mesh_index:
material_offset = 0
else:
material_offset = material_offset_list[mesh_index]
vertex_offset = vertex_offset_list[mesh_index]
if poly_size > 4:
new_face_normals = []
new_face_colors = []
new_face_uv_layers = []
for i in range(poly_size - 2):
new_face_vertices = [face_vertices[0], face_vertices[i+1], face_vertices[i+2]]
if len(face_normals):
new_face_normals = [face_normals[0], face_normals[i+1], face_normals[i+2]]
if len(face_colors):
new_face_colors = [face_colors[0], face_colors[i+1], face_colors[i+2]]
if len(face_uv_layers):
new_face_uv_layers = []
for layer in face_uv_layers:
new_face_uv_layers.append([layer[0], layer[i+1], layer[i+2]])
face = generate_mesh_face(mesh,
poly_index,
new_face_vertices,
new_face_normals,
new_face_colors,
new_face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
else:
face = generate_mesh_face(mesh,
poly_index,
face_vertices,
face_normals,
face_colors,
face_uv_layers,
vertex_offset,
material_offset,
flipWindingOrder)
faces.append(face)
return faces
def generate_mesh_face(mesh, polygon_index, vertex_indices, normals, colors, uv_layers, vertex_offset, material_offset, flipOrder):
isTriangle = ( len(vertex_indices) == 3 )
nVertices = 3 if isTriangle else 4
hasMaterial = False
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
hasMaterial = True
break
hasFaceUvs = False
hasFaceVertexUvs = len(uv_layers) > 0
hasFaceNormals = False
hasFaceVertexNormals = len(normals) > 0
hasFaceColors = False
hasFaceVertexColors = len(colors) > 0
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
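    # Worked example (hypothetical face): a triangle with a material,
    # per-vertex uvs and per-vertex normals sets bits 1, 3 and 5,
    # giving faceType == 2 + 8 + 32 == 42.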
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face color index
# face vertex colors indices
faceData.append(faceType)
if flipOrder:
if nVertices == 3:
vertex_indices = [vertex_indices[0], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
tmp.append([polygon_uvs[0], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
else:
vertex_indices = [vertex_indices[0], vertex_indices[3], vertex_indices[2], vertex_indices[1]]
if hasFaceVertexNormals:
normals = [normals[0], normals[3], normals[2], normals[1]]
if hasFaceVertexColors:
colors = [colors[0], colors[3], colors[2], colors[1]]
if hasFaceVertexUvs:
tmp = []
for polygon_uvs in uv_layers:
                tmp.append([polygon_uvs[0], polygon_uvs[3], polygon_uvs[2], polygon_uvs[1]])
uv_layers = tmp
for i in range(nVertices):
index = vertex_indices[i] + vertex_offset
faceData.append(index)
if hasMaterial:
material_id = 0
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
material_id = materials.GetIndexArray().GetAt(polygon_index)
break
material_id += material_offset
faceData.append( material_id )
if hasFaceVertexUvs:
for polygon_uvs in uv_layers:
for i in range(nVertices):
index = polygon_uvs[i]
faceData.append(index)
if hasFaceVertexNormals:
for i in range(nVertices):
index = normals[i]
faceData.append(index)
if hasFaceVertexColors:
for i in range(nVertices):
index = colors[i]
faceData.append(index)
return faceData
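# Continuing the hypothetical faceType == 42 example above, with one uv layer
# the returned faceData is laid out as:
#   [42, v0, v1, v2, material_index, uv0, uv1, uv2, n0, n1, n2]
# where the vertex indices have already been shifted by vertex_offset.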
# #####################################################
# Generate Mesh Object (for scene output format)
# #####################################################
def generate_scene_output(node):
mesh = node.GetNodeAttribute()
# This is done in order to keep the scene output and non-scene output code DRY
mesh_list = [ mesh ]
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable automatic json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = ChunkedIndent(vertices, 15, True)
normal_values = ChunkedIndent(normal_values, 15, True)
color_values = ChunkedIndent(color_values, 15)
faces = ChunkedIndent(faces, 30)
metadata = {
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
# #####################################################
# Generate Mesh Object (for non-scene output)
# #####################################################
def generate_non_scene_output(scene):
mesh_list = generate_mesh_list(scene)
# Extract the mesh data into arrays
vertices, vertex_offsets = process_mesh_vertices(mesh_list)
materials, material_offsets = process_mesh_materials(mesh_list)
normals_to_indices = generate_unique_normals_dictionary(mesh_list)
colors_to_indices = generate_unique_colors_dictionary(mesh_list)
uvs_to_indices_list = generate_unique_uvs_dictionary_layers(mesh_list)
normal_values = generate_normals_from_dictionary(normals_to_indices)
color_values = generate_colors_from_dictionary(colors_to_indices)
uv_values = generate_uvs_from_dictionary_layers(uvs_to_indices_list)
# Generate mesh faces for the Three.js file format
faces = process_mesh_polygons(mesh_list,
normals_to_indices,
colors_to_indices,
uvs_to_indices_list,
vertex_offsets,
material_offsets)
# Generate counts for uvs, vertices, normals, colors, and faces
nuvs = []
for layer_index, uvs in enumerate(uv_values):
nuvs.append(str(len(uvs)))
nvertices = len(vertices)
nnormals = len(normal_values)
ncolors = len(color_values)
nfaces = len(faces)
# Flatten the arrays, currently they are in the form of [[0, 1, 2], [3, 4, 5], ...]
vertices = [val for v in vertices for val in v]
normal_values = [val for n in normal_values for val in n]
color_values = [c for c in color_values]
faces = [val for f in faces for val in f]
uv_values = generate_uvs(uv_values)
# Disable json indenting when pretty printing for the arrays
if option_pretty_print:
nuvs = NoIndent(nuvs)
vertices = NoIndent(vertices)
normal_values = NoIndent(normal_values)
color_values = NoIndent(color_values)
faces = NoIndent(faces)
metadata = {
'formatVersion' : 3,
'type' : 'geometry',
'generatedBy' : 'convert-to-threejs.py',
'vertices' : nvertices,
'normals' : nnormals,
'colors' : ncolors,
'faces' : nfaces,
'uvs' : nuvs
}
output = {
'scale' : 1,
'materials' : [],
'vertices' : vertices,
'normals' : [] if nnormals <= 0 else normal_values,
'colors' : [] if ncolors <= 0 else color_values,
'uvs' : uv_values,
'faces' : faces
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
def generate_mesh_list_from_hierarchy(node, mesh_list):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
mesh_list.append(node.GetNodeAttribute())
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
def generate_mesh_list(scene):
mesh_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_mesh_list_from_hierarchy(node.GetChild(i), mesh_list)
return mesh_list
# #####################################################
# Generate Embed Objects
# #####################################################
def generate_embed_dict_from_hierarchy(node, embed_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh or \
attribute_type == FbxNodeAttribute.eNurbs or \
attribute_type == FbxNodeAttribute.eNurbsSurface or \
attribute_type == FbxNodeAttribute.ePatch:
if attribute_type != FbxNodeAttribute.eMesh:
                converter.TriangulateInPlace(node)
embed_object = generate_scene_output(node)
embed_name = getPrefixedName(node, 'Embed')
embed_dict[embed_name] = embed_object
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
def generate_embed_dict(scene):
embed_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_embed_dict_from_hierarchy(node.GetChild(i), embed_dict)
return embed_dict
# #####################################################
# Generate Geometry Objects
# #####################################################
def generate_geometry_object(node):
output = {
'type' : 'embedded',
'id' : getPrefixedName( node, 'Embed' )
}
return output
def generate_geometry_dict_from_hierarchy(node, geometry_dict):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
geometry_object = generate_geometry_object(node)
geometry_name = getPrefixedName( node, 'Geometry' )
geometry_dict[geometry_name] = geometry_object
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
def generate_geometry_dict(scene):
geometry_dict = {}
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_geometry_dict_from_hierarchy(node.GetChild(i), geometry_dict)
return geometry_dict
# #####################################################
# Generate Light Node Objects
# #####################################################
def generate_default_light():
direction = (1,1,1)
color = (1,1,1)
intensity = 80.0
output = {
'type': 'DirectionalLight',
'color': getHex(color),
'intensity': intensity/100.00,
'direction': serializeVector3( direction ),
'target': getObjectName( None )
}
return output
def generate_light_object(node):
light = node.GetNodeAttribute()
light_types = ["point", "directional", "spot", "area", "volume"]
light_type = light_types[light.LightType.Get()]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
output = None
if light_type == "directional":
# Three.js directional lights emit light from a point in 3d space to a target node or the origin.
# When there is no target, we need to take a point, one unit away from the origin, and move it
# into the right location so that the origin acts like the target
if node.GetTarget():
direction = position
else:
translation = FbxVector4(0,0,0,0)
scale = FbxVector4(1,1,1,1)
rotation = transform.GetR()
matrix = FbxMatrix(translation, rotation, scale)
direction = matrix.MultNormalize(FbxVector4(0,1,0,1))
output = {
'type': 'DirectionalLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'direction': serializeVector3( direction ),
'target': getObjectName( node.GetTarget() )
}
elif light_type == "point":
output = {
'type': 'PointLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get()
}
elif light_type == "spot":
output = {
'type': 'SpotLight',
'color': getHex(light.Color.Get()),
'intensity': light.Intensity.Get()/100.0,
'position': serializeVector3( position ),
'distance': light.FarAttenuationEnd.Get(),
'angle': light.OuterAngle.Get()*math.pi/180,
'exponent': light.DecayType.Get(),
'target': getObjectName( node.GetTarget() )
}
return output
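# Note: FBX DecayType is an enum (eNone/eLinear/eQuadratic/eCubic), so using it
# directly as the Three.js spot 'exponent' above is a rough approximation of
# the falloff rather than an exact mapping.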
def generate_ambient_light(scene):
scene_settings = scene.GetGlobalSettings()
ambient_color = scene_settings.GetAmbientColor()
ambient_color = (ambient_color.mRed, ambient_color.mGreen, ambient_color.mBlue)
if ambient_color[0] == 0 and ambient_color[1] == 0 and ambient_color[2] == 0:
return None
output = {
'type': 'AmbientLight',
'color': getHex(ambient_color)
}
return output
# #####################################################
# Generate Camera Node Objects
# #####################################################
def generate_default_camera():
position = (100, 100, 100)
near = 0.1
far = 1000
fov = 75
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
def generate_camera_object(node):
camera = node.GetNodeAttribute()
position = camera.Position.Get()
projection_types = [ "perspective", "orthogonal" ]
projection = projection_types[camera.ProjectionType.Get()]
near = camera.NearPlane.Get()
far = camera.FarPlane.Get()
name = getObjectName( node )
output = {}
if projection == "perspective":
aspect = camera.PixelAspectRatio.Get()
fov = camera.FieldOfView.Get()
output = {
'type': 'PerspectiveCamera',
'fov': fov,
'aspect': aspect,
'near': near,
'far': far,
'position': serializeVector3( position )
}
elif projection == "orthogonal":
left = ""
right = ""
top = ""
bottom = ""
output = {
            'type': 'OrthographicCamera',
'left': left,
'right': right,
'top': top,
'bottom': bottom,
'near': near,
'far': far,
'position': serializeVector3( position )
}
return output
# #####################################################
# Generate Camera Names
# #####################################################
def generate_camera_name_list_from_hierarchy(node, camera_list):
if node.GetNodeAttribute() == None:
pass
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eCamera:
camera_string = getObjectName(node)
camera_list.append(camera_string)
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
def generate_camera_name_list(scene):
camera_list = []
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
generate_camera_name_list_from_hierarchy(node.GetChild(i), camera_list)
return camera_list
# #####################################################
# Generate Mesh Node Object
# #####################################################
def generate_mesh_object(node):
mesh = node.GetNodeAttribute()
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
material_count = node.GetMaterialCount()
material_name = ""
if material_count > 0:
material_names = []
for l in range(mesh.GetLayerCount()):
materials = mesh.GetLayer(l).GetMaterials()
if materials:
if materials.GetReferenceMode() == FbxLayerElement.eIndex:
#Materials are in an undefined external table
continue
for i in range(material_count):
material = node.GetMaterial(i)
material_names.append( getMaterialName(material) )
        if material_count <= 1 and len(material_names) == 0:
material_names.append('')
#If this mesh has more than one material, use a proxy material
material_name = getMaterialName( node, True) if material_count > 1 else material_names[0]
output = {
'geometry': getPrefixedName( node, 'Geometry' ),
'material': material_name,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True,
}
return output
# #####################################################
# Generate Node Object
# #####################################################
def generate_object(node):
node_types = ["Unknown", "Null", "Marker", "Skeleton", "Mesh", "Nurbs", "Patch", "Camera",
"CameraStereo", "CameraSwitcher", "Light", "OpticalReference", "OpticalMarker", "NurbsCurve",
"TrimNurbsSurface", "Boundary", "NurbsSurface", "Shape", "LODGroup", "SubDiv", "CachedEffect", "Line"]
transform = node.EvaluateLocalTransform()
position = transform.GetT()
scale = transform.GetS()
rotation = getRadians(transform.GetR())
quaternion = transform.GetQ()
node_type = ""
if node.GetNodeAttribute() == None:
node_type = "Null"
else:
node_type = node_types[node.GetNodeAttribute().GetAttributeType()]
name = getObjectName( node )
output = {
'fbx_type': node_type,
'position': serializeVector3( position ),
'quaternion': serializeVector4( quaternion ),
'scale': serializeVector3( scale ),
'visible': True
}
return output
# #####################################################
# Parse Scene Node Objects
# #####################################################
def generate_object_hierarchy(node, object_dict):
object_count = 0
if node.GetNodeAttribute() == None:
object_data = generate_object(node)
else:
attribute_type = (node.GetNodeAttribute().GetAttributeType())
if attribute_type == FbxNodeAttribute.eMesh:
object_data = generate_mesh_object(node)
elif attribute_type == FbxNodeAttribute.eLight:
object_data = generate_light_object(node)
elif attribute_type == FbxNodeAttribute.eCamera:
object_data = generate_camera_object(node)
else:
object_data = generate_object(node)
object_count += 1
object_name = getObjectName(node)
object_children = {}
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_children)
if node.GetChildCount() > 0:
# Having 'children' above other attributes is hard to read.
# We can send it to the bottom using the last letter of the alphabet 'z'.
# This letter is removed from the final output.
if option_pretty_print:
object_data['zchildren'] = object_children
else:
object_data['children'] = object_children
object_dict[object_name] = object_data
return object_count
def generate_scene_objects(scene):
object_count = 0
object_dict = {}
ambient_light = generate_ambient_light(scene)
if ambient_light:
object_dict['AmbientLight'] = ambient_light
object_count += 1
if option_default_light:
default_light = generate_default_light()
object_dict['DefaultLight'] = default_light
object_count += 1
if option_default_camera:
default_camera = generate_default_camera()
object_dict['DefaultCamera'] = default_camera
object_count += 1
node = scene.GetRootNode()
if node:
for i in range(node.GetChildCount()):
object_count += generate_object_hierarchy(node.GetChild(i), object_dict)
return object_dict, object_count
# #####################################################
# Generate Scene Output
# #####################################################
def extract_scene(scene, filename):
global_settings = scene.GetGlobalSettings()
objects, nobjects = generate_scene_objects(scene)
textures = generate_texture_dict(scene)
materials = generate_material_dict(scene)
geometries = generate_geometry_dict(scene)
embeds = generate_embed_dict(scene)
ntextures = len(textures)
nmaterials = len(materials)
ngeometries = len(geometries)
position = serializeVector3( (0,0,0) )
rotation = serializeVector3( (0,0,0) )
scale = serializeVector3( (1,1,1) )
camera_names = generate_camera_name_list(scene)
scene_settings = scene.GetGlobalSettings()
# This does not seem to be any help here
# global_settings.GetDefaultCamera()
defcamera = camera_names[0] if len(camera_names) > 0 else ""
if option_default_camera:
defcamera = 'default_camera'
metadata = {
'formatVersion': 3.2,
'type': 'scene',
'generatedBy': 'convert-to-threejs.py',
'objects': nobjects,
'geometries': ngeometries,
'materials': nmaterials,
'textures': ntextures
}
transform = {
'position' : position,
'rotation' : rotation,
'scale' : scale
}
defaults = {
'bgcolor' : 0,
'camera' : defcamera,
'fog' : ''
}
output = {
'objects': objects,
'geometries': geometries,
'materials': materials,
'textures': textures,
'embeds': embeds,
'transform': transform,
'defaults': defaults,
}
if option_pretty_print:
output['0metadata'] = metadata
else:
output['metadata'] = metadata
return output
# #####################################################
# Generate Non-Scene Output
# #####################################################
def extract_geometry(scene, filename):
output = generate_non_scene_output(scene)
return output
# #####################################################
# File Helpers
# #####################################################
def write_file(filepath, content):
index = filepath.rfind('/')
dir = filepath[0:index]
#if not os.path.exists(dir):
#os.makedirs(dir)
out = open(filepath, "w")
out.write(content.encode('utf8', 'replace'))
out.close()
def read_file(filepath):
f = open(filepath)
content = f.readlines()
f.close()
return content
def copy_textures(textures):
texture_dict = {}
for key in textures:
url = textures[key]['fullpath']
#src = replace_OutFolder2inFolder(url)
#print( src )
#print( url )
if url in texture_dict: # texture has been copied
continue
if not os.path.exists(url):
print("copy_texture error: we can't find this texture at " + url)
continue
try:
index = url.rfind('/')
if index == -1:
index = url.rfind( '\\' )
filename = url[index+1:len(url)]
saveFolder = "maps"
saveFilename = saveFolder + "/" + filename
#print( src )
#print( url )
#print( saveFilename )
if not os.path.exists(saveFolder):
os.makedirs(saveFolder)
shutil.copyfile(url, saveFilename)
texture_dict[url] = True
except IOError as e:
print "I/O error({0}): {1} {2}".format(e.errno, e.strerror, url)
def findFilesWithExt(directory, ext, include_path = True):
ext = ext.lower()
found = []
for root, dirs, files in os.walk(directory):
for filename in files:
current_ext = os.path.splitext(filename)[1].lower()
if current_ext == ext:
if include_path:
found.append(os.path.join(root, filename))
else:
found.append(filename)
return found
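# Usage sketch (hypothetical directory contents):
#   findFilesWithExt("maps", ".png")         # -> ['maps/wood.png', ...]
#   findFilesWithExt("maps", ".png", False)  # -> ['wood.png', ...]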
# #####################################################
# main
# #####################################################
if __name__ == "__main__":
from optparse import OptionParser
try:
from FbxCommon import *
except ImportError:
import platform
msg = 'Could not locate the python FBX SDK!\n'
msg += 'You need to copy the FBX SDK into your python install folder such as '
if platform.system() == 'Windows' or platform.system() == 'Microsoft':
msg += '"Python26/Lib/site-packages"'
elif platform.system() == 'Linux':
msg += '"/usr/local/lib/python2.6/site-packages"'
elif platform.system() == 'Darwin':
msg += '"/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages"'
msg += ' folder.'
print(msg)
sys.exit(1)
usage = "Usage: %prog [source_file.fbx] [output_file.js] [options]"
parser = OptionParser(usage=usage)
parser.add_option('-t', '--triangulate', action='store_true', dest='triangulate', help="force quad geometry into triangles", default=False)
parser.add_option('-x', '--ignore-textures', action='store_true', dest='notextures', help="don't include texture references in output file", default=False)
parser.add_option('-n', '--no-texture-copy', action='store_true', dest='notexturecopy', help="don't copy texture files", default=False)
parser.add_option('-u', '--force-prefix', action='store_true', dest='prefix', help="prefix all object names in output file to ensure uniqueness", default=False)
parser.add_option('-f', '--flatten-scene', action='store_true', dest='geometry', help="merge all geometries and apply node transforms", default=False)
    parser.add_option('-y', '--force-y-up', action='store_true', dest='forceyup', help="always convert the scene to a Y-up axis system", default=False)
parser.add_option('-c', '--add-camera', action='store_true', dest='defcamera', help="include default camera in output scene", default=False)
parser.add_option('-l', '--add-light', action='store_true', dest='deflight', help="include default light in output scene", default=False)
    parser.add_option('-p', '--pretty-print', action='store_true', dest='pretty', help="pretty print the JSON output file", default=False)
(options, args) = parser.parse_args()
option_triangulate = options.triangulate
    option_textures = not options.notextures
    option_copy_textures = not options.notexturecopy
option_prefix = options.prefix
option_geometry = options.geometry
option_forced_y_up = options.forceyup
option_default_camera = options.defcamera
option_default_light = options.deflight
option_pretty_print = options.pretty
# Prepare the FBX SDK.
sdk_manager, scene = InitializeSdkObjects()
converter = FbxGeometryConverter(sdk_manager)
# The converter takes an FBX file as an argument.
if len(args) > 1:
print("\nLoading file: %s" % args[0])
result = LoadScene(sdk_manager, scene, args[0])
else:
result = False
print("\nUsage: convert_fbx_to_threejs [source_file.fbx] [output_file.js]\n")
if not result:
print("\nAn error occurred while loading the file...")
else:
if option_triangulate:
print("\nForcing geometry to triangles")
triangulate_scene(scene)
axis_system = FbxAxisSystem.MayaYUp
if not option_forced_y_up:
            # convert the scene according to the asset's up-axis setting
            upVector = scene.GetGlobalSettings().GetAxisSystem().GetUpVector()
if upVector[0] == 3:
axis_system = FbxAxisSystem.MayaZUp
axis_system.ConvertScene(scene)
        inputFolder = args[0].replace( "\\", "/" )
        index = args[0].rfind( "/" )
        inputFolder = inputFolder[:index]
        outputFolder = args[1].replace( "\\", "/" )
        index = args[1].rfind( "/" )
outputFolder = outputFolder[:index]
if option_geometry:
output_content = extract_geometry(scene, os.path.basename(args[0]))
else:
output_content = extract_scene(scene, os.path.basename(args[0]))
if option_pretty_print:
output_string = json.dumps(output_content, indent=4, cls=CustomEncoder, separators=(',', ': '), sort_keys=True)
output_string = executeRegexHacks(output_string)
else:
output_string = json.dumps(output_content, separators=(',', ': '), sort_keys=True)
output_path = os.path.join(os.getcwd(), args[1])
write_file(output_path, output_string)
if option_copy_textures:
copy_textures( output_content['textures'] )
print("\nExported Three.js file to:\n%s\n" % output_path)
# Destroy all objects created by the FBX SDK.
sdk_manager.Destroy()
sys.exit(0) | mit |
shaufi/odoo | addons/l10n_be/__init__.py | 430 | 1060 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sunlianqiang/kbengine | kbe/src/lib/python/Lib/encodings/cp1006.py | 272 | 13568 | """ Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1006',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
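# A minimal registration sketch (assuming this module is not already on the
# interpreter's codec search path):
#   import codecs
#   codecs.register(lambda name: getregentry() if name == 'cp1006' else None)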
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO
'\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE
'\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO
'\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE
'\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR
'\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE
'\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX
'\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN
'\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT
'\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE
'\u060c' # 0xAB -> ARABIC COMMA
'\u061b' # 0xAC -> ARABIC SEMICOLON
'\xad' # 0xAD -> SOFT HYPHEN
'\u061f' # 0xAE -> ARABIC QUESTION MARK
'\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
'\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM
'\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM
'\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM
'\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM
'\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM
'\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM
'\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM
'\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
'\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM
'\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM
'\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM
'\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM
'\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM
'\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM
'\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM
'\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM
'\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM
'\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM
'\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM
'\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM
'\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM
'\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM
'\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM
    '\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORM
'\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM
'\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM
'\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM
'\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM
'\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM
'\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM
'\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM
'\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM
'\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM
'\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM
'\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM
'\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM
'\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM
'\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM
'\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM
'\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM
'\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM
'\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM
'\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM
'\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM
'\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM
'\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM
'\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM
'\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM
'\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM
'\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM
'\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM
'\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM
'\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM
'\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM
'\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM
'\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM
'\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM
'\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM
'\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM
'\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM
'\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM
'\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM
'\ufee7' # 0xEE -> ARABIC LETTER NOON INITIAL FORM
'\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
'\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM
'\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM
'\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM
'\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM
'\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
'\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM
'\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
'\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
'\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
'\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM
'\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM
'\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM
'\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
'\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM
'\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM
'\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
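# Illustrative round-trip sketch (an assumption for documentation purposes,
# not part of the generated codec file): the charmap helpers consume the two
# tables built above directly.
#
#   import codecs
#   text, _ = codecs.charmap_decode(b'\xa1\xa2\xa3', 'strict', decoding_table)
#   # text == u'\u06f0\u06f1\u06f2' (extended Arabic-Indic digits zero..two)
#   raw, _ = codecs.charmap_encode(text, 'strict', encoding_table)
#   # raw == b'\xa1\xa2\xa3'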
| lgpl-3.0 |
BD2KGenomics/slugflow | src/toil/test/sort/sortTest.py | 1 | 12590 | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import random
import shutil
import subprocess
import unittest
from contextlib import contextmanager
from uuid import uuid4
from toil import resolveEntryPoint
from toil.batchSystems.mesos.test import MesosTestSupport
from toil.test.batchSystems.parasolTestSupport import ParasolTestSupport
from toil.common import Toil
from toil.job import Job
from toil.jobStores.abstractJobStore import (JobStoreExistsException,
NoSuchJobStoreException)
from toil.leader import FailedJobsException
from toil.lib.bioio import root_logger
from toil.test import (ToilTest,
needs_aws_ec2,
needs_google,
needs_gridengine,
needs_mesos,
needs_parasol,
needs_torque,
slow)
from toil.test.sort.sort import (copySubRangeOfFile,
getMidPoint,
main,
makeFileToSort,
merge,
sort)
logger = logging.getLogger(__name__)
defaultLineLen = int(os.environ.get('TOIL_TEST_SORT_LINE_LEN', 10))
defaultLines = int(os.environ.get('TOIL_TEST_SORT_LINES', 10))
defaultN = int(os.environ.get('TOIL_TEST_SORT_N', defaultLineLen * defaultLines / 5))
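# Illustrative invocation (an assumption; the only contract is the three
# os.environ lookups above): scale the generated input up for a heavier run.
#
#   TOIL_TEST_SORT_LINES=1000 TOIL_TEST_SORT_LINE_LEN=100 \
#       python -m pytest src/toil/test/sort/sortTest.py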
@contextmanager
def runMain(options):
"""
make sure the output file is deleted every time main is run
"""
main(options)
yield
if os.path.exists(options.outputFile):
os.remove(options.outputFile)
@slow
class SortTest(ToilTest, MesosTestSupport, ParasolTestSupport):
"""
Tests Toil by sorting a file in parallel on various combinations of job stores and batch
systems.
"""
def setUp(self):
super(SortTest, self).setUp()
self.tempDir = self._createTempDir(purpose='tempDir')
self.outputFile = os.path.join(self.tempDir, 'sortedFile.txt')
self.inputFile = os.path.join(self.tempDir, "fileToSort.txt")
def tearDown(self):
if os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
ToilTest.tearDown(self)
def _toilSort(self, jobStoreLocator, batchSystem,
lines=defaultLines, N=defaultN, testNo=1, lineLen=defaultLineLen,
retryCount=2, badWorker=0.5, downCheckpoints=False, disableCaching=False):
"""
Generate a file consisting of the given number of random lines, each line of the given
length. Sort the file with Toil by splitting the file recursively until each part is less
than the given number of bytes, sorting each part and merging them back together. Then
verify the result.
:param jobStoreLocator: The location of the job store.
:param batchSystem: the name of the batch system
:param lines: the number of random lines to generate
:param N: the size in bytes of each split
:param testNo: the number of repeats of this test
:param lineLen: the length of each random line in the file
"""
for test in range(testNo):
try:
# Specify options
options = Job.Runner.getDefaultOptions(jobStoreLocator)
options.logLevel = logging.getLevelName(root_logger.getEffectiveLevel())
options.retryCount = retryCount
options.batchSystem = batchSystem
options.clean = "never"
options.badWorker = badWorker
options.badWorkerFailInterval = 0.05
options.disableCaching = disableCaching
# This is required because mesosMasterAddress now defaults to the IP of the machine
# that is starting the workflow while the mesos *tests* run locally.
if batchSystem == 'mesos':
options.mesosMasterAddress = 'localhost:5050'
options.downCheckpoints = downCheckpoints
options.N = N
options.outputFile = self.outputFile
options.fileToSort = self.inputFile
options.overwriteOutput = True
options.realTimeLogging = True
# Make the file to sort
makeFileToSort(options.fileToSort, lines=lines, lineLen=lineLen)
# First make our own sorted version
with open(options.fileToSort, 'r') as fileHandle:
l = fileHandle.readlines()
l.sort()
# Check we get an exception if we try to restart a workflow that doesn't exist
options.restart = True
with self.assertRaises(NoSuchJobStoreException):
with runMain(options):
                        # Now check that the file is properly sorted.
with open(options.outputFile, 'r') as fileHandle:
l2 = fileHandle.readlines()
self.assertEqual(l, l2)
options.restart = False
# Now actually run the workflow
try:
with runMain(options):
pass
i = 0
except FailedJobsException as e:
i = e.numberOfFailedJobs
# Check we get an exception if we try to run without restart on an existing store
with self.assertRaises(JobStoreExistsException):
with runMain(options):
pass
options.restart = True
# This loop tests the restart behavior
totalTrys = 1
while i != 0:
options.useExistingOptions = random.random() > 0.5
try:
with runMain(options):
pass
i = 0
except FailedJobsException as e:
i = e.numberOfFailedJobs
if totalTrys > 32: # p(fail after this many restarts) = 0.5**32
self.fail('Exceeded a reasonable number of restarts')
totalTrys += 1
finally:
subprocess.check_call([resolveEntryPoint('toil'), 'clean', jobStoreLocator])
# final test to make sure the jobStore was actually deleted
self.assertRaises(NoSuchJobStoreException, Toil.resumeJobStore, jobStoreLocator)
@needs_aws_ec2
def testAwsSingle(self):
self._toilSort(jobStoreLocator=self._awsJobStore(), batchSystem='single_machine')
@needs_aws_ec2
@needs_mesos
def testAwsMesos(self):
self._startMesos()
try:
self._toilSort(jobStoreLocator=self._awsJobStore(), batchSystem="mesos")
finally:
self._stopMesos()
@needs_mesos
def testFileMesos(self):
self._startMesos()
try:
self._toilSort(jobStoreLocator=self._getTestJobStorePath(), batchSystem="mesos")
finally:
self._stopMesos()
@needs_google
def testGoogleSingle(self):
self._toilSort(jobStoreLocator=self._googleJobStore(), batchSystem="single_machine")
@needs_google
@needs_mesos
def testGoogleMesos(self):
self._startMesos()
try:
self._toilSort(jobStoreLocator=self._googleJobStore(), batchSystem="mesos")
finally:
self._stopMesos()
def testFileSingle(self):
self._toilSort(jobStoreLocator=self._getTestJobStorePath(), batchSystem='single_machine')
def testFileSingleNonCaching(self):
self._toilSort(jobStoreLocator=self._getTestJobStorePath(), batchSystem='single_machine',
disableCaching=True)
def testFileSingleCheckpoints(self):
self._toilSort(jobStoreLocator=self._getTestJobStorePath(), batchSystem='single_machine',
retryCount=2, downCheckpoints=True)
def testFileSingle10000(self):
self._toilSort(jobStoreLocator=self._getTestJobStorePath(), batchSystem='single_machine',
lines=10000, N=10000)
@needs_gridengine
@unittest.skip('GridEngine does not support shared caching')
def testFileGridEngine(self):
self._toilSort(jobStoreLocator=self._getTestJobStorePath(), batchSystem='gridengine')
@needs_torque
@unittest.skip('PBS/Torque does not support shared caching')
def testFileTorqueEngine(self):
self._toilSort(jobStoreLocator=self._getTestJobStorePath(), batchSystem='torque')
@needs_parasol
@unittest.skip("skipping until parasol support is less flaky (see github issue #449")
def testFileParasol(self):
self._startParasol()
try:
self._toilSort(jobStoreLocator=self._getTestJobStorePath(), batchSystem='parasol')
finally:
self._stopParasol()
testNo = 5
def testSort(self):
for test in range(self.testNo):
tempFile1 = os.path.join(self.tempDir, "fileToSort.txt")
makeFileToSort(tempFile1)
lines1 = self._loadFile(tempFile1)
lines1.sort()
sort(tempFile1)
with open(tempFile1, 'r') as f:
lines2 = f.readlines()
self.assertEqual(lines1, lines2)
def testMerge(self):
for test in range(self.testNo):
tempFile1 = os.path.join(self.tempDir, "fileToSort1.txt")
tempFile2 = os.path.join(self.tempDir, "fileToSort2.txt")
tempFile3 = os.path.join(self.tempDir, "mergedFile.txt")
makeFileToSort(tempFile1)
makeFileToSort(tempFile2)
sort(tempFile1)
sort(tempFile2)
with open(tempFile3, 'w') as fileHandle:
with open(tempFile1) as tempFileHandle1:
with open(tempFile2) as tempFileHandle2:
merge(tempFileHandle1, tempFileHandle2, fileHandle)
lines1 = self._loadFile(tempFile1) + self._loadFile(tempFile2)
lines1.sort()
with open(tempFile3, 'r') as f:
lines2 = f.readlines()
self.assertEqual(lines1, lines2)
def testCopySubRangeOfFile(self):
for test in range(self.testNo):
tempFile = os.path.join(self.tempDir, "fileToSort1.txt")
outputFile = os.path.join(self.tempDir, "outputFileToSort1.txt")
makeFileToSort(tempFile, lines=10, lineLen=defaultLineLen)
fileSize = os.path.getsize(tempFile)
assert fileSize > 0
fileStart = random.choice(range(0, fileSize))
fileEnd = random.choice(range(fileStart, fileSize))
with open(outputFile, 'w') as f:
f.write(copySubRangeOfFile(tempFile, fileStart, fileEnd))
with open(outputFile, 'r') as f:
l = f.read()
with open(tempFile, 'r') as f:
l2 = f.read()[fileStart:fileEnd]
self.assertEqual(l, l2)
def testGetMidPoint(self):
for test in range(self.testNo):
makeFileToSort(self.inputFile)
with open(self.inputFile, 'r') as f:
                contents = f.read()
fileSize = os.path.getsize(self.inputFile)
midPoint = getMidPoint(self.inputFile, 0, fileSize)
print(f"The mid point is {midPoint} of a file of {fileSize} bytes.")
assert midPoint < fileSize
            assert contents[midPoint] == '\n'
assert midPoint >= 0
def _awsJobStore(self):
return f'aws:{self.awsRegion()}:sort-test-{uuid4()}'
def _googleJobStore(self):
return f'google:{os.getenv("TOIL_GOOGLE_PROJECTID")}:sort-test-{uuid4()}'
def _loadFile(self, path):
with open(path, 'r') as f:
return f.readlines()
| apache-2.0 |
ted-gould/nova | nova/scheduler/opts.py | 19 | 2750 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import nova.scheduler.driver
import nova.scheduler.filter_scheduler
import nova.scheduler.filters.aggregate_image_properties_isolation
import nova.scheduler.filters.core_filter
import nova.scheduler.filters.disk_filter
import nova.scheduler.filters.io_ops_filter
import nova.scheduler.filters.isolated_hosts_filter
import nova.scheduler.filters.num_instances_filter
import nova.scheduler.filters.ram_filter
import nova.scheduler.filters.trusted_filter
import nova.scheduler.host_manager
import nova.scheduler.ironic_host_manager
import nova.scheduler.manager
import nova.scheduler.rpcapi
import nova.scheduler.scheduler_options
import nova.scheduler.utils
import nova.scheduler.weights.io_ops
import nova.scheduler.weights.metrics
import nova.scheduler.weights.ram
def list_opts():
return [
('DEFAULT',
itertools.chain(
[nova.scheduler.filters.disk_filter.disk_allocation_ratio_opt],
[nova.scheduler.filters.io_ops_filter.max_io_ops_per_host_opt],
[nova.scheduler.filters.num_instances_filter.
max_instances_per_host_opt],
[nova.scheduler.scheduler_options.
scheduler_json_config_location_opt],
nova.scheduler.driver.scheduler_driver_opts,
nova.scheduler.filter_scheduler.filter_scheduler_opts,
nova.scheduler.filters.aggregate_image_properties_isolation.opts,
nova.scheduler.filters.isolated_hosts_filter.isolated_opts,
nova.scheduler.host_manager.host_manager_opts,
nova.scheduler.ironic_host_manager.host_manager_opts,
nova.scheduler.manager.scheduler_driver_opts,
nova.scheduler.rpcapi.rpcapi_opts,
nova.scheduler.utils.scheduler_opts,
nova.scheduler.weights.io_ops.io_ops_weight_opts,
nova.scheduler.weights.ram.ram_weight_opts,
)),
('metrics', nova.scheduler.weights.metrics.metrics_weight_opts),
('trusted_computing',
nova.scheduler.filters.trusted_filter.trusted_opts),
('upgrade_levels',
itertools.chain(
[nova.scheduler.rpcapi.rpcapi_cap_opt],
)),
]
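# Illustrative consumer sketch (an assumption, not part of nova): tools such
# as oslo-config-generator walk the (group, opts) pairs returned above, e.g.
#
#   for group, opts in list_opts():
#       for opt in opts:
#           print('[%s] %s' % (group, opt.name))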
| apache-2.0 |
apple/swift-llvm | utils/lit/lit/ShCommands.py | 69 | 3286 | class Command:
def __init__(self, args, redirects):
self.args = list(args)
self.redirects = list(redirects)
def __repr__(self):
return 'Command(%r, %r)' % (self.args, self.redirects)
def __eq__(self, other):
if not isinstance(other, Command):
return False
return ((self.args, self.redirects) ==
(other.args, other.redirects))
def toShell(self, file):
for arg in self.args:
if "'" not in arg:
quoted = "'%s'" % arg
elif '"' not in arg and '$' not in arg:
quoted = '"%s"' % arg
else:
raise NotImplementedError('Unable to quote %r' % arg)
file.write(quoted)
# For debugging / validation.
import ShUtil
dequoted = list(ShUtil.ShLexer(quoted).lex())
if dequoted != [arg]:
raise NotImplementedError('Unable to quote %r' % arg)
for r in self.redirects:
if len(r[0]) == 1:
file.write("%s '%s'" % (r[0][0], r[1]))
else:
file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
class GlobItem:
def __init__(self, pattern):
self.pattern = pattern
def __repr__(self):
return self.pattern
def __eq__(self, other):
        if not isinstance(other, GlobItem):
return False
return (self.pattern == other.pattern)
def resolve(self, cwd):
import glob
import os
if os.path.isabs(self.pattern):
abspath = self.pattern
else:
abspath = os.path.join(cwd, self.pattern)
results = glob.glob(abspath)
return [self.pattern] if len(results) == 0 else results
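# Illustrative behavior (derived from resolve() above): GlobItem('*.txt')
# resolved against '/tmp' yields the matching paths, or ['*.txt'] unchanged
# when nothing matches -- mirroring how POSIX shells leave unmatched globs
# as-is.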
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
self.commands = commands
self.negate = negate
self.pipe_err = pipe_err
def __repr__(self):
return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
self.pipe_err)
def __eq__(self, other):
if not isinstance(other, Pipeline):
return False
return ((self.commands, self.negate, self.pipe_err) ==
                (other.commands, other.negate, other.pipe_err))
def toShell(self, file, pipefail=False):
if pipefail != self.pipe_err:
raise ValueError('Inconsistent "pipefail" attribute!')
if self.negate:
file.write('! ')
for cmd in self.commands:
cmd.toShell(file)
if cmd is not self.commands[-1]:
file.write('|\n ')
class Seq:
def __init__(self, lhs, op, rhs):
assert op in (';', '&', '||', '&&')
self.op = op
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
def __eq__(self, other):
if not isinstance(other, Seq):
return False
return ((self.lhs, self.op, self.rhs) ==
(other.lhs, other.op, other.rhs))
def toShell(self, file, pipefail=False):
self.lhs.toShell(file, pipefail)
file.write(' %s\n' % self.op)
self.rhs.toShell(file, pipefail)
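# Minimal composition sketch (illustrative, not part of lit): the shell line
# `ls | grep foo && echo done` corresponds to a Seq of two Pipelines.
#
#   tree = Seq(Pipeline([Command(['ls'], []), Command(['grep', 'foo'], [])]),
#              '&&',
#              Pipeline([Command(['echo', 'done'], [])]))
#   print(repr(tree))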
| apache-2.0 |
Codefans-fan/odoo | addons/hw_proxy/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chokribr/invenioold | modules/miscutil/lib/hepdatautils_unit_tests.py | 16 | 22504 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the hepdatautils library."""
__revision__ = "$Id$"
import os
import cPickle
from invenio.testutils import make_test_suite, run_test_suite, InvenioTestCase
from invenio import hepdatautils
from invenio.hepdatautils import Dataset
from invenio import bibrecord
class TestParsingSystematics(InvenioTestCase):
""" Systematics strings to be parsed (we check only that they parse
at all not generating exceptions)
"""
def test_guess_minimum_encoding(self):
"""textutils - guess_minimum_encoding."""
# self.assertEqual(guess_minimum_encoding('patata'), ('patata', 'ascii'))
# self.assertEqual(False, True, "ble")
        # we make sure that this does not cause any exception!
pass
class TestDatasetPaperLogic(InvenioTestCase):
""" Testing the business logic classes
"""
def test_parse_systematics(self):
"""
To be filled when it becomes more clear how to treat systematics
http://hepdata.cedar.ac.uk/view/ins1720
http://hepdata.cedar.ac.uk/view/ins215359
"""
pass
def test_copy_245_fields_correct_caption(self):
"""Test the case when original record contains already some
instances of 245 field and we want to extend by the HEPDATA
caption """
rec_string = """<record>
<controlfield tag="001">123456</controlfield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Some caption</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="z">Some ridiculous caption</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Hepdata caption</subfield>
<subfield code="9">HEPDATA</subfield>
</datafield>
<!--some other fields-->
<datafield tag="520" ind1="" ind2=" ">
<subfield code="9">HEPDATA</subfield>
</datafield>
<datafield tag="245" ind1="z" ind2=" ">
<subfield code="z">Some ridiculous caption</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="z">Some other entry not following even the semantics 2</subfield>
<subfield code="3">ANOTHER</subfield>
</datafield>
</record>"""
rec = bibrecord.create_record(rec_string)[0]
paper = hepdatautils.Paper.create_from_record(rec)
self.assertEqual(None, paper.get_diff_marcxml(rec), \
"There should not be need of a patch on the same record")
paper.comment = "azerty"
diff_xml = paper.get_diff_marcxml(rec)
self.assertTrue(diff_xml.find(">Some caption") == -1, \
"One of existing captions not found")
self.assertTrue(diff_xml.find(">Some ridiculous caption") == -1, \
"One of existing captions not found")
self.assertTrue(diff_xml.find(">azerty") != -1, \
"New caption not found")
def test_copy_245_fields_add_caption(self):
""" Test adding a completely new caption"""
rec_string = """<record>
<controlfield tag="001">123456</controlfield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Some caption</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="z">Some ridiculous caption</subfield>
</datafield>
<!--some other fields-->
<datafield tag="520" ind1="" ind2=" ">
<subfield code="9">HEPDATA</subfield>
</datafield>
<datafield tag="245" ind1="z" ind2=" ">
<subfield code="z">Some ridiculous caption</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="z">Some other entry not following even the semantics 2</subfield>
<subfield code="3">ANOTHER</subfield>
</datafield>
</record>"""
rec = bibrecord.create_record(rec_string)[0]
paper = hepdatautils.Paper.create_from_record(rec)
self.assertEqual(None, paper.get_diff_marcxml(rec), \
"There should not be need of a patch on the same record")
paper.comment = "azerty"
diff_xml = paper.get_diff_marcxml(rec)
self.assertTrue(diff_xml.find(">Some caption") == -1, \
"One of existing captions not found")
self.assertTrue(diff_xml.find(">Some ridiculous caption") == -1, \
"One of existing captions not found")
self.assertTrue(diff_xml.find(">azerty") != -1, \
"New caption not found")
def test_copy_245_fields_remove_caption(self):
""" Test the case when the caption exists but
it should be removed"""
rec_string = """<record>
<controlfield tag="001">123456</controlfield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Some caption</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="z">Some ridiculous caption</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Caption to remove</subfield>
<subfield code="9">HEPDATA</subfield>
</datafield>
<!--some other fields-->
<datafield tag="520" ind1="" ind2=" ">
<subfield code="9">HEPDATA</subfield>
</datafield>
<datafield tag="245" ind1="z" ind2=" ">
<subfield code="z">Some ridiculous caption</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="z">Some other entry not following even the semantics 2</subfield>
<subfield code="3">ANOTHER</subfield>
</datafield>
</record>"""
rec = bibrecord.create_record(rec_string)[0]
paper = hepdatautils.Paper.create_from_record(rec)
self.assertEqual(None, paper.get_diff_marcxml(rec), \
"There should not be need of a patch on the same record")
paper.comment = "" # empty caption should not be uploaded
diff_xml = paper.get_diff_marcxml(rec)
self.assertTrue(diff_xml is None, "Expected empty output")
def test_paper_logic(self):
"""Test the case when the main record has to be updated.
Only fields refering to HEPData directly have to be modified,
which means copying all fields not being directly related"""
rec_string = """<record>
<controlfield tag="001">123456</controlfield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="z">Some other entry not following even the semantics</subfield>
<subfield code="3">ADDITIONAL HEPDATA</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="z">Some other entry not following even the semantics 2</subfield>
<subfield code="3">ANOTHER</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://google.com</subfield>
<subfield code="y">This is the link text</subfield>
<subfield code="3">ADDITIONAL HEPDATA</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://invenio-software.org</subfield>
<subfield code="y">This is some other completely unrelated field</subfield>
<subfield code="3">Different type</subfield>
</datafield>
<datafield tag="856" ind1=" " ind2=" ">
<subfield code="u">http://invenio-software.net</subfield>
<subfield code="y">This should not be copied</subfield>
<subfield code="3">Different type</subfield>
</datafield>
<!-- now fields similar to the one marking existence of HEPDATA -->
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="9">SOMETHING</subfield>
</datafield>
<!-- Now an incorrect caption -->
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Some incorrect caption</subfield>
<subfield code="9">HEPDATA</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Some other comment</subfield>
<subfield code="9">SOMETHING ELSE</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="u">Something not following the schema</subfield>
</datafield>
<!-- some completely unrelated fields that should not be copied -->
<datafield tag="764" ind1=" " ind2=" ">
<subfield code="u">Something not to repeat</subfield>
</datafield>
<datafield tag="764" ind1=" " ind2=" ">
<subfield code="u">Something not to repeat 2</subfield>
</datafield>
<datafield tag="200" ind1="C" ind2=" ">
<subfield code="a">Something not to repeat 3</subfield>
</datafield>
</record>"""
rec = bibrecord.create_record(rec_string)[0]
# generate entries that differ on title, additional data etc...
paper = hepdatautils.Paper()
paper.additional_data_links = \
[{"description": "This is the first dataset", \
"href": "http://something.com"}, \
{"description": "This is another description", \
"href" : "https://invenio-software.org"}, \
{"description" : "Yet something else", \
"href" : "http://and.now.for.something.completely.different"}]
diff_xml = paper.get_diff_marcxml(rec)
self.assertTrue(not(diff_xml is None),
"According to the diffing algorithm, records are the same")
# print diff_xml
# test the case of uploading an empty set !
def test_read_empty_dataset(self):
"""Assure that the case of empty dataset does not cause any problems
... this might be the case for example with
http://hepdata.cedar.ac.uk/view/ins215359"""
# Trying to generate columns on an empty dataset
dataset = cPickle.loads("ccopy_reg\n_reconstructor\np1\n(cinvenio.hepdatautils\nDataset\np2\nc__builtin__\nobject\np3\nNtRp4\n(dp5\nS'name'\np6\nS'Table 7'\np7\nsS'column_titles'\np8\n(lp9\nsS'additional_files'\np10\n(lp11\n(lp12\nS'ins1720/d7/plain.txt'\np13\naS'plain text'\np14\naa(lp15\nS'ins1720/d7/aida'\np16\naS'AIDA'\np17\naa(lp18\nS'ins1720/d7/pyroot.py'\np19\naS'PYROOT'\np20\naa(lp21\nS'ins1720/d7/yoda'\np22\naS'YODA'\np23\naa(lp24\nS'ins1720/d7/root'\np25\naS'ROOT'\np26\naa(lp27\nS'ins1720/d7/mpl'\np28\naS'mpl'\np29\naa(lp30\nS'ins1720/d7/jhepwork.py'\np31\naS'jhepwork'\np32\naasS'comments'\np33\nS'<br >Location: F 20<br >Additional systematic error: (RES-DEF(RES=DEL(1232P33)++,BACK=CORRECTED,C=P-WAVE BREIT-WIGNER)//RES-DEF(RES=N(1470P11)+,BACK=CORRECTED,C=P-WAVE BREIT-WIGNER, C=FITTED MASS//RES-DEF(RES=N(1520D13)+,BACK=CORRECTED,C=P-WAVE BREIT-WIGNER, C=FITTED MASS//RES-DEF(RES=N(1688F15)+,BACK=CORRECTED,C=F-WAVE BREIT-WIGNER, C=FITTED MASS)'\np34\nsS'data_qualifiers'\np35\n(lp36\nsS'location'\np37\nS''\nsS'num_columns'\np38\nI2\nsS'position'\np39\nI0\nsS'column_headers'\np40\n(lp41\n(dp42\nS'content'\np43\nS''\nsS'colspan'\np44\nI1\nsa(dp45\ng43\nS'No data is encoded for this table'\np46\nsg44\nI1\nsasS'data'\np47\n(lp48\nsS'additional_data_links'\np49\n(lp50\nsb.")
dataset.generate_columns()
def test_generate_columns(self):
"""Test the method generating columns of a given dataset"""
def generate_columns_longer(ds):
""" a much longer implemntation of the column generation"""
from invenio.bibrecord import record_add_field
rec = {}
columns = [[num, "", ""] for num in xrange(ds.num_columns)]
# (number, header, title)
cur_col = 0
for hd in ds.column_headers:
for i in xrange(hd["colspan"]):
columns[cur_col][1] = hd["content"].strip()
cur_col += 1
cur_col = 0
for ct in ds.column_titles:
for i in xrange(ct["colspan"]):
columns[cur_col][2] = ct["content"].strip()
cur_col += 1
for col in columns:
subfields = [("n", str(col[0]))]
if col[2] != "":
subfields.append(("t", col[2]))
if col[1] != "":
subfields.append(("d", col[1]))
record_add_field(rec, "910", subfields = subfields)
return rec
ds = Dataset()
ds.column_headers = [{"content": "header1", "colspan" : 1},
{"content": "header2", "colspan" : 3}]
ds.column_titles = [{"content": "title1", "colspan" : 2},
{"content": "title2", "colspan" : 1}]
ds.num_columns = 6
self.assertEqual(ds.generate_columns(), generate_columns_longer(ds), \
"Incorrectly generated columns")
def test_write_xml(self):
"""Tests the capability of writing xml files"""
fname = hepdatautils.write_xml_stream_to_tmpfile(\
["<entry1></entry1>", "<entry2></entry2>"], "testing")
self.assertTrue(os.path.exists(fname), "results file does not exist")
f = open(fname, "r")
content = f.read()
f.close()
self.assertTrue(content.find("<entry1></entry1>") != -1, \
"Can not find required string entry1")
self.assertTrue(content.find("<entry2></entry2>") != -1, \
"Can not find required string entry2")
self.assertTrue(content.find("<?xml") != -1, \
"Can not find required XML structure")
def test_update_the_same_record(self):
"""Tests parsing Paper from a record and diffing with the same
hepdata entry.
"""
rec_string = """<record>
<controlfield tag="001">123456</controlfield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="z">Some other entry not following even the semantics 2</subfield>
<subfield code="3">ANOTHER</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://google.com</subfield>
<subfield code="y">1 This is the link text</subfield>
<subfield code="3">ADDITIONAL HEPDATA</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">http://invenio-software.org</subfield>
<subfield code="y">2 This is some other completely unrelated field</subfield>
<subfield code="3">ADDITIONAL HEPDATA</subfield>
</datafield>
<datafield tag="856" ind1=" " ind2=" ">
<subfield code="u">http://invenio-software.net</subfield>
<subfield code="y">This should not be copied</subfield>
<subfield code="3">Different type</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="9">HEPDATA</subfield>
</datafield>
</record>
"""
rec = bibrecord.create_record(rec_string)[0]
paper = hepdatautils.Paper.create_from_record(rec)
diff_xml = paper.get_diff_marcxml(rec)
self.assertTrue(diff_xml is None,
"Expecting empty XML in the case of the same dataset. Produced XML: %s" % (diff_xml, ))
self.assertEqual(len(paper.additional_data_links), 2,
"Incorrect number of recognised additional data links")
if paper.additional_data_links[0]["description"][0] > \
paper.additional_data_links[1]["description"][0]:
l1 = paper.additional_data_links[1]
l2 = paper.additional_data_links[0]
else:
l1 = paper.additional_data_links[0]
l2 = paper.additional_data_links[1]
self.assertEqual(l1["description"], "1 This is the link text", "Incorrect first parsed link")
self.assertEqual(l1["href"], "http://google.com", "Incorrect first parsed link")
self.assertEqual(l2["description"], "2 This is some other completely unrelated field",
"Incorrect second parsed link")
self.assertEqual(l2["href"], "http://invenio-software.org",
"Incorrect second parsed link")
def test_parse_record(self):
"""Tests building record form the MARC XML"""
rec_string = """<record>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">This is the caption</subfield>
<subfield code="9">HEPDATA</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="9">HEPDATA</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">ATLAS</subfield>
</datafield>
<datafield tag="786" ind1=" " ind2=" ">
<subfield code="w">214657</subfield>
<subfield code="r">arXiv:something</subfield>
<subfield code="h">F1</subfield>
</datafield>
<datafield tag="336" ind1=" " ind2=" ">
<subfield code="t">DATASET</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">DATA</subfield>
</datafield>
<!-- definitions of columns -->
<datafield tag="911" ind1=" " ind2=" ">
<subfield code="x">1</subfield>
<subfield code="y">2</subfield>
</datafield>
<datafield tag="910" ind1=" " ind2=" ">
<subfield code="t">column title</subfield>
<subfield code="d">column description</subfield>
<subfield code="n">0</subfield>
</datafield>
<datafield tag="910" ind1=" " ind2=" ">
<subfield code="t">title2</subfield>
<subfield code="d">column description</subfield>
<subfield code="n">1</subfield>
</datafield>
<datafield tag="910" ind1=" " ind2=" ">
<subfield code="t">title2</subfield>
<subfield code="d">description2</subfield>
<subfield code="n">2</subfield>
</datafield>
<!-- encoding data qualifiers -->
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="r">1</subfield>
<subfield code="c">0</subfield>
<subfield code="c">1</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="k">m</subfield>
<subfield code="v">v</subfield>
<subfield code="c">2</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="k">p</subfield>
<subfield code="v">q</subfield>
<subfield code="c">0</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="k">w</subfield>
<subfield code="v">g</subfield>
<subfield code="c">1</subfield>
<subfield code="c">2</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="r">2</subfield>
<subfield code="c">0</subfield>
<subfield code="c">1</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="k">z</subfield>
<subfield code="v">v</subfield>
<subfield code="c">2</subfield>
</datafield>
</record>
"""
rec = bibrecord.create_record(rec_string)
# print "The record string: %s\n" % (str(rec), )
ds = Dataset.create_from_record(rec[0], '(l.', 214657, "")
# print str(ds.data_qualifiers)
self.assertEqual(3, ds.num_columns, \
"Incorrect number of columns has been read")
# asserting column titles:
self.assertEqual(2, len(ds.column_headers), "Incorrect number of headers")
self.assertEqual("column title", ds.column_titles[0]["content"], \
"Incorrect content of the first column")
self.assertEqual(1, ds.column_titles[0]["colspan"], \
"Incorrect colspan of the title of first column")
self.assertEqual("title2", ds.column_titles[1]["content"], \
"Incorrect content of the second and third column")
self.assertEqual(2, ds.column_titles[1]["colspan"], \
"Incorrect colspan of the title of second and " + \
"third column")
# asserting on column descriptions
self.assertEqual(2, len(ds.column_titles), \
"Incorrect number of column titles")
self.assertEqual("column description", ds.column_headers[0]["content"], \
"Incorrect description of the first andsecond" + \
" column")
self.assertEqual(2, ds.column_headers[0]["colspan"], \
"Incorrect colspan of the description first" + \
" and second column")
self.assertEqual("description2", ds.column_headers[1]["content"], \
"Incorrect description of the third column")
self.assertEqual(1, ds.column_headers[1]["colspan"], \
"Incorrect colspan of the description thirdcolumn")
self.assertEqual(3, len(ds.data_qualifiers), \
"Incorrect number of detected dscriptor rows")
existing_qual = []
for q_line in ds.data_qualifiers:
l_pos = 0
for q in q_line:
existing_qual.append((l_pos, q))
l_pos += 1
self.assertTrue((0, {"content": "RE : 1", "colspan" : 2 }) in \
existing_qual)
self.assertTrue((1, {"content": "m : v", "colspan" : 1 }) in \
existing_qual)
self.assertTrue((0, {"content": "p : q", "colspan" : 1 }) in \
existing_qual)
self.assertTrue((1, {"content": "w : g", "colspan" : 2 }) in \
existing_qual)
self.assertTrue((0, {"content": "RE : 2", "colspan" : 2 }) in \
existing_qual)
self.assertTrue((1, {"content": "z : v", "colspan" : 1 }) in \
existing_qual)
# now testing the comparison function ... on the same dataset
ds2 = Dataset.create_from_record(rec[0], '(l.', 214657, "")
# print str(ds.get_diff_marcxml(rec[0], None))
# print str(ds.get_diff_marcxml({}, None))
TEST_SUITE = make_test_suite(TestParsingSystematics, TestDatasetPaperLogic)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
StefanRijnhart/odoo | addons/account/account_financial_report.py | 5 | 7657 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
# ---------------------------------------------------------
# Account Financial Report
# ---------------------------------------------------------
class account_financial_report(osv.osv):
_name = "account.financial.report"
_description = "Account Report"
def _get_level(self, cr, uid, ids, field_name, arg, context=None):
'''Returns a dictionary with key=the ID of a record and value = the level of this
record in the tree structure.'''
res = {}
for report in self.browse(cr, uid, ids, context=context):
level = 0
if report.parent_id:
level = report.parent_id.level + 1
res[report.id] = level
return res
def _get_children_by_order(self, cr, uid, ids, context=None):
        '''returns a flat list containing each requested record ID followed by all
        of its children, computed recursively and sorted by sequence, ready for printing'''
res = []
for id in ids:
res.append(id)
ids2 = self.search(cr, uid, [('parent_id', '=', id)], order='sequence ASC', context=context)
res += self._get_children_by_order(cr, uid, ids2, context=context)
return res
def _get_balance(self, cr, uid, ids, field_names, args, context=None):
'''returns a dictionary with key=the ID of a record and value=the balance amount
computed for this record. If the record is of type :
'accounts' : it's the sum of the linked accounts
        'account_type' : it's the sum of leaf accounts with such an account_type
'account_report' : it's the amount of the related report
'sum' : it's the sum of the children of this record (aka a 'view' record)'''
account_obj = self.pool.get('account.account')
res = {}
for report in self.browse(cr, uid, ids, context=context):
if report.id in res:
continue
res[report.id] = dict((fn, 0.0) for fn in field_names)
if report.type == 'accounts':
# it's the sum of the linked accounts
for a in report.account_ids:
for field in field_names:
res[report.id][field] += getattr(a, field)
elif report.type == 'account_type':
                # it's the sum of the leaf accounts with such an account type
report_types = [x.id for x in report.account_type_ids]
account_ids = account_obj.search(cr, uid, [('user_type','in', report_types), ('type','!=','view')], context=context)
for a in account_obj.browse(cr, uid, account_ids, context=context):
for field in field_names:
res[report.id][field] += getattr(a, field)
elif report.type == 'account_report' and report.account_report_id:
# it's the amount of the linked report
res2 = self._get_balance(cr, uid, [report.account_report_id.id], field_names, False, context=context)
for key, value in res2.items():
for field in field_names:
res[report.id][field] += value[field]
elif report.type == 'sum':
# it's the sum of the children of this account.report
res2 = self._get_balance(cr, uid, [rec.id for rec in report.children_ids], field_names, False, context=context)
for key, value in res2.items():
for field in field_names:
res[report.id][field] += value[field]
return res
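    # Illustrative shape of the recursion above (an assumption, not from the
    # original source): a 'sum' report such as "Assets" adds the balance/
    # debit/credit triples of its children, an 'account_report' node mirrors
    # another report's totals, and 'accounts'/'account_type' nodes aggregate
    # the matching account.account records directly.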
_columns = {
'name': fields.char('Report Name', required=True, translate=True),
'parent_id': fields.many2one('account.financial.report', 'Parent'),
'children_ids': fields.one2many('account.financial.report', 'parent_id', 'Account Report'),
'sequence': fields.integer('Sequence'),
'balance': fields.function(_get_balance, string='Balance', multi='balance'),
'debit': fields.function(_get_balance, string='Debit', multi='balance'),
        'credit': fields.function(_get_balance, string='Credit', multi='balance'),
'level': fields.function(_get_level, string='Level', store=True, type='integer'),
'type': fields.selection([
('sum','View'),
('accounts','Accounts'),
('account_type','Account Type'),
('account_report','Report Value'),
],'Type'),
'account_ids': fields.many2many('account.account', 'account_account_financial_report', 'report_line_id', 'account_id', 'Accounts'),
'account_report_id': fields.many2one('account.financial.report', 'Report Value'),
'account_type_ids': fields.many2many('account.account.type', 'account_account_financial_report_type', 'report_id', 'account_type_id', 'Account Types'),
'sign': fields.selection([(-1, 'Reverse balance sign'), (1, 'Preserve balance sign')], 'Sign on Reports', required=True, help='For accounts that are typically more debited than credited and that you would like to print as negative amounts in your reports, you should reverse the sign of the balance; e.g.: Expense account. The same applies for accounts that are typically more credited than debited and that you would like to print as positive amounts in your reports; e.g.: Income account.'),
'display_detail': fields.selection([
('no_detail','No detail'),
('detail_flat','Display children flat'),
('detail_with_hierarchy','Display children with hierarchy')
], 'Display details'),
'style_overwrite': fields.selection([
(0, 'Automatic formatting'),
(1,'Main Title 1 (bold, underlined)'),
(2,'Title 2 (bold)'),
(3,'Title 3 (bold, smaller)'),
(4,'Normal Text'),
(5,'Italic Text (smaller)'),
(6,'Smallest Text'),
],'Financial Report Style', help="You can set up here the format you want this record to be displayed. If you leave the automatic formatting, it will be computed based on the financial reports hierarchy (auto-computed field 'level')."),
}
_defaults = {
'type': 'sum',
'display_detail': 'detail_flat',
'sign': 1,
'style_overwrite': 0,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chrisseto/osf.io | addons/figshare/tests/utils.py | 32 | 5115 | import mock
from addons.base.tests.base import OAuthAddonTestCaseMixin, AddonTestCase
from addons.figshare.models import FigshareProvider
from addons.figshare.tests.factories import FigshareAccountFactory
from addons.figshare.serializer import FigshareSerializer
article = {u'count': 1, u'items': [{u'status': u'Draft', u'files': [{u'thumb': None, u'download_url': u'http://files.figshare.com/1348803/0NUTZ', u'name': u'0NUTZ', u'id': 1348803, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348805/0MNXS', u'name': u'0MNXS', u'id': 1348805, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348806/0NUTZ', u'name': u'0NUTZ', u'id': 1348806, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348807/0OX1G', u'name': u'0OX1G', u'id': 1348807, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': u'http://previews.figshare.com/1350751/250_1350751.jpg', u'download_url': u'http://files.figshare.com/1350751/Selection_003.png', u'name': u'Selection_003.png', u'id': 1350751, u'mime_type': u'image/png', u'size': u'18 KB'}, {u'thumb': u'http://previews.figshare.com/1350754/250_1350754.jpg', u'download_url': u'http://files.figshare.com/1350754/Selection_003.png', u'name': u'Selection_003.png', u'id': 1350754, u'mime_type': u'image/png', u'size': u'18 KB'}], u'description': u'<p>This is made using python</p>', u'links': [], u'title': u'New fileset', u'total_size': u'34.59 KB', u'master_publisher_id': 0, u'authors': [{u'first_name': u'Samuel', u'last_name': u'Chrisinger', u'id': 506241, u'full_name': u'Samuel Chrisinger'}], u'defined_type': u'fileset', u'version': 15, u'categories': [{u'id': 77, u'name': u'Applied Computer Science'}], u'published_date': u'22:13, Jan 16, 2014', u'description_nohtml': u'This is made using python', u'article_id': 902210, u'tags': [{u'id': 3564, u'name': u'code'}]}]}
def create_mock_figshare(project):
figshare_mock = mock.create_autospec(FigshareProvider)
figshare_mock.projects.return_value = [{u'owner': 506241, u'description': u'', u'id': 436, u'title': u'OSF Test'}]
figshare_mock.project.return_value = {'articles': [{u'status': u'Draft', u'files': [{u'thumb': None, u'download_url': u'http://files.figshare.com/1348803/0NUTZ', u'name': u'0NUTZ', u'id': 1348803, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348805/0MNXS', u'name': u'0MNXS', u'id': 1348805, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348806/0NUTZ', u'name': u'0NUTZ', u'id': 1348806, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': None, u'download_url': u'http://files.figshare.com/1348807/0OX1G', u'name': u'0OX1G', u'id': 1348807, u'mime_type': u'text/plain', u'size': u'0 KB'}, {u'thumb': u'http://previews.figshare.com/1350751/250_1350751.jpg', u'download_url': u'http://files.figshare.com/1350751/Selection_003.png', u'name': u'Selection_003.png', u'id': 1350751, u'mime_type': u'image/png', u'size': u'18 KB'}, {u'thumb': u'http://previews.figshare.com/1350754/250_1350754.jpg', u'download_url': u'http://files.figshare.com/1350754/Selection_003.png', u'name': u'Selection_003.png', u'id': 1350754, u'mime_type': u'image/png', u'size': u'18 KB'}], u'description': u'<p>This is made using python</p>', u'links': [], u'title': u'New fileset', u'total_size': u'34.59 KB', u'master_publisher_id': 0, u'authors': [{u'first_name': u'Samuel', u'last_name': u'Chrisinger', u'id': 506241, u'full_name': u'Samuel Chrisinger'}], u'defined_type': u'fileset', u'version': 15, u'categories': [{u'id': 77, u'name': u'Applied Computer Science'}], u'published_date': u'22:13, Jan 16, 2014', u'description_nohtml': u'This is made using python', u'article_id': 902210, u'tags': [{u'id': 3564, u'name': u'code'}]}, {u'status': u'Drafts', u'files': [{u'id': 1404749, u'name': u'HW6.pdf', u'thumb': u'http://figshare.com/read/private/1404749/250_1404749.png', u'mime_type': u'application/pdf', u'size': u'177 KB'}], u'description': u'', u'links': [], u'title': u'HW6.pdf', u'total_size': u'172.82 KB', u'master_publisher_id': 0, u'authors': [{u'first_name': u'Samuel', u'last_name': u'Chrisinger', u'id': 506241, u'full_name': u'Samuel Chrisinger'}], u'defined_type': u'paper', u'version': 1, u'categories': [], u'published_date': u'09:25, Feb 24, 2014', u'description_nohtml': u'', u'article_id': 949657, u'tags': []}], u'description': u'', u'created': u'06/03/2014', u'id': 862, u'title': u'OSF Test'}
figshare_mock.articles.return_value = article
figshare_mock.article.return_value = article
return figshare_mock
class FigshareAddonTestCase(OAuthAddonTestCaseMixin, AddonTestCase):
ADDON_SHORT_NAME = 'figshare'
ExternalAccountFactory = FigshareAccountFactory
Provider = FigshareProvider
Serializer = FigshareSerializer
client = None
folder = {
'path': 'fileset',
'name': 'Memes',
'id': '009001'
}
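# Illustrative usage sketch (an assumption, not part of the original module):
# tests can swap the real provider for the autospec mock built above.
#
#   figshare_mock = create_mock_figshare(project=None)
#   assert figshare_mock.article.return_value['count'] == 1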
| apache-2.0 |
polyval/CNC | flask/Lib/site-packages/migrate/versioning/script/sql.py | 63 | 2802 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import re
import shutil
import sqlparse
from migrate.versioning.script import base
from migrate.versioning.template import Template
log = logging.getLogger(__name__)
class SqlScript(base.BaseScript):
"""A file containing plain SQL statements."""
@classmethod
def create(cls, path, **opts):
"""Create an empty migration script at specified path
:returns: :class:`SqlScript instance <migrate.versioning.script.sql.SqlScript>`"""
cls.require_notfound(path)
src = Template(opts.pop('templates_path', None)).get_sql_script(theme=opts.pop('templates_theme', None))
shutil.copy(src, path)
return cls(path)
# TODO: why is step parameter even here?
def run(self, engine, step=None):
"""Runs SQL script through raw dbapi execute call"""
text = self.source()
# Don't rely on SA's autocommit here
# (SA uses .startswith to check if a commit is needed. What if script
# starts with a comment?)
conn = engine.connect()
try:
trans = conn.begin()
try:
# ignore transaction management statements that are
# redundant in SQL script context and result in
# operational error being returned.
#
# Note: we don't ignore ROLLBACK in migration scripts
                # since its usage would be insane anyway, and it's
                # better to fail on its occurrence than to ignore it
                # (and commit the transaction, which contradicts the
                # whole idea of ROLLBACK)
ignored_statements = ('BEGIN', 'END', 'COMMIT')
                ignored_regex = re.compile(r'^\s*(%s).*;?$' % '|'.join(ignored_statements),
re.IGNORECASE)
# NOTE(ihrachys): script may contain multiple statements, and
# not all drivers reliably handle multistatement queries or
# commands passed to .execute(), so split them and execute one
# by one
text = sqlparse.format(text, strip_comments=True, strip_whitespace=True)
for statement in sqlparse.split(text):
if statement:
if re.match(ignored_regex, statement):
log.warning('"%s" found in SQL script; ignoring' % statement)
else:
conn.execute(statement)
trans.commit()
except Exception as e:
log.error("SQL script %s failed: %s", self.path, e)
trans.rollback()
raise
finally:
conn.close()
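# Hypothetical usage sketch (the script path and engine URL are assumptions,
# not part of sqlalchemy-migrate): run a plain-SQL migration against an
# in-memory SQLite database.
#
#   from sqlalchemy import create_engine
#   engine = create_engine('sqlite://')
#   SqlScript('versions/001_add_demo_table.sql').run(engine)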
| apache-2.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/distutils/command/install.py | 88 | 26260 | """distutils.command.install
Implements the Distutils 'install' command."""
from distutils import log
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import sys, os, string
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.util import get_platform
from distutils.errors import DistutilsOptionError
from site import USER_BASE
from site import USER_SITE
if sys.version < "2.2":
WINDOWS_SCHEME = {
'purelib': '$base',
'platlib': '$base',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
else:
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/lib/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/lib/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_user': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/include/python$py_version_short/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
},
'nt': WINDOWS_SCHEME,
'nt_user': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
'scripts': '$userbase/Scripts',
'data' : '$userbase',
},
'os2': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'os2_home': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/include/python$py_version_short/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
},
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
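# Illustrative expansion (an assumption for documentation purposes): each
# scheme entry is a template later filled in via distutils.util.subst_vars,
# imported above.
#
#   subst_vars('$base/lib/python$py_version_short/site-packages',
#              {'base': '/usr/local', 'py_version_short': '2.7'})
#   # -> '/usr/local/lib/python2.7/site-packages'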
class install (Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
('user', None,
"install in user site-package '%s'" % USER_SITE),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
]
boolean_options = ['compile', 'force', 'skip-build', 'user']
negative_opt = {'no-compile' : 'compile'}
def initialize_options (self):
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
self.user = 0
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_userbase = USER_BASE
self.install_usersite = USER_SITE
self.compile = None
self.optimize = None
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options (self):
# This method (and its pliant slaves, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
# distribution are determined. Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError, \
("must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError, \
"must supply either home or prefix/exec-prefix -- not both"
if self.user and (self.prefix or self.exec_prefix or self.home or
self.install_base or self.install_platbase):
raise DistutilsOptionError("can't combine user with prefix, "
"exec_prefix/home, or install_(plat)base")
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = (string.split(sys.version))[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
'userbase': self.install_userbase,
'usersite': self.install_usersite,
}
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print "config vars:"
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Create directories in the home dir:
if self.user:
self.create_home_path()
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers',
'userbase', 'usersite')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, i.e. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
# finalize_options ()
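# Editor's sketch (not part of the original command): how the $base/$platbase
# expansion described above behaves, using the distutils helper directly.
# Directory values are illustrative.
#
#     from distutils.util import subst_vars
#     config_vars = {'base': '/usr/local', 'py_version_short': '2.7'}
#     subst_vars('$base/lib/python$py_version_short/site-packages', config_vars)
#     # -> '/usr/local/lib/python2.7/site-packages'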
def dump_dirs (self, msg):
if DEBUG:
from distutils.fancy_getopt import longopt_xlate
print msg + ":"
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if opt_name in self.negative_opt:
opt_name = string.translate(self.negative_opt[opt_name],
longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = string.translate(opt_name, longopt_xlate)
val = getattr(self, opt_name)
print " %s: %s" % (opt_name, val)
def finalize_unix (self):
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError, \
("install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme("unix_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError, \
"must not supply exec-prefix without prefix"
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
self.select_scheme("unix_prefix")
# finalize_unix ()
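# Quick reference (editor-added), tracing the branches above: --user selects
# the "unix_user" scheme with base = platbase = USER_BASE; --home=DIR selects
# "unix_home" with base = platbase = DIR; otherwise --prefix/--exec-prefix
# (defaulting to sys.prefix/sys.exec_prefix) select "unix_prefix".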
def finalize_other (self): # Windows and Mac OS for now
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme(os.name + "_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError, \
"I don't know how to install stuff on '%s'" % os.name
# finalize_other ()
def select_scheme (self, name):
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
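# For reference (editor-added; quoted from CPython 2.x's install.py from
# memory -- verify against your interpreter), the "unix_prefix" scheme is
# roughly:
#
#     'unix_prefix': {
#         'purelib': '$base/lib/python$py_version_short/site-packages',
#         'platlib': '$platbase/lib/python$py_version_short/site-packages',
#         'headers': '$base/include/python$py_version_short/$dist_name',
#         'scripts': '$base/bin',
#         'data':    '$base',
#     }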
def _expand_attrs (self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs (self):
self._expand_attrs(['install_base',
'install_platbase',
'root'])
def expand_dirs (self):
self._expand_attrs(['install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',])
def convert_paths (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path (self):
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
if type(self.extra_path) is StringType:
self.extra_path = string.split(self.extra_path, ',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
(path_file, extra_dirs) = self.extra_path
else:
raise DistutilsOptionError, \
("'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
# handle_extra_path ()
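# Worked example (editor-added), tracing the code above:
#     extra_path = 'Numeric'             -> path_file='Numeric', extra_dirs='Numeric'
#     extra_path = 'Numeric,Numeric/lib' -> path_file='Numeric', extra_dirs='Numeric/lib'
#     extra_path = None (everywhere)     -> path_file=None, extra_dirs=''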
def change_roots (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
def create_home_path(self):
"""Create directories under ~
"""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in self.config_vars.iteritems():
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0700)" % path)
os.makedirs(path, 0700)
# -- Command execution methods -------------------------------------
def run (self):
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# If we built for any other platform, we can't install.
build_plat = self.distribution.get_command_obj('build').plat_name
# check warn_dir - it is a clue that the 'install' is happening
# internally, and not to sys.path, so we don't check the platform
# matches what we are running.
if self.warn_dir and build_plat != get_platform():
raise DistutilsPlatformError("Can't install when "
"cross-compiling")
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in xrange(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
# run ()
def create_path_file (self):
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs (self):
# Assemble the outputs of all the sub-commands.
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs (self):
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib (self):
"""Return true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers (self):
return self.distribution.has_headers()
def has_scripts (self):
return self.distribution.has_scripts()
def has_data (self):
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self:True),
]
# class install
| gpl-2.0 |
jdber1/opendrop | opendrop/widgets/canvas/_line.py | 2 | 2397 | from typing import Tuple, Optional
import cairo
from gi.repository import GObject
from opendrop.geometry import Line2, Rect2
from ._artist import Artist
__all__ = ('LineArtist',)
class LineArtist(Artist):
_line: Optional[Line2] = None
_stroke_color: Tuple[float, float, float] = (0.0, 0.0, 0.0)
_stroke_width: float = 1.0
_scale_strokes: bool = False
def draw(self, cr: cairo.Context) -> None:
line = self._line
stroke_color = self._stroke_color
stroke_width = self._stroke_width
scale_strokes = self._scale_strokes
if line is None:
return
if line.pt0 == line.pt1:
return
clip_extents = Rect2(cr.clip_extents())
# Evaluate the line where it crosses the left and right edges of the current
# clip rectangle, then clamp an endpoint to the top/bottom edge whenever it
# falls outside the visible region (e.g. for steep lines).
start = line.eval(x=clip_extents.x0)
end = line.eval(x=clip_extents.x1)
if not clip_extents.contains(start):
start = line.eval(y=clip_extents.y0 if start.y < clip_extents.y0 else clip_extents.y1)
if not clip_extents.contains(end):
end = line.eval(y=clip_extents.y0 if end.y < clip_extents.y0 else clip_extents.y1)
cr.move_to(*start)
cr.line_to(*end)
cr.save()
if scale_strokes:
# Reset to device space so the stroke width below is interpreted in device
# (pixel) units rather than being scaled by the canvas transform.
cr.identity_matrix()
cr.set_source_rgb(*stroke_color)
cr.set_line_width(stroke_width)
cr.stroke()
cr.restore()
@GObject.Property
def line(self) -> Optional[Line2]:
return self._line
@line.setter
def line(self, line: Optional[Line2]) -> None:
self._line = line
self._invalidate()
@GObject.Property
def stroke_color(self) -> Tuple[float, float, float]:
return self._stroke_color
@stroke_color.setter
def stroke_color(self, color: Tuple[float, float, float]) -> None:
self._stroke_color = color
self._invalidate()
@GObject.Property
def stroke_width(self) -> float:
return self._stroke_width
@stroke_width.setter
def stroke_width(self, width: float) -> None:
self._stroke_width = width
self._invalidate()
@GObject.Property(type=bool, default=_scale_strokes)
def scale_strokes(self) -> bool:
return self._scale_strokes
@scale_strokes.setter
def scale_strokes(self, value: bool) -> None:
self._scale_strokes = value
self._invalidate()
def _invalidate(self) -> None:
self.invalidate()
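# Editor's sketch of typical usage (hypothetical; the surrounding canvas
# plumbing and the Line2 constructor signature are assumed, not shown here):
#
#     artist = LineArtist()
#     artist.props.line = Line2(pt0=(0.0, 0.0), pt1=(1.0, 1.0))
#     artist.props.stroke_color = (1.0, 0.0, 0.0)
#     artist.props.stroke_width = 2.0
#     artist.props.scale_strokes = True   # keep width fixed in device pixels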
| gpl-3.0 |
AndroidOpenDevelopment/android_external_chromium_org | build/get_syzygy_binaries.py | 17 | 15077 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script for downloading versioned Syzygy binaries."""
import cStringIO
import hashlib
import errno
import json
import logging
import optparse
import os
import re
import shutil
import stat
import sys
import subprocess
import urllib2
import zipfile
_LOGGER = logging.getLogger(os.path.basename(__file__))
# The URL where official builds are archived.
_SYZYGY_ARCHIVE_URL = ('http://syzygy-archive.commondatastorage.googleapis.com/'
'builds/official/%(revision)s')
# A JSON file containing the state of the download directory. If this file and
# directory state do not agree, then the binaries will be downloaded and
# installed again.
_STATE = '.state'
# This matches an integer (an SVN revision number) or a SHA1 value (a GIT hash).
# The archive exclusively uses lowercase GIT hashes.
_REVISION_RE = re.compile('^(?:\d+|[a-f0-9]{40})$')
# This matches an MD5 hash.
_MD5_RE = re.compile('^[a-f0-9]{32}$')
# List of resources to be downloaded and installed. These are tuples with the
# following format:
# (basename, logging name, relative installation path, extraction filter)
_RESOURCES = [
('benchmark.zip', 'benchmark', '', None),
('binaries.zip', 'binaries', 'exe', None),
('symbols.zip', 'symbols', 'exe',
lambda x: x.filename.endswith('.dll.pdb')),
('include.zip', 'include', 'include', None),
('lib.zip', 'library', 'lib', None)]
def _Shell(*cmd, **kw):
"""Runs |cmd|, returns the results from Popen(cmd).communicate()."""
_LOGGER.debug('Executing %s.', cmd)
prog = subprocess.Popen(cmd, shell=True, **kw)
stdout, stderr = prog.communicate()
if prog.returncode != 0:
raise RuntimeError('Command "%s" returned %d.' % (cmd, prog.returncode))
return (stdout, stderr)
def _LoadState(output_dir):
"""Loads the contents of the state file for a given |output_dir|, returning
None if it doesn't exist.
"""
path = os.path.join(output_dir, _STATE)
if not os.path.exists(path):
_LOGGER.debug('No state file found.')
return None
with open(path, 'rb') as f:
_LOGGER.debug('Reading state file: %s', path)
try:
return json.load(f)
except ValueError:
_LOGGER.debug('Invalid state file.')
return None
def _SaveState(output_dir, state, dry_run=False):
"""Saves the |state| dictionary to the given |output_dir| as a JSON file."""
path = os.path.join(output_dir, _STATE)
_LOGGER.debug('Writing state file: %s', path)
if dry_run:
return
with open(path, 'wb') as f:
f.write(json.dumps(state, sort_keys=True, indent=2))
def _Md5(path):
"""Returns the MD5 hash of the file at |path|, which must exist."""
return hashlib.md5(open(path, 'rb').read()).hexdigest()
def _StateIsValid(state):
"""Returns true if the given state structure is valid."""
if not isinstance(state, dict):
_LOGGER.debug('State must be a dict.')
return False
r = state.get('revision', None)
if not isinstance(r, basestring) or not _REVISION_RE.match(r):
_LOGGER.debug('State contains an invalid revision.')
return False
c = state.get('contents', None)
if not isinstance(c, dict):
_LOGGER.debug('State must contain a contents dict.')
return False
for (relpath, md5) in c.iteritems():
if not isinstance(relpath, basestring) or len(relpath) == 0:
_LOGGER.debug('State contents dict contains an invalid path.')
return False
if not isinstance(md5, basestring) or not _MD5_RE.match(md5):
_LOGGER.debug('State contents dict contains an invalid MD5 digest.')
return False
return True
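# Illustrative .state contents (editor-added; the path and digests are made
# up, but the shape is exactly what _StateIsValid accepts):
#
#     {
#       "revision": "0123456789abcdef0123456789abcdef01234567",
#       "contents": {
#         "exe/instrument.exe": "0123456789abcdef0123456789abcdef"
#       }
#     }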
def _BuildActualState(stored, revision, output_dir):
"""Builds the actual state using the provided |stored| state as a template.
Only examines files listed in the stored state, causing the script to ignore
files that have been added to the directories locally. |stored| must be a
valid state dictionary.
"""
contents = {}
state = { 'revision': revision, 'contents': contents }
for relpath, md5 in stored['contents'].iteritems():
abspath = os.path.abspath(os.path.join(output_dir, relpath))
if os.path.isfile(abspath):
m = _Md5(abspath)
contents[relpath] = m
return state
def _StatesAreConsistent(stored, actual):
"""Validates whether two state dictionaries are consistent. Both must be valid
state dictionaries. Additional entries in |actual| are ignored.
"""
if stored['revision'] != actual['revision']:
_LOGGER.debug('Mismatched revision number.')
return False
cont_stored = stored['contents']
cont_actual = actual['contents']
for relpath, md5 in cont_stored.iteritems():
if relpath not in cont_actual:
_LOGGER.debug('Missing content: %s', relpath)
return False
if md5 != cont_actual[relpath]:
_LOGGER.debug('Modified content: %s', relpath)
return False
return True
def _GetCurrentState(revision, output_dir):
"""Loads the current state and checks to see if it is consistent. Returns
a tuple (state, bool). The returned state will always be valid, even if an
invalid state is present on disk.
"""
stored = _LoadState(output_dir)
if not _StateIsValid(stored):
_LOGGER.debug('State is invalid.')
# Return a valid but empty state.
return ({'revision': '0', 'contents': {}}, False)
actual = _BuildActualState(stored, revision, output_dir)
# If the script has been modified consider the state invalid.
path = os.path.join(output_dir, _STATE)
if os.path.getmtime(__file__) > os.path.getmtime(path):
return (stored, False)
# Otherwise, explicitly validate the state.
if not _StatesAreConsistent(stored, actual):
return (stored, False)
return (stored, True)
def _DirIsEmpty(path):
"""Returns true if the given directory is empty, false otherwise."""
for root, dirs, files in os.walk(path):
return not dirs and not files
def _RmTreeHandleReadOnly(func, path, exc):
"""An error handling function for use with shutil.rmtree. This will
detect failures to remove read-only files, and will change their properties
prior to removing them. This is necessary on Windows as os.remove will return
an access error for read-only files, and git repos contain read-only
pack/index files.
"""
excvalue = exc[1]
if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
_LOGGER.debug('Removing read-only path: %s', path)
os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
func(path)
else:
raise
def _RmTree(path):
"""A wrapper of shutil.rmtree that handles read-only files."""
shutil.rmtree(path, ignore_errors=False, onerror=_RmTreeHandleReadOnly)
def _CleanState(output_dir, state, dry_run=False):
"""Cleans up files/directories in |output_dir| that are referenced by
the given |state|. Raises an error if there are local changes. Returns a
dictionary of files that were deleted.
"""
_LOGGER.debug('Deleting files from previous installation.')
deleted = {}
# Generate a list of files to delete, relative to |output_dir|.
contents = state['contents']
files = sorted(contents.keys())
# Try to delete the files. Keep track of directories to delete as well.
dirs = {}
for relpath in files:
fullpath = os.path.join(output_dir, relpath)
fulldir = os.path.dirname(fullpath)
dirs[fulldir] = True
if os.path.exists(fullpath):
# If somehow the file has become a directory complain about it.
if os.path.isdir(fullpath):
raise Exception('Directory exists where file expected: %s' % fullpath)
# Double check that the file doesn't have local changes. If it does
# then refuse to delete it.
if relpath in contents:
stored_md5 = contents[relpath]
actual_md5 = _Md5(fullpath)
if actual_md5 != stored_md5:
raise Exception('File has local changes: %s' % fullpath)
# The file is unchanged so it can safely be deleted.
_LOGGER.debug('Deleting file "%s".', fullpath)
deleted[relpath] = True
if not dry_run:
os.unlink(fullpath)
# Sort directories from longest name to shortest. This lets us remove empty
# directories from the most nested paths first.
dirs = sorted(dirs.keys(), key=lambda x: len(x), reverse=True)
for p in dirs:
if os.path.exists(p) and _DirIsEmpty(p):
_LOGGER.debug('Deleting empty directory "%s".', p)
if not dry_run:
_RmTree(p)
return deleted
def _Download(url):
"""Downloads the given URL and returns the contents as a string."""
response = urllib2.urlopen(url)
if response.code != 200:
raise RuntimeError('Failed to download "%s".' % url)
return response.read()
def _InstallBinaries(options, deleted={}):
"""Installs Syzygy binaries. This assumes that the output directory has
already been cleaned, as it will refuse to overwrite existing files."""
contents = {}
state = { 'revision': options.revision, 'contents': contents }
archive_url = _SYZYGY_ARCHIVE_URL % { 'revision': options.revision }
for (base, name, subdir, filt) in _RESOURCES:
# Create the output directory if it doesn't exist.
fulldir = os.path.join(options.output_dir, subdir)
if os.path.isfile(fulldir):
raise Exception('File exists where a directory needs to be created: %s' %
fulldir)
if not os.path.exists(fulldir):
_LOGGER.debug('Creating directory: %s', fulldir)
if not options.dry_run:
os.makedirs(fulldir)
# Download the archive.
url = archive_url + '/' + base
_LOGGER.debug('Retrieving %s archive at "%s".', name, url)
data = _Download(url)
_LOGGER.debug('Unzipping %s archive.', name)
archive = zipfile.ZipFile(cStringIO.StringIO(data))
for entry in archive.infolist():
if not filt or filt(entry):
fullpath = os.path.normpath(os.path.join(fulldir, entry.filename))
relpath = os.path.relpath(fullpath, options.output_dir)
if os.path.exists(fullpath):
# If in a dry-run take into account the fact that the file *would*
# have been deleted.
if options.dry_run and relpath in deleted:
pass
else:
raise Exception('Path already exists: %s' % fullpath)
# Extract the file and update the state dictionary.
_LOGGER.debug('Extracting "%s".', fullpath)
if not options.dry_run:
archive.extract(entry.filename, fulldir)
md5 = _Md5(fullpath)
contents[relpath] = md5
if sys.platform == 'cygwin':
os.chmod(fullpath, os.stat(fullpath).st_mode | stat.S_IXUSR)
return state
def _ParseCommandLine():
"""Parses the command-line and returns an options structure."""
option_parser = optparse.OptionParser()
option_parser.add_option('--dry-run', action='store_true', default=False,
help='If true then will simply list actions that would be performed.')
option_parser.add_option('--force', action='store_true', default=False,
help='Force an installation even if the binaries are up to date.')
option_parser.add_option('--output-dir', type='string',
help='The path where the binaries will be replaced. Existing binaries '
'will only be overwritten if not up to date.')
option_parser.add_option('--overwrite', action='store_true', default=False,
help='If specified then the installation will happily delete and rewrite '
'the entire output directory, blasting any local changes.')
option_parser.add_option('--revision', type='string',
help='The SVN revision or GIT hash associated with the required version.')
option_parser.add_option('--revision-file', type='string',
help='A text file containing an SVN revision or GIT hash.')
option_parser.add_option('--verbose', dest='log_level', action='store_const',
default=logging.INFO, const=logging.DEBUG,
help='Enables verbose logging.')
option_parser.add_option('--quiet', dest='log_level', action='store_const',
default=logging.INFO, const=logging.ERROR,
help='Disables all output except for errors.')
options, args = option_parser.parse_args()
if args:
option_parser.error('Unexpected arguments: %s' % args)
if not options.output_dir:
option_parser.error('Must specify --output-dir.')
if not options.revision and not options.revision_file:
option_parser.error('Must specify one of --revision or --revision-file.')
if options.revision and options.revision_file:
option_parser.error('Must not specify both --revision and --revision-file.')
# Configure logging.
logging.basicConfig(level=options.log_level)
# If a revision file has been specified then read it.
if options.revision_file:
options.revision = open(options.revision_file, 'rb').read().strip()
_LOGGER.debug('Parsed revision "%s" from file "%s".',
options.revision, options.revision_file)
# Ensure that the specified SVN revision or GIT hash is valid.
if not _REVISION_RE.match(options.revision):
option_parser.error('Must specify a valid SVN or GIT revision.')
# This just makes output prettier to read.
options.output_dir = os.path.normpath(options.output_dir)
return options
def main():
# We only care about Windows platforms, as the Syzygy binaries aren't used
# elsewhere.
if sys.platform not in ('win32', 'cygwin'):
return
options = _ParseCommandLine()
if options.dry_run:
_LOGGER.debug('Performing a dry-run.')
# Load the current installation state, and validate it against the
# requested installation.
state, is_consistent = _GetCurrentState(options.revision, options.output_dir)
# Decide whether or not an install is necessary.
if options.force:
_LOGGER.debug('Forcing reinstall of binaries.')
elif is_consistent:
# Avoid doing any work if the contents of the directory are consistent.
_LOGGER.debug('State unchanged, no reinstall necessary.')
return
# Under normal logging this is the only message that will be reported.
_LOGGER.info('Installing revision %s Syzygy binaries.',
options.revision[0:12])
# Clean up the old state to begin with.
deleted = []
if options.overwrite:
if os.path.exists(options.output_dir):
# If overwrite was specified then take a heavy-handed approach.
_LOGGER.debug('Deleting entire installation directory.')
if not options.dry_run:
_RmTree(options.output_dir)
else:
# Otherwise only delete things that the previous installation put in place,
# and take care to preserve any local changes.
deleted = _CleanState(options.output_dir, state, options.dry_run)
# Install the new binaries. In a dry-run this will actually download the
# archives, but it won't write anything to disk.
state = _InstallBinaries(options, deleted)
# Build and save the state for the directory.
_SaveState(options.output_dir, state, options.dry_run)
if __name__ == '__main__':
main()
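# Example invocation (editor-added; the output path and revision are
# illustrative):
#
#     python get_syzygy_binaries.py \
#         --output-dir=third_party/syzygy/binaries \
#         --revision=0123456789abcdef0123456789abcdef01234567 --verbose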
| bsd-3-clause |
dvliman/jaikuengine | .google_appengine/google/appengine/_internal/django/core/management/commands/startapp.py | 23 | 1963 | import os
from google.appengine._internal.django.core.management.base import copy_helper, CommandError, LabelCommand
from google.appengine._internal.django.utils.importlib import import_module
class Command(LabelCommand):
help = "Creates a Django app directory structure for the given app name in the current directory."
args = "[appname]"
label = 'application name'
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
def handle_label(self, app_name, directory=None, **options):
if directory is None:
directory = os.getcwd()
# Determine the project_name by using the basename of directory,
# which should be the full path of the project directory (or the
# current directory if no directory was passed).
project_name = os.path.basename(directory)
if app_name == project_name:
raise CommandError("You cannot create an app with the same name"
" (%r) as your project." % app_name)
# Check that the app_name cannot be imported.
try:
import_module(app_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing Python module and cannot be used as an app name. Please try another name." % app_name)
copy_helper(self.style, 'app', app_name, directory, project_name)
class ProjectCommand(Command):
help = ("Creates a Django app directory structure for the given app name"
" in this project's directory.")
def __init__(self, project_directory):
super(ProjectCommand, self).__init__()
self.project_directory = project_directory
def handle_label(self, app_name, **options):
super(ProjectCommand, self).handle_label(app_name, self.project_directory, **options)
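# Editor's note: this command backs the familiar CLI entry points, e.g.
#
#     django-admin.py startapp polls
#     python manage.py startapp polls
#
# which create a polls/ package from the 'app' template via copy_helper above.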
| apache-2.0 |
dharmabumstead/ansible | test/units/module_utils/basic/test_argument_spec.py | 17 | 19207 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import os
import pytest
from ansible.compat.tests.mock import MagicMock, patch
from ansible.module_utils import basic
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import builtins
from units.mock.procenv import ModuleTestCase, swap_stdin_and_argv
MOCK_VALIDATOR_FAIL = MagicMock(side_effect=TypeError("bad conversion"))
# Data is argspec, argument, expected
VALID_SPECS = (
# Simple type=int
({'arg': {'type': 'int'}}, {'arg': 42}, 42),
# Type=int with conversion from string
({'arg': {'type': 'int'}}, {'arg': '42'}, 42),
# Simple type=float
({'arg': {'type': 'float'}}, {'arg': 42.0}, 42.0),
# Type=float conversion from int
({'arg': {'type': 'float'}}, {'arg': 42}, 42.0),
# Type=float conversion from string
({'arg': {'type': 'float'}}, {'arg': '42.0'}, 42.0),
# Type=float conversion from string without decimal point
({'arg': {'type': 'float'}}, {'arg': '42'}, 42.0),
# Simple type=bool
({'arg': {'type': 'bool'}}, {'arg': True}, True),
# Type=int with conversion from string
({'arg': {'type': 'bool'}}, {'arg': 'yes'}, True),
# Type=str converts to string
({'arg': {'type': 'str'}}, {'arg': 42}, '42'),
# Type is implicit, converts to string
({'arg': {}}, {'arg': 42}, '42'),
# parameter is required
({'arg': {'required': True}}, {'arg': 42}, '42'),
)
INVALID_SPECS = (
# Type is int; unable to convert this string
({'arg': {'type': 'int'}}, {'arg': "bad"}, "invalid literal for int() with base 10: 'bad'"),
# Type is int; unable to convert float
({'arg': {'type': 'int'}}, {'arg': 42.1}, "'float'> cannot be converted to an int"),
# type is a callable that fails to convert
({'arg': {'type': MOCK_VALIDATOR_FAIL}}, {'arg': "bad"}, "bad conversion"),
# unknown parameter
({'arg': {'type': 'int'}}, {'other': 'bad', '_ansible_module_name': 'ansible_unittest'},
'Unsupported parameters for (ansible_unittest) module: other Supported parameters include: arg'),
# parameter is required
({'arg': {'required': True}}, {}, 'missing required arguments: arg'),
)
@pytest.fixture
def complex_argspec():
arg_spec = dict(
foo=dict(required=True, aliases=['dup']),
bar=dict(),
bam=dict(),
baz=dict(fallback=(basic.env_fallback, ['BAZ'])),
bar1=dict(type='bool'),
zardoz=dict(choices=['one', 'two']),
zardoz2=dict(type='list', choices=['one', 'two', 'three']),
)
mut_ex = (('bar', 'bam'),)
req_to = (('bam', 'baz'),)
kwargs = dict(
argument_spec=arg_spec,
mutually_exclusive=mut_ex,
required_together=req_to,
no_log=True,
add_file_common_args=True,
supports_check_mode=True,
)
return kwargs
@pytest.fixture
def options_argspec_list():
options_spec = dict(
foo=dict(required=True, aliases=['dup']),
bar=dict(),
bam=dict(),
baz=dict(fallback=(basic.env_fallback, ['BAZ'])),
bam1=dict(),
bam2=dict(default='test'),
bam3=dict(type='bool'),
)
arg_spec = dict(
foobar=dict(
type='list',
elements='dict',
options=options_spec,
mutually_exclusive=[
['bam', 'bam1']
],
required_if=[
['foo', 'hello', ['bam']],
['foo', 'bam2', ['bam2']]
],
required_one_of=[
['bar', 'bam']
],
required_together=[
['bam1', 'baz']
]
)
)
kwargs = dict(
argument_spec=arg_spec,
no_log=True,
add_file_common_args=True,
supports_check_mode=True
)
return kwargs
@pytest.fixture
def options_argspec_dict():
# should test ok, for options in dict format.
kwargs = options_argspec_list()
kwargs['argument_spec']['foobar']['type'] = 'dict'
return kwargs
#
# Tests for one aspect of arg_spec
#
@pytest.mark.parametrize('argspec, expected, stdin', [(s[0], s[2], s[1]) for s in VALID_SPECS],
indirect=['stdin'])
def test_validator_basic_types(argspec, expected, stdin):
am = basic.AnsibleModule(argspec)
if 'type' in argspec['arg']:
type_ = getattr(builtins, argspec['arg']['type'])
else:
type_ = str
assert isinstance(am.params['arg'], type_)
assert am.params['arg'] == expected
@pytest.mark.parametrize('stdin', [{'arg': 42}], indirect=['stdin'])
def test_validator_function(mocker, stdin):
# Type is a callable
MOCK_VALIDATOR_SUCCESS = mocker.MagicMock(return_value=27)
argspec = {'arg': {'type': MOCK_VALIDATOR_SUCCESS}}
am = basic.AnsibleModule(argspec)
assert isinstance(am.params['arg'], int)
assert am.params['arg'] == 27
@pytest.mark.parametrize('argspec, expected, stdin', [(s[0], s[2], s[1]) for s in INVALID_SPECS],
indirect=['stdin'])
def test_validator_fail(stdin, capfd, argspec, expected):
with pytest.raises(SystemExit):
basic.AnsibleModule(argument_spec=argspec)
out, err = capfd.readouterr()
assert not err
assert expected in json.loads(out)['msg']
assert json.loads(out)['failed']
class TestComplexArgSpecs:
"""Test with a more complex arg_spec"""
@pytest.mark.parametrize('stdin', [{'foo': 'hello'}, {'dup': 'hello'}], indirect=['stdin'])
def test_complex_required(self, stdin, complex_argspec):
"""Test that the complex argspec works if we give it its required param as either the canonical or aliased name"""
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['foo'], str)
assert am.params['foo'] == 'hello'
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'test'}], indirect=['stdin'])
def test_complex_type_fallback(self, mocker, stdin, complex_argspec):
"""Test that the complex argspec works if we get a required parameter via fallback"""
environ = os.environ.copy()
environ['BAZ'] = 'test data'
mocker.patch('ansible.module_utils.basic.os.environ', environ)
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['baz'], str)
assert am.params['baz'] == 'test data'
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'bad', 'bam': 'bad2'}], indirect=['stdin'])
def test_fail_mutually_exclusive(self, capfd, stdin, complex_argspec):
"""Fail because of mutually exclusive parameters"""
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are mutually exclusive: bar, bam"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bam': 'bad2'}], indirect=['stdin'])
def test_fail_required_together(self, capfd, stdin, complex_argspec):
"""Fail because only one of a required_together pair of parameters was specified"""
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are required together: bam, baz"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'bar': 'hi'}], indirect=['stdin'])
def test_fail_required_together_and_default(self, capfd, stdin, complex_argspec):
"""Fail because one of a required_together pair of parameters has a default and the other was not specified"""
complex_argspec['argument_spec']['baz'] = {'default': 42}
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are required together: bam, baz"
@pytest.mark.parametrize('stdin', [{'foo': 'hello'}], indirect=['stdin'])
def test_fail_required_together_and_fallback(self, capfd, mocker, stdin, complex_argspec):
"""Fail because one of a required_together pair of parameters has a fallback and the other was not specified"""
environ = os.environ.copy()
environ['BAZ'] = 'test data'
mocker.patch('ansible.module_utils.basic.os.environ', environ)
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "parameters are required together: bam, baz"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zardoz2': ['one', 'four', 'five']}], indirect=['stdin'])
def test_fail_list_with_choices(self, capfd, mocker, stdin, complex_argspec):
"""Fail because one of the items is not in the choice"""
with pytest.raises(SystemExit):
basic.AnsibleModule(**complex_argspec)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert results['msg'] == "value of zardoz2 must be one or more of: one, two, three. Got no match for: four, five"
@pytest.mark.parametrize('stdin', [{'foo': 'hello', 'zardoz2': ['one', 'three']}], indirect=['stdin'])
def test_list_with_choices(self, capfd, mocker, stdin, complex_argspec):
"""Test choices with list"""
am = basic.AnsibleModule(**complex_argspec)
assert isinstance(am.params['zardoz2'], list)
assert am.params['zardoz2'] == ['one', 'three']
class TestComplexOptions:
"""Test arg spec options"""
# (Parameters, expected value of module.params['foobar'])
OPTIONS_PARAMS_LIST = (
({'foobar': [{"foo": "hello", "bam": "good"}, {"foo": "test", "bar": "good"}]},
[{'foo': 'hello', 'bam': 'good', 'bam2': 'test', 'bar': None, 'baz': None, 'bam1': None, 'bam3': None},
{'foo': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None},
]),
# Alias for required param
({'foobar': [{"dup": "test", "bar": "good"}]},
[{'foo': 'test', 'dup': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None}]
),
# Required_if utilizing default value of the requirement
({'foobar': [{"foo": "bam2", "bar": "required_one_of"}]},
[{'bam': None, 'bam1': None, 'bam2': 'test', 'bam3': None, 'bar': 'required_one_of', 'baz': None, 'foo': 'bam2'}]
),
# Check that a bool option is converted
({"foobar": [{"foo": "required", "bam": "good", "bam3": "yes"}]},
[{'bam': 'good', 'bam1': None, 'bam2': 'test', 'bam3': True, 'bar': None, 'baz': None, 'foo': 'required'}]
),
)
# (Parameters, expected value of module.params['foobar'])
OPTIONS_PARAMS_DICT = (
({'foobar': {"foo": "hello", "bam": "good"}},
{'foo': 'hello', 'bam': 'good', 'bam2': 'test', 'bar': None, 'baz': None, 'bam1': None, 'bam3': None}
),
# Alias for required param
({'foobar': {"dup": "test", "bar": "good"}},
{'foo': 'test', 'dup': 'test', 'bam': None, 'bam2': 'test', 'bar': 'good', 'baz': None, 'bam1': None, 'bam3': None}
),
# Required_if utilizing default value of the requirement
({'foobar': {"foo": "bam2", "bar": "required_one_of"}},
{'bam': None, 'bam1': None, 'bam2': 'test', 'bam3': None, 'bar': 'required_one_of', 'baz': None, 'foo': 'bam2'}
),
# Check that a bool option is converted
({"foobar": {"foo": "required", "bam": "good", "bam3": "yes"}},
{'bam': 'good', 'bam1': None, 'bam2': 'test', 'bam3': True, 'bar': None, 'baz': None, 'foo': 'required'}
),
)
# (Parameters, failure message)
FAILING_PARAMS_LIST = (
# Missing required option
({'foobar': [{}]}, 'missing required arguments: foo found in foobar'),
# Invalid option
({'foobar': [{"foo": "hello", "bam": "good", "invalid": "bad"}]}, 'module: invalid found in foobar. Supported parameters include'),
# Mutually exclusive options found
({'foobar': [{"foo": "test", "bam": "bad", "bam1": "bad", "baz": "req_to"}]},
'parameters are mutually exclusive: bam, bam1 found in foobar'),
# required_if fails
({'foobar': [{"foo": "hello", "bar": "bad"}]},
'foo is hello but all of the following are missing: bam found in foobar'),
# Missing required_one_of option
({'foobar': [{"foo": "test"}]},
'one of the following is required: bar, bam found in foobar'),
# Missing required_together option
({'foobar': [{"foo": "test", "bar": "required_one_of", "bam1": "bad"}]},
'parameters are required together: bam1, baz found in foobar'),
)
# (Parameters, failure message)
FAILING_PARAMS_DICT = (
# Missing required option
({'foobar': {}}, 'missing required arguments: foo found in foobar'),
# Invalid option
({'foobar': {"foo": "hello", "bam": "good", "invalid": "bad"}},
'module: invalid found in foobar. Supported parameters include'),
# Mutually exclusive options found
({'foobar': {"foo": "test", "bam": "bad", "bam1": "bad", "baz": "req_to"}},
'parameters are mutually exclusive: bam, bam1 found in foobar'),
# required_if fails
({'foobar': {"foo": "hello", "bar": "bad"}},
'foo is hello but all of the following are missing: bam found in foobar'),
# Missing required_one_of option
({'foobar': {"foo": "test"}},
'one of the following is required: bar, bam found in foobar'),
# Missing required_together option
({'foobar': {"foo": "test", "bar": "required_one_of", "bam1": "bad"}},
'parameters are required together: bam1, baz found in foobar'),
)
@pytest.mark.parametrize('stdin, expected', OPTIONS_PARAMS_DICT, indirect=['stdin'])
def test_options_type_dict(self, stdin, options_argspec_dict, expected):
"""Test that a basic creation with required and required_if works"""
# should test ok, tests basic foo requirement and required_if
am = basic.AnsibleModule(**options_argspec_dict)
assert isinstance(am.params['foobar'], dict)
assert am.params['foobar'] == expected
@pytest.mark.parametrize('stdin, expected', OPTIONS_PARAMS_LIST, indirect=['stdin'])
def test_options_type_list(self, stdin, options_argspec_list, expected):
"""Test that a basic creation with required and required_if works"""
# should test ok, tests basic foo requirement and required_if
am = basic.AnsibleModule(**options_argspec_list)
assert isinstance(am.params['foobar'], list)
assert am.params['foobar'] == expected
@pytest.mark.parametrize('stdin, expected', FAILING_PARAMS_DICT, indirect=['stdin'])
def test_fail_validate_options_dict(self, capfd, stdin, options_argspec_dict, expected):
"""Fail because one of a required_together pair of parameters has a default and the other was not specified"""
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**options_argspec_dict)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert expected in results['msg']
@pytest.mark.parametrize('stdin, expected', FAILING_PARAMS_LIST, indirect=['stdin'])
def test_fail_validate_options_list(self, capfd, stdin, options_argspec_list, expected):
"""Fail because one of a required_together pair of parameters has a default and the other was not specified"""
with pytest.raises(SystemExit):
am = basic.AnsibleModule(**options_argspec_list)
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert expected in results['msg']
@pytest.mark.parametrize('stdin', [{'foobar': {'foo': 'required', 'bam1': 'test', 'bar': 'case'}}], indirect=['stdin'])
def test_fallback_in_option(self, mocker, stdin, options_argspec_dict):
"""Test that the complex argspec works if we get a required parameter via fallback"""
environ = os.environ.copy()
environ['BAZ'] = 'test data'
mocker.patch('ansible.module_utils.basic.os.environ', environ)
am = basic.AnsibleModule(**options_argspec_dict)
assert isinstance(am.params['foobar']['baz'], str)
assert am.params['foobar']['baz'] == 'test data'
class TestLoadFileCommonArguments:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_smoketest_load_file_common_args(self, am):
"""With no file arguments, an empty dict is returned"""
am.selinux_mls_enabled = MagicMock()
am.selinux_mls_enabled.return_value = True
am.selinux_default_context = MagicMock()
am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3)
assert am.load_file_common_arguments(params={}) == {}
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_load_file_common_args(self, am, mocker):
am.selinux_mls_enabled = MagicMock()
am.selinux_mls_enabled.return_value = True
am.selinux_default_context = MagicMock()
am.selinux_default_context.return_value = 'unconfined_u:object_r:default_t:s0'.split(':', 3)
base_params = dict(
path='/path/to/file',
mode=0o600,
owner='root',
group='root',
seuser='_default',
serole='_default',
setype='_default',
selevel='_default',
)
extended_params = base_params.copy()
extended_params.update(dict(
follow=True,
foo='bar',
))
final_params = base_params.copy()
final_params.update(dict(
path='/path/to/real_file',
secontext=['unconfined_u', 'object_r', 'default_t', 's0'],
attributes=None,
))
# with the proper params specified, the returned dictionary should represent
# only those params which have something to do with the file arguments, excluding
# other params and updated as required with proper values which may have been
# massaged by the method
mocker.patch('os.path.islink', return_value=True)
mocker.patch('os.path.realpath', return_value='/path/to/real_file')
res = am.load_file_common_arguments(params=extended_params)
assert res == final_params
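# Editor's sketch: the AnsibleModule pattern these tests exercise, as it
# would appear in a module (the option names below are illustrative):
#
#     from ansible.module_utils.basic import AnsibleModule
#
#     module = AnsibleModule(
#         argument_spec=dict(
#             name=dict(type='str', required=True),
#             state=dict(type='str', choices=['present', 'absent'],
#                        default='present'),
#         ),
#         supports_check_mode=True,
#     )
#     module.exit_json(changed=False, name=module.params['name'])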
| gpl-3.0 |
MeshCollider/bitcoin | test/functional/rpc_blockchain.py | 7 | 17905 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import os
import subprocess
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.blocktools import (
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
CBlockHeader,
FromHex,
msg_block,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
get_datadir_path,
)
from test_framework.wallet import MiniWallet
class BlockchainTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
self.mine_chain()
self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1']) # Set extra args with pruning after rescan is complete
self._test_getblockchaininfo()
self._test_getchaintxstats()
self._test_gettxoutsetinfo()
self._test_getblockheader()
self._test_getdifficulty()
self._test_getnetworkhashps()
self._test_stopatheight()
self._test_waitforblockheight()
self._test_getblock()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
self.log.info('Create some old blocks')
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
# ten-minute steps from genesis block time
self.nodes[0].setmocktime(t)
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_P2WSH_OP_TRUE)
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)
def _test_getblockchaininfo(self):
self.log.info("Test getblockchaininfo")
keys = [
'bestblockhash',
'blocks',
'chain',
'chainwork',
'difficulty',
'headers',
'initialblockdownload',
'mediantime',
'pruned',
'size_on_disk',
'softforks',
'verificationprogress',
'warnings',
]
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if manual pruning is enabled
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))
# size_on_disk should be > 0
assert_greater_than(res['size_on_disk'], 0)
# pruneheight should be greater or equal to 0
assert_greater_than_or_equal(res['pruneheight'], 0)
# check other pruning fields given that prune=1
assert res['pruned']
assert not res['automatic_pruning']
self.restart_node(0, ['-stopatheight=207'])
res = self.nodes[0].getblockchaininfo()
# should have exact keys
assert_equal(sorted(res.keys()), keys)
self.restart_node(0, ['-stopatheight=207', '-prune=550'])
res = self.nodes[0].getblockchaininfo()
# result should have these additional pruning keys if prune=550
assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))
# check related fields
assert res['pruned']
assert_equal(res['pruneheight'], 0)
assert res['automatic_pruning']
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
assert_equal(res['softforks'], {
'bip34': {'type': 'buried', 'active': False, 'height': 500},
'bip66': {'type': 'buried', 'active': False, 'height': 1251},
'bip65': {'type': 'buried', 'active': False, 'height': 1351},
'csv': {'type': 'buried', 'active': False, 'height': 432},
'segwit': {'type': 'buried', 'active': True, 'height': 0},
'testdummy': {
'type': 'bip9',
'bip9': {
'status': 'started',
'bit': 28,
'start_time': 0,
'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value
'since': 144,
'statistics': {
'period': 144,
'threshold': 108,
'elapsed': 57,
'count': 57,
'possible': True,
},
},
'active': False
},
'taproot': {
'type': 'bip9',
'bip9': {
'status': 'active',
'start_time': -1,
'timeout': 9223372036854775807,
'since': 0
},
'height': 0,
'active': True
}
})
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
# Test `getchaintxstats` invalid extra parameters
assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)
# Test `getchaintxstats` invalid `nblocks`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())
# Test `getchaintxstats` invalid `blockhash`
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
blockhash = self.nodes[0].getblockhash(200)
self.nodes[0].invalidateblock(blockhash)
assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
self.nodes[0].reconsiderblock(blockhash)
chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
# 200 txs plus genesis tx
assert_equal(chaintxstats['txcount'], 201)
# tx rate should be 1 per 10 minutes, or 1/600
# we have to round because of binary math
assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))
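# Worked arithmetic (editor-added): with nblocks=1 the window spans one
# 600-second block containing one tx, so txrate == 1/600 tx per second and
# txrate * 600 == 1.0 up to floating-point error -- hence round(..., 10).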
b1_hash = self.nodes[0].getblockhash(1)
b1 = self.nodes[0].getblock(b1_hash)
b200_hash = self.nodes[0].getblockhash(200)
b200 = self.nodes[0].getblock(b200_hash)
time_diff = b200['mediantime'] - b1['mediantime']
chaintxstats = self.nodes[0].getchaintxstats()
assert_equal(chaintxstats['time'], b200['time'])
assert_equal(chaintxstats['txcount'], 201)
assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
assert_equal(chaintxstats['window_final_block_height'], 200)
assert_equal(chaintxstats['window_block_count'], 199)
assert_equal(chaintxstats['window_tx_count'], 199)
assert_equal(chaintxstats['window_interval'], time_diff)
assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))
chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
assert_equal(chaintxstats['time'], b1['time'])
assert_equal(chaintxstats['txcount'], 2)
assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
assert_equal(chaintxstats['window_final_block_height'], 1)
assert_equal(chaintxstats['window_block_count'], 0)
assert 'window_tx_count' not in chaintxstats
assert 'window_interval' not in chaintxstats
assert 'txrate' not in chaintxstats
def _test_gettxoutsetinfo(self):
node = self.nodes[0]
res = node.gettxoutsetinfo()
assert_equal(res['total_amount'], Decimal('8725.00000000'))
assert_equal(res['transactions'], 200)
assert_equal(res['height'], 200)
assert_equal(res['txouts'], 200)
assert_equal(res['bogosize'], 16800)
assert_equal(res['bestblock'], node.getblockhash(200))
size = res['disk_size']
assert size > 6400
assert size < 64000
assert_equal(len(res['bestblock']), 64)
assert_equal(len(res['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
b1hash = node.getblockhash(1)
node.invalidateblock(b1hash)
res2 = node.gettxoutsetinfo()
assert_equal(res2['transactions'], 0)
assert_equal(res2['total_amount'], Decimal('0'))
assert_equal(res2['height'], 0)
assert_equal(res2['txouts'], 0)
assert_equal(res2['bogosize'], 0)
assert_equal(res2['bestblock'], node.getblockhash(0))
assert_equal(len(res2['hash_serialized_2']), 64)
self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
node.reconsiderblock(b1hash)
res3 = node.gettxoutsetinfo()
# The field 'disk_size' is non-deterministic and can thus not be
# compared between res and res3. Everything else should be the same.
del res['disk_size'], res3['disk_size']
assert_equal(res, res3)
self.log.info("Test hash_type option for gettxoutsetinfo()")
# Adding hash_type 'hash_serialized_2', which is the default, should
# not change the result.
res4 = node.gettxoutsetinfo(hash_type='hash_serialized_2')
del res4['disk_size']
assert_equal(res, res4)
# hash_type none should not return a UTXO set hash.
res5 = node.gettxoutsetinfo(hash_type='none')
assert 'hash_serialized_2' not in res5
# hash_type muhash should return a different UTXO set hash.
res6 = node.gettxoutsetinfo(hash_type='muhash')
assert 'muhash' in res6
assert res['hash_serialized_2'] != res6['muhash']
# muhash should not be included in gettxoutsetinfo output unless requested.
for r in [res, res2, res3, res4, res5]:
assert 'muhash' not in r
# Unknown hash_type raises an error
assert_raises_rpc_error(-8, "foohash is not a valid hash_type", node.gettxoutsetinfo, "foohash")
def _test_getblockheader(self):
node = self.nodes[0]
assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
besthash = node.getbestblockhash()
secondbesthash = node.getblockhash(199)
header = node.getblockheader(blockhash=besthash)
assert_equal(header['hash'], besthash)
assert_equal(header['height'], 200)
assert_equal(header['confirmations'], 1)
assert_equal(header['previousblockhash'], secondbesthash)
assert_is_hex_string(header['chainwork'])
assert_equal(header['nTx'], 1)
assert_is_hash_string(header['hash'])
assert_is_hash_string(header['previousblockhash'])
assert_is_hash_string(header['merkleroot'])
assert_is_hash_string(header['bits'], length=None)
assert isinstance(header['time'], int)
assert isinstance(header['mediantime'], int)
assert isinstance(header['nonce'], int)
assert isinstance(header['version'], int)
assert isinstance(int(header['versionHex'], 16), int)
assert isinstance(header['difficulty'], Decimal)
# Test with verbose=False, which should return the header as hex.
header_hex = node.getblockheader(blockhash=besthash, verbose=False)
assert_is_hex_string(header_hex)
header = FromHex(CBlockHeader(), header_hex)
header.calc_sha256()
assert_equal(header.hash, besthash)
assert 'previousblockhash' not in node.getblockheader(node.getblockhash(0))
assert 'nextblockhash' not in node.getblockheader(node.getbestblockhash())
def _test_getdifficulty(self):
difficulty = self.nodes[0].getdifficulty()
# 1 hash in 2 should be valid, so difficulty should be 1/2**31
# binary => decimal => binary math is why we do this check
assert abs(difficulty * 2**31 - 1) < 0.0001
def _test_getnetworkhashps(self):
hashes_per_second = self.nodes[0].getnetworkhashps()
# This should be 2 hashes every 10 minutes or 1/300
assert abs(hashes_per_second * 300 - 1) < 0.0001
def _test_stopatheight(self):
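# Node 0 is assumed to have been started with -stopatheight=207 in its
# startup arguments (set elsewhere in the test, not shown here), so mining
# past that height should shut the node down.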
assert_equal(self.nodes[0].getblockcount(), 200)
self.nodes[0].generatetoaddress(6, ADDRESS_BCRT1_P2WSH_OP_TRUE)
assert_equal(self.nodes[0].getblockcount(), 206)
self.log.debug('Node should not stop at this height')
assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
try:
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_P2WSH_OP_TRUE)
except (ConnectionError, http.client.BadStatusLine):
pass # The node already shut down before response
self.log.debug('Node should stop at this height...')
self.nodes[0].wait_until_stopped()
self.start_node(0)
assert_equal(self.nodes[0].getblockcount(), 207)
def _test_waitforblockheight(self):
self.log.info("Test waitforblockheight")
node = self.nodes[0]
peer = node.add_p2p_connection(P2PInterface())
current_height = node.getblock(node.getbestblockhash())['height']
# Create a fork somewhere below our current height, invalidate the tip
# of that fork, and then ensure that waitforblockheight still
# works as expected.
#
# (Previously this was broken based on setting
# `rpc/blockchain.cpp:latestblock` incorrectly.)
#
b20hash = node.getblockhash(20)
b20 = node.getblock(b20hash)
def solve_and_send_block(prevhash, height, time):
b = create_block(prevhash, create_coinbase(height), time)
b.solve()
peer.send_and_ping(msg_block(b))
return b
b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
node.invalidateblock(b22f.hash)
def assert_waitforheight(height, timeout=2):
assert_equal(
node.waitforblockheight(height=height, timeout=timeout)['height'],
current_height)
assert_waitforheight(0)
assert_waitforheight(current_height - 1)
assert_waitforheight(current_height)
assert_waitforheight(current_height + 1)
def _test_getblock(self):
node = self.nodes[0]
miniwallet = MiniWallet(node)
miniwallet.scan_blocks(num=5)
fee_per_byte = Decimal('0.00000010')
fee_per_kb = 1000 * fee_per_byte
miniwallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node)
blockhash = node.generate(1)[0]
self.log.info("Test that getblock with verbosity 1 doesn't include fee")
block = node.getblock(blockhash, 1)
assert 'fee' not in block['tx'][1]
self.log.info('Test that getblock with verbosity 2 includes expected fee')
block = node.getblock(blockhash, 2)
tx = block['tx'][1]
assert 'fee' in tx
assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)
self.log.info("Test that getblock with verbosity 2 still works with pruned Undo data")
datadir = get_datadir_path(self.options.tmpdir, 0)
def move_block_file(old, new):
old_path = os.path.join(datadir, self.chain, 'blocks', old)
new_path = os.path.join(datadir, self.chain, 'blocks', new)
os.rename(old_path, new_path)
# Move instead of deleting so we can restore chain state afterwards
move_block_file('rev00000.dat', 'rev_wrong')
block = node.getblock(blockhash, 2)
assert 'fee' not in block['tx'][1]
# Restore chain state
move_block_file('rev_wrong', 'rev00000.dat')
assert 'previousblockhash' not in node.getblock(node.getblockhash(0))
assert 'nextblockhash' not in node.getblock(node.getbestblockhash())
if __name__ == '__main__':
BlockchainTest().main()
| mit |
leedm777/ansible | v1/ansible/runner/poller.py | 132 | 4480 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import time
from ansible import errors
class AsyncPoller(object):
""" Manage asynchronous jobs. """
def __init__(self, results, runner):
self.runner = runner
self.results = { 'contacted': {}, 'dark': {}}
self.hosts_to_poll = []
self.completed = False
# flag to determine if at least one host was contacted
self.active = False
# True to work with the `and` below
skipped = True
jid = None
for (host, res) in results['contacted'].iteritems():
if res.get('started', False):
self.hosts_to_poll.append(host)
jid = res.get('ansible_job_id', None)
self.runner.vars_cache[host]['ansible_job_id'] = jid
self.active = True
else:
skipped = skipped and res.get('skipped', False)
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['contacted'][host] = res
for (host, res) in results['dark'].iteritems():
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['dark'][host] = res
if not skipped:
if jid is None:
raise errors.AnsibleError("unexpected error: unable to determine jid")
if len(self.hosts_to_poll)==0:
raise errors.AnsibleError("unexpected error: no hosts to poll")
def poll(self):
""" Poll the job status.
Returns the changes in this iteration."""
self.runner.module_name = 'async_status'
self.runner.module_args = "jid={{ansible_job_id}}"
self.runner.pattern = "*"
self.runner.background = 0
self.runner.complex_args = None
self.runner.inventory.restrict_to(self.hosts_to_poll)
results = self.runner.run()
self.runner.inventory.lift_restriction()
hosts = []
poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
for (host, res) in results['contacted'].iteritems():
if res.get('started',False):
hosts.append(host)
poll_results['polled'][host] = res
else:
self.results['contacted'][host] = res
poll_results['contacted'][host] = res
if res.get('failed', False) or res.get('rc', 0) != 0:
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id'])
else:
self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id'])
for (host, res) in results['dark'].iteritems():
self.results['dark'][host] = res
poll_results['dark'][host] = res
if host in self.hosts_to_poll:
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX'))
self.hosts_to_poll = hosts
if len(hosts)==0:
self.completed = True
return poll_results
def wait(self, seconds, poll_interval):
""" Wait a certain time for job completion, check status every poll_interval. """
# jid is None when all hosts were skipped
if not self.active:
return self.results
clock = seconds - poll_interval
while (clock >= 0 and not self.completed):
time.sleep(poll_interval)
poll_results = self.poll()
for (host, res) in poll_results['polled'].iteritems():
if res.get('started'):
self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock)
clock = clock - poll_interval
return self.results
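# Usage sketch (hypothetical; assumes `runner` is a Runner whose run() was
# issued with a background timeout so that it starts asynchronous jobs):
#
#     results = runner.run()                      # kick off the async jobs
#     poller = AsyncPoller(results, runner)
#     final = poller.wait(seconds=60, poll_interval=5)
#     # `final` has the same {'contacted': ..., 'dark': ...} shape as run().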
| gpl-3.0 |
mvesper/invenio | modules/miscutil/lib/upgrades/invenio_2013_01_12_bibrec_master_format.py | 3 | 1422 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import warnings
from invenio.dbquery import run_sql
depends_on = ['invenio_release_1_1_0']
def info():
return "New bibrec.master_format column"
def do_upgrade():
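# Guard on the current schema so the upgrade is idempotent: the column is
# added only when it does not already exist.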
create_statement = run_sql('SHOW CREATE TABLE bibrec')[0][1]
if '`master_format` varchar(16)' not in create_statement:
run_sql("ALTER TABLE bibrec ADD COLUMN master_format varchar(16) NOT NULL default 'marc'")
try:
import pyparsing
except ImportError:
warnings.warn("Pyparsing is not installed in your machine!, please consider installing it")
def estimate():
return 1
def pre_upgrade():
pass
def post_upgrade():
pass
| gpl-2.0 |
Dandandan/wikiprogramming | jsrepl/extern/python/closured/lib/python2.7/encodings/cp737.py | 593 | 34937 | """ Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp737',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
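# Usage sketch (illustrative; the codec is normally looked up by name through
# the encodings package search function rather than imported directly):
#
#     u'\u0391\u03b2\u03b3'.encode('cp737')   # -> '\x80\x99\x9a'
#     '\x80\x99\x9a'.decode('cp737')          # -> u'\u0391\u03b2\u03b3'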
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x008b: 0x039c, # GREEK CAPITAL LETTER MU
0x008c: 0x039d, # GREEK CAPITAL LETTER NU
0x008d: 0x039e, # GREEK CAPITAL LETTER XI
0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
0x0099: 0x03b2, # GREEK SMALL LETTER BETA
0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
0x009e: 0x03b7, # GREEK SMALL LETTER ETA
0x009f: 0x03b8, # GREEK SMALL LETTER THETA
0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00a3: 0x03bc, # GREEK SMALL LETTER MU
0x00a4: 0x03bd, # GREEK SMALL LETTER NU
0x00a5: 0x03be, # GREEK SMALL LETTER XI
0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00a7: 0x03c0, # GREEK SMALL LETTER PI
0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
0x00af: 0x03c8, # GREEK SMALL LETTER PSI
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
u'\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
u'\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
u'\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
u'\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
u'\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
u'\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
u'\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
u'\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
u'\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
u'\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
u'\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
u'\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
u'\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
u'\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
u'\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
u'\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b7: 0x00fa, # MIDDLE DOT
0x00f7: 0x00f6, # DIVISION SIGN
0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x0081, # GREEK CAPITAL LETTER BETA
0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA
0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA
0x0397: 0x0086, # GREEK CAPITAL LETTER ETA
0x0398: 0x0087, # GREEK CAPITAL LETTER THETA
0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA
0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x008b, # GREEK CAPITAL LETTER MU
0x039d: 0x008c, # GREEK CAPITAL LETTER NU
0x039e: 0x008d, # GREEK CAPITAL LETTER XI
0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x008f, # GREEK CAPITAL LETTER PI
0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO
0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU
0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI
0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI
0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI
0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA
0x03b2: 0x0099, # GREEK SMALL LETTER BETA
0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA
0x03b4: 0x009b, # GREEK SMALL LETTER DELTA
0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON
0x03b6: 0x009d, # GREEK SMALL LETTER ZETA
0x03b7: 0x009e, # GREEK SMALL LETTER ETA
0x03b8: 0x009f, # GREEK SMALL LETTER THETA
0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA
0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00a3, # GREEK SMALL LETTER MU
0x03bd: 0x00a4, # GREEK SMALL LETTER NU
0x03be: 0x00a5, # GREEK SMALL LETTER XI
0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00a7, # GREEK SMALL LETTER PI
0x03c1: 0x00a8, # GREEK SMALL LETTER RHO
0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ab, # GREEK SMALL LETTER TAU
0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00ad, # GREEK SMALL LETTER PHI
0x03c7: 0x00ae, # GREEK SMALL LETTER CHI
0x03c8: 0x00af, # GREEK SMALL LETTER PSI
0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
Bushstar/UFO-Project | test/functional/wallet_listtransactions.py | 2 | 10588 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from decimal import Decimal
from io import BytesIO
from test_framework.messages import COIN, CTransaction
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
bytes_to_hex_str,
hex_str_to_bytes,
sync_mempools,
)
def tx_from_hex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.enable_mocktime()
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "send"},
{"amount": Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "receive"},
{"amount": Decimal("0.2")})
# sendmany from node1: twice to self, twice to node0:
send_to = {self.nodes[0].getnewaddress(): 0.11,
self.nodes[1].getnewaddress(): 0.22,
self.nodes[0].getnewaddress(): 0.33,
self.nodes[1].getnewaddress(): 0.44}
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.11")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.11")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.33")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.33")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.44")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.44")},
{"txid": txid})
pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
multisig = self.nodes[1].createmultisig(1, [pubkey])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert not [tx for tx in self.nodes[0].listtransactions(dummy="*", count=100, skip=0, include_watchonly=False) if "label" in tx and tx["label"] == "watchonly"]
txs = [tx for tx in self.nodes[0].listtransactions(dummy="*", count=100, skip=0, include_watchonly=True) if "label" in tx and tx['label'] == 'watchonly']
assert_array_result(txs, {"category": "receive", "amount": Decimal("0.1")}, {"txid": txid})
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
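# Per BIP125, a transaction signals replaceability if any of its inputs
# has an nSequence value below 0xfffffffe.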
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert not is_opt_in(self.nodes[0], txid_1)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert not is_opt_in(self.nodes[1], txid_2)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = tx_from_hex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert is_opt_in(self.nodes[0], txid_3)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert not is_opt_in(self.nodes[1], txid_4)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert is_opt_in(self.nodes[0], txid_3b)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert txid_3b not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| mit |
woylaski/notebook | graphic/kivy-master/kivy/config.py | 2 | 29648 | '''
Configuration object
====================
The :class:`Config` object is an instance of a modified Python ConfigParser.
See the `ConfigParser documentation
<http://docs.python.org/library/configparser.html>`_ for more information.
Kivy has a configuration file which determines the default settings. In
order to change these settings, you can alter this file manually or use
the Config object. Please see the :ref:`Configure Kivy` section for more
information.
Note: To avoid situations where config settings are not applied or do not
take effect before window creation (such as setting an initial window
size), Config.set should be used before importing any modules that affect
the application window (i.e. importing Window). Ideally, these settings
should be declared right at the start of your main.py script.
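For example, to request an initial window size before the window is created
(the token values here are illustrative)::
>>> from kivy.config import Config
>>> Config.set('graphics', 'width', '1280')
>>> Config.set('graphics', 'height', '720')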
Usage of the Config object
--------------------------
To read a configuration token from a particular section::
>>> from kivy.config import Config
>>> Config.getint('kivy', 'show_fps')
0
Change the configuration and save it::
>>> Config.set('postproc', 'retain_time', '50')
>>> Config.write()
.. versionchanged:: 1.7.1
The ConfigParser should work correctly with utf-8 now. The values are
converted from ascii to unicode only when needed. The method get() returns
utf-8 strings.
.. _configuration-tokens:
Available configuration tokens
------------------------------
.. |log_levels| replace:: 'debug', 'info', 'warning', 'error' or 'critical'
:kivy:
`desktop`: int, 0 or 1
This option controls desktop OS specific features, such as enabling
drag-able scroll-bar in scroll views, disabling of bubbles in
TextInput etc. 0 is disabled, 1 is enabled.
`exit_on_escape`: int, 0 or 1
Enables exiting kivy when escape is pressed.
0 is disabled, 1 is enabled.
`pause_on_minimize`: int, 0 or 1
If set to `1`, the main loop is paused and the `on_pause` event
is dispatched when the window is minimized. This option is intended
for desktop use only. Defaults to `0`.
`keyboard_layout`: string
Identifier of the layout to use.
`keyboard_mode`: string
Specifies the keyboard mode to use. It can be one of the following:
* '' - Let Kivy choose the best option for your current platform.
* 'system' - real keyboard.
* 'dock' - one virtual keyboard docked to a screen side.
* 'multi' - one virtual keyboard for every widget request.
* 'systemanddock' - virtual docked keyboard plus input from real
keyboard.
* 'systemandmulti' - analogous.
`log_dir`: string
Path of log directory.
`log_enable`: int, 0 or 1
Activate file logging. 0 is disabled, 1 is enabled.
`log_level`: string, one of |log_levels|
Set the minimum log level to use.
`log_name`: string
Format string to use for the filename of log file.
`window_icon`: string
Path of the window icon. Use this if you want to replace the default
pygame icon.
:postproc:
`double_tap_distance`: float
Maximum distance allowed for a double tap, normalized inside the range
0 - 1000.
`double_tap_time`: int
Time allowed for the detection of double tap, in milliseconds.
`ignore`: list of tuples
List of regions where new touches are ignored.
This configuration token can be used to resolve hotspot problems
with DIY hardware. The format of the list must be::
ignore = [(xmin, ymin, xmax, ymax), ...]
All the values must be inside the range 0 - 1.
`jitter_distance`: int
Maximum distance for jitter detection, normalized inside the range 0
- 1000.
`jitter_ignore_devices`: string, separated with commas
List of devices to ignore from jitter detection.
`retain_distance`: int
If the touch moves more than is indicated by retain_distance, it will
not be retained. Argument should be an int between 0 and 1000.
`retain_time`: int
Time allowed for a retain touch, in milliseconds.
`triple_tap_distance`: float
Maximum distance allowed for a triple tap, normalized inside the range
0 - 1000.
`triple_tap_time`: int
Time allowed for the detection of triple tap, in milliseconds.
:graphics:
`borderless`: int, one of 0 or 1
If set to `1`, removes the window border/decoration.
`fbo`: string, one of 'hardware', 'software' or 'force-hardware'
Selects the FBO backend to use.
`fullscreen`: int or string, one of 0, 1, 'fake' or 'auto'
Activate fullscreen. If set to `1`, a resolution of `width`
times `height` pixels will be used.
If set to `auto`, your current display's resolution will be
used instead. This is most likely what you want.
If you want to place the window in another display,
use `fake`, or set the `borderless` option from the graphics section,
then adjust `width`, `height`, `top` and `left`.
`height`: int
Height of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
`left`: int
Left position of the :class:`~kivy.core.window.Window`.
`maxfps`: int, defaults to 60
Maximum FPS allowed.
`multisamples`: int, defaults to 2
Sets the `MultiSample Anti-Aliasing (MSAA)
<http://en.wikipedia.org/wiki/Multisample_anti-aliasing>`_ level.
Increasing this value results in smoother graphics but at the cost of
processing time.
.. note::
This feature is limited by device hardware support and will have no
effect on devices which do not support the level of MSAA requested.
`position`: string, one of 'auto' or 'custom'
Position of the window on your display. If `auto` is used, you have no
control of the initial position: `top` and `left` are ignored.
`show_cursor`: int, one of 0 or 1
Show the cursor on the screen.
`top`: int
Top position of the :class:`~kivy.core.window.Window`.
`resizable`: int, one of 0 or 1
If 0, the window will have a fixed size. If 1, the window will be
resizable.
`rotation`: int, one of 0, 90, 180 or 270
Rotation of the :class:`~kivy.core.window.Window`.
`width`: int
Width of the :class:`~kivy.core.window.Window`, not used if
`fullscreen` is set to `auto`.
:input:
You can create new input devices using this syntax::
# example of input provider instance
yourid = providerid,parameters
# example for tuio provider
default = tuio,127.0.0.1:3333
mytable = tuio,192.168.0.1:3334
.. seealso::
Check the providers in kivy.input.providers for the syntax to use
inside the configuration file.
:widgets:
`scroll_distance`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_distance`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_friction`: float
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_friction`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_timeout`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_timeout`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
`scroll_stoptime`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_stoptime`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
`scroll_moves`: int
Default value of the
:attr:`~kivy.uix.scrollview.ScrollView.scroll_moves`
property used by the :class:`~kivy.uix.scrollview.ScrollView` widget.
Check the widget documentation for more information.
.. deprecated:: 1.7.0
Please use
:class:`~kivy.uix.scrollview.ScrollView.effect_cls` instead.
:modules:
You can activate modules with this syntax::
modulename =
Anything after the = will be passed to the module as arguments.
Check the specific module's documentation for a list of accepted
arguments.
.. note::
These options control only the initialization of the app and a restart
is required for value changes to take effect.
.. versionchanged:: 1.9.0
`borderless` has been added to the graphics section.
The `fake` option of `fullscreen` in the graphics section has been
deprecated, use the `borderless` option instead.
`pause_on_minimize` has been added to the kivy section.
.. versionchanged:: 1.8.0
`systemanddock` and `systemandmulti` have been added as possible values for
`keyboard_mode` in the kivy section. `exit_on_escape` has been added
to the kivy section.
.. versionchanged:: 1.2.0
`resizable` has been added to the graphics section.
.. versionchanged:: 1.1.0
tuio no longer listens by default. Window icons are not copied to the
user directory anymore. You can still set a new window icon by using the
``window_icon`` config setting.
.. versionchanged:: 1.0.8
`scroll_timeout`, `scroll_distance` and `scroll_friction` have been added.
`list_friction`, `list_trigger_distance` and `list_friction_bound`
have been removed. `keyboard_type` and `keyboard_layout` have been
removed from the widget. `keyboard_mode` and `keyboard_layout` have
been added to the kivy section.
'''
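# Example (a minimal sketch of typical usage; values are always stored as
# strings in the config file):
#
#     from kivy.config import Config
#     Config.set('graphics', 'width', '1024')
#     Config.getint('graphics', 'width')   # -> 1024
#     Config.write()                       # persist to the config file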
__all__ = ('Config', 'ConfigParser')
try:
from ConfigParser import ConfigParser as PythonConfigParser
except ImportError:
from configparser import RawConfigParser as PythonConfigParser
from os import environ
from os.path import exists
from kivy import kivy_config_fn
from kivy.logger import Logger, logger_config_update
from collections import OrderedDict
from kivy.utils import platform
from kivy.compat import PY2, string_types
from weakref import ref
_is_rpi = exists('/opt/vc/include/bcm_host.h')
# Version number of current configuration format
KIVY_CONFIG_VERSION = 12
Config = None
'''Kivy configuration object. Its :attr:`~kivy.config.ConfigParser.name` is
`'kivy'`
'''
class ConfigParser(PythonConfigParser, object):
'''Enhanced ConfigParser class that supports the addition of default
sections and default values.
By default, the kivy ConfigParser instance, :attr:`~kivy.config.Config`,
is given the name `'kivy'` and the ConfigParser instance used by App,
:meth:`~kivy.app.App.build_settings`, is given the name `'app'`.
:Parameters:
`name`: string
The name of the instance. See :attr:`name`. Defaults to `''`.
.. versionchanged:: 1.9.0
Each ConfigParser can now be named, :attr:`name`. You can get the
ConfigParser associated with a name using :meth:`get_configparser`.
In addition, you can now control the config values with
:class:`~kivy.properties.ConfigParserProperty`.
.. versionadded:: 1.0.7
'''
def __init__(self, name=''):
PythonConfigParser.__init__(self)
self._sections = OrderedDict()
self.filename = None
self._callbacks = []
self.name = name
def add_callback(self, callback, section=None, key=None):
'''Add a callback to be called when a specific section/key changes. If
you don't specify a section or a key, the callback will be called for
all section/key changes.
Callbacks will receive 3 arguments: the section, key and value.
.. versionadded:: 1.4.1
'''
if section is None and key is not None:
raise Exception('You cannot specify a key without a section')
self._callbacks.append((callback, section, key))
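# Example (sketch, hypothetical callback): react whenever the 'log_level'
# key of the 'kivy' section changes.
#
#     def on_log_level(section, key, value):
#         print(section, key, value)
#     Config.add_callback(on_log_level, 'kivy', 'log_level')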
def remove_callback(self, callback, section=None, key=None):
'''Removes a callback added with :meth:`add_callback`.
:meth:`remove_callback` must be called with the same parameters as
:meth:`add_callback`.
Raises a `ValueError` if not found.
.. versionadded:: 1.9.0
'''
self._callbacks.remove((callback, section, key))
def _do_callbacks(self, section, key, value):
for callback, csection, ckey in self._callbacks:
if csection is not None and csection != section:
continue
elif ckey is not None and ckey != key:
continue
callback(section, key, value)
def read(self, filename):
'''Read only one filename. In contrast to the original ConfigParser of
Python, this one is able to read only one file at a time. The last
read file will be used for the :meth:`write` method.
.. versionchanged:: 1.9.0
:meth:`read` now calls the callbacks if read changed any values.
'''
if not isinstance(filename, string_types):
raise Exception('Only one filename is accepted ({})'.format(
string_types.__name__))
self.filename = filename
# If we try to open the configuration file directly in utf-8,
# we correctly get the unicode values by default.
# But, when we try to save it again, all the values we didn't change
# are still unicode, and then the PythonConfigParser internals do
# a str() conversion -> fail.
# Instead we currently do the conversion to utf-8 when values are
# "get()", but we internally store them in ascii.
#with codecs.open(filename, 'r', encoding='utf-8') as f:
# self.readfp(f)
old_vals = {sect: {k: v for k, v in self.items(sect)} for sect in
self.sections()}
PythonConfigParser.read(self, filename)
# when reading new file, sections/keys are only increased, not removed
f = self._do_callbacks
for section in self.sections():
if section not in old_vals: # new section
for k, v in self.items(section):
f(section, k, v)
continue
old_keys = old_vals[section]
for k, v in self.items(section): # just update new/changed keys
if k not in old_keys or v != old_keys[k]:
f(section, k, v)
def set(self, section, option, value):
'''Functions similarly to PythonConfigParser's set method, except that
the value is implicitly converted to a string.
'''
e_value = value
if not isinstance(value, string_types):
# might be boolean, int, etc.
e_value = str(value)
if PY2:
if isinstance(value, unicode):
e_value = value.encode('utf-8')
ret = PythonConfigParser.set(self, section, option, e_value)
self._do_callbacks(section, option, value)
return ret
def setall(self, section, keyvalues):
'''Set a lot of keys/values in one section at the same time.
'''
for key, value in keyvalues.items():
self.set(section, key, value)
def get(self, section, option, **kwargs):
value = PythonConfigParser.get(self, section, option, **kwargs)
if PY2:
if type(value) is str:
return value.decode('utf-8')
return value
def setdefaults(self, section, keyvalues):
'''Set a lot of keys/value defaults in one section at the same time.
'''
self.adddefaultsection(section)
for key, value in keyvalues.items():
self.setdefault(section, key, value)
def setdefault(self, section, option, value):
'''Set the default value of a particular option.
'''
if self.has_option(section, option):
return
self.set(section, option, value)
def getdefault(self, section, option, defaultvalue):
'''Get an option. If not found, it will return the default value.
'''
if not self.has_section(section):
return defaultvalue
if not self.has_option(section, option):
return defaultvalue
return self.get(section, option)
def getdefaultint(self, section, option, defaultvalue):
'''Get an option. If not found, it will return the default value.
The return value will always be converted to an integer.
.. versionadded:: 1.6.0
'''
return int(self.getdefault(section, option, defaultvalue))
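# Example (sketch): getdefault/getdefaultint never raise for missing
# sections or options, they fall back to the supplied default.
#
#     Config.getdefault('graphics', 'no_such_option', 'fallback')
#     Config.getdefaultint('kivy', 'config_version', 0)  # -> int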
def adddefaultsection(self, section):
'''Add a section if the section is missing.
'''
if self.has_section(section):
return
self.add_section(section)
def write(self):
'''Write the configuration to the last file opened using the
:meth:`read` method.
Return True if the write finished successfully.
'''
if self.filename is None:
return False
try:
with open(self.filename, 'w') as fd:
PythonConfigParser.write(self, fd)
except IOError:
Logger.exception('Unable to write the config <%s>' % self.filename)
return False
return True
def update_config(self, filename, overwrite=False):
'''Upgrade the configuration based on a new default config file.
Overwrite any existing values if overwrite is True.
'''
pcp = PythonConfigParser()
pcp.read(filename)
confset = self.setall if overwrite else self.setdefaults
for section in pcp.sections():
confset(section, dict(pcp.items(section)))
self.write()
@staticmethod
def _register_named_property(name, widget_ref, *largs):
''' Called by the ConfigParserProperty to register a property which
was created with a config name instead of a config object.
When a ConfigParser with this name is later created, the properties
are then notified that this parser now exists so they can use it.
If the parser already exists, the property is notified here. See
:meth:`~kivy.properties.ConfigParserProperty.set_config`.
:Parameters:
`name`: a non-empty string
The name of the ConfigParser that is associated with the
property. See :attr:`name`.
`widget_ref`: 2-tuple.
The first element is a reference to the widget containing the
property, the second element is the name of the property. E.g.:
class House(Widget):
address = ConfigParserProperty('', 'info', 'street',
'directory')
Then, the first element is a ref to a House instance, and the
second is `'address'`.
'''
configs = ConfigParser._named_configs
try:
config, props = configs[name]
except KeyError:
configs[name] = (None, [widget_ref])
return
props.append(widget_ref)
if config:
config = config()
widget = widget_ref[0]()
if config and widget: # associate this config with property
widget.property(widget_ref[1]).set_config(config)
@staticmethod
def get_configparser(name):
'''Returns the :class:`ConfigParser` instance whose name is `name`, or
None if not found.
:Parameters:
`name`: string
The name of the :class:`ConfigParser` instance to return.
'''
try:
config = ConfigParser._named_configs[name][0]
return config() if config else None
except KeyError:
return None
# keys are configparser names, values are 2-tuple of (ref(configparser),
# widget_ref), where widget_ref is same as in _register_named_property
_named_configs = {}
_name = ''
@property
def name(self):
''' The name associated with this ConfigParser instance, if not `''`.
Defaults to `''`. It can safely be changed dynamically or set to `''`.
When a ConfigParser is given a name, that config object can be
retrieved using :meth:`get_configparser`. In addition, that config
instance can also be used with a
:class:`~kivy.properties.ConfigParserProperty` instance that set its
`config` value to this name.
Setting more than one ConfigParser with the same name will raise a
`ValueError`.
'''
return self._name
@name.setter
def name(self, value):
old_name = self._name
if value is old_name:
return
self._name = value
configs = ConfigParser._named_configs
if old_name: # disconnect this parser from previously connected props
_, props = configs.get(old_name, (None, []))
for widget, prop in props:
widget = widget()
if widget:
widget.property(prop).set_config(None)
configs[old_name] = (None, props)
if not value:
return
# if given new name, connect it with property that used this name
try:
config, props = configs[value]
except KeyError:
configs[value] = (ref(self), [])
return
if config is not None:
raise ValueError('A parser named {} already exists'.format(value))
for widget, prop in props:
widget = widget()
if widget:
widget.property(prop).set_config(self)
configs[value] = (ref(self), props)
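# Example (sketch): naming a parser makes it retrievable by name; two
# live parsers may not share one name.
#
#     cp = ConfigParser(name='app')
#     assert ConfigParser.get_configparser('app') is cp
#     ConfigParser(name='app')  # -> ValueError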
if not environ.get('KIVY_DOC_INCLUDE'):
#
# Read, analyse configuration file
# Support upgrade of older config file versions
#
# Create default configuration
Config = ConfigParser(name='kivy')
Config.add_callback(logger_config_update, 'kivy', 'log_level')
# Read config file if exist
if (exists(kivy_config_fn) and
'KIVY_USE_DEFAULTCONFIG' not in environ and
'KIVY_NO_CONFIG' not in environ):
try:
Config.read(kivy_config_fn)
except Exception as e:
Logger.exception('Core: error while reading local '
'configuration')
version = Config.getdefaultint('kivy', 'config_version', 0)
# Add defaults section
Config.adddefaultsection('kivy')
Config.adddefaultsection('graphics')
Config.adddefaultsection('input')
Config.adddefaultsection('postproc')
Config.adddefaultsection('widgets')
Config.adddefaultsection('modules')
# Upgrade default configuration until we have the current version
need_save = False
if version != KIVY_CONFIG_VERSION and 'KIVY_NO_CONFIG' not in environ:
Logger.warning('Config: Older configuration version detected'
' ({0} instead of {1})'.format(
version, KIVY_CONFIG_VERSION))
Logger.warning('Config: Upgrading configuration in progress.')
need_save = True
while version < KIVY_CONFIG_VERSION:
Logger.debug('Config: Upgrading from %d to %d' %
(version, version + 1))
if version == 0:
# log level
Config.setdefault('kivy', 'keyboard_repeat_delay', '300')
Config.setdefault('kivy', 'keyboard_repeat_rate', '30')
Config.setdefault('kivy', 'log_dir', 'logs')
Config.setdefault('kivy', 'log_enable', '1')
Config.setdefault('kivy', 'log_level', 'info')
Config.setdefault('kivy', 'log_name', 'kivy_%y-%m-%d_%_.txt')
Config.setdefault('kivy', 'window_icon', '')
# default graphics parameters
Config.setdefault('graphics', 'display', '-1')
Config.setdefault('graphics', 'fullscreen', 'no')
Config.setdefault('graphics', 'height', '600')
Config.setdefault('graphics', 'left', '0')
Config.setdefault('graphics', 'maxfps', '0')
Config.setdefault('graphics', 'multisamples', '2')
Config.setdefault('graphics', 'position', 'auto')
Config.setdefault('graphics', 'rotation', '0')
Config.setdefault('graphics', 'show_cursor', '1')
Config.setdefault('graphics', 'top', '0')
Config.setdefault('graphics', 'vsync', '1')
Config.setdefault('graphics', 'width', '800')
# input configuration
Config.setdefault('input', 'mouse', 'mouse')
# activate native input provider in configuration
# from 1.0.9, don't activate mactouch by default, or apps are
# unusable.
if platform == 'win':
Config.setdefault('input', 'wm_touch', 'wm_touch')
Config.setdefault('input', 'wm_pen', 'wm_pen')
elif platform == 'linux':
probesysfs = 'probesysfs'
if _is_rpi:
probesysfs += ',provider=hidinput'
Config.setdefault('input', '%(name)s', probesysfs)
# input postprocessing configuration
Config.setdefault('postproc', 'double_tap_distance', '20')
Config.setdefault('postproc', 'double_tap_time', '250')
Config.setdefault('postproc', 'ignore', '[]')
Config.setdefault('postproc', 'jitter_distance', '0')
Config.setdefault('postproc', 'jitter_ignore_devices',
'mouse,mactouch,')
Config.setdefault('postproc', 'retain_distance', '50')
Config.setdefault('postproc', 'retain_time', '0')
# default configuration for keyboard repetition
Config.setdefault('widgets', 'keyboard_layout', 'qwerty')
Config.setdefault('widgets', 'keyboard_type', '')
Config.setdefault('widgets', 'list_friction', '10')
Config.setdefault('widgets', 'list_friction_bound', '20')
Config.setdefault('widgets', 'list_trigger_distance', '5')
elif version == 1:
Config.remove_option('graphics', 'vsync')
Config.set('graphics', 'maxfps', '60')
elif version == 2:
# was a version to automatically copy windows icon in the user
# directory, but it's now not used anymore. User can still change
# the window icon by touching the config.
pass
elif version == 3:
# add token for scrollview
Config.setdefault('widgets', 'scroll_timeout', '55')
Config.setdefault('widgets', 'scroll_distance', '20')
Config.setdefault('widgets', 'scroll_friction', '1.')
# remove old list_* token
Config.remove_option('widgets', 'list_friction')
Config.remove_option('widgets', 'list_friction_bound')
Config.remove_option('widgets', 'list_trigger_distance')
elif version == 4:
Config.remove_option('widgets', 'keyboard_type')
Config.remove_option('widgets', 'keyboard_layout')
# add keyboard token
Config.setdefault('kivy', 'keyboard_mode', '')
Config.setdefault('kivy', 'keyboard_layout', 'qwerty')
elif version == 5:
Config.setdefault('graphics', 'resizable', '1')
elif version == 6:
# if the timeout is still the default value, change it
Config.setdefault('widgets', 'scroll_stoptime', '300')
Config.setdefault('widgets', 'scroll_moves', '5')
elif version == 7:
# desktop bool indicating whether to use desktop specific features
is_desktop = int(platform in ('win', 'macosx', 'linux'))
Config.setdefault('kivy', 'desktop', is_desktop)
Config.setdefault('postproc', 'triple_tap_distance', '20')
Config.setdefault('postproc', 'triple_tap_time', '375')
elif version == 8:
if Config.getint('widgets', 'scroll_timeout') == 55:
Config.set('widgets', 'scroll_timeout', '250')
elif version == 9:
Config.setdefault('kivy', 'exit_on_escape', '1')
elif version == 10:
Config.set('graphics', 'fullscreen', '0')
Config.setdefault('graphics', 'borderless', '0')
elif version == 11:
Config.setdefault('kivy', 'pause_on_minimize', '0')
#elif version == 1:
# # add here the command for upgrading from configuration 0 to 1
#
else:
# for future.
break
# Pass to the next version
version += 1
# Indicate to the Config that we've upgraded to the latest version.
Config.set('kivy', 'config_version', KIVY_CONFIG_VERSION)
# Now, activate log file
Logger.logfile_activated = bool(Config.getint('kivy', 'log_enable'))
# If no configuration exist, write the default one.
if ((not exists(kivy_config_fn) or need_save) and
'KIVY_NO_CONFIG' not in environ):
try:
Config.filename = kivy_config_fn
Config.write()
except Exception as e:
Logger.exception('Core: Error while saving default config file')
| gpl-3.0 |
ItsCalebJones/SpaceLaunchNow-Server | bot/migrations/0002_auto_20181106_0056.py | 1 | 2437 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-06 05:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='newsitem',
name='featured_image',
field=models.CharField(blank=True, default='', max_length=1048, null=True),
),
migrations.AlterField(
model_name='newsitem',
name='link',
field=models.CharField(blank=True, default='', max_length=1048, null=True),
),
migrations.AlterField(
model_name='newsitem',
name='news_site',
field=models.CharField(blank=True, default='', max_length=1048, null=True),
),
migrations.AlterField(
model_name='newsitem',
name='title',
field=models.CharField(max_length=1048),
),
migrations.AlterField(
model_name='redditsubmission',
name='link',
field=models.CharField(blank=True, default='', max_length=1048, null=True),
),
migrations.AlterField(
model_name='redditsubmission',
name='permalink',
field=models.CharField(max_length=1048),
),
migrations.AlterField(
model_name='redditsubmission',
name='thumbnail',
field=models.CharField(blank=True, default='', max_length=1048, null=True),
),
migrations.AlterField(
model_name='redditsubmission',
name='title',
field=models.CharField(blank=True, default='', max_length=1048, null=True),
),
migrations.AlterField(
model_name='subreddit',
name='name',
field=models.CharField(max_length=1048),
),
migrations.AlterField(
model_name='twitteruser',
name='name',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='twitteruser',
name='profile_image',
field=models.CharField(max_length=1048),
),
migrations.AlterField(
model_name='twitteruser',
name='screen_name',
field=models.CharField(max_length=255),
),
]
| apache-2.0 |
Learningtribes/edx-platform | lms/djangoapps/course_wiki/utils.py | 204 | 3623 | """
Utility functions for course_wiki.
"""
from django.core.exceptions import ObjectDoesNotExist
from xmodule import modulestore
import courseware
def user_is_article_course_staff(user, article):
"""
The root of a course wiki is /<course_number>. This means in case there
are two courses which have the same course_number they will end up with
the same course wiki root e.g. MITx/Phy101/Spring and HarvardX/Phy101/Fall
will share /Phy101.
This looks at the course wiki root of the article and returns True if
the user belongs to a group whose name starts with 'instructor_' or
'staff_' and contains '/<course_wiki_root_slug>/'. So if the user is
staff on course MITx/Phy101/Spring they will be in
'instructor_MITx/Phy101/Spring' or 'staff_MITx/Phy101/Spring' groups and
so this will return True.
"""
wiki_slug = article_course_wiki_root_slug(article)
if wiki_slug is None:
return False
modstore = modulestore.django.modulestore()
return _has_wiki_staff_access(user, wiki_slug, modstore)
def _has_wiki_staff_access(user, wiki_slug, modstore):
"""Returns whether the user has staff access to the wiki represented by wiki_slug"""
course_keys = modstore.get_courses_for_wiki(wiki_slug)
# The wiki expects article slugs to contain at least one non-digit so if
# the course number is just a number the course wiki root slug is set to
# be '<course_number>_'. This means slug '202_' can belong to either
# course numbered '202_' or '202' and so we need to consider both.
if wiki_slug.endswith('_') and slug_is_numerical(wiki_slug[:-1]):
course_keys.extend(modstore.get_courses_for_wiki(wiki_slug[:-1]))
for course_key in course_keys:
course = modstore.get_course(course_key)
if courseware.access.has_access(user, 'staff', course, course_key):
return True
return False
def slug_is_numerical(slug):
"""Returns whether the slug can be interpreted as a number."""
try:
float(slug)
except ValueError:
return False
return True
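# Example (sketch): course number '6002' is numerical, so course_wiki_slug
# below appends an underscore -> '6002_'; slug_is_numerical('6002') is True
# while slug_is_numerical('Phy101') is False.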
def course_wiki_slug(course):
"""Returns the slug for the course wiki root."""
slug = course.wiki_slug
# Django-wiki expects article slug to be non-numerical. In case the
# course number is numerical append an underscore.
if slug_is_numerical(slug):
slug = slug + "_"
return slug
def article_course_wiki_root_slug(article):
"""
We assume the second level ancestor is the course wiki root. Examples:
/ returns None
/Phy101 returns 'Phy101'
/Phy101/Mechanics returns 'Phy101'
/Chem101/Metals/Iron returns 'Chem101'
Note that someone can create an article /some-random-article/sub-article on the
wiki. In this case this function will return 'some-random-article' even
if no course with course number 'some-random-article' exists.
"""
try:
urlpath = article.urlpath_set.get()
except ObjectDoesNotExist:
return None
# Ancestors of /Phy101/Mechanics/Acceleration/ is a list of URLPaths
# ['Root', 'Phy101', 'Mechanics']
ancestors = urlpath.cached_ancestors
course_wiki_root_urlpath = None
if len(ancestors) == 0: # It is the wiki root article.
course_wiki_root_urlpath = None
elif len(ancestors) == 1: # It is a course wiki root article.
course_wiki_root_urlpath = urlpath
else: # It is an article inside a course wiki.
course_wiki_root_urlpath = ancestors[1]
if course_wiki_root_urlpath is not None:
return course_wiki_root_urlpath.slug
return None
| agpl-3.0 |
w1ll1am23/home-assistant | homeassistant/components/huawei_lte/notify.py | 3 | 1969 | """Support for Huawei LTE router notifications."""
from __future__ import annotations
import logging
import time
from typing import Any
import attr
from huawei_lte_api.exceptions import ResponseErrorException
from homeassistant.components.notify import ATTR_TARGET, BaseNotificationService
from homeassistant.const import CONF_RECIPIENT, CONF_URL
from homeassistant.helpers.typing import HomeAssistantType
from . import Router
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_get_service(
hass: HomeAssistantType,
config: dict[str, Any],
discovery_info: dict[str, Any] | None = None,
) -> HuaweiLteSmsNotificationService | None:
"""Get the notification service."""
if discovery_info is None:
return None
router = hass.data[DOMAIN].routers[discovery_info[CONF_URL]]
default_targets = discovery_info[CONF_RECIPIENT] or []
return HuaweiLteSmsNotificationService(router, default_targets)
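# Example (sketch, hypothetical values): once the integration is set up,
# the notify service can be called from an automation roughly like:
#
#     service: notify.huawei_lte   # actual service name depends on setup
#     data:
#       message: "Gate opened"
#       target: "+15551234567"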
@attr.s
class HuaweiLteSmsNotificationService(BaseNotificationService):
"""Huawei LTE router SMS notification service."""
router: Router = attr.ib()
default_targets: list[str] = attr.ib()
def send_message(self, message: str = "", **kwargs: Any) -> None:
"""Send message to target numbers."""
targets = kwargs.get(ATTR_TARGET, self.default_targets)
if not targets or not message:
return
if self.router.suspended:
_LOGGER.debug(
"Integration suspended, not sending notification to %s", targets
)
return
try:
resp = self.router.client.sms.send_sms(
phone_numbers=targets, message=message
)
_LOGGER.debug("Sent to %s: %s", targets, resp)
except ResponseErrorException as ex:
_LOGGER.error("Could not send to %s: %s", targets, ex)
finally:
self.router.notify_last_attempt = time.monotonic()
| apache-2.0 |
NashDisequilibrium/00_Conference_Central | conference.py | 1 | 21763 | #!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from utils import getUserId
from settings import WEB_CLIENT_ID
from google.appengine.api import memcache
from models import StringMessage
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
# generate Profile Key based on user ID and Conference
# ID based on Profile key get Conference key from ID
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email')
# TODO 2: add confirmation email sending task to queue
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
)
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
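# Example (sketch): a filter with field 'CITY', operator 'EQ' and value
# 'London' is formatted to {'field': 'city', 'operator': '=',
# 'value': 'London'}; mixing inequalities on two different fields (say
# MONTH > 6 and MAX_ATTENDEES < 10) raises BadRequestException, mirroring
# the Datastore single-inequality restriction.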
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
conferences]
)
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
# if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Registration - - - - - - - - - - - - - - - - - - - -
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
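# NOTE: _conferenceRegistration runs as a cross-group transaction
# (xg=True) because it atomically mutates two entity groups: the user's
# Profile and the Conference whose seat count changes.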
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\
for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = '%s %s' % (
'Last chance to attend! The following conferences '
'are nearly sold out:',
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
# TODO 1
# return an existing announcement from Memcache or an empty string.
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
if not announcement:
announcement = ""
return StringMessage(data=announcement)
# - - - Sessions - - - - - - - - - - - - - - - - - - - -
# NOTE: the session endpoints below are sketches. They assume a `Session`
# ndb model stored as a child of its Conference, `SessionForm`/`SessionForms`
# messages, a `_copySessionToForm` helper and the request containers
# SESSION_TYPE_GET_REQUEST / SPEAKER_GET_REQUEST / SESSION_POST_REQUEST
# (all hypothetical, to be defined at module level like CONF_GET_REQUEST);
# none of these exist in this project yet.
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Get all sessions of a particular conference."""
# an ancestor query returns the conference's child sessions
conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
sessions = Session.query(ancestor=conf_key)
return SessionForms(
items=[self._copySessionToForm(s) for s in sessions])
@endpoints.method(SESSION_TYPE_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions/{typeOfSession}',
http_method='GET', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Get a conference's sessions of a given type (e.g. 'Workshop')."""
conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
sessions = Session.query(ancestor=conf_key).filter(
Session.session_type == request.typeOfSession).order(Session.name)
return SessionForms(
items=[self._copySessionToForm(s) for s in sessions])
@endpoints.method(SPEAKER_GET_REQUEST, SessionForms,
path='sessions/speaker/{speaker}',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Get all sessions given by a particular speaker, across conferences."""
sessions = Session.query().filter(
Session.speaker == request.speaker).order(Session.name)
return SessionForms(
items=[self._copySessionToForm(s) for s in sessions])
@endpoints.method(SESSION_POST_REQUEST, SessionForm,
path='conference/{websafeConferenceKey}/session',
http_method='POST', name='createSession')
def createSession(self, request):
"""Create a new session; open only to the conference organizer."""
# a _createSessionObject helper (hypothetical) would check the user,
# build the Session as a child of the conference key and return it
return self._createSessionObject(request)
api = endpoints.api_server([ConferenceApi])  # register API | apache-2.0 |
ctb/cvxpy | cvxpy/atoms/total_variation.py | 11 | 2111 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.expressions.expression import Expression
from cvxpy.atoms.norm import norm
from cvxpy.atoms.elementwise.norm2_elemwise import norm2_elemwise
from cvxpy.atoms.affine.reshape import reshape
from cvxpy.atoms.affine.sum_entries import sum_entries
def tv(value, *args):
"""Total variation of a vector, matrix, or list of matrices.
Uses L1 norm of discrete gradients for vectors and
L2 norm of discrete gradients for matrices.
Parameters
----------
value : Expression or numeric constant
The value to take the total variation of.
args : Matrix constants/expressions
Additional matrices extending the third dimension of value.
Returns
-------
Expression
An Expression representing the total variation.
"""
value = Expression.cast_to_const(value)
rows, cols = value.size
if value.is_scalar():
raise ValueError("tv cannot take a scalar argument.")
# L1 norm for vectors.
elif value.is_vector():
return norm(value[1:] - value[0:max(rows, cols)-1], 1)
# L2 norm for matrices.
else:
args = map(Expression.cast_to_const, args)
values = [value] + list(args)
diffs = []
for mat in values:
diffs += [
mat[0:rows-1, 1:cols] - mat[0:rows-1, 0:cols-1],
mat[1:rows, 0:cols-1] - mat[0:rows-1, 0:cols-1],
]
return sum_entries(norm2_elemwise(*diffs))
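# Example (a minimal sketch): tv as a regularizer in an image-denoising
# problem; `noisy` is a hypothetical 2-D numpy array of pixel data.
#
#     import cvxpy
#     x = cvxpy.Variable(*noisy.shape)
#     prob = cvxpy.Problem(cvxpy.Minimize(
#         cvxpy.sum_squares(x - noisy) + 0.5 * tv(x)))
#     prob.solve()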
| gpl-3.0 |
niceandcoolusername/cosmos | code/string-algorithms/manachar_algorithm/manachar_longest_palindromic_subs.py | 5 | 1045 | # Part of Cosmos by OpenGenus Foundation
def get_palindrome_length(string, index):
length = 1
while index + length < len(string) and index - length >= 0:
if string[index + length] == string[index - length]:
length += 1
else:
break
return length - 1
def interleave(string):
ret = []
for s in string:
ret.extend(['#', s])
ret.append('#')
return ''.join(ret)
def manacher(string):
# P[i] holds the palindrome radius centred at i in the interleaved
# string; `center`/`right` track the palindrome reaching furthest right
right = 0
center = 0
string = interleave(string)
P = [0 for _ in xrange(len(string))]
for i in xrange(1, len(string)):
mirror = 2*center - i  # mirror position of i around `center`
if i + P[mirror] <= right and mirror >= len(string) - i:
# reuse the mirrored radius when it fits inside the known span
P[i] = P[mirror]
else:
# otherwise expand around i explicitly
plength = get_palindrome_length(string, i)
P[i] = plength
if plength > 1:
center = i
right = center + plength
# halve interleaved radii back to lengths in the original string
return [e/2 for e in P]
def get_palindrome_number(string):
return sum(manacher(string))
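# Example (sketch): manacher("aba") returns per-center palindrome lengths
# mapped back from the interleaved string "#a#b#a#";
# get_palindrome_number simply sums them.
#
#     print(manacher("aba"))
#     print(get_palindrome_number("aba"))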
| gpl-3.0 |
raymondxyang/tensorflow | tensorflow/contrib/framework/python/framework/experimental.py | 179 | 2476 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
def _add_experimental_function_notice_to_docstring(doc):
"""Adds an experimental notice to a docstring for experimental functions."""
return decorator_utils.add_notice_to_docstring(
doc, '',
'EXPERIMENTAL FUNCTION',
'(experimental)', ['THIS FUNCTION IS EXPERIMENTAL. It may change or '
'be removed at any time, and without warning.'])
def experimental(func):
"""Decorator for marking functions or methods experimental.
This decorator logs an experimental warning whenever the decorated function is
called. It has the following format:
<function> (from <module>) is experimental and may change or be removed at
any time, and without warning.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (experimental)' is appended
to the first line of the docstring and a notice is prepended to the rest of
the docstring.
Args:
func: A function or method to mark experimental.
Returns:
Decorated function or method.
"""
decorator_utils.validate_callable(func, 'experimental')
@functools.wraps(func)
def new_func(*args, **kwargs):
logging.warning(
'%s (from %s) is experimental and may change or be removed at '
'any time, and without warning.',
decorator_utils.get_qualified_name(func), func.__module__)
return func(*args, **kwargs)
new_func.__doc__ = _add_experimental_function_notice_to_docstring(
func.__doc__)
return new_func
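# Example (a minimal sketch): decorating a function marks it experimental.
#
#     @experimental
#     def my_fn(x):
#       """Does something."""
#       return x
#
# Calling my_fn(1) logs the experimental warning on each call, and
# my_fn.__doc__ now carries the experimental notice.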
| apache-2.0 |
jn7163/django | tests/foreign_object/tests.py | 157 | 17137 | import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.test import TestCase, skipUnlessDBFeature
from django.utils import translation
from .models import (
Article, ArticleIdea, ArticleTag, ArticleTranslation, Country, Friendship,
Group, Membership, NewsArticle, Person,
)
# Note that these tests are testing internal implementation details.
# ForeignObject is not part of public API.
class MultiColumnFKTests(TestCase):
def setUp(self):
# Creating countries
self.usa = Country.objects.create(name="United States of America")
self.soviet_union = Country.objects.create(name="Soviet Union")
Person()
# Creating People
self.bob = Person()
self.bob.name = 'Bob'
self.bob.person_country = self.usa
self.bob.save()
self.jim = Person.objects.create(name='Jim', person_country=self.usa)
self.george = Person.objects.create(name='George', person_country=self.usa)
self.jane = Person.objects.create(name='Jane', person_country=self.soviet_union)
self.mark = Person.objects.create(name='Mark', person_country=self.soviet_union)
self.sam = Person.objects.create(name='Sam', person_country=self.soviet_union)
# Creating Groups
self.kgb = Group.objects.create(name='KGB', group_country=self.soviet_union)
self.cia = Group.objects.create(name='CIA', group_country=self.usa)
self.republican = Group.objects.create(name='Republican', group_country=self.usa)
self.democrat = Group.objects.create(name='Democrat', group_country=self.usa)
def test_get_succeeds_on_multicolumn_match(self):
# Membership objects have access to their related Person if both
# country_ids match between them
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
person = membership.person
self.assertEqual((person.id, person.name), (self.bob.id, "Bob"))
def test_get_fails_on_multicolumn_mismatch(self):
# Membership objects raise a DoesNotExist error when there is no
# Person with the same id and country_id
membership = Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jane.id, group_id=self.cia.id)
self.assertRaises(Person.DoesNotExist, getattr, membership, 'person')
def test_reverse_query_returns_correct_result(self):
# Creating a valid membership because it has the same country as the person
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
# Creating an invalid membership because it has a different country than the person
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.bob.id,
group_id=self.republican.id)
self.assertQuerysetEqual(
self.bob.membership_set.all(), [
self.cia.id
],
attrgetter("group_id")
)
def test_query_filters_correctly(self):
# Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id, group_id=self.cia.id)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(membership_country_id=self.soviet_union.id,
person_id=self.george.id, group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__name__contains='o'), [
self.bob.id
],
attrgetter("person_id")
)
def test_reverse_query_filters_correctly(self):
timemark = datetime.datetime.utcnow()
timedelta = datetime.timedelta(days=1)
# Creating two valid memberships
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id, date_joined=timemark - timedelta)
Membership.objects.create(
membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id, date_joined=timemark + timedelta)
self.assertQuerysetEqual(
Person.objects.filter(membership__date_joined__gte=timemark), [
'Jim'
],
attrgetter('name')
)
def test_forward_in_lookup_filters_correctly(self):
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
# Creating an invalid membership
Membership.objects.create(
membership_country_id=self.soviet_union.id, person_id=self.george.id,
group_id=self.cia.id)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=[self.george, self.jim]), [
self.jim.id,
],
attrgetter('person_id')
)
self.assertQuerysetEqual(
Membership.objects.filter(person__in=Person.objects.filter(name='Jim')), [
self.jim.id,
],
attrgetter('person_id')
)
def test_double_nested_query(self):
m1 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.bob.id,
group_id=self.cia.id)
m2 = Membership.objects.create(membership_country_id=self.usa.id, person_id=self.jim.id,
group_id=self.cia.id)
Friendship.objects.create(from_friend_country_id=self.usa.id, from_friend_id=self.bob.id,
to_friend_country_id=self.usa.id, to_friend_id=self.jim.id)
self.assertQuerysetEqual(Membership.objects.filter(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(
to_friend__in=Person.objects.all()))),
[m1], lambda x: x)
self.assertQuerysetEqual(Membership.objects.exclude(
person__in=Person.objects.filter(
from_friend__in=Friendship.objects.filter(
to_friend__in=Person.objects.all()))),
[m2], lambda x: x)
def test_select_related_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(1):
people = [m.person for m in Membership.objects.select_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.all().order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
people = [
m.person for m in Membership.objects.prefetch_related('person').order_by('pk')]
normal_people = [m.person for m in Membership.objects.order_by('pk')]
self.assertEqual(people, normal_people)
def test_prefetch_foreignkey_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
membership_sets = [
list(p.membership_set.all())
for p in Person.objects.prefetch_related('membership_set').order_by('pk')]
normal_membership_sets = [list(p.membership_set.all())
for p in Person.objects.order_by('pk')]
self.assertEqual(membership_sets, normal_membership_sets)
def test_m2m_through_forward_returns_valid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.cia)
# Let's check to make sure that it worked. Bob and Jim should be members of the CIA.
self.assertQuerysetEqual(
self.cia.members.all(), [
'Bob',
'Jim'
], attrgetter("name")
)
def test_m2m_through_reverse_returns_valid_members(self):
# We start out by making sure that Bob is in no groups.
self.assertQuerysetEqual(
self.bob.groups.all(),
[]
)
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.bob,
group=self.republican)
# Bob should be in the CIA and a Republican
self.assertQuerysetEqual(
self.bob.groups.all(), [
'CIA',
'Republican'
], attrgetter("name")
)
def test_m2m_through_forward_ignores_invalid_members(self):
# We start out by making sure that the Group 'CIA' has no members.
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
# Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# There should still be no members in CIA
self.assertQuerysetEqual(
self.cia.members.all(),
[]
)
def test_m2m_through_reverse_ignores_invalid_members(self):
# We start out by making sure that Jane has no groups.
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
# Something adds jane to group CIA but Jane is in Soviet Union which isn't CIA's country
Membership.objects.create(membership_country=self.usa, person=self.jane, group=self.cia)
# Jane should still not be in any groups
self.assertQuerysetEqual(
self.jane.groups.all(),
[]
)
def test_m2m_through_on_self_works(self):
self.assertQuerysetEqual(
self.jane.friends.all(),
[]
)
Friendship.objects.create(
from_friend_country=self.jane.person_country, from_friend=self.jane,
to_friend_country=self.george.person_country, to_friend=self.george)
self.assertQuerysetEqual(
self.jane.friends.all(),
['George'], attrgetter("name")
)
def test_m2m_through_on_self_ignores_mismatch_columns(self):
self.assertQuerysetEqual(self.jane.friends.all(), [])
# Note that we use ids instead of instances. This is because assigning
# instances to ForeignObject properties sets all related fields from the
# given instance (illustrated in the note following this test).
Friendship.objects.create(
from_friend_id=self.jane.id, to_friend_id=self.george.id,
to_friend_country_id=self.jane.person_country_id,
from_friend_country_id=self.george.person_country_id)
self.assertQuerysetEqual(self.jane.friends.all(), [])
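# Hedged illustration of the contrast described in the note above (the call
# below is hypothetical, not part of this test): assigning instances rather
# than ids, e.g.
#     Friendship.objects.create(from_friend=self.jane, to_friend=self.george, ...)
# would derive from_friend_country from jane and to_friend_country from
# george, so the deliberate column mismatch exercised here could not be set up.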
def test_prefetch_related_m2m_forward_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
members_lists = [list(g.members.all())
for g in Group.objects.prefetch_related('members')]
normal_members_lists = [list(g.members.all()) for g in Group.objects.all()]
self.assertEqual(members_lists, normal_members_lists)
def test_prefetch_related_m2m_reverse_works(self):
Membership.objects.create(membership_country=self.usa, person=self.bob, group=self.cia)
Membership.objects.create(membership_country=self.usa, person=self.jim, group=self.democrat)
with self.assertNumQueries(2):
groups_lists = [list(p.groups.all()) for p in Person.objects.prefetch_related('groups')]
normal_groups_lists = [list(p.groups.all()) for p in Person.objects.all()]
self.assertEqual(groups_lists, normal_groups_lists)
@translation.override('fi')
def test_translations(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
at1_fi = ArticleTranslation(article=a1, lang='fi', title='Otsikko', body='Diipadaapa')
at1_fi.save()
at2_en = ArticleTranslation(article=a1, lang='en', title='Title', body='Lalalalala')
at2_en.save()
self.assertEqual(Article.objects.get(pk=a1.pk).active_translation, at1_fi)
with self.assertNumQueries(1):
fetched = Article.objects.select_related('active_translation').get(
active_translation__title='Otsikko')
self.assertEqual(fetched.active_translation.title, 'Otsikko')
a2 = Article.objects.create(pub_date=datetime.date.today())
at2_fi = ArticleTranslation(article=a2, lang='fi', title='Atsikko', body='Diipadaapa',
abstract='dipad')
at2_fi.save()
a3 = Article.objects.create(pub_date=datetime.date.today())
at3_en = ArticleTranslation(article=a3, lang='en', title='A title', body='lalalalala',
abstract='lala')
at3_en.save()
# Test model initialization with active_translation field.
a3 = Article(id=a3.id, pub_date=a3.pub_date, active_translation=at3_en)
a3.save()
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a3])
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None,
active_translation__pk__isnull=False)),
[a1])
with translation.override('en'):
self.assertEqual(
list(Article.objects.filter(active_translation__abstract=None)),
[a1, a2])
def test_foreign_key_raises_informative_does_not_exist(self):
referrer = ArticleTranslation()
with self.assertRaisesMessage(Article.DoesNotExist, 'ArticleTranslation has no article'):
referrer.article
def test_foreign_key_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
ArticleTag.objects.create(article=a1, name="foo")
self.assertEqual(Article.objects.filter(tag__name="foo").count(), 1)
self.assertEqual(Article.objects.filter(tag__name="bar").count(), 0)
with self.assertRaises(FieldError):
Article.objects.filter(tags__name="foo")
def test_many_to_many_related_query_name(self):
a1 = Article.objects.create(pub_date=datetime.date.today())
i1 = ArticleIdea.objects.create(name="idea1")
a1.ideas.add(i1)
self.assertEqual(Article.objects.filter(idea_things__name="idea1").count(), 1)
self.assertEqual(Article.objects.filter(idea_things__name="idea2").count(), 0)
with self.assertRaises(FieldError):
Article.objects.filter(ideas__name="idea1")
@translation.override('fi')
def test_inheritance(self):
na = NewsArticle.objects.create(pub_date=datetime.date.today())
ArticleTranslation.objects.create(
article=na, lang="fi", title="foo", body="bar")
self.assertQuerysetEqual(
NewsArticle.objects.select_related('active_translation'),
[na], lambda x: x
)
with self.assertNumQueries(1):
self.assertEqual(
NewsArticle.objects.select_related(
'active_translation')[0].active_translation.title,
"foo")
@skipUnlessDBFeature('has_bulk_insert')
def test_batch_create_foreign_object(self):
""" See: https://code.djangoproject.com/ticket/21566 """
objs = [Person(name="abcd_%s" % i, person_country=self.usa) for i in range(0, 5)]
Person.objects.bulk_create(objs, 10)
| bsd-3-clause |
drakuna/odoo | addons/point_of_sale/report/pos_invoice.py | 43 | 1551 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import SUPERUSER_ID
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class PosInvoiceReport(osv.AbstractModel):
_name = 'report.point_of_sale.report_invoice'
def render_html(self, cr, uid, ids, data=None, context=None):
report_obj = self.pool['report']
posorder_obj = self.pool['pos.order']
report = report_obj._get_report_from_name(cr, uid, 'account.report_invoice')
selected_orders = posorder_obj.browse(cr, uid, ids, context=context)
ids_to_print = []
invoiced_posorders_ids = []
for order in selected_orders:
if order.invoice_id:
ids_to_print.append(order.invoice_id.id)
invoiced_posorders_ids.append(order.id)
not_invoiced_orders_ids = list(set(ids) - set(invoiced_posorders_ids))
if not_invoiced_orders_ids:
not_invoiced_posorders = posorder_obj.browse(cr, uid, not_invoiced_orders_ids, context=context)
not_invoiced_orders_names = list(map(lambda a: a.name, not_invoiced_posorders))
raise UserError(_('No link to an invoice for %s.') % ', '.join(not_invoiced_orders_names))
docargs = {
'docs': self.pool['account.invoice'].browse(cr, uid, ids_to_print, context=context)
}
return report_obj.render(cr, SUPERUSER_ID, ids, 'account.report_invoice', docargs, context=context)
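# Hedged note: by the Odoo reporting convention, an AbstractModel named
# 'report.<report xml id>' (as above) has its render_html() invoked by the
# report controller, so this override is what reroutes the POS invoice report
# through the orders' linked account.invoice records.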
| gpl-3.0 |
GhostThrone/django | django/contrib/auth/migrations/0001_initial.py | 200 | 5063 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.auth.models
from django.core import validators
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__first__'),
]
operations = [
migrations.CreateModel(
name='Permission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50, verbose_name='name')),
('content_type', models.ForeignKey(
to='contenttypes.ContentType',
on_delete=models.CASCADE,
to_field='id',
verbose_name='content type',
)),
('codename', models.CharField(max_length=100, verbose_name='codename')),
],
options={
'ordering': ('content_type__app_label', 'content_type__model', 'codename'),
'unique_together': set([('content_type', 'codename')]),
'verbose_name': 'permission',
'verbose_name_plural': 'permissions',
},
managers=[
('objects', django.contrib.auth.models.PermissionManager()),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
('permissions', models.ManyToManyField(to='auth.Permission', verbose_name='permissions', blank=True)),
],
options={
'verbose_name': 'group',
'verbose_name_plural': 'groups',
},
managers=[
('objects', django.contrib.auth.models.GroupManager()),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(
default=False,
help_text='Designates that this user has all permissions without explicitly assigning them.',
verbose_name='superuser status'
)),
('username', models.CharField(
help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True,
max_length=30, verbose_name='username',
validators=[validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')]
)),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(
default=False, help_text='Designates whether the user can log into this admin site.',
verbose_name='staff status'
)),
('is_active', models.BooleanField(
default=True, verbose_name='active', help_text=(
'Designates whether this user should be treated as active. Unselect this instead of deleting '
'accounts.'
)
)),
('date_joined', models.DateTimeField(default=timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(
to='auth.Group', verbose_name='groups', blank=True, related_name='user_set',
related_query_name='user', help_text=(
'The groups this user belongs to. A user will get all permissions granted to each of their '
'groups.'
)
)),
('user_permissions', models.ManyToManyField(
to='auth.Permission', verbose_name='user permissions', blank=True,
help_text='Specific permissions for this user.', related_name='user_set',
related_query_name='user')
),
],
options={
'swappable': 'AUTH_USER_MODEL',
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| bsd-3-clause |
t794104/ansible | lib/ansible/modules/network/dellos6/dellos6_command.py | 39 | 7267 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
# Copyright: (c) 2016, Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: dellos6_command
version_added: "2.2"
author: "Abirami N (@abirami-n)"
short_description: Run commands on remote devices running Dell OS6
description:
- Sends arbitrary commands to a Dell OS6 node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(dellos6_config) to configure Dell OS6 devices.
extends_documentation_fragment: dellos6
options:
commands:
description:
- List of commands to send to the remote dellos6 device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module does not return until the condition is satisfied or
the number of retries has been exhausted.
type: list
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of I(retries), the task fails.
See examples.
type: list
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
type: str
default: all
choices: [ all, any ]
version_added: "2.5"
retries:
description:
- Specifies the number of times a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
type: int
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
type: int
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
dellos6_command:
commands: show version
- name: run show version and check to see if output contains Dell
dellos6_command:
commands: show version
wait_for: result[0] contains Dell
- name: run multiple commands on remote nodes
dellos6_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
dellos6_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains Dell
- result[1] contains Access
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.dellos6.dellos6 import run_commands
from ansible.module_utils.network.dellos6.dellos6 import dellos6_argument_spec, check_args
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='dellos6_command does not support running config mode '
'commands. Please use dellos6_config instead'
)
return commands
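# Rough sketch (assumed from ComplexList's key=True contract, not verified
# against every Ansible version): parse_commands normalizes a bare string
# command into a dict keyed on 'command', so
#     ['show version']
# becomes roughly
#     [{'command': 'show version', 'prompt': None, 'answer': None}]
# which is the shape run_commands() consumes.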
def main():
"""main entry point for module execution
"""
argument_spec = dict(
# { command: <str>, prompt: <str>, response: <str> }
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(dellos6_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
mancoast/CPythonPyc_test | fail/324_test_sax.py | 1 | 32216 | # regression test for SAX 2.0
# $Id$
from xml.sax import make_parser, ContentHandler, \
SAXException, SAXReaderNotAvailable, SAXParseException
import unittest
try:
make_parser()
except SAXReaderNotAvailable:
# don't try to test this module if we cannot create a parser
raise unittest.SkipTest("no XML parsers available")
from xml.sax.saxutils import XMLGenerator, escape, unescape, quoteattr, \
XMLFilterBase
from xml.sax.expatreader import create_parser
from xml.sax.handler import feature_namespaces
from xml.sax.xmlreader import InputSource, AttributesImpl, AttributesNSImpl
from io import BytesIO, StringIO
import os.path
import shutil
from test import support
from test.support import findfile, run_unittest
TEST_XMLFILE = findfile("test.xml", subdir="xmltestdata")
TEST_XMLFILE_OUT = findfile("test.xml.out", subdir="xmltestdata")
try:
TEST_XMLFILE.encode("utf8")
TEST_XMLFILE_OUT.encode("utf8")
except UnicodeEncodeError:
raise unittest.SkipTest("filename is not encodable to utf8")
supports_nonascii_filenames = True
if not os.path.supports_unicode_filenames:
try:
support.TESTFN_UNICODE.encode(support.TESTFN_ENCODING)
except (UnicodeError, TypeError):
# Either the file system encoding is None, or the file name
# cannot be encoded in the file system encoding.
supports_nonascii_filenames = False
requires_nonascii_filenames = unittest.skipUnless(
supports_nonascii_filenames,
'Requires non-ascii filenames support')
ns_uri = "http://www.python.org/xml-ns/saxtest/"
class XmlTestBase(unittest.TestCase):
def verify_empty_attrs(self, attrs):
self.assertRaises(KeyError, attrs.getValue, "attr")
self.assertRaises(KeyError, attrs.getValueByQName, "attr")
self.assertRaises(KeyError, attrs.getNameByQName, "attr")
self.assertRaises(KeyError, attrs.getQNameByName, "attr")
self.assertRaises(KeyError, attrs.__getitem__, "attr")
self.assertEqual(attrs.getLength(), 0)
self.assertEqual(attrs.getNames(), [])
self.assertEqual(attrs.getQNames(), [])
self.assertEqual(len(attrs), 0)
self.assertNotIn("attr", attrs)
self.assertEqual(list(attrs.keys()), [])
self.assertEqual(attrs.get("attrs"), None)
self.assertEqual(attrs.get("attrs", 25), 25)
self.assertEqual(list(attrs.items()), [])
self.assertEqual(list(attrs.values()), [])
def verify_empty_nsattrs(self, attrs):
self.assertRaises(KeyError, attrs.getValue, (ns_uri, "attr"))
self.assertRaises(KeyError, attrs.getValueByQName, "ns:attr")
self.assertRaises(KeyError, attrs.getNameByQName, "ns:attr")
self.assertRaises(KeyError, attrs.getQNameByName, (ns_uri, "attr"))
self.assertRaises(KeyError, attrs.__getitem__, (ns_uri, "attr"))
self.assertEqual(attrs.getLength(), 0)
self.assertEqual(attrs.getNames(), [])
self.assertEqual(attrs.getQNames(), [])
self.assertEqual(len(attrs), 0)
self.assertNotIn((ns_uri, "attr"), attrs)
self.assertEqual(list(attrs.keys()), [])
self.assertEqual(attrs.get((ns_uri, "attr")), None)
self.assertEqual(attrs.get((ns_uri, "attr"), 25), 25)
self.assertEqual(list(attrs.items()), [])
self.assertEqual(list(attrs.values()), [])
def verify_attrs_wattr(self, attrs):
self.assertEqual(attrs.getLength(), 1)
self.assertEqual(attrs.getNames(), ["attr"])
self.assertEqual(attrs.getQNames(), ["attr"])
self.assertEqual(len(attrs), 1)
self.assertIn("attr", attrs)
self.assertEqual(list(attrs.keys()), ["attr"])
self.assertEqual(attrs.get("attr"), "val")
self.assertEqual(attrs.get("attr", 25), "val")
self.assertEqual(list(attrs.items()), [("attr", "val")])
self.assertEqual(list(attrs.values()), ["val"])
self.assertEqual(attrs.getValue("attr"), "val")
self.assertEqual(attrs.getValueByQName("attr"), "val")
self.assertEqual(attrs.getNameByQName("attr"), "attr")
self.assertEqual(attrs["attr"], "val")
self.assertEqual(attrs.getQNameByName("attr"), "attr")
class MakeParserTest(unittest.TestCase):
def test_make_parser2(self):
# Creating parsers several times in a row should succeed.
# Testing this because there have been failures of this kind
# before.
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
from xml.sax import make_parser
p = make_parser()
# ===========================================================================
#
# saxutils tests
#
# ===========================================================================
class SaxutilsTest(unittest.TestCase):
# ===== escape
def test_escape_basic(self):
self.assertEqual(escape("Donald Duck & Co"), "Donald Duck & Co")
def test_escape_all(self):
self.assertEqual(escape("<Donald Duck & Co>"),
"<Donald Duck & Co>")
def test_escape_extra(self):
self.assertEqual(escape("Hei på deg", {"å" : "å"}),
"Hei på deg")
# ===== unescape
def test_unescape_basic(self):
self.assertEqual(unescape("Donald Duck & Co"), "Donald Duck & Co")
def test_unescape_all(self):
self.assertEqual(unescape("<Donald Duck & Co>"),
"<Donald Duck & Co>")
def test_unescape_extra(self):
self.assertEqual(unescape("Hei på deg", {"å" : "å"}),
"Hei på deg")
def test_unescape_amp_extra(self):
self.assertEqual(unescape("&foo;", {"&foo;": "splat"}), "&foo;")
# ===== quoteattr
def test_quoteattr_basic(self):
self.assertEqual(quoteattr("Donald Duck & Co"),
'"Donald Duck & Co"')
def test_single_quoteattr(self):
self.assertEqual(quoteattr('Includes "double" quotes'),
'\'Includes "double" quotes\'')
def test_double_quoteattr(self):
self.assertEqual(quoteattr("Includes 'single' quotes"),
"\"Includes 'single' quotes\"")
def test_single_double_quoteattr(self):
self.assertEqual(quoteattr("Includes 'single' and \"double\" quotes"),
"\"Includes 'single' and "double" quotes\"")
# ===== make_parser
def test_make_parser(self):
# Creating a parser should succeed - it should fall back
# to the expatreader
p = make_parser(['xml.parsers.no_such_parser'])
# ===== XMLGenerator
class XmlgenTest:
def test_xmlgen_basic(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml("<doc></doc>"))
def test_xmlgen_basic_empty(self):
result = self.ioclass()
gen = XMLGenerator(result, short_empty_elements=True)
gen.startDocument()
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml("<doc/>"))
def test_xmlgen_content(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("huhei")
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml("<doc>huhei</doc>"))
def test_xmlgen_content_empty(self):
result = self.ioclass()
gen = XMLGenerator(result, short_empty_elements=True)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("huhei")
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml("<doc>huhei</doc>"))
def test_xmlgen_pi(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.processingInstruction("test", "data")
gen.startElement("doc", {})
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(),
self.xml("<?test data?><doc></doc>"))
def test_xmlgen_content_escape(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.characters("<huhei&")
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(),
self.xml("<doc><huhei&</doc>"))
def test_xmlgen_attr_escape(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {"a": '"'})
gen.startElement("e", {"a": "'"})
gen.endElement("e")
gen.startElement("e", {"a": "'\""})
gen.endElement("e")
gen.startElement("e", {"a": "\n\r\t"})
gen.endElement("e")
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml(
"<doc a='\"'><e a=\"'\"></e>"
"<e a=\"'"\"></e>"
"<e a=\" 	\"></e></doc>"))
def test_xmlgen_encoding(self):
encodings = ('iso-8859-15', 'utf-8', 'utf-8-sig',
'utf-16', 'utf-16be', 'utf-16le',
'utf-32', 'utf-32be', 'utf-32le')
for encoding in encodings:
result = self.ioclass()
gen = XMLGenerator(result, encoding=encoding)
gen.startDocument()
gen.startElement("doc", {"a": '\u20ac'})
gen.characters("\u20ac")
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(),
self.xml('<doc a="\u20ac">\u20ac</doc>', encoding=encoding))
def test_xmlgen_unencodable(self):
result = self.ioclass()
gen = XMLGenerator(result, encoding='ascii')
gen.startDocument()
gen.startElement("doc", {"a": '\u20ac'})
gen.characters("\u20ac")
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(),
self.xml('<doc a="€">€</doc>', encoding='ascii'))
def test_xmlgen_ignorable(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElement("doc", {})
gen.ignorableWhitespace(" ")
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml("<doc> </doc>"))
def test_xmlgen_ignorable_empty(self):
result = self.ioclass()
gen = XMLGenerator(result, short_empty_elements=True)
gen.startDocument()
gen.startElement("doc", {})
gen.ignorableWhitespace(" ")
gen.endElement("doc")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml("<doc> </doc>"))
def test_xmlgen_ns(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startPrefixMapping("ns1", ns_uri)
gen.startElementNS((ns_uri, "doc"), "ns1:doc", {})
# add an unqualified name
gen.startElementNS((None, "udoc"), None, {})
gen.endElementNS((None, "udoc"), None)
gen.endElementNS((ns_uri, "doc"), "ns1:doc")
gen.endPrefixMapping("ns1")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml(
'<ns1:doc xmlns:ns1="%s"><udoc></udoc></ns1:doc>' %
ns_uri))
def test_xmlgen_ns_empty(self):
result = self.ioclass()
gen = XMLGenerator(result, short_empty_elements=True)
gen.startDocument()
gen.startPrefixMapping("ns1", ns_uri)
gen.startElementNS((ns_uri, "doc"), "ns1:doc", {})
# add an unqualified name
gen.startElementNS((None, "udoc"), None, {})
gen.endElementNS((None, "udoc"), None)
gen.endElementNS((ns_uri, "doc"), "ns1:doc")
gen.endPrefixMapping("ns1")
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml(
'<ns1:doc xmlns:ns1="%s"><udoc/></ns1:doc>' %
ns_uri))
def test_1463026_1(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startElementNS((None, 'a'), 'a', {(None, 'b'):'c'})
gen.endElementNS((None, 'a'), 'a')
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml('<a b="c"></a>'))
def test_1463026_1_empty(self):
result = self.ioclass()
gen = XMLGenerator(result, short_empty_elements=True)
gen.startDocument()
gen.startElementNS((None, 'a'), 'a', {(None, 'b'):'c'})
gen.endElementNS((None, 'a'), 'a')
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml('<a b="c"/>'))
def test_1463026_2(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startPrefixMapping(None, 'qux')
gen.startElementNS(('qux', 'a'), 'a', {})
gen.endElementNS(('qux', 'a'), 'a')
gen.endPrefixMapping(None)
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml('<a xmlns="qux"></a>'))
def test_1463026_2_empty(self):
result = self.ioclass()
gen = XMLGenerator(result, short_empty_elements=True)
gen.startDocument()
gen.startPrefixMapping(None, 'qux')
gen.startElementNS(('qux', 'a'), 'a', {})
gen.endElementNS(('qux', 'a'), 'a')
gen.endPrefixMapping(None)
gen.endDocument()
self.assertEqual(result.getvalue(), self.xml('<a xmlns="qux"/>'))
def test_1463026_3(self):
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startPrefixMapping('my', 'qux')
gen.startElementNS(('qux', 'a'), 'a', {(None, 'b'):'c'})
gen.endElementNS(('qux', 'a'), 'a')
gen.endPrefixMapping('my')
gen.endDocument()
self.assertEqual(result.getvalue(),
self.xml('<my:a xmlns:my="qux" b="c"></my:a>'))
def test_1463026_3_empty(self):
result = self.ioclass()
gen = XMLGenerator(result, short_empty_elements=True)
gen.startDocument()
gen.startPrefixMapping('my', 'qux')
gen.startElementNS(('qux', 'a'), 'a', {(None, 'b'):'c'})
gen.endElementNS(('qux', 'a'), 'a')
gen.endPrefixMapping('my')
gen.endDocument()
self.assertEqual(result.getvalue(),
self.xml('<my:a xmlns:my="qux" b="c"/>'))
def test_5027_1(self):
# The xml prefix (as in xml:lang below) is reserved and bound by
# definition to http://www.w3.org/XML/1998/namespace. XMLGenerator had
# a bug whereby a KeyError is raised because this namespace is missing
# from a dictionary.
#
# This test demonstrates the bug by parsing a document.
test_xml = StringIO(
'<?xml version="1.0"?>'
'<a:g1 xmlns:a="http://example.com/ns">'
'<a:g2 xml:lang="en">Hello</a:g2>'
'</a:g1>')
parser = make_parser()
parser.setFeature(feature_namespaces, True)
result = self.ioclass()
gen = XMLGenerator(result)
parser.setContentHandler(gen)
parser.parse(test_xml)
self.assertEqual(result.getvalue(),
self.xml(
'<a:g1 xmlns:a="http://example.com/ns">'
'<a:g2 xml:lang="en">Hello</a:g2>'
'</a:g1>'))
def test_5027_2(self):
# The xml prefix (as in xml:lang below) is reserved and bound by
# definition to http://www.w3.org/XML/1998/namespace. XMLGenerator had
# a bug whereby a KeyError is raised because this namespace is missing
# from a dictionary.
#
# This test demonstrates the bug by direct manipulation of the
# XMLGenerator.
result = self.ioclass()
gen = XMLGenerator(result)
gen.startDocument()
gen.startPrefixMapping('a', 'http://example.com/ns')
gen.startElementNS(('http://example.com/ns', 'g1'), 'g1', {})
lang_attr = {('http://www.w3.org/XML/1998/namespace', 'lang'): 'en'}
gen.startElementNS(('http://example.com/ns', 'g2'), 'g2', lang_attr)
gen.characters('Hello')
gen.endElementNS(('http://example.com/ns', 'g2'), 'g2')
gen.endElementNS(('http://example.com/ns', 'g1'), 'g1')
gen.endPrefixMapping('a')
gen.endDocument()
self.assertEqual(result.getvalue(),
self.xml(
'<a:g1 xmlns:a="http://example.com/ns">'
'<a:g2 xml:lang="en">Hello</a:g2>'
'</a:g1>'))
def test_no_close_file(self):
result = self.ioclass()
def func(out):
gen = XMLGenerator(out)
gen.startDocument()
gen.startElement("doc", {})
func(result)
self.assertFalse(result.closed)
def test_xmlgen_fragment(self):
result = self.ioclass()
gen = XMLGenerator(result)
# Don't call gen.startDocument()
gen.startElement("foo", {"a": "1.0"})
gen.characters("Hello")
gen.endElement("foo")
gen.startElement("bar", {"b": "2.0"})
gen.endElement("bar")
# Don't call gen.endDocument()
self.assertEqual(result.getvalue(),
self.xml('<foo a="1.0">Hello</foo><bar b="2.0"></bar>')[len(self.xml('')):])
class StringXmlgenTest(XmlgenTest, unittest.TestCase):
ioclass = StringIO
def xml(self, doc, encoding='iso-8859-1'):
return '<?xml version="1.0" encoding="%s"?>\n%s' % (encoding, doc)
test_xmlgen_unencodable = None
class BytesXmlgenTest(XmlgenTest, unittest.TestCase):
ioclass = BytesIO
def xml(self, doc, encoding='iso-8859-1'):
return ('<?xml version="1.0" encoding="%s"?>\n%s' %
(encoding, doc)).encode(encoding, 'xmlcharrefreplace')
class WriterXmlgenTest(BytesXmlgenTest):
class ioclass(list):
write = list.append
closed = False
def seekable(self):
return True
def tell(self):
# return 0 before anything has been written and a nonzero value afterwards
return len(self)
def getvalue(self):
return b''.join(self)
start = b'<?xml version="1.0" encoding="iso-8859-1"?>\n'
class XMLFilterBaseTest(unittest.TestCase):
def test_filter_basic(self):
result = BytesIO()
gen = XMLGenerator(result)
filter = XMLFilterBase()
filter.setContentHandler(gen)
filter.startDocument()
filter.startElement("doc", {})
filter.characters("content")
filter.ignorableWhitespace(" ")
filter.endElement("doc")
filter.endDocument()
self.assertEqual(result.getvalue(), start + b"<doc>content </doc>")
# ===========================================================================
#
# expatreader tests
#
# ===========================================================================
with open(TEST_XMLFILE_OUT, 'rb') as f:
xml_test_out = f.read()
class ExpatReaderTest(XmlTestBase):
# ===== XMLReader support
def test_expat_file(self):
parser = create_parser()
result = BytesIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
with open(TEST_XMLFILE, 'rb') as f:
parser.parse(f)
self.assertEqual(result.getvalue(), xml_test_out)
@requires_nonascii_filenames
def test_expat_file_nonascii(self):
fname = support.TESTFN_UNICODE
shutil.copyfile(TEST_XMLFILE, fname)
self.addCleanup(support.unlink, fname)
parser = create_parser()
result = BytesIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(open(fname))
self.assertEqual(result.getvalue(), xml_test_out)
# ===== DTDHandler support
class TestDTDHandler:
def __init__(self):
self._notations = []
self._entities = []
def notationDecl(self, name, publicId, systemId):
self._notations.append((name, publicId, systemId))
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._entities.append((name, publicId, systemId, ndata))
def test_expat_dtdhandler(self):
parser = create_parser()
handler = self.TestDTDHandler()
parser.setDTDHandler(handler)
parser.feed('<!DOCTYPE doc [\n')
parser.feed(' <!ENTITY img SYSTEM "expat.gif" NDATA GIF>\n')
parser.feed(' <!NOTATION GIF PUBLIC "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN">\n')
parser.feed(']>\n')
parser.feed('<doc></doc>')
parser.close()
self.assertEqual(handler._notations,
[("GIF", "-//CompuServe//NOTATION Graphics Interchange Format 89a//EN", None)])
self.assertEqual(handler._entities, [("img", None, "expat.gif", "GIF")])
# ===== EntityResolver support
class TestEntityResolver:
def resolveEntity(self, publicId, systemId):
inpsrc = InputSource()
inpsrc.setByteStream(BytesIO(b"<entity/>"))
return inpsrc
def test_expat_entityresolver(self):
parser = create_parser()
parser.setEntityResolver(self.TestEntityResolver())
result = BytesIO()
parser.setContentHandler(XMLGenerator(result))
parser.feed('<!DOCTYPE doc [\n')
parser.feed(' <!ENTITY test SYSTEM "whatever">\n')
parser.feed(']>\n')
parser.feed('<doc>&test;</doc>')
parser.close()
self.assertEqual(result.getvalue(), start +
b"<doc><entity></entity></doc>")
# ===== Attributes support
class AttrGatherer(ContentHandler):
def startElement(self, name, attrs):
self._attrs = attrs
def startElementNS(self, name, qname, attrs):
self._attrs = attrs
def test_expat_attrs_empty(self):
parser = create_parser()
gather = self.AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc/>")
parser.close()
self.verify_empty_attrs(gather._attrs)
def test_expat_attrs_wattr(self):
parser = create_parser()
gather = self.AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc attr='val'/>")
parser.close()
self.verify_attrs_wattr(gather._attrs)
def test_expat_nsattrs_empty(self):
parser = create_parser(1)
gather = self.AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc/>")
parser.close()
self.verify_empty_nsattrs(gather._attrs)
def test_expat_nsattrs_wattr(self):
parser = create_parser(1)
gather = self.AttrGatherer()
parser.setContentHandler(gather)
parser.feed("<doc xmlns:ns='%s' ns:attr='val'/>" % ns_uri)
parser.close()
attrs = gather._attrs
self.assertEqual(attrs.getLength(), 1)
self.assertEqual(attrs.getNames(), [(ns_uri, "attr")])
self.assertTrue((attrs.getQNames() == [] or
attrs.getQNames() == ["ns:attr"]))
self.assertEqual(len(attrs), 1)
self.assertIn((ns_uri, "attr"), attrs)
self.assertEqual(attrs.get((ns_uri, "attr")), "val")
self.assertEqual(attrs.get((ns_uri, "attr"), 25), "val")
self.assertEqual(list(attrs.items()), [((ns_uri, "attr"), "val")])
self.assertEqual(list(attrs.values()), ["val"])
self.assertEqual(attrs.getValue((ns_uri, "attr")), "val")
self.assertEqual(attrs[(ns_uri, "attr")], "val")
# ===== InputSource support
def test_expat_inpsource_filename(self):
parser = create_parser()
result = BytesIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(TEST_XMLFILE)
self.assertEqual(result.getvalue(), xml_test_out)
def test_expat_inpsource_sysid(self):
parser = create_parser()
result = BytesIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(InputSource(TEST_XMLFILE))
self.assertEqual(result.getvalue(), xml_test_out)
@requires_nonascii_filenames
def test_expat_inpsource_sysid_nonascii(self):
fname = support.TESTFN_UNICODE
shutil.copyfile(TEST_XMLFILE, fname)
self.addCleanup(support.unlink, fname)
parser = create_parser()
result = BytesIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.parse(InputSource(fname))
self.assertEqual(result.getvalue(), xml_test_out)
def test_expat_inpsource_stream(self):
parser = create_parser()
result = BytesIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
inpsrc = InputSource()
with open(TEST_XMLFILE, 'rb') as f:
inpsrc.setByteStream(f)
parser.parse(inpsrc)
self.assertEqual(result.getvalue(), xml_test_out)
# ===== IncrementalParser support
def test_expat_incremental(self):
result = BytesIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
self.assertEqual(result.getvalue(), start + b"<doc></doc>")
def test_expat_incremental_reset(self):
result = BytesIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("text")
result = BytesIO()
xmlgen = XMLGenerator(result)
parser.setContentHandler(xmlgen)
parser.reset()
parser.feed("<doc>")
parser.feed("text")
parser.feed("</doc>")
parser.close()
self.assertEqual(result.getvalue(), start + b"<doc>text</doc>")
# ===== Locator support
def test_expat_locator_noinfo(self):
result = BytesIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.feed("<doc>")
parser.feed("</doc>")
parser.close()
self.assertEqual(parser.getSystemId(), None)
self.assertEqual(parser.getPublicId(), None)
self.assertEqual(parser.getLineNumber(), 1)
def test_expat_locator_withinfo(self):
result = BytesIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.parse(TEST_XMLFILE)
self.assertEqual(parser.getSystemId(), TEST_XMLFILE)
self.assertEqual(parser.getPublicId(), None)
@requires_nonascii_filenames
def test_expat_locator_withinfo_nonascii(self):
fname = support.TESTFN_UNICODE
shutil.copyfile(TEST_XMLFILE, fname)
self.addCleanup(support.unlink, fname)
result = BytesIO()
xmlgen = XMLGenerator(result)
parser = create_parser()
parser.setContentHandler(xmlgen)
parser.parse(fname)
self.assertEqual(parser.getSystemId(), fname)
self.assertEqual(parser.getPublicId(), None)
# ===========================================================================
#
# error reporting
#
# ===========================================================================
class ErrorReportingTest(unittest.TestCase):
def test_expat_inpsource_location(self):
parser = create_parser()
parser.setContentHandler(ContentHandler()) # do nothing
source = InputSource()
source.setByteStream(BytesIO(b"<foo bar foobar>")) #ill-formed
name = "a file name"
source.setSystemId(name)
try:
parser.parse(source)
self.fail()
except SAXException as e:
self.assertEqual(e.getSystemId(), name)
def test_expat_incomplete(self):
parser = create_parser()
parser.setContentHandler(ContentHandler()) # do nothing
self.assertRaises(SAXParseException, parser.parse, StringIO("<foo>"))
def test_sax_parse_exception_str(self):
# pass various values from a locator to the SAXParseException to
# make sure that the __str__() doesn't fall apart when None is
# passed instead of an integer line and column number
#
# use "normal" values for the locator:
str(SAXParseException("message", None,
self.DummyLocator(1, 1)))
# use None for the line number:
str(SAXParseException("message", None,
self.DummyLocator(None, 1)))
# use None for the column number:
str(SAXParseException("message", None,
self.DummyLocator(1, None)))
# use None for both:
str(SAXParseException("message", None,
self.DummyLocator(None, None)))
class DummyLocator:
def __init__(self, lineno, colno):
self._lineno = lineno
self._colno = colno
def getPublicId(self):
return "pubid"
def getSystemId(self):
return "sysid"
def getLineNumber(self):
return self._lineno
def getColumnNumber(self):
return self._colno
# ===========================================================================
#
# xmlreader tests
#
# ===========================================================================
class XmlReaderTest(XmlTestBase):
# ===== AttributesImpl
def test_attrs_empty(self):
self.verify_empty_attrs(AttributesImpl({}))
def test_attrs_wattr(self):
self.verify_attrs_wattr(AttributesImpl({"attr" : "val"}))
def test_nsattrs_empty(self):
self.verify_empty_nsattrs(AttributesNSImpl({}, {}))
def test_nsattrs_wattr(self):
attrs = AttributesNSImpl({(ns_uri, "attr") : "val"},
{(ns_uri, "attr") : "ns:attr"})
self.assertEqual(attrs.getLength(), 1)
self.assertEqual(attrs.getNames(), [(ns_uri, "attr")])
self.assertEqual(attrs.getQNames(), ["ns:attr"])
self.assertEqual(len(attrs), 1)
self.assertIn((ns_uri, "attr"), attrs)
self.assertEqual(list(attrs.keys()), [(ns_uri, "attr")])
self.assertEqual(attrs.get((ns_uri, "attr")), "val")
self.assertEqual(attrs.get((ns_uri, "attr"), 25), "val")
self.assertEqual(list(attrs.items()), [((ns_uri, "attr"), "val")])
self.assertEqual(list(attrs.values()), ["val"])
self.assertEqual(attrs.getValue((ns_uri, "attr")), "val")
self.assertEqual(attrs.getValueByQName("ns:attr"), "val")
self.assertEqual(attrs.getNameByQName("ns:attr"), (ns_uri, "attr"))
self.assertEqual(attrs[(ns_uri, "attr")], "val")
self.assertEqual(attrs.getQNameByName((ns_uri, "attr")), "ns:attr")
def test_main():
run_unittest(MakeParserTest,
SaxutilsTest,
StringXmlgenTest,
BytesXmlgenTest,
WriterXmlgenTest,
ExpatReaderTest,
ErrorReportingTest,
XmlReaderTest)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
Hybrid-Cloud/badam | fs_patches_of_hybrid_cloud/cherry_for_111T/nova_cascaded/nova/virt/vmwareapi/vm_util.py | 5 | 62563 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API VM utility module to build SOAP object specs.
"""
import copy
import functools
from oslo.config import cfg
from oslo.vmware import exceptions as vexc
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ALL_SUPPORTED_NETWORK_DEVICES = ['VirtualE1000', 'VirtualE1000e',
'VirtualPCNet32', 'VirtualSriovEthernetCard',
'VirtualVmxnet']
# A cache for VM references. The key will be the VM name
# and the value is the VM reference. The VM name is unique. This
# is either the UUID of the instance or UUID-rescue in the case
# that this is a rescue VM. This is in order to prevent
# unnecessary communication with the backend.
_VM_REFS_CACHE = {}
def vm_refs_cache_reset():
global _VM_REFS_CACHE
_VM_REFS_CACHE = {}
def vm_ref_cache_delete(id):
_VM_REFS_CACHE.pop(id, None)
def vm_ref_cache_update(id, vm_ref):
_VM_REFS_CACHE[id] = vm_ref
def vm_ref_cache_get(id):
return _VM_REFS_CACHE.get(id)
def _vm_ref_cache(id, func, session, data):
vm_ref = vm_ref_cache_get(id)
if not vm_ref:
vm_ref = func(session, data)
vm_ref_cache_update(id, vm_ref)
return vm_ref
def vm_ref_cache_from_instance(func):
@functools.wraps(func)
def wrapper(session, instance):
id = instance['uuid']
return _vm_ref_cache(id, func, session, instance)
return wrapper
def vm_ref_cache_from_name(func):
@functools.wraps(func)
def wrapper(session, name):
id = name
return _vm_ref_cache(id, func, session, name)
return wrapper
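# Illustrative usage of the caching decorators above (the decorated lookup
# shown here is a sketch, not necessarily the exact function in this module):
#
#     @vm_ref_cache_from_instance
#     def get_vm_ref(session, instance):
#         ...  # expensive search against the backend
#
#     ref = get_vm_ref(session, instance)  # first call hits the backend
#     ref = get_vm_ref(session, instance)  # second call served from _VM_REFS_CACHE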
# the config key which stores the VNC port
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
def _iface_id_option_value(client_factory, iface_id, port_index):
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % port_index
opt.value = iface_id
return opt
def get_vm_create_spec(client_factory, instance, name, data_store_name,
vif_infos, os_type=constants.DEFAULT_OS_TYPE,
allocations=None):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = os_type
# The name is the unique identifier for the VM. This will either be the
# instance UUID or the instance UUID with suffix '-rescue' for VMs that
# are in rescue mode.
config_spec.instanceUuid = name
# Allow nested ESX instances to host 64 bit VMs.
if os_type == "vmkernel5Guest":
config_spec.nestedHVEnabled = "True"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = int(instance['vcpus'])
config_spec.memoryMB = int(instance['memory_mb'])
# Configure cpu information
if (allocations is not None and
('cpu_limit' in allocations or
'cpu_reservation' in allocations or
'cpu_shares_level' in allocations)):
allocation = client_factory.create('ns0:ResourceAllocationInfo')
if 'cpu_limit' in allocations:
allocation.limit = allocations['cpu_limit']
if 'cpu_reservation' in allocations:
allocation.reservation = allocations['cpu_reservation']
if 'cpu_shares_level' in allocations:
shares = client_factory.create('ns0:SharesInfo')
shares.level = allocations['cpu_shares_level']
if (shares.level == 'custom' and
'cpu_shares_share' in allocations):
shares.shares = allocations['cpu_shares_share']
else:
shares.shares = 0
allocation.shares = shares
config_spec.cpuAllocation = allocation
vif_spec_list = []
for vif_info in vif_infos:
vif_spec = _create_vif_spec(client_factory, vif_info)
vif_spec_list.append(vif_spec)
device_config_spec = vif_spec_list
config_spec.deviceChange = device_config_spec
# add vm-uuid and iface-id.x values for Neutron
extra_config = []
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.vm-uuid"
opt.value = instance['uuid']
extra_config.append(opt)
port_index = 0
for vif_info in vif_infos:
if vif_info['iface_id']:
extra_config.append(_iface_id_option_value(client_factory,
vif_info['iface_id'],
port_index))
port_index += 1
config_spec.extraConfig = extra_config
return config_spec
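# Rough usage sketch (the datastore name is illustrative): the spec built
# above is what ultimately gets handed to the vSphere CreateVM_Task call, e.g.
#
#     config_spec = get_vm_create_spec(client_factory, instance,
#                                      instance['uuid'], 'ds1', vif_infos)
#
# Note that config_spec.instanceUuid is effectively the same value used as
# the key in the VM reference cache above.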
def get_vm_resize_spec(client_factory, instance):
"""Provides updates for a VM spec."""
resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
resize_spec.numCPUs = int(instance['vcpus'])
resize_spec.memoryMB = int(instance['memory_mb'])
return resize_spec
def create_controller_spec(client_factory, key,
adapter_type=constants.DEFAULT_ADAPTER_TYPE):
"""Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
# Create a controller for the Virtual Hard Disk
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if adapter_type == constants.ADAPTER_TYPE_BUSLOGIC:
virtual_controller = client_factory.create(
'ns0:VirtualBusLogicController')
elif adapter_type == constants.ADAPTER_TYPE_LSILOGICSAS:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicSASController')
elif adapter_type == constants.ADAPTER_TYPE_PARAVIRTUAL:
virtual_controller = client_factory.create(
'ns0:ParaVirtualSCSIController')
else:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicController')
virtual_controller.key = key
virtual_controller.busNumber = 0
virtual_controller.sharedBus = "noSharing"
virtual_device_config.device = virtual_controller
return virtual_device_config
def convert_vif_model(name):
"""Converts standard VIF_MODEL types to the internal VMware ones."""
if name == network_model.VIF_MODEL_E1000:
return 'VirtualE1000'
if name == network_model.VIF_MODEL_E1000E:
return 'VirtualE1000e'
if name not in ALL_SUPPORTED_NETWORK_DEVICES:
msg = _('%s is not supported.') % name
raise exception.Invalid(msg)
return name
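# For example (illustrative; constants from nova.network.model):
#     convert_vif_model(network_model.VIF_MODEL_E1000)   # -> 'VirtualE1000'
#     convert_vif_model('VirtualVmxnet')                  # passes through
#     convert_vif_model('rtl8139')                        # raises exception.Invalid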
def _create_vif_spec(client_factory, vif_info):
"""Builds a config spec for the addition of a new network
adapter to the VM.
"""
network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
network_spec.operation = "add"
# Keep compatible with other Hyper vif model parameter.
vif_info['vif_model'] = convert_vif_model(vif_info['vif_model'])
vif = 'ns0:' + vif_info['vif_model']
net_device = client_factory.create(vif)
# NOTE(asomya): Only works on ESXi if the portgroup binding is set to
# ephemeral. Invalid configuration if set to static and the NIC does
# not come up on boot if set to dynamic.
network_ref = vif_info['network_ref']
network_name = vif_info['network_name']
mac_address = vif_info['mac_address']
backing = None
if network_ref and network_ref['type'] == 'OpaqueNetwork':
backing_name = ''.join(['ns0:VirtualEthernetCard',
'OpaqueNetworkBackingInfo'])
backing = client_factory.create(backing_name)
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
'VirtualPortBackingInfo'])
backing = client_factory.create(backing_name)
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
portgroup.portgroupKey = network_ref['dvpg']
backing.port = portgroup
else:
backing = client_factory.create(
'ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network_name
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
net_device.connectable = connectable_spec
net_device.backing = backing
# The server assigns a key to the device. Here we pass a negative temporary
# key: actual keys are positive numbers, and we don't want a clash with the
# key that the server might associate with the device.
net_device.key = -47
net_device.addressType = "manual"
net_device.macAddress = mac_address
net_device.wakeOnLanEnabled = True
network_spec.device = net_device
return network_spec
def get_network_attach_config_spec(client_factory, vif_info, index):
"""Builds the vif attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
vif_spec = _create_vif_spec(client_factory, vif_info)
config_spec.deviceChange = [vif_spec]
if vif_info['iface_id'] is not None:
config_spec.extraConfig = [_iface_id_option_value(client_factory,
vif_info['iface_id'],
index)]
return config_spec
def get_network_detach_config_spec(client_factory, device, port_index):
"""Builds the vif detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
virtual_device_config.device = device
config_spec.deviceChange = [virtual_device_config]
# If a key is already present then it cannot be deleted, only updated.
# This enables us to reuse this key if there is an additional
# attachment. The keys need to be preserved. This is due to the fact
# that there is logic on the ESX that does the network wiring
# according to these values. If they are changed then this will
# break networking to and from the interface.
config_spec.extraConfig = [_iface_id_option_value(client_factory,
'free',
port_index)]
return config_spec
def get_iscsi_attach_config_spec(client_factory,
disk_type="preallocated",
file_path=None,
disk_size=None,
linked_clone=False,
controller_key=None,
unit_number=None,
device_name=None):
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_scsi_config_spec = create_virtual_scsi_spec(client_factory,
controller_key,
disk_type, file_path,
disk_size, linked_clone,
unit_number,
device_name)
device_config_spec.append(virtual_scsi_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vmdk_attach_config_spec(client_factory,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
controller_key=None,
unit_number=None,
device_name=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_disk_spec(client_factory,
controller_key, disk_type, file_path,
disk_size, linked_clone,
unit_number, device_name)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_cdrom_attach_config_spec(client_factory,
datastore,
file_path,
controller_key,
cdrom_unit_number):
"""Builds and returns the cdrom attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
destroy_disk=False):
"""Builds the vmdk detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = detach_virtual_disk_spec(client_factory,
device,
destroy_disk)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
"""Builds extra spec fields from a dictionary."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
# add the key value pairs
extra_config = []
for key, value in extra_opts.iteritems():
opt = client_factory.create('ns0:OptionValue')
opt.key = key
opt.value = value
extra_config.append(opt)
config_spec.extraConfig = extra_config
return config_spec
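# Minimal illustration (hypothetical key/value pair):
#     get_vm_extra_config_spec(client_factory, {'nvp.vm-uuid': instance['uuid']})
# returns a config spec whose extraConfig holds a single OptionValue entry.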
def get_vmdk_path(session, vm_ref, instance):
"""Gets the vmdk file path for specified instance."""
hardware_devices = session._call_method(vim_util,
"get_dynamic_property", vm_ref, "VirtualMachine",
"config.hardware.device")
(vmdk_path, adapter_type, disk_type) = get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance['uuid'])
return vmdk_path
def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None,
is_shared=False):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
vmdk_controller_key = None
disk_type = None
adapter_type_dict = {}
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if device.backing.__class__.__name__ == \
"VirtualDiskFlatVer2BackingInfo":
if uuid:
if uuid in device.backing.fileName:
vmdk_file_path = device.backing.fileName
else:
vmdk_file_path = device.backing.fileName
if is_shared:
vmdk_controller_key = 1001
else:
vmdk_controller_key = 1000
if getattr(device.backing, 'thinProvisioned', False):
disk_type = "thin"
else:
if getattr(device.backing, 'eagerlyScrub', False):
disk_type = "eagerZeroedThick"
else:
disk_type = constants.DEFAULT_DISK_TYPE
elif device.__class__.__name__ == "VirtualLsiLogicController":
adapter_type_dict[device.key] = constants.DEFAULT_ADAPTER_TYPE
elif device.__class__.__name__ == "VirtualBusLogicController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_BUSLOGIC
elif device.__class__.__name__ == "VirtualIDEController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_IDE
elif device.__class__.__name__ == "VirtualLsiLogicSASController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_LSILOGICSAS
elif device.__class__.__name__ == "ParaVirtualSCSIController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_PARAVIRTUAL
adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
return (vmdk_file_path, adapter_type, disk_type)
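# Illustrative sketch only: `hardware_devices` is assumed to be the VM's
# "config.hardware.device" property, fetched the same way as in
# get_vmdk_path above.
def _example_describe_root_disk(hardware_devices):
    vmdk_path, adapter_type, disk_type = get_vmdk_path_and_adapter_type(
        hardware_devices)
    return '%s (adapter: %s, disk: %s)' % (vmdk_path, adapter_type, disk_type)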
def _find_controller_slot(controller_keys, taken, max_unit_number, is_shared):
if is_shared:
controller_keys = [1001]
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if unit_number not in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController',
'ParaVirtualSCSIController']
def _find_allocated_slots(devices):
"""Return dictionary which maps controller_key to list of allocated unit
numbers for that controller_key.
"""
taken = {}
for device in devices:
if hasattr(device, 'controllerKey') and hasattr(device, 'unitNumber'):
unit_numbers = taken.setdefault(device.controllerKey, [])
unit_numbers.append(device.unitNumber)
if _is_scsi_controller(device):
# the SCSI controller sits on its own bus
unit_numbers = taken.setdefault(device.key, [])
unit_numbers.append(device.scsiCtlrUnitNumber)
return taken
def allocate_controller_key_and_unit_number(client_factory, devices,
adapter_type, is_shared):
"""This function inspects the current set of hardware devices and returns
controller_key and unit_number that can be used for attaching a new virtual
disk to adapter with the given adapter_type.
"""
if devices.__class__.__name__ == "ArrayOfVirtualDevice":
devices = devices.VirtualDevice
taken = _find_allocated_slots(devices)
ret = None
if adapter_type == constants.ADAPTER_TYPE_IDE:
ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
ret = _find_controller_slot(ide_keys, taken, 2, is_shared)
elif adapter_type in [constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_PARAVIRTUAL]:
scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
ret = _find_controller_slot(scsi_keys, taken, 16, is_shared)
if ret:
return ret[0], ret[1], None
    # create a new controller with the specified type and return its spec
    if is_shared:
        controller_key = 1001
    else:
        controller_key = 1000
controller_spec = create_controller_spec(client_factory, controller_key,
adapter_type)
return controller_key, 0, controller_spec
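# Illustrative sketch only: shows how the allocation above is typically
# combined with create_virtual_disk_spec (defined further below). When no
# suitable controller exists, controller_spec is not None and must be added
# to the same deviceChange list so that the controller and the disk are
# created in a single reconfigure call. `devices` is assumed to be the VM's
# "config.hardware.device" property.
def _example_attach_new_disk(client_factory, devices, file_path, disk_size):
    device_config_spec = []
    controller_key, unit_number, controller_spec = (
        allocate_controller_key_and_unit_number(
            client_factory, devices, constants.DEFAULT_ADAPTER_TYPE,
            is_shared=False))
    if controller_spec:
        device_config_spec.append(controller_spec)
    disk_spec = create_virtual_disk_spec(client_factory, controller_key,
                                         file_path=file_path,
                                         disk_size=disk_size,
                                         unit_number=unit_number)
    device_config_spec.append(disk_spec)
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.deviceChange = device_config_spec
    return config_spec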
def get_rdm_disk(hardware_devices, uuid):
"""Gets the RDM disk key."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskRawDiskMappingVer1BackingInfo" and
device.backing.lunUuid == uuid):
return device
def get_vmdk_create_spec(client_factory, size_in_kb,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
def create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number):
"""Builds spec for the creation of a new Virtual CDROM to the VM."""
config_spec = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
config_spec.operation = "add"
cdrom = client_factory.create('ns0:VirtualCdrom')
cdrom_device_backing = client_factory.create(
'ns0:VirtualCdromIsoBackingInfo')
cdrom_device_backing.datastore = datastore
cdrom_device_backing.fileName = file_path
cdrom.backing = cdrom_device_backing
cdrom.controllerKey = controller_key
cdrom.unitNumber = cdrom_unit_number
cdrom.key = -1
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
cdrom.connectable = connectable_spec
config_spec.device = cdrom
return config_spec
def create_virtual_disk_spec(client_factory, controller_key,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
unit_number=None,
device_name=None):
"""Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
if disk_type == "rdm" or disk_type == "rdmp":
disk_file_backing = client_factory.create(
'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
disk_file_backing.compatibilityMode = "virtualMode" \
if disk_type == "rdm" else "physicalMode"
disk_file_backing.diskMode = "independent_persistent"
disk_file_backing.deviceName = device_name or ""
else:
disk_file_backing = client_factory.create(
'ns0:VirtualDiskFlatVer2BackingInfo')
disk_file_backing.diskMode = "persistent"
if disk_type == "thin":
disk_file_backing.thinProvisioned = True
else:
if disk_type == "eagerZeroedThick":
disk_file_backing.eagerlyScrub = True
disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
if not linked_clone:
virtual_disk.backing = disk_file_backing
else:
virtual_disk.backing = copy.copy(disk_file_backing)
virtual_disk.backing.fileName = ""
virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
    # The server assigns a key to the device. Here we pass a negative
    # placeholder key, because actual keys are positive numbers and we don't
    # want a clash with the key that the server will associate with the device.
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
virtual_disk.unitNumber = unit_number or 0
virtual_disk.capacityInKB = disk_size or 0
virtual_device_config.device = virtual_disk
return virtual_device_config
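# Illustrative sketch only: for raw device mapping disks the backing class is
# selected by disk_type ("rdm" or "rdmp") and device_name identifies the LUN.
# Both the datastore path and the device name below are hypothetical values.
def _example_rdm_disk_spec(client_factory, controller_key, unit_number):
    return create_virtual_disk_spec(
        client_factory, controller_key, disk_type='rdmp',
        file_path='[datastore1] volume-000/volume-000.vmdk',
        unit_number=unit_number,
        device_name='/vmfs/devices/disks/naa.0123456789abcdef')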
def create_virtual_scsi_spec(client_factory, controller_key,
disk_type="preallocated",
file_path=None,
disk_size=None,
linked_clone=False,
unit_number=None,
device_name=None):
"""
Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_scsi_controller = client_factory.create('ns0:VirtualLsiLogicController')
virtual_scsi_controller.sharedBus = 'physicalSharing'
virtual_scsi_controller.key = -int(controller_key)
virtual_scsi_controller.busNumber = 1
virtual_device_config.device = virtual_scsi_controller
return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
"""Builds spec for the detach of an already existing Virtual Disk from VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
if destroy_disk:
virtual_device_config.fileOperation = "destroy"
virtual_device_config.device = device
return virtual_device_config
def clone_vm_spec(client_factory, location,
power_on=False, snapshot=None, template=False, config=None):
"""Builds the VM clone spec."""
clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = location
clone_spec.powerOn = power_on
if snapshot:
clone_spec.snapshot = snapshot
if config is not None:
clone_spec.config = config
clone_spec.template = template
return clone_spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
disk_move_type="moveAllDiskBackingsAndAllowSharing"):
"""Builds the VM relocation spec."""
rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
rel_spec.datastore = datastore
rel_spec.diskMoveType = disk_move_type
if host:
rel_spec.host = host
return rel_spec
def get_machine_id_change_spec(client_factory, machine_id_str):
"""Builds the machine id change config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt = client_factory.create('ns0:OptionValue')
opt.key = "machine.id"
opt.value = machine_id_str
virtual_machine_config_spec.extraConfig = [opt]
return virtual_machine_config_spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
port_group_name, vlan_id):
"""Builds the virtual switch port group add spec."""
vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
vswitch_port_group_spec.name = port_group_name
vswitch_port_group_spec.vswitchName = vswitch_name
# VLAN ID of 0 means that VLAN tagging is not to be done for the network.
vswitch_port_group_spec.vlanId = int(vlan_id)
policy = client_factory.create('ns0:HostNetworkPolicy')
nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
nicteaming.notifySwitches = True
policy.nicTeaming = nicteaming
vswitch_port_group_spec.policy = policy
return vswitch_port_group_spec
def get_vnc_config_spec(client_factory, port):
"""Builds the vnc config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt_enabled = client_factory.create('ns0:OptionValue')
opt_enabled.key = "RemoteDisplay.vnc.enabled"
opt_enabled.value = "true"
opt_port = client_factory.create('ns0:OptionValue')
opt_port.key = "RemoteDisplay.vnc.port"
opt_port.value = port
extras = [opt_enabled, opt_port]
virtual_machine_config_spec.extraConfig = extras
return virtual_machine_config_spec
def get_vnc_port(session):
"""Return VNC port for an VM or None if there is no available port."""
min_port = CONF.vmware.vnc_port
port_total = CONF.vmware.vnc_port_total
allocated_ports = _get_allocated_vnc_ports(session)
max_port = min_port + port_total
for port in range(min_port, max_port):
if port not in allocated_ports:
return port
raise exception.ConsolePortRangeExhausted(min_port=min_port,
max_port=max_port)
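# Illustrative sketch only: if, say, CONF.vmware.vnc_port were 5900 and
# CONF.vmware.vnc_port_total were 3, the scan above would try ports
# 5900-5902 before raising ConsolePortRangeExhausted. A free port is then
# typically enabled on the VM like this (reconfigure_vm is defined below):
def _example_enable_vnc(session, vm_ref):
    port = get_vnc_port(session)
    client_factory = session._get_vim().client.factory
    config_spec = get_vnc_config_spec(client_factory, port)
    reconfigure_vm(session, vm_ref, config_spec)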
def _get_allocated_vnc_ports(session):
"""Return an integer set of all allocated VNC ports."""
# TODO(rgerganov): bug #1256944
# The VNC port should be unique per host, not per vCenter
vnc_ports = set()
result = session._call_method(vim_util, "get_objects",
"VirtualMachine", [VNC_CONFIG_KEY])
while result:
for obj in result.objects:
if not hasattr(obj, 'propSet'):
continue
dynamic_prop = obj.propSet[0]
option_value = dynamic_prop.val
vnc_port = option_value.value
vnc_ports.add(int(vnc_port))
token = _get_token(result)
if token:
result = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return vnc_ports
# NOTE(mdbooth): this convenience function is temporarily duplicated in
# ds_util. The correct fix is to handle paginated results as they are returned
# from the relevant vim_util function. However, vim_util is currently
# effectively deprecated as we migrate to oslo.vmware. This duplication will be
# removed when we fix it properly in oslo.vmware.
def _get_token(results):
"""Get the token from the property results."""
return getattr(results, 'token', None)
def _get_reference_for_value(results, value):
    for obj in results.objects:
        if obj.obj.value == value:
            return obj
def _get_object_for_value(results, value):
    for obj in results.objects:
        if obj.propSet[0].val == value:
            return obj.obj
def _get_object_for_optionvalue(results, value):
    for obj in results.objects:
        if hasattr(obj, "propSet") and obj.propSet:
            if obj.propSet[0].val.value == value:
                return obj.obj
def _get_object_from_results(session, results, value, func):
    while results:
        token = _get_token(results)
        found = func(results, value)
        if found:
            if token:
                session._call_method(vim_util,
                                     "cancel_retrieve",
                                     token)
            return found
        if token:
            results = session._call_method(vim_util,
                                           "continue_to_get_objects",
                                           token)
        else:
            return None
def _cancel_retrieve_if_necessary(session, results):
token = _get_token(results)
if token:
results = session._call_method(vim_util,
"cancel_retrieve",
token)
def _get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, vm_name,
_get_object_for_value)
@vm_ref_cache_from_name
def get_vm_ref_from_name(session, vm_name):
return (_get_vm_ref_from_vm_uuid(session, vm_name) or
_get_vm_ref_from_name(session, vm_name))
def _get_vm_ref_from_uuid(session, instance_uuid):
"""Get reference to the VM with the uuid specified.
This method reads all of the names of the VM's that are running
on the backend, then it filters locally the matching
instance_uuid. It is far more optimal to use
_get_vm_ref_from_vm_uuid.
"""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
"""Get reference to the VM.
The method will make use of FindAllByUuid to get the VM reference.
This method finds all VM's on the backend that match the
instance_uuid, more specifically all VM's on the backend that have
'config_spec.instanceUuid' set to 'instance_uuid'.
"""
vm_refs = session._call_method(
session._get_vim(),
"FindAllByUuid",
session._get_vim().service_content.searchIndex,
uuid=instance_uuid,
vmSearch=True,
instanceUuid=True)
if vm_refs:
return vm_refs[0]
def _get_vm_ref_from_extraconfig(session, instance_uuid):
"""Get reference to the VM with the uuid specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_optionvalue)
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
"""Get reference to the VM through uuid or vm name."""
uuid = instance['uuid']
vm_ref = (search_vm_ref_by_identifier(session, uuid) or
_get_vm_ref_from_name(session, instance['name']))
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=uuid)
return vm_ref
def search_vm_ref_by_identifier(session, identifier):
"""Searches VM reference using the identifier.
This method is primarily meant to separate out part of the logic for
vm_ref search that could be use directly in the special case of
migrating the instance. For querying VM linked to an instance always
use get_vm_ref instead.
"""
vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
_get_vm_ref_from_extraconfig(session, identifier) or
_get_vm_ref_from_uuid(session, identifier))
return vm_ref
def get_host_ref_from_id(session, host_id, property_list=None):
"""Get a host reference object for a host_id string."""
if property_list is None:
property_list = ['name']
host_refs = session._call_method(
vim_util, "get_objects",
"HostSystem", property_list)
return _get_object_from_results(session, host_refs, host_id,
_get_reference_for_value)
def get_host_id_from_vm_ref(session, vm_ref):
"""This method allows you to find the managed object
ID of the host running a VM. Since vMotion can
change the value, you should not presume that this
is a value that you can cache for very long and
should be prepared to allow for it to change.
:param session: a vSphere API connection
:param vm_ref: a reference object to the running VM
:return: the host_id running the virtual machine
"""
# to prevent typographical errors below
property_name = 'runtime.host'
# a property collector in VMware vSphere Management API
# is a set of local representations of remote values.
# property_set here, is a local representation of the
# properties we are querying for.
property_set = session._call_method(
vim_util, "get_object_properties",
None, vm_ref, vm_ref._type, [property_name])
prop = property_from_property_set(
property_name, property_set)
if prop is not None:
prop = prop.val.value
else:
# reaching here represents an impossible state
raise RuntimeError(
"Virtual Machine %s exists without a runtime.host!"
% (vm_ref))
return prop
def property_from_property_set(property_name, property_set):
    '''Use this method to filter property collector results.
    Because network traffic is expensive, multiple VMwareAPI calls will
    sometimes batch up properties to be collected, so results may contain
    many different values gathered for multiple purposes.
    This helper filters the list down to a single result and then filters
    the properties of that result to find the single value of whatever
    type resides in it. This could be a ManagedObjectReference ID or a
    complex value.
    :param property_name: name of the property you want
    :param property_set: all results from the query
    :return: the value of the property.
    '''
for prop in property_set.objects:
p = _property_from_propSet(prop.propSet, property_name)
if p is not None:
return p
def _property_from_propSet(propSet, name='name'):
for p in propSet:
if p.name == name:
return p
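# Illustrative sketch only: narrows a property collector result down to the
# single DynamicProperty that was requested; here 'runtime.powerState' is
# assumed to have been the requested property.
def _example_power_state(session, vm_ref):
    property_set = session._call_method(
        vim_util, "get_object_properties",
        None, vm_ref, vm_ref._type, ['runtime.powerState'])
    prop = property_from_property_set('runtime.powerState', property_set)
    if prop is not None:
        return prop.val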
def get_host_ref_for_vm(session, instance, props):
"""Get the ESXi host running a VM by its name."""
vm_ref = get_vm_ref(session, instance)
host_id = get_host_id_from_vm_ref(session, vm_ref)
return get_host_ref_from_id(session, host_id, props)
def get_host_name_for_vm(session, instance):
"""Get the ESXi host running a VM by its name."""
host_ref = get_host_ref_for_vm(session, instance, ['name'])
return get_host_name_from_host_ref(host_ref)
def get_host_name_from_host_ref(host_ref):
p = _property_from_propSet(host_ref.propSet)
if p is not None:
return p.val
def get_vm_state_from_name(session, vm_name):
vm_ref = get_vm_ref_from_name(session, vm_name)
vm_state = session._call_method(vim_util, "get_dynamic_property",
vm_ref, "VirtualMachine", "runtime.powerState")
return vm_state
def get_stats_from_cluster(session, cluster):
"""Get the aggregate resource stats of a cluster."""
cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
mem_info = {'total': 0, 'free': 0}
# Get the Host and Resource Pool Managed Object Refs
prop_dict = session._call_method(vim_util, "get_dynamic_properties",
cluster, "ClusterComputeResource",
["host", "resourcePool"])
if prop_dict:
host_ret = prop_dict.get('host')
if host_ret:
host_mors = host_ret.ManagedObjectReference
result = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"HostSystem", host_mors,
["summary.hardware", "summary.runtime"])
for obj in result.objects:
hardware_summary = obj.propSet[0].val
runtime_summary = obj.propSet[1].val
if (runtime_summary.inMaintenanceMode is False and
runtime_summary.connectionState == "connected"):
# Total vcpus is the sum of all pCPUs of individual hosts
# The overcommitment ratio is factored in by the scheduler
cpu_info['vcpus'] += hardware_summary.numCpuThreads
cpu_info['cores'] += hardware_summary.numCpuCores
cpu_info['vendor'].append(hardware_summary.vendor)
cpu_info['model'].append(hardware_summary.cpuModel)
res_mor = prop_dict.get('resourcePool')
if res_mor:
res_usage = session._call_method(vim_util, "get_dynamic_property",
res_mor, "ResourcePool", "summary.runtime.memory")
if res_usage:
                # maxUsage is the memory limit of the cluster available to VMs
mem_info['total'] = int(res_usage.maxUsage / units.Mi)
                # overallUsage is the hypervisor's view of memory usage by VMs
consumed = int(res_usage.overallUsage / units.Mi)
mem_info['free'] = mem_info['total'] - consumed
stats = {'cpu': cpu_info, 'mem': mem_info}
return stats
def get_host_ref(session, cluster=None):
"""Get reference to a host within the cluster specified."""
if cluster is None:
results = session._call_method(vim_util, "get_objects",
"HostSystem")
_cancel_retrieve_if_necessary(session, results)
host_mor = results.objects[0].obj
else:
host_ret = session._call_method(vim_util, "get_dynamic_property",
cluster, "ClusterComputeResource",
"host")
if not host_ret or not host_ret.ManagedObjectReference:
msg = _('No host available on cluster')
raise exception.NoValidHost(reason=msg)
host_mor = host_ret.ManagedObjectReference[0]
return host_mor
def propset_dict(propset):
"""Turn a propset list into a dictionary
PropSet is an optional attribute on ObjectContent objects
that are returned by the VMware API.
You can read more about these at:
| http://pubs.vmware.com/vsphere-51/index.jsp
| #com.vmware.wssdk.apiref.doc/
| vmodl.query.PropertyCollector.ObjectContent.html
:param propset: a property "set" from ObjectContent
:return: dictionary representing property set
"""
if propset is None:
return {}
    # TODO(hartsocks): once support for Python 2.6 is dropped
    # change to {prop.name: prop.val for prop in propset}
return dict([(prop.name, prop.val) for prop in propset])
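# Illustrative sketch only: `obj` is assumed to be one element of a
# results.objects list, as returned by the "get_objects" calls used
# throughout this module; the dict form makes named lookups trivial.
def _example_object_name(obj):
    props = propset_dict(getattr(obj, 'propSet', None))
    return props.get('name')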
def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
volume_uuid in device.backing.fileName):
return device.backing.uuid
def get_vmdk_backed_disk_device(hardware_devices, uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
device.backing.uuid == uuid):
return device
def get_vmdk_volume_disk(hardware_devices, path=None):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk"):
if not path or path == device.backing.fileName:
return device
def get_res_pool_ref(session, cluster, node_mo_id):
"""Get the resource pool."""
if cluster is None:
# With no cluster named, use the root resource pool.
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
# The 0th resource pool is always the root resource pool on both ESX
# and vCenter.
res_pool_ref = results.objects[0].obj
else:
if cluster.value == node_mo_id:
# Get the root resource pool of the cluster
res_pool_ref = session._call_method(vim_util,
"get_dynamic_property",
cluster,
"ClusterComputeResource",
"resourcePool")
return res_pool_ref
def get_all_cluster_mors(session):
"""Get all the clusters in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get cluster references %s") % excep)
def get_all_res_pool_mors(session):
"""Get all the resource pools in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get resource pool references " "%s") % excep)
def get_dynamic_property_mor(session, mor_ref, attribute):
"""Get the value of an attribute for a given managed object."""
return session._call_method(vim_util, "get_dynamic_property",
mor_ref, mor_ref._type, attribute)
def find_entity_mor(entity_list, entity_name):
"""Returns managed object ref for given cluster or resource pool name."""
return [mor for mor in entity_list if (hasattr(mor, 'propSet') and
mor.propSet[0].val == entity_name)]
def get_all_cluster_refs_by_name(session, path_list):
"""Get reference to the Cluster, ResourcePool with the path specified.
The path is the display name. This can be the full path as well.
The input will have the list of clusters and resource pool names
"""
cls = get_all_cluster_mors(session)
if not cls:
return
res = get_all_res_pool_mors(session)
if not res:
return
path_list = [path.strip() for path in path_list]
list_obj = []
for entity_path in path_list:
# entity_path could be unique cluster and/or resource-pool name
res_mor = find_entity_mor(res, entity_path)
cls_mor = find_entity_mor(cls, entity_path)
cls_mor.extend(res_mor)
for mor in cls_mor:
list_obj.append((mor.obj, mor.propSet[0].val))
return get_dict_mor(session, list_obj)
def get_dict_mor(session, list_obj):
"""The input is a list of objects in the form
(manage_object,display_name)
The managed object will be in the form
{ value = "domain-1002", _type = "ClusterComputeResource" }
Output data format:
| dict_mors = {
| 'respool-1001': { 'cluster_mor': clusterMor,
| 'res_pool_mor': resourcePoolMor,
| 'name': display_name },
| 'domain-1002': { 'cluster_mor': clusterMor,
| 'res_pool_mor': resourcePoolMor,
| 'name': display_name },
| }
"""
dict_mors = {}
for obj_ref, path in list_obj:
if obj_ref._type == "ResourcePool":
# Get owner cluster-ref mor
cluster_ref = get_dynamic_property_mor(session, obj_ref, "owner")
dict_mors[obj_ref.value] = {'cluster_mor': cluster_ref,
'res_pool_mor': obj_ref,
'name': path,
}
else:
# Get default resource pool of the cluster
res_pool_ref = get_dynamic_property_mor(session,
obj_ref, "resourcePool")
dict_mors[obj_ref.value] = {'cluster_mor': obj_ref,
'res_pool_mor': res_pool_ref,
'name': path,
}
return dict_mors
def get_mo_id_from_instance(instance):
"""Return the managed object ID from the instance.
The instance['node'] will have the hypervisor_hostname field of the
compute node on which the instance exists or will be provisioned.
This will be of the form
'respool-1001(MyResPoolName)'
'domain-1001(MyClusterName)'
"""
return instance['node'].partition('(')[0]
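# Illustrative, self-checking examples (both node strings are hypothetical):
def _example_mo_id():
    assert get_mo_id_from_instance(
        {'node': 'respool-1001(MyResPoolName)'}) == 'respool-1001'
    assert get_mo_id_from_instance(
        {'node': 'domain-1001(MyClusterName)'}) == 'domain-1001'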
def get_vmdk_adapter_type(adapter_type):
"""Return the adapter type to be used in vmdk descriptor.
Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic & ParaVirtual
because Virtual Disk Manager API does not recognize the newer controller
types.
"""
if adapter_type in [constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL]:
vmdk_adapter_type = constants.DEFAULT_ADAPTER_TYPE
else:
vmdk_adapter_type = adapter_type
return vmdk_adapter_type
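# Illustrative, self-checking example of the mapping above: LSI-SAS and
# ParaVirtual collapse onto the default adapter type, while other types
# pass through unchanged.
def _example_vmdk_adapter_type():
    assert (get_vmdk_adapter_type(constants.ADAPTER_TYPE_LSILOGICSAS) ==
            constants.DEFAULT_ADAPTER_TYPE)
    assert (get_vmdk_adapter_type(constants.ADAPTER_TYPE_BUSLOGIC) ==
            constants.ADAPTER_TYPE_BUSLOGIC)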
def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
"""Create VM on ESX host."""
LOG.debug("Creating VM on the ESX host", instance=instance)
vm_create_task = session._call_method(
session._get_vim(),
"CreateVM_Task", vm_folder,
config=config_spec, pool=res_pool_ref)
task_info = session._wait_for_task(vm_create_task)
LOG.debug("Created VM on the ESX host", instance=instance)
return task_info.result
def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
virtual_disk_path, size_in_kb):
# Create a Virtual Disk of the size of the flat vmdk file. This is
# done just to generate the meta-data file whose specifics
# depend on the size of the disk, thin/thick provisioning and the
# storage adapter type.
LOG.debug("Creating Virtual Disk of size "
"%(vmdk_file_size_in_kb)s KB and adapter type "
"%(adapter_type)s on the data store",
{"vmdk_file_size_in_kb": size_in_kb,
"adapter_type": adapter_type})
vmdk_create_spec = get_vmdk_create_spec(
session._get_vim().client.factory,
size_in_kb,
adapter_type,
disk_type)
vmdk_create_task = session._call_method(
session._get_vim(),
"CreateVirtualDisk_Task",
session._get_vim().service_content.virtualDiskManager,
name=virtual_disk_path,
datacenter=dc_ref,
spec=vmdk_create_spec)
session._wait_for_task(vmdk_create_task)
LOG.debug("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
" KB and type %(disk_type)s",
{"vmdk_file_size_in_kb": size_in_kb,
"disk_type": disk_type})
def copy_virtual_disk(session, dc_ref, source, dest):
"""Copy a sparse virtual disk to a thin virtual disk. This is also
done to generate the meta-data file whose specifics
depend on the size of the disk, thin/thick provisioning and the
storage adapter type.
    :param session: session for connection
    :param dc_ref: data center reference object
    :param source: source datastore path
    :param dest: destination datastore path
"""
LOG.debug("Copying Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
vim = session._get_vim()
vmdk_copy_task = session._call_method(
vim,
"CopyVirtualDisk_Task",
vim.service_content.virtualDiskManager,
sourceName=source,
sourceDatacenter=dc_ref,
destName=dest)
session._wait_for_task(vmdk_copy_task)
LOG.debug("Copied Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
def reconfigure_vm(session, vm_ref, config_spec):
"""Reconfigure a VM according to the config spec."""
reconfig_task = session._call_method(session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=config_spec)
session._wait_for_task(reconfig_task)
def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
vmfolder_ref):
"""Clone VM and link the cloned VM to the instance.
Clones the passed vm_ref into a new VM and links the cloned vm to
the passed instance.
"""
if vm_ref is None:
LOG.warn(_("vmwareapi:vm_util:clone_vmref_for_instance, called "
"with vm_ref=None"))
raise vexc.MissingParameter(param="vm_ref")
# Get the clone vm spec
client_factory = session._get_vim().client.factory
rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref,
disk_move_type='moveAllDiskBackingsAndDisallowSharing')
extra_opts = {'nvp.vm-uuid': instance['uuid']}
config_spec = get_vm_extra_config_spec(client_factory, extra_opts)
config_spec.instanceUuid = instance['uuid']
clone_spec = clone_vm_spec(client_factory, rel_spec, config=config_spec)
# Clone VM on ESX host
LOG.debug("Cloning VM for instance %s", instance['uuid'],
instance=instance)
vm_clone_task = session._call_method(session._get_vim(), "CloneVM_Task",
vm_ref, folder=vmfolder_ref,
name=instance['uuid'],
spec=clone_spec)
session._wait_for_task(vm_clone_task)
LOG.debug("Cloned VM for instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def disassociate_vmref_from_instance(session, instance, vm_ref=None,
suffix='-orig'):
"""Disassociates the VM linked to the instance.
Disassociates the VM linked to the instance by performing the following
1. Update the extraConfig property for nvp.vm-uuid to be replaced with
instance[uuid]+suffix
2. Rename the VM to be instance[uuid]+suffix instead
3. Reset the instanceUUID of the VM to a new generated value
"""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
extra_opts = {'nvp.vm-uuid': instance['uuid'] + suffix}
client_factory = session._get_vim().client.factory
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid'] + suffix
reconfig_spec.instanceUuid = ''
LOG.debug("Disassociating VM from instance %s", instance['uuid'],
instance=instance)
reconfigure_vm(session, vm_ref, reconfig_spec)
LOG.debug("Disassociated VM from instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def associate_vmref_for_instance(session, instance, vm_ref=None,
suffix='-orig'):
"""Associates the VM to the instance.
Associates the VM to the instance by performing the following
1. Update the extraConfig property for nvp.vm-uuid to be replaced with
instance[uuid]
2. Rename the VM to be instance[uuid]
3. Reset the instanceUUID of the VM to be instance[uuid]
"""
if vm_ref is None:
vm_ref = search_vm_ref_by_identifier(session,
instance['uuid'] + suffix)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['uuid']
+ suffix)
extra_opts = {'nvp.vm-uuid': instance['uuid']}
client_factory = session._get_vim().client.factory
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid']
reconfig_spec.instanceUuid = instance['uuid']
LOG.debug("Associating VM to instance %s", instance['uuid'],
instance=instance)
reconfigure_vm(session, vm_ref, reconfig_spec)
LOG.debug("Associated VM to instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def power_on_instance(session, instance, vm_ref=None):
"""Power on the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering on the VM", instance=instance)
try:
poweron_task = session._call_method(
session._get_vim(),
"PowerOnVM_Task", vm_ref)
session._wait_for_task(poweron_task)
LOG.debug("Powered on the VM", instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug("VM already powered on", instance=instance)
def get_values_from_object_properties(session, props):
"""Get the specific values from a object list.
The object values will be returned as a dictionary.
"""
dictionary = {}
while props:
for elem in props.objects:
propdict = propset_dict(elem.propSet)
dictionary.update(propdict)
token = _get_token(props)
if not token:
break
props = session._call_method(vim_util,
"continue_to_get_objects",
token)
return dictionary
def _get_vm_port_indices(session, vm_ref):
extra_config = session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
'config.extraConfig')
ports = []
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value != 'free'):
ports.append(int(option.key.split('.')[2]))
return ports
def get_attach_port_index(session, vm_ref):
"""Get the first free port index."""
ports = _get_vm_port_indices(session, vm_ref)
# No ports are configured on the VM
if not ports:
return 0
ports.sort()
configured_ports_len = len(ports)
# Find the first free port index
for port_index in range(configured_ports_len):
if port_index != ports[port_index]:
return port_index
return configured_ports_len
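# Illustrative, self-contained restatement of the free-slot scan above: with
# indices 0, 1 and 3 taken the first free slot is 2; with 0, 1 and 2 taken
# it is 3 (one past the end).
def _example_first_free_index():
    def first_free(ports):
        ports = sorted(ports)
        for port_index in range(len(ports)):
            if port_index != ports[port_index]:
                return port_index
        return len(ports)
    assert first_free([0, 1, 3]) == 2
    assert first_free([0, 1, 2]) == 3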
def get_vm_detach_port_index(session, vm_ref, iface_id):
extra_config = session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
'config.extraConfig')
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value == iface_id):
return int(option.key.split('.')[2])
def power_off_instance(session, instance, vm_ref=None):
"""Power off the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering off the VM", instance=instance)
try:
poweroff_task = session._call_method(session._get_vim(),
"PowerOffVM_Task", vm_ref)
session._wait_for_task(poweroff_task)
LOG.debug("Powered off the VM", instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug("VM already powered off", instance=instance)
def get_update_vif_spec(client_factory, vif_infos, mac_key_infos):
"""Builds the VM vif spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
vif_spec_list = []
for vif_info in vif_infos:
vif_spec = _create_vif_spec(client_factory, vif_info)
vif_spec.operation = "edit"
vif_spec.device.key = mac_key_infos.get(vif_spec.device.macAddress, -47)
vif_spec_list.append(vif_spec)
device_config_spec = vif_spec_list
config_spec.deviceChange = device_config_spec
return config_spec
| apache-2.0 |
BT-rmartin/server-tools | base_field_serialized/__init__.py | 38 | 1042 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import fields
from . import base_field_serialized
| agpl-3.0 |
clay23/lab4 | lib/werkzeug/testsuite/urls.py | 83 | 14595 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.urls
~~~~~~~~~~~~~~~~~~~~~~~
URL helper tests.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.datastructures import OrderedMultiDict
from werkzeug import urls
from werkzeug._compat import text_type, NativeStringIO, BytesIO
class URLsTestCase(WerkzeugTestCase):
def test_replace(self):
url = urls.url_parse('http://de.wikipedia.org/wiki/Troll')
self.assert_strict_equal(url.replace(query='foo=bar'),
urls.url_parse('http://de.wikipedia.org/wiki/Troll?foo=bar'))
self.assert_strict_equal(url.replace(scheme='https'),
urls.url_parse('https://de.wikipedia.org/wiki/Troll'))
def test_quoting(self):
self.assert_strict_equal(urls.url_quote(u'\xf6\xe4\xfc'), '%C3%B6%C3%A4%C3%BC')
self.assert_strict_equal(urls.url_unquote(urls.url_quote(u'#%="\xf6')), u'#%="\xf6')
self.assert_strict_equal(urls.url_quote_plus('foo bar'), 'foo+bar')
self.assert_strict_equal(urls.url_unquote_plus('foo+bar'), u'foo bar')
self.assert_strict_equal(urls.url_quote_plus('foo+bar'), 'foo%2Bbar')
self.assert_strict_equal(urls.url_unquote_plus('foo%2Bbar'), u'foo+bar')
self.assert_strict_equal(urls.url_encode({b'a': None, b'b': b'foo bar'}), 'b=foo+bar')
self.assert_strict_equal(urls.url_encode({u'a': None, u'b': u'foo bar'}), 'b=foo+bar')
self.assert_strict_equal(urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)'),
'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
self.assert_strict_equal(urls.url_quote_plus(42), '42')
self.assert_strict_equal(urls.url_quote(b'\xff'), '%FF')
def test_bytes_unquoting(self):
self.assert_strict_equal(urls.url_unquote(urls.url_quote(
u'#%="\xf6', charset='latin1'), charset=None), b'#%="\xf6')
def test_url_decoding(self):
x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel')
self.assert_strict_equal(x['foo'], u'42')
self.assert_strict_equal(x['bar'], u'23')
self.assert_strict_equal(x['uni'], u'Hänsel')
x = urls.url_decode(b'foo=42;bar=23;uni=H%C3%A4nsel', separator=b';')
self.assert_strict_equal(x['foo'], u'42')
self.assert_strict_equal(x['bar'], u'23')
self.assert_strict_equal(x['uni'], u'Hänsel')
x = urls.url_decode(b'%C3%9Ch=H%C3%A4nsel', decode_keys=True)
self.assert_strict_equal(x[u'Üh'], u'Hänsel')
def test_url_bytes_decoding(self):
x = urls.url_decode(b'foo=42&bar=23&uni=H%C3%A4nsel', charset=None)
self.assert_strict_equal(x[b'foo'], b'42')
self.assert_strict_equal(x[b'bar'], b'23')
self.assert_strict_equal(x[b'uni'], u'Hänsel'.encode('utf-8'))
def test_streamed_url_decoding(self):
item1 = u'a' * 100000
item2 = u'b' * 400
string = ('a=%s&b=%s&c=%s' % (item1, item2, item2)).encode('ascii')
gen = urls.url_decode_stream(BytesIO(string), limit=len(string),
return_iterator=True)
self.assert_strict_equal(next(gen), ('a', item1))
self.assert_strict_equal(next(gen), ('b', item2))
self.assert_strict_equal(next(gen), ('c', item2))
self.assert_raises(StopIteration, lambda: next(gen))
def test_stream_decoding_string_fails(self):
self.assert_raises(TypeError, urls.url_decode_stream, 'testing')
def test_url_encoding(self):
self.assert_strict_equal(urls.url_encode({'foo': 'bar 45'}), 'foo=bar+45')
d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
self.assert_strict_equal(urls.url_encode(d, sort=True), 'bar=23&blah=H%C3%A4nsel&foo=1')
self.assert_strict_equal(urls.url_encode(d, sort=True, separator=u';'), 'bar=23;blah=H%C3%A4nsel;foo=1')
def test_sorted_url_encode(self):
self.assert_strict_equal(urls.url_encode({u"a": 42, u"b": 23, 1: 1, 2: 2},
sort=True, key=lambda i: text_type(i[0])), '1=1&2=2&a=42&b=23')
self.assert_strict_equal(urls.url_encode({u'A': 1, u'a': 2, u'B': 3, 'b': 4}, sort=True,
key=lambda x: x[0].lower() + x[0]), 'A=1&a=2&B=3&b=4')
def test_streamed_url_encoding(self):
out = NativeStringIO()
urls.url_encode_stream({'foo': 'bar 45'}, out)
self.assert_strict_equal(out.getvalue(), 'foo=bar+45')
d = {'foo': 1, 'bar': 23, 'blah': u'Hänsel'}
out = NativeStringIO()
urls.url_encode_stream(d, out, sort=True)
self.assert_strict_equal(out.getvalue(), 'bar=23&blah=H%C3%A4nsel&foo=1')
out = NativeStringIO()
urls.url_encode_stream(d, out, sort=True, separator=u';')
self.assert_strict_equal(out.getvalue(), 'bar=23;blah=H%C3%A4nsel;foo=1')
gen = urls.url_encode_stream(d, sort=True)
self.assert_strict_equal(next(gen), 'bar=23')
self.assert_strict_equal(next(gen), 'blah=H%C3%A4nsel')
self.assert_strict_equal(next(gen), 'foo=1')
self.assert_raises(StopIteration, lambda: next(gen))
def test_url_fixing(self):
x = urls.url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
self.assert_line_equal(x, 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)')
x = urls.url_fix("http://just.a.test/$-_.+!*'(),")
self.assert_equal(x, "http://just.a.test/$-_.+!*'(),")
def test_url_fixing_qs(self):
x = urls.url_fix(b'http://example.com/?foo=%2f%2f')
self.assert_line_equal(x, 'http://example.com/?foo=%2f%2f')
x = urls.url_fix('http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
self.assert_equal(x, 'http://acronyms.thefreedictionary.com/Algebraic+Methods+of+Solving+the+Schr%C3%B6dinger+Equation')
def test_iri_support(self):
self.assert_strict_equal(urls.uri_to_iri('http://xn--n3h.net/'),
u'http://\u2603.net/')
self.assert_strict_equal(
urls.uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'),
u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th')
self.assert_strict_equal(urls.iri_to_uri(u'http://☃.net/'), 'http://xn--n3h.net/')
self.assert_strict_equal(
urls.iri_to_uri(u'http://üser:pässword@☃.net/påth'),
'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
self.assert_strict_equal(urls.uri_to_iri('http://test.com/%3Fmeh?foo=%26%2F'),
u'http://test.com/%3Fmeh?foo=%26%2F')
# this should work as well, might break on 2.4 because of a broken
# idna codec
self.assert_strict_equal(urls.uri_to_iri(b'/foo'), u'/foo')
self.assert_strict_equal(urls.iri_to_uri(u'/foo'), '/foo')
self.assert_strict_equal(urls.iri_to_uri(u'http://föö.com:8080/bam/baz'),
'http://xn--f-1gaa.com:8080/bam/baz')
def test_iri_safe_quoting(self):
uri = b'http://xn--f-1gaa.com/%2F%25?q=%C3%B6&x=%3D%25#%25'
iri = u'http://föö.com/%2F%25?q=ö&x=%3D%25#%25'
self.assert_strict_equal(urls.uri_to_iri(uri), iri)
self.assert_strict_equal(urls.iri_to_uri(urls.uri_to_iri(uri)), uri)
def test_ordered_multidict_encoding(self):
d = OrderedMultiDict()
d.add('foo', 1)
d.add('foo', 2)
d.add('foo', 3)
d.add('bar', 0)
d.add('foo', 4)
self.assert_equal(urls.url_encode(d), 'foo=1&foo=2&foo=3&bar=0&foo=4')
def test_href(self):
x = urls.Href('http://www.example.com/')
self.assert_strict_equal(x(u'foo'), 'http://www.example.com/foo')
self.assert_strict_equal(x.foo(u'bar'), 'http://www.example.com/foo/bar')
self.assert_strict_equal(x.foo(u'bar', x=42), 'http://www.example.com/foo/bar?x=42')
self.assert_strict_equal(x.foo(u'bar', class_=42), 'http://www.example.com/foo/bar?class=42')
self.assert_strict_equal(x.foo(u'bar', {u'class': 42}), 'http://www.example.com/foo/bar?class=42')
self.assert_raises(AttributeError, lambda: x.__blah__)
x = urls.Href('blah')
self.assert_strict_equal(x.foo(u'bar'), 'blah/foo/bar')
self.assert_raises(TypeError, x.foo, {u"foo": 23}, x=42)
x = urls.Href('')
self.assert_strict_equal(x('foo'), 'foo')
def test_href_url_join(self):
x = urls.Href(u'test')
self.assert_line_equal(x(u'foo:bar'), u'test/foo:bar')
self.assert_line_equal(x(u'http://example.com/'), u'test/http://example.com/')
self.assert_line_equal(x.a(), u'test/a')
def test_href_past_root(self):
base_href = urls.Href('http://www.blagga.com/1/2/3')
self.assert_strict_equal(base_href('../foo'), 'http://www.blagga.com/1/2/foo')
self.assert_strict_equal(base_href('../../foo'), 'http://www.blagga.com/1/foo')
self.assert_strict_equal(base_href('../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../../foo'), 'http://www.blagga.com/foo')
self.assert_strict_equal(base_href('../../../../../../foo'), 'http://www.blagga.com/foo')
def test_url_unquote_plus_unicode(self):
# was broken in 0.6
self.assert_strict_equal(urls.url_unquote_plus(u'\x6d'), u'\x6d')
self.assert_is(type(urls.url_unquote_plus(u'\x6d')), text_type)
def test_quoting_of_local_urls(self):
rv = urls.iri_to_uri(u'/foo\x8f')
self.assert_strict_equal(rv, '/foo%C2%8F')
self.assert_is(type(rv), str)
def test_url_attributes(self):
rv = urls.url_parse('http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
self.assert_strict_equal(rv.scheme, 'http')
self.assert_strict_equal(rv.auth, 'foo%3a:bar%3a')
self.assert_strict_equal(rv.username, u'foo:')
self.assert_strict_equal(rv.password, u'bar:')
self.assert_strict_equal(rv.raw_username, 'foo%3a')
self.assert_strict_equal(rv.raw_password, 'bar%3a')
self.assert_strict_equal(rv.host, '::1')
self.assert_equal(rv.port, 80)
self.assert_strict_equal(rv.path, '/123')
self.assert_strict_equal(rv.query, 'x=y')
self.assert_strict_equal(rv.fragment, 'frag')
rv = urls.url_parse(u'http://\N{SNOWMAN}.com/')
self.assert_strict_equal(rv.host, u'\N{SNOWMAN}.com')
self.assert_strict_equal(rv.ascii_host, 'xn--n3h.com')
def test_url_attributes_bytes(self):
rv = urls.url_parse(b'http://foo%3a:bar%3a@[::1]:80/123?x=y#frag')
self.assert_strict_equal(rv.scheme, b'http')
self.assert_strict_equal(rv.auth, b'foo%3a:bar%3a')
self.assert_strict_equal(rv.username, u'foo:')
self.assert_strict_equal(rv.password, u'bar:')
self.assert_strict_equal(rv.raw_username, b'foo%3a')
self.assert_strict_equal(rv.raw_password, b'bar%3a')
self.assert_strict_equal(rv.host, b'::1')
self.assert_equal(rv.port, 80)
self.assert_strict_equal(rv.path, b'/123')
self.assert_strict_equal(rv.query, b'x=y')
self.assert_strict_equal(rv.fragment, b'frag')
def test_url_joining(self):
self.assert_strict_equal(urls.url_join('/foo', '/bar'), '/bar')
self.assert_strict_equal(urls.url_join('http://example.com/foo', '/bar'),
'http://example.com/bar')
self.assert_strict_equal(urls.url_join('file:///tmp/', 'test.html'),
'file:///tmp/test.html')
self.assert_strict_equal(urls.url_join('file:///tmp/x', 'test.html'),
'file:///tmp/test.html')
self.assert_strict_equal(urls.url_join('file:///tmp/x', '../../../x.html'),
'file:///x.html')
def test_partial_unencoded_decode(self):
ref = u'foo=정상처리'.encode('euc-kr')
x = urls.url_decode(ref, charset='euc-kr')
self.assert_strict_equal(x['foo'], u'정상처리')
def test_iri_to_uri_idempotence_ascii_only(self):
uri = u'http://www.idempoten.ce'
uri = urls.iri_to_uri(uri)
self.assert_equal(urls.iri_to_uri(uri), uri)
def test_iri_to_uri_idempotence_non_ascii(self):
uri = u'http://\N{SNOWMAN}/\N{SNOWMAN}'
uri = urls.iri_to_uri(uri)
self.assert_equal(urls.iri_to_uri(uri), uri)
def test_uri_to_iri_idempotence_ascii_only(self):
uri = 'http://www.idempoten.ce'
uri = urls.uri_to_iri(uri)
self.assert_equal(urls.uri_to_iri(uri), uri)
def test_uri_to_iri_idempotence_non_ascii(self):
uri = 'http://xn--n3h/%E2%98%83'
uri = urls.uri_to_iri(uri)
self.assert_equal(urls.uri_to_iri(uri), uri)
def test_iri_to_uri_to_iri(self):
iri = u'http://föö.com/'
uri = urls.iri_to_uri(iri)
self.assert_equal(urls.uri_to_iri(uri), iri)
def test_uri_to_iri_to_uri(self):
uri = 'http://xn--f-rgao.com/%C3%9E'
iri = urls.uri_to_iri(uri)
self.assert_equal(urls.iri_to_uri(iri), uri)
def test_uri_iri_normalization(self):
uri = 'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93'
iri = u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713'
tests = [
u'http://föñ.com/\N{BALLOT BOX}/fred?utf8=\u2713',
u'http://xn--f-rgao.com/\u2610/fred?utf8=\N{CHECK MARK}',
b'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
u'http://xn--f-rgao.com/%E2%98%90/fred?utf8=%E2%9C%93',
u'http://föñ.com/\u2610/fred?utf8=%E2%9C%93',
b'http://xn--f-rgao.com/\xe2\x98\x90/fred?utf8=\xe2\x9c\x93',
]
for test in tests:
self.assert_equal(urls.uri_to_iri(test), iri)
self.assert_equal(urls.iri_to_uri(test), uri)
self.assert_equal(urls.uri_to_iri(urls.iri_to_uri(test)), iri)
self.assert_equal(urls.iri_to_uri(urls.uri_to_iri(test)), uri)
self.assert_equal(urls.uri_to_iri(urls.uri_to_iri(test)), iri)
self.assert_equal(urls.iri_to_uri(urls.iri_to_uri(test)), uri)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(URLsTestCase))
return suite
| apache-2.0 |
turbomanage/training-data-analyst | courses/dw-pso/dataflow_python_examples/datastore_schema_import.py | 2 | 2519 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to import file schema from CSV and save it into Google Cloud DataStore
To run this script, you will need Python packages listed in requirements.txt.
You can easily install them with virtualenv and pip by running these commands:
virtualenv env
source ./env/bin/activate
pip install -r requirements.txt
gcloud auth application-default login
This script's options and arguments are documented in
python dataflow_python_examples/datastore_schema_import.py --help
Example to run the script:
python dataflow_import.py --schema-file=<path_to_file>
"""
import argparse
import collections
import csv
import json
import os
from google.cloud import datastore
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--schema-file',
dest='schema_file',
required=True,
help='''
File containing the schema of the input CSV data to be
imported.
Filename should be the same as the BQ table name that you
want: "TABLENAME.csv".
This file will be used to create a DataStore entity.
Example:
COLUMN_1,STRING
COLUMN_2,FLOAT
''')
args = parser.parse_args()
client = datastore.Client()
filename = args.schema_file
print('Processing file %s' % filename)
    table = os.path.splitext(os.path.basename(filename))[0]
    fields = collections.OrderedDict()
    # Use a context manager so the CSV file handle is always closed.
    with open(filename, 'r') as csvfile:
        filetext = csv.reader(csvfile, delimiter=',')
        for rows in filetext:
            fields[rows[0]] = rows[1]
key = client.key('Table', table)
entry = datastore.Entity(key, exclude_from_indexes=['columns'])
entry.update({"columns": unicode(json.dumps(fields), "utf-8")})
client.put(entry)
print('Created/Updated entry for table %s.' % table)
print('Done.')
if __name__ == '__main__':
main()
| apache-2.0 |
ashkaushik0007/foodtriangle_beta | node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py | 1843 | 1786 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith('win'):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
| mit |
jonathanmei/rqsa | rqsa/environment.py | 1 | 2424 | import cv2
import gym
import random
import numpy as np
class Environment(object):
def __init__(self, config):
self.env = gym.make(config.env_name)
screen_width, screen_height, self.action_repeat, self.random_start = \
config.screen_width, config.screen_height, config.action_repeat, config.random_start
self.display = config.display
self.dims = (screen_width, screen_height)
self._screen = None
self.reward = 0
self.terminal = True
def new_game(self, from_random_game=False):
if self.lives == 0:
self._screen = self.env.reset()
self._step(0)
self.render()
return self.screen, 0, 0, self.terminal
def new_random_game(self):
self.new_game(True)
for _ in xrange(random.randint(0, self.random_start - 1)):
self._step(0)
self.render()
return self.screen, 0, 0, self.terminal
def _step(self, action):
self._screen, self.reward, self.terminal, _ = self.env.step(action)
def _random_step(self):
action = self.env.action_space.sample()
self._step(action)
    @property
def screen(self):
return cv2.resize(cv2.cvtColor(self._screen, cv2.COLOR_RGB2GRAY)/255., self.dims)
#return cv2.resize(cv2.cvtColor(self._screen, cv2.COLOR_BGR2YCR_CB)/255., self.dims)[:,:,0]
@property
def action_size(self):
return self.env.action_space.n
@property
def lives(self):
return self.env.ale.lives()
@property
def state(self):
return self.screen, self.reward, self.terminal
def render(self):
if self.display:
self.env.render()
def after_act(self, action):
self.render()
class GymEnvironment(Environment):
def __init__(self, config):
super(GymEnvironment, self).__init__(config)
def act(self, action, is_training=True):
cumulated_reward = 0
start_lives = self.lives
for _ in xrange(self.action_repeat):
self._step(action)
cumulated_reward = cumulated_reward + self.reward
if is_training and start_lives > self.lives:
cumulated_reward -= 1
self.terminal = True
if self.terminal:
break
self.reward = cumulated_reward
self.after_act(action)
return self.state
class SimpleGymEnvironment(Environment):
def __init__(self, config):
super(SimpleGymEnvironment, self).__init__(config)
def act(self, action, is_training=True):
self._step(action)
self.after_act(action)
return self.state
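# Illustrative sketch only: a minimal config stand-in showing the attributes
# Environment expects; the values below are assumptions for this example,
# not defaults shipped with this module.
class _ExampleConfig(object):
    env_name = 'Breakout-v0'
    screen_width = 84
    screen_height = 84
    action_repeat = 4
    random_start = 30
    display = False

def _example_episode():
    # Run a short episode of random actions through the wrapper.
    env = GymEnvironment(_ExampleConfig())
    screen, reward, action, terminal = env.new_random_game()
    for _ in xrange(10):
        screen, reward, terminal = env.act(env.env.action_space.sample())
        if terminal:
            break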
| mit |
ukdtom/WebTools.bundle | Contents/Code/findMediaV3.py | 1 | 34469 | ######################################################################################################################
# findMedia unit
# A WebTools bundle plugin
#
# Used to locate both items missing from the database, as well as from the filesystem
#
# Author: dane22, a Plex Community member
#
######################################################################################################################
import urllib
import unicodedata
import json
import time
import sys
import os
from consts import DEBUGMODE, VALIDEXT
from misc import misc
from wtV3 import wtV3
# Consts used here
# Int of amount of medias in a database section
AmountOfMediasInDatabase = 0
# Files from the database
mediasFromDB = []
# Files from the file system
mediasFromFileSystem = []
# Unmatched items
unmatchedByPlex = []
# Response to getStatus
statusMsg = wtV3().GETTRANSLATE(None, None, Internal=True, String='idle')
# Internal tracker of where we are
runningState = 0
# Flag to set if user wants to cancel
bAbort = False
Extras = ['behindthescenes', 'deleted', 'featurette', 'interview',
'scene', 'short', 'trailer'] # Local extras
ExtrasDirs = ['behind the scenes', 'deleted scenes', 'featurettes',
'interviews', 'scenes', 'shorts', 'trailers'] # Directories to be ignored
Specials = ['season 00', 'season 0', 'specials'] # Specials dirs
# Valid keys for prefs
KEYS = ['IGNORE_HIDDEN', 'IGNORED_DIRS', 'VALID_EXTENSIONS', 'IGNORE_SPECIALS']
excludeElements = 'Actor,Collection,Country,Director,Genre,Label,Mood,Producer,Role,Similar,Writer'
excludeFields = 'summary,tagline'
SUPPORTEDSECTIONS = ['movie', 'show']
GET = ['SCANSECTION', 'GETSECTIONSLIST',
'GETRESULT', 'GETSTATUS', 'GETSETTINGS']
PUT = ['ABORT', 'RESETSETTINGS']
POST = ['SETSETTINGS']
DELETE = []
class findMediaV3(object):
init_already = False # Make sure init only run once
bResultPresent = False # Do we have a result to present
# Init of the class
@classmethod
def init(self):
global retMsg
global MediaChuncks
global CoreUrl
global init_already
try:
# Only init once during the lifetime of this
if not self.init_already:
self.init_already = True
retMsg = ['WebTools']
self.populatePrefs()
Log.Debug('******* Starting findMedia *******')
Log.Debug('********* Prefs are ***********')
Log.Debug(Dict['findMedia'])
self.MediaChuncks = 40
self.CoreUrl = misc.GetLoopBack() + '/library/sections/'
except Exception, e:
Log.Exception('Exception in FM Init was %s' % (str(e)))
#********** Functions below ******************
# Set settings
@classmethod
def SETSETTINGS(self, req, *args):
try:
# Get the Payload
data = json.loads(req.request.body.decode('utf-8'))
except Exception, e:
Log.Exception('Not a valid payload: ' + str(e))
req.set_status(412)
req.finish('Not a valid payload?')
try:
Log.Debug('setSettings called with a body of: ' + str(data))
# Walk the settings body, and only accept valid settings
if 'IGNORE_HIDDEN' in data:
Dict['findMedia']['IGNORE_HIDDEN'] = data['IGNORE_HIDDEN']
if 'IGNORE_SPECIALS' in data:
Dict['findMedia']['IGNORE_SPECIALS'] = data['IGNORE_SPECIALS']
if 'IGNORED_DIRS' in data:
Dict['findMedia']['IGNORED_DIRS'] = data['IGNORED_DIRS']
if 'VALID_EXTENSIONS' in data:
Dict['findMedia']['VALID_EXTENSIONS'] = data['VALID_EXTENSIONS']
if 'IGNORE_EXTRAS' in data:
Dict['findMedia']['IGNORE_EXTRAS'] = data['IGNORE_EXTRAS']
Dict.Save()
except Exception, e:
Log.Exception('Exception in setSettings: ' + str(e))
req.clear()
req.set_status(500)
req.finish('Fatal error in setSettings was: ' + str(e))
# Main call for class.....
@classmethod
def SCANSECTION(self, req, *args):
global AmountOfMediasInDatabase
global retMsg
global bAbort
retMsg = ['WebTools']
bAbort = False
# Scan shows from the database
def scanShowDB(sectionNumber=0):
global AmountOfMediasInDatabase
global mediasFromDB
global statusMsg
global runningState
global unmatchedByPlex
try:
Log.Debug('Starting scanShowDB for section %s' %
(sectionNumber))
unmatchedByPlex = []
runningState = -1
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Starting to scan database for section %s') % (sectionNumber)
# Start by getting the totals of this section
totalSize = XML.ElementFromURL(
self.CoreUrl + sectionNumber + '/all?X-Plex-Container-Start=1&X-Plex-Container-Size=0').get('totalSize')
AmountOfMediasInDatabase = totalSize
Log.Debug('Total size of medias are %s' % (totalSize))
iShow = 0
iCShow = 0
statusShows = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Scanning database show %s of %s :') % (iShow, totalSize)
statusMsg = statusShows
# So let's walk the library
while True:
# Grab shows
urlShows = self.CoreUrl + sectionNumber + '/all?X-Plex-Container-Start=' + str(iCShow) + '&X-Plex-Container-Size=' + str(
self.MediaChuncks) + '&excludeElements=' + excludeElements + '&excludeFields=' + excludeFields
shows = XML.ElementFromURL(urlShows).xpath('//Directory')
# Grab individual show
for show in shows:
statusShow = show.get('title')
statusMsg = statusShows + statusShow
iSeason = 0
iCSeason = 0
# Grab seasons
while True:
seasons = XML.ElementFromURL(misc.GetLoopBack() + show.get('key') + '?X-Plex-Container-Start=' + str(iCSeason) + '&X-Plex-Container-Size=' + str(
self.MediaChuncks) + '&excludeElements=' + excludeElements + '&excludeFields=' + excludeFields).xpath('//Directory')
# Grab individual season
for season in seasons:
if season.get('title') == 'All episodes':
iSeason += 1
continue
statusSeason = ' ' + season.get('title')
statusMsg = statusShows + statusShow + statusSeason
iSeason += 1
# Grab episodes
iEpisode = 0
iCEpisode = 0
while True:
url = misc.GetLoopBack() + season.get('key') + '?X-Plex-Container-Start=' + str(iCEpisode) + '&X-Plex-Container-Size=' + \
str(self.MediaChuncks) + '&excludeElements=' + \
excludeElements + '&excludeFields=' + excludeFields
videos = XML.ElementFromURL(
url).xpath('//Video')
for video in videos:
if bAbort:
raise ValueError('Aborted')
bUnmatched = False
if video.get('year') == None:
# Also check if summary is missing; otherwise it might be a false alert
if (video.get('summary') == None) or (video.get('summary') == ""):
bUnmatched = True
# No year nor summary, so most likely a mismatch
key = video.get('ratingKey')
unmatchedURL = misc.GetLoopBack() + '/library/metadata/' + key + '?excludeElements=' + \
excludeElements + '&excludeFields=' + excludeFields
unmatched = XML.ElementFromURL(
unmatchedURL).xpath('//Video')
filename = unmatched[0].xpath(
'//Part/@file')[0]
if self.addThisItem(filename, 'video'):
Log.Info(
'Unmatched file confirmed as %s' % filename)
unmatchedByPlex.append(
filename.encode("utf-8"))
episodes = XML.ElementFromString(
XML.StringFromElement(video)).xpath('//Part')
for episode in episodes:
if bAbort:
raise ValueError('Aborted')
filename = episode.get('file')
filename = String.Unquote(
filename).encode('utf8', 'ignore')
if self.addThisItem(filename, 'video'):
mediasFromDB.append(
filename.encode("utf-8"))
iEpisode += 1
# Inc Episodes counter
iCEpisode += self.MediaChuncks
if len(videos) == 0:
break
# Inc Season counter
iCSeason += self.MediaChuncks
if len(seasons) == 0:
break
iShow += 1
statusShows = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Scanning database show %s of %s :') % (str(iShow), str(totalSize))
# Inc. Shows counter
iCShow += self.MediaChuncks
if len(shows) == 0:
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Scanning database: %s : Done') % (str(totalSize))
Log.Debug('***** Done scanning the database *****')
if DEBUGMODE:
Log.Debug(mediasFromDB)
runningState = 1
break
return
except ValueError:
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True, String='Idle')
runningState = 99
Log.Info('Aborted in ScanShowDB')
except Exception, e:
Log.Exception('Fatal error in scanShowDB: ' + str(e))
runningState = 99
# End scanShowDB
# Find missing files from the database
def findMissingFromDB():
global MissingFromDB
Log.Debug('Finding items missing from Database')
MissingFromDB = []
try:
for item in mediasFromFileSystem:
if bAbort:
raise ValueError('Aborted')
if item not in mediasFromDB:
MissingFromDB.append(item)
return MissingFromDB
except ValueError:
Log.Info('Aborted in findMissingFromDB')
# Find missing files from the filesystem
def findMissingFromFS():
global MissingFromFS
Log.Debug('Finding items missing from FileSystem')
MissingFromFS = []
try:
for item in mediasFromDB:
if bAbort:
raise ValueError('Aborted')
if item not in mediasFromFileSystem:
MissingFromFS.append(item)
return MissingFromFS
except ValueError:
Log.Info('Aborted in findMissingFromFS')
# Scan the file system
def getFiles(filePath, mediaType):
global mediasFromFileSystem
global runningState
global statusMsg
try:
runningState = -1
Log.Debug(
"*********************** FileSystem scan Paths: *****************************************")
bScanStatusCount = 0
# for filePath in files:
for Path in filePath:
# Decode filePath
bScanStatusCount += 1
filePath2 = urllib.unquote(Path).decode('utf8')
filePath2 = misc.Unicodize(filePath2)
Log.Debug("Handling filepath #%s: %s" % (
bScanStatusCount, filePath2.encode('utf8', 'ignore')))
try:
for root, subdirs, files in os.walk(filePath2):
for file in files:
filename = Core.storage.join_path(root, file)
file = misc.Unicodize(filename).encode('utf8')
if self.addThisItem(filename, mediaType):
if Platform.OS == 'Windows':
# I hate windows
pos = filename.find(':') - 1
if pos != -2:
# We don't have a UNC path here
filename = filename[pos:]
Log.Debug('appending file: ' + filename)
mediasFromFileSystem.append(
filename.encode("utf-8"))
if DEBUGMODE:
Log.Debug('Scanning file: ' + file)
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Scanning file: %s') % file
except Exception, e:
Log.Exception(
'Exception happened in FM scanning filesystem: ' + str(e))
return
Log.Debug('***** Finished scanning filesystem *****')
if DEBUGMODE:
Log.Debug(mediasFromFileSystem)
runningState = 2
except ValueError:
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True, String='Idle')
runningState = 0
Log.Info('Aborted in getFiles')
except Exception, e:
Log.Exception('Exception happened in getFiles: ' + str(e))
runningState = 99
# Get a list of all files in a Movie Library from the database
def scanMovieDb(sectionNumber=0):
global AmountOfMediasInDatabase
global mediasFromDB
global unmatchedByPlex
global statusMsg
global runningState
try:
unmatchedByPlex = []
Log.Debug('Starting scanMovieDb for section %s' %
(sectionNumber))
runningState = -1
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Starting to scan database for section %s') % sectionNumber
# Start by getting the totals of this section
totalSize = XML.ElementFromURL(
self.CoreUrl + sectionNumber + '/all?X-Plex-Container-Start=1&X-Plex-Container-Size=0').get('totalSize')
AmountOfMediasInDatabase = totalSize
Log.Debug('Total size of medias are %s' % (totalSize))
iStart = 0
iCount = 0
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Scanning database: item %s of %s : Working') % (iCount, totalSize)
# So let's walk the library
while True:
# Grab a chunk of videos from the server
medias = XML.ElementFromURL(self.CoreUrl + sectionNumber + '/all?X-Plex-Container-Start=' + str(iStart) + '&X-Plex-Container-Size=' + str(
self.MediaChuncks) + '&excludeElements=' + excludeElements + '&excludeFields=' + excludeFields).xpath('//Video')
for video in medias:
iCount += 1
year = video.get('year')
if year == None:
# No year, so most likely a mismatch
key = video.get('ratingKey')
unmatchedURL = misc.GetLoopBack() + '/library/metadata/' + key + '?excludeElements=' + \
excludeElements + '&excludeFields=' + excludeFields
unmatched = XML.ElementFromURL(
unmatchedURL).xpath('//Video')
filename = unmatched[0].xpath(
'//Part/@file')[0]
if self.addThisItem(filename, 'video'):
Log.Info(
'Unmatched file confirmed as %s' % filename)
unmatchedByPlex.append(
filename.encode("utf-8"))
parts = XML.ElementFromString(
XML.StringFromElement(video)).xpath('//Part')
# Walk the parts of a media
for part in parts:
if bAbort:
runningState = 0
raise ValueError('Aborted')
filename = part.get('file')
filename = unicode(misc.Unicodize(
part.get('file')).encode('utf8', 'ignore'))
if self.addThisItem(filename, 'video'):
mediasFromDB.append(filename.encode("utf-8"))
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Scanning database: item %s of %s : Working') % (iCount, totalSize)
iStart += self.MediaChuncks
if len(medias) == 0:
statusMsg = 'Scanning database: %s : Done' % (
totalSize)
Log.Debug('***** Done scanning the database *****')
if DEBUGMODE:
Log.Debug(mediasFromDB)
runningState = 1
break
'''
# Grab a chunk from the server
medias = XML.ElementFromURL(self.CoreUrl + sectionNumber + '/all?X-Plex-Container-Start=' + str(iStart) + '&X-Plex-Container-Size=' + str(
self.MediaChuncks) + '&excludeElements=' + excludeElements + '&excludeFields=' + excludeFields).xpath('//Part')
# Walk the chunk
for part in medias:
if bAbort:
runningState = 0
raise ValueError('Aborted')
break
iCount += 1
filename = part.get('file')
filename = unicode(misc.Unicodize(
part.get('file')).encode('utf8', 'ignore'))
mediasFromDB.append(filename)
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Scanning database: item %s of %s : Working') % (iCount, totalSize)
iStart += self.MediaChuncks
if len(medias) == 0:
statusMsg = 'Scanning database: %s : Done' % (
totalSize)
Log.Debug('***** Done scanning the database *****')
if DEBUGMODE:
Log.Debug(mediasFromDB)
runningState = 1
break
'''
return
except Exception, e:
Log.Exception('Fatal error in scanMovieDb: ' + str(e))
runningState = 99
# End scanMovieDb
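# A standalone sketch of the chunked pagination pattern used by
# scanMovieDb/scanShowDB above: the Plex API is read in windows of
# MediaChuncks items via X-Plex-Container-Start/Size until an empty
# page comes back. The plain requests/ElementTree form below is an
# assumption (the original runs inside the Plex plugin framework),
# and authentication headers are omitted.
#
# import requests
# import xml.etree.ElementTree as ET
#
# def walk_section(base_url, section, chunk=40):
#     start = 0
#     while True:
#         url = ('%s/library/sections/%s/all?X-Plex-Container-Start=%d'
#                '&X-Plex-Container-Size=%d' % (base_url, section, start, chunk))
#         videos = ET.fromstring(requests.get(url).content).findall('.//Video')
#         if not videos:
#             break  # empty page: the section is exhausted
#         for video in videos:
#             yield video
#         start += chunk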
# Scan db and files. Must run as a thread
def scanMedias(sectionNumber, sectionLocations, sectionType, req):
global runningState
global statusMsg
global retMsg
try:
if sectionType == 'movie':
MediaType = 'video'
scanMovieDb(sectionNumber=sectionNumber)
elif sectionType == 'show':
MediaType = 'video'
scanShowDB(sectionNumber=sectionNumber)
else:
req.clear()
req.set_status(400)
req.finish('Unknown Section Type')
if bAbort:
raise ValueError('Aborted')
getFiles(sectionLocations, MediaType)
if bAbort:
raise ValueError('Aborted')
retMsg = {}
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Get missing from File System')
retMsg["MissingFromFS"] = findMissingFromFS()
if bAbort:
raise ValueError('Aborted')
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True,
String='Get missing from database')
retMsg["MissingFromDB"] = findMissingFromDB()
retMsg["Unmatched"] = unmatchedByPlex
runningState = 0
statusMsg = 'done'
except ValueError:
Log.Info('Aborted in ScanMedias')
except Exception, e:
Log.Exception('Exception happened in scanMedias: ' + str(e))
statusMsg = wtV3().GETTRANSLATE(self, None, Internal=True, String='Idle')
# ************ Main function ************
Log.Debug('scanSection started')
try:
del mediasFromDB[:] # Files from the database
del mediasFromFileSystem[:] # Files from the file system
# Grab the section number from the req
try:
sectionNumber = args[0][0]
except:
Log.Critical('Missing section key')
req.clear()
req.set_status(412)
req.finish('Missing section parameter')
# Let's find out the info of section here
response = XML.ElementFromURL(self.CoreUrl).xpath(
'//Directory[@key=' + sectionNumber + ']')
sectionTitle = response[0].get('title')
sectionType = response[0].get('type')
locations = response[0].xpath(
'//Directory[@key=' + sectionNumber + ']/Location')
sectionLocations = []
for location in locations:
sectionLocations.append(os.path.normpath(location.get('path')))
Log.Debug('Going to scan section %s with a title of %s and a type of %s and locations as %s' % (
sectionNumber, sectionTitle, sectionType, str(sectionLocations)))
if runningState in [0, 99]:
Thread.Create(scanMedias, globalize=True, sectionNumber=sectionNumber,
sectionLocations=sectionLocations, sectionType=sectionType, req=req)
else:
req.clear()
req.set_status(409)
req.finish('Scanning already in progress')
except Exception, ex:
Log.Exception('Fatal error happened in scanSection: ' + str(ex))
req.clear()
req.set_status(500)
req.finish('Fatal error happened in scanSection: ' + str(ex))
return req
# Abort
@classmethod
def ABORT(self, req, *args):
global runningState
runningState = 0
global bAbort
bAbort = True
req.clear()
req.set_status(200)
# Get supported Section list
@classmethod
def GETSECTIONSLIST(self, req, *args):
Log.Debug('getSectionsList requested')
try:
rawSections = XML.ElementFromURL(
misc.GetLoopBack() + '/library/sections')
Sections = []
for directory in rawSections:
if directory.get('type') in SUPPORTEDSECTIONS:
Section = {'key': directory.get('key'), 'title': directory.get(
'title'), 'type': directory.get('type')}
Sections.append(Section)
req.clear()
req.set_status(200)
req.set_header('Content-Type', 'application/json; charset=utf-8')
req.finish(json.dumps(Sections))
except Exception, e:
Log.Exception(
'Fatal error happened in getSectionsList: %s' % (str(e)))
req.clear()
req.set_status(500)
req.finish('Fatal error happened in getSectionsList')
# Return the result
@classmethod
def GETRESULT(self, req, *args):
# Are we in idle mode?
if runningState == 0:
req.clear()
if 'WebTools' in retMsg:
req.set_status(204)
else:
Log.Info('Result is: ' + str(retMsg))
req.set_status(200)
req.set_header(
'Content-Type', 'application/json; charset=utf-8')
req.finish(retMsg)
elif runningState == 99:
req.clear()
req.set_status(204)
else:
req.clear()
req.set_status(204)
return
# Return current status
@classmethod
def GETSTATUS(self, req, *args):
req.clear()
req.set_status(200)
if runningState == 0:
req.finish('Idle')
else:
req.finish(statusMsg)
# Reset settings to default
@classmethod
def RESETSETTINGS(self, req, *args):
Dict['findMedia'] = None
Dict.Save()
self.populatePrefs()
req.clear()
req.set_status(200)
# Return the settings of this plugin
@classmethod
def GETSETTINGS(self, req, *args):
req.clear()
req.set_header('Content-Type', 'application/json; charset=utf-8')
req.set_status(200)
req.finish(json.dumps(Dict['findMedia']))
################### Internal functions #############################
''' Get the relevant function and call it with optional params '''
@classmethod
def getFunction(self, metode, req, *args):
self.init()
params = req.request.uri[8:].upper().split('/')
self.function = None
if metode == 'get':
for param in params:
if param in GET:
self.function = param
break
else:
pass
elif metode == 'post':
for param in params:
if param in POST:
self.function = param
break
else:
pass
elif metode == 'put':
for param in params:
if param in PUT:
self.function = param
break
else:
pass
elif metode == 'delete':
for param in params:
if param in DELETE:
self.function = param
break
else:
pass
if self.function == None:
Log.Debug('Function to call is None')
req.clear()
req.set_status(404)
req.finish('Unknown function call')
else:
# Check for optional argument
paramsStr = req.request.uri[req.request.uri.upper().find(
self.function) + len(self.function):]
# remove starting and ending slash
if paramsStr.endswith('/'):
paramsStr = paramsStr[:-1]
if paramsStr.startswith('/'):
paramsStr = paramsStr[1:]
# Turn into a list
params = paramsStr.split('/')
# If empty list, turn into None
if params[0] == '':
params = None
try:
Log.Debug('Function to call is: ' + self.function +
' with params: ' + str(params))
if params == None:
getattr(self, self.function)(req)
else:
getattr(self, self.function)(req, params)
except Exception, e:
Log.Exception('Exception in process of: ' + str(e))
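# Worked dispatch example (hedged: the uri[8:] slice assumes the
# WebTools route prefix, and the exact mount point comes from the
# framework). A GET of '/findmedia/scansection/2' is upper-cased and
# split, matches 'SCANSECTION' in the GET list, the trailing '/2' is
# peeled off, and the call becomes getattr(self, 'SCANSECTION')(req, ['2'])
# -- which is why SCANSECTION reads the section key as args[0][0].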
''' Populate the defaults, if not already there '''
@classmethod
def populatePrefs(self):
try:
if Dict['findMedia'] == None:
Dict['findMedia'] = {
'IGNORE_HIDDEN': True,
'IGNORED_DIRS': [".@__thumb", ".AppleDouble", "lost+found"],
'VALID_EXTENSIONS': VALIDEXT['video'],
'IGNORE_EXTRAS': True,
'IGNORE_SPECIALS': True
}
Dict.Save()
# New key from V3.0, so need to handle separately
if 'IGNORE_EXTRAS' not in Dict['findMedia'].keys():
Dict['findMedia']['IGNORE_EXTRAS'] = True
Dict.Save()
# New key from V3.0, so need to handle separately
if 'IGNORE_SPECIALS' not in Dict['findMedia'].keys():
Dict['findMedia']['IGNORE_SPECIALS'] = True
Dict.Save()
except Exception, e:
Log.Exception('Exception in populatePrefs was %s' % str(e))
'''
Returns true or false, depending on if a media should be added to the list
Param file: The file to be investigated, with full path
Param mediaType: Type of media
'''
@classmethod
def addThisItem(self, file, mediaType):
try:
if os.path.splitext(file)[1].lower()[1:] in Dict['findMedia']['VALID_EXTENSIONS']:
parts = self.splitall(file)
for part in parts:
if Dict['findMedia']['IGNORE_EXTRAS']:
if part.lower() in ExtrasDirs:
return False
for extra in Extras:
if extra in part.lower():
return False
# Ignore specials
if Dict['findMedia']['IGNORE_SPECIALS']:
for special in Specials:
if special == part.lower():
return False
# Ignore hiddens
if Dict['findMedia']['IGNORE_HIDDEN']:
if part.startswith('.'):
return False
return True
else:
return False
except Exception, e:
Log.Exception('Exception in addThisItem was %s' % str(e))
return False
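# Illustrative outcomes of addThisItem under the defaults from
# populatePrefs above (hedged: they assume 'mkv' is in VALID_EXTENSIONS
# and 'txt' is not; the real list comes from consts.VALIDEXT):
#
# addThisItem('/tv/Show/Season 01/ep01.mkv', 'video')     -> True
# addThisItem('/tv/Show/Specials/ep00.mkv', 'video')      -> False (IGNORE_SPECIALS)
# addThisItem('/tv/Show/Featurettes/extra.mkv', 'video')  -> False (IGNORE_EXTRAS)
# addThisItem('/tv/.hidden/ep01.mkv', 'video')            -> False (IGNORE_HIDDEN)
# addThisItem('/tv/Show/notes.txt', 'video')              -> False (extension filter)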
''' Returns the different parts of a filepath '''
@classmethod
def splitall(self, path):
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
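# splitall decomposes a path into all of its components, e.g.:
#   splitall('/tv/Show/Season 01/ep01.mkv')
#     -> ['/', 'tv', 'Show', 'Season 01', 'ep01.mkv']   (POSIX)
#   splitall('C:\\tv\\Show\\ep01.mkv')
#     -> ['C:\\', 'tv', 'Show', 'ep01.mkv']             (Windows)
# which is what lets addThisItem test each directory level separately.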
| mpl-2.0 |
3dfxmadscientist/CBSS | addons/l10n_ro/__openerp__.py | 34 | 1810 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 (<http://www.erpsystems.ro>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Romania - Accounting",
"version" : "1.0",
"author" : "TOTAL PC SYSTEMS",
"website": "http://www.erpsystems.ro",
"category" : "Localization/Account Charts",
"depends" : ['account','account_chart','base_vat'],
"description": """
This is the module to manage the accounting chart, VAT structure and Registration Number for Romania in OpenERP.
================================================================================================================
Romanian accounting chart and localization.
""",
"demo_xml" : [],
"update_xml" : ['partner_view.xml','account_tax_code_template.xml','account_chart.xml','account_tax_template.xml','l10n_chart_ro_wizard.xml'],
"auto_install": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vipul-sharma20/oh-mainline | vendor/packages/pytz/pytz/exceptions.py | 657 | 1333 | '''
Custom exceptions raised by pytz.
'''
__all__ = [
'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
'NonExistentTimeError',
]
class UnknownTimeZoneError(KeyError):
'''Exception raised when pytz is passed an unknown timezone.
>>> isinstance(UnknownTimeZoneError(), LookupError)
True
This class is actually a subclass of KeyError to provide backwards
compatibility with code relying on the undocumented behavior of earlier
pytz releases.
>>> isinstance(UnknownTimeZoneError(), KeyError)
True
'''
pass
class InvalidTimeError(Exception):
'''Base class for invalid time exceptions.'''
class AmbiguousTimeError(InvalidTimeError):
'''Exception raised when attempting to create an ambiguous wallclock time.
At the end of a DST transition period, a particular wallclock time will
occur twice (once before the clocks are set back, once after). Both
possibilities may be correct, unless further information is supplied.
See DstTzInfo.normalize() for more info
'''
class NonExistentTimeError(InvalidTimeError):
'''Exception raised when attempting to create a wallclock time that
cannot exist.
At the start of a DST transition period, the wallclock time jumps forward.
The instants jumped over never occur.
'''
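# Usage sketch (not part of the original module): these exceptions
# surface when localizing with is_dst=None, which tells pytz to refuse
# ambiguous or non-existent wallclock times instead of guessing.
#
# from datetime import datetime
# import pytz
#
# tz = pytz.timezone('Europe/Amsterdam')
# try:
#     tz.localize(datetime(2002, 10, 27, 2, 30), is_dst=None)
# except pytz.exceptions.AmbiguousTimeError:
#     pass  # 02:30 occurs twice when the clocks fall back
# try:
#     tz.localize(datetime(2002, 3, 31, 2, 30), is_dst=None)
# except pytz.exceptions.NonExistentTimeError:
#     pass  # 02:30 is skipped when the clocks spring forward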
| agpl-3.0 |