repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
upgradeadvice/fofix-grisly-virtualenv
|
FoFiX/VFS.py
|
1
|
14383
|
#####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire X (FoFiX) #
# Copyright (C) 2009-2010 John Stumpo #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
'''
Functions providing a convenient virtual filesystem.
Among other things, this is how themes and mods will be implemented.
Note that B{all} VFS functions use slash-delimited paths, relieving
other code of the need to C{os.path.join()}. All VFS paths must also
be absolute (i.e. start with a slash) and may not contain "." or "..".
Files or directories may be mounted in the root of the VFS read-only
or read-write. Mounting multiple things onto the same mountpoint
overlays the newer mount's contents on the old mount's contents, but
read-write mounts always prevail over read-only mounts in the resolution
order. All write attempts to a given mountpoint go to the most recent
read-write mount on that mountpoint; trying to write to a mountpoint
that has no writable mounts raises C{OSError(EROFS)}. Modification of
files existing in lower layers but not the most recent writable mount
uses copy-on-write semantics. There is no way to make something in
a lower layer appear to have been deleted, however.
'''
import errno
import os
import re
from stat import S_IFDIR
from stat import S_ISDIR
from stat import S_ISREG
import sys
import time
import Version
import shutil
try: # {py24hack}
import sqlite3
except ImportError:
import pysqlite2.dbapi2 as sqlite3
_mountTable = {}
class Mount(object):
    '''
    Implementation of a mount point in the VFS root.

    Holds two ordered lists of physical backing directories; writable
    directories always take precedence over read-only ones when resolving
    reads, and within each list earlier entries win.
    '''
    def __init__(self):
        # List of read-only backing directories, in decreasing order of priority.
        self.readOnly = []
        # List of writable backing directories, in decreasing order of priority.
        self.writable = []

    def resolveRead(self, path):
        '''
        Resolve a path to a file that the user wants to read.

        Searches writable directories first, then read-only ones, and
        returns the first existing match.

        @param path: Virtual path within this mount
        @return: Physical path to the file
        @raise OSError(ENOENT): if file does not exist
        '''
        for p in (self.writable + self.readOnly):
            # rstrip(os.sep) so that an empty path fragment (the mount root
            # itself) resolves to the backing directory without a trailing
            # separator.
            candidate = os.path.join(p, path).rstrip(os.sep)
            if os.path.exists(candidate):
                return candidate
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))

    def resolveWrite(self, path):
        '''
        Resolve a path to a file that the user wants to create or modify.
        Copies files to the most recently mounted writable directory if
        necessary (copy-on-write). If a path is returned, it is guaranteed
        to be in the most recently mounted writable directory.

        @param path: Virtual path within this mount
        @return: Physical path to the file
        @raise OSError(EROFS): if all mounted directories are read-only
        '''
        if len(self.writable) == 0:
            raise OSError(errno.EROFS, os.strerror(errno.EROFS))
        wpath = os.path.join(self.writable[0], path).rstrip(os.sep)
        if os.path.exists(wpath):
            # Already present in the top writable layer; modify in place.
            return wpath
        try:
            # Look for an existing copy in a lower layer to copy up.
            rpath = self.resolveRead(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
            # Not found anywhere: the caller is creating a new file.
            rpath = None
        if not os.path.isdir(os.path.dirname(wpath)):
            os.makedirs(os.path.dirname(wpath))
        if rpath is not None:
            # Copy-on-write: bring the lower-layer file (with metadata)
            # into the writable layer before it is modified.
            shutil.copy2(rpath, wpath)
        return wpath

    def listdir(self, path):
        '''
        List the contents of a directory within this mountpoint.

        Produces the union of the directory listings from every backing
        directory that contains the given path.

        @param path: Virtual path within this mount to list
        @return: List of entries (excluding '.' and '..')
        '''
        contents = set()
        for p in (self.writable + self.readOnly):
            candidate = os.path.join(p, path)
            if os.path.isdir(candidate):
                contents.update(os.listdir(candidate))
        return list(contents)
def mount(dir, mountpt):
    '''
    Mount a directory read-only.
    The virtual directory provides the union of the contents of all
    directories mounted on it.

    @param dir: Directory to mount
    @param mountpt: Virtual directory to mount it onto
    '''
    entry = _mountTable.get(mountpt)
    if entry is None:
        entry = _mountTable[mountpt] = Mount()
    entry.readOnly.append(dir)
def mountWritable(dir, mountpt):
    '''
    Mount a directory and allow writing.
    The most recent writable-mounted directory on a given
    mountpoint receives all writes made to the mountpoint.

    @param dir: Directory to mount
    @param mountpt: Virtual directory to mount it onto
    '''
    entry = _mountTable.get(mountpt)
    if entry is None:
        entry = _mountTable[mountpt] = Mount()
    entry.writable.append(dir)
def _convertPath(path):
'''
Validate and convert a VFS path to a mount point and a native path
fragment suitable for passing to the mount point's methods.
@param path: VFS path
@return: 2-tuple of mount and native path fragment to pass to the mount
@raise OSError(EINVAL): on syntactically invalid VFS path
'''
if re.match('^/[^:\\\\]*$', path) is None or re.match('/\\.\\.?(/|$)', path) is not None:
raise OSError(errno.EINVAL, os.strerror(errno.EINVAL))
while '//' in path:
path = path.replace('//', '/')
components = path.lstrip('/').split('/')
if components[0] not in _mountTable:
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))
return _mountTable[components[0]], os.sep.join(components[1:])
def resolveRead(path):
    '''
    Convert a VFS path into a real path that is usable to access an
    already-existing file or directory.

    @param path: VFS path
    @return: Real path
    @raise OSError(ENOENT): if path does not exist
    '''
    mountObj, fragment = _convertPath(path)
    return mountObj.resolveRead(fragment)
def resolveWrite(path):
    '''
    Convert a VFS path that the user wishes to write a file to into a real
    writable path. Copies a file from a read-only area to a read-write area
    if necessary.

    @param path: VFS path
    @return: Real path
    '''
    mountObj, fragment = _convertPath(path)
    return mountObj.resolveWrite(fragment)
class StatResult(object):
    '''
    C{stat()} result for an object in the virtual filesystem.

    This was originally a hack to give synthesized C{stat()} results for / so
    things worked right in the test code, but it's left in because if you
    can stat anything else in the VFS, you should be able to stat / too.

    For all practical purposes, this object is compatible with the return type
    of C{os.stat()}. Fields of the result can be accessed either as attributes
    or via numeric indices.
    '''
    def __init__(self, path):
        '''
        Set the object up with C{os.stat()} results of C{path} or
        synthesized properties if the VFS root is statted.

        @param path: Path to operate on
        '''
        # Field names, in the same order os.stat() tuples use; drives the
        # sequence protocol below.
        self._attrs = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid',
                       'st_gid', 'st_size', 'st_atime', 'st_mtime', 'st_ctime')
        if path == '/':
            # Synthesize a plausible read-only directory entry for the
            # VFS root, which has no physical backing directory.
            self.st_mode = S_IFDIR | 0555
            self.st_ino = 0
            self.st_dev = 0
            # Conventional nlink for a directory: '.' and '..' plus one
            # per subdirectory (here, one per mount point).
            self.st_nlink = 2 + len(_mountTable)
            self.st_uid = 0
            self.st_gid = 0
            self.st_size = 4096
            self.st_atime = time.time()
            self.st_mtime = self.st_atime
            self.st_ctime = self.st_atime
        else:
            # Delegate to the OS for anything with a physical backing file.
            s = os.stat(resolveRead(path))
            for a in self._attrs:
                setattr(self, a, getattr(s, a))

    # Implement the sequence protocol (os.stat() returns a tuple-like object)
    def __len__(self):
        return len(self._attrs)
    def __getitem__(self, idx):
        return getattr(self, self._attrs[idx])

    # Show our contents when repr()'d.
    def __repr__(self):
        return str(tuple(self))
def stat(path):
    '''
    Get some properties of the specified path, much like C{os.stat()}.

    @param path: Path to operate on
    @return: L{StatResult} for the path
    '''
    result = StatResult(path)
    return result
def listdir(path):
    '''
    List the contents of a virtual directory, much like C{os.listdir()}.

    @param path: Path to list
    @return: List of names of objects in the directory
             (excludes '.' and '..')
    '''
    if path == '/':
        # The virtual root contains exactly the mount points.
        return list(_mountTable)
    mountObj, fragment = _convertPath(path)
    return mountObj.listdir(fragment)
def unlink(path):
    '''
    Delete a virtual file.

    Note: If the virtual file exists in one of the read-only backing
    directories of the mount in which the file is deleted, the file
    will instead appear to revert to the read-only version.

    @param path: Path to delete
    '''
    target = resolveWrite(path)
    os.unlink(target)
def mkdir(path):
    '''
    Create a virtual directory. Also creates any directories leading
    up to it that are missing (like C{os.makedirs()}).

    @param path: Path at which to create a directory
    '''
    target = resolveWrite(path)
    os.makedirs(target)
def rmdir(path):
    '''
    Remove a virtual directory. The directory must be empty.

    @param path: Path to directory to remove
    '''
    target = resolveWrite(path)
    os.rmdir(target)
def rename(src, dest):
    '''
    Rename or move a virtual object.

    @param src: Path to rename from
    @param dest: Path to rename to
    '''
    # Resolve the source first, matching the left-to-right evaluation
    # order of the original call.
    realSrc = resolveWrite(src)
    realDest = resolveWrite(dest)
    os.rename(realSrc, realDest)
def exists(path):
    '''
    Check the existence of a virtual object at a given path.

    @param path: Path to check for existence
    @return: True if object exists, False otherwise
    '''
    try:
        stat(path)
        return True
    except OSError, e:
        # Only "no such file" means non-existence; anything else (e.g.
        # EINVAL from a malformed path) propagates to the caller.
        if e.errno == errno.ENOENT:
            return False
        raise
def isfile(path):
    '''
    Check whether a virtual path represents a file.

    @param path: Path to check for file-ness
    @return: True if it is a file, False otherwise
    '''
    try:
        return S_ISREG(stat(path).st_mode)
    except OSError, e:
        # A missing path is simply "not a file"; other errors propagate.
        if e.errno == errno.ENOENT:
            return False
        raise
def isdir(path):
    '''
    Check whether a virtual path represents a directory.

    @param path: Path to check for dir-ness
    @return: True if it is a dir, False otherwise
    '''
    try:
        return S_ISDIR(stat(path).st_mode)
    except OSError, e:
        # A missing path is simply "not a directory"; other errors propagate.
        if e.errno == errno.ENOENT:
            return False
        raise
# Keep a reference to the builtin before the VFS-aware version shadows it.
_realopen = open
def open(path, mode='r'):
    '''
    Open a virtual file, much like the built-in C{open()} function.

    @param path: Path to open
    @param mode: File mode
    @return: File object of the appropriate physical file
    '''
    readonly = mode in ('r', 'rb')
    if readonly:
        realPath = resolveRead(path)
    else:
        realPath = resolveWrite(path)
    return _realopen(realPath, mode)
def openSqlite3(path):
    '''
    Open a virtual file as a writable SQLite database.

    @param path: Path to open
    @return: C{sqlite3.Connection} object for the file
    '''
    # There is a bug in the sqlite3 module's handling of path names containing
    # unicode characters, so work around that by temporarily changing directory
    # so we access the file with just its base name.
    oldcwd = os.getcwd()
    try:
        dbPath = resolveWrite(path)
        dbDir, dbBase = os.path.split(dbPath)
        os.chdir(dbDir)
        return sqlite3.Connection(dbBase)
    finally:
        # Always restore the working directory, even on failure.
        os.chdir(oldcwd)
# TODO: Function to perform all overlay mounts called for by the
# active configuration: /data will become /data overlaid by the
# current theme overlaid by any active mods and /songs will be the
# song folder. Note that said function is fully intended to be
# called again if said configuration is modified in such a way that
# it changes what should appear in /data or /songs. Other code
# shouldn't really care if the VFS changes out from under it (the
# testing code might, but that's kind of a special case...) -
# VFS.open() returns real file objects that aren't tied to the
# current VFS state.
#
# There are some decisions that must be made before doing this;
# hence, we're leaving it undone presently.
#
# Note that unless the mount tables are modified directly, /data
# and /userdata are guaranteed to exist and point to the proper
# places: /data contains at least the contents of the main /data
# and is read-only, and /userdata points to the user-specific
# configuration area (%APPDATA%\fofix, ~/Library/Preferences/fofix,
# ~/.fofix, or what-have-you) and is read-write. /gameroot also
# always points (read-only) to the root of the game installation
# (though please try to avoid using it directly when the data you
# want is available elsewhere in the VFS!)
# Figure out where to map /userdata to.
if os.name == 'nt':
    # Find the path to Application Data, and do it the Right Way(tm).
    # The APPDATA envvar isn't guaranteed to be reliable.
    # Use ctypes so we don't have to drag in win32com.
    import ctypes
    import ctypes.wintypes
    _appdata = (ctypes.c_char * ctypes.wintypes.MAX_PATH)()
    # [WinSDK]/include/shlobj.h: #define CSIDL_APPDATA 26
    ctypes.windll.shell32.SHGetFolderPathA(None, 26, None, None, _appdata)
    _writePath = os.path.join(_appdata.value, Version.PROGRAM_UNIXSTYLE_NAME)
elif sys.platform == 'darwin':
    # Mac OS X: per-user preferences folder.
    _writePath = os.path.expanduser(os.path.join('~', 'Library', 'Preferences', Version.PROGRAM_UNIXSTYLE_NAME))
else:
    # Everything else (assumed Unix-like): dotted folder in $HOME.
    _writePath = os.path.expanduser(os.path.join('~', '.'+Version.PROGRAM_UNIXSTYLE_NAME))
mountWritable(_writePath, 'userdata')
# Make sure the backing directory actually exists before anyone writes to it.
if not isdir('/userdata'):
    mkdir('/userdata')
# Map /data and /gameroot.
if Version.isWindowsExe():
    _gameRoot = os.path.abspath('.')
else:
    # Running from source: the game root is one level above this module.
    _gameRoot = os.path.abspath('..')
mount(_gameRoot, 'gameroot')
mount(os.path.join(_gameRoot, 'data'), 'data')
|
gpl-2.0
|
openstack/horizon
|
openstack_dashboard/dashboards/project/volumes/forms.py
|
1
|
38443
|
# Copyright 2012 Nebula, Inc.
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing volumes.
"""
from cinderclient import exceptions as cinder_exc
from django.conf import settings
from django.forms import ValidationError
from django.template.defaultfilters import filesizeformat
from django.urls import reverse
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import functions
from horizon.utils.memoized import memoized
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.api import glance
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.project.images import utils
from openstack_dashboard.dashboards.project.instances import tables
from openstack_dashboard.usage import quotas
# Image-backend configuration pulled from the Django settings.
IMAGE_BACKEND_SETTINGS = settings.OPENSTACK_IMAGE_BACKEND
IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS['image_formats']
# NOTE(review): the two constants below are not referenced in this chunk —
# presumably used by upload-to-image code elsewhere; confirm before removing.
VALID_DISK_FORMATS = ('raw', 'vmdk', 'vdi', 'qcow2', 'vhd', 'vhdx')
DEFAULT_CONTAINER_FORMAT = 'bare'
# Determine whether the extension for Cinder AZs is enabled
def cinder_az_supported(request):
    """Return True if Cinder reports the AvailabilityZones extension."""
    try:
        return cinder.extension_supported(request, 'AvailabilityZones')
    except Exception:
        msg = _('Unable to determine if availability '
                'zones extension is supported.')
        exceptions.handle(request, msg)
    return False
def availability_zones(request):
    """Build the availability-zone choice list for volume forms.

    Returns (value, label) pairs of available Cinder AZs, sorted, with a
    blank "any"/"none found" entry prepended as appropriate.
    """
    zone_list = []
    if cinder_az_supported(request):
        try:
            zones = api.cinder.availability_zone_list(request)
            zone_list = sorted(
                (zone.zoneName, zone.zoneName)
                for zone in zones if zone.zoneState['available'])
        except Exception:
            exceptions.handle(request, _('Unable to retrieve availability '
                                         'zones.'))
    if not zone_list:
        zone_list.insert(0, ("", _("No availability zones found")))
    elif len(zone_list) > 1:
        zone_list.insert(0, ("", _("Any Availability Zone")))
    return zone_list
class CreateForm(forms.SelfHandlingForm):
    """Form for creating a volume, optionally from a snapshot, image or
    another volume.

    __init__ inspects the request's GET parameters (snapshot_id, image_id,
    volume_id) and deletes the source-selection fields that do not apply,
    so not every declared field is present on every instance.
    """
    name = forms.CharField(max_length=255, label=_("Volume Name"),
                           required=False)
    description = forms.CharField(max_length=255, widget=forms.Textarea(
        attrs={'rows': 4}),
        label=_("Description"), required=False)
    # Selector that switches which of the *_source fields is shown.
    volume_source_type = forms.ChoiceField(
        label=_("Volume Source"),
        required=False,
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switchable',
            'data-slug': 'source'}))
    snapshot_source = forms.ChoiceField(
        label=_("Use snapshot as a source"),
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'snapshot-selector switched',
                   'data-switch-on': 'source',
                   'data-source-snapshot_source':
                       _("Use snapshot as a source"),
                   'data-required-when-shown': 'true'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%s GiB)" % (x.name, x.size)),
        required=False)
    image_source = forms.ChoiceField(
        label=_("Use image as a source"),
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'image-selector switched',
                   'data-switch-on': 'source',
                   'data-source-image_source':
                       _("Use image as a source"),
                   'data-required-when-shown': 'true'},
            data_attrs=('size', 'name', 'min_disk'),
            transform=lambda x: "%s (%s)" % (x.name, filesizeformat(x.bytes))),
        required=False)
    volume_source = forms.ChoiceField(
        label=_("Use a volume as source"),
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'image-selector switched',
                   'data-switch-on': 'source',
                   'data-source-volume_source':
                       _("Use a volume as source"),
                   'data-required-when-shown': 'true'},
            data_attrs=('size', 'name'),
            transform=lambda x: "%s (%s GiB)" % (x.name, x.size)),
        required=False)
    type = forms.ChoiceField(
        label=_("Type"),
        required=False,
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Type'),
                   'data-source-image_source': _('Type')}))
    size = forms.IntegerField(min_value=1, initial=1, label=_("Size (GiB)"))
    availability_zone = forms.ChoiceField(
        label=_("Availability Zone"),
        required=False,
        widget=forms.ThemableSelectWidget(
            attrs={'class': 'switched',
                   'data-switch-on': 'source',
                   'data-source-no_source_type': _('Availability Zone'),
                   'data-source-image_source': _('Availability Zone')}))
    group = forms.ThemableChoiceField(
        label=_("Group"), required=False,
        help_text=_("Group which the new volume belongs to. Choose "
                    "'No group' if the new volume belongs to no group."))

    def prepare_source_fields_if_snapshot_specified(self, request):
        """Pre-fill the form for creating from the snapshot in the URL,
        and remove the other source-selection fields."""
        try:
            snapshot = self.get_snapshot(request,
                                         request.GET["snapshot_id"])
            self.fields['name'].initial = snapshot.name
            self.fields['size'].initial = snapshot.size
            self.fields['snapshot_source'].choices = ((snapshot.id,
                                                       snapshot),)
            try:
                # Set the volume type from the original volume
                orig_volume = cinder.volume_get(request,
                                                snapshot.volume_id)
                self.fields['type'].initial = orig_volume.volume_type
            except Exception:
                # Best effort only; the type stays at its default.
                pass
            self.fields['size'].help_text = (
                _('Volume size must be equal to or greater than the '
                  'snapshot size (%sGiB)') % snapshot.size)
            self.fields['type'].widget = forms.widgets.HiddenInput()
            del self.fields['image_source']
            del self.fields['volume_source']
            del self.fields['volume_source_type']
            del self.fields['availability_zone']
        except Exception:
            exceptions.handle(request,
                              _('Unable to load the specified snapshot.'))

    def prepare_source_fields_if_image_specified(self, request):
        """Pre-fill the form for creating from the image in the URL,
        and remove the other source-selection fields."""
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        try:
            image = self.get_image(request,
                                   request.GET["image_id"])
            image.bytes = image.size
            self.fields['name'].initial = image.name
            min_vol_size = functions.bytes_to_gigabytes(
                image.size)
            size_help_text = (_('Volume size must be equal to or greater '
                                'than the image size (%s)')
                              % filesizeformat(image.size))
            properties = getattr(image, 'properties', {})
            # min_disk may live either on the image itself or in its
            # properties dict, and may legitimately be 0 or missing.
            min_disk_size = (getattr(image, 'min_disk', 0) or
                             properties.get('min_disk', 0))
            if min_disk_size > min_vol_size:
                # The image's declared minimum disk wins over raw size.
                min_vol_size = min_disk_size
                size_help_text = (_('Volume size must be equal to or '
                                    'greater than the image minimum '
                                    'disk size (%sGiB)')
                                  % min_disk_size)
            self.fields['size'].initial = min_vol_size
            self.fields['size'].help_text = size_help_text
            self.fields['image_source'].choices = ((image.id, image),)
            del self.fields['snapshot_source']
            del self.fields['volume_source']
            del self.fields['volume_source_type']
        except Exception:
            msg = _('Unable to load the specified image. %s')
            exceptions.handle(request, msg % request.GET['image_id'])

    def prepare_source_fields_if_volume_specified(self, request):
        """Pre-fill the form for cloning the volume named in the URL,
        and remove the other source-selection fields."""
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        volume = None
        try:
            volume = self.get_volume(request, request.GET["volume_id"])
        except Exception:
            msg = _('Unable to load the specified volume. %s')
            exceptions.handle(request, msg % request.GET['volume_id'])
        if volume is not None:
            self.fields['name'].initial = volume.name
            self.fields['description'].initial = volume.description
            min_vol_size = volume.size
            size_help_text = (_('Volume size must be equal to or greater '
                                'than the origin volume size (%sGiB)')
                              % volume.size)
            self.fields['size'].initial = min_vol_size
            self.fields['size'].help_text = size_help_text
            self.fields['volume_source'].choices = ((volume.id, volume),)
            self.fields['type'].initial = volume.type
            del self.fields['snapshot_source']
            del self.fields['image_source']
            del self.fields['volume_source_type']

    def prepare_source_fields_default(self, request):
        """Populate all source choice fields; drop any source kind for
        which nothing is available."""
        source_type_choices = []
        self.fields['availability_zone'].choices = \
            availability_zones(request)
        try:
            available = api.cinder.VOLUME_STATE_AVAILABLE
            snapshots = cinder.volume_snapshot_list(
                request, search_opts=dict(status=available))
            if snapshots:
                source_type_choices.append(("snapshot_source",
                                            _("Snapshot")))
                choices = [('', _("Choose a snapshot"))] + \
                          [(s.id, s) for s in snapshots]
                self.fields['snapshot_source'].choices = choices
            else:
                del self.fields['snapshot_source']
        except Exception:
            exceptions.handle(request,
                              _("Unable to retrieve volume snapshots."))
        images = utils.get_available_images(request,
                                            request.user.tenant_id)
        if images:
            source_type_choices.append(("image_source", _("Image")))
            choices = [('', _("Choose an image"))]
            for image in images:
                # Keep the raw byte count for display; 'size' becomes GiB
                # so the widget's data_attrs show a sensible number.
                image.bytes = image.size
                image.size = functions.bytes_to_gigabytes(image.bytes)
                choices.append((image.id, image))
            self.fields['image_source'].choices = choices
        else:
            del self.fields['image_source']
        volumes = self.get_volumes(request)
        if volumes:
            source_type_choices.append(("volume_source", _("Volume")))
            choices = [('', _("Choose a volume"))]
            for volume in volumes:
                choices.append((volume.id, volume))
            self.fields['volume_source'].choices = choices
        else:
            del self.fields['volume_source']
        if source_type_choices:
            choices = ([('no_source_type',
                         _("No source, empty volume"))] +
                       source_type_choices)
            self.fields['volume_source_type'].choices = choices
        else:
            del self.fields['volume_source_type']

    def _populate_group_choices(self, request):
        """Fill the 'group' field; drop it entirely when the Cinder API
        version does not support groups."""
        try:
            groups = cinder.group_list(request)
        except cinder_exc.VersionNotFoundForAPIMethod:
            # Groups unsupported by this API version: hide the field.
            del self.fields['group']
            return
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _('Unable to retrieve the volume group list.'),
                              redirect=redirect)
        group_choices = [(g.id, g.name or g.id) for g in groups]
        group_choices.insert(0, ("", _("No group")))
        self.fields['group'].choices = group_choices

    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        volume_types = []
        try:
            volume_types = cinder.volume_type_list(request)
        except Exception:
            redirect_url = reverse("horizon:project:volumes:index")
            error_message = _('Unable to retrieve the volume type list.')
            exceptions.handle(request, error_message, redirect=redirect_url)
        self.fields['type'].choices = [("", _("No volume type"))] + \
                                      [(type.name, type.name)
                                       for type in volume_types]
        if 'initial' in kwargs and 'type' in kwargs['initial']:
            # if there is a default volume type to select, then remove
            # the first "No volume type" entry
            self.fields['type'].choices.pop(0)
        # Dispatch on the GET parameter that names the volume source.
        if "snapshot_id" in request.GET:
            self.prepare_source_fields_if_snapshot_specified(request)
        elif 'image_id' in request.GET:
            self.prepare_source_fields_if_image_specified(request)
        elif 'volume_id' in request.GET:
            self.prepare_source_fields_if_volume_specified(request)
        else:
            self.prepare_source_fields_default(request)
        self._populate_group_choices(request)

    def clean(self):
        """Require the matching *_source value for the selected source type."""
        cleaned_data = super().clean()
        source_type = self.cleaned_data.get('volume_source_type')
        if (source_type == 'image_source' and
                not cleaned_data.get('image_source')):
            msg = _('Image source must be specified')
            self._errors['image_source'] = self.error_class([msg])
        elif (source_type == 'snapshot_source' and
                not cleaned_data.get('snapshot_source')):
            msg = _('Snapshot source must be specified')
            self._errors['snapshot_source'] = self.error_class([msg])
        elif (source_type == 'volume_source' and
                not cleaned_data.get('volume_source')):
            msg = _('Volume source must be specified')
            self._errors['volume_source'] = self.error_class([msg])
        return cleaned_data

    def get_volumes(self, request):
        """Return volumes in the 'available' state (empty list on error)."""
        volumes = []
        try:
            available = api.cinder.VOLUME_STATE_AVAILABLE
            volumes = cinder.volume_list(self.request,
                                         search_opts=dict(status=available))
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve list of volumes.'))
        return volumes

    def handle(self, request, data):
        """Create the volume via Cinder.

        Validates the requested size against the chosen source and the
        tenant's quota, then issues the create call. Returns the new
        volume on success, False on a validation error.
        """
        try:
            usages = quotas.tenant_quota_usages(
                self.request, targets=('volumes', 'gigabytes'))
            availableGB = usages['gigabytes']['available']
            availableVol = usages['volumes']['available']
            snapshot_id = None
            image_id = None
            volume_id = None
            source_type = data.get('volume_source_type', None)
            az = data.get('availability_zone', None) or None
            volume_type = data.get('type')
            if (data.get("snapshot_source", None) and
                    source_type in ['', None, 'snapshot_source']):
                # Create from Snapshot
                snapshot = self.get_snapshot(request,
                                             data["snapshot_source"])
                snapshot_id = snapshot.id
                if data['size'] < snapshot.size:
                    error_message = (_('The volume size cannot be less than '
                                       'the snapshot size (%sGiB)')
                                     % snapshot.size)
                    raise ValidationError(error_message)
                # AZ and type are inherited from the snapshot's volume.
                az = None
                volume_type = ""
            elif (data.get("image_source", None) and
                    source_type in ['', None, 'image_source']):
                # Create from Image
                image = self.get_image(request,
                                       data["image_source"])
                image_id = image.id
                image_size = functions.bytes_to_gigabytes(image.size)
                if data['size'] < image_size:
                    error_message = (_('The volume size cannot be less than '
                                       'the image size (%s)')
                                     % filesizeformat(image.size))
                    raise ValidationError(error_message)
                properties = getattr(image, 'properties', {})
                min_disk_size = (getattr(image, 'min_disk', 0) or
                                 properties.get('min_disk', 0))
                if min_disk_size > 0 and data['size'] < min_disk_size:
                    error_message = (_('The volume size cannot be less than '
                                       'the image minimum disk size (%sGiB)')
                                     % min_disk_size)
                    raise ValidationError(error_message)
            elif (data.get("volume_source", None) and
                    source_type in ['', None, 'volume_source']):
                # Create from volume
                volume = self.get_volume(request, data["volume_source"])
                volume_id = volume.id
                # Cloned volumes keep the source volume's type.
                volume_type = None
                if data['size'] < volume.size:
                    error_message = (_('The volume size cannot be less than '
                                       'the source volume size (%sGiB)')
                                     % volume.size)
                    raise ValidationError(error_message)
            else:
                # Empty volume: only quota checks apply.
                if type(data['size']) is str:
                    data['size'] = int(data['size'])
                if availableGB < data['size']:
                    error_message = _('A volume of %(req)iGiB cannot be created '
                                      'as you only have %(avail)iGiB of your '
                                      'quota available.')
                    params = {'req': data['size'],
                              'avail': availableGB}
                    raise ValidationError(error_message % params)
                if availableVol <= 0:
                    error_message = _('You are already using all of your available'
                                      ' volumes.')
                    raise ValidationError(error_message)
            metadata = {}
            volume = cinder.volume_create(request,
                                          data['size'],
                                          data['name'],
                                          data['description'],
                                          volume_type,
                                          snapshot_id=snapshot_id,
                                          image_id=image_id,
                                          metadata=metadata,
                                          availability_zone=az,
                                          source_volid=volume_id,
                                          group_id=data.get('group') or None)
            message = _('Creating volume "%s"') % volume.name
            messages.info(request, message)
            return volume
        except ValidationError as e:
            # Surface the first validation message as a form-level error.
            self.api_error(e.messages[0])
            return False
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _("Unable to create volume."),
                              redirect=redirect)

    # Memoized API lookups: each object is fetched at most once per form.
    @memoized
    def get_snapshot(self, request, id):
        return cinder.volume_snapshot_get(request, id)

    @memoized
    def get_image(self, request, id):
        return glance.image_get(request, id)

    @memoized
    def get_volume(self, request, id):
        return cinder.volume_get(request, id)
class AttachForm(forms.SelfHandlingForm):
    """Form for attaching an existing volume to an instance."""
    instance = forms.ThemableChoiceField(label=_("Attach to Instance"),
                                         help_text=_("Select an instance to "
                                                     "attach to."))
    device = forms.CharField(label=_("Device Name"),
                             widget=forms.TextInput(attrs={'placeholder':
                                                           '/dev/vdc'}),
                             required=False,
                             help_text=_("Actual device name may differ due "
                                         "to hypervisor settings. If not "
                                         "specified, then hypervisor will "
                                         "select a device name."))

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Hide the device field if the hypervisor doesn't support it.
        if not nova.can_set_mount_point():
            self.fields['device'].widget = forms.widgets.HiddenInput()
        # populate volume_id
        # NOTE(review): 'volume' is expected in the view's initial data;
        # the loop below reads volume.attachments, which would fail if
        # volume is None — confirm callers always supply it.
        volume = kwargs.get('initial', {}).get("volume", None)
        if volume:
            volume_id = volume.id
        else:
            volume_id = None
        self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
                                                   initial=volume_id)
        # Populate instance choices: only attach-ready instances that do
        # not already have this volume attached.
        instance_list = kwargs.get('initial', {}).get('instances', [])
        instances = []
        for instance in instance_list:
            if instance.status in tables.VOLUME_ATTACH_READY_STATES and \
                    not any(instance.id == att["server_id"]
                            for att in volume.attachments):
                instances.append((instance.id, '%s (%s)' % (instance.name,
                                                            instance.id)))
        if instances:
            instances.insert(0, ("", _("Select an instance")))
        else:
            instances = (("", _("No instances available")),)
        self.fields['instance'].choices = instances

    def handle(self, request, data):
        """Attach the volume to the selected instance via Nova."""
        instance_choices = dict(self.fields['instance'].choices)
        instance_name = instance_choices.get(data['instance'],
                                             _("Unknown instance (None)"))
        # The name of the instance in the choices list has the ID appended to
        # it, so let's slice that off...
        instance_name = instance_name.rsplit(" (")[0]
        # api requires non-empty device name or None
        device = data.get('device') or None
        try:
            attach = api.nova.instance_volume_attach(request,
                                                     data['volume_id'],
                                                     data['instance'],
                                                     device)
            volume = cinder.volume_get(request, data['volume_id'])
            message = _('Attaching volume %(vol)s to instance '
                        '%(inst)s on %(dev)s.') % {"vol": volume.name,
                                                   "inst": instance_name,
                                                   "dev": attach.device}
            messages.info(request, message)
            return True
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _('Unable to attach volume.'),
                              redirect=redirect)
class CreateSnapshotForm(forms.SelfHandlingForm):
    """Form for creating a snapshot of an existing volume.

    Snapshotting an attached ('in-use') volume is allowed but requires
    force=True on the Cinder call.
    """
    name = forms.CharField(max_length=255, label=_("Snapshot Name"),
                           required=False)
    description = forms.CharField(max_length=255,
                                  widget=forms.Textarea(attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)

    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        # populate volume_id (hidden; supplied through the view's initial data)
        volume_id = kwargs.get('initial', {}).get('volume_id', [])
        self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
                                                   initial=volume_id)

    def handle(self, request, data):
        """Create the snapshot via Cinder.

        Returns the new snapshot on success; on failure, redirects with
        an error message (quota-specific when the API reports HTTP 413).
        """
        try:
            volume = cinder.volume_get(request,
                                       data['volume_id'])
            force = False
            message = _('Creating volume snapshot "%s".')
            if volume.status == 'in-use':
                # Attached volumes can only be snapshotted with force=True.
                force = True
                message = _('Forcing to create snapshot "%s" '
                            'from attached volume.')
            snapshot = cinder.volume_snapshot_create(request,
                                                     data['volume_id'],
                                                     data['name'],
                                                     data['description'],
                                                     force=force)
            messages.info(request, message % snapshot.name)
            return snapshot
        except Exception as e:
            redirect = reverse("horizon:project:volumes:index")
            msg = _('Unable to create volume snapshot.')
            # Not every exception caught here is a cinderclient error with a
            # 'code' attribute; the previous unguarded `e.code` access raised
            # AttributeError for other exception types, masking the real error.
            if getattr(e, 'code', None) == 413:
                msg = _('Requested snapshot would exceed the allowed quota.')
            exceptions.handle(request,
                              msg,
                              redirect=redirect)
class CreateTransferForm(forms.SelfHandlingForm):
    """Form for creating a volume ownership transfer."""
    name = forms.CharField(max_length=255, label=_("Transfer Name"))

    def __init__(self, *args, **kwargs):
        # next_view renders the transfer's ID and auth key after creation.
        self.next_view = kwargs.pop('next_view', None)
        super().__init__(*args, **kwargs)

    def clean_name(self):
        """Reject names consisting only of whitespace."""
        cleaned_name = self.cleaned_data['name']
        if cleaned_name.isspace():
            msg = _('Volume transfer name cannot be empty.')
            self._errors['name'] = self.error_class([msg])
        return cleaned_name

    def handle(self, request, data):
        """Create the transfer and hand off to the view showing its auth key."""
        try:
            volume_id = self.initial['volume_id']
            transfer = cinder.transfer_create(request, volume_id, data['name'])
            msg = _('Created volume transfer: "%s".') % data['name']
            messages.success(request, msg)
            kwargs = {
                'transfer_id': transfer.id,
                'auth_key': transfer.auth_key
            }
            # Re-dispatch as a GET so next_view renders rather than submits.
            request.method = 'GET'
            return self.next_view.as_view()(request, **kwargs)
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request, _('Unable to create volume transfer.'),
                              redirect=redirect)
class AcceptTransferForm(forms.SelfHandlingForm):
    """Form used to accept a volume transfer by ID and authorization key."""
    # These max lengths correspond to the sizes in cinder
    transfer_id = forms.CharField(max_length=36, label=_("Transfer ID"))
    auth_key = forms.CharField(max_length=16, label=_("Authorization Key"))
    def handle(self, request, data):
        """Accept the transfer; returns it on success."""
        try:
            accepted = cinder.transfer_accept(request,
                                              data['transfer_id'],
                                              data['auth_key'])
            messages.success(
                request,
                _('Successfully accepted volume transfer: "%s"')
                % data['transfer_id'])
            return accepted
        except Exception:
            exceptions.handle(
                request,
                _('Unable to accept volume transfer.'),
                redirect=reverse("horizon:project:volumes:index"))
class ShowTransferForm(forms.SelfHandlingForm):
    """Read-only form displaying a volume transfer's details."""
    name = forms.CharField(
        required=False,
        label=_("Transfer Name"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    id = forms.CharField(
        required=False,
        label=_("Transfer ID"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    auth_key = forms.CharField(
        required=False,
        label=_("Authorization Key"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}))
    def handle(self, request, data):
        # Display-only form: nothing to do on submit.
        pass
class UpdateForm(forms.SelfHandlingForm):
    """Form for editing a volume's name, description and bootable flag."""
    name = forms.CharField(max_length=255,
                           label=_("Volume Name"),
                           required=False)
    description = forms.CharField(max_length=255,
                                  widget=forms.Textarea(attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)
    bootable = forms.BooleanField(label=_("Bootable"),
                                  required=False,
                                  help_text=_("Specifies that the volume can "
                                              "be used to launch an instance"))
    def handle(self, request, data):
        """Apply the edits; returns True on success.

        NOTE(review): the code after each except block relies on
        exceptions.handle() raising when given a redirect, so `volume`
        is always bound once the final message is built -- confirm.
        """
        volume_id = self.initial['volume_id']
        try:
            volume = cinder.volume_update(request, volume_id, data['name'],
                                          data['description'])
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _('Unable to update volume.'),
                              redirect=redirect)
        # only update bootable flag if modified
        make_bootable = data['bootable']
        if make_bootable != self.initial['bootable']:
            try:
                cinder.volume_set_bootable(request, volume_id, make_bootable)
            except Exception:
                redirect = reverse("horizon:project:volumes:index")
                exceptions.handle(request,
                                  _('Unable to set bootable flag on volume.'),
                                  redirect=redirect)
        name_or_id = volume["volume"]["name"] or volume["volume"]["id"]
        message = _('Updating volume "%s"') % name_or_id
        messages.info(request, message)
        return True
class UploadToImageForm(forms.SelfHandlingForm):
    """Form that uploads a volume's contents to Glance as a new image."""
    name = forms.CharField(label=_('Volume Name'),
                           widget=forms.TextInput(
                               attrs={'readonly': 'readonly'}))
    image_name = forms.CharField(max_length=255, label=_('Image Name'))
    disk_format = forms.ChoiceField(label=_('Disk Format'),
                                    widget=forms.ThemableSelectWidget(),
                                    required=False)
    force = forms.BooleanField(
        label=pgettext_lazy("Force upload volume in in-use status to image",
                            "Force"),
        widget=forms.CheckboxInput(),
        required=False)
    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        # 'vhd','iso','aki','ari' and 'ami' disk formats are supported by
        # glance, but not by qemu-img. qemu-img supports 'vpc', 'cloop', 'cow'
        # and 'qcow' which are not supported by glance.
        # I can only use 'raw', 'vmdk', 'vdi' or 'qcow2' so qemu-img will not
        # have issues when processes image request from cinder.
        disk_format_choices = [(value, name) for value, name
                               in glance.get_image_formats(request)
                               if value in VALID_DISK_FORMATS]
        self.fields['disk_format'].choices = disk_format_choices
        self.fields['disk_format'].initial = 'raw'
        # The force checkbox only matters for attached ('in-use') volumes.
        if self.initial['status'] != 'in-use':
            self.fields['force'].widget = forms.widgets.HiddenInput()
    def handle(self, request, data):
        """Request the volume-to-image upload; returns True on success."""
        volume_id = self.initial['id']
        try:
            # 'aki','ari','ami' container formats are supported by glance,
            # but they need matching disk format to use.
            # Glance usually uses 'bare' for other disk formats except
            # amazon's. Please check the comment in CreateImageForm class
            cinder.volume_upload_to_image(request,
                                          volume_id,
                                          data['force'],
                                          data['image_name'],
                                          DEFAULT_CONTAINER_FORMAT,
                                          data['disk_format'])
            message = _(
                'Successfully sent the request to upload volume to image '
                'for volume: "%s"') % data['name']
            messages.info(request, message)
            return True
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            error_message = _(
                'Unable to upload volume to image for volume: "%s"') \
                % data['name']
            exceptions.handle(request, error_message, redirect=redirect)
class ExtendForm(forms.SelfHandlingForm):
    """Form for growing a volume to a larger size."""
    name = forms.CharField(
        label=_("Volume Name"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False,
    )
    orig_size = forms.IntegerField(
        label=_("Current Size (GiB)"),
        widget=forms.TextInput(attrs={'readonly': 'readonly'}),
        required=False,
    )
    new_size = forms.IntegerField(label=_("New Size (GiB)"))

    def clean(self):
        """Validate that the new size is larger and within quota."""
        cleaned_data = super().clean()
        new_size = cleaned_data.get('new_size')
        if new_size is None:
            # new_size failed field-level validation (missing/non-integer);
            # skip cross-field checks instead of comparing None to an int.
            return cleaned_data
        orig_size = self.initial['orig_size']
        if new_size <= orig_size:
            error_msg = _("New size must be greater than current size.")
            self._errors['new_size'] = self.error_class([error_msg])
            return cleaned_data
        usages = quotas.tenant_quota_usages(
            self.request, targets=('gigabytes',))
        availableGB = usages['gigabytes']['available']
        if availableGB < (new_size - orig_size):
            message = _('Volume cannot be extended to %(req)iGiB as '
                        'the maximum size it can be extended to is '
                        '%(max_size)iGiB.')
            params = {'req': new_size, 'max_size': (availableGB + orig_size)}
            self._errors["new_size"] = self.error_class([message % params])
        return cleaned_data

    def handle(self, request, data):
        """Request the extension; returns the volume on success."""
        volume_id = self.initial['id']
        try:
            volume = cinder.volume_extend(request,
                                          volume_id,
                                          data['new_size'])
            message = _('Extending volume: "%s"') % data['name']
            messages.info(request, message)
            return volume
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            exceptions.handle(request,
                              _('Unable to extend volume.'),
                              redirect=redirect)
class RetypeForm(forms.SelfHandlingForm):
    """Form that changes a volume's type, with an optional migration policy."""
    name = forms.CharField(label=_('Volume Name'),
                           widget=forms.TextInput(
                               attrs={'readonly': 'readonly'}))
    volume_type = forms.ThemableChoiceField(label=_('Type'))
    MIGRATION_POLICY_CHOICES = [('never', _('Never')),
                                ('on-demand', _('On Demand'))]
    migration_policy = forms.ChoiceField(label=_('Migration Policy'),
                                         widget=forms.ThemableSelectWidget(),
                                         choices=MIGRATION_POLICY_CHOICES,
                                         initial='never',
                                         required=False)
    def __init__(self, request, *args, **kwargs):
        super().__init__(request, *args, **kwargs)
        try:
            volume_types = cinder.volume_type_list(request)
        except Exception:
            redirect_url = reverse("horizon:project:volumes:index")
            error_message = _('Unable to retrieve the volume type list.')
            exceptions.handle(request, error_message, redirect=redirect_url)
        # Build the choices, labeling the volume's current type.
        origin_type = self.initial['volume_type']
        type_list = [(t.name,
                      _("%s (current)") % t.name
                      if origin_type == t.name else t.name)
                     for t in volume_types]
        if not type_list:
            type_list.insert(0, ("", _("No other volume types available")))
        self.fields['volume_type'].choices = sorted(type_list)
    def clean_volume_type(self):
        """Reject retyping to the volume's current type."""
        volume_type = self.cleaned_data.get("volume_type")
        if self.initial['volume_type'] == volume_type:
            msg = _('The new type must be different from the '
                    'current volume type.')
            raise forms.ValidationError(msg)
        return volume_type
    def handle(self, request, data):
        """Request the retype; returns True on success."""
        volume_id = self.initial['id']
        try:
            cinder.volume_retype(request,
                                 volume_id,
                                 data['volume_type'],
                                 data['migration_policy'])
            message = _(
                'Successfully sent the request to change the volume '
                'type to "%(vtype)s" for volume: "%(name)s"')
            params = {'name': data['name'],
                      'vtype': data['volume_type']}
            messages.info(request, message % params)
            return True
        except Exception:
            redirect = reverse("horizon:project:volumes:index")
            error_message = _(
                'Unable to change the volume type for volume: "%s"') \
                % data['name']
            exceptions.handle(request, error_message, redirect=redirect)
|
apache-2.0
|
pharos3d/tundra
|
tools/Converters/tundra2-txml-converter.py
|
6
|
6763
|
import sys
import os
"""
For this script to work you need the jack.mesh from /media/models in /data/assets
You will have to manually put your txml file name down in the __main__ func to:
--> fileName = "putFileNameHere.txml" <--
1. Run this script on your tundra 1.x txml
2. This will create a new file with "_" appended before the .txml extension.
3. Open this file in your tundra 2.x server
4. Find the "Jack" entity, he will be lurking at pos 0,0,0.
5. Shift+E for entity editor, select jack, if you don't see the manip visual aids, hit "tilde" (next to "1" key)
6. Open Placeable component and rotate how you like, the whole scene will rotate with jack. Seems that -90 to x will do the trick.
7. Shift+S for scene editor, right click -> save scene as...
Problems with this technique
* You cannot remove or edit the grandparent jack's placeable component:
scale needs to be 1,1,1 or it will scale the whole scene (but might be handy if you want to do this)
* The Jack grandparent needs to be there always or the scene will flip back.
There are also some experimental placeable Transform manipulations that you can do to the txml, but they probably
won't work on an arbitrary scene, as everything needs a common pivot point for the rotation.
So this is a temporary hack. The proper way is to export your models from e.g. Blender with the correct axis flip built in
and import again.
"""
def getFileContent(filePath):
    # Return the whole contents of filePath as a string, or None if the
    # file cannot be read.  (Python 2 module: print statements below.)
    try:
        f = open(filePath, 'r')
        c = f.read()
        f.close()
        return c
    except IOError as e:
        # Report the failure and signal it to the caller with None.
        print "IOError on input file:", filePath
        print e
        return None
def saveNewContent(filePath, newContent):
    # Write newContent to filePath, creating/truncating the file.
    # Errors are printed but not propagated (Python 2 module).
    try:
        f = open(filePath, "w+")
        f.write(newContent)
        f.close()
    except IOError as e:
        print "IOError on writing to file:", filePath
        print e
class Transform:
    """A 9-component "px,py,pz,rx,ry,rz,sx,sy,sz" transform string parsed
    into position / rotation / scale dicts (components kept as strings)."""
    _AXES = ("x", "y", "z")

    def __init__(self, value):
        parts = value.split(",")
        self.pos = dict(zip(self._AXES, parts[0:3]))
        self.rot = dict(zip(self._AXES, parts[3:6]))
        self.scale = dict(zip(self._AXES, parts[6:9]))

    def flip(self, vec, first, second):
        """Swap the 'first' and 'second' components of vec in place."""
        vec[first], vec[second] = vec[second], vec[first]

    def rotate(self, vec, axis, deg):
        """Add deg to vec[axis], keeping the string representation."""
        vec[axis] = str(float(vec[axis]) + deg)

    def getNewValue(self):
        """Serialize back to the comma-separated 9-component form."""
        return ",".join(group[axis]
                        for group in (self.pos, self.rot, self.scale)
                        for axis in self._AXES)
if __name__ == "__main__":
    # Parse -i/--input and -o/--output; fall back to hard-coded names when
    # getopt is unavailable or no options are given.
    try:
        import getopt
        try:
            opts, args = getopt.getopt(sys.argv[1:], "i:o:", ["input=", "output="])
        except getopt.GetoptError, err:
            # print help information and exit:
            print str(err)
            print "Usage: tundra2-txml-converter.py [-i|--input=inputTXML] [-o|--output=outputTXML]"
            sys.exit(2)
        # Set defaults first:
        fileName = "putFileNameHere.txml"
        newFileName = fileName[:fileName.index(".txml")] + "_.txml"
        # NOTE(review): if only -i is given, newFileName keeps the default
        # derived from the placeholder, not from the input name -- confirm.
        for o, a in opts:
            if o in ("-i", "--input"):
                fileName = a
            elif o in ("-o", "--output"):
                newFileName = a
    except ImportError:
        fileName = "putFileNameHere.txml"
        newFileName = fileName[:fileName.index(".txml")] + "_.txml"
    c = getFileContent(fileName)
    if c == None: sys.exit(2)
    lines = c.splitlines(True)
    # Name of the generated "grandparent" entity every placeable is
    # reparented to (see module docstring).
    parentName = "GeneratedGrandParentEntity"
    parentEntXml = """ <entity id="1">
<component type="EC_Mesh" sync="1">
<attribute value="0,0,0,0,0,0,1,1,1" name="Transform"/>
<attribute value="local://Jack.mesh" name="Mesh ref"/>
<attribute value="" name="Skeleton ref"/>
<attribute value="" name="Mesh materials"/>
<attribute value="0" name="Draw distance"/>
<attribute value="false" name="Cast shadows"/>
</component>
<component type="EC_Placeable" sync="1">
<attribute value="0,0,-20,0,0,0,1,1,1" name="Transform"/>
<attribute value="false" name="Show bounding box"/>
<attribute value="true" name="Visible"/>
<attribute value="1" name="Selection layer"/>
<attribute value="" name="Parent entity ref"/>
<attribute value="" name="Parent bone name"/>
</component>
<component type="EC_Name" sync="1">
<attribute value="GeneratedGrandParentEntity" name="name"/>
<attribute value="" name="description"/>
</component>
</entity>
"""
    # Line-oriented rewrite: inject the grandparent entity right after
    # <scene>, and point every EC_Placeable's "Parent entity ref" at it.
    out = ""
    totalIndex = 0
    expectParentAttr = False
    for line in lines:
        totalIndex += len(line)
        if line.count("<scene>") > 0:
            out += line
            out += parentEntXml
            continue
        if line.count("component type=\"EC_Placeable\"") > 0:
            out += line
            # Search only within this component's XML for an existing
            # "Parent entity ref" attribute.
            compEnd = c.find("</component>", totalIndex)
            iPlaceableEnd = c.find("name=\"Parent entity ref\"", totalIndex, compEnd)
            # Found existing, update
            if iPlaceableEnd > 0:
                expectParentAttr = True
            # did not find, generate
            else:
                out += " <attribute value=\"" + parentName + "\" name=\"Parent entity ref\"/>\n"
        elif expectParentAttr:
            if line.count("name=\"Parent entity ref\"") > 0:
                expectParentAttr = False
                # Extract the current value="..." contents of this line.
                start = line.find("\"")
                end = line.find("\"", start+1)
                value = line[start+1:end]
                if value == "":
                    out += " <attribute value=\"" + parentName + "\" name=\"Parent entity ref\"/>\n"
                else:
                    newLine = line[:start+1] + parentName + line[end:]
                    out += newLine
            else:
                out += line
        else:
            out += line
    # Disabled experimental per-line Transform rotation (see docstring).
    """
    if line.count("name=\"Transform\"") <= 0:
        out += line
        continue
    start = line.find("\"")
    if start == -1:
        out += line
        continue
    end = line.find("\"", start+1)
    value = line[start+1:end]
    t = Transform(value)
    t.flip(t.rot, "y", "z")
    newValue = t.getNewValue()
    out += line.replace(value, newValue)
    """
    saveNewContent(newFileName, out)
|
apache-2.0
|
zepheira/amara
|
demo/7days/day1.py
|
1
|
1051
|
import sys, datetime
from amara.writers.struct import *
from amara.namespaces import *
# Category terms attached to the sample entry below.
tags = [u"xml", u"python", u"atom"]
# structwriter emits the document as it is fed; indent=u"yes" pretty-prints.
w = structwriter(indent=u"yes")
# Build and emit a small Atom feed.
# NOTE(review): Atom normally wraps name/uri/email in an <author> element
# and places <content> inside <entry>; here 'name' and 'content' are direct
# children of <feed> -- confirm this is intentional.
w.feed(
ROOT(
  E((ATOM_NAMESPACE, u'feed'), {(XML_NAMESPACE, u'xml:lang'): u'en'},
    E(u'id', u'urn:bogus:myfeed'),
    E(u'title', u'MyFeed'),
    E(u'updated', datetime.datetime.now().isoformat()),
    E(u'name',
      E(u'title', u'Uche Ogbuji'),
      E(u'uri', u'http://uche.ogbuji.net'),
      E(u'email', u'uche@ogbuji.net'),
    ),
    E(u'link', {u'href': u'/blog'}),
    E(u'link', {u'href': u'/blog/atom1.0', u'rel': u'self'}),
    E(u'entry',
      E(u'id', u'urn:bogus:myfeed:entry1'),
      E(u'title', u'Hello world'),
      E(u'updated', datetime.datetime.now().isoformat()),
      # One <category> per tag, via a generator expression.
      ( E(u'category', {u'term': t}) for t in tags ),
    ),
    E(u'content', {u'type': u'xhtml'},
      E((XHTML_NAMESPACE, u'div'),
        E(u'p', u'Happy to be here')
      )
    )
  ))
)
|
apache-2.0
|
jmmv/markdown2social
|
markdown2social/__init__.py
|
1
|
1225
|
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
# Program name to use for log messages.
PROGRAM_NAME = os.path.basename(sys.argv[0])


def _build_logger():
    """Instantiates a global logger for the program.

    Returns:
        Logger. The logger instance to use for the application.
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(
        PROGRAM_NAME + ': %(levelname)s: %(message)s'))
    app_logger = logging.getLogger(PROGRAM_NAME)
    app_logger.addHandler(stream_handler)
    app_logger.setLevel(logging.WARNING)
    return app_logger


# Global logger instance for the application.
LOGGER = _build_logger()
|
apache-2.0
|
manaschaturvedi/oscarbuddy
|
html5lib/treebuilders/_base.py
|
715
|
13699
|
from __future__ import absolute_import, division, unicode_literals
from six import text_type
from ..constants import scopingElements, tableInsertModeElements, namespaces
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
Marker = None
# Maps elementInScope() variants to (element set, invert flag).  With
# invert False the set lists elements that terminate the scope; with
# invert True (the "select" variant) it lists those that do NOT.
listElementsMap = {
    None: (frozenset(scopingElements), False),
    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
                                              (namespaces["html"], "ul")])), False),
    "table": (frozenset([(namespaces["html"], "html"),
                         (namespaces["html"], "table")]), False),
    "select": (frozenset([(namespaces["html"], "optgroup"),
                          (namespaces["html"], "option")]), True)
}
class Node(object):
    """Abstract item in the tree.

    name       - tag name associated with the node
    parent     - parent node, or None for the document node
    value      - value of the node (applies to text nodes and comments)
    attributes - dict mapping attribute names to values
    childNodes - child nodes; must include all elements but not
                 necessarily other node types
    _flags     - miscellaneous flags that can be set on the node
    """
    def __init__(self, name):
        self.name = name
        self.parent = None
        self.value = None
        self.attributes = {}
        self.childNodes = []
        self._flags = []

    def __str__(self):
        rendered_attrs = " ".join("%s=\"%s\"" % pair
                                  for pair in self.attributes.items())
        if not rendered_attrs:
            return "<%s>" % (self.name)
        return "<%s %s>" % (self.name, rendered_attrs)

    def __repr__(self):
        return "<%s>" % (self.name)

    def appendChild(self, node):
        """Insert node as a child of the current node."""
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert data as text in the current node, positioned before
        insertBefore or at the end of the node's text."""
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert node as a child before refNode; raises ValueError if
        refNode is not a child of the current node."""
        raise NotImplementedError

    def removeChild(self, node):
        """Remove node from the children of the current node."""
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move all children of the current node onto newParent.

        Needed so trees that don't store text as nodes still move the
        text correctly.
        """
        # XXX - should this method be made more general?
        for child in self.childNodes:
            newParent.appendChild(child)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy: same name and attributes, but no
        parent or children."""
        raise NotImplementedError

    def hasContent(self):
        """Return true if the node has children or text."""
        raise NotImplementedError
class ActiveFormattingElements(list):
    """List of active formatting elements implementing the "Noah's Ark"
    clause: before appending, remove the earliest of three entries equal
    to the new node since the last Marker."""
    def append(self, node):
        if node != Marker:
            seen = 0
            for candidate in reversed(self):
                if candidate == Marker:
                    break
                if self.nodesEqual(candidate, node):
                    seen += 1
                if seen == 3:
                    self.remove(candidate)
                    break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        """Nodes are equal when both nameTuple and attributes match."""
        return (node1.nameTuple == node2.nameTuple and
                node1.attributes == node2.attributes)
class TreeBuilder(object):
    """Base treebuilder implementation
    documentClass - the class to use for the bottommost node of a document
    elementClass - the class to use for HTML Elements
    commentClass - the class to use for comments
    doctypeClass - the class to use for doctypes
    """
    # Document class
    documentClass = None
    # The class to use for creating a node
    elementClass = None
    # The class to use for creating comments
    commentClass = None
    # The class to use for creating doctypes
    doctypeClass = None
    # Fragment class
    fragmentClass = None
    def __init__(self, namespaceHTMLElements):
        # When namespaceHTMLElements is true, elements default to the
        # XHTML namespace; otherwise they carry no namespace.
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        self.reset()
    def reset(self):
        """Reset all parse state for a fresh document."""
        self.openElements = []
        self.activeFormattingElements = ActiveFormattingElements()
        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None
        self.insertFromTable = False
        self.document = self.documentClass()
    def elementInScope(self, target, variant=None):
        """Return True if target is in scope per the given scope variant
        (a key of listElementsMap); walks openElements top-down."""
        # If we pass a node in we match that. if we pass a string
        # match any node with that name
        exactNode = hasattr(target, "nameTuple")
        listElements, invert = listElementsMap[variant]
        for node in reversed(self.openElements):
            if (node.name == target and not exactNode or
                node == target and exactNode):
                return True
            elif (invert ^ (node.nameTuple in listElements)):
                return False
        assert False # We should never reach this point
    def reconstructActiveFormattingElements(self):
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.
        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return
        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return
        # Step 6
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]
        while True:
            # Step 7
            i += 1
            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode() # Mainly to get a new copy of the attributes
            # Step 9
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})
            # Step 10
            self.activeFormattingElements[i] = element
            # Step 11
            if element == self.activeFormattingElements[-1]:
                break
    def clearActiveFormattingElements(self):
        # Pop entries up to and including the most recent Marker.
        entry = self.activeFormattingElements.pop()
        while self.activeFormattingElements and entry != Marker:
            entry = self.activeFormattingElements.pop()
    def elementInActiveFormattingElements(self, name):
        """Check if an element exists between the end of the active
        formatting elements and the last marker. If it does, return it, else
        return false"""
        for item in self.activeFormattingElements[::-1]:
            # Check for Marker first because if it's a Marker it doesn't have a
            # name attribute.
            if item == Marker:
                break
            elif item.name == name:
                return item
        return False
    def insertRoot(self, token):
        # Create the root element and attach it to the document node.
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)
    def insertDoctype(self, token):
        # Build a doctype node from the token and append it to the document.
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        doctype = self.doctypeClass(name, publicId, systemId)
        self.document.appendChild(doctype)
    def insertComment(self, token, parent=None):
        # Default parent is the current (innermost) open element.
        if parent is None:
            parent = self.openElements[-1]
        parent.appendChild(self.commentClass(token["data"]))
    def createElement(self, token):
        """Create an element but don't insert it anywhere"""
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        return element
    def _getInsertFromTable(self):
        # Property getter for insertFromTable.
        return self._insertFromTable
    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal
    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
    def insertElementNormal(self, token):
        # Create an element and append it to the current open element.
        name = token["name"]
        assert isinstance(name, text_type), "Element %s not unicode" % name
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        self.openElements[-1].appendChild(element)
        self.openElements.append(element)
        return element
    def insertElementTable(self, token):
        """Create an element and insert it into the tree"""
        element = self.createElement(token)
        if self.openElements[-1].name not in tableInsertModeElements:
            return self.insertElementNormal(token)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            if insertBefore is None:
                parent.appendChild(element)
            else:
                parent.insertBefore(element, insertBefore)
            self.openElements.append(element)
        return element
    def insertText(self, data, parent=None):
        """Insert text data."""
        if parent is None:
            parent = self.openElements[-1]
        if (not self.insertFromTable or (self.insertFromTable and
                                         self.openElements[-1].name
                                         not in tableInsertModeElements)):
            parent.insertText(data)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            parent.insertText(data, insertBefore)
    def getTableMisnestedNodePosition(self):
        """Get the foster parent element, and sibling to insert before
        (or None) when inserting a misnested table node"""
        # The foster parent element is the one which comes before the most
        # recently opened table element
        # XXX - this is really inelegant
        lastTable = None
        fosterParent = None
        insertBefore = None
        for elm in self.openElements[::-1]:
            if elm.name == "table":
                lastTable = elm
                break
        if lastTable:
            # XXX - we should really check that this parent is actually a
            # node here
            if lastTable.parent:
                fosterParent = lastTable.parent
                insertBefore = lastTable
            else:
                fosterParent = self.openElements[
                    self.openElements.index(lastTable) - 1]
        else:
            fosterParent = self.openElements[0]
        return fosterParent, insertBefore
    def generateImpliedEndTags(self, exclude=None):
        # Recursively pop open elements whose end tags are implied,
        # stopping at 'exclude' if given.
        name = self.openElements[-1].name
        # XXX td, th and tr are not actually needed
        if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
            and name != exclude):
            self.openElements.pop()
            # XXX This is not entirely what the specification says. We should
            # investigate it more closely.
            self.generateImpliedEndTags(exclude)
    def getDocument(self):
        "Return the final tree"
        return self.document
    def getFragment(self):
        "Return the final fragment"
        # assert self.innerHTML
        fragment = self.fragmentClass()
        self.openElements[0].reparentChildren(fragment)
        return fragment
    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests
        node - the node from which to start serializing"""
        raise NotImplementedError
|
mit
|
seeminglee/pyglet64
|
contrib/toys/thrust.py
|
29
|
5162
|
'''
Code by Richard Jones, released into the public domain.
Beginnings of something like http://en.wikipedia.org/wiki/Thrust_(video_game)
'''
import sys
import math
import euclid
import primitives
import pyglet
from pyglet.window import key
from pyglet.gl import *
# Fullscreen if launched with '-fs' on the command line.
window = pyglet.window.Window(fullscreen='-fs' in sys.argv)
# Constant downward acceleration applied per second in Ship.update().
GRAVITY = -200
class Game(object):
    """Top-level game state: owns the render batch, the ship and HUD text."""
    def __init__(self):
        self.batch = pyglet.graphics.Batch()
        self.ship = Ship(window.width//2, window.height//2, self.batch)
        self.debug_text = pyglet.text.Label('debug text', x=10, y=window.height-40, batch=self.batch)
    def on_draw(self):
        # pyglet window event handler: redraw the whole batch each frame.
        window.clear()
        self.batch.draw()
    def update(self, dt):
        # Advance the simulation by dt seconds (scheduled on pyglet's clock).
        self.ship.update(dt)
class Ship(object):
    """Player ship plus a ball it can join to with a rigid rod.

    NOTE(review): cog, join_angle, join_length and ang_velocity are first
    assigned when the join engages (join_active branch of update()); the
    'joined' simulation branch relies on that having already happened.
    """
    def __init__(self, x, y, batch):
        # Ship state: position/velocity vectors and facing angle (radians).
        self.position = euclid.Point2(x, y)
        self.velocity = euclid.Point2(0, 0)
        self.angle = math.pi/2
        self.batch = batch
        # Three line segments forming the ship triangle (6 vertices).
        self.lines = batch.add(6, GL_LINES, primitives.SmoothLineGroup(),
            ('v2f', (0, 0) * 6),
            ('c4B', (255, 255, 255, 255) * 6))
        # Ball state and its circle outline; _ball_verts keeps the
        # untranslated template vertices.
        self.ball_position = euclid.Point2(window.width/2, window.height/4)
        self.ball_velocity = euclid.Point2(0, 0)
        self.ball_lines = primitives.add_circle(batch, 0, 0, 20, (255, 255, 255, 255), 20)
        self._ball_verts = list(self.ball_lines.vertices)
        self._update_ball_verts()
        # Join state machine: active = player requested, joined = engaged.
        self.join_active = False
        self.join_line = None
        self.joined = False
    def update(self, dt):
        """Advance ship (and, when joined, ship+ball rod) physics by dt."""
        self.angle += (keyboard[key.LEFT] - keyboard[key.RIGHT]) * math.pi * dt
        r = euclid.Matrix3.new_rotate(self.angle)
        if keyboard[key.UP]:
            thrust = r * euclid.Vector2(600, 0)
        else:
            thrust = euclid.Vector2(0, 0)
        # attempt join on spacebar press
        s_b = self.position - self.ball_position
        if keyboard[key.SPACE] and abs(s_b) < 100:
            self.join_active = True
        if not self.joined:
            # simulation is just the ship
            # apply thrust to the ship directly
            thrust.y += GRAVITY
            # now figure my new velocity
            self.velocity += thrust * dt
            # calculate new line endpoints
            self.position += self.velocity * dt
        else:
            # simulation is of a rod with ship and one end and ball at other
            n_v = s_b.normalized()
            n_t = thrust.normalized()
            # figure the linear acceleration, velocity & move
            d = abs(n_v.dot(n_t))
            lin = thrust * d
            lin.y += GRAVITY
            self.velocity += lin * dt
            self.cog += self.velocity * dt
            # now the angular acceleration
            r90 = euclid.Matrix3.new_rotate(math.pi/2)
            r_n_t = r90 * n_t
            rd = n_v.dot(r_n_t)
            self.ang_velocity -= abs(abs(thrust)) * rd * 0.0001
            self.join_angle += self.ang_velocity * dt
            # vector from center of gravity our to either end
            ar = euclid.Matrix3.new_rotate(self.join_angle)
            a_r = ar * euclid.Vector2(self.join_length/2, 0)
            # set the ship & ball positions
            self.position = self.cog + a_r
            self.ball_position = self.cog - a_r
            self._update_ball_verts()
        if self.join_active:
            # Engage the join once the rod has stretched to full length.
            if abs(s_b) >= 100 and not self.joined:
                self.joined = True
                h_s_b = s_b / 2
                self.cog = self.position - h_s_b
                self.join_angle = math.atan2(s_b.y, s_b.x)
                self.join_length = abs(s_b)
                # mass just doubled, so slow linear velocity down
                self.velocity /= 2
                # XXX and generate some initial angular velocity based on
                # XXX ship current velocity
                self.ang_velocity = 0
            # render the join line
            l = [
                self.position.x, self.position.y,
                self.ball_position.x, self.ball_position.y
            ]
            if self.join_line:
                self.join_line.vertices[:] = l
            else:
                self.join_line = self.batch.add(2, GL_LINES, primitives.SmoothLineGroup(),
                    ('v2f', l), ('c4B', (255, 255, 255, 255) * 2))
        # update the ship verts
        bl = r * euclid.Point2(-25, 25)
        t = r * euclid.Point2(25, 0)
        br = r * euclid.Point2(-25, -25)
        x, y = self.position
        self.lines.vertices[:] = [
            x+bl.x, y+bl.y, x+t.x, y+t.y,
            x+t.x, y+t.y, x+br.x, y+br.y,
            x+br.x, y+br.y, x+bl.x, y+bl.y,
        ]
    def _update_ball_verts(self):
        """Translate the template circle vertices to ball_position."""
        # update the ball for its position
        l = []
        x, y = self.ball_position
        for i, v in enumerate(self._ball_verts):
            # Even indices are x coordinates, odd indices are y coordinates.
            if i % 2:
                l.append(int(v + y))
            else:
                l.append(int(v + x))
        self.ball_lines.vertices[:] = l
# Wire everything up: game event handlers, per-frame update, keyboard state.
g = Game()
window.push_handlers(g)
pyglet.clock.schedule(g.update)
keyboard = key.KeyStateHandler()
window.push_handlers(keyboard)
pyglet.app.run()
|
bsd-3-clause
|
sssemil/cjdns
|
node_build/dependencies/libuv/build/gyp/test/msvs/missing_sources/gyptest-missing.py
|
315
|
1413
|
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies that missing 'sources' files are treated as fatal errors when the
generator flag 'msvs_error_on_missing_sources' is set.
"""

import TestGyp
import os
import sys

# The msvs generator (and this flag) only apply on Windows.
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'], workdir='workarea_all')

  # With the flag not set: missing sources are tolerated.
  test.run_gyp('hello_missing.gyp')

  # With the flag explicitly set to 0: same tolerant behavior.
  try:
    os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_error_on_missing_sources=0'
    test.run_gyp('hello_missing.gyp')
  finally:
    # Always clear the flag so later runs are not affected.
    del os.environ['GYP_GENERATOR_FLAGS']

  # With the flag explicitly set to 1: missing sources become fatal.
  try:
    os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_error_on_missing_sources=1'
    # Test to make sure GYP raises an exception (exit status 1). Since this will
    # also print a backtrace, ensure that TestGyp is not checking that stderr is
    # empty by specifying None, which means do not perform any checking.
    # Instead, stderr is checked below to ensure it contains the expected
    # output.
    test.run_gyp('hello_missing.gyp', status=1, stderr=None)
  finally:
    del os.environ['GYP_GENERATOR_FLAGS']

  # The failure message must name the missing inputs.
  test.must_contain_any_line(test.stderr(),
                             ["Missing input files:"])

  test.pass_test()
|
gpl-3.0
|
allenlavoie/tensorflow
|
tensorflow/python/data/kernel_tests/sequence_dataset_op_test.py
|
18
|
7947
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SequenceDatasetTest(test.TestCase):
  """Tests for the sequence transformations: repeat(), take() and skip()."""

  def testRepeatTensorDataset(self):
    """Test a dataset that repeats its input multiple times."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    # This placeholder can be fed when dataset-definition subgraph
    # runs (i.e. `init_op` below) to configure the number of
    # repetitions used in a particular iterator.
    count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])

    iterator = (dataset_ops.Dataset.from_tensors(components)
                .repeat(count_placeholder).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    # Repeating must not alter the per-element shapes.
    self.assertEqual([c.shape for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # Test a finite repetition.
      sess.run(init_op, feed_dict={count_placeholder: 3})
      for _ in range(3):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test a different finite repetition.
      sess.run(init_op, feed_dict={count_placeholder: 7})
      for _ in range(7):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test an empty repetition.
      sess.run(init_op, feed_dict={count_placeholder: 0})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Test an infinite repetition.
      # NOTE(mrry): There's not a good way to test that the sequence
      # actually is infinite.
      sess.run(init_op, feed_dict={count_placeholder: -1})
      for _ in range(17):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)

  def testTakeTensorDataset(self):
    """Test that take() yields exactly min(count, input size) elements."""
    components = (np.arange(10),)
    count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                .take(count_placeholder).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    # from_tensor_slices strips the leading (slice) dimension.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # Take fewer than input size
      sess.run(init_op, feed_dict={count_placeholder: 4})
      for i in range(4):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Take more than input size
      sess.run(init_op, feed_dict={count_placeholder: 25})
      for i in range(10):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Take all of input (-1 means "no limit").
      sess.run(init_op, feed_dict={count_placeholder: -1})
      for i in range(10):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Take nothing
      sess.run(init_op, feed_dict={count_placeholder: 0})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testSkipTensorDataset(self):
    """Test that skip() drops the first min(count, input size) elements."""
    components = (np.arange(10),)
    count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                .skip(count_placeholder).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # Skip fewer than input size, we should skip
      # the first 4 elements and then read the rest.
      sess.run(init_op, feed_dict={count_placeholder: 4})
      for i in range(4, 10):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Skip more than input size: get nothing.
      sess.run(init_op, feed_dict={count_placeholder: 25})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Skip exactly input size.
      sess.run(init_op, feed_dict={count_placeholder: 10})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Set -1 for 'count': skip the entire dataset.
      sess.run(init_op, feed_dict={count_placeholder: -1})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Skip nothing
      sess.run(init_op, feed_dict={count_placeholder: 0})
      for i in range(0, 10):
        results = sess.run(get_next)
        self.assertAllEqual(results, components[0][i:i+1])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testRepeatRepeatTensorDataset(self):
    """Test the composition of repeat datasets."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    inner_count = array_ops.placeholder(dtypes.int64, shape=[])
    outer_count = array_ops.placeholder(dtypes.int64, shape=[])

    iterator = (dataset_ops.Dataset.from_tensors(components).repeat(inner_count)
                .repeat(outer_count).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([c.shape for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      # Nested repeats multiply: inner * outer elements total.
      sess.run(init_op, feed_dict={inner_count: 7, outer_count: 14})
      for _ in range(7 * 14):
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testRepeatEmptyDataset(self):
    """Test that repeating an empty dataset does not hang."""
    # skip(10) after repeat(10) leaves zero elements; repeating that
    # forever must still terminate immediately with OutOfRangeError.
    iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10).skip(10)
                .repeat(-1).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
# Standard TensorFlow test-module entry point.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
nhicher/ansible
|
lib/ansible/modules/storage/netapp/_na_cdot_license.py
|
8
|
9278
|
#!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_license
short_description: Manage NetApp cDOT protocol and feature licenses
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_license) instead.
description:
- Add or remove licenses on NetApp ONTAP.
options:
remove_unused:
description:
- Remove licenses that have no controller affiliation in the cluster.
type: bool
remove_expired:
description:
- Remove licenses that have expired in the cluster.
type: bool
serial_number:
description:
- Serial number of the node associated with the license.
- This parameter is used primarily when removing license for a specific service.
- If this parameter is not provided, the cluster serial number is used by default.
licenses:
description:
- List of licenses to add or remove.
- Please note that trying to remove a non-existent license will throw an error.
suboptions:
base:
description:
- Cluster Base License
nfs:
description:
- NFS License
cifs:
description:
- CIFS License
iscsi:
description:
- iSCSI License
fcp:
description:
- FCP License
cdmi:
description:
- CDMI License
snaprestore:
description:
- SnapRestore License
snapmirror:
description:
- SnapMirror License
flexclone:
description:
- FlexClone License
snapvault:
description:
- SnapVault License
snaplock:
description:
- SnapLock License
snapmanagersuite:
description:
- SnapManagerSuite License
snapprotectapps:
description:
- SnapProtectApp License
v_storageattach:
description:
- Virtual Attached Storage License
'''
EXAMPLES = """
- name: Add licenses
na_cdot_license:
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
serial_number: #################
licenses:
nfs: #################
cifs: #################
iscsi: #################
fcp: #################
snaprestore: #################
flexclone: #################
- name: Remove licenses
na_cdot_license:
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
remove_unused: false
remove_expired: true
serial_number: #################
licenses:
nfs: remove
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTLicense(object):
    """Manage (add/remove) NetApp ONTAP licenses through ZAPI calls."""

    def __init__(self):
        # Base connection options (hostname/username/password/etc.) plus the
        # module-specific options declared below.
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            serial_number=dict(required=False, type='str', default=None),
            remove_unused=dict(default=False, type='bool'),
            remove_expired=dict(default=False, type='bool'),
            # NOTE(review): default=False for a 'dict'-typed option looks
            # suspicious -- update_licenses() iterates self.licenses.items(),
            # which would fail if the option is omitted. TODO confirm the
            # intended default (an empty dict?).
            licenses=dict(default=False, type='dict'),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=False
        )
        p = self.module.params

        # set up state variables
        self.serial_number = p['serial_number']
        self.remove_unused = p['remove_unused']
        self.remove_expired = p['remove_expired']
        self.licenses = p['licenses']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            # ZAPI connection used by all subsequent invoke_successfully calls.
            self.server = netapp_utils.setup_ontap_zapi(module=self.module)

    def get_licensing_status(self):
        """
        Check licensing status

        :return: package (key) and licensing status (value)
        :rtype: dict
        """
        license_status = netapp_utils.zapi.NaElement('license-v2-status-list-info')
        result = None
        try:
            result = self.server.invoke_successfully(license_status,
                                                     enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error checking license status: %s" %
                                  to_native(e), exception=traceback.format_exc())

        # Flatten the ZAPI reply into {package_name: licensing_method}.
        return_dictionary = {}
        license_v2_status = result.get_child_by_name('license-v2-status')
        if license_v2_status:
            for license_v2_status_info in license_v2_status.get_children():
                package = license_v2_status_info.get_child_content('package')
                status = license_v2_status_info.get_child_content('method')
                return_dictionary[package] = status

        return return_dictionary

    def remove_licenses(self, remove_list):
        """
        Remove requested licenses

        :param:
            remove_list : List of packages to remove
        """
        license_delete = netapp_utils.zapi.NaElement('license-v2-delete')
        for package in remove_list:
            license_delete.add_new_child('package', package)

        # Target a specific node when a serial number was provided;
        # otherwise the cluster serial number is used by the backend.
        if self.serial_number is not None:
            license_delete.add_new_child('serial-number', self.serial_number)

        try:
            self.server.invoke_successfully(license_delete,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error removing license %s" %
                                  to_native(e), exception=traceback.format_exc())

    def remove_unused_licenses(self):
        """
        Remove unused licenses
        """
        remove_unused = netapp_utils.zapi.NaElement('license-v2-delete-unused')
        try:
            self.server.invoke_successfully(remove_unused,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error removing unused licenses: %s" %
                                  to_native(e), exception=traceback.format_exc())

    def remove_expired_licenses(self):
        """
        Remove expired licenses
        """
        remove_expired = netapp_utils.zapi.NaElement('license-v2-delete-expired')
        try:
            self.server.invoke_successfully(remove_expired,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error removing expired licenses: %s" %
                                  to_native(e), exception=traceback.format_exc())

    def update_licenses(self):
        """
        Update licenses
        """
        # Remove unused and expired licenses, if requested.
        if self.remove_unused:
            self.remove_unused_licenses()

        if self.remove_expired:
            self.remove_expired_licenses()

        # Next, add/remove specific requested licenses.
        license_add = netapp_utils.zapi.NaElement('license-v2-add')
        codes = netapp_utils.zapi.NaElement('codes')
        remove_list = []
        for key, value in self.licenses.items():
            str_value = str(value)
            # Make sure license is not an empty string.
            if str_value and str_value.strip():
                if str_value.lower() == 'remove':
                    # The special value 'remove' marks this package for
                    # deletion rather than installation.
                    remove_list.append(str(key).lower())
                else:
                    # Any other value is treated as a license code to install.
                    codes.add_new_child('license-code-v2', str_value)

        # Remove requested licenses.
        if len(remove_list) != 0:
            self.remove_licenses(remove_list)

        # Add requested licenses
        if len(codes.get_children()) != 0:
            license_add.add_child_elem(codes)
            try:
                self.server.invoke_successfully(license_add,
                                                enable_tunneling=False)
            except netapp_utils.zapi.NaApiError as e:
                self.module.fail_json(msg="Error adding licenses: %s" %
                                      to_native(e), exception=traceback.format_exc())

    def apply(self):
        """Run the module: apply license changes and report 'changed'."""
        changed = False
        # Add / Update licenses.
        license_status = self.get_licensing_status()
        self.update_licenses()
        new_license_status = self.get_licensing_status()

        # 'changed' iff the before/after status dictionaries differ.
        if license_status != new_license_status:
            changed = True

        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the license manager and apply the changes."""
    license_manager = NetAppCDOTLicense()
    license_manager.apply()


if __name__ == '__main__':
    main()
|
gpl-3.0
|
reviewboard/reviewboard
|
reviewboard/scmtools/tests/test_cvs.py
|
2
|
34597
|
# coding=utf-8
from __future__ import unicode_literals
import os
import nose
from django.core.exceptions import ValidationError
from djblets.testing.decorators import add_fixtures
from reviewboard.diffviewer.parser import DiffParserError
from reviewboard.scmtools.core import PRE_CREATION, Revision
from reviewboard.scmtools.cvs import CVSTool
from reviewboard.scmtools.errors import SCMError, FileNotFoundError
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.scmtools.tests.testcases import SCMTestCase
from reviewboard.testing.testcase import TestCase
class CVSTests(SCMTestCase):
"""Unit tests for CVS."""
fixtures = ['test_scmtools']
def setUp(self):
    """Set up the test case with the local CVS repository fixture."""
    super(CVSTests, self).setUp()

    self.cvs_repo_path = os.path.join(os.path.dirname(__file__),
                                      '..', 'testdata', 'cvs_repo')
    # :ext: (SSH) form of the same repository path, for SSH-based tests.
    self.cvs_ssh_path = (':ext:localhost:%s'
                         % self.cvs_repo_path.replace('\\', '/'))
    self.repository = Repository(name='CVS',
                                 path=self.cvs_repo_path,
                                 tool=Tool.objects.get(name='CVS'))

    try:
        self.tool = self.repository.get_scmtool()
    except ImportError:
        # The CVS SCMTool needs the cvs client binary; skip if absent.
        raise nose.SkipTest('cvs binary not found')
def test_build_cvsroot_with_port(self):
"""Testing CVSTool.build_cvsroot with a port"""
self._test_build_cvsroot(
repo_path='example.com:123/cvsroot/test',
username='anonymous',
expected_cvsroot=':pserver:anonymous@example.com:123/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_without_port(self):
"""Testing CVSTool.build_cvsroot without a port"""
self._test_build_cvsroot(
repo_path='example.com:/cvsroot/test',
username='anonymous',
expected_cvsroot=':pserver:anonymous@example.com:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_pserver_and_no_user_or_password(self):
"""Testing CVSTool.build_cvsroot with :pserver: and no user or
password
"""
self._test_build_cvsroot(
repo_path=':pserver:example.com:/cvsroot/test',
expected_cvsroot=':pserver:example.com:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_pserver_and_inline_user(self):
"""Testing CVSTool.build_cvsroot with :pserver: and inline user"""
self._test_build_cvsroot(
repo_path=':pserver:anonymous@example.com:/cvsroot/test',
expected_cvsroot=':pserver:anonymous@example.com:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_pserver_and_inline_user_and_password(self):
"""Testing CVSTool.build_cvsroot with :pserver: and inline user and
password
"""
self._test_build_cvsroot(
repo_path=':pserver:anonymous:pass@example.com:/cvsroot/test',
expected_cvsroot=':pserver:anonymous:pass@example.com:'
'/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_pserver_and_form_user(self):
"""Testing CVSTool.build_cvsroot with :pserver: and form-provided
user
"""
self._test_build_cvsroot(
repo_path=':pserver:example.com:/cvsroot/test',
username='anonymous',
expected_cvsroot=':pserver:anonymous@example.com:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_pserver_and_form_user_and_password(self):
"""Testing CVSTool.build_cvsroot with :pserver: and form-provided
user and password
"""
self._test_build_cvsroot(
repo_path=':pserver:example.com:/cvsroot/test',
username='anonymous',
password='pass',
expected_cvsroot=':pserver:anonymous:pass@example.com:'
'/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_pserver_and_inline_takes_precedence(self):
"""Testing CVSTool.build_cvsroot with :pserver: and inline
user/password taking precedence
"""
self._test_build_cvsroot(
repo_path=':pserver:anonymous:pass@example.com:/cvsroot/test',
username='grumpy',
password='grr',
expected_cvsroot=':pserver:anonymous:pass@example.com:'
'/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_gserver(self):
"""Testing CVSTool.build_cvsroot with :gserver:"""
self._test_build_cvsroot(
repo_path=':gserver:localhost:/cvsroot/test',
expected_cvsroot=':gserver:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_gserver_with_username(self):
"""Testing CVSTool.build_cvsroot with :gserver: with username"""
self._test_build_cvsroot(
repo_path=':gserver:user@localhost:/cvsroot/test',
expected_cvsroot=':gserver:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
self._test_build_cvsroot(
repo_path=':gserver:localhost:/cvsroot/test',
username='user',
expected_cvsroot=':gserver:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_gserver_with_port(self):
"""Testing CVSTool.build_cvsroot with :gserver: with port"""
self._test_build_cvsroot(
repo_path=':gserver:localhost:123/cvsroot/test',
expected_cvsroot=':gserver:localhost:123/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_gserver_validates_password(self):
"""Testing CVSTool.build_cvsroot with :gserver: validates password"""
self._test_build_cvsroot(
repo_path=':gserver:user:pass@localhost:/cvsroot/test',
expected_error='"gserver" CVSROOTs do not support passwords.',
expected_cvsroot=':gserver:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_kserver(self):
"""Testing CVSTool.build_cvsroot with :kserver:"""
self._test_build_cvsroot(
repo_path=':kserver:localhost:/cvsroot/test',
expected_cvsroot=':kserver:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_kserver_with_username(self):
"""Testing CVSTool.build_cvsroot with :kserver: with username"""
self._test_build_cvsroot(
repo_path=':kserver:user@localhost:/cvsroot/test',
expected_cvsroot=':kserver:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
self._test_build_cvsroot(
repo_path=':kserver:localhost:/cvsroot/test',
username='user',
expected_cvsroot=':kserver:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_kserver_with_port(self):
"""Testing CVSTool.build_cvsroot with :kserver: with port"""
self._test_build_cvsroot(
repo_path=':kserver:localhost:123/cvsroot/test',
expected_cvsroot=':kserver:localhost:123/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_kserver_validates_password(self):
"""Testing CVSTool.build_cvsroot with :kserver: validates password"""
self._test_build_cvsroot(
repo_path=':kserver:user:pass@localhost:/cvsroot/test',
expected_error='"kserver" CVSROOTs do not support passwords.',
expected_cvsroot=':kserver:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_ext(self):
"""Testing CVSTool.build_cvsroot with :ext:"""
self._test_build_cvsroot(
repo_path=':ext:localhost:/cvsroot/test',
expected_cvsroot=':ext:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_ext_validates_password(self):
"""Testing CVSTool.build_cvsroot with :ext: validates password"""
self._test_build_cvsroot(
repo_path=':ext:user:pass@localhost:/cvsroot/test',
expected_error='"ext" CVSROOTs do not support passwords.',
expected_cvsroot=':ext:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_ext_validates_port(self):
"""Testing CVSTool.build_cvsroot with :ext: validates port"""
self._test_build_cvsroot(
repo_path=':ext:localhost:123/cvsroot/test',
expected_error='"ext" CVSROOTs do not support specifying ports.',
expected_cvsroot=':ext:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_server(self):
"""Testing CVSTool.build_cvsroot with :server:"""
self._test_build_cvsroot(
repo_path=':server:localhost:/cvsroot/test',
expected_cvsroot=':server:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_server_validates_password(self):
"""Testing CVSTool.build_cvsroot with :server: validates password"""
self._test_build_cvsroot(
repo_path=':server:user:pass@localhost:/cvsroot/test',
expected_error='"server" CVSROOTs do not support passwords.',
expected_cvsroot=':server:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_server_validates_port(self):
"""Testing CVSTool.build_cvsroot with :server: validates port"""
self._test_build_cvsroot(
repo_path=':server:localhost:123/cvsroot/test',
expected_error='"server" CVSROOTs do not support specifying '
'ports.',
expected_cvsroot=':server:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_ssh(self):
"""Testing CVSTool.build_cvsroot with :ssh:"""
self._test_build_cvsroot(
repo_path=':ssh:localhost:/cvsroot/test',
expected_cvsroot=':ssh:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_ssh_validates_password(self):
"""Testing CVSTool.build_cvsroot with :ssh: validates password"""
self._test_build_cvsroot(
repo_path=':ssh:user:pass@localhost:/cvsroot/test',
expected_error='"ssh" CVSROOTs do not support passwords.',
expected_cvsroot=':ssh:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_ssh_validates_port(self):
"""Testing CVSTool.build_cvsroot with :ssh: validates port"""
self._test_build_cvsroot(
repo_path=':ssh:localhost:123/cvsroot/test',
expected_error='"ssh" CVSROOTs do not support specifying '
'ports.',
expected_cvsroot=':ssh:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_extssh(self):
"""Testing CVSTool.build_cvsroot with :extssh:"""
self._test_build_cvsroot(
repo_path=':extssh:localhost:/cvsroot/test',
expected_cvsroot=':extssh:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_extssh_validates_password(self):
"""Testing CVSTool.build_cvsroot with :extssh: validates password"""
self._test_build_cvsroot(
repo_path=':extssh:user:pass@localhost:/cvsroot/test',
expected_error='"extssh" CVSROOTs do not support passwords.',
expected_cvsroot=':extssh:user@localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_extssh_validates_port(self):
"""Testing CVSTool.build_cvsroot with :extssh: validates port"""
self._test_build_cvsroot(
repo_path=':extssh:localhost:123/cvsroot/test',
expected_error='"extssh" CVSROOTs do not support specifying '
'ports.',
expected_cvsroot=':extssh:localhost:/cvsroot/test',
expected_path='/cvsroot/test')
def test_path_with_fork(self):
"""Testing CVSTool.build_cvsroot with :fork:"""
self._test_build_cvsroot(
repo_path=':fork:/home/myuser/cvsroot',
expected_cvsroot=':fork:/home/myuser/cvsroot',
expected_path='/home/myuser/cvsroot')
def test_path_with_fork_validates_username(self):
"""Testing CVSTool.build_cvsroot with :fork: validates usernames"""
self._test_build_cvsroot(
repo_path=':fork:/home/myuser/cvsroot',
username='myuser',
expected_error='"fork" CVSROOTs do not support usernames.',
expected_cvsroot=':fork:/home/myuser/cvsroot',
expected_path='/home/myuser/cvsroot')
def test_path_with_fork_validates_password(self):
"""Testing CVSTool.build_cvsroot with :fork: validates passwords"""
self._test_build_cvsroot(
repo_path=':fork:/home/myuser/cvsroot',
password='myuser',
expected_error='"fork" CVSROOTs do not support passwords.',
expected_cvsroot=':fork:/home/myuser/cvsroot',
expected_path='/home/myuser/cvsroot')
def test_path_with_local(self):
"""Testing CVSTool.build_cvsroot with :local:"""
self._test_build_cvsroot(
repo_path=':local:/home/myuser/cvsroot',
expected_cvsroot=':local:/home/myuser/cvsroot',
expected_path='/home/myuser/cvsroot')
def test_path_with_local_validates_username(self):
"""Testing CVSTool.build_cvsroot with :local: validates usernames"""
self._test_build_cvsroot(
repo_path=':local:/home/myuser/cvsroot',
username='myuser',
expected_error='"local" CVSROOTs do not support usernames.',
expected_cvsroot=':local:/home/myuser/cvsroot',
expected_path='/home/myuser/cvsroot')
def test_path_with_local_validates_password(self):
"""Testing CVSTool.build_cvsroot with :local: validates passwords"""
self._test_build_cvsroot(
repo_path=':local:/home/myuser/cvsroot',
password='myuser',
expected_error='"local" CVSROOTs do not support passwords.',
expected_cvsroot=':local:/home/myuser/cvsroot',
expected_path='/home/myuser/cvsroot')
def test_get_file(self):
    """Testing CVSTool.get_file"""
    tool = self.tool
    expected = b'test content\n'
    filename = 'test/testfile'
    rev = Revision('1.1')

    # The same file must be reachable by its plain name, its ,v suffix,
    # and a full repository path with the ,v suffix.
    for path in (filename,
                 '%s,v' % filename,
                 '%s/%s,v' % (tool.repopath, filename)):
        value = tool.get_file(path, rev)
        self.assertIsInstance(value, bytes)
        self.assertEqual(value, expected)

    with self.assertRaises(FileNotFoundError):
        tool.get_file('')

    with self.assertRaises(FileNotFoundError):
        tool.get_file('hello', PRE_CREATION)
def test_get_file_with_keywords(self):
    """Testing CVSTool.get_file with file containing keywords"""
    # RCS keywords ($Id$, $Author$) must come back unexpanded.
    contents = self.tool.get_file('test/testfile', Revision('1.2'))
    self.assertEqual(contents, b'$Id$\n$Author$\n\ntest content\n')
def test_file_exists(self):
    """Testing CVSTool.file_exists"""
    tool = self.tool

    # A present file is found under every addressable form.
    for path in ('test/testfile',
                 '%s/test/testfile' % tool.repopath,
                 'test/testfile,v'):
        self.assertTrue(tool.file_exists(path))

    # A missing file is reported absent under the same forms.
    for path in ('test/testfile2',
                 '%s/test/testfile2' % tool.repopath,
                 'test/testfile2,v'):
        self.assertFalse(tool.file_exists(path))

    # An existing file at a never-committed revision does not exist.
    self.assertFalse(tool.file_exists('test/testfile', Revision('2.1')))
def test_revision_parsing(self):
    """Testing CVSTool revision number parsing"""
    parse = self.tool.parse_diff_revision

    # The special PRE-CREATION marker maps to the PRE_CREATION constant.
    self.assertEqual(parse(b'', b'PRE-CREATION')[1], PRE_CREATION)

    # A "<timestamp>\t<revision>" pair yields just the revision bytes.
    self.assertEqual(
        parse(b'', b'7 Nov 2005 13:17:07 -0000\t1.2')[1],
        b'1.2')
    self.assertEqual(
        parse(b'', b'7 Nov 2005 13:17:07 -0000\t1.2.3.4')[1],
        b'1.2.3.4')

    # Anything unrecognized is an error.
    with self.assertRaises(SCMError):
        parse(b'', b'hello')
def test_interface(self):
    """Testing basic CVSTool API"""
    # CVS diffs reference files by full repository paths.
    self.assertTrue(self.tool.diffs_use_absolute_paths)
def test_simple_diff(self):
    """Testing parsing CVS simple diff"""
    # A classic "diff -u" against a single revision: the original side
    # carries "<timestamp> <revision>", the modified side just a timestamp.
    diff = (b'Index: testfile\n'
            b'==========================================================='
            b'========\n'
            b'RCS file: %s/test/testfile,v\n'
            b'retrieving revision 1.1.1.1\n'
            b'diff -u -r1.1.1.1 testfile\n'
            b'--- testfile 26 Jul 2007 08:50:30 -0000 1.1.1.1\n'
            b'+++ testfile 26 Jul 2007 10:20:20 -0000\n'
            b'@@ -1 +1,2 @@\n'
            b'-test content\n'
            b'+updated test content\n'
            b'+added info\n'
            % self.cvs_repo_path.encode('utf-8'))

    file = self.tool.get_parser(diff).parse()[0]

    # Both filenames should be normalized to the repo-relative path.
    self.assertEqual(file.orig_filename, b'test/testfile')
    self.assertEqual(file.orig_file_details,
                     b'26 Jul 2007 08:50:30 -0000 1.1.1.1')
    self.assertEqual(file.modified_filename, b'test/testfile')
    self.assertEqual(file.modified_file_details,
                     b'26 Jul 2007 10:20:20 -0000')
    self.assertEqual(file.data, diff)
    self.assertEqual(file.insert_count, 2)
    self.assertEqual(file.delete_count, 1)
def test_new_diff_revision_format(self):
    """Testing parsing CVS diff with new revision format"""
    # Newer CVS clients emit "file:revision\ttimestamp" revision fields.
    diff = (
        'Index: %(path)s/test/testfile\n'
        'diff -u %(path)s/test/testfile:1.5.2.1 '
        '%(path)s/test/testfile:1.5.2.2\n'
        '--- test/testfile:1.5.2.1\tThu Dec 15 16:27:47 2011\n'
        '+++ test/testfile\tTue Jan 10 10:36:26 2012\n'
        '@@ -1 +1,2 @@\n'
        '-test content\n'
        '+updated test content\n'
        '+added info\n'
        % {
            'path': self.cvs_repo_path,
        }
    ).encode('utf-8')

    file = self.tool.get_parser(diff).parse()[0]

    # The revision is parsed out of the combined filename:revision field.
    f2, revision = self.tool.parse_diff_revision(file.orig_filename,
                                                 file.orig_file_details,
                                                 file.moved)

    self.assertIsInstance(f2, bytes)
    self.assertIsInstance(revision, bytes)
    self.assertEqual(f2, b'test/testfile')
    self.assertEqual(revision, b'1.5.2.1')
    self.assertEqual(file.modified_filename, b'test/testfile')
    self.assertEqual(file.modified_file_details,
                     b'Tue Jan 10 10:36:26 2012')
    self.assertEqual(file.insert_count, 2)
    self.assertEqual(file.delete_count, 1)
def test_bad_diff(self):
    """Testing parsing CVS diff with bad info"""
    # There is no "RCS file:" line, so the parser cannot resolve the file.
    diff = (b'Index: newfile\n'
            b'==========================================================='
            b'========\n'
            b'diff -N newfile\n'
            b'--- /dev/null\t1 Jan 1970 00:00:00 -0000\n'
            b'+++ newfile\t26 Jul 2007 10:11:45 -0000\n'
            b'@@ -0,0 +1 @@\n'
            b'+new file content')

    with self.assertRaises(DiffParserError):
        self.tool.get_parser(diff).parse()
def test_bad_diff2(self):
    """Testing parsing CVS bad diff with new file"""
    # The "--- /dev/null" line is missing its timestamp field, which the
    # parser requires in order to recognize a new-file diff.
    diff = (b'Index: newfile\n'
            b'==========================================================='
            b'========\n'
            b'RCS file: newfile\n'
            b'diff -N newfile\n'
            b'--- /dev/null\n'
            b'+++ newfile\t26 Jul 2007 10:11:45 -0000\n'
            b'@@ -0,0 +1 @@\n'
            b'+new file content')

    with self.assertRaises(DiffParserError):
        self.tool.get_parser(diff).parse()
    def test_newfile_diff(self):
        """Testing parsing CVS diff with new file"""
        diff = (b'Index: newfile\n'
                b'==========================================================='
                b'========\n'
                b'RCS file: newfile\n'
                b'diff -N newfile\n'
                b'--- /dev/null\t1 Jan 1970 00:00:00 -0000\n'
                b'+++ newfile\t26 Jul 2007 10:11:45 -0000\n'
                b'@@ -0,0 +1 @@\n'
                b'+new file content\n')
        file = self.tool.get_parser(diff).parse()[0]
        # A /dev/null origin means the file is new; the parser flags that
        # with the special PRE-CREATION revision marker.
        self.assertEqual(file.orig_filename, b'newfile')
        self.assertEqual(file.orig_file_details, b'PRE-CREATION')
        self.assertEqual(file.modified_filename, b'newfile')
        self.assertEqual(file.modified_file_details,
                         b'26 Jul 2007 10:11:45 -0000')
        self.assertEqual(file.data, diff)
        self.assertEqual(file.insert_count, 1)
        self.assertEqual(file.delete_count, 0)
    def test_inter_revision_diff(self):
        """Testing parsing CVS inter-revision diff"""
        # Diff between two committed revisions (1.1 -> 1.2), rather than
        # working copy vs. repository; both sides carry a revision number.
        diff = (b'Index: testfile\n'
                b'==========================================================='
                b'========\n'
                b'RCS file: %s/test/testfile,v\n'
                b'retrieving revision 1.1\n'
                b'retrieving revision 1.2\n'
                b'diff -u -p -r1.1 -r1.2\n'
                b'--- testfile 26 Jul 2007 08:50:30 -0000 1.1\n'
                b'+++ testfile 27 Sep 2007 22:57:16 -0000 1.2\n'
                b'@@ -1 +1,2 @@\n'
                b'-test content\n'
                b'+updated test content\n'
                b'+added info\n'
                % self.cvs_repo_path.encode('utf-8'))
        file = self.tool.get_parser(diff).parse()[0]
        # The filename should come from the "RCS file:" path, not the short
        # "testfile" used on the ---/+++ lines.
        self.assertEqual(file.orig_filename, b'test/testfile')
        self.assertEqual(file.orig_file_details,
                         b'26 Jul 2007 08:50:30 -0000 1.1')
        self.assertEqual(file.modified_filename, b'test/testfile')
        self.assertEqual(file.modified_file_details,
                         b'27 Sep 2007 22:57:16 -0000 1.2')
        self.assertEqual(file.data, diff)
        self.assertEqual(file.insert_count, 2)
        self.assertEqual(file.delete_count, 1)
    def test_unicode_diff(self):
        """Testing parsing CVS diff with unicode filenames"""
        # Built as a str first so the non-ASCII filename interpolates
        # cleanly, then encoded to UTF-8 as the parser expects bytes.
        diff = ('Index: téstfile\n'
                '==========================================================='
                '========\n'
                'RCS file: %s/test/téstfile,v\n'
                'retrieving revision 1.1.1.1\n'
                'diff -u -r1.1.1.1 téstfile\n'
                '--- téstfile 26 Jul 2007 08:50:30 -0000 1.1.1.1\n'
                '+++ téstfile 26 Jul 2007 10:20:20 -0000\n'
                '@@ -1 +1,2 @@\n'
                '-tést content\n'
                '+updated test content\n'
                '+added info\n')
        diff = diff % self.cvs_repo_path
        diff = diff.encode('utf-8')
        file = self.tool.get_parser(diff).parse()[0]
        # Parsed filenames stay as UTF-8 encoded bytes.
        self.assertEqual(file.orig_filename, 'test/téstfile'.encode('utf-8'))
        self.assertEqual(file.orig_file_details,
                         b'26 Jul 2007 08:50:30 -0000 1.1.1.1')
        self.assertEqual(file.modified_filename,
                         'test/téstfile'.encode('utf-8'))
        self.assertEqual(file.modified_file_details,
                         b'26 Jul 2007 10:20:20 -0000')
        self.assertEqual(file.data, diff)
        self.assertEqual(file.insert_count, 2)
        self.assertEqual(file.delete_count, 1)
    def test_keyword_diff(self):
        """Testing parsing CVS diff with keywords"""
        # normalize_patch() should collapse every expanded RCS keyword
        # ("$Keyword: value $") back to its unexpanded "$Keyword$" form so
        # that the diff applies regardless of local keyword expansion.
        diff = self.tool.normalize_patch(
            b'Index: Makefile\n'
            b'==========================================================='
            b'========\n'
            b'RCS file: /cvsroot/src/Makefile,v\n'
            b'retrieving revision 1.1\n'
            b'retrieving revision 1.2\n'
            b'diff -u -r1.1.1.1 Makefile\n'
            b'--- Makefile 26 Jul 2007 08:50:30 -0000 1.1\n'
            b'+++ Makefile 26 Jul 2007 10:20:20 -0000 1.2\n'
            b'@@ -1,6 +1,7 @@\n'
            b' # $Author: bob $\n'
            b' # $Date: 2014/12/18 13:09:42 $\n'
            b' # $Header: /src/Makefile,v 1.2 2014/12/18 '
            b'13:09:42 bob Exp $\n'
            b' # $Id: Makefile,v 1.2 2014/12/18 13:09:42 bob Exp $\n'
            b' # $Locker: bob $\n'
            b' # $Name: some_name $\n'
            b' # $RCSfile: Makefile,v $\n'
            b' # $Revision: 1.2 $\n'
            b' # $Source: /src/Makefile,v $\n'
            b' # $State: Exp $\n'
            b'+# foo\n'
            b' include ../tools/Makefile.base-vars\n'
            b' NAME = misc-docs\n'
            b' OUTNAME = cvs-misc-docs\n',
            'Makefile')
        # Everything except the keyword lines must come through untouched.
        self.assertEqual(
            diff,
            b'Index: Makefile\n'
            b'==========================================================='
            b'========\n'
            b'RCS file: /cvsroot/src/Makefile,v\n'
            b'retrieving revision 1.1\n'
            b'retrieving revision 1.2\n'
            b'diff -u -r1.1.1.1 Makefile\n'
            b'--- Makefile 26 Jul 2007 08:50:30 -0000 1.1\n'
            b'+++ Makefile 26 Jul 2007 10:20:20 -0000 1.2\n'
            b'@@ -1,6 +1,7 @@\n'
            b' # $Author$\n'
            b' # $Date$\n'
            b' # $Header$\n'
            b' # $Id$\n'
            b' # $Locker$\n'
            b' # $Name$\n'
            b' # $RCSfile$\n'
            b' # $Revision$\n'
            b' # $Source$\n'
            b' # $State$\n'
            b'+# foo\n'
            b' include ../tools/Makefile.base-vars\n'
            b' NAME = misc-docs\n'
            b' OUTNAME = cvs-misc-docs\n')
    def test_keyword_diff_unicode(self):
        """Testing parsing CVS diff with keywords and unicode characters"""
        # Test bug 3931: this should succeed without a UnicodeDecodeError
        # (the "+# foo \xf0\x9f\x92\xa9" line is UTF-8 for a non-ASCII
        # character). No return-value assertion: not raising is the test.
        self.tool.normalize_patch(
            b'Index: Makefile\n'
            b'==========================================================='
            b'========\n'
            b'RCS file: /cvsroot/src/Makefile,v\n'
            b'retrieving revision 1.1\n'
            b'retrieving revision 1.2\n'
            b'diff -u -r1.1.1.1 Makefile\n'
            b'--- Makefile 26 Jul 2007 08:50:30 -0000 1.1\n'
            b'+++ Makefile 26 Jul 2007 10:20:20 -0000 1.2\n'
            b'@@ -1,6 +1,7 @@\n'
            b' # $Author: bob $\n'
            b' # $Date: 2014/12/18 13:09:42 $\n'
            b' # $Header: /src/Makefile,v 1.2 2014/12/18 '
            b'13:09:42 bob Exp $\n'
            b' # $Id: Makefile,v 1.2 2014/12/18 13:09:42 bob Exp $\n'
            b' # $Locker: bob $\n'
            b' # $Name: some_name $\n'
            b' # $RCSfile: Makefile,v $\n'
            b' # $Revision: 1.2 $\n'
            b' # $Source: /src/Makefile,v $\n'
            b' # $State: Exp $\n'
            b'+# foo \xf0\x9f\x92\xa9\n'
            b' include ../tools/Makefile.base-vars\n'
            b' NAME = misc-docs\n'
            b' OUTNAME = cvs-misc-docs\n',
            'Makefile')
    def test_binary_diff(self):
        """Testing parsing CVS binary diff"""
        # "Binary files ... differ" carries no revision/timestamp details,
        # so both details fields come back empty and the binary flag is set.
        diff = (
            b'Index: testfile\n'
            b'==============================================================='
            b'====\n'
            b'RCS file: %s/test/testfile,v\n'
            b'retrieving revision 1.1.1.1\n'
            b'diff -u -r1.1.1.1 testfile\n'
            b'Binary files testfile and testfile differ\n'
            % self.cvs_repo_path.encode('utf-8'))
        file = self.tool.get_parser(diff).parse()[0]
        self.assertEqual(file.orig_filename, b'test/testfile')
        self.assertEqual(file.orig_file_details, b'')
        self.assertEqual(file.modified_filename, b'test/testfile')
        self.assertEqual(file.modified_file_details, b'')
        self.assertTrue(file.binary)
        self.assertEqual(file.data, diff)
    def test_binary_diff_new_file(self):
        """Testing parsing CVS binary diff with new file"""
        # A binary diff against /dev/null is a new binary file: the origin
        # gets the PRE-CREATION marker and the binary flag is set.
        diff = (
            b'Index: test/testfile\n'
            b'==============================================================='
            b'====\n'
            b'RCS file: test/testfile,v\n'
            b'diff -N test/testfile\n'
            b'Binary files /dev/null and testfile differ\n')
        file = self.tool.get_parser(diff).parse()[0]
        self.assertEqual(file.orig_filename, b'test/testfile')
        self.assertEqual(file.orig_file_details, b'PRE-CREATION')
        self.assertEqual(file.modified_filename, b'test/testfile')
        self.assertEqual(file.modified_file_details, b'')
        self.assertTrue(file.binary)
        self.assertEqual(file.data, diff)
    def test_bad_root(self):
        """Testing CVSTool with a bad CVSROOT"""
        # Point a repository at a path that does not exist (real path + '2')
        # and verify that file access surfaces SCMError.
        file = 'test/testfile'
        rev = Revision('1.1')
        badrepo = Repository(name='CVS',
                             path=self.cvs_repo_path + '2',
                             tool=Tool.objects.get(name='CVS'))
        badtool = badrepo.get_scmtool()
        self.assertRaises(SCMError, lambda: badtool.get_file(file, rev))
    def test_ssh(self):
        """Testing a SSH-backed CVS repository"""
        # Delegates to the shared SSH helper from the base test case.
        self._test_ssh(self.cvs_ssh_path, 'CVSROOT/modules')
    def test_ssh_with_site(self):
        """Testing a SSH-backed CVS repository with a LocalSite"""
        # Delegates to the shared SSH-with-LocalSite helper from the base
        # test case.
        self._test_ssh_with_site(self.cvs_ssh_path, 'CVSROOT/modules')
    def _test_build_cvsroot(self, repo_path, expected_cvsroot, expected_path,
                            expected_error=None, username=None, password=None):
        # Shared helper: check build_cvsroot() both with validation (when an
        # error is expected) and without.
        if expected_error:
            with self.assertRaisesMessage(ValidationError, expected_error):
                self.tool.build_cvsroot(repo_path, username, password,
                                        validate=True)
        # NOTE(review): even when expected_error is given, execution falls
        # through to the non-validating call below, so callers must still
        # supply meaningful expected_cvsroot/expected_path — confirm this is
        # intentional.
        cvsroot, norm_path = self.tool.build_cvsroot(repo_path, username,
                                                     password, validate=False)
        self.assertEqual(cvsroot, expected_cvsroot)
        self.assertEqual(norm_path, expected_path)
class CVSAuthFormTests(TestCase):
    """Unit tests for CVSTool's authentication form."""
    def test_fields(self):
        """Testing CVSTool authentication form fields"""
        form = CVSTool.create_auth_form()
        self.assertEqual(list(form.fields), ['username', 'password'])
        self.assertEqual(form['username'].help_text, '')
        self.assertEqual(form['username'].label, 'Username')
        self.assertEqual(form['password'].help_text, '')
        self.assertEqual(form['password'].label, 'Password')
    @add_fixtures(['test_scmtools'])
    def test_load(self):
        """Testing CVSTool authentication form load"""
        # Loading the form from an existing repository should pre-populate
        # the stored credentials.
        repository = self.create_repository(
            tool_name='CVS',
            username='test-user',
            password='test-pass')
        form = CVSTool.create_auth_form(repository=repository)
        form.load()
        self.assertEqual(form['username'].value(), 'test-user')
        self.assertEqual(form['password'].value(), 'test-pass')
    @add_fixtures(['test_scmtools'])
    def test_save(self):
        """Testing CVSTool authentication form save"""
        # Saving a valid form should write the credentials back onto the
        # repository object.
        repository = self.create_repository(tool_name='CVS')
        form = CVSTool.create_auth_form(
            repository=repository,
            data={
                'username': 'test-user',
                'password': 'test-pass',
            })
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(repository.username, 'test-user')
        self.assertEqual(repository.password, 'test-pass')
class CVSRepositoryFormTests(TestCase):
    """Unit tests for CVSTool's repository form."""
    def test_fields(self):
        """Testing CVSTool repository form fields"""
        form = CVSTool.create_repository_form()
        self.assertEqual(list(form.fields), ['path', 'mirror_path'])
        self.assertEqual(form['path'].help_text,
                         'The CVSROOT used to access the repository.')
        self.assertEqual(form['path'].label, 'Path')
        self.assertEqual(form['mirror_path'].help_text, '')
        self.assertEqual(form['mirror_path'].label, 'Mirror Path')
    @add_fixtures(['test_scmtools'])
    def test_load(self):
        """Testing CVSTool repository form load"""
        # Loading the form from an existing repository should pre-populate
        # the stored paths.
        repository = self.create_repository(
            tool_name='CVS',
            path='example.com:123/cvsroot/test',
            mirror_path=':pserver:example.com:/cvsroot/test')
        form = CVSTool.create_repository_form(repository=repository)
        form.load()
        self.assertEqual(form['path'].value(), 'example.com:123/cvsroot/test')
        self.assertEqual(form['mirror_path'].value(),
                         ':pserver:example.com:/cvsroot/test')
    @add_fixtures(['test_scmtools'])
    def test_save(self):
        """Testing CVSTool repository form save"""
        # Saving a valid form should write the paths back onto the
        # repository object.
        repository = self.create_repository(tool_name='CVS')
        form = CVSTool.create_repository_form(
            repository=repository,
            data={
                'path': 'example.com:123/cvsroot/test',
                'mirror_path': ':pserver:example.com:/cvsroot/test',
            })
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(repository.path, 'example.com:123/cvsroot/test')
        self.assertEqual(repository.mirror_path,
                         ':pserver:example.com:/cvsroot/test')
|
mit
|
jiangzhuo/kbengine
|
kbe/src/lib/python/Lib/test/test_posixpath.py
|
68
|
23737
|
import itertools
import os
import posixpath
import sys
import unittest
import warnings
from posixpath import realpath, abspath, dirname, basename
from test import support, test_genericpath
try:
import posix
except ImportError:
posix = None
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
# Many tests below create files/symlinks/directories at ABSTFN (and at
# ABSTFN + suffix) and clean them up in their finally blocks.
ABSTFN = abspath(support.TESTFN)
def skip_if_ABSTFN_contains_backslash(test):
    """
    On Windows, posixpath.abspath still returns paths with backslashes
    instead of posix forward slashes. If this is the case, several tests
    fail, so skip them.
    """
    # Decorator: return the test unchanged on posix-style paths; otherwise
    # hand back a skip-wrapped version of it.
    if '\\' not in ABSTFN:
        return test
    return unittest.skip("ABSTFN is not a posix path - tests fail")(test)
def safe_rmdir(path):
    """Remove the directory *path*, ignoring any OSError.

    Best-effort cleanup for tests: a missing or non-empty directory is
    silently left alone rather than masking the real test failure.
    """
    # The parameter was renamed from ``dirname`` so it no longer shadows
    # posixpath.dirname, which is imported at module level above.
    try:
        os.rmdir(path)
    except OSError:
        pass
class PosixPathTest(unittest.TestCase):
    """Tests for the posixpath module (join/split/realpath/relpath/etc.).

    Filesystem-touching tests operate on support.TESTFN / ABSTFN and clean
    up after themselves in setUp/tearDown or per-test finally blocks.
    """

    def setUp(self):
        self.tearDown()

    def tearDown(self):
        for suffix in ["", "1", "2"]:
            support.unlink(support.TESTFN + suffix)
            safe_rmdir(support.TESTFN + suffix)

    def test_join(self):
        self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"),
                         "/bar/baz")
        self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
        self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"),
                         "/foo/bar/baz/")
        self.assertEqual(posixpath.join(b"/foo", b"bar", b"/bar", b"baz"),
                         b"/bar/baz")
        self.assertEqual(posixpath.join(b"/foo", b"bar", b"baz"),
                         b"/foo/bar/baz")
        self.assertEqual(posixpath.join(b"/foo/", b"bar/", b"baz/"),
                         b"/foo/bar/baz/")

    def test_join_errors(self):
        # Check posixpath.join raises friendly TypeErrors.
        errmsg = "Can't mix strings and bytes in path components"
        with self.assertRaisesRegex(TypeError, errmsg):
            posixpath.join(b'bytes', 'str')
        with self.assertRaisesRegex(TypeError, errmsg):
            posixpath.join('str', b'bytes')
        # regression, see #15377
        with self.assertRaises(TypeError) as cm:
            posixpath.join(None, 'str')
        self.assertNotEqual(cm.exception.args[0], errmsg)

    def test_split(self):
        self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
        self.assertEqual(posixpath.split("/"), ("/", ""))
        self.assertEqual(posixpath.split("foo"), ("", "foo"))
        self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
        self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
        self.assertEqual(posixpath.split(b"/foo/bar"), (b"/foo", b"bar"))
        self.assertEqual(posixpath.split(b"/"), (b"/", b""))
        self.assertEqual(posixpath.split(b"foo"), (b"", b"foo"))
        self.assertEqual(posixpath.split(b"////foo"), (b"////", b"foo"))
        self.assertEqual(posixpath.split(b"//foo//bar"), (b"//foo", b"bar"))

    def splitextTest(self, path, filename, ext):
        # Helper: check splitext() on the bare path and with various
        # prefixes/suffixes, in both str and bytes flavors.
        self.assertEqual(posixpath.splitext(path), (filename, ext))
        self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
        self.assertEqual(posixpath.splitext("abc/" + path),
                         ("abc/" + filename, ext))
        self.assertEqual(posixpath.splitext("abc.def/" + path),
                         ("abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext("/abc.def/" + path),
                         ("/abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext(path + "/"),
                         (filename + ext + "/", ""))
        path = bytes(path, "ASCII")
        filename = bytes(filename, "ASCII")
        ext = bytes(ext, "ASCII")
        self.assertEqual(posixpath.splitext(path), (filename, ext))
        self.assertEqual(posixpath.splitext(b"/" + path),
                         (b"/" + filename, ext))
        self.assertEqual(posixpath.splitext(b"abc/" + path),
                         (b"abc/" + filename, ext))
        self.assertEqual(posixpath.splitext(b"abc.def/" + path),
                         (b"abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext(b"/abc.def/" + path),
                         (b"/abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext(path + b"/"),
                         (filename + ext + b"/", b""))

    def test_splitext(self):
        self.splitextTest("foo.bar", "foo", ".bar")
        self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
        self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
        self.splitextTest(".csh.rc", ".csh", ".rc")
        self.splitextTest("nodots", "nodots", "")
        self.splitextTest(".cshrc", ".cshrc", "")
        self.splitextTest("...manydots", "...manydots", "")
        self.splitextTest("...manydots.ext", "...manydots", ".ext")
        self.splitextTest(".", ".", "")
        self.splitextTest("..", "..", "")
        self.splitextTest("........", "........", "")
        self.splitextTest("", "", "")

    def test_isabs(self):
        self.assertIs(posixpath.isabs(""), False)
        self.assertIs(posixpath.isabs("/"), True)
        self.assertIs(posixpath.isabs("/foo"), True)
        self.assertIs(posixpath.isabs("/foo/bar"), True)
        self.assertIs(posixpath.isabs("foo/bar"), False)
        self.assertIs(posixpath.isabs(b""), False)
        self.assertIs(posixpath.isabs(b"/"), True)
        self.assertIs(posixpath.isabs(b"/foo"), True)
        self.assertIs(posixpath.isabs(b"/foo/bar"), True)
        self.assertIs(posixpath.isabs(b"foo/bar"), False)

    def test_basename(self):
        self.assertEqual(posixpath.basename("/foo/bar"), "bar")
        self.assertEqual(posixpath.basename("/"), "")
        self.assertEqual(posixpath.basename("foo"), "foo")
        self.assertEqual(posixpath.basename("////foo"), "foo")
        self.assertEqual(posixpath.basename("//foo//bar"), "bar")
        self.assertEqual(posixpath.basename(b"/foo/bar"), b"bar")
        self.assertEqual(posixpath.basename(b"/"), b"")
        self.assertEqual(posixpath.basename(b"foo"), b"foo")
        self.assertEqual(posixpath.basename(b"////foo"), b"foo")
        self.assertEqual(posixpath.basename(b"//foo//bar"), b"bar")

    def test_dirname(self):
        self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
        self.assertEqual(posixpath.dirname("/"), "/")
        self.assertEqual(posixpath.dirname("foo"), "")
        self.assertEqual(posixpath.dirname("////foo"), "////")
        self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
        self.assertEqual(posixpath.dirname(b"/foo/bar"), b"/foo")
        self.assertEqual(posixpath.dirname(b"/"), b"/")
        self.assertEqual(posixpath.dirname(b"foo"), b"")
        self.assertEqual(posixpath.dirname(b"////foo"), b"////")
        self.assertEqual(posixpath.dirname(b"//foo//bar"), b"//foo")

    def test_islink(self):
        self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
        self.assertIs(posixpath.lexists(support.TESTFN + "2"), False)
        f = open(support.TESTFN + "1", "wb")
        try:
            f.write(b"foo")
            f.close()
            self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
            if support.can_symlink():
                os.symlink(support.TESTFN + "1", support.TESTFN + "2")
                self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
                os.remove(support.TESTFN + "1")
                self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
                self.assertIs(posixpath.exists(support.TESTFN + "2"), False)
                self.assertIs(posixpath.lexists(support.TESTFN + "2"), True)
        finally:
            # BUGFIX: this previously read ``if not f.close():`` -- but
            # file.close() returns None, so the guard was always true and
            # never expressed the intent. Test the ``closed`` attribute.
            if not f.closed:
                f.close()

    def test_ismount(self):
        self.assertIs(posixpath.ismount("/"), True)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.assertIs(posixpath.ismount(b"/"), True)

    def test_ismount_non_existent(self):
        # Non-existent mountpoint.
        self.assertIs(posixpath.ismount(ABSTFN), False)
        try:
            os.mkdir(ABSTFN)
            self.assertIs(posixpath.ismount(ABSTFN), False)
        finally:
            safe_rmdir(ABSTFN)

    @unittest.skipUnless(support.can_symlink(),
                         "Test requires symlink support")
    def test_ismount_symlinks(self):
        # Symlinks are never mountpoints.
        try:
            os.symlink("/", ABSTFN)
            self.assertIs(posixpath.ismount(ABSTFN), False)
        finally:
            os.unlink(ABSTFN)

    @unittest.skipIf(posix is None, "Test requires posix module")
    def test_ismount_different_device(self):
        # Simulate the path being on a different device from its parent by
        # mocking out st_dev.
        save_lstat = os.lstat
        def fake_lstat(path):
            st_ino = 0
            st_dev = 0
            if path == ABSTFN:
                st_dev = 1
                st_ino = 1
            return posix.stat_result((0, st_ino, st_dev, 0, 0, 0, 0, 0, 0, 0))
        try:
            os.lstat = fake_lstat
            self.assertIs(posixpath.ismount(ABSTFN), True)
        finally:
            os.lstat = save_lstat

    def test_expanduser(self):
        self.assertEqual(posixpath.expanduser("foo"), "foo")
        self.assertEqual(posixpath.expanduser(b"foo"), b"foo")
        try:
            import pwd
        except ImportError:
            pass
        else:
            self.assertIsInstance(posixpath.expanduser("~/"), str)
            self.assertIsInstance(posixpath.expanduser(b"~/"), bytes)
            # if home directory == root directory, this test makes no sense
            if posixpath.expanduser("~") != '/':
                self.assertEqual(
                    posixpath.expanduser("~") + "/",
                    posixpath.expanduser("~/")
                )
                self.assertEqual(
                    posixpath.expanduser(b"~") + b"/",
                    posixpath.expanduser(b"~/")
                )
            self.assertIsInstance(posixpath.expanduser("~root/"), str)
            self.assertIsInstance(posixpath.expanduser("~foo/"), str)
            self.assertIsInstance(posixpath.expanduser(b"~root/"), bytes)
            self.assertIsInstance(posixpath.expanduser(b"~foo/"), bytes)
            with support.EnvironmentVarGuard() as env:
                env['HOME'] = '/'
                self.assertEqual(posixpath.expanduser("~"), "/")
                self.assertEqual(posixpath.expanduser("~/foo"), "/foo")
                # expanduser should fall back to using the password database
                del env['HOME']
                home = pwd.getpwuid(os.getuid()).pw_dir
                # $HOME can end with a trailing /, so strip it (see #17809)
                self.assertEqual(posixpath.expanduser("~"), home.rstrip("/"))

    def test_normpath(self):
        self.assertEqual(posixpath.normpath(""), ".")
        self.assertEqual(posixpath.normpath("/"), "/")
        self.assertEqual(posixpath.normpath("//"), "//")
        self.assertEqual(posixpath.normpath("///"), "/")
        self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
        self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"),
                         "/foo/baz")
        self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
        self.assertEqual(posixpath.normpath(b""), b".")
        self.assertEqual(posixpath.normpath(b"/"), b"/")
        self.assertEqual(posixpath.normpath(b"//"), b"//")
        self.assertEqual(posixpath.normpath(b"///"), b"/")
        self.assertEqual(posixpath.normpath(b"///foo/.//bar//"), b"/foo/bar")
        self.assertEqual(posixpath.normpath(b"///foo/.//bar//.//..//.//baz"),
                         b"/foo/baz")
        self.assertEqual(posixpath.normpath(b"///..//./foo/.//bar"),
                         b"/foo/bar")

    @skip_if_ABSTFN_contains_backslash
    def test_realpath_curdir(self):
        self.assertEqual(realpath('.'), os.getcwd())
        self.assertEqual(realpath('./.'), os.getcwd())
        self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd())
        self.assertEqual(realpath(b'.'), os.getcwdb())
        self.assertEqual(realpath(b'./.'), os.getcwdb())
        self.assertEqual(realpath(b'/'.join([b'.'] * 100)), os.getcwdb())

    @skip_if_ABSTFN_contains_backslash
    def test_realpath_pardir(self):
        self.assertEqual(realpath('..'), dirname(os.getcwd()))
        self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd())))
        self.assertEqual(realpath('/'.join(['..'] * 100)), '/')
        self.assertEqual(realpath(b'..'), dirname(os.getcwdb()))
        self.assertEqual(realpath(b'../..'), dirname(dirname(os.getcwdb())))
        self.assertEqual(realpath(b'/'.join([b'..'] * 100)), b'/')

    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_basic(self):
        # Basic operation.
        try:
            os.symlink(ABSTFN+"1", ABSTFN)
            self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
        finally:
            support.unlink(ABSTFN)

    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_relative(self):
        try:
            os.symlink(posixpath.relpath(ABSTFN+"1"), ABSTFN)
            self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
        finally:
            support.unlink(ABSTFN)

    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_symlink_loops(self):
        # Bug #930024, return the path unchanged if we get into an infinite
        # symlink loop.
        try:
            old_path = abspath('.')
            os.symlink(ABSTFN, ABSTFN)
            self.assertEqual(realpath(ABSTFN), ABSTFN)
            os.symlink(ABSTFN+"1", ABSTFN+"2")
            os.symlink(ABSTFN+"2", ABSTFN+"1")
            self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
            self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
            self.assertEqual(realpath(ABSTFN+"1/x"), ABSTFN+"1/x")
            self.assertEqual(realpath(ABSTFN+"1/.."), dirname(ABSTFN))
            self.assertEqual(realpath(ABSTFN+"1/../x"), dirname(ABSTFN) + "/x")
            os.symlink(ABSTFN+"x", ABSTFN+"y")
            self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "y"),
                             ABSTFN + "y")
            self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "1"),
                             ABSTFN + "1")
            os.symlink(basename(ABSTFN) + "a/b", ABSTFN+"a")
            self.assertEqual(realpath(ABSTFN+"a"), ABSTFN+"a/b")
            os.symlink("../" + basename(dirname(ABSTFN)) + "/" +
                       basename(ABSTFN) + "c", ABSTFN+"c")
            self.assertEqual(realpath(ABSTFN+"c"), ABSTFN+"c")
            # Test using relative path as well.
            os.chdir(dirname(ABSTFN))
            self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
        finally:
            os.chdir(old_path)
            support.unlink(ABSTFN)
            support.unlink(ABSTFN+"1")
            support.unlink(ABSTFN+"2")
            support.unlink(ABSTFN+"y")
            support.unlink(ABSTFN+"c")
            support.unlink(ABSTFN+"a")

    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_repeated_indirect_symlinks(self):
        # Issue #6975.
        try:
            os.mkdir(ABSTFN)
            os.symlink('../' + basename(ABSTFN), ABSTFN + '/self')
            os.symlink('self/self/self', ABSTFN + '/link')
            self.assertEqual(realpath(ABSTFN + '/link'), ABSTFN)
        finally:
            support.unlink(ABSTFN + '/self')
            support.unlink(ABSTFN + '/link')
            safe_rmdir(ABSTFN)

    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_deep_recursion(self):
        depth = 10
        old_path = abspath('.')
        try:
            os.mkdir(ABSTFN)
            for i in range(depth):
                os.symlink('/'.join(['%d' % i] * 10), ABSTFN + '/%d' % (i + 1))
            os.symlink('.', ABSTFN + '/0')
            self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN)
            # Test using relative path as well.
            os.chdir(ABSTFN)
            self.assertEqual(realpath('%d' % depth), ABSTFN)
        finally:
            os.chdir(old_path)
            for i in range(depth + 1):
                support.unlink(ABSTFN + '/%d' % i)
            safe_rmdir(ABSTFN)

    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_resolve_parents(self):
        # We also need to resolve any symlinks in the parents of a relative
        # path passed to realpath. E.g.: current working directory is
        # /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
        # realpath("a"). This should return /usr/share/doc/a/.
        try:
            old_path = abspath('.')
            os.mkdir(ABSTFN)
            os.mkdir(ABSTFN + "/y")
            os.symlink(ABSTFN + "/y", ABSTFN + "/k")
            os.chdir(ABSTFN + "/k")
            self.assertEqual(realpath("a"), ABSTFN + "/y/a")
        finally:
            os.chdir(old_path)
            support.unlink(ABSTFN + "/k")
            safe_rmdir(ABSTFN + "/y")
            safe_rmdir(ABSTFN)

    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_resolve_before_normalizing(self):
        # Bug #990669: Symbolic links should be resolved before we
        # normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
        # in the following hierarchy:
        # a/k/y
        #
        # and a symbolic link 'link-y' pointing to 'y' in directory 'a',
        # then realpath("link-y/..") should return 'k', not 'a'.
        try:
            old_path = abspath('.')
            os.mkdir(ABSTFN)
            os.mkdir(ABSTFN + "/k")
            os.mkdir(ABSTFN + "/k/y")
            os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
            # Absolute path.
            self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
            # Relative path.
            os.chdir(dirname(ABSTFN))
            self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
                             ABSTFN + "/k")
        finally:
            os.chdir(old_path)
            support.unlink(ABSTFN + "/link-y")
            safe_rmdir(ABSTFN + "/k/y")
            safe_rmdir(ABSTFN + "/k")
            safe_rmdir(ABSTFN)

    @unittest.skipUnless(hasattr(os, "symlink"),
                         "Missing symlink implementation")
    @skip_if_ABSTFN_contains_backslash
    def test_realpath_resolve_first(self):
        # Bug #1213894: The first component of the path, if not absolute,
        # must be resolved too.
        try:
            old_path = abspath('.')
            os.mkdir(ABSTFN)
            os.mkdir(ABSTFN + "/k")
            os.symlink(ABSTFN, ABSTFN + "link")
            os.chdir(dirname(ABSTFN))
            base = basename(ABSTFN)
            self.assertEqual(realpath(base + "link"), ABSTFN)
            self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
        finally:
            os.chdir(old_path)
            support.unlink(ABSTFN + "link")
            safe_rmdir(ABSTFN + "/k")
            safe_rmdir(ABSTFN)

    def test_relpath(self):
        # Temporarily patch os.getcwd so relpath's default base is fixed.
        (real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
        try:
            curdir = os.path.split(os.getcwd())[-1]
            self.assertRaises(ValueError, posixpath.relpath, "")
            self.assertEqual(posixpath.relpath("a"), "a")
            self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
            self.assertEqual(posixpath.relpath("a/b"), "a/b")
            self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
            self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
            self.assertEqual(posixpath.relpath("a/b", "../c"),
                             "../"+curdir+"/a/b")
            self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
            self.assertEqual(posixpath.relpath("a", "a"), ".")
            self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
            self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
            self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
            self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
            self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
            self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
            self.assertEqual(posixpath.relpath("/", "/"), '.')
            self.assertEqual(posixpath.relpath("/a", "/a"), '.')
            self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
        finally:
            os.getcwd = real_getcwd

    def test_relpath_bytes(self):
        # Bytes flavor of test_relpath, patching os.getcwdb instead.
        (real_getcwdb, os.getcwdb) = (os.getcwdb, lambda: br"/home/user/bar")
        try:
            curdir = os.path.split(os.getcwdb())[-1]
            self.assertRaises(ValueError, posixpath.relpath, b"")
            self.assertEqual(posixpath.relpath(b"a"), b"a")
            self.assertEqual(posixpath.relpath(posixpath.abspath(b"a")), b"a")
            self.assertEqual(posixpath.relpath(b"a/b"), b"a/b")
            self.assertEqual(posixpath.relpath(b"../a/b"), b"../a/b")
            self.assertEqual(posixpath.relpath(b"a", b"../b"),
                             b"../"+curdir+b"/a")
            self.assertEqual(posixpath.relpath(b"a/b", b"../c"),
                             b"../"+curdir+b"/a/b")
            self.assertEqual(posixpath.relpath(b"a", b"b/c"), b"../../a")
            self.assertEqual(posixpath.relpath(b"a", b"a"), b".")
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x/y/z"), b'../../../foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/foo/bar"), b'bat')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/"), b'foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/", b"/foo/bar/bat"), b'../../..')
            self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x"), b'../foo/bar/bat')
            self.assertEqual(posixpath.relpath(b"/x", b"/foo/bar/bat"), b'../../../x')
            self.assertEqual(posixpath.relpath(b"/", b"/"), b'.')
            self.assertEqual(posixpath.relpath(b"/a", b"/a"), b'.')
            self.assertEqual(posixpath.relpath(b"/a/b", b"/a/b"), b'.')
            self.assertRaises(TypeError, posixpath.relpath, b"bytes", "str")
            self.assertRaises(TypeError, posixpath.relpath, "str", b"bytes")
        finally:
            os.getcwdb = real_getcwdb
class PosixCommonTest(test_genericpath.CommonTest, unittest.TestCase):
    """Run the shared path-module tests from test_genericpath against
    posixpath."""
    pathmodule = posixpath
    # posixpath-specific attributes the generic tests should also exercise.
    attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat']
# Allow running this test file directly: ``python test_posixpath.py``.
if __name__=="__main__":
    unittest.main()
|
lgpl-3.0
|
jaysonsantos/servo
|
components/script/dom/bindings/codegen/parser/tests/test_arraybuffer.py
|
158
|
4183
|
import WebIDL
def WebIDLTest(parser, harness):
    """Parse an interface using every typed-array type as both an attribute
    and a method-argument type, then verify each parsed member."""
    parser.parse("""
      interface TestArrayBuffer {
        attribute ArrayBuffer bufferAttr;
        void bufferMethod(ArrayBuffer arg1, ArrayBuffer? arg2, ArrayBuffer[] arg3, sequence<ArrayBuffer> arg4);

        attribute ArrayBufferView viewAttr;
        void viewMethod(ArrayBufferView arg1, ArrayBufferView? arg2, ArrayBufferView[] arg3, sequence<ArrayBufferView> arg4);

        attribute Int8Array int8ArrayAttr;
        void int8ArrayMethod(Int8Array arg1, Int8Array? arg2, Int8Array[] arg3, sequence<Int8Array> arg4);

        attribute Uint8Array uint8ArrayAttr;
        void uint8ArrayMethod(Uint8Array arg1, Uint8Array? arg2, Uint8Array[] arg3, sequence<Uint8Array> arg4);

        attribute Uint8ClampedArray uint8ClampedArrayAttr;
        void uint8ClampedArrayMethod(Uint8ClampedArray arg1, Uint8ClampedArray? arg2, Uint8ClampedArray[] arg3, sequence<Uint8ClampedArray> arg4);

        attribute Int16Array int16ArrayAttr;
        void int16ArrayMethod(Int16Array arg1, Int16Array? arg2, Int16Array[] arg3, sequence<Int16Array> arg4);

        attribute Uint16Array uint16ArrayAttr;
        void uint16ArrayMethod(Uint16Array arg1, Uint16Array? arg2, Uint16Array[] arg3, sequence<Uint16Array> arg4);

        attribute Int32Array int32ArrayAttr;
        void int32ArrayMethod(Int32Array arg1, Int32Array? arg2, Int32Array[] arg3, sequence<Int32Array> arg4);

        attribute Uint32Array uint32ArrayAttr;
        void uint32ArrayMethod(Uint32Array arg1, Uint32Array? arg2, Uint32Array[] arg3, sequence<Uint32Array> arg4);

        attribute Float32Array float32ArrayAttr;
        void float32ArrayMethod(Float32Array arg1, Float32Array? arg2, Float32Array[] arg3, sequence<Float32Array> arg4);

        attribute Float64Array float64ArrayAttr;
        void float64ArrayMethod(Float64Array arg1, Float64Array? arg2, Float64Array[] arg3, sequence<Float64Array> arg4);
      };
    """)
    results = parser.finish()
    iface = results[0]
    harness.ok(True, "TestArrayBuffer interface parsed without error")
    harness.check(len(iface.members), 22, "Interface should have twenty two members")
    members = iface.members

    def assert_typed_pair(attribute, meth, type_name):
        # Verify one (attribute, method) pair for a given typed-array type:
        # the attribute's declared type, and the method's four argument
        # flavors (plain, nullable, array, sequence) of the same type.
        harness.ok(isinstance(attribute, WebIDL.IDLAttribute), "Expect an IDLAttribute")
        harness.ok(isinstance(meth, WebIDL.IDLMethod), "Expect an IDLMethod")
        harness.check(str(attribute.type), type_name, "Expect an ArrayBuffer type")
        harness.ok(attribute.type.isSpiderMonkeyInterface(), "Should test as a js interface")
        (return_type, args) = meth.signatures()[0]
        harness.ok(return_type.isVoid(), "Should have a void return type")
        harness.check(len(args), 4, "Expect 4 arguments")
        harness.check(str(args[0].type), type_name, "Expect an ArrayBuffer type")
        harness.ok(args[0].type.isSpiderMonkeyInterface(), "Should test as a js interface")
        harness.check(str(args[1].type), type_name + "OrNull", "Expect an ArrayBuffer type")
        harness.ok(args[1].type.inner.isSpiderMonkeyInterface(), "Should test as a js interface")
        harness.check(str(args[2].type), type_name + "Array", "Expect an ArrayBuffer type")
        harness.ok(args[2].type.inner.isSpiderMonkeyInterface(), "Should test as a js interface")
        harness.check(str(args[3].type), type_name + "Sequence", "Expect an ArrayBuffer type")
        harness.ok(args[3].type.inner.isSpiderMonkeyInterface(), "Should test as a js interface")

    # Members alternate attribute/method in declaration order, one pair per
    # typed-array type; drive the checks from that list instead of eleven
    # hand-written calls.
    typed_array_types = [
        "ArrayBuffer", "ArrayBufferView", "Int8Array", "Uint8Array",
        "Uint8ClampedArray", "Int16Array", "Uint16Array", "Int32Array",
        "Uint32Array", "Float32Array", "Float64Array",
    ]
    for pair_index, type_name in enumerate(typed_array_types):
        assert_typed_pair(members[2 * pair_index],
                          members[2 * pair_index + 1],
                          type_name)
|
mpl-2.0
|
GitAngel/django
|
django/contrib/auth/decorators.py
|
356
|
3049
|
from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.shortcuts import resolve_url
from django.utils import six
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
    """
    View decorator that redirects to the log-in page unless ``test_func``
    returns True for ``request.user``.

    ``test_func`` is a callable taking the user object.  ``login_url``
    defaults to ``settings.LOGIN_URL``.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if test_func(request.user):
                # Check passed: run the wrapped view unmodified.
                return view_func(request, *args, **kwargs)
            next_url = request.build_absolute_uri()
            resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
            # When the login URL shares scheme and host with the current
            # request, a relative path is sufficient for the "next" URL.
            login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urlparse(next_url)[:2]
            same_scheme = not login_scheme or login_scheme == current_scheme
            same_host = not login_netloc or login_netloc == current_netloc
            if same_scheme and same_host:
                next_url = request.get_full_path()
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(
                next_url, resolved_login_url, redirect_field_name)
        return _wrapped_view
    return decorator
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    View decorator that requires an authenticated user, redirecting
    anonymous visitors to the log-in page.

    Supports both bare ``@login_required`` and parameterized
    ``@login_required(login_url=...)`` usage.
    """
    decorator = user_passes_test(
        lambda user: user.is_authenticated(),
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    # With a function we were used as a bare decorator; otherwise return the
    # decorator itself for later application.
    return decorator(function) if function else decorator
def permission_required(perm, login_url=None, raise_exception=False):
    """
    View decorator that requires the user to hold the given permission(s),
    redirecting to the log-in page otherwise.

    When ``raise_exception`` is True, PermissionDenied is raised instead of
    redirecting, letting the 403 handler respond.
    """
    def check_perms(user):
        # Accept either a single permission string or an iterable of them.
        perms = (perm, ) if isinstance(perm, six.string_types) else perm
        # Anonymous users can legitimately hold permissions, so always ask.
        if user.has_perms(perms):
            return True
        # Hand control to the 403 handler when the caller requested it.
        if raise_exception:
            raise PermissionDenied
        # Otherwise fall back to showing the login form.
        return False
    return user_passes_test(check_perms, login_url=login_url)
|
bsd-3-clause
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/xorg-sgml-doctools/package.py
|
5
|
1781
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class XorgSgmlDoctools(AutotoolsPackage):
    """This package provides a common set of SGML entities and XML/CSS style
    sheets used in building/formatting the documentation provided in other
    X.Org packages."""

    # Upstream project page and the release tarball URL template.
    homepage = "http://cgit.freedesktop.org/xorg/doc/xorg-sgml-doctools"
    url = "https://www.x.org/archive/individual/doc/xorg-sgml-doctools-1.11.tar.gz"

    # Known release, identified by its MD5 checksum.
    version('1.11', '51cf4c6b476e2b98a068fea6975b9b21')

    # Only needed while building, not at run time.
    depends_on('pkgconfig', type='build')
    depends_on('util-macros', type='build')
|
lgpl-2.1
|
v-zhongz/azure-linux-extensions
|
OSPatching/azure/http/__init__.py
|
52
|
2502
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
HTTP_RESPONSE_NO_CONTENT = 204
class HTTPError(Exception):
    """Raised when an HTTP response carries a status code >= 300.

    Attributes:
        status: numeric HTTP status code of the failed response
        respheader: headers returned with the response
        respbody: raw body of the response
    """

    def __init__(self, status, message, respheader, respbody):
        """Store the failing response's status, headers and body; the
        human-readable message becomes the exception's args."""
        super(HTTPError, self).__init__(message)
        self.status = status
        self.respheader = respheader
        self.respbody = respbody
class HTTPResponse(object):
    """A plain value object holding the pieces of an HTTP response.

    Attributes:
        status: the status code of the response
        message: the reason phrase / message
        headers: the returned headers, as a list of (name, value) pairs
        body: the body of the response
    """

    def __init__(self, status, message, headers, body):
        self.status, self.message = status, message
        self.headers, self.body = headers, body
class HTTPRequest(object):
    '''Describes an outgoing HTTP request.  Attributes:

    host: the host name to connect to
    method: the method to use to connect (string such as GET, POST, PUT, etc.)
    path: the uri fragment
    query: query parameters specified as a list of (name, value) pairs
    headers: header values specified as (name, value) pairs
    body: the body of the request.
    protocol_override:
        specify to use this protocol instead of the global one stored in
        _HTTPClient.
    '''

    def __init__(self):
        # Every field starts empty; callers populate them before sending.
        for attr in ('host', 'method', 'path', 'body'):
            setattr(self, attr, '')
        self.query = []    # list of (name, value)
        self.headers = []  # list of (header name, header value)
        self.protocol_override = None
|
apache-2.0
|
JLBoor/eagles-hackathon
|
node_modules/node-gyp/gyp/pylib/gyp/flock_tool.py
|
1835
|
1748
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
    """Entry point: build a FlockTool and hand it the CLI arguments."""
    FlockTool().Dispatch(args)
class FlockTool(object):
    """This class emulates the 'flock' command."""

    def Dispatch(self, args):
        """Dispatches a string command to a method.

        args[0] names the command (e.g. 'flock'); the remaining args are
        forwarded to the matching Exec* method.  Raises Exception when no
        command was given.
        """
        if len(args) < 1:
            raise Exception("Not enough arguments")

        method = "Exec%s" % self._CommandifyName(args[0])
        getattr(self, method)(*args[1:])

    def _CommandifyName(self, name_string):
        """Transforms a tool name like copy-info-plist to CopyInfoPlist."""
        return name_string.title().replace('-', '')

    def ExecFlock(self, lockfile, *cmd_list):
        """Emulates the most basic behavior of Linux's flock(1): take an
        exclusive lock on |lockfile|, then run |cmd_list| and return the
        child's exit status."""
        # Rely on exception handling to report errors.
        # Note that the stock python on SunOS has a bug
        # where fcntl.flock(fd, LOCK_EX) always fails
        # with EBADF, that's why we use this F_SETLK
        # hack instead.
        #
        # BUGFIX: 0o666 replaces the old 0666 spelling, which is a syntax
        # error on Python 3 (PEP 3127); 0o666 is valid on Python 2.6+ too.
        fd = os.open(lockfile, os.O_WRONLY | os.O_NOCTTY | os.O_CREAT, 0o666)
        if sys.platform.startswith('aix'):
            # Python on AIX is compiled with LARGEFILE support, which changes
            # the struct size.
            op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
        else:
            op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
        fcntl.fcntl(fd, fcntl.F_SETLK, op)
        return subprocess.call(cmd_list)
if __name__ == '__main__':
    # Script entry point: forward CLI args (minus argv[0]) to main() and
    # exit with whatever it returns.
    sys.exit(main(sys.argv[1:]))
|
mit
|
rghe/ansible
|
lib/ansible/plugins/action/eos_config.py
|
56
|
4248
|
#
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.eos import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):

    def run(self, tmp=None, task_vars=None):
        """Render an optional ``src`` template, run the eos module, write any
        requested backup, then strip private (``__*__``) keys from the
        result dict before returning it."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                return dict(failed=True, msg=to_text(exc))

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath

        # strip out any keys that have two leading and two trailing
        # underscore characters
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]

        return result

    def _get_working_path(self):
        """Return the role path when running inside a role, otherwise the
        playbook's base directory."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write ``contents`` to backup/<host>_config.<timestamp>, removing
        any previous backups for the same host.  Returns the file name."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # BUGFIX: use a context manager so the handle is closed even when
        # write() raises (the original leaked the file object).
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve the ``src`` argument to a template file, render it, and
        store the rendered text back into the task args.  Raises ValueError
        when the path cannot be found."""
        src = self._task.args.get('src')
        working_path = self._get_working_path()

        # BUGFIX: the original called urlsplit('src') on the literal string
        # 'src', so URL-style sources were never detected.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)

        if not os.path.exists(source):
            raise ValueError('path specified in src not found')

        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            return dict(failed=True, msg='unable to load src file')

        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
            # BUGFIX: the original tested hasattr(self._task, "_block:") —
            # note the stray colon inside the attribute name — which is never
            # true, so dependent role paths were never added.
            if hasattr(self._task, "_block"):
                dep_chain = self._task._block.get_dep_chain()
                if dep_chain is not None:
                    for role in dep_chain:
                        searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
|
gpl-3.0
|
OpenSoccerManager/opensoccermanager-editor
|
uigtk/dialogs.py
|
1
|
4445
|
#!/usr/bin/env python3
# This file is part of OpenSoccerManager-Editor.
#
# OpenSoccerManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenSoccerManager is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# OpenSoccerManager. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
import data
class RemoveItem(Gtk.MessageDialog):
    '''
    Message dialog displayed to confirm removal of item.
    '''
    def __init__(self, item, value):
        # Fall back to a generic label when no display value was supplied.
        if value == "":
            value = "item"

        Gtk.MessageDialog.__init__(self)
        self.set_transient_for(data.window)
        self.set_modal(True)
        self.set_title("Remove %s" % (item))
        self.set_markup("Remove %s from database?" % (value))
        self.set_property("message-type", Gtk.MessageType.QUESTION)
        self.add_button("_Do Not Remove", Gtk.ResponseType.CANCEL)
        self.add_button("_Remove", Gtk.ResponseType.OK)
        # Default to the safe choice so Enter does not delete by accident.
        self.set_default_response(Gtk.ResponseType.CANCEL)

    def show(self):
        # True only when the user explicitly confirmed removal; the dialog
        # is destroyed either way.
        state = self.run() == Gtk.ResponseType.OK
        self.destroy()

        return state
class RemoveAttribute(Gtk.MessageDialog):
    '''
    Message dialog displayed to confirm removal of attribute.
    '''
    def __init__(self, index=0):
        # Map the entity index to a human-readable name for the message.
        item = ("player", "club", "stadium", "league", "referee")[index]

        Gtk.MessageDialog.__init__(self)
        self.set_transient_for(data.window)
        self.set_modal(True)
        self.set_title("Remove Attribute")
        self.set_markup("Remove selected attribute from %s?" % (item))
        self.set_property("message-type", Gtk.MessageType.QUESTION)
        self.add_button("_Do Not Remove", Gtk.ResponseType.CANCEL)
        self.add_button("_Remove", Gtk.ResponseType.OK)
        # Default to the safe choice so Enter does not delete by accident.
        self.set_default_response(Gtk.ResponseType.CANCEL)

    def show(self):
        # True only when the user explicitly confirmed removal; the dialog
        # is destroyed either way.
        state = self.run() == Gtk.ResponseType.OK
        self.destroy()

        return state
class ClubKeyError(Gtk.MessageDialog):
    '''
    Message dialog for key errors when removing club with associated data.
    '''
    def __init__(self, item):
        Gtk.MessageDialog.__init__(self)
        self.set_transient_for(data.window)
        self.set_modal(True)
        self.set_title("Key Error")
        self.set_markup("<span size='12000'><b>Unable to remove %s from the database.</b></span>" % (item))
        self.format_secondary_markup("Remove all associated players from the club to delete.")
        self.set_property("message-type", Gtk.MessageType.ERROR)
        self.add_button("_Close", Gtk.ResponseType.CLOSE)

        # Modal error notice: block until dismissed, then clean up.
        self.run()
        self.destroy()
class StadiumKeyError(Gtk.MessageDialog):
    '''
    Message dialog for key errors when removing stadium with associated data.
    '''
    def __init__(self, item):
        Gtk.MessageDialog.__init__(self)
        self.set_transient_for(data.window)
        self.set_modal(True)
        self.set_title("Key Error")
        self.set_markup("<span size='12000'><b>Unable to remove %s from the database.</b></span>" % (item))
        self.format_secondary_markup("Remove all associated clubs from the stadium to delete.")
        self.set_property("message-type", Gtk.MessageType.ERROR)
        self.add_button("_Close", Gtk.ResponseType.CLOSE)

        # Modal error notice: block until dismissed, then clean up.
        self.run()
        self.destroy()
class LeagueKeyError(Gtk.MessageDialog):
    '''
    Message dialog for key errors when removing league with associated data.
    '''
    def __init__(self, item):
        Gtk.MessageDialog.__init__(self)
        self.set_transient_for(data.window)
        self.set_modal(True)
        self.set_title("Key Error")
        self.set_markup("<span size='12000'><b>Unable to remove %s from the database.</b></span>" % (item))
        self.format_secondary_markup("Remove all associated clubs from the league to delete.")
        self.set_property("message-type", Gtk.MessageType.ERROR)
        self.add_button("_Close", Gtk.ResponseType.CLOSE)

        # Modal error notice: block until dismissed, then clean up.
        self.run()
        self.destroy()
|
gpl-3.0
|
aequitas/home-assistant
|
homeassistant/components/discovery/__init__.py
|
1
|
7474
|
"""
Starts a service to scan in intervals for new devices.
Will emit EVENT_PLATFORM_DISCOVERED whenever a new service has been discovered.
Knows which components handle certain types, will make sure they are
loaded before the EVENT_PLATFORM_DISCOVERED is fired.
"""
import json
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import callback
from homeassistant.const import EVENT_HOMEASSISTANT_START
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.discovery import async_load_platform, async_discover
import homeassistant.util.dt as dt_util
DOMAIN = 'discovery'
SCAN_INTERVAL = timedelta(seconds=300)
SERVICE_APPLE_TV = 'apple_tv'
SERVICE_DAIKIN = 'daikin'
SERVICE_DECONZ = 'deconz'
SERVICE_DLNA_DMR = 'dlna_dmr'
SERVICE_ENIGMA2 = 'enigma2'
SERVICE_FREEBOX = 'freebox'
SERVICE_HASS_IOS_APP = 'hass_ios'
SERVICE_HASSIO = 'hassio'
SERVICE_HEOS = 'heos'
SERVICE_IGD = 'igd'
SERVICE_KONNECTED = 'konnected'
SERVICE_MOBILE_APP = 'hass_mobile_app'
SERVICE_NETGEAR = 'netgear_router'
SERVICE_OCTOPRINT = 'octoprint'
SERVICE_ROKU = 'roku'
SERVICE_SABNZBD = 'sabnzbd'
SERVICE_SAMSUNG_PRINTER = 'samsung_printer'
SERVICE_TELLDUSLIVE = 'tellstick'
SERVICE_YEELIGHT = 'yeelight'
SERVICE_WEMO = 'belkin_wemo'
SERVICE_WINK = 'wink'
SERVICE_XIAOMI_GW = 'xiaomi_gw'
CONFIG_ENTRY_HANDLERS = {
SERVICE_DAIKIN: 'daikin',
SERVICE_DECONZ: 'deconz',
'google_cast': 'cast',
SERVICE_HEOS: 'heos',
SERVICE_TELLDUSLIVE: 'tellduslive',
'sonos': 'sonos',
SERVICE_IGD: 'upnp',
}
SERVICE_HANDLERS = {
SERVICE_MOBILE_APP: ('mobile_app', None),
SERVICE_HASS_IOS_APP: ('ios', None),
SERVICE_NETGEAR: ('device_tracker', None),
SERVICE_WEMO: ('wemo', None),
SERVICE_HASSIO: ('hassio', None),
SERVICE_APPLE_TV: ('apple_tv', None),
SERVICE_ENIGMA2: ('media_player', 'enigma2'),
SERVICE_ROKU: ('roku', None),
SERVICE_WINK: ('wink', None),
SERVICE_XIAOMI_GW: ('xiaomi_aqara', None),
SERVICE_SABNZBD: ('sabnzbd', None),
SERVICE_SAMSUNG_PRINTER: ('sensor', 'syncthru'),
SERVICE_KONNECTED: ('konnected', None),
SERVICE_OCTOPRINT: ('octoprint', None),
SERVICE_FREEBOX: ('freebox', None),
SERVICE_YEELIGHT: ('yeelight', None),
'panasonic_viera': ('media_player', 'panasonic_viera'),
'plex_mediaserver': ('media_player', 'plex'),
'yamaha': ('media_player', 'yamaha'),
'logitech_mediaserver': ('media_player', 'squeezebox'),
'directv': ('media_player', 'directv'),
'denonavr': ('media_player', 'denonavr'),
'samsung_tv': ('media_player', 'samsungtv'),
'frontier_silicon': ('media_player', 'frontier_silicon'),
'openhome': ('media_player', 'openhome'),
'harmony': ('remote', 'harmony'),
'bose_soundtouch': ('media_player', 'soundtouch'),
'bluesound': ('media_player', 'bluesound'),
'songpal': ('media_player', 'songpal'),
'kodi': ('media_player', 'kodi'),
'volumio': ('media_player', 'volumio'),
'lg_smart_device': ('media_player', 'lg_soundbar'),
'nanoleaf_aurora': ('light', 'nanoleaf'),
}
OPTIONAL_SERVICE_HANDLERS = {
SERVICE_DLNA_DMR: ('media_player', 'dlna_dmr'),
}
MIGRATED_SERVICE_HANDLERS = {
'axis': None,
'esphome': None,
'ikea_tradfri': None,
'homekit': None,
'philips_hue': None
}
DEFAULT_ENABLED = list(CONFIG_ENTRY_HANDLERS) + list(SERVICE_HANDLERS) + \
list(MIGRATED_SERVICE_HANDLERS)
DEFAULT_DISABLED = list(OPTIONAL_SERVICE_HANDLERS) + \
list(MIGRATED_SERVICE_HANDLERS)
CONF_IGNORE = 'ignore'
CONF_ENABLE = 'enable'
CONFIG_SCHEMA = vol.Schema({
vol.Optional(DOMAIN): vol.Schema({
vol.Optional(CONF_IGNORE, default=[]):
vol.All(cv.ensure_list, [vol.In(DEFAULT_ENABLED)]),
vol.Optional(CONF_ENABLE, default=[]):
vol.All(cv.ensure_list, [
vol.In(DEFAULT_DISABLED + DEFAULT_ENABLED)]),
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Start a discovery service.

    Sets up a repeating netdisco scan (every SCAN_INTERVAL) that forwards
    each newly discovered service either into the config-entry flow or to
    the legacy discovery/platform-load helpers.
    """
    from netdisco.discovery import NetworkDiscovery

    logger = logging.getLogger(__name__)
    netdisco = NetworkDiscovery()
    # Hashes of (service, info) pairs we have already handled, so repeat
    # scans do not re-trigger setup for the same device.
    already_discovered = set()

    # Disable zeroconf logging, it spams
    logging.getLogger('zeroconf').setLevel(logging.CRITICAL)

    if DOMAIN in config:
        # Platforms ignore by config
        ignored_platforms = config[DOMAIN][CONF_IGNORE]

        # Optional platforms enabled by config
        enabled_platforms = config[DOMAIN][CONF_ENABLE]
    else:
        ignored_platforms = []
        enabled_platforms = []

    # Warn about redundant 'enable' entries that are now on by default.
    for platform in enabled_platforms:
        if platform in DEFAULT_ENABLED:
            logger.warning(
                "Please remove %s from your discovery.enable configuration "
                "as it is now enabled by default",
                platform,
            )

    async def new_service_found(service, info):
        """Handle a new service if one is found."""
        if service in MIGRATED_SERVICE_HANDLERS:
            return

        if service in ignored_platforms:
            logger.info("Ignoring service: %s %s", service, info)
            return

        # sort_keys makes the hash stable regardless of dict ordering.
        discovery_hash = json.dumps([service, info], sort_keys=True)
        if discovery_hash in already_discovered:
            logger.debug("Already discovered service %s %s.", service, info)
            return

        already_discovered.add(discovery_hash)

        if service in CONFIG_ENTRY_HANDLERS:
            # Modern path: hand discovery data to the config-entry flow.
            await hass.config_entries.flow.async_init(
                CONFIG_ENTRY_HANDLERS[service],
                context={'source': config_entries.SOURCE_DISCOVERY},
                data=info
            )
            return

        comp_plat = SERVICE_HANDLERS.get(service)

        # Optional handlers only apply when explicitly enabled by config.
        if not comp_plat and service in enabled_platforms:
            comp_plat = OPTIONAL_SERVICE_HANDLERS[service]

        # We do not know how to handle this service.
        if not comp_plat:
            logger.info("Unknown service discovered: %s %s", service, info)
            return

        logger.info("Found new service: %s %s", service, info)

        component, platform = comp_plat

        if platform is None:
            await async_discover(hass, service, info, component, config)
        else:
            await async_load_platform(
                hass, component, platform, info, config)

    async def scan_devices(now):
        """Scan for devices."""
        try:
            # netdisco scanning is blocking, so run it in the executor.
            results = await hass.async_add_job(_discover, netdisco)

            for result in results:
                hass.async_create_task(new_service_found(*result))
        except OSError:
            logger.error("Network is unreachable")

        # Re-schedule ourselves so scanning repeats every SCAN_INTERVAL.
        async_track_point_in_utc_time(
            hass, scan_devices, dt_util.utcnow() + SCAN_INTERVAL)

    @callback
    def schedule_first(event):
        """Schedule the first discovery when Home Assistant starts up."""
        async_track_point_in_utc_time(hass, scan_devices, dt_util.utcnow())

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, schedule_first)

    return True
def _discover(netdisco):
"""Discover devices."""
results = []
try:
netdisco.scan()
for disc in netdisco.discover():
for service in netdisco.get_info(disc):
results.append((disc, service))
finally:
netdisco.stop()
return results
|
apache-2.0
|
WeblateOrg/weblate
|
weblate/gitexport/apps.py
|
1
|
1433
|
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.apps import AppConfig
from django.core.checks import register
from weblate.gitexport.utils import find_git_http_backend
from weblate.utils.checks import weblate_check
class GitExportConfig(AppConfig):
    # Django application config for the Weblate git-exporter add-on.
    name = "weblate.gitexport"
    label = "gitexport"
    verbose_name = "Git Exporter"

    def ready(self):
        # Register the git-http-backend availability check with Django's
        # system-check framework once the app registry is populated.
        super().ready()
        register(check_git_backend)
def check_git_backend(app_configs, **kwargs):
    """Django system check: report when git-http-backend cannot be found.

    Returns a list containing a single check message when the helper binary
    is missing, otherwise an empty list.
    """
    if find_git_http_backend() is not None:
        return []
    return [
        weblate_check(
            "weblate.E022",
            "Failed to find git-http-backend, the git exporter will not work.",
        )
    ]
|
gpl-3.0
|
JPFrancoia/scikit-learn
|
sklearn/cluster/k_means_.py
|
4
|
59475
|
"""K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++

    Parameters
    -----------
    X: array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).

    n_clusters: integer
        The number of seeds to choose

    x_squared_norms: array, shape (n_samples,)
        Squared Euclidean norm of each data point.

    random_state: numpy.RandomState
        The generator used to initialize the centers.

    n_local_trials: integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.

    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape

    centers = np.empty((n_clusters, n_features), dtype=X.dtype)

    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if sp.issparse(X):
        # Sparse rows must be densified before assignment into `centers`.
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
        squared=True)
    current_pot = closest_dist_sq.sum()

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        # stable_cumsum + searchsorted implements weighted sampling without
        # materializing a normalized probability vector.
        candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
                                        rand_vals)

        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)

        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()

            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq

        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq

    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
            algorithm="auto", return_n_iter=False):
    """K-means clustering algorithm.
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    algorithm : "auto", "full" or "elkan", default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    best_n_iter: int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.
    """
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError('Number of iterations should be a positive number,'
                         ' got %d instead' % max_iter)
    # copy only when copy_x is True: with copy_x=False the mean-centering
    # below happens on the caller's array and is undone before returning
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)
    # If the distances are precomputed every job will create a matrix of shape
    # (n_clusters, n_samples). To stop KMeans from eating up memory we only
    # activate this if the created matrix is guaranteed to be under 100MB. 12
    # million entries consume a little under 100MB if they are of type double.
    if precompute_distances == 'auto':
        n_samples = X.shape[0]
        precompute_distances = (n_clusters * n_samples) < 12e6
    elif isinstance(precompute_distances, bool):
        pass
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         precompute_distances)
    # subtract of mean of x for more accurate distance computations
    # (sparse X itself is left uncentered; only dense X is shifted below)
    if not sp.issparse(X) or hasattr(init, '__array__'):
        X_mean = X.mean(axis=0)
        if not sp.issparse(X):
            # The copy was already done above
            X -= X_mean
        if hasattr(init, '__array__'):
            init = check_array(init, dtype=X.dtype.type, copy=True)
            _validate_center_shape(X, n_clusters, init)
            # shift user-provided centers into the same centered frame as X
            init -= X_mean
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in k-means instead of n_init=%d'
                    % n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    # track the best (lowest-inertia) run across the n_init restarts
    best_labels, best_inertia, best_centers = None, None, None
    if n_clusters == 1:
        # elkan doesn't make sense for a single cluster, full will produce
        # the right result.
        algorithm = "full"
    if algorithm == "auto":
        # the Elkan variant has no sparse support, so sparse input uses Lloyd
        algorithm = "full" if sp.issparse(X) else 'elkan'
    if algorithm == "full":
        kmeans_single = _kmeans_single_lloyd
    elif algorithm == "elkan":
        kmeans_single = _kmeans_single_elkan
    else:
        raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
                         " %s" % str(algorithm))
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
                                   verbose=verbose, tol=tol,
                                   precompute_distances=precompute_distances,
                                   x_squared_norms=x_squared_norms,
                                   # Change seed to ensure variety
                                   random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    if not sp.issparse(X):
        # return data and centers in the caller's original coordinate frame:
        # undo the in-place centering and shift the centers back
        if not copy_x:
            X += X_mean
        best_centers += X_mean
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
                         verbose=False, x_squared_norms=None,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """A single run of k-means using Elkan's triangle-inequality variant.

    Dense input only.  ``precompute_distances`` is accepted for signature
    compatibility with the Lloyd variant but is not used here.  Returns
    the tuple ``(labels, inertia, centers, n_iter)``.
    """
    if sp.issparse(X):
        raise ValueError("algorithm='elkan' not supported for sparse input X")
    X = check_array(X, order="C")
    random_state = check_random_state(random_state)
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    # pick the starting centers and make them C-contiguous for the
    # compiled Elkan implementation
    centers = np.ascontiguousarray(
        _init_centroids(X, n_clusters, init, random_state=random_state,
                        x_squared_norms=x_squared_norms))
    if verbose:
        print('Initialization complete')
    centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
                                            max_iter=max_iter, verbose=verbose)
    # inertia: total squared distance of each sample to its assigned center
    inertia = ((X - centers[labels]) ** 2).sum(dtype=np.float64)
    return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
                         verbose=False, x_squared_norms=None,
                         random_state=None, tol=1e-4,
                         precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.
    Parameters
    ----------
    X: array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters: int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter: int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    tol: float, optional
        The relative increment in the results before declaring convergence.
    verbose: boolean, optional
        Verbosity mode
    x_squared_norms: array
        Precomputed x_squared_norms.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    Returns
    -------
    centroid: float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label: integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia: float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()  # snapshot to measure the center shift
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # remember the best (lowest-inertia) state seen across iterations
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # total squared movement of all centers during this iteration
        center_shift_total = squared_norm(centers_old - centers)
        if center_shift_total <= tol:
            if verbose:
                print("Converged at iteration %d: "
                      "center shift %e within tolerance %e"
                      % (i, center_shift_total, tol))
            break
    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(X, x_squared_norms, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
    """Compute labels and inertia using a full distance matrix.

    This will overwrite the 'distances' array in-place.

    Parameters
    ----------
    X : numpy array, shape (n_sample, n_features)
        Input data.
    x_squared_norms : numpy array, shape (n_samples,)
        Precomputed squared norms of X.
    centers : numpy array, shape (n_clusters, n_features)
        Cluster centers which data is assigned to.
    distances : numpy array, shape (n_samples,)
        Pre-allocated array in which distances are stored.

    Returns
    -------
    labels : numpy array, dtype=np.int, shape (n_samples,)
        Indices of clusters that samples are assigned to.
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # one row of squared distances per center: shape (n_clusters, n_samples)
    all_distances = euclidean_distances(centers, X, x_squared_norms,
                                        squared=True)
    # argmin resolves ties toward the lowest center index, matching a
    # strictly-closer incremental update
    labels = np.argmin(all_distances, axis=0).astype(np.int32)
    mindist = all_distances.min(axis=0)
    if n_samples == distances.shape[0]:
        # distances will be changed in-place
        distances[:] = mindist
    inertia = mindist.sum(dtype=np.float64)
    return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm.

    Assign each sample to its nearest center and accumulate the total
    inertia.  The ``distances`` buffer, when provided, is filled in-place.

    Parameters
    ----------
    X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.
    x_squared_norms : array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.
    centers : float array, shape (k, n_features)
        The cluster centers.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    distances : float array, shape (n_samples,)
        Pre-allocated array to be filled in with each sample's distance
        to the closest center.

    Returns
    -------
    labels : int array of shape(n)
        The resulting assignment
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    # start every label at -1 so any unassigned sample is easy to detect
    labels = np.full(n_samples, -1, dtype=np.int32)
    if distances is None:
        # a zero-length buffer signals "do not record per-sample distances"
        distances = np.zeros(shape=(0,), dtype=X.dtype)
    if sp.issparse(X):
        inertia = _k_means._assign_labels_csr(
            X, x_squared_norms, centers, labels, distances=distances)
        return labels, inertia
    if precompute_distances:
        return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                centers, distances)
    inertia = _k_means._assign_labels_array(
        X, x_squared_norms, centers, labels, distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Pick the ``k`` starting centroids according to ``init``.

    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The data to draw centroids from.
    k : int
        Number of centroids.
    init : {'k-means++', 'random' or ndarray or callable}
        Initialization method.
    random_state : integer or numpy.RandomState, optional
        Seed or generator controlling the random choices.
    x_squared_norms : array, shape (n_samples,), optional
        Precomputed squared norms of the rows of X; recomputed if absent.
    init_size : int, optional
        When given and smaller than n_samples, initialization runs on a
        random subsample of that size (trading accuracy for speed).
        Must be larger than k.

    Returns
    -------
    centers : array, shape (k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        # draw a random subsample (with replacement) to initialize from
        subset = random_state.randint(0, n_samples, init_size)
        X = X[subset]
        x_squared_norms = x_squared_norms[subset]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    init_is_name = isinstance(init, string_types)
    if init_is_name and init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif init_is_name and init == 'random':
        # k distinct rows drawn uniformly at random
        centers = X[random_state.permutation(n_samples)[:k]]
    elif hasattr(init, '__array__'):
        # ensure that the centers have the same dtype as X
        # this is a requirement of fused types of cython
        centers = np.array(init, dtype=X.dtype)
    elif callable(init):
        centers = np.asarray(init(X, k, random_state=random_state),
                             dtype=X.dtype)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))
    if sp.issparse(centers):
        centers = centers.toarray()
    _validate_center_shape(X, k, centers)
    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    algorithm : "auto", "full" or "elkan", default="auto"
        K-means algorithm to use. The classical EM-style algorithm is "full".
        The "elkan" variation is more efficient by using the triangle
        inequality, but currently doesn't support sparse data. "auto" chooses
        "elkan" for dense data and "full" for sparse data.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point
    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    Examples
    --------
    >>> from sklearn.cluster import KMeans
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [4, 2], [4, 4], [4, 0]])
    >>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
    >>> kmeans.labels_
    array([0, 0, 0, 1, 1, 1], dtype=int32)
    >>> kmeans.predict([[0, 0], [4, 4]])
    array([0, 1], dtype=int32)
    >>> kmeans.cluster_centers_
    array([[ 1.,  2.],
           [ 4.,  2.]])
    See also
    --------
    MiniBatchKMeans
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster than the default batch implementation.
    Notes
    ------
    The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), were n is the number of
    samples and T is the number of iteration.
    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.
    """
    def __init__(self, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True,
                 n_jobs=1, algorithm='auto'):
        # hyper-parameters are stored as given; validation and all real
        # work happen in fit() (scikit-learn estimator convention)
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        self.algorithm = algorithm
    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X
    def _check_test_data(self, X):
        """Validate X at predict/transform time against the fitted centers."""
        X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
        n_samples, n_features = X.shape
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        return X
    def fit(self, X, y=None):
        """Compute k-means clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)

        Returns
        -------
        self : KMeans
            The fitted estimator; results are stored on
            ``cluster_centers_``, ``labels_``, ``inertia_`` and ``n_iter_``.
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            k_means(
                X, n_clusters=self.n_clusters, init=self.init,
                n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
                precompute_distances=self.precompute_distances,
                tol=self.tol, random_state=random_state, copy_x=self.copy_x,
                n_jobs=self.n_jobs, algorithm=self.algorithm,
                return_n_iter=True)
        return self
    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.

        Convenience method; equivalent to calling fit(X) followed by
        predict(X).

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        return self.fit(X).labels_
    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.

        Equivalent to fit(X).transform(X), but more efficiently implemented.

        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)
    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.
        In the new space, each dimension is the distance to the cluster
        centers.  Note that even if X is sparse, the array returned by
        `transform` will typically be dense.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        return self._transform(X)
    def _transform(self, X):
        """guts of transform method; no input validation"""
        return euclidean_distances(X, self.cluster_centers_)
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
    def score(self, X, y=None):
        """Opposite of the value of X on the K-means objective.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.
        Returns
        -------
        score : float
            Opposite of the value of X on the K-means objective.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # negated inertia: larger (less negative) is better, as sklearn
        # scorers expect
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.
    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.
    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE
    counts : array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.
    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : bool, optional, default False
        Controls the verbosity.
    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.
    old_center_buffer : int
        Copy of old centers for monitoring convergence.
    Returns
    -------
    inertia : float
        Sum of distances of samples to their closest cluster center.
    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.
    """
    # Perform label assignment to nearest centers
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = counts < reassignment_ratio * counts.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            # too many candidates: keep the highest-count centers and only
            # reassign the lowest-count half
            indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
                                 random_state=random_state)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                assign_rows_csr(X,
                                astype(new_centers, np.intp),
                                astype(np.where(to_reassign)[0], np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        counts[to_reassign] = np.min(counts[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    # accumulates the squared center movement when convergence monitoring
    # (compute_squared_diff) is enabled
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            # Note: numpy >= 1.10 does not support '/=' for the following
            # expression for a mixture of int and float (see numpy issue #6464)
            centers[center_idx] = centers[center_idx] / counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
    """Mini-Batch K-Means clustering

    Read more in the :ref:`User Guide <mini_batch_kmeans>`.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter : int, optional
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.

    max_no_improvement : int, default: 10
        Control early stopping based on the consecutive number of mini
        batches that does not yield an improvement on the smoothed inertia.
        To disable convergence detection based on inertia, set
        max_no_improvement to None.

    tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized of the mean center
        squared position changes. This early stopping heuristics is
        closer to the one used for the batch variant of the algorithms
        but induces a slight computational and memory overhead over the
        inertia heuristic.
        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).

    batch_size : int, optional, default: 100
        Size of the mini batches.

    init_size : int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.

    init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
        Method for initialization, defaults to 'k-means++':

        'k-means++' : selects initial cluster centers for k-means
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': choose k observations (rows) at random from data for
        the initial centroids.

        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.

    n_init : int, default=3
        Number of random initializations that are tried.
        In contrast to KMeans, the algorithm is only run once, using the
        best of the ``n_init`` initializations as measured by inertia.

    compute_labels : boolean, default=True
        Compute label assignment and inertia for the complete dataset
        once the minibatch optimization has converged in fit.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    reassignment_ratio : float, default: 0.01
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.

    verbose : boolean, optional
        Verbosity mode.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point (if compute_labels is set to True).

    inertia_ : float
        The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of square distances of samples to their nearest
        neighbor.

    See also
    --------
    KMeans
        The classic implementation of the clustering method based on the
        Lloyd's algorithm. It consumes the whole set of input data at each
        iteration.

    Notes
    -----
    See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
    """

    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):

        # Shared hyper-parameters are handled by the KMeans base class;
        # only the minibatch-specific ones are stored here.
        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)

        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio

    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster

        Returns
        -------
        self
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse="csr", order='C',
                        dtype=[np.float64, np.float32])
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")

        n_init = self.n_init
        if hasattr(self.init, '__array__'):
            # Explicit initial centers make repeated inits pointless:
            # every init would start from the same centers.
            self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in MiniBatchKMeans instead of '
                    'n_init=%d'
                    % self.n_init, RuntimeWarning, stacklevel=2)
                n_init = 1

        x_squared_norms = row_norms(X, squared=True)

        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)

            # using tol-based early stopping needs the allocation of a
            # dedicated buffer which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, dtype=X.dtype)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, dtype=X.dtype)

        distances = np.zeros(self.batch_size, dtype=X.dtype)
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)

        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size

        # A fixed validation subset is used to compare the candidate
        # centers produced by the independent inits below.
        validation_indices = random_state.randint(0, n_samples, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]

        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)

            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.

            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)

            # Compute the label assignment on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=None, verbose=self.verbose)

            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia

        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}

        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset
            minibatch_indices = random_state.randint(
                0, n_samples, self.batch_size)

            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)

            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break

        self.n_iter_ = iteration_idx + 1

        if self.compute_labels:
            self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)

        return self

    def _labels_inertia_minibatch(self, X):
        """Compute labels and inertia using mini batches.

        This is slightly slower than doing everything at once but prevents
        memory errors / segfaults.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels : array, shape (n_samples,)
            Cluster labels for each point.

        inertia : float
            Sum of squared distances of points to nearest cluster.
        """
        if self.verbose:
            print('Computing label assignment and total inertia')
        x_squared_norms = row_norms(X, squared=True)
        slices = gen_batches(X.shape[0], self.batch_size)
        results = [_labels_inertia(X[s], x_squared_norms[s],
                                   self.cluster_centers_) for s in slices]
        labels, inertia = zip(*results)
        return np.hstack(labels), np.sum(inertia)

    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.
        """
        X = check_array(X, accept_sparse="csr")
        n_samples, n_features = X.shape
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=X.dtype)

        if n_samples == 0:
            return self

        x_squared_norms = row_norms(X, squared=True)
        # Persist the RNG across successive partial_fit calls so the
        # random reassignment schedule stays reproducible.
        self.random_state_ = getattr(self, "random_state_",
                                     check_random_state(self.random_state))
        if (not hasattr(self, 'counts_')
                or not hasattr(self, 'cluster_centers_')):
            # this is the first call partial_fit on this object:
            # initialize the cluster centers
            self.cluster_centers_ = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=self.random_state_,
                x_squared_norms=x_squared_norms, init_size=self.init_size)

            self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
            random_reassign = False
            distances = None
        else:
            # The lower the minimum count is, the more we do random
            # reassignment, however, we don't want to do random
            # reassignment too often, to allow for building up counts
            random_reassign = self.random_state_.randint(
                10 * (1 + self.counts_.min())) == 0
            distances = np.zeros(X.shape[0], dtype=X.dtype)

        _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                         self.counts_, np.zeros(0, dtype=X.dtype), 0,
                         random_reassign=random_reassign, distances=distances,
                         random_state=self.random_state_,
                         reassignment_ratio=self.reassignment_ratio,
                         verbose=self.verbose)

        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)

        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')

        X = self._check_test_data(X)
        return self._labels_inertia_minibatch(X)[0]
|
bsd-3-clause
|
gskachkov/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/python.py
|
120
|
5538
|
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports checking WebKit style in Python files."""
import re
from StringIO import StringIO
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.thirdparty.autoinstalled import pep8
from webkitpy.thirdparty.autoinstalled.pylint import lint
from webkitpy.thirdparty.autoinstalled.pylint.reporters.text import ParseableTextReporter
class PythonChecker(object):
    """Processes text lines for checking style.

    Runs two independent checkers over a Python file -- pep8 and pylint --
    and forwards every reported issue to the provided error handler as
    (line_number, category, confidence, message).
    """

    def __init__(self, file_path, handle_style_error):
        self._file_path = file_path
        self._handle_style_error = handle_style_error

    def check(self, lines):
        # Both checkers re-read the file from disk via self._file_path;
        # the `lines` argument is kept for interface compatibility with
        # the other style checkers.
        self._check_pep8(lines)
        self._check_pylint(lines)

    def _check_pep8(self, lines):
        # Initialize pep8.options, which is necessary for
        # Checker.check_all() to execute.
        pep8.process_options(arglist=[self._file_path])
        pep8_checker = pep8.Checker(self._file_path)

        def _pep8_handle_error(line_number, offset, text, check):
            # FIXME: Incorporate the character offset into the error output.
            #        This will require updating the error handler __call__
            #        signature to include an optional "offset" parameter.
            # pep8 error text looks like "E501 line too long ...":
            # a four-character code, a space, then the message.
            pep8_code = text[:4]
            pep8_message = text[5:]
            category = "pep8/" + pep8_code
            self._handle_style_error(line_number, category, 5, pep8_message)

        pep8_checker.report_error = _pep8_handle_error
        # check_all() reports every error through the handler installed
        # above; its return value (the error count) is intentionally
        # discarded (the original bound it to an unused local).
        pep8_checker.check_all()

    def _check_pylint(self, lines):
        pylinter = Pylinter()

        # FIXME: for now, we only report pylint errors, but we should be catching and
        # filtering warnings using the rules in style/checker.py instead.
        output = pylinter.run(['-E', self._file_path])

        # Parseable pylint output: "path:line: [category, method] message".
        lint_regex = re.compile(r'([^:]+):([^:]+): \[([^]]+)\] (.*)')
        for error in output.getvalue().splitlines():
            match_obj = lint_regex.match(error)
            assert match_obj
            line_number = int(match_obj.group(2))
            category_and_method = match_obj.group(3).split(', ')
            category = 'pylint/' + (category_and_method[0])
            if len(category_and_method) > 1:
                message = '[%s] %s' % (category_and_method[1], match_obj.group(4))
            else:
                message = match_obj.group(4)
            self._handle_style_error(line_number, category, 5, message)
class Pylinter(object):
    """Thin wrapper around pylint's programmatic entry point.

    Runs pylint with the project's shared pylintrc and captures its
    parseable output in a stream that drops known false positives.
    """

    # We filter out these messages because they are bugs in pylint that produce false positives.
    # FIXME: Does it make sense to combine these rules with the rules in style/checker.py somehow?
    FALSE_POSITIVES = [
        # possibly http://www.logilab.org/ticket/98613 ?
        "Instance of 'Popen' has no 'poll' member",
        "Instance of 'Popen' has no 'returncode' member",
        "Instance of 'Popen' has no 'stdin' member",
        "Instance of 'Popen' has no 'stdout' member",
        "Instance of 'Popen' has no 'stderr' member",
        "Instance of 'Popen' has no 'wait' member",
        "Instance of 'Popen' has no 'pid' member",
    ]

    def __init__(self):
        # Locate the checked-in pylintrc relative to the WebKit tree root.
        self._pylintrc = WebKitFinder(FileSystem()).path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'pylintrc')

    def run(self, argv):
        """Run pylint with `argv` appended to the rcfile arguments.

        Returns the _FilteredStringIO that collected pylint's parseable
        report, with FALSE_POSITIVES lines already removed.
        """
        output = _FilteredStringIO(self.FALSE_POSITIVES)
        # exit=False keeps lint.Run from calling sys.exit() when done.
        lint.Run(['--rcfile', self._pylintrc] + argv, reporter=ParseableTextReporter(output=output), exit=False)
        return output
class _FilteredStringIO(StringIO):
def __init__(self, bad_messages):
StringIO.__init__(self)
self.dropped_last_msg = False
self.bad_messages = bad_messages
def write(self, msg=''):
if not self._filter(msg):
StringIO.write(self, msg)
def _filter(self, msg):
if any(bad_message in msg for bad_message in self.bad_messages):
self.dropped_last_msg = True
return True
if self.dropped_last_msg:
# We drop the newline after a dropped message as well.
self.dropped_last_msg = False
if msg == '\n':
return True
return False
|
bsd-3-clause
|
mrquim/repository.mrquim
|
plugin.video.salts/scrapers/pubfilmto_scraper.py
|
6
|
4875
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import scraper
import kodi
import log_utils # @UnusedImport
import dom_parser2
from salts_lib import scraper_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
logger = log_utils.Logger.get_logger(__name__)
BASE_URL = 'http://pubfilm.to'
class Scraper(scraper.Scraper):
    # Scraper for pubfilm.to; resolves direct mp4 stream URLs out of the
    # site's player iframe.
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # The base URL can be overridden per-scraper in the addon settings.
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        # Video types this scraper can return sources for.
        return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])

    @classmethod
    def get_name(cls):
        return 'Pubfilm.to'

    def get_sources(self, video):
        """Return a list of hoster dicts (url/host/quality/...) for `video`."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        # The actual player lives in an iframe with id "myiframe".
        iframe_url = dom_parser2.parse_dom(html, 'iframe', {'id': 'myiframe'}, req='src', exclude_comments=True)
        if not iframe_url: return hosters
        iframe_url = iframe_url[0].attrs['src']
        html = self._http_get(iframe_url, headers={'Referer': page_url}, cache_limit=.5)
        for source in dom_parser2.parse_dom(html, 'source', {'type': 'video/mp4'}, req=['src', 'data-res']):
            stream_url = source.attrs['src']
            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                # Google Video links encode quality in the URL itself.
                quality = scraper_utils.gv_get_quality(stream_url)
                stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            else:
                # Otherwise the <source> tag carries the resolution.
                quality = scraper_utils.height_get_quality(source.attrs['data-res'])
                stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
            source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
            hosters.append(source)

        return hosters

    def _get_episode_url(self, show_url, video):
        # Walk the show's paginated episode list looking for a link that
        # matches "season-<S>-episode-<E>" (zero-padding tolerated).
        # Returns the episode URL, or None implicitly if not found.
        episode_pattern = 'href="([^"]+season-0*%s-episode-0*%s-[^"]*)' % (video.season, video.episode)
        page_url = show_url
        pages = 0
        # NOTE(review): MAX_PAGES is not defined or imported anywhere in
        # the visible source -- confirm it is provided elsewhere in this
        # module, otherwise this raises NameError at runtime.
        while page_url and pages < MAX_PAGES:
            page_url = scraper_utils.urljoin(self.base_url, page_url)
            html = self._http_get(page_url, cache_limit=2)
            ep_url = self._default_get_episode_url(html, video, episode_pattern)
            if ep_url: return ep_url

            # Follow the "next page" link in the pagination block.
            fragment = dom_parser2.parse_dom(html, 'div', {'class': 'pagination'})
            if not fragment: break

            match = re.search('href="([^"]+)[^>]+>\s*>\s*<', fragment[0].content)
            if not match: break

            page_url = scraper_utils.cleanse_title(match.group(1))
            pages += 1

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        """Search the site and return result dicts (title/year/url)."""
        results = []
        params = {'c': 'movie', 'm': 'filter', 'keyword': title}
        html = self._http_get(self.base_url, params=params, cache_limit=8)
        for attrs, item in dom_parser2.parse_dom(html, 'div', {'class': 'recent-item'}, req='title'):
            match_url = dom_parser2.parse_dom(item, 'a', req='href')
            if not match_url: continue

            match_url = match_url[0].attrs['href']
            # "/series/" in the URL distinguishes TV shows from movies;
            # skip results that do not match the requested video type.
            is_series = re.search('/series/', match_url, re.I)
            if (video_type == VIDEO_TYPES.MOVIE and is_series) or (video_type == VIDEO_TYPES.TVSHOW and not is_series):
                continue

            match_title_year = attrs['title']
            match_title, match_year = scraper_utils.extra_year(match_title_year)
            if not year or not match_year or year == match_year:
                result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
                results.append(result)
        return results
|
gpl-2.0
|
square/pants
|
tests/python/pants_test/targets/test_python_binary.py
|
2
|
3271
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import pytest
from pants.base.address import SyntheticAddress
from pants.base.exceptions import TargetDefinitionException
from pants.backend.python.targets.python_binary import PythonBinary
from pants_test.base_test import BaseTest
class TestPythonBinary(BaseTest):
    """Unit tests for PythonBinary entry-point inference and validation."""

    def _binary(self, spec, **kwargs):
        # Every target in these tests is a PythonBinary; wrap make_target
        # so each case only states what varies.
        return self.make_target(spec=spec, target_type=PythonBinary, **kwargs)

    def test_python_binary_must_have_some_entry_point(self):
        with pytest.raises(TargetDefinitionException):
            self._binary(':binary')

    def test_python_binary_with_entry_point_no_source(self):
        target = self._binary(':binary', entry_point='blork')
        assert target.entry_point == 'blork'

    def test_python_binary_with_source_no_entry_point(self):
        # The entry point is derived from the source path when omitted.
        assert self._binary(':binary1', source='blork.py').entry_point == 'blork'
        assert self._binary(':binary2', source='bin/blork.py').entry_point == 'bin.blork'

    def test_python_binary_with_entry_point_and_source(self):
        # Explicit entry points are accepted when they agree with the source.
        cases = [
            (':binary1', 'blork', 'blork.py'),
            (':binary2', 'blork:main', 'blork.py'),
            (':binary3', 'bin.blork:main', 'bin/blork.py'),
        ]
        for spec, entry_point, source in cases:
            target = self._binary(spec, entry_point=entry_point, source=source)
            assert target.entry_point == entry_point

    def test_python_binary_with_entry_point_and_source_mismatch(self):
        # A source that contradicts the declared entry point is rejected.
        cases = [
            (':binary1', 'blork', 'hork.py'),
            (':binary2', 'blork:main', 'hork.py'),
            (':binary3', 'bin.blork', 'blork.py'),
            (':binary4', 'bin.blork', 'bin.py'),
        ]
        for spec, entry_point, source in cases:
            with pytest.raises(TargetDefinitionException):
                self._binary(spec, entry_point=entry_point, source=source)
|
apache-2.0
|
powerlim2/project_free_insight
|
data_api/venv/lib/python2.7/site-packages/pip/_vendor/pyparsing.py
|
75
|
158096
|
# module pyparsing.py
#
# Copyright (c) 2003-2015 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form C{"<salutation>, <addressee>!"})::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString( hello ))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from C{parseString()} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.1.1"
__versionTime__ = "21 Mar 2016 05:04 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import functools
import itertools
import traceback
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
]
# Python 2/3 compatibility shims: pick the right names for the running
# interpreter so the rest of the module can use one spelling.
PY_3 = sys.version.startswith('3')
if PY_3:
    _MAX_INT = sys.maxsize
    basestring = str
    unichr = chr
    _ustr = str

    # build list of single arg builtins, that can be used as parse actions
    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]

else:
    _MAX_INT = sys.maxint
    range = xrange

    def _ustr(obj):
        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
           then < returns the unicode object | encodes it with the default encoding | ... >.
        """
        if isinstance(obj,unicode):
            return obj

        try:
            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
            # it won't break any existing code.
            return str(obj)

        except UnicodeEncodeError:
            # Else encode it: non-encodable characters become \uXXXX
            # escapes via an XML character-reference round trip.
            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
            xmlcharref = Regex('&#\d+;')
            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
            return xmlcharref.transformString(ret)

    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
    singleArgBuiltins = []
    import __builtin__
    for fname in "sum len sorted reversed list tuple set any all min max".split():
        try:
            singleArgBuiltins.append(getattr(__builtin__,fname))
        except AttributeError:
            continue

# Type object of generator expressions, used for isinstance checks later.
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
    # Empty namespace class used elsewhere in the module as a holder for
    # grouped constant attributes.
    pass

# Common character classes used to build Word expressions.
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)  # literal backslash, written as chr(92) to avoid escaping
# All printable characters except whitespace.
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc=0, msg=None, elem=None ):
        self.loc = loc
        if msg is None:
            # Single-argument form: the first argument is the message
            # itself and no parse string is recorded.
            self.msg = pstr
            self.pstr = ""
        else:
            self.msg = msg
            self.pstr = pstr
        self.parserElement = elem

    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        # Location attributes are computed lazily from loc/pstr using the
        # module-level lineno/col/line helpers (defined elsewhere in this
        # file) rather than stored on the instance.
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError(aname)

    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % \
                ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            # Splice the marker into the offending line at the error column.
            line_str = "".join((line_str[:line_column],
                                markerString, line_str[line_column:]))
        return line_str.strip()
    def __dir__(self):
        # Advertise the lazily computed attributes alongside the real ones.
        return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
    """exception thrown when parse expressions don't match class;
       supported attributes by name are:
        - lineno - returns the line number of the exception text
        - col - returns the column number of the exception text
        - line - returns the line containing the exception text
    """
    # Recoverable parse failure: alternatives (MatchFirst/Or) catch this
    # and try other expressions.
    pass
class ParseFatalException(ParseBaseException):
    """user-throwable exception thrown when inconsistent parse content
       is found; stops all parsing immediately"""
    # Unlike ParseException, this is NOT caught by alternative matching.
    pass
class ParseSyntaxException(ParseFatalException):
    """just like C{L{ParseFatalException}}, but thrown internally when an
       C{L{ErrorStop<And._ErrorStop>}} ('-' operator) indicates that parsing is to stop immediately because
       an unbacktrackable syntax error has been found"""
    def __init__(self, pe):
        # Copy the location details out of the originating exception so
        # the error message points at the real failure site.
        super(ParseSyntaxException, self).__init__(
            pe.pstr, pe.loc, pe.msg, pe.parserElement)
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """Raised by C{validate()} when the grammar contains an improperly
    recursive cycle of parser elements."""
    def __init__( self, parseElementList ):
        # The chain of parser elements that forms the recursive cycle,
        # kept for diagnostic display.
        self.parseElementTrace = parseElementList

    def __str__( self ):
        return "RecursiveGrammarException: {0}".format(self.parseElementTrace)
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>})
"""
    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
        # Passing an existing ParseResults through returns it unchanged
        # (no re-wrapping).  Fresh instances are flagged so that __init__
        # populates them exactly once.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
        # `isinstance` is bound as a default argument to make it a fast
        # local lookup in this hot path.
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            self.__asList = asList
            self.__modal = modal
            if toklist is None:
                toklist = []
            if isinstance(toklist, list):
                # copy, so later mutation of the caller's list cannot
                # corrupt this result
                self.__toklist = toklist[:]
            elif isinstance(toklist, _generatorType):
                self.__toklist = list(toklist)
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()

        # Optionally register the tokens under a results name.
        if name is not None and name:
            if not modal:
                # non-modal names accumulate every match instead of
                # keeping only the last one
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
                    else:
                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError,IndexError):
                        self[name] = toklist
    def __getitem__( self, i ):
        # Integer/slice indexing reads the token list; any other key is a
        # results-name lookup.
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                # modal name: only the most recent match is reported
                return self.__tokdict[i][-1][0]
            else:
                # accumulating (listAllMatches) name: report every match
                return ParseResults([ v[0] for v in self.__tokdict[i] ])

    def __setitem__( self, k, v, isinstance=isinstance ):
        # Three cases: a pre-offset value, a positional assignment, or a
        # plain named value (stored with offset 0).
        if isinstance(v,_ParseResultsWithOffset):
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,(int,slice)):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            # weak parent link, so nested results can resolve their name
            # without creating a reference cycle
            sub.__parent = wkref(self)

    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            mylen = len( self.__toklist )
            del self.__toklist[i]

            # convert int to slice
            if isinstance(i, int):
                if i < 0:
                    i += mylen
                i = slice(i, i+1)
            # get removed indices
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary: every stored offset past a
            # removed index shifts down by one
            for name,occurrences in self.__tokdict.items():
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
        else:
            # deleting by name drops all matches stored under that name
            del self.__tokdict[i]
    def __contains__( self, k ):
        # Membership tests defined results *names*, not token values.
        return k in self.__tokdict

    def __len__( self ): return len( self.__toklist )
    def __bool__(self): return ( not not self.__toklist )
    __nonzero__ = __bool__  # Python 2 truth-value protocol alias
    def __iter__( self ): return iter( self.__toklist )
    def __reversed__( self ): return iter( self.__toklist[::-1] )
    def iterkeys( self ):
        """Returns all named result keys."""
        if hasattr(self.__tokdict, "iterkeys"):
            # Python 2 dict exposes a lazy iterator
            return self.__tokdict.iterkeys()
        else:
            return iter(self.__tokdict)

    def itervalues( self ):
        """Returns all named result values."""
        return (self[k] for k in self.iterkeys())

    def iteritems( self ):
        # Returns an iterator of (name, value) pairs for all named results.
        return ((k, self[k]) for k in self.iterkeys())

    if PY_3:
        # On Python 3, keys()/values()/items() are simply the iterator forms.
        keys = iterkeys
        values = itervalues
        items = iteritems
    else:
        def keys( self ):
            """Returns all named result keys."""
            return list(self.iterkeys())

        def values( self ):
            """Returns all named result values."""
            return list(self.itervalues())

        def items( self ):
            """Returns all named result keys and values as a list of tuples."""
            return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
    def pop( self, *args, **kwargs):
        """Removes and returns item at specified index (default=last).
           Supports both list and dict semantics for pop(). If passed no
           argument or an integer argument, it will use list semantics
           and pop tokens from the list of parsed tokens. If passed a
           non-integer argument (most likely a string), it will use dict
           semantics and pop the corresponding value from any defined
           results names. A second default return value argument is
           supported, just as in dict.pop()."""
        if not args:
            # no positional args -> pop the last token (list semantics)
            args = [-1]
        for k,v in kwargs.items():
            if k == 'default':
                # normalize keyword default into the positional form
                args = (args[0], v)
            else:
                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
        if (isinstance(args[0], int) or
                        len(args) == 1 or
                        args[0] in self):
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            # dict semantics with a missing key and a supplied default
            defaultvalue = args[1]
            return defaultvalue
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified."""
if key in self:
return self[key]
else:
return defaultValue
    def insert( self, index, insStr ):
        """Inserts new element at location index in the list of parsed tokens."""
        self.__toklist.insert(index, insStr)
        # fixup indices in token dictionary: stored offsets at or past the
        # insertion point shift up by one
        for name,occurrences in self.__tokdict.items():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))

    def append( self, item ):
        """Add single element to end of ParseResults list of elements."""
        self.__toklist.append(item)

    def extend( self, itemseq ):
        """Add sequence of elements to end of ParseResults list of elements."""
        if isinstance(itemseq, ParseResults):
            # merging another ParseResults also merges its named results
            self += itemseq
        else:
            self.__toklist.extend(itemseq)

    def clear( self ):
        """Clear all elements and results names."""
        del self.__toklist[:]
        self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
    def __iadd__( self, other ):
        # Merge another ParseResults into this one in place: named results
        # are re-offset past our current token count, then tokens appended.
        if other.__tokdict:
            offset = len(self.__toklist)
            # negative offsets are sentinels and stay pinned to `offset`
            addoffset = lambda a: offset if a<0 else a+offset
            otheritems = other.__tokdict.items()
            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
                                for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    # reparent nested results to this merged container
                    v[0].__parent = wkref(self)
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        return self

    def __radd__(self, other):
        if isinstance(other,int) and other == 0:
            # useful for merging many ParseResults using sum() builtin
            return self.copy()
        else:
            # this may raise a TypeError - so be it
            return other + self
    def __repr__( self ):
        # Debug form: token list plus the raw name->offset dictionary.
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )

    def __str__( self ):
        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'

    def _asStringList( self, sep='' ):
        # Flatten nested results into a single list of strings, optionally
        # interleaved with `sep`.
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out

    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]

    def asDict( self ):
        """Returns the named parse results as a nested dictionary."""
        if PY_3:
            item_fn = self.items
        else:
            item_fn = self.iteritems

        def toItem(obj):
            # recursively convert nested ParseResults: dict-like if it has
            # names, list of converted values otherwise
            if isinstance(obj, ParseResults):
                if obj.haskeys():
                    return obj.asDict()
                else:
                    return [toItem(v) for v in obj]
            else:
                return obj

        return dict((k,toItem(v)) for k,v in item_fn())

    def copy( self ):
        """Returns a new copy of a C{ParseResults} object."""
        # shallow copy: token dict copied, but the token objects themselves
        # are shared with the original
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret
    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # invert the name->occurrences dict into token-offset -> name
        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
                                                            for v in vlist)
        nextLevelIndent = indent + "  "

        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""

        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name

        if not selfTag:
            if namedItemsOnly:
                # anonymous group with namedItemsOnly: emit nothing
                return ""
            else:
                selfTag = "ITEM"

        out += [ nl, indent, "<", selfTag, ">" ]

        for i,res in enumerate(self.__toklist):
            if isinstance(res,ParseResults):
                # nested group: recurse, passing down its name if it has one
                if i in namedItems:
                    out += [ res.asXML(namedItems[i],
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
                else:
                    out += [ res.asXML(None,
                                        namedItemsOnly and doctag is None,
                                        nextLevelIndent,
                                        formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = _xml_escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">",
                                xmlBodyText,
                                                "</", resTag, ">" ]

        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
self.__tokdict.values()[0][0][1] in (0,-1)):
return self.__tokdict.keys()[0]
else:
return None
    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a C{ParseResults}.
           Accepts an optional C{indent} argument so that this string can be embedded
           in a nested display of other data."""
        out = []
        NL = '\n'
        out.append( indent+_ustr(self.asList()) )
        if self.haskeys():
            # named results: render one "- name: value" line per name
            items = sorted(self.items())
            for k,v in items:
                if out:
                    out.append(NL)
                out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
                if isinstance(v,ParseResults):
                    if v:
                        out.append( v.dump(indent,depth+1) )
                    else:
                        out.append(_ustr(v))
                else:
                    out.append(_ustr(v))
        elif any(isinstance(vv,ParseResults) for vv in self):
            # no names, but nested groups: render with [index] markers
            v = self
            for i,vv in enumerate(v):
                if isinstance(vv,ParseResults):
                    out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
                else:
                    out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))

        return "".join(out)

    def pprint(self, *args, **kwargs):
        """Pretty-printer for parsed results as a list, using the C{pprint} module.
           Accepts additional positional or keyword args as defined for the
           C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})"""
        pprint.pprint(self.asList(), *args, **kwargs)

    # add support for pickle protocol
    def __getstate__(self):
        # parent weakref is dereferenced for pickling (weakrefs cannot be
        # pickled); __setstate__ rebuilds it
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent is not None and self.__parent() or None,
                   self.__accumNames,
                   self.__name ) )

    def __setstate__(self,state):
        self.__toklist = state[0]
        (self.__tokdict,
         par,
         inAccumNames,
         self.__name) = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
        if par is not None:
            self.__parent = wkref(par)
        else:
            self.__parent = None

    def __getnewargs__(self):
        # arguments replayed into __new__ when unpickling
        return self.__toklist, self.__name, self.__asList, self.__modal

    def __dir__(self):
        # expose results names alongside regular attributes
        return (dir(type(self)) + list(self.keys()))
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
    """Return the 1-based column of position *loc* within *strg*, with
    newlines acting as line separators.

    Note: the default parsing behavior is to expand tabs in the input
    string before parsing begins; see ParserElement.parseString for ways
    to keep locations consistent for input containing <TAB>s.
    """
    if loc < len(strg) and strg[loc] == '\n':
        # positions sitting exactly on a newline report column 1
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Return the 1-based line number of position *loc* within *strg*,
    with newlines acting as line separators.

    Note: the default parsing behavior is to expand tabs in the input
    string before parsing begins; see ParserElement.parseString for ways
    to keep locations consistent for input containing <TAB>s.
    """
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Return the full line of text in *strg* that contains position *loc*,
    with newlines acting as line separators."""
    start = strg.rfind("\n", 0, loc) + 1   # 0 when loc is on the first line
    end = strg.find("\n", loc)
    return strg[start:] if end < 0 else strg[start:end]
# Default debug hooks installed by setDebug(); each simply prints a trace
# of the match attempt, success, or failure.
def _defaultStartDebugAction( instring, loc, expr ):
    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))

def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))

def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    print ("Exception raised:" + _ustr(exc))

def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
    """Wrap *func* so it can be called with up to C{maxargs+1} arguments,
    progressively dropping leading arguments until the call succeeds.

    This lets user parse actions be written as fn(s,loc,toks), fn(loc,toks),
    fn(toks) or fn().  Mutable one-element lists stand in for `nonlocal`
    (Python 2 compatibility).
    """
    if func in singleArgBuiltins:
        # builtins like int/float take exactly one argument
        return lambda s,l,t: func(t)
    limit = [0]
    foundArity = [False]
    def wrapper(*args):
        while 1:
            try:
                # the trailing marker comment tags THIS line so the except
                # clause below can tell our own TypeError from one raised
                # inside the user's function
                ret = func(*args[limit[0]:]) #~@$^*)+_(&%#!=-`~;:"[]{}
                foundArity[0] = True
                return ret
            except TypeError:
                # re-raise TypeErrors if they did not come from our arity testing
                if foundArity[0]:
                    raise
                else:
                    try:
                        tb = sys.exc_info()[-1]
                        exc_source_line = traceback.extract_tb(tb)[-1][-1]
                        if not exc_source_line.endswith('#~@$^*)+_(&%#!=-`~;:"[]{}'):
                            raise
                    finally:
                        # break the traceback reference cycle
                        del tb

                if limit[0] <= maxargs:
                    # try again with one fewer leading argument
                    limit[0] += 1
                    continue
                raise
    return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
    @staticmethod
    def setDefaultWhitespaceChars( chars ):
        """Overrides the default whitespace chars
        """
        # affects elements created AFTER this call (each element snapshots
        # the default in its own __init__)
        ParserElement.DEFAULT_WHITE_CHARS = chars

    @staticmethod
    def inlineLiteralsUsing(cls):
        """
        Set class to be used for inclusion of string literals into a parser.
        """
        ParserElement.literalStringClass = cls

    def __init__( self, savelist=False ):
        self.parseAction = list()
        self.failAction = None
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False # used when checking for left-recursion
        self.keepTabs = False
        self.ignoreExprs = list()
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
        self.errmsg = ""
        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
        self.debugActions = ( None, None, None ) #custom debug actions
        self.re = None
        self.callPreparse = True # used to avoid redundant calls to preParse
        self.callDuringTry = False
    def copy( self ):
        """Make a copy of this C{ParserElement}.  Useful for defining different parse actions
           for the same parsing pattern, using copies of the original parse element."""
        cpy = copy.copy( self )
        # lists must be duplicated so edits to the copy don't leak back
        cpy.parseAction = self.parseAction[:]
        cpy.ignoreExprs = self.ignoreExprs[:]
        if self.copyDefaultWhiteChars:
            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        return cpy

    def setName( self, name ):
        """Define name for this expression, for use in debugging."""
        self.name = name
        self.errmsg = "Expected " + self.name
        if hasattr(self,"exception"):
            # keep any pre-built exception's message in sync
            self.exception.msg = self.errmsg
        return self

    def setResultsName( self, name, listAllMatches=False ):
        """Define name for referencing matching tokens as a nested attribute
           of the returned parse results.
           NOTE: this returns a *copy* of the original C{ParserElement} object;
           this is so that the client can define a basic element, such as an
           integer, and reference it in multiple places with different names.

           You can also set results names using the abbreviated syntax,
           C{expr("name")} in place of C{expr.setResultsName("name")} -
           see L{I{__call__}<__call__>}.
        """
        newself = self.copy()
        if name.endswith("*"):
            # trailing '*' is shorthand for listAllMatches=True
            name = name[:-1]
            listAllMatches=True
        newself.resultsName = name
        newself.modalResults = not listAllMatches
        return newself

    def setBreak(self,breakFlag = True):
        """Method to invoke the Python pdb debugger when this element is
           about to be parsed. Set C{breakFlag} to True to enable, False to
           disable.
        """
        if breakFlag:
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod( instring, loc, doActions, callPreParse )
            # remember the wrapped method so the breakpoint can be removed
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        else:
            if hasattr(self._parse,"_originalParseMethod"):
                self._parse = self._parse._originalParseMethod
        return self
    def setParseAction( self, *fns, **kwargs ):
        """Define action to perform when successfully matching parse element definition.
           Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
           C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
            - s   = the original string being parsed (see note below)
            - loc = the location of the matching substring
            - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
           If the functions in fns modify the tokens, they can return them as the return
           value from fn, and the modified list of tokens will replace the original.
           Otherwise, fn does not need to return any value.

           Note: the default parsing behavior is to expand tabs in the input string
           before starting the parsing process.  See L{I{parseString}<parseString>} for more information
           on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
           consistent view of the parsed string, the parse location, and line and column
           positions within the parsed string.
        """
        # _trim_arity adapts each callable to the (s,loc,toks) signature
        self.parseAction = list(map(_trim_arity, list(fns)))
        self.callDuringTry = kwargs.get("callDuringTry", False)
        return self

    def addParseAction( self, *fns, **kwargs ):
        """Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
        self.parseAction += list(map(_trim_arity, list(fns)))
        # callDuringTry is sticky: once any action requests it, it stays on
        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
        return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>}. Optional keyword argument C{message} can
be used to define a custom message to be used in the raised exception."""
msg = kwargs.get("message") or "failed user-defined condition"
for fn in fns:
def pa(s,l,t):
if not bool(_trim_arity(fn)(s,l,t)):
raise ParseException(s,l,msg)
return t
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
    def setFailAction( self, fn ):
        """Define action to perform if parsing fails at this expression.
           Fail acton fn is a callable function that takes the arguments
           C{fn(s,loc,expr,err)} where:
            - s = string being parsed
            - loc = location where expression match was attempted and failed
            - expr = the parse expression that failed
            - err = the exception thrown
           The function returns no value.  It may throw C{L{ParseFatalException}}
           if it is desired to stop parsing immediately."""
        self.failAction = fn
        return self

    def _skipIgnorables( self, instring, loc ):
        # Repeatedly consume any of the registered ignore expressions
        # (comments, etc.) until none match at the current location.
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    while 1:
                        loc,dummy = e._parse( instring, loc )
                        exprsFound = True
                except ParseException:
                    pass
        return loc

    def preParse( self, instring, loc ):
        # Advance past ignorables and leading whitespace before matching.
        if self.ignoreExprs:
            loc = self._skipIgnorables( instring, loc )

        if self.skipWhitespace:
            wt = self.whiteChars
            instrlen = len(instring)
            while loc < instrlen and instring[loc] in wt:
                loc += 1

        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        # Base implementation matches nothing; subclasses override.
        return loc, []

    def postParse( self, instring, loc, tokenlist ):
        # Hook for subclasses to massage the matched tokens.
        return tokenlist
#~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        # Core match driver: preparse -> parseImpl -> postParse -> parse
        # actions, with debug/fail hooks.  The debugging branch duplicates
        # the fast path so the common case carries no hook overhead.
        debugging = ( self.debug ) #and doActions )

        if debugging or self.failAction:
            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    # normalize running-off-the-end into a ParseException
                    raise ParseException( instring, len(instring), self.errmsg, self )
            except ParseBaseException as err:
                #~ print ("Exception raised:", err)
                if self.debugActions[2]:
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse and self.callPreparse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = preloc
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                # subclass guarantees no IndexError: skip the try overhead
                loc,tokens = self.parseImpl( instring, preloc, doActions )

        tokens = self.postParse( instring, loc, tokens )

        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and (doActions or self.callDuringTry):
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            # action returned replacement tokens: re-wrap them
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseBaseException as err:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )

        if debugging:
            #~ print ("Matched",self,"->",retTokens.asList())
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )

        return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        lookup = (self,instring,loc,callPreParse,doActions)
        if lookup in ParserElement._exprArgCache:
            value = ParserElement._exprArgCache[ lookup ]
            if isinstance(value, Exception):
                # failures are cached too, and re-raised on a hit
                raise value
            # copy the cached ParseResults so callers can't mutate the cache
            return (value[0],value[1].copy())
        else:
            try:
                value = self._parseNoCache( instring, loc, doActions, callPreParse )
                ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
                return value
            except ParseBaseException as pe:
                # drop the traceback before caching, so the cache does not
                # pin every frame of the failed parse alive
                pe.__traceback__ = None
                ParserElement._exprArgCache[ lookup ] = pe
                raise

    # default parse driver; enablePackrat() rebinds this to _parseCache
    _parse = _parseNoCache

    # argument cache for optimizing repeated calls when backtracking through recursive expressions
    _exprArgCache = {}
    @staticmethod
    def resetCache():
        ParserElement._exprArgCache.clear()

    _packratEnabled = False
    @staticmethod
    def enablePackrat():
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done of
           both valid results and parsing exceptions.

           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method C{ParserElement.enablePackrat()}.  If
           your program uses C{psyco} to "compile as you go", you must call
           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
           Python will crash.  For best results, call C{enablePackrat()} immediately
           after importing pyparsing.
        """
        # one-way switch: rebinds the shared parse driver for ALL elements
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            ParserElement._parse = ParserElement._parseCache
    def parseString( self, instring, parseAll=False ):
        """Execute the parse expression with the given string.
           This is the main interface to the client code, once the complete
           expression has been built.

           If you want the grammar to require that the entire input string be
           successfully parsed, then set C{parseAll} to True (equivalent to ending
           the grammar with C{L{StringEnd()}}).

           Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
           in order to report proper column numbers in parse actions.
           If the input string contains tabs and
           the grammar uses parse actions that use the C{loc} argument to index into the
           string being parsed, you can ensure you have a consistent view of the input
           string by:
            - calling C{parseWithTabs} on your grammar before calling C{parseString}
              (see L{I{parseWithTabs}<parseWithTabs>})
            - define your parse action using the full C{(s,loc,toks)} signature, and
              reference the input string using the parse action's C{s} argument
            - explictly expand the tabs in your input string before calling
              C{parseString}
        """
        ParserElement.resetCache()
        if not self.streamlined:
            self.streamline()
            #~ self.saveAsList = True
        for e in self.ignoreExprs:
            e.streamline()
        if not self.keepTabs:
            instring = instring.expandtabs()
        try:
            loc, tokens = self._parse( instring, 0 )
            if parseAll:
                # require the remainder (after trailing whitespace) to be empty
                loc = self.preParse( instring, loc )
                se = Empty() + StringEnd()
                se._parse( instring, loc )
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
        else:
            return tokens

    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
        """Scan the input string for expression matches.  Each match will return the
           matching tokens, start location, and end location.  May be called with optional
           C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
           C{overlap} is specified, then overlapping matches will be reported.

           Note that the start and end locations are reported relative to the string
           being parsed.  See L{I{parseString}<parseString>} for more information on parsing
           strings with embedded tabs."""
        if not self.streamlined:
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()

        if not self.keepTabs:
            instring = _ustr(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        # hoist method lookups out of the scanning loop
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while loc <= instrlen and matches < maxMatches:
                try:
                    preloc = preparseFn( instring, loc )
                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
                except ParseException:
                    # no match here: slide one character past the preparse point
                    loc = preloc+1
                else:
                    if nextLoc > loc:
                        matches += 1
                        yield tokens, preloc, nextLoc
                        if overlap:
                            # NOTE(review): `nextloc` (lowercase) below is a
                            # distinct variable from `nextLoc` above — looks
                            # suspicious but matches upstream; verify intent.
                            nextloc = preparseFn( instring, loc )
                            if nextloc > loc:
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # zero-width match: advance to avoid an infinite loop
                        loc = preloc+1
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
    def transformString( self, instring ):
        """Extension to C{L{scanString}}, to modify matching text with modified tokens that may
           be returned from a parse action.  To use C{transformString}, define a grammar and
           attach a parse action to it that modifies the returned token list.
           Invoking C{transformString()} on a target string will then scan for matches,
           and replace the matched text patterns according to the logic in the parse
           action.  C{transformString()} returns the resulting transformed string."""
        out = []
        lastE = 0
        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
        # keep string locs straight between transformString and scanString
        self.keepTabs = True
        try:
            for t,s,e in self.scanString( instring ):
                # untouched text between the previous match and this one
                out.append( instring[lastE:s] )
                if t:
                    if isinstance(t,ParseResults):
                        out += t.asList()
                    elif isinstance(t,list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            # trailing text after the final match
            out.append(instring[lastE:])
            out = [o for o in out if o]
            return "".join(map(_ustr,_flatten(out)))
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc

    def searchString( self, instring, maxMatches=_MAX_INT ):
        """Another extension to C{L{scanString}}, simplifying the access to the tokens found
           to match the given parse expression.  May be called with optional
           C{maxMatches} argument, to clip searching after 'n' matches are found.
        """
        try:
            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # catch and re-raise exception from here, clears out pyparsing internal stack trace
                raise exc
def __add__(self, other ):
"""Implementation of + operator - returns C{L{And}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a C{L{ParserElement}}"""
if isinstance( other, basestring ):
other = ParserElement.literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
    """Implementation of - operator, returns C{L{And}} with error stop"""
    # promote plain strings to literal expressions first
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        # the _ErrorStop marker makes a failure after this point non-backtrackable
        return And([self, And._ErrorStop(), other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rsub__(self, other):
    """Implementation of - operator when left operand is not a C{L{ParserElement}}"""
    # promote plain strings to literal expressions first
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        # delegate to the (possibly converted) left operand's __sub__
        return other - self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __mul__(self, other):
    """Implementation of * operator, allows use of C{expr * 3} in place of
    C{expr + expr + expr}.  Expressions may also me multiplied by a 2-integer
    tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
    may also include C{None} as in:
     - C{expr*(n,None)} or C{expr*(n,)} is equivalent
       to C{expr*n + L{ZeroOrMore}(expr)}
       (read as "at least n instances of C{expr}")
     - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
       (read as "0 to n instances of C{expr}")
     - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
     - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}

    Note that C{expr*(None,n)} does not raise an exception if
    more than n exprs exist in the input stream; that is,
    C{expr*(None,n)} does not enforce a maximum number of expr
    occurrences.  If this behavior is desired, then write
    C{expr*(None,n) + ~expr}
    """
    if isinstance(other, int):
        minElements, optElements = other, 0
    elif isinstance(other, tuple):
        # normalize to a full (min, max) pair; (n,) becomes (n, None)
        other = (other + (None, None))[:2]
        if other[0] is None:
            other = (0, other[1])
        if isinstance(other[0], int) and other[1] is None:
            if other[0] == 0:
                return ZeroOrMore(self)
            if other[0] == 1:
                return OneOrMore(self)
            else:
                return self * other[0] + ZeroOrMore(self)
        elif isinstance(other[0], int) and isinstance(other[1], int):
            minElements, optElements = other
            optElements -= minElements
        else:
            # bugfix: format values were passed as extra TypeError args and
            # never interpolated; apply the % operator explicitly
            raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects"
                            % (type(other[0]), type(other[1])))
    else:
        # bugfix: same un-applied format string as above
        raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))

    if minElements < 0:
        raise ValueError("cannot multiply ParserElement by negative value")
    if optElements < 0:
        raise ValueError("second tuple value must be greater or equal to first tuple value")
    if minElements == optElements == 0:
        raise ValueError("cannot multiply ParserElement by 0 or (0,0)")

    if optElements:
        def makeOptionalList(n):
            # nest n Optionals so at most n extra occurrences match
            if n > 1:
                return Optional(self + makeOptionalList(n - 1))
            else:
                return Optional(self)
        if minElements:
            if minElements == 1:
                ret = self + makeOptionalList(optElements)
            else:
                ret = And([self] * minElements) + makeOptionalList(optElements)
        else:
            ret = makeOptionalList(optElements)
    else:
        if minElements == 1:
            ret = self
        else:
            ret = And([self] * minElements)
    return ret
def __rmul__(self, other):
    """Multiplication is commutative here, so reuse __mul__."""
    return self * other
def __or__(self, other):
    """Implementation of | operator - returns C{L{MatchFirst}}"""
    # promote plain strings to literal expressions first
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return MatchFirst([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __ror__(self, other):
    """Implementation of | operator when left operand is not a C{L{ParserElement}}"""
    # promote plain strings to literal expressions first
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        # delegate to the (possibly converted) left operand's __or__
        return other | self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __xor__(self, other):
    """Implementation of ^ operator - returns C{L{Or}}"""
    # promote plain strings to literal expressions first
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return Or([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rxor__(self, other):
    """Implementation of ^ operator when left operand is not a C{L{ParserElement}}"""
    # promote plain strings to literal expressions first
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        # delegate to the (possibly converted) left operand's __xor__
        return other ^ self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __and__(self, other):
    """Implementation of & operator - returns C{L{Each}}"""
    # promote plain strings to literal expressions first
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        return Each([self, other])
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __rand__(self, other):
    """Implementation of & operator when left operand is not a C{L{ParserElement}}"""
    # promote plain strings to literal expressions first
    if isinstance(other, basestring):
        other = ParserElement.literalStringClass(other)
    if isinstance(other, ParserElement):
        # delegate to the (possibly converted) left operand's __and__
        return other & self
    warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
                  SyntaxWarning, stacklevel=2)
    return None
def __invert__( self ):
    """Implementation of ~ operator - returns C{L{NotAny}} (negative lookahead)."""
    return NotAny( self )
def __call__(self, name=None):
    """Shortcut for C{L{setResultsName}}, with C{listAllMatches=default}::
         userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
       could be written as::
         userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")

       If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
       passed as C{True}.

       If C{name} is omitted, same as calling C{L{copy}}.
    """
    # no name -> behave like copy(); otherwise attach the results name
    return self.copy() if name is None else self.setResultsName(name)
def suppress( self ):
    """Suppresses the output of this C{ParserElement}; useful to keep punctuation from
    cluttering up returned output.  Returns a new C{Suppress} wrapper around this element.
    """
    return Suppress( self )
def leaveWhitespace( self ):
    """Disables the skipping of whitespace before matching the characters in the
    C{ParserElement}'s defined pattern.  This is normally only used internally by
    the pyparsing module, but may be needed in some whitespace-sensitive grammars.
    Returns self to allow chaining.
    """
    self.skipWhitespace = False
    return self
def setWhitespaceChars( self, chars ):
    """Overrides the default whitespace chars.  Re-enables whitespace skipping and
    marks this element as no longer using the class-wide default white chars.
    Returns self to allow chaining.
    """
    self.skipWhitespace = True
    self.whiteChars = chars
    # prevent copy() from resetting these chars back to the class default
    self.copyDefaultWhiteChars = False
    return self
def parseWithTabs( self ):
    """Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
    Must be called before C{parseString} when the input grammar contains elements that
    match C{<TAB>} characters.  Returns self to allow chaining."""
    self.keepTabs = True
    return self
def ignore(self, other):
    """Define expression to be ignored (e.g., comments) while doing pattern
    matching; may be called repeatedly, to define multiple comment or other
    ignorable patterns.  Returns self to allow chaining.
    """
    # bare strings become suppressed literals
    if isinstance(other, basestring):
        other = Suppress(other)
    if not isinstance(other, Suppress):
        # wrap a copy so the caller's expression is not modified
        self.ignoreExprs.append(Suppress(other.copy()))
    elif other not in self.ignoreExprs:
        # a Suppress is stored as-is, but only once
        self.ignoreExprs.append(other)
    return self
def setDebugActions(self, startAction, successAction, exceptionAction):
    """Enable display of debugging messages while doing pattern matching."""
    # fall back to the module defaults for any action the caller left as None
    start = startAction or _defaultStartDebugAction
    success = successAction or _defaultSuccessDebugAction
    failure = exceptionAction or _defaultExceptionDebugAction
    self.debugActions = (start, success, failure)
    self.debug = True
    return self
def setDebug(self, flag=True):
    """Enable display of debugging messages while doing pattern matching.
    Set C{flag} to True to enable, False to disable."""
    if not flag:
        self.debug = False
    else:
        # installing the default actions also sets self.debug = True
        self.setDebugActions(_defaultStartDebugAction,
                             _defaultSuccessDebugAction,
                             _defaultExceptionDebugAction)
    return self
def __str__( self ):
    # the element's display name, set by subclasses / setName
    return self.name
def __repr__( self ):
    # repr mirrors str, routed through the unicode-safe helper
    return _ustr(self)
def streamline( self ):
    """Mark this element as streamlined and invalidate its cached string form."""
    self.streamlined = True
    self.strRepr = None
    return self
def checkRecursion( self, parseElementList ):
    # no-op in the base class; compound expressions override this
    pass
def validate( self, validateTrace=None ):
    """Check defined expressions for valid structure, check for infinite recursive definitions.

    C{validateTrace} is unused in the base implementation; its default was
    changed from a mutable C{[]} to C{None} to avoid the shared-mutable-default
    pitfall (behavior is unchanged since the argument is never read here).
    """
    self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
    """Execute the parse expression on the given file or filename.
    If a filename is specified (instead of a file object),
    the entire file is opened, read, and closed before parsing.
    """
    try:
        file_contents = file_or_filename.read()
    except AttributeError:
        # not a file-like object: treat it as a path name.
        # use a context manager so the handle is closed even if read() raises
        # (the original open/read/close leaked the handle on error).
        with open(file_or_filename, "r") as f:
            file_contents = f.read()
    try:
        return self.parseString(file_contents, parseAll)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clears out pyparsing internal stack trace
            raise exc
def __eq__(self, other):
    """Two elements are equal if identical or attribute-for-attribute equal;
    comparing against a string succeeds if this element parses that string fully."""
    if isinstance(other, ParserElement):
        return self is other or vars(self) == vars(other)
    if isinstance(other, basestring):
        try:
            self.parseString(_ustr(other), parseAll=True)
        except ParseBaseException:
            return False
        return True
    return super(ParserElement,self)==other
def __ne__(self,other):
    # defined explicitly for Python 2 compatibility (no automatic negation of __eq__)
    return not (self == other)
def __hash__(self):
    # hash by identity: custom __eq__ would otherwise make instances unhashable in py3
    return hash(id(self))
def __req__(self,other):
    # reflected equality helper, mirrors __eq__
    return self == other
def __rne__(self,other):
    # reflected inequality helper, mirrors __ne__
    return not (self == other)
def runTests(self, tests, parseAll=False):
    """Execute the parse expression on a series of test strings, showing each
    test, the parsed results or where the parse failed.  Quick and easy way to
    run a parse expression against a list of sample strings.

    Parameters:
     - tests - a list of separate test strings, or a multiline string of test strings
     - parseAll - (default=False) - flag to pass to C{L{parseString}} when running tests
    """
    if isinstance(tests, basestring):
        # split a multiline string into individual stripped test cases
        tests = map(str.strip, tests.splitlines())
    for test in tests:
        report = [test]
        try:
            report.append(self.parseString(test, parseAll=parseAll).dump())
        except ParseException as pe:
            # point a caret at the failure location
            if '\n' in test:
                report.append(line(pe.loc, test))
                report.append(' '*(col(pe.loc,test)-1) + '^')
            else:
                report.append(' '*pe.loc + '^')
            report.append(str(pe))
        report.append('')
        print('\n'.join(report))
class Token(ParserElement):
    """Abstract C{ParserElement} subclass, for defining atomic matching patterns."""
    def __init__( self ):
        # tokens never need to save match lists
        super(Token,self).__init__( savelist=False )
class Empty(Token):
    """An empty token, will always match."""
    def __init__( self ):
        super(Empty,self).__init__()
        self.name = "Empty"
        # matches zero characters, so it can never raise IndexError
        self.mayReturnEmpty = True
        self.mayIndexError = False
class NoMatch(Token):
    """A token that will never match."""
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"

    def parseImpl( self, instring, loc, doActions=True ):
        # always fails, by design
        raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            # degrade an empty literal into an always-matching Empty token
            self.__class__ = Empty
        self.name = '"%s"' % _ustr(self.match)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False

    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
# short alias used internally, and the default class used when bare strings
# appear in expressions (see ParserElement.literalStringClass usage above)
_L = Literal
ParserElement.literalStringClass = Literal
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
    immediately followed by a non-keyword character.  Compare with C{L{Literal}}::
      Literal("if") will match the leading C{'if'} in C{'ifAndOnlyIf'}.
      Keyword("if") will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
    Accepts two optional constructor arguments in addition to the keyword string:
    C{identChars} is a string of characters that would be valid identifier characters,
    defaulting to all alphanumerics + "_" and "$"; C{caseless} allows case-insensitive
    matching, default is C{False}.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"

    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # compare in upper case throughout for caseless matching
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = set(identChars)

    def parseImpl( self, instring, loc, doActions=True ):
        # a keyword matches only when both the character after and the character
        # before the match are not identifier characters (word-boundary check)
        if self.caseless:
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)

    def copy(self):
        # copies revert to the class-wide default identifier chars
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c

    @staticmethod
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
    Note: the matched results will always be in the case of the given
    match string, NOT the case of the input text.
    """
    def __init__( self, matchString ):
        # store and compare the upper-cased form; keep the original for results
        super(CaselessLiteral,self).__init__( matchString.upper() )
        # Preserve the defining literal.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[ loc:loc+self.matchLen ].upper() == self.match:
            return loc+self.matchLen, self.returnString
        raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
    """Caseless version of C{L{Keyword}}: matches the keyword regardless of the
    case of the input, while still enforcing keyword (word-boundary) semantics.
    """
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )

    def parseImpl( self, instring, loc, doActions=True ):
        # bugfix: also reject a match preceded by an identifier character,
        # matching the boundary check performed by Keyword.parseImpl (the
        # original omitted it, so e.g. "abcFOO" could match keyword "foo")
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
    """Token for matching words composed of allowed character sets.
    Defined with string containing all allowed initial characters,
    an optional string containing allowed body characters (if omitted,
    defaults to the initial character set), and an optional minimum,
    maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction. An optional
    C{excludeChars} parameter can list characters that might be found in
    the input C{bodyChars} string; useful to define a word of all printables
    except for one or two characters, for instance.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
        super(Word,self).__init__()
        if excludeChars:
            # remove excluded characters from both allowed sets
            initChars = ''.join(c for c in initChars if c not in excludeChars)
            if bodyChars:
                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
        self.initCharsOrig = initChars
        self.initChars = set(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = set(bodyChars)
        else:
            # body defaults to the initial character set
            self.bodyCharsOrig = initChars
            self.bodyChars = set(initChars)

        self.maxSpecified = max > 0

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact length overrides both min and max
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.asKeyword = asKeyword

        # try to compile an equivalent regex for faster matching; only possible
        # when no length limits are given and no literal space is in the sets
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.initCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            if self.asKeyword:
                self.reString = r"\b"+self.reString+r"\b"
            try:
                self.re = re.compile( self.reString )
            except Exception:
                # bugfix: was a bare except, which would also swallow
                # KeyboardInterrupt/SystemExit; fall back to char-by-char matching
                self.re = None

    def parseImpl( self, instring, loc, doActions=True ):
        # fast path: use the precompiled regex when available
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                raise ParseException(instring, loc, self.errmsg, self)
            loc = result.end()
            return loc, result.group()

        if not(instring[ loc ] in self.initChars):
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1

        throwException = False
        if loc - start < self.minLen:
            throwException = True
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            # a max was given and more body chars follow -> word is too long
            throwException = True
        if self.asKeyword:
            # keyword mode: neighbors must not be body characters
            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
                throwException = True

        if throwException:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except Exception:
            # bugfix: was a bare except; fall through to the computed repr
            pass

        if self.strRepr is None:

            def charsAsStr(s):
                # abbreviate long character sets for display
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s

            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)

        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
    Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    compiledREtype = type(re.compile("[A-Z]"))

    def __init__( self, pattern, flags=0):
        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()

        if isinstance(pattern, basestring):
            if not pattern:
                warnings.warn("null string passed to Regex; use Empty() instead",
                        SyntaxWarning, stacklevel=2)

            self.pattern = pattern
            self.flags = flags

            try:
                self.re = re.compile(self.pattern, self.flags)
                self.reString = self.pattern
            except sre_constants.error:
                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                    SyntaxWarning, stacklevel=2)
                raise

        elif isinstance(pattern, Regex.compiledREtype):
            # accept a pre-compiled regex object directly
            self.re = pattern
            self.pattern = \
                self.reString = str(pattern)
            self.flags = flags

        else:
            raise ValueError("Regex may only be constructed with a string or a compiled RE object")

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            # expose named groups as named results
            for k in d:
                ret[k] = d[k]
        return loc,ret

    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except Exception:
            # bugfix: was a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit; fall through to computed repr
            pass

        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)

        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
        r"""Defined with the following parameters:
            - quoteChar - string of one or more characters defining the quote delimiting string
            - escChar - character to escape quotes, typically backslash (default=None)
            - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
            - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
            - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
            - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
            - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
        """
        super(QuotedString,self).__init__()

        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if not quoteChar:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()

        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if not endQuoteChar:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()

        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        self.convertWhitespaceEscapes = convertWhitespaceEscapes

        # build a regex for the quoted body: any char that is not the end
        # quote's first char (nor the escape char); newlines allowed only
        # in multiline mode
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # allow prefixes of a multi-char end quote as long as the full
            # end-quote sequence does not follow
            self.pattern += (
                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            # bugfix: replacement template must be a raw string; "\g" in a
            # normal string is an invalid escape (error on modern Python)
            self.escCharReplacePattern = re.escape(self.escChar)+r"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))

        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayIndexError = False
        self.mayReturnEmpty = True

    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-char check before running the full regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            raise ParseException(instring, loc, self.errmsg, self)

        loc = result.end()
        ret = result.group()

        if self.unquoteResults:

            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]

            if isinstance(ret,basestring):
                # replace escaped whitespace
                if '\\' in ret and self.convertWhitespaceEscapes:
                    ws_map = {
                        r'\t' : '\t',
                        r'\n' : '\n',
                        r'\f' : '\f',
                        r'\r' : '\r',
                    }
                    for wslit,wschar in ws_map.items():
                        ret = ret.replace(wslit, wschar)

                # replace escaped characters
                if self.escChar:
                    # bugfix: raw string for the \g<1> group reference
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)

                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)

        return loc, ret

    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except Exception:
            # bugfix: was a bare except; fall through to computed repr
            pass

        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)

        return self.strRepr
class CharsNotIn(Token):
    """Token for matching words composed of characters *not* in a given set.
    Defined with string containing all disallowed characters, and an optional
    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
    minimum value < 1 is not valid); the default values for C{max} and C{exact}
    are 0, meaning no maximum or exact length restriction.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        self.skipWhitespace = False
        self.notChars = notChars

        if min < 1:
            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact length overrides both min and max
            self.maxLen = exact
            self.minLen = exact

        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.mayIndexError = False

    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            raise ParseException(instring, loc, self.errmsg, self)

        start = loc
        loc += 1
        notchars = self.notChars
        maxlen = min( start+self.maxLen, len(instring) )
        while loc < maxlen and \
              (instring[loc] not in notchars):
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]

    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except Exception:
            # bugfix: was a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit; fall through to computed repr
            pass

        if self.strRepr is None:
            if len(self.notChars) > 4:
                self.strRepr = "!W:(%s...)" % self.notChars[:4]
            else:
                self.strRepr = "!W:(%s)" % self.notChars

        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace.  Normally, whitespace is ignored
    by pyparsing grammars.  This class is included when some whitespace structures
    are significant.  Define with a string containing the whitespace characters to be
    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
    as defined for the C{L{Word}} class."""
    # display names for each matchable whitespace character
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # whatever this token matches must not also be skipped as leading whitespace
        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
        #~ self.leaveWhitespace()
        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name

        self.minLen = min

        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = _MAX_INT

        if exact > 0:
            # exact length overrides both min and max
            self.maxLen = exact
            self.minLen = exact

    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            raise ParseException(instring, loc, self.errmsg, self)
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1

        if loc - start < self.minLen:
            raise ParseException(instring, loc, self.errmsg, self)

        return loc, instring[start:loc]
class _PositionToken(Token):
    # base class for tokens that match a *position* (zero-width) rather than text
    def __init__( self ):
        super(_PositionToken,self).__init__()
        self.name=self.__class__.__name__
        self.mayReturnEmpty = True
        self.mayIndexError = False
class GoToColumn(_PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        self.col = colno

    def preParse( self, instring, loc ):
        # skip whitespace (and ignorables) until the target column is reached
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self._skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            raise ParseException( instring, loc, "Text not in expected column", self )
        # consume the text from here up to the target column and return it
        newloc = loc + self.col - thiscol
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(_PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # skip all default whitespace except newlines, which delimit lines
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected start of line"

    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # NOTE(review): advances loc (not preloc) past a newline found after
        # whitespace skipping; this mirrors the original logic — confirm intent
        if instring[preloc] == "\n":
            loc += 1
        return loc

    def parseImpl( self, instring, loc, doActions=True ):
        # matches at position 0, at a position that is all-skippable from 0,
        # or immediately after a newline
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class LineEnd(_PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # skip all default whitespace except newlines, which delimit lines
        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
        self.errmsg = "Expected end of line"

    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                # consume the newline and return it as the match
                return loc+1, "\n"
            else:
                raise ParseException(instring, loc, self.errmsg, self)
        elif loc == len(instring):
            # end of string also counts as end of line; loc+1 is intentional
            # here so repeated LineEnds do not loop forever
            return loc+1, []
        else:
            raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
    """Matches if current position is at the beginning of the parse string"""
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"

    def parseImpl( self, instring, loc, doActions=True ):
        if loc != 0:
            # see if entire string up to here is just whitespace and ignoreables
            if loc != self.preParse( instring, 0 ):
                raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class StringEnd(_PositionToken):
    """Matches if current position is at the end of the parse string"""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"

    def parseImpl( self, instring, loc, doActions=True ):
        if loc < len(instring):
            raise ParseException(instring, loc, self.errmsg, self)
        elif loc == len(instring):
            # advance past the end so repeated StringEnds do not loop forever
            return loc+1, []
        elif loc > len(instring):
            return loc, []
        else:
            # unreachable (the three cases above are exhaustive); kept for safety
            raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
    """Matches if the current position is at the beginning of a Word, and
    is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
    the string being parsed, or at the beginning of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordStart,self).__init__()
        self.wordChars = set(wordChars)
        self.errmsg = "Not at the start of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        # position 0 always qualifies; otherwise the previous char must be a
        # non-word char and the current char a word char
        if loc != 0:
            if (instring[loc-1] in self.wordChars or
                instring[loc] not in self.wordChars):
                raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class WordEnd(_PositionToken):
    """Matches if the current position is at the end of a Word, and
    is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
    the string being parsed, or at the end of a line.
    """
    def __init__(self, wordChars = printables):
        super(WordEnd,self).__init__()
        self.wordChars = set(wordChars)
        # whitespace is significant here: must test the char right at loc
        self.skipWhitespace = False
        self.errmsg = "Not at the end of a word"

    def parseImpl(self, instring, loc, doActions=True ):
        instrlen = len(instring)
        # end-of-string always qualifies; otherwise the current char must be a
        # non-word char and the previous char a word char
        if instrlen>0 and loc<instrlen:
            if (instring[loc] in self.wordChars or
                instring[loc-1] not in self.wordChars):
                raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
    Maintains the list of contained expressions in C{self.exprs}; concrete
    subclasses (C{And}, C{Or}, C{MatchFirst}, C{Each}) define how those
    expressions are combined during parsing.
    """
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        if isinstance( exprs, _generatorType ):
            exprs = list(exprs)
        if isinstance( exprs, basestring ):
            self.exprs = [ Literal( exprs ) ]
        elif isinstance( exprs, collections.Sequence ):
            # if sequence of strings provided, wrap with Literal
            if all(isinstance(expr, basestring) for expr in exprs):
                exprs = map(Literal, exprs)
            self.exprs = list(exprs)
        else:
            try:
                self.exprs = list( exprs )
            except TypeError:
                # a single non-iterable expression
                self.exprs = [ exprs ]
        self.callPreparse = False
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        """Append another contained expression; invalidates the cached repr."""
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
           all contained expressions."""
        self.skipWhitespace = False
        # copy contained exprs so callers' original expressions are not mutated
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        if isinstance( other, Suppress ):
            # avoid registering the same Suppress expression twice
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except Exception:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; fall back to building a repr
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
        self.errmsg = "Expected " + _ustr(self)
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # NOTE: mutable default argument is only read here, then copied
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
    def copy(self):
        # deep-copy the contained expressions along with the element itself
        ret = super(ParseExpression,self).copy()
        ret.exprs = [e.copy() for e in self.exprs]
        return ret
class And(ParseExpression):
    """Requires all given C{ParseExpression}s to be found in the given order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'+'} operator.
    """
    class _ErrorStop(Empty):
        # Sentinel inserted by the '-' operator: once it is passed during a
        # parse, subsequent failures are escalated to non-backtrackable
        # ParseSyntaxExceptions (see parseImpl below).
        def __init__(self, *args, **kwargs):
            super(And._ErrorStop,self).__init__(*args, **kwargs)
            self.name = '-'
            self.leaveWhitespace()
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # an And can only match empty input if every element can
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        # adopt the leading element's whitespace-handling behavior
        self.setWhitespaceChars( self.exprs[0].whiteChars )
        self.skipWhitespace = self.exprs[0].skipWhitespace
        self.callPreparse = True
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        errorStop = False
        for e in self.exprs[1:]:
            if isinstance(e, And._ErrorStop):
                errorStop = True
                continue
            if errorStop:
                # past an _ErrorStop: wrap ordinary failures in
                # ParseSyntaxException so outer alternatives stop backtracking
                try:
                    loc, exprtokens = e._parse( instring, loc, doActions )
                except ParseSyntaxException:
                    raise
                except ParseBaseException as pe:
                    pe.__traceback__ = None
                    raise ParseSyntaxException(pe)
                except IndexError:
                    raise ParseSyntaxException( ParseException(instring, len(instring), self.errmsg, self) )
            else:
                loc, exprtokens = e._parse( instring, loc, doActions )
            # only accumulate non-empty results (or ones carrying named keys)
            if exprtokens or exprtokens.haskeys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an element must consume input, later elements cannot be
            # reached at the same position, so recursion checking can stop
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
    If two expressions match, the expression that matches the longest string will be used.
    May be constructed using the C{'^'} operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        if self.exprs:
            # an Or can match empty if any alternative can
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxException = None
        matches = []
        # first pass: tryParse every alternative (without running parse
        # actions) to find all that match, tracking the furthest failure
        # location for error reporting
        for e in self.exprs:
            try:
                loc2 = e.tryParse( instring, loc )
            except ParseException as err:
                err.__traceback__ = None
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                # save match among all matches, to retry longest to shortest
                matches.append((loc2, e))
        if matches:
            # second pass: re-parse (with actions this time), longest first;
            # a parse action may reject a match, so fall through to shorter ones
            matches.sort(key=lambda x: -x[0])
            for _,e in matches:
                try:
                    return e._parse( instring, loc, doActions )
                except ParseException as err:
                    err.__traceback__ = None
                    if err.loc > maxExcLoc:
                        maxException = err
                        maxExcLoc = err.loc
        if maxException is not None:
            # report using this Or's message, at the furthest failure point
            maxException.msg = self.errmsg
            raise maxException
        else:
            raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ixor__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement.literalStringClass( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one C{ParseExpression} is found.
    If two expressions match, the first one listed is the one that will match.
    May be constructed using the C{'|'} operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if self.exprs:
            # can match empty if any alternative can
            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
        else:
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1
        maxException = None
        # try alternatives in listed order; return on the first success,
        # remembering the furthest-reaching failure for error reporting
        for e in self.exprs:
            try:
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException as err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if maxException is not None:
                maxException.msg = self.errmsg
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        if isinstance( other, basestring ):
            other = ParserElement.literalStringClass( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given C{ParseExpression}s to be found, but in any order.
    Expressions may be separated by whitespace.
    May be constructed using the C{'&'} operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
        self.skipWhitespace = True
        # defer classification of contained exprs until the first parse
        self.initExprGroups = True
    def parseImpl( self, instring, loc, doActions=True ):
        if self.initExprGroups:
            # classify contained expressions once: Optionals, other
            # possibly-empty exprs, ZeroOrMore/OneOrMore repetitions, and
            # plain required exprs; opt1map lets us recover the original
            # Optional wrapper from its inner expression's id
            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
            self.optionals = opt1 + opt2
            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
            self.required += self.multirequired
            self.initExprGroups = False
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []
        # repeatedly sweep the remaining expressions, recording the order in
        # which they match, until a full sweep produces no new matches
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    # map inner exprs back to their Optional wrappers
                    matchOrder.append(self.opt1map.get(id(e),e))
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            # some required elements never matched - report them all
            missing = ", ".join(_ustr(e) for e in tmpReqd)
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # add any unmatched Optionals, in case they have default values defined
        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
        # now re-parse for real (with actions) in the discovered match order
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        finalResults = ParseResults()
        for r in resultlist:
            # merge named results; duplicated keys are accumulated rather
            # than overwritten, then written back after the merge
            dups = {}
            for k in r.keys():
                if k in finalResults:
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
    Wraps a single contained expression held in C{self.expr} (which may be
    C{None}, e.g. for an undefined C{Forward}).
    """
    def __init__( self, expr, savelist=False ):
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit the wrapped expression's parsing characteristics
            self.mayIndexError = expr.mayIndexError
            self.mayReturnEmpty = expr.mayReturnEmpty
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
            self.callPreparse = expr.callPreparse
            self.ignoreExprs.extend(expr.ignoreExprs)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        self.skipWhitespace = False
        # BUGFIX: the copy must be guarded by the None check; previously
        # self.expr.copy() ran *before* testing self.expr for None, raising
        # AttributeError for an empty enhance element (e.g. bare Forward())
        if self.expr is not None:
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        if isinstance( other, Suppress ):
            # avoid registering the same Suppress expression twice
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # seeing ourselves again without consuming input means left recursion
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        # NOTE: mutable default argument is only read here, then copied
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except Exception:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit; fall back to a built repr
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Positive lookahead for the given parse expression.  C{FollowedBy}
    verifies that the wrapped expression matches at the current position but
    does *not* advance the parsing position, and always returns a null
    token list."""
    def __init__( self, expr ):
        super(FollowedBy,self).__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # tryParse raises on failure; on success we deliberately stay put
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """Negative lookahead: succeeds only when the given parse expression
    does *not* match at the current position.  Consumes no input, does not
    skip leading whitespace, and always returns a null token list.  May be
    constructed using the '~' operator."""
    def __init__( self, expr ):
        super(NotAny,self).__init__(expr)
        # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.skipWhitespace = False
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        # succeed exactly when the wrapped expression would fail here
        if self.expr.canParseNext(instring, loc):
            raise ParseException(instring, loc, self.errmsg, self)
        return loc, []
    def __str__( self ):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression.
    Parameters:
     - expr - expression that must match one or more times
     - stopOn - (default=None) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)
    """
    def __init__( self, expr, stopOn=None):
        super(OneOrMore, self).__init__(expr)
        ender = stopOn
        if isinstance(ender, basestring):
            ender = Literal(ender)
        # negative lookahead used to stop repetition at the sentinel
        self.not_ender = ~ender if ender is not None else None
    def parseImpl( self, instring, loc, doActions=True ):
        # hoist attribute lookups out of the repetition loop for speed
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = self.not_ender is not None
        if check_ender:
            try_not_ender = self.not_ender.tryParse
        # must be at least one (but first see if we are the stopOn sentinel;
        # if so, fail)
        if check_ender:
            try_not_ender(instring, loc)
        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = (not not self.ignoreExprs)
            # keep matching until the expr fails or the sentinel is reached
            while 1:
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
                if tmptokens or tmptokens.haskeys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # normal loop exit: the repetition simply stopped matching
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
        # repeated matches should always be collected as a list
        ret.saveAsList = True
        return ret
class ZeroOrMore(OneOrMore):
    """Optional repetition of zero or more of the given expression.
    Parameters:
     - expr - expression that must match zero or more times
     - stopOn - (default=None) - expression for a terminating sentinel
          (only required if the sentinel would ordinarily match the repetition
          expression)
    """
    def __init__( self, expr, stopOn=None):
        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
        # zero repetitions is an acceptable match
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # delegate to OneOrMore; if not even one match is found, succeed
        # anyway with an empty result
        try:
            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
        except (ParseException,IndexError):
            return loc, []
    def __str__( self ):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
    Parameters:
     - expr - expression that must match zero or more times
     - default (optional) - value to be returned if the optional expression
          is not found.
    """
    def __init__( self, expr, default=_optionalNotMatched ):
        super(Optional,self).__init__( expr, savelist=False )
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            # if the wrapped expression matches, just pass its result through
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            pass
        # no match: substitute the default value if one was supplied
        if self.defaultValue is _optionalNotMatched:
            return loc, []
        if self.expr.resultsName:
            # preserve the named-result binding on the default value
            tokens = ParseResults([ self.defaultValue ])
            tokens[self.expr.resultsName] = self.defaultValue
        else:
            tokens = [ self.defaultValue ]
        return loc, tokens
    def __str__( self ):
        try:
            return self.name
        except AttributeError:
            pass
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
    Parameters:
     - expr - target expression marking the end of the data to be skipped
     - include - (default=False) if True, the target expression is also parsed
          (the skipped text and target expression are returned as a 2-element list).
     - ignore - (default=None) used to define grammars (typically quoted strings and
          comments) that might contain false matches to the target expression
     - failOn - (default=None) define expressions that are not allowed to be
          included in the skipped test; if found before the target expression is found,
          the SkipTo is not a match
    """
    def __init__( self, other, include=False, ignore=None, failOn=None ):
        super( SkipTo, self ).__init__( other )
        self.ignoreExpr = ignore
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        # allow failOn to be given as a plain string
        if isinstance(failOn, basestring):
            self.failOn = Literal(failOn)
        else:
            self.failOn = failOn
        self.errmsg = "No match found for "+_ustr(self.expr)
    def parseImpl( self, instring, loc, doActions=True ):
        startloc = loc
        instrlen = len(instring)
        expr = self.expr
        # hoist method lookups out of the character-scan loop
        expr_parse = self.expr._parse
        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
        tmploc = loc
        # scan forward one position at a time until the target matches
        while tmploc <= instrlen:
            if self_failOn_canParseNext is not None:
                # break if failOn expression matches
                if self_failOn_canParseNext(instring, tmploc):
                    break
            if self_ignoreExpr_tryParse is not None:
                # advance past ignore expressions
                while 1:
                    try:
                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
                    except ParseBaseException:
                        break
            try:
                # probe for the target without running its parse actions
                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
            except (ParseException, IndexError):
                # no match, advance loc in string
                tmploc += 1
            else:
                # matched skipto expr, done
                break
        else:
            # ran off the end of the input string without matching skipto expr, fail
            raise ParseException(instring, loc, self.errmsg, self)
        # build up return values
        loc = tmploc
        skiptext = instring[startloc:loc]
        skipresult = ParseResults(skiptext)
        if self.includeMatch:
            # parse the target for real this time (with actions) and append it
            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
            skipresult += mat
        return loc, skipresult
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
    used for recursive grammars, such as algebraic infix notation.
    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
    Specifically, '|' has a lower precedence than '<<', so that::
        fwdExpr << a | b | c
    will actually be evaluated as::
        (fwdExpr << a) | b | c
    thereby leaving b and c out as parseable alternatives.  It is recommended that you
    explicitly group the values inserted into the C{Forward}::
        fwdExpr << (a | b | c)
    Converting to use the '<<=' operator instead will avoid this problem.
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        # bind the deferred expression and copy its parsing characteristics
        if isinstance( other, basestring ):
            other = ParserElement.literalStringClass(other)
        self.expr = other
        self.strRepr = None
        self.mayIndexError = self.expr.mayIndexError
        self.mayReturnEmpty = self.expr.mayReturnEmpty
        self.setWhitespaceChars( self.expr.whiteChars )
        self.skipWhitespace = self.expr.skipWhitespace
        self.saveAsList = self.expr.saveAsList
        self.ignoreExprs.extend(self.expr.ignoreExprs)
        return self
    def __ilshift__(self, other):
        # '<<=' delegates to '<<'
        return self << other
    def leaveWhitespace( self ):
        # deliberately does NOT propagate to self.expr (would recurse forever
        # on recursive grammars)
        self.skipWhitespace = False
        return self
    def streamline( self ):
        # guard with a flag so recursive grammars don't streamline endlessly
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # skip if already on the validation path (recursion guard)
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        return self.__class__.__name__ + ": ..."
        # NOTE: everything below the return above is intentionally unreachable
        # stubbed out for now - creates awful memory and perf issues
        self._revertClass = self.__class__
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = self._revertClass
        return self.__class__.__name__ + ": " + retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # copying an undefined Forward yields a new Forward bound to this one
            ret = Forward()
            ret <<= self
            return ret
class _ForwardNoRecurse(Forward):
    """Temporary stand-in class used to break infinite recursion while
    building a Forward's string representation."""
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of C{ParseElementEnhance}, for converting parsed results."""
    def __init__( self, expr, savelist=False ):
        # savelist is accepted for interface compatibility but not forwarded
        super(TokenConverter,self).__init__( expr )
        self.saveAsList = False
class Combine(TokenConverter):
    """Converter that concatenates all matched tokens into a single string,
    joined with C{joinString}.  By default (C{adjacent=True}) the matched
    tokens must be contiguous in the input string; pass C{adjacent=False}
    to allow intervening whitespace.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True
    def ignore( self, other ):
        if not self.adjacent:
            super( Combine, self).ignore( other )
        else:
            # adjacent mode: register the ignorable only on this element,
            # not on the contained expression
            ParserElement.ignore(self, other)
        return self
    def postParse( self, instring, loc, tokenlist ):
        # keep any named results from the original list, but replace the
        # token contents with the single joined string
        retToks = tokenlist.copy()
        del retToks[:]
        joined = "".join(tokenlist._asStringList(self.joinString))
        retToks += ParseResults([ joined ], modal=self.modalResults)
        if self.resultsName and retToks.haskeys():
            return [ retToks ]
        return retToks
class Group(TokenConverter):
    """Converter that nests the matched tokens inside a list - useful for
    preserving the structure of C{L{ZeroOrMore}} and C{L{OneOrMore}}
    repetitions."""
    def __init__( self, expr ):
        super(Group,self).__init__( expr )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # wrap the whole result list as a single grouped token
        return [ tokenlist ]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
    Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as a item key.
    """
    def __init__( self, expr ):
        super(Dict,self).__init__( expr )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        for i,tok in enumerate(tokenlist):
            if len(tok) == 0:
                # nothing to key on - skip empty groups
                continue
            # first token of each group becomes the dictionary key
            ikey = tok[0]
            if isinstance(ikey,int):
                ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value -> empty-string value
                tokenlist[ikey] = _ParseResultsWithOffset("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key/value pair
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
            else:
                # multi-token value: strip the key and store the remainder,
                # unwrapping it when it is a single unnamed token
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter that discards the matched tokens from the parse results."""
    def postParse( self, instring, loc, tokenlist ):
        # drop everything the wrapped expression matched
        return []
    def suppress( self ):
        # already suppressing - nothing more to do
        return self
class OnlyOnce(object):
    """Parse-action wrapper that allows exactly one successful invocation;
    later calls raise ParseException until C{reset()} is called."""
    def __init__(self, methodCall):
        self.callable = _trim_arity(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        if self.called:
            raise ParseException(s,l,"")
        results = self.callable(s,l,t)
        # only mark as called after the wrapped action succeeds
        self.called = True
        return results
    def reset(self):
        # re-arm the wrapper so the action may fire again
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.
    Writes an entry line (with the current parse line, location, and tokens)
    to stderr before invoking the wrapped action, and an exit line with the
    return value (or the exception, which is re-raised) afterwards.
    """
    f = _trim_arity(f)
    def z(*paArgs):
        # BUGFIX: f.func_name is a Python-2-only attribute (removed in
        # Python 3); f.__name__ works on both versions
        thisFunc = f.__name__
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            # called as a bound method - prefix with the class name
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception as exc:
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    try:
        z.__name__ = f.__name__
    except AttributeError:
        pass
    return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace, and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
    If C{combine} is set to C{True}, the matching tokens are returned as a single token
    string, with the delimiters included; otherwise, the matching tokens are returned
    as a list of tokens, with the delimiters suppressed.
    """
    dlName = "%s [%s %s]..." % (_ustr(expr), _ustr(delim), _ustr(expr))
    if combine:
        # keep delimiters and fuse everything into one token string
        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
    # default: drop the delimiters, keep the elements as separate tokens
    return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
    """Helper to define a counted list of expressions.
    This helper defines a pattern of the form::
        integer expr expr expr...
    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # rebind the Forward to exactly n copies of expr each time a count
        # is parsed; n == 0 falls through the and/or to the empty expression
        n = t[0]
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        # suppress the count token itself from the results
        return []
    if intExpr is None:
        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
    else:
        # don't mutate the caller's expression
        intExpr = intExpr.copy()
    intExpr.setName("arrayLen")
    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousLiteral(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
    previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
    If this is not desired, use C{matchPreviousExpr}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # each time expr matches, rebind rep to literal(s) for those tokens
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And(Literal(tt) for tt in tflat)
        else:
            # expr matched no tokens - repeat matches nothing as well
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks
    for a 'repeat' of a previous expression.  For example::
        first = Word(nums)
        second = matchPreviousExpr(first)
        matchExpr = first + ":" + second
    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
    expressions, will *not* match the leading C{"1:1"} in C{"1:10"};
    the expressions are evaluated first, and then compared, so
    C{"1"} is compared with C{"10"}.
    Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    # rep parses with a copy of expr, then a parse action rejects the match
    # unless its tokens equal the tokens from the previous expr match
    e2 = expr.copy()
    rep <<= e2
    def copyTokenToRepeater(s,l,t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if  theseTokens != matchTokens:
                raise ParseException("",0,"")
        # installed fresh on every expr match, capturing matchTokens
        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
    rep.setName('(prev) ' + _ustr(expr))
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape the characters that are special inside a regex
    character class (C{\\ ^ - ]}), and spell newline/tab symbolically."""
    for ch in r"\^-]":
        s = s.replace(ch, _bslash + ch)
    s = s.replace("\n", r"\n").replace("\t", r"\t")
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
    longest-first testing when there is a conflict, regardless of the input order,
    but returns a C{L{MatchFirst}} for best performance.
    Parameters:
     - strs - a string of space-delimited literals, or a list of string literals
     - caseless - (default=False) - treat all literals as caseless
     - useRegex - (default=True) - as an optimization, will generate a Regex
        object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
        if creating a C{Regex} raises an exception)
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal
    # normalize strs into a list of symbol strings
    symbols = []
    if isinstance(strs,basestring):
        symbols = strs.split()
    elif isinstance(strs, collections.Sequence):
        symbols = list(strs[:])
    elif isinstance(strs, _generatorType):
        symbols = list(strs)
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
    if not symbols:
        return NoMatch()
    # remove duplicates, and reorder so that no symbol is preceded by one of
    # its own prefixes (guarantees longest-first matching)
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1
    if not caseless and useRegex:
        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
        try:
            # all single characters -> a character class; otherwise alternation
            if len(symbols)==len("".join(symbols)):
                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
            else:
                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
        except Exception:
            # was a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit while building the Regex
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)
    # last resort, just use MatchFirst
    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf( key, value ):
    """Helper to easily and clearly define a dictionary by specifying the respective patterns
    for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
    in the proper order. The key pattern can include delimiting markers or punctuation,
    as long as they are suppressed, thereby leaving the significant key text. The value
    pattern can include named results, so that the C{Dict} results can include named token
    fields.
    """
    # Each grouped (key, value) pair becomes one Dict entry.
    entry = Group( key + value )
    return Dict( ZeroOrMore( entry ) )
def originalTextFor(expr, asString=True):
    """Helper to return the original, untokenized text for a given expression. Useful to
    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
    revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns astring containing the original parsed text.
    If the optional C{asString} argument is passed as C{False}, then the return value is a
    C{L{ParseResults}} containing any results names that were originally matched, and a
    single token containing the original matched text from the input string. So if
    the expression passed to C{L{originalTextFor}} contains expressions with defined
    results names, you must set C{asString} to C{False} if you want to preserve those
    results name values."""
    # Zero-width markers that record the current parse location.
    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
    endlocMarker = locMarker.copy()
    # NOTE(review): disabling preparse on the end marker appears intended to
    # keep whitespace-skipping from moving the recorded end location — confirm.
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        # Slice the raw input between the two recorded locations.
        extractText = lambda s,l,t: s[t._original_start:t._original_end]
    else:
        def extractText(s,l,t):
            # pop() removes the helper result names so only expr's own
            # names plus the single raw-text token remain.
            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
    matchExpr.setParseAction(extractText)
    return matchExpr
def ungroup(expr):
    """Helper to undo pyparsing's default grouping of And expressions, even
    if all but one are non-empty."""
    # Unwrap the single-element result list down to its only item.
    unwrap_first = lambda t: t[0]
    return TokenConverter(expr).setParseAction(unwrap_first)
def locatedExpr(expr):
    """Helper to decorate a returned token with its starting and ending locations in the input string.
    This helper adds the following results names:
     - locn_start = location where matched expression begins
     - locn_end = location where matched expression ends
     - value = the actual parsed results
    Be careful if the input text contains C{<TAB>} characters, you may want to call
    C{L{ParserElement.parseWithTabs}}
    """
    # Zero-width marker whose parse action yields the current location.
    marker = Empty().setParseAction(lambda s,l,t: l)
    start_marker = marker("locn_start")
    # The end marker must not skip whitespace, or the recorded end would
    # drift past the matched text.
    end_marker = marker.copy().leaveWhitespace()("locn_end")
    return Group(start_marker + expr("value") + end_marker)
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")

# Internal expressions used by srange() to parse regex-style [...] sets.
# Backslash-escaped punctuation, e.g. r"\]" -> "]".
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
# Hex escape, \x41 or legacy \0x41 -> "A".
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
# Octal escape, \0101 -> "A".
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
# Any single character usable inside the bracket expression.
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
# A dash-separated character range such as a-z.
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
# The whole bracket expression, with optional leading "^" negation flag.
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction. Borrows
    syntax from regexp '[]' string range definitions::
        srange("[0-9]") -> "0123456789"
        srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
    The input string must be enclosed in []'s, and the returned string is the expanded
    character set joined into a single string.
    The values enclosed in the []'s may be::
        a single character
        an escaped character with a leading backslash (such as \- or \])
        an escaped hex character with a leading '\x' (\x21, which is a '!' character)
            (\0x## is also supported for backwards compatibility)
        an escaped octal character with a leading '\0' (\041, which is a '!' character)
        a range of any of the above, separated by a dash ('a-z', etc.)
        any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    # A parsed range pair comes back as a ParseResults of (first, last);
    # expand it into the full run of characters. Single chars pass through.
    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
    try:
        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
    except Exception:
        # FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt. Malformed input still yields an empty string.
        return ""
def matchOnlyAtCol(n):
    """Helper method for defining parse actions that require matching at a specific
    column in the input text.
    """
    def verify_column(strg, locn, toks):
        # col() is 1-based; refuse the match unless it begins exactly at n.
        actual_col = col(locn, strg)
        if actual_col != n:
            raise ParseException(strg,locn,"matched token not at column %d" % n)
    return verify_column
def replaceWith(replStr):
    """Helper method for common parse actions that simply return a literal value. Especially
    useful when used with C{L{transformString<ParserElement.transformString>}()}.
    """
    def _substitute(s, l, t):
        # The matched tokens are ignored; the fixed value always replaces them.
        return [replStr]
    return _substitute
def removeQuotes(s,l,t):
    """Helper parse action for removing quotation marks from parsed quoted strings.
    To use, add this parse action to quoted string using::
        quotedString.setParseAction( removeQuotes )
    """
    quoted = t[0]
    # Drop exactly one leading and one trailing quote character.
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Helper parse action to convert tokens to upper case."""
    return [ _ustr(tok).upper() for tok in t ]
def downcaseTokens(s,l,t):
    """Helper parse action to convert tokens to lower case."""
    return [ _ustr(tok).lower() for tok in t ]
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    # Accept either a bare tag name or a pre-built expression; HTML tag
    # names match caselessly, XML names exactly.
    if isinstance(tagStr,basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-:")
    if (xml):
        # XML: attribute values must be double-quoted, "=" is mandatory.
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        # HTML: values may be quoted or bare, attribute may have no value,
        # and attribute names are normalized to lower case.
        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Optional( Suppress("=") + tagAttrValue ) ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # Results names like "startDiv"/"endDiv" (colons in names become camelCase).
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML, given a tag name"""
    # HTML mode: caseless tag names, optional/unquoted attribute values.
    return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML, given a tag name"""
    # XML mode: exact-case tag names, mandatory quoted attribute values.
    return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
    """Helper to create a validating parse action to be used with start tags created
    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
    with a required attribute value, to avoid false matches on common tags such as
    C{<TD>} or C{<DIV>}.
    Call C{withAttribute} with a series of attribute names and values. Specify the list
    of filter attributes names and values as:
     - keyword arguments, as in C{(align="right")}, or
     - as an explicit dict with C{**} operator, when an attribute name is also a Python
       reserved word, as in C{**{"class":"Customer", "align":"right"}}
     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
    For attribute names with a namespace prefix, you must use the second form. Attribute
    names are matched insensitive to upper/lower case.
    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
    To verify that the attribute exists, but without specifying a value, pass
    C{withAttribute.ANY_VALUE} as the value.
    """
    # Positional name/value tuples take precedence over keyword arguments.
    source = args[:] if args else attrDict.items()
    required = [(name, value) for name, value in source]
    def pa(s,l,tokens):
        for name, expected in required:
            if name not in tokens:
                raise ParseException(s,l,"no matching attribute " + name)
            if expected != withAttribute.ANY_VALUE and tokens[name] != expected:
                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
                                        (name, tokens[name], expected))
    return pa
# Sentinel: require only that the attribute exists, with any value.
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
    """Simplified version of C{L{withAttribute}} when matching on a div class - made
    difficult because C{class} is a reserved word in Python.
    """
    # Build the (possibly namespaced) attribute name, then delegate.
    if namespace:
        attr_name = "%s:class" % namespace
    else:
        attr_name = "class"
    return withAttribute(**{attr_name : classname})
# Enumeration-like constants for operator associativity, consumed by
# infixNotation / operatorPrecedence. Distinct sentinel objects compared
# by identity.
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary or
    binary, left- or right-associative. Parse actions can also be attached
    to operator expressions.
    Parameters:
     - baseExpr - expression representing the most basic element for the nested
     - opList - list of tuples, one for each operator precedence level in the
       expression grammar; each tuple is of the form
       (opExpr, numTerms, rightLeftAssoc, parseAction), where:
        - opExpr is the pyparsing expression for the operator;
           may also be a string, which will be converted to a Literal;
           if numTerms is 3, opExpr is a tuple of two expressions, for the
           two operators separating the 3 terms
        - numTerms is the number of terms for this operator (must
           be 1, 2, or 3)
        - rightLeftAssoc is the indicator whether the operator is
           right or left associative, using the pyparsing-defined
           constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
        - parseAction is the parse action to be associated with
           expressions matching this operator expression (the
           parse action tuple member may be omitted)
     - lpar - expression for matching left-parentheses (default=Suppress('('))
     - rpar - expression for matching right-parentheses (default=Suppress(')'))
    """
    ret = Forward()
    # Atoms: the base expression or a fully parenthesized sub-expression.
    lastExpr = baseExpr | ( lpar + ret + rpar )
    # Build one grammar layer per precedence level, tightest-binding first.
    for i,operDef in enumerate(opList):
        # Pad the tuple so an omitted parse action defaults to None.
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
        if arity == 3:
            if opExpr is None or len(opExpr) != 2:
                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
            opExpr1, opExpr2 = opExpr
        thisExpr = Forward().setName(termName)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                # Postfix unary: operand followed by one or more operators.
                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
                else:
                    # A None opExpr means juxtaposition (implicit operator).
                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                if opExpr is not None:
                    # Right-associative: recurse into this same level on the right.
                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
                else:
                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
            elif arity == 3:
                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
            else:
                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
        else:
            raise ValueError("operator must indicate right or left associativity")
        if pa:
            matchExpr.setParseAction( pa )
        # This level matches either an operator expression at this level or
        # anything from the tighter-binding level below it.
        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
        lastExpr = thisExpr
    ret <<= lastExpr
    return ret

# Older name for infixNotation, retained for backward compatibility.
operatorPrecedence = infixNotation
# Predefined quoted-string expressions; all allow backslash escapes,
# doubled-quote escapes, and \xHH hex escapes within the string body.
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
# Python-style u'...' / u"..." literal, including the leading u.
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
    """Helper method for defining nested lists enclosed in opening and closing
    delimiters ("(" and ")" are the default).
    Parameters:
     - opener - opening character for a nested list (default="("); can also be a pyparsing expression
     - closer - closing character for a nested list (default=")"); can also be a pyparsing expression
     - content - expression for items within the nested lists (default=None)
     - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
    If an expression is not provided for the content argument, the nested
    expression will capture all whitespace-delimited content between delimiters
    as a list of separate values.
    Use the C{ignoreExpr} argument to define expressions that may contain
    opening or closing characters that should not be treated as opening
    or closing characters for nesting, such as quotedString or a comment
    expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
    The default is L{quotedString}, but if no expressions are to be ignored,
    then pass C{None} for this argument.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # Synthesize a default content expression: runs of characters that
        # are not delimiters or whitespace (single-char delimiters can be
        # excluded via CharsNotIn; multi-char ones need lookahead guards).
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if len(opener) == 1 and len(closer)==1:
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
                                ).setParseAction(lambda t:t[0].strip()))
            else:
                if ignoreExpr is not None:
                    content = (Combine(OneOrMore(~ignoreExpr +
                                    ~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
                else:
                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                                ).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # Recursive grammar: a group is opener, any mix of ignored text /
    # nested groups / content, then closer.
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
    ret.setName('nested %s%s expression' % (opener,closer))
    return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Helper method for defining space-delimited indentation blocks, such as
    those used to define block statements in Python source code.
    Parameters:
     - blockStatementExpr - expression defining syntax of statement that
       is repeated within the indented block
     - indentStack - list created by caller to manage indentation stack
       (multiple statementWithIndentedBlock expressions within a single grammar
       should share a common indentStack)
     - indent - boolean indicating whether block must be indented beyond the
       the current level; set to False for block of left-most statements
       (default=True)
    A valid block must contain at least one C{blockStatement}.
    """
    def checkPeerIndent(s,l,t):
        # End-of-input is never an indentation error.
        if l >= len(s): return
        curCol = col(l,s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseFatalException(s,l,"illegal nesting")
            raise ParseException(s,l,"not a peer entry")
    def checkSubIndent(s,l,t):
        # A sub-block must indent strictly beyond the current level;
        # the new level is pushed onto the shared stack.
        curCol = col(l,s)
        if curCol > indentStack[-1]:
            indentStack.append( curCol )
        else:
            raise ParseException(s,l,"not a subentry")
    def checkUnindent(s,l,t):
        if l >= len(s): return
        curCol = col(l,s)
        # NOTE(review): indexes indentStack[-2]; assumes at least two levels
        # are on the stack when an unindent is checked — an unindent at the
        # outermost level would raise IndexError. Confirm callers seed the
        # stack with the base column before parsing.
        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
            raise ParseException(s,l,"not an unindent")
        indentStack.pop()
    # Logical newline: one or more line ends (blank lines collapse).
    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
    if indent:
        smExpr = Group( Optional(NL) +
            #~ FollowedBy(blockStatementExpr) +
            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
    else:
        smExpr = Group( Optional(NL) +
            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
    # Allow backslash line-continuations inside block statements.
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.setName('indented block')
# Latin-1 accented letters and punctuation, for 8-bit text grammars.
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")

# Matches any HTML start/end tag, whatever its name.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
# Common named HTML entities mapped to their replacement characters.
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
    """Helper parser action to replace common HTML entities with their special characters"""
    return _htmlEntityMap.get(t.entity)

# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
# One item of a comma-separated list: may contain embedded spaces/tabs,
# but not commas or line ends.
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
    # Smoke-test grammar: a minimal "SELECT <columns> FROM <tables>" parser.
    selectToken    = CaselessLiteral( "select" )
    fromToken      = CaselessLiteral( "from" )
    ident          = Word( alphas, alphanums + "_$" )
    # Dotted identifiers (e.g. SYS.DUAL), normalized to upper case.
    columnName     = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) ).setName("columns")
    tableName      = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList  = Group( delimitedList( tableName ) ).setName("tables")
    simpleSQL      = ( selectToken +
                     ( '*' | columnNameList ).setResultsName( "columns" ) +
                     fromToken +
                     tableNameList.setResultsName( "tables" ) )

    # Mix of valid queries and deliberate failures (Xelect, frox, ^^^)
    # to exercise both success and error reporting in runTests.
    simpleSQL.runTests("""\
SELECT * from XYZZY, ABC
select * from SYS.XYZZY
Select A from Sys.dual
Select AA,BB,CC from Sys.dual
Select A, B, C from Sys.dual
Select A, B, C from Sys.dual
Xelect A, B, C from Sys.dual
Select A, B, C frox Sys.dual
Select
Select ^^^ frox Sys.dual
Select A, B, C from Sys.dual, Table2""")
|
bsd-3-clause
|
ai0/sanic
|
examples/exception_monitoring.py
|
1
|
1791
|
"""
Example intercepting uncaught exceptions using Sanic's error handler framework.
This may be useful for developers wishing to use Sentry, Airbrake, etc.
or a custom system to log and monitor unexpected errors in production.
First we create our own class inheriting from Handler in sanic.exceptions,
and pass in an instance of it when we create our Sanic instance. Inside this
class' default handler, we can do anything including sending exceptions to
an external service.
"""
from sanic.handlers import ErrorHandler
from sanic.exceptions import SanicException
"""
Imports and code relevant for our CustomHandler class
(Ordinarily this would be in a separate file)
"""
class CustomHandler(ErrorHandler):
    """Error handler that reports unexpected exceptions before delegating
    to Sanic's stock handling (this is where Sentry/Airbrake/etc. calls
    would go in a real deployment)."""

    def default(self, request, exception):
        # Built-in Sanic exceptions (404s and friends) are routine; only
        # surface the unexpected ones.
        is_expected = isinstance(exception, SanicException)
        if not is_expected:
            print(exception)
        # Finish by letting the base class build the HTTP response.
        return super().default(request, exception)
"""
This is an ordinary Sanic server, with the exception that we set the
server's error_handler to an instance of our CustomHandler
"""
from sanic import Sanic
from sanic.response import json

# Wire the custom handler into the application so every uncaught
# exception flows through CustomHandler.default().
app = Sanic(__name__)

handler = CustomHandler()
app.error_handler = handler

@app.route("/")
async def test(request):
    # Deliberately raise ZeroDivisionError so the custom error handler
    # is exercised on every request to "/".
    1 / 0
    return json({"test": True})

app.run(host="0.0.0.0", port=8000, debug=True)
|
mit
|
RockinRoel/FrameworkBenchmarks
|
toolset/benchmark/fortune_html_parser.py
|
20
|
7205
|
# -*- coding: utf-8
import re
from HTMLParser import HTMLParser
from difflib import unified_diff
class FortuneHTMLParser(HTMLParser):
    """Parses the HTML of a "fortunes" benchmark response and normalizes it
    (canonical entity references, escaped quotes/angle brackets) so it can
    be compared for simple string equality against the known-good spec."""

    def __init__(self):
        # HTMLParser is an old-style class in Python 2, so no super().
        HTMLParser.__init__(self)
        # FIX: `body` used to be a *class* attribute, so every parser
        # instance shared (and appended to) the same list — parsing a
        # second response diffed against the concatenation of both.
        # It must be per-instance state.
        self.body = []

    # The one known-good, normalized rendering of the fortunes table.
    valid = '''<!doctype html><html>
<head><title>Fortunes</title></head>
<body><table>
<tr><th>id</th><th>message</th></tr>
<tr><td>11</td><td>&lt;script&gt;alert(&quot;This should not be displayed in a browser alert box.&quot;);&lt;/script&gt;</td></tr>
<tr><td>4</td><td>A bad random number generator: 1, 1, 1, 1, 1, 4.33e+67, 1, 1, 1</td></tr>
<tr><td>5</td><td>A computer program does what you tell it to do, not what you want it to do.</td></tr>
<tr><td>2</td><td>A computer scientist is someone who fixes things that aren&apos;t broken.</td></tr>
<tr><td>8</td><td>A list is only as strong as its weakest link. — Donald Knuth</td></tr>
<tr><td>0</td><td>Additional fortune added at request time.</td></tr>
<tr><td>3</td><td>After enough decimal places, nobody gives a damn.</td></tr>
<tr><td>7</td><td>Any program that runs right is obsolete.</td></tr>
<tr><td>10</td><td>Computers make very fast, very accurate mistakes.</td></tr>
<tr><td>6</td><td>Emacs is a nice operating system, but I prefer UNIX. — Tom Christaensen</td></tr>
<tr><td>9</td><td>Feature: A bug with seniority.</td></tr>
<tr><td>1</td><td>fortune: No such file or directory</td></tr>
<tr><td>12</td><td>フレームワークのベンチマーク</td></tr>
</table></body></html>'''

    # Is called when a doctype or other such tag is read in.
    # For our purposes, we assume this is only going to be
    # "DOCTYPE html", so we will surround it with "<!" and ">".
    def handle_decl(self, decl):
        # The spec says that for HTML this is case insensitive,
        # and since we did not specify xml compliance (where
        # incorrect casing would throw a syntax error), we must
        # allow all casings. We will lower for our normalization.
        self.body.append("<!{d}>".format(d=decl.lower()))

    # Called for numeric character references (e.g. &#34;). Different
    # frameworks legally escape the same character different ways, so every
    # equivalent form is normalized to a single canonical spelling before
    # comparison against `valid`.
    def handle_charref(self, name):
        val = name.lower()
        # &#34; / &#034; / &#x22; are all '"'; normalize to &quot;.
        if val == "34" or val == "034" or val == "x22":
            self.body.append("&quot;")
        # &#39; and friends are "'"; normalize to &apos;.
        if val == "39" or val == "039" or val == "x27":
            self.body.append("&apos;")
        # Escaping "+" is legal but optional; normalize to the bare char.
        if val == "43" or val == "043" or val == "x2b":
            self.body.append("+")
        # Normalize ">" escapings to &gt;.
        if val == "62" or val == "062" or val == "x3e":
            self.body.append("&gt;")
        # Normalize "<" escapings to &lt;.
        if val == "60" or val == "060" or val == "x3c":
            self.body.append("&lt;")
        # Not sure why some frameworks escape '/'; normalize to bare char.
        if val == "47" or val == "047" or val == "x2f":
            self.body.append("/")
        # Parentheses may optionally be escaped; normalize to bare chars.
        if val == "40" or val == "040" or val == "x28":
            self.body.append("(")
        if val == "41" or val == "041" or val == "x29":
            self.body.append(")")

    def handle_entityref(self, name):
        # "&mdash;" is normalized to the literal em dash; all other named
        # entities pass through in their "&name;" form unchanged.
        if name == "mdash":
            self.body.append("—")
        else:
            self.body.append("&{n};".format(n=name))

    # This is called every time a tag is opened. We append
    # each one wrapped in "<" and ">".
    def handle_starttag(self, tag, attrs):
        self.body.append("<{t}>".format(t=tag))
        # Newline after <table> and <html> to match the `valid` layout.
        if tag.lower() == 'table' or tag.lower() == 'html':
            self.body.append("\n")

    # Called for text between tags — mainly "<td>" contents, plus the
    # "<title>" text.
    def handle_data(self, data):
        if data.strip() != '':
            # Apostrophes, quotes and '>' are semantically legal unescaped
            # inside element data; normalize them to entity form so both
            # escaped and unescaped responses compare equal to the spec.
            data = data.replace('\'', '&apos;')
            data = data.replace('"', '&quot;')
            data = data.replace('>', '&gt;')
            self.body.append("{d}".format(d=data))

    # This is called every time a tag is closed. We append
    # each one wrapped in "</" and ">".
    def handle_endtag(self, tag):
        self.body.append("</{t}>".format(t=tag))
        # Newline after each </tr> and </head>, matching the spec layout.
        if tag.lower() == 'tr' or tag.lower() == 'head':
            self.body.append("\n")

    def isValidFortune(self, out):
        """Return (same, diff_lines): whether the normalized parse equals
        the spec; on mismatch a unified diff is also written to `out`."""
        body = ''.join(self.body)
        same = self.valid == body
        diff_lines = []
        if not same:
            out.write("Oh no! I compared %s\n\n\nto.....%s" % (self.valid, body))
            out.write("Fortune invalid. Diff following:\n")
            headers_left = 3
            for line in unified_diff(self.valid.split('\n'), body.split('\n'), fromfile='Valid', tofile='Response', n=0):
                diff_lines.append(line)
                out.write(line)
                headers_left -= 1
                # unified_diff lines after the headers carry no trailing
                # newline; add one so the report stays line-oriented.
                if headers_left <= 0:
                    out.write('\n')
        return (same, diff_lines)
|
bsd-3-clause
|
jordanemedlock/psychtruths
|
temboo/Library/Zendesk/IncrementalTickets/ExportIncrementalTickets.py
|
5
|
4063
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ExportIncrementalTickets
# Returns a lightweight representation of what changed in the ticket "since you last asked".
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ExportIncrementalTickets(Choreography):
    # Generated Temboo Choreo wrapper for Zendesk's incremental ticket export.

    def __init__(self, temboo_session):
        """
        Create a new instance of the ExportIncrementalTickets Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ExportIncrementalTickets, self).__init__(temboo_session, '/Library/Zendesk/IncrementalTickets/ExportIncrementalTickets')

    def new_input_set(self):
        # Factory for this Choreo's typed input container.
        return ExportIncrementalTicketsInputSet()

    def _make_result_set(self, result, path):
        # Internal: wrap a raw execution result in the typed result set.
        return ExportIncrementalTicketsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Internal: typed handle for polling an in-flight execution.
        return ExportIncrementalTicketsChoreographyExecution(session, exec_id, path)
class ExportIncrementalTicketsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ExportIncrementalTickets
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # All four inputs are required by the Zendesk incremental export endpoint.

    def set_Email(self, value):
        """
        Set the value of the Email input for this Choreo. ((required, string) The email address you use to login to your Zendesk account.)
        """
        super(ExportIncrementalTicketsInputSet, self)._set_input('Email', value)

    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((required, password) Your Zendesk password.)
        """
        super(ExportIncrementalTicketsInputSet, self)._set_input('Password', value)

    def set_Server(self, value):
        """
        Set the value of the Server input for this Choreo. ((required, string) Your Zendesk domain and subdomain (e.g., temboocare.zendesk.com).)
        """
        super(ExportIncrementalTicketsInputSet, self)._set_input('Server', value)

    def set_StartTime(self, value):
        """
        Set the value of the StartTime input for this Choreo. ((required, string) Return a list of tickets created after this timestamp (in seconds since Epoch UTC). Tickets less than 5 minutes old are not included in the response.)
        """
        super(ExportIncrementalTicketsInputSet, self)._set_input('StartTime', value)
class ExportIncrementalTicketsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ExportIncrementalTickets Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name `str` shadows the builtin; left
        # unchanged because this is generated code and callers may pass it
        # by keyword.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Zendesk.)
        """
        return self._output.get('Response', None)
class ExportIncrementalTicketsChoreographyExecution(ChoreographyExecution):
    # Execution handle for an in-flight ExportIncrementalTickets run.

    def _make_result_set(self, response, path):
        # Internal: wrap the raw response in the typed result set.
        return ExportIncrementalTicketsResultSet(response, path)
|
apache-2.0
|
omoju/Fundamentals
|
Data/twitterDataAnalysis/info_gain.py
|
1
|
3353
|
import os
import sys
import numpy as np
import math
def findBinIndexFor(aFloatValue, binsList):
    """Return the index of the half-open bin [lo, hi) in binsList that
    contains aFloatValue, or -1 if no bin does.

    Each entry of binsList is an indexable pair (lo, hi).
    """
    # Improvements over the original: enumerate() instead of
    # range(len(...)), early return instead of a flag variable, and the
    # leftover commented-out debug print removed. Behavior is unchanged.
    for index, bin_bounds in enumerate(binsList):
        if bin_bounds[0] <= aFloatValue < bin_bounds[1]:
            return index
    return -1
def compute_joint_prob(joint_list, vals1, vals2, bins1=None, bins2=None, asFreq=False):
    """Build the joint table P(X, Y) (or raw counts when ``asFreq``).

    ``joint_list`` holds (x, y) records; ``vals2`` enumerates every possible
    y value so each row is pre-initialized with zeros.  When ``bins1``/``bins2``
    are given, raw values are mapped to bin indices via ``findBinIndexFor``.
    ``vals1`` is accepted for interface symmetry but not used.
    """
    def _key(value, bins):
        # Raw value when unbinned, otherwise its bin index.
        return value if bins is None else findBinIndexFor(value, bins)

    table = {}
    for rec in joint_list:
        row_key = _key(rec[0], bins1)
        col_key = _key(rec[1], bins2)
        if row_key not in table:
            # First sighting of this x: zero-fill every known y column.
            table[row_key] = {_key(v, bins2): 0 for v in vals2}
        table[row_key][col_key] += 1

    if not asFreq:
        # Normalize counts into probabilities.
        total = len(joint_list)
        for row_key in table:
            for col_key in table[row_key]:
                table[row_key][col_key] = float(table[row_key][col_key]) / total

    return table
def getXForFixedY(joint_prob_dist, yVal):
    """Column slice of the joint table: [P(x, yVal) for every row x]."""
    return [row[yVal] for row in joint_prob_dist.values()]
def compute_h(floatsList):
    """Shannon entropy: H = -sum(p * log2(p)), skipping zero entries."""
    total = 0
    for p in floatsList:
        if p != 0:
            total -= p * math.log(p, 2)
    return total
# Computes the conditional entropy H(X|Y) of the joint distribution:
#   H(X|Y) = -sum_{x,y} p(x,y) * log2(p(x,y) / p(y))
# (the original header comment claimed KL divergence, which this is not).
def conditional_entropy(joint_prob_dist, xVals, yVals):
    """Return H(X|Y) for the joint table ``joint_prob_dist``.

    ``joint_prob_dist`` maps x -> {y: p(x, y)}.  Missing cells and zero
    probabilities contribute nothing to the sum.
    """
    marginal_y_dist = getYMarginalDist(joint_prob_dist)
    h_acc = 0
    for x in xVals:
        # Treat absent rows as all-zero.
        row = joint_prob_dist.get(x, {})
        for y in yVals:
            joint_xy = row.get(y, 0)
            marginal_y = marginal_y_dist.get(y, 0)
            # Skip zero terms: 0 * log(0) is taken as 0, and a zero marginal
            # would divide by zero.
            if joint_xy != 0 and marginal_y != 0:
                h_acc -= joint_xy * math.log(joint_xy / marginal_y, 2)
    return h_acc
def getYMarginalDist(joint_prob_dist):
    """Marginalize over x: sum each y column of the joint table."""
    totals = {}
    for row in joint_prob_dist.values():
        for y_key, prob in row.items():
            totals[y_key] = totals.get(y_key, 0) + prob
    return totals
def getXMarginalDist(joint_prob_dist):
    """Marginalize over y: sum each row of the joint table.

    Fix: the original used the bare name ``reduce``, which is a NameError on
    Python 3 (it moved to ``functools``); ``sum`` over the row values is
    equivalent here.
    """
    returnDict = {}
    for key, row in joint_prob_dist.items():
        returnDict[key] = sum(row.values())
    return returnDict
def entropy_loss(joint_prob_dist, xVals, yVals):
    """Information gain I(X;Y) = H(X) - H(X|Y) for the joint table."""
    x_marginals = getXMarginalDist(joint_prob_dist)
    h_prior = compute_h(x_marginals.values())
    return h_prior - conditional_entropy(joint_prob_dist, xVals, yVals)
|
gpl-3.0
|
nash-x/hws
|
neutron/common/constants.py
|
1
|
5802
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(salv-orlando): Verify if a single set of operational
# status constants is achievable

# Operational statuses for networks, ports and floating IPs.
NET_STATUS_ACTIVE = 'ACTIVE'
NET_STATUS_BUILD = 'BUILD'
NET_STATUS_DOWN = 'DOWN'
NET_STATUS_ERROR = 'ERROR'

PORT_STATUS_ACTIVE = 'ACTIVE'
PORT_STATUS_BUILD = 'BUILD'
PORT_STATUS_DOWN = 'DOWN'
PORT_STATUS_ERROR = 'ERROR'

FLOATINGIP_STATUS_ACTIVE = 'ACTIVE'
FLOATINGIP_STATUS_DOWN = 'DOWN'
FLOATINGIP_STATUS_ERROR = 'ERROR'

# device_owner values recording which service owns a port.
DEVICE_OWNER_COMPUTER = "compute"
DEVICE_OWNER_ROUTER_HA_INTF = "network:router_ha_interface"
DEVICE_OWNER_ROUTER_INTF = "network:router_interface"
DEVICE_OWNER_ROUTER_GW = "network:router_gateway"
DEVICE_OWNER_FLOATINGIP = "network:floatingip"
DEVICE_OWNER_DHCP = "network:dhcp"
DEVICE_OWNER_DVR_INTERFACE = "network:router_interface_distributed"
DEVICE_OWNER_AGENT_GW = "network:floatingip_agent_gateway"
DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat"
DEVICE_OWNER_LOADBALANCER = "neutron:LOADBALANCER"

DEVICE_ID_RESERVED_DHCP_PORT = "reserved_dhcp_port"
DEVICE_OWNER_TRUNK_INTF = "network:trunk_interface"

# Keys used to attach related resources to router dicts sent to agents.
FLOATINGIP_KEY = '_floatingips'
INTERFACE_KEY = '_interfaces'
HA_INTERFACE_KEY = '_ha_interface'
HA_ROUTER_STATE_KEY = '_ha_state'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'

# Name templates for resources backing L3 HA (VRRP) routers.
HA_NETWORK_NAME = 'HA network tenant %s'
HA_SUBNET_NAME = 'HA subnet tenant %s'
HA_PORT_NAME = 'HA port tenant %s'
MINIMUM_AGENTS_FOR_HA = 2

IPv4 = 'IPv4'
IPv6 = 'IPv6'

DHCP_RESPONSE_PORT = 68

# Segmentation-id ranges per network type.
MIN_VLAN_TAG = 1
MAX_VLAN_TAG = 4094

# For GRE Tunnel
MIN_GRE_ID = 1
MAX_GRE_ID = 2 ** 32 - 1

# For VXLAN Tunnel
MIN_VXLAN_VNI = 1
MAX_VXLAN_VNI = 2 ** 24 - 1

# (MAC, IP) pair used for unknown-unicast flooding entries.
FLOODING_ENTRY = ['00:00:00:00:00:00', '0.0.0.0']

EXT_NS_COMP = '_backward_comp_e_ns'
EXT_NS = '_extension_ns'

# XML serialization namespaces/attributes for the v2.0 API.
XML_NS_V20 = 'http://openstack.org/quantum/api/v2.0'
XSI_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
# NOTE(review): the two values below look transposed relative to their names
# ("xsi:nil" vs "xmlns:xsi") — confirm against the XML serializer before
# changing, as consumers may rely on the current assignment.
XSI_ATTR = "xsi:nil"
XSI_NIL_ATTR = "xmlns:xsi"
ATOM_NAMESPACE = "http://www.w3.org/2005/Atom"
ATOM_XMLNS = "xmlns:atom"
ATOM_LINK_NOTATION = "{%s}link" % ATOM_NAMESPACE
TYPE_XMLNS = "xmlns:quantum"
TYPE_ATTR = "quantum:type"
VIRTUAL_ROOT_KEY = "_v_root"

# Type markers used when (de)serializing values.
TYPE_BOOL = "bool"
TYPE_INT = "int"
TYPE_LONG = "long"
TYPE_FLOAT = "float"
TYPE_LIST = "list"
TYPE_DICT = "dict"

# QoS rule/policy type identifiers.
TYPE_QOS_DSCP = "dscp"
TYPE_QOS_RATELIMIT = "ratelimit"
TYPE_QOS_ALL = "all"
TYPE_QOS_POLICY_TC_RATE = "tc_rate"
TYPE_QOS_POLICY_TC_LATENCY = "tc_latency"
TYPE_QOS_POLICY_TC_BURST = "tc_burst"
TYPE_QOS_POLICY_TC_RX_RATE = "rx_averateLimit"
TYPE_QOS_POLICY_TC_TX_RATE = "tx_averateLimit"
TYPE_QOS_POLICY_TC_RX_BURST = "rx_burstsize"
TYPE_QOS_POLICY_TC_TX_BURST = "tx_burstsize"

# Human-readable agent type names reported to the server.
AGENT_TYPE_DHCP = 'DHCP agent'
AGENT_TYPE_OVS = 'Open vSwitch agent'
AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent'
AGENT_TYPE_HYPERV = 'HyperV agent'
AGENT_TYPE_NEC = 'NEC plugin agent'
AGENT_TYPE_OFA = 'OFA driver agent'
AGENT_TYPE_L3 = 'L3 agent'
AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent'
AGENT_TYPE_MLNX = 'Mellanox plugin agent'
AGENT_TYPE_METERING = 'Metering agent'
AGENT_TYPE_METADATA = 'Metadata agent'
AGENT_TYPE_SDNVE = 'IBM SDN-VE agent'
AGENT_TYPE_NIC_SWITCH = 'NIC Switch agent'
AGENT_TYPE_NIC_NETMAP = 'NIC Netmap agent'
L2_AGENT_TOPIC = 'N/A'
AGENT_TYPE_EVS = 'evs agent'
AGENT_TYPE_SERVICECHAIN = 'Service chain agent'

PAGINATION_INFINITE = 'infinite'

SORT_DIRECTION_ASC = 'asc'
SORT_DIRECTION_DESC = 'desc'

# API extension aliases.
PORT_BINDING_EXT_ALIAS = 'binding'
PORT_UNBOUND_EXT_ALIAS = 'unbound'
L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler'
DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler'
LBAAS_AGENT_SCHEDULER_EXT_ALIAS = 'lbaas_agent_scheduler'
L3_DISTRIBUTED_EXT_ALIAS = 'dvr'
L3_HA_MODE_EXT_ALIAS = 'l3-ha'

# Protocol names and numbers for Security Groups/Firewalls
PROTO_NAME_TCP = 'tcp'
PROTO_NAME_ICMP = 'icmp'
PROTO_NAME_ICMP_V6 = 'icmpv6'
PROTO_NAME_UDP = 'udp'
PROTO_NUM_TCP = 6
PROTO_NUM_ICMP = 1
PROTO_NUM_ICMP_V6 = 58
PROTO_NUM_UDP = 17

# List of ICMPv6 types that should be allowed by default:
# Multicast Listener Query (130),
# Multicast Listener Report (131),
# Multicast Listener Done (132),
# Neighbor Solicitation (135),
# Neighbor Advertisement (136)
ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136]
ICMPV6_TYPE_RA = 134

# IPv6 address-configuration modes.
DHCPV6_STATEFUL = 'dhcpv6-stateful'
DHCPV6_STATELESS = 'dhcpv6-stateless'
IPV6_SLAAC = 'slaac'
IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC]

IPV6_LLA_PREFIX = 'fe80::/64'

# Linux interface max length
DEVICE_NAME_MAX_LEN = 15

# Device names start with "tap"
TAP_DEVICE_PREFIX = 'tap'

ATTRIBUTES_TO_UPDATE = 'attributes_to_update'

# Name prefixes for veth device or patch port pair linking the integration
# bridge with the physical bridge for a physical network
PEER_INTEGRATION_PREFIX = 'int-'
PEER_PHYSICAL_PREFIX = 'phy-'
NONEXISTENT_PEER = 'nonexistent-peer'

#EVS vSwitch prefix
QEP_VSWITCH_PREFIX = 'qep'
QVP_PATCH_PORT_PREFIX = 'qvp'
QVE_PATCH_PORT_PREFIX = 'qve'

# Tables for integration bridge
# Table 0 is used for forwarding.
CANARY_TABLE = 23

#REMOTE PORT
REMOTE_PORT_KEY = 'remote_port'

#DESTROY PORT RETRY
DESTROY_RETRY = 3
#UPDATE PORT RETRY
UPDATE_RETRY = 3
#GET VERSION RETRY
GET_RETRY = 5

#MIN_NET_NO
MIN_NET_NO = 1
#MAX_NET_NO
MAX_NET_NO = 100000
|
apache-2.0
|
AversivePlusPlus/AversivePlusPlus
|
tools/conan/conans/test/integration/order_libs_test.py
|
3
|
2031
|
import unittest
from conans.test.tools import TestClient
from conans.paths import CONANFILE
from conans.util.files import load
import os
class OrderLibsTest(unittest.TestCase):
    """Integration test: conan must emit linked libs dependents-first in the
    generated conanbuildinfo files."""

    def setUp(self):
        self.client = TestClient()

    def _export(self, name, deps=None, export=True):
        """Generate a minimal conanfile for ``name`` (requiring ``deps``) and
        optionally export it to the local cache."""
        def _libs():
            # System libs each test package claims to link against.
            if name == "LibPNG":
                libs = '"m"'
            elif name == "SDL2":
                libs = '"m", "rt", "pthread", "dl"'
            else:
                libs = ""
            return libs

        # Render deps as conan references; '""' keeps the template valid
        # when there are no requirements.
        deps = ", ".join(['"%s/1.0@lasote/stable"' % d for d in deps or []]) or '""'
        conanfile = """
from conans import ConanFile, CMake

class HelloReuseConan(ConanFile):
    name = "%s"
    version = "1.0"
    requires = %s
    generators = "txt", "cmake"

    def package_info(self):
        self.cpp_info.libs = ["%s", %s]
""" % (name, deps, name, _libs())
        files = {CONANFILE: conanfile}
        self.client.save(files, clean_first=True)
        if export:
            self.client.run("export lasote/stable")

    def reuse_test(self):
        # Diamond-ish graph: MyProject -> SDL2_ttf -> {freeType, SDL2} -> ... -> ZLib
        self._export("ZLib")
        self._export("BZip2")
        self._export("SDL2", ["ZLib"])
        self._export("LibPNG", ["ZLib"])
        self._export("freeType", ["BZip2", "LibPNG"])
        self._export("SDL2_ttf", ["freeType", "SDL2"])
        self._export("MyProject", ["SDL2_ttf"], export=False)
        self.client.run("install . --build missing")
        self.assertIn("PROJECT: Generated conaninfo.txt", self.client.user_io.out)
        # Expected order: each package before the packages it depends on.
        expected_libs = ['SDL2_ttf', 'SDL2', 'rt', 'pthread', 'dl', 'freeType',
                         'BZip2', 'LibPNG', 'm', 'ZLib']
        conanbuildinfo = load(os.path.join(self.client.current_folder, "conanbuildinfo.txt"))
        libs = os.linesep.join(expected_libs)
        self.assertIn(libs, conanbuildinfo)
        conanbuildinfo = load(os.path.join(self.client.current_folder, "conanbuildinfo.cmake"))
        libs = " ".join(expected_libs)
        self.assertIn(libs, conanbuildinfo)
|
bsd-3-clause
|
yfried/ansible
|
lib/ansible/plugins/terminal/vyos.py
|
191
|
1700
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
    """Terminal plugin for VyOS devices: prompt and error-pattern detection,
    plus initial terminal sizing after login."""

    # Match VyOS operational- and configuration-mode prompts.
    terminal_stdout_re = [
        re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(br"\@[\w\-\.]+:\S+?[>#\$] ?$")
    ]

    # Patterns that mark a failed command in device output.
    terminal_stderr_re = [
        re.compile(br"\n\s*Invalid command:"),
        re.compile(br"\nCommit failed"),
        re.compile(br"\n\s+Set failed"),
    ]

    # Fix: os.getenv returns a *string* when the variable is set, which would
    # make the b'...%d' formatting below raise TypeError; coerce to int so
    # both the default and an overridden value work.
    terminal_length = int(os.getenv('ANSIBLE_VYOS_TERMINAL_LENGTH', 10000))

    def on_open_shell(self):
        """Configure the freshly-opened shell: widen the terminal and set the
        page length so long output is not paginated."""
        try:
            for cmd in (b'set terminal length 0', b'set terminal width 512'):
                self._exec_cli_command(cmd)
            self._exec_cli_command(b'set terminal length %d' % self.terminal_length)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
|
gpl-3.0
|
PredictionGuru/pgurin
|
models.py
|
1
|
1214
|
"""Models / Schema of pgur.in"""
from google.appengine.ext import ndb
class Accounts(ndb.Model):
    """Stores registration information."""
    # Per-platform store URLs; any may be absent.
    playstore_url = ndb.StringProperty()
    appstore_url = ndb.StringProperty()
    winstore_url = ndb.StringProperty()
    # Presumably the fallback when no platform-specific URL applies — confirm.
    default_url = ndb.StringProperty()
    title = ndb.StringProperty(required=True)
    banner = ndb.StringProperty(required=True)
    description = ndb.TextProperty()
    # Indexed so an account can be looked up by its token.
    token = ndb.StringProperty(required=True, indexed=True)
class ShortURLs(ndb.Model):
    """Stores shorten url for given app links."""
    # Indexed identifier of the short link.
    url_id = ndb.StringProperty(indexed=True)
    # Redirect targets per platform; any may be absent.
    android_url = ndb.StringProperty()
    ios_url = ndb.StringProperty()
    windows_url = ndb.StringProperty()
    other_url = ndb.StringProperty()
    data = ndb.JsonProperty(required=True)
    # Redirect delay; presumably milliseconds — confirm against the handler.
    delay = ndb.IntegerProperty(default=1000)
    account = ndb.StructuredProperty(Accounts, required=True)
    # NOTE(review): auto_now=True rewrites this on *every* put(); a creation
    # timestamp usually wants auto_now_add=True — confirm intent.
    created = ndb.DateTimeProperty(auto_now=True)
class IPMapping(ndb.Model):
    """Stores IP-UID mapping."""
    ip_address = ndb.StringProperty(required=True, indexed=True)
    short_url = ndb.StructuredProperty(ShortURLs, required=True)
    # NOTE(review): auto_now=True updates on every put(); auto_now_add may be
    # intended for a creation timestamp — confirm.
    created = ndb.DateTimeProperty(auto_now=True)
|
mit
|
jhoos/django
|
django/core/management/__init__.py
|
39
|
13130
|
from __future__ import unicode_literals
import collections
from importlib import import_module
import os
import pkgutil
import sys
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (BaseCommand, CommandError,
CommandParser, handle_default_options)
from django.core.management.color import color_style
from django.utils import lru_cache, six
from django.utils._os import npath, upath
def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.

    Returns an empty list if no commands are defined.
    """
    command_dir = os.path.join(management_dir, 'commands')
    names = []
    # Modules (not packages) whose names don't start with '_' are commands.
    for _, name, is_pkg in pkgutil.iter_modules([npath(command_dir)]):
        if not is_pkg and not name.startswith('_'):
            names.append(name)
    return names
def load_command_class(app_name, name):
    """
    Given a command name and an application name, returns the Command
    class instance. All errors raised by the import process
    (ImportError, AttributeError) are allowed to propagate.
    """
    module_path = '%s.management.commands.%s' % (app_name, name)
    return import_module(module_path).Command()
@lru_cache.lru_cache(maxsize=None)
def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.

    This works by looking for a management.commands package in django.core, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.

    Core commands are always included. If a settings module has been
    specified, user-defined commands will also be included.

    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)

    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.

    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    # Built-in commands first; app commands may override them below.
    commands = {name: 'django.core' for name in find_commands(upath(__path__[0]))}

    if not settings.configured:
        return commands

    # Iterate in reverse so that, since dict.update lets the later entry win,
    # apps listed *first* in INSTALLED_APPS take precedence.
    for app_config in reversed(list(apps.get_app_configs())):
        path = os.path.join(app_config.path, 'management')
        commands.update({name: app_config.name for name in find_commands(path)})

    return commands
def call_command(name, *args, **options):
    """
    Calls the given command, with the given options and args/kwargs.

    This is the primary API you should use for calling specific commands.

    Some examples:
        call_command('migrate')
        call_command('shell', plain=True)
        call_command('sqlmigrate', 'myapp')
    """
    # Load the command object.
    try:
        app_name = get_commands()[name]
    except KeyError:
        raise CommandError("Unknown command: %r" % name)

    if isinstance(app_name, BaseCommand):
        # If the command is already loaded, use it directly.
        command = app_name
    else:
        command = load_command_class(app_name, name)

    # Simulate argument parsing to get the option defaults (see #10080 for details).
    parser = command.create_parser('', name)
    if command.use_argparse:
        # Use the `dest` option name from the parser option
        opt_mapping = {sorted(s_opt.option_strings)[0].lstrip('-').replace('-', '_'): s_opt.dest
                       for s_opt in parser._actions if s_opt.option_strings}
        # Translate caller-supplied option names through the dest mapping.
        arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
        defaults = parser.parse_args(args=args)
        defaults = dict(defaults._get_kwargs(), **arg_options)
        # Move positional args out of options to mimic legacy optparse
        args = defaults.pop('args', ())
    else:
        # Legacy optparse method
        defaults, _ = parser.parse_args(args=[])
        defaults = dict(defaults.__dict__, **options)
    # System checks are skipped unless the caller explicitly asked for them.
    if 'skip_checks' not in options:
        defaults['skip_checks'] = True

    return command.execute(*args, **defaults)
class ManagementUtility(object):
    """
    Encapsulates the logic of the django-admin and manage.py utilities.

    A ManagementUtility has a number of commands, which can be manipulated
    by editing the self.commands dictionary.
    """
    def __init__(self, argv=None):
        # Default to the process argv; copy so later mutation is safe.
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])
        # Populated in execute() if settings turn out to be misconfigured.
        self.settings_exception = None

    def main_help_text(self, commands_only=False):
        """
        Returns the script's main help text, as a string.
        """
        if commands_only:
            usage = sorted(get_commands().keys())
        else:
            usage = [
                "",
                "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
                "",
                "Available subcommands:",
            ]
            # Group command names by the app that provides them.
            commands_dict = collections.defaultdict(lambda: [])
            for name, app in six.iteritems(get_commands()):
                if app == 'django.core':
                    app = 'django'
                else:
                    app = app.rpartition('.')[-1]
                commands_dict[app].append(name)
            style = color_style()
            for app in sorted(commands_dict.keys()):
                usage.append("")
                usage.append(style.NOTICE("[%s]" % app))
                for name in sorted(commands_dict[app]):
                    usage.append(" %s" % name)
            # Output an extra note if settings are not properly configured
            if self.settings_exception is not None:
                usage.append(style.NOTICE(
                    "Note that only Django core commands are listed "
                    "as settings are not properly configured (error: %s)."
                    % self.settings_exception))
        return '\n'.join(usage)

    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin" or "manage.py") if it can't be found.
        """
        # Get commands outside of try block to prevent swallowing exceptions
        commands = get_commands()
        try:
            app_name = commands[subcommand]
        except KeyError:
            # This might trigger ImproperlyConfigured (masked in get_commands)
            settings.INSTALLED_APPS
            sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" %
                             (subcommand, self.prog_name))
            sys.exit(1)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, subcommand)
        return klass

    def autocomplete(self):
        """
        Output completion suggestions for BASH.

        The output of this function is passed to BASH's `COMREPLY` variable and
        treated as completion suggestions. `COMREPLY` expects a space
        separated string as the result.

        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about this variables.

        Subcommand options are saved as pairs. A pair consists of
        the long option string (e.g. '--exclude') and a boolean
        value indicating if the option requires arguments. When printing to
        stdout, an equal sign is appended to options which require arguments.

        Note: If debugging this function, it is recommended to write the debug
        output in a separate file. Otherwise the debug output will be treated
        and formatted as potential completion suggestions.
        """
        # Don't complete if user hasn't sourced bash_completion file.
        if 'DJANGO_AUTO_COMPLETE' not in os.environ:
            return
        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])
        try:
            curr = cwords[cword - 1]
        except IndexError:
            curr = ''

        subcommands = list(get_commands()) + ['help']
        options = [('--help', False)]

        # subcommand
        if cword == 1:
            print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
        # subcommand options
        # special case: the 'help' subcommand has no options
        elif cwords[0] in subcommands and cwords[0] != 'help':
            subcommand_cls = self.fetch_command(cwords[0])
            # special case: add the names of installed apps to options
            if cwords[0] in ('dumpdata', 'sqlmigrate', 'sqlsequencereset', 'test'):
                try:
                    app_configs = apps.get_app_configs()
                    # Get the last part of the dotted path as the app name.
                    options.extend((app_config.label, 0) for app_config in app_configs)
                except ImportError:
                    # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                    # user will find out once they execute the command.
                    pass
            parser = subcommand_cls.create_parser('', cwords[0])
            if subcommand_cls.use_argparse:
                options.extend((sorted(s_opt.option_strings)[0], s_opt.nargs != 0) for s_opt in
                               parser._actions if s_opt.option_strings)
            else:
                options.extend((s_opt.get_opt_string(), s_opt.nargs) for s_opt in
                               parser.option_list)
            # filter out previously specified options from available options
            prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
            options = [opt for opt in options if opt[0] not in prev_opts]

            # filter options by current input
            options = sorted((k, v) for k, v in options if k.startswith(curr))
            for option in options:
                opt_label = option[0]
                # append '=' to options which require args
                if option[1]:
                    opt_label += '='
                print(opt_label)
        sys.exit(1)

    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        try:
            subcommand = self.argv[1]
        except IndexError:
            subcommand = 'help'  # Display help if no arguments were given.

        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
        parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
        parser.add_argument('--settings')
        parser.add_argument('--pythonpath')
        parser.add_argument('args', nargs='*')  # catch-all
        try:
            options, args = parser.parse_known_args(self.argv[2:])
            handle_default_options(options)
        except CommandError:
            pass  # Ignore any option errors at this point.

        # Commands that must keep working even without a settings module.
        no_settings_commands = [
            'help', 'version', '--help', '--version', '-h',
            'compilemessages', 'makemessages',
            'startapp', 'startproject',
        ]

        try:
            settings.INSTALLED_APPS
        except ImproperlyConfigured as exc:
            self.settings_exception = exc
            # A handful of built-in management commands work without settings.
            # Load the default settings -- where INSTALLED_APPS is empty.
            if subcommand in no_settings_commands:
                settings.configure()

        if settings.configured:
            django.setup()

        # No-op unless invoked by the bash-completion machinery (env vars set).
        self.autocomplete()

        if subcommand == 'help':
            if '--commands' in args:
                sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
            elif len(options.args) < 1:
                sys.stdout.write(self.main_help_text() + '\n')
            else:
                self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
        # Special-cases: We want 'django-admin --version' and
        # 'django-admin --help' to work, for backwards compatibility.
        elif subcommand == 'version' or self.argv[1:] == ['--version']:
            sys.stdout.write(django.get_version() + '\n')
        elif self.argv[1:] in (['--help'], ['-h']):
            sys.stdout.write(self.main_help_text() + '\n')
        else:
            self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
    """
    A simple method that runs a ManagementUtility.
    """
    ManagementUtility(argv).execute()
|
bsd-3-clause
|
linked67/p2pool-500
|
SOAPpy/Types.py
|
289
|
52214
|
from __future__ import nested_scopes
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Types.py 1496 2010-03-04 23:46:17Z pooryorick $'
from version import __version__
import UserList
import base64
import cgi
import urllib
import copy
import re
import time
from types import *
# SOAPpy modules
from Errors import *
from NS import NS
from Utilities import encodeHexString, cleanDate
from Config import Config
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name):
    """True when the first character marks the name as non-public."""
    return name[0] == '_'
def isPublic(name):
    """True when the first character does not mark the name as private."""
    return name[0] != '_'
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
    """Abstract root of the SOAPpy type-wrapper hierarchy (Python 2 code).

    Wraps a Python value together with its SOAP name, namespace, xsi:type
    information and XML attributes.  Subclasses override _checkValueSpace
    to validate/coerce their value space.
    """
    _validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)

    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == anyType:
            raise Error, "anyType can't be instantiated directly"

        # ``name`` may be a bare local name or a (namespace, name) pair.
        if type(name) in (ListType, TupleType):
            self._ns, self._name = name
        else:
            self._ns = self._validURIs[0]
            self._name = name

        self._typed = typed
        self._attrs = {}

        self._cache = None
        self._type = self._typeName()

        self._data = self._checkValueSpace(data)

        if attrs != None:
            self._setAttrs(attrs)

    def __str__(self):
        if hasattr(self,'_name') and self._name:
            return "<%s %s at %d>" % (self.__class__, self._name, id(self))
        return "<%s at %d>" % (self.__class__, id(self))

    __repr__ = __str__

    def _checkValueSpace(self, data):
        # Base type accepts anything; subclasses validate/coerce.
        return data

    def _marshalData(self):
        return str(self._data)

    def _marshalAttrs(self, ns_map, builder):
        # Serialize stored attributes as XML attribute text (escaped).
        a = ''

        for attr, value in self._attrs.items():
            ns, n = builder.genns(ns_map, attr[0])
            a += n + ' %s%s="%s"' % \
                (ns, attr[1], cgi.escape(str(value), 1))

        return a

    def _fixAttr(self, attr):
        # Normalize an attribute key to a (namespace-or-None, name) tuple.
        if type(attr) in (StringType, UnicodeType):
            attr = (None, attr)
        elif type(attr) == ListType:
            attr = tuple(attr)
        elif type(attr) != TupleType:
            raise AttributeError, "invalid attribute type"

        if len(attr) != 2:
            raise AttributeError, "invalid attribute length"

        if type(attr[0]) not in (NoneType, StringType, UnicodeType):
            raise AttributeError, "invalid attribute namespace URI type"

        return attr

    def _getAttr(self, attr):
        attr = self._fixAttr(attr)

        try:
            return self._attrs[attr]
        except:
            # Missing attribute is reported as None rather than raising.
            return None

    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)

        # Store text attribute values uniformly as unicode.
        if type(value) is StringType:
            value = unicode(value)

        self._attrs[attr] = value

    def _setAttrs(self, attrs):
        # Accepts a flat [k, v, k, v, ...] sequence, a dict, or another anyType.
        if type(attrs) in (ListType, TupleType):
            for i in range(0, len(attrs), 2):
                self._setAttr(attrs[i], attrs[i + 1])
            return

        if type(attrs) == DictType:
            d = attrs
        elif isinstance(attrs, anyType):
            d = attrs._attrs
        else:
            raise AttributeError, "invalid attribute type"

        for attr, value in d.items():
            self._setAttr(attr, value)

    def _setMustUnderstand(self, val):
        # SOAP envelope mustUnderstand header attribute.
        self._setAttr((NS.ENV, "mustUnderstand"), val)

    def _getMustUnderstand(self):
        return self._getAttr((NS.ENV, "mustUnderstand"))

    def _setActor(self, val):
        # SOAP envelope actor header attribute.
        self._setAttr((NS.ENV, "actor"), val)

    def _getActor(self):
        return self._getAttr((NS.ENV, "actor"))

    def _typeName(self):
        # Class names follow the ``<xsdname>Type`` convention; strip "Type".
        return self.__class__.__name__[:-4]

    def _validNamespaceURI(self, URI, strict):
        if not hasattr(self, '_typed') or not self._typed:
            return None
        if URI in self._validURIs:
            return URI
        if not strict:
            return self._ns
        raise AttributeError, \
            "not a valid namespace for type %s" % self._type
class voidType(anyType):
    """SOAP void: carries no data; base-class behavior suffices."""
    pass
class stringType(anyType):
    """xsd:string wrapper; value must be a non-None str/unicode."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type:" % self._type

        return data

    def _marshalData(self):
        # Strings are emitted as-is (no str() coercion).
        return self._data
class untypedType(stringType):
    """A string wrapper serialized without type information (typed=0)."""
    def __init__(self, data = None, name = None, attrs = None):
        stringType.__init__(self, data, name, 0, attrs)
# XSD lexical types that reuse stringType's value space unchanged; only the
# marshalled type name (derived from the class name) differs.
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
    """xsd:token: string with no embedded tabs/newlines and no stray spaces."""
    _validURIs = (NS.XSD2, NS.XSD3)
    # Pattern of disallowed character sequences; compiled lazily below.
    __invalidre = '[\n\t]|^ | $| '

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type

        # Compile the pattern on first use (cached on the instance).
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)

        if self.__invalidre.search(data):
            raise ValueError, "invalid %s value" % self._type

        return data
class normalizedStringType(anyType):
    """xsd:normalizedString: string with no newline/carriage-return/tab."""
    _validURIs = (NS.XSD3,)
    # Pattern of disallowed characters; compiled lazily below.
    __invalidre = '[\n\r\t]'

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type

        # Compile the pattern on first use (cached on the instance).
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)

        if self.__invalidre.search(data):
            raise ValueError, "invalid %s value" % self._type

        return data
class CDATAType(normalizedStringType):
    """CDATA (XSD2 only): same value space as normalizedString."""
    _validURIs = (NS.XSD2,)
class booleanType(anyType):
    """xsd:boolean, stored canonically as 0 or 1."""
    def __int__(self):
        return self._data

    __nonzero__ = __int__

    def _marshalData(self):
        # Marshal the 0/1 value to the XML literal.
        return ['false', 'true'][self._data]

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        # Accept numeric and lexical boolean forms; normalize to 0/1.
        if data in (0, '0', 'false', ''):
            return 0
        if data in (1, '1', 'true'):
            return 1

        raise ValueError, "invalid %s value" % self._type
class decimalType(anyType):
    """xsd:decimal: any int/long/float is accepted as-is."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (IntType, LongType, FloatType):
            raise Error, "invalid %s value" % self._type

        return data
class floatType(anyType):
    """xsd:float: numeric value constrained to IEEE single-precision range."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        # Bounds are +/- the largest finite IEEE 754 single-precision value.
        if type(data) not in (IntType, LongType, FloatType) or \
                data < -3.4028234663852886E+38 or \
                data > 3.4028234663852886E+38:
            raise ValueError, "invalid %s value: %s" % (self._type, repr(data))

        return data

    def _marshalData(self):
        return "%.18g" % self._data # More precision
class doubleType(anyType):
    """xsd:double -- numeric value restricted to double-precision range."""
    def _checkValueSpace(self, data):
        """Reject None, non-numeric types, and out-of-range magnitudes."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType, FloatType) or \
           not -1.7976931348623158E+308 <= data <= 1.7976931348623157E+308:
            raise ValueError("invalid %s value: %s" % (self._type, repr(data)))
        return data
    def _marshalData(self):
        # 18 significant digits preserves the value across a round-trip.
        return "%.18g" % self._data
class durationType(anyType):
    """xsd:duration -- a (years, months, days, hours, minutes, seconds) span,
    marshalled in ISO 8601 form such as 'P1Y2MT3.5S'."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # Accepts a scalar or a sequence of up to 6 numbers; returns a 6-tuple.
        # Also records, for _marshalData, which element is the first nonzero
        # one and which (if any) carries a decimal fraction.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        try:
            # A tuple or a scalar is OK, but make them into a list
            if type(data) == TupleType:
                data = list(data)
            elif type(data) != ListType:
                data = [data]
            if len(data) > 6:
                raise Exception, "too many values"
            # Now check the types of all the components, and find
            # the first nonzero element along the way.
            f = -1
            for i in range(len(data)):
                if data[i] == None:
                    data[i] = 0
                    continue
                if type(data[i]) not in \
                   (IntType, LongType, FloatType):
                    raise Exception, "element %d a bad type" % i
                if data[i] and f == -1:
                    f = i
            # If they're all 0, just use zero seconds.
            if f == -1:
                self._cache = 'PT0S'
                return (0,) * 6
            # Make sure only the last nonzero element has a decimal fraction
            # and only the first element is negative.
            d = -1
            for i in range(f, len(data)):
                if data[i]:
                    if d != -1:
                        raise Exception, \
                            "all except the last nonzero element must be " \
                            "integers"
                    if data[i] < 0 and i > f:
                        raise Exception, \
                            "only the first nonzero element can be negative"
                    elif data[i] != long(data[i]):
                        d = i
            # Pad the list on the left if necessary.
            if len(data) < 6:
                n = 6 - len(data)
                f += n
                d += n
                data = [0] * n + data
            # Save index of the first nonzero element and the decimal
            # element for _marshalData.
            self.__firstnonzero = f
            self.__decimal = d
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        # Build and memoize the ISO 8601 lexical form, skipping zero fields.
        if self._cache == None:
            d = self._data
            t = 0
            if d[self.__firstnonzero] < 0:
                s = '-P'
            else:
                s = 'P'
            t = 0
            for i in range(self.__firstnonzero, len(d)):
                if d[i]:
                    # 'T' separates the date part from the time part, emitted
                    # once before the first time component (index > 2).
                    if i > 2 and not t:
                        s += 'T'
                        t = 1
                    if self.__decimal == i:
                        s += "%g" % abs(d[i])
                    else:
                        s += "%d" % long(abs(d[i]))
                    s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]
            self._cache = s
        return self._cache
class timeDurationType(durationType):
    """Older-namespace (1999/2000 XSD, SOAP encoding) alias for duration."""
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
    """xsd:dateTime -- an absolute UTC date and time."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # Accepts None (meaning "now"), a numeric POSIX timestamp, or a
        # sequence of at least (Y, M, D, h, m, s); returns a 6-tuple.
        try:
            if data == None:
                data = time.time()
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Preserve fractional seconds separately from gmtime().
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 6:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"
                data = list(data[:6])
                cleanDate(data)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        # ISO 8601 'YYYY-MM-DDThh:mm:ss[.f...]Z'; negative years gain a '-'.
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            f = d[5] - int(d[5])
            if f != 0:
                # Append the fractional seconds without the leading '0'.
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class recurringInstantType(anyType):
    """xsd:recurringInstant -- a dateTime in which leading fields may be
    omitted (None) to express recurrence."""
    _validURIs = (NS.XSD,)
    def _checkValueSpace(self, data):
        # Accepts None (now), a numeric timestamp, or a sequence; returns a
        # 6-tuple in which leading elements may be None.
        try:
            if data == None:
                data = list(time.gmtime(time.time())[:6])
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 1:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"
                data = list(data[:6])
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                # Find the first None; everything before it must be concrete.
                f = len(data)
                for i in range(f):
                    if data[i] == None:
                        # NOTE(review): f < i can never hold here (f starts at
                        # len(data) and the loop breaks on the first None), so
                        # this check appears to be dead code -- confirm intent
                        # before relying on the error it claims to raise.
                        if f < i:
                            raise Exception, \
                                "only leftmost elements can be none"
                        else:
                            f = i
                            break
                cleanDate(data, f)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        # Render each omitted (None) or zeroed leading field as '-'.
        if self._cache == None:
            d = self._data
            e = list(d)
            neg = ''
            if not e[0]:
                e[0] = '--'
            else:
                if e[0] < 0:
                    neg = '-'
                    e[0] = abs(e[0])
                if e[0] < 100:
                    e[0] = '-' + "%02d" % e[0]
                else:
                    e[0] = "%04d" % e[0]
            for i in range(1, len(e)):
                if e[i] == None or (i < 3 and e[i] == 0):
                    e[i] = '-'
                else:
                    if e[i] < 0:
                        neg = '-'
                        e[i] = abs(e[i])
                    e[i] = "%02d" % e[i]
            if d[5]:
                # Append fractional seconds, if any, without the leading '0'.
                f = abs(d[5] - int(d[5]))
                if f:
                    e[5] += ("%g" % f)[1:]
            s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
            self._cache = s
        return self._cache
class timeInstantType(dateTimeType):
    """Older-namespace alias for dateTime."""
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
    """2000 XSD / SOAP-encoding alias for dateTime."""
    _validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
    """xsd:time -- a time of day (h, m, s) in UTC."""
    def _checkValueSpace(self, data):
        # Accepts None (now), a numeric timestamp, or a sequence; returns an
        # (hour, minute, second) 3-tuple.
        try:
            if data == None:
                data = time.gmtime(time.time())[3:6]
            elif (type(data) == FloatType):
                # Keep the fractional seconds.
                f = data - int(data)
                data = list(time.gmtime(int(data))[3:6])
                data[2] += f
            elif type(data) in (IntType, LongType):
                data = time.gmtime(data)[3:6]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time: take the h/m/s slice.
                    data = data[3:6]
                elif len(data) > 3:
                    raise Exception, "too many values"
                # Prepend dummy date fields so cleanDate can normalize.
                data = [None, None, None] + list(data)
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                cleanDate(data, 3)
                data = data[3:]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        # 'hh:mm:ss[.f...]Z'; memoized in self._cache.
        if self._cache == None:
            d = self._data
            #s = ''
            #
            #s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
            s = "%02d:%02d:%02d" % d
            f = d[2] - int(d[2])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class dateType(anyType):
    """xsd:date -- a calendar date (year, month, day)."""
    def _checkValueSpace(self, data):
        # Accepts None (today), a numeric timestamp, or a sequence; returns a
        # (year, month, day) 3-tuple.
        try:
            if data == None:
                data = time.gmtime(time.time())[0:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time: take the Y/M/D slice.
                    data = data[0:3]
                elif len(data) > 3:
                    raise Exception, "too many values"
                data = list(data)
                # Default missing month/day to 1, then pad with zero h/m/s
                # so cleanDate can validate/normalize the whole date.
                if len(data) < 3:
                    data += [1, 1, 1][len(data):]
                data += [0, 0, 0]
                cleanDate(data)
                data = data[:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        # 'YYYY-MM-DDZ'; negative years gain a leading '-'.
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearMonthType(anyType):
    """xsd:gYearMonth -- a (year, month) pair."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # Accepts None (current year/month), a numeric timestamp, or a
        # sequence; returns a (year, month) 2-tuple.
        try:
            if data == None:
                data = time.gmtime(time.time())[0:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:2]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time: take the year/month slice.
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"
                data = list(data)
                # Default a missing month to 1, pad to a full date/time so
                # cleanDate can validate, then keep only year and month.
                if len(data) < 2:
                    data += [1, 1][len(data):]
                data += [1, 0, 0, 0]
                cleanDate(data)
                data = data[:2]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)
    def _marshalData(self):
        # 'YYYY-MMZ'; negative years gain a leading '-'.
        if self._cache == None:
            d = self._data
            s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearType(anyType):
    """xsd:gYear -- a single (possibly negative) year."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # Accepts None (current year), a scalar, or a 1-element sequence
        # (or full struct_time); returns the year as a plain integer.
        try:
            if data == None:
                data = time.gmtime(time.time())[0:1]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:1]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    # A float is allowed only if it is integral.
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        # 'YYYYZ'; negative years gain a leading '-'.
        if self._cache == None:
            d = self._data
            s = "%04dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class centuryType(anyType):
    """xsd-2000 century -- a year divided by 100 (e.g. 20 for 2009)."""
    _validURIs = (NS.XSD2, NS.ENC)
    def _checkValueSpace(self, data):
        """Validate *data* as a century number.

        Accepts None (the current century), a scalar, or a one-element
        sequence (or a 9-element struct_time, whose year is reduced to a
        century).  Returns a plain integer.
        """
        try:
            if data == None:
                # Bug fix: divide the year itself by 100; the original
                # divided the (year,) tuple by 100, a TypeError.
                data = [time.gmtime(time.time())[0] // 100]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # Bug fix (same defect): reduce a struct_time's year to
                    # its century instead of dividing a tuple slice.
                    data = [data[0] // 100]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")
                if type(data[0]) == FloatType:
                    # A float is allowed only if it is integral.
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception("bad type")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return data[0]
    def _marshalData(self):
        # Two-digit century, e.g. '20Z'; negative centuries gain a '-'.
        if self._cache == None:
            d = self._data
            s = "%02dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class yearType(gYearType):
    """2000 XSD / SOAP-encoding alias for gYear."""
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
    """xsd:gMonthDay -- a recurring (month, day) pair, e.g. '--12-25Z'."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        """Validate *data* as a (month, day) pair.

        Accepts None (today's month/day), a numeric timestamp, or a
        sequence of up to 2 values (or a 9-element struct_time).
        Returns a (month, day) 2-tuple.
        """
        try:
            if data == None:
                data = time.gmtime(time.time())[1:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[1:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # Bug fix: struct_time[1:3] is (month, day); the original
                    # took [0:2], which is (year, month).
                    data = data[1:3]
                elif len(data) > 2:
                    raise Exception("too many values")
                data = list(data)
                # Default missing fields to 1, wrap with dummy year and
                # h/m/s so cleanDate can validate, then keep month/day.
                if len(data) < 2:
                    data += [1, 1][len(data):]
                data = [0] + data + [0, 0, 0]
                cleanDate(data, 1)
                data = data[1:3]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # '--MM-DDZ' per the gMonthDay lexical form; memoized.
        if self._cache == None:
            self._cache = "--%02d-%02dZ" % self._data
        return self._cache
class recurringDateType(gMonthDayType):
    """2000 XSD / SOAP-encoding alias for gMonthDay."""
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
    """xsd:gMonth -- a recurring month number (1-12)."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # Accepts None (current month), a scalar, or a 1-element sequence
        # (or struct_time); returns the month as a plain integer in 1..12.
        try:
            if data == None:
                data = time.gmtime(time.time())[1:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[1:2]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    # A float is allowed only if it is integral.
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                if data[0] < 1 or data[0] > 12:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        # '--MM--Z' per the gMonth lexical form; memoized.
        if self._cache == None:
            self._cache = "--%02d--Z" % self._data
        return self._cache
class monthType(gMonthType):
    """2000 XSD / SOAP-encoding alias for gMonth."""
    _validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
    """xsd:gDay -- a recurring day of the month (1-31)."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # Accepts None (current day), a scalar, or a 1-element sequence
        # (or struct_time); returns the day as a plain integer in 1..31.
        try:
            if data == None:
                data = time.gmtime(time.time())[2:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[2:3]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    # A float is allowed only if it is integral.
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                if data[0] < 1 or data[0] > 31:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]
    def _marshalData(self):
        # '---DDZ' per the gDay lexical form; memoized.
        if self._cache == None:
            self._cache = "---%02dZ" % self._data
        return self._cache
class recurringDayType(gDayType):
    """2000 XSD / SOAP-encoding alias for gDay."""
    _validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
    """xsd:hexBinary -- raw bytes marshalled as a hex string."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        """Require the raw content to be a (unicode) string."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _marshalData(self):
        # Hex-encode lazily and memoize the result in self._cache.
        if self._cache == None:
            self._cache = encodeHexString(self._data)
        return self._cache
class base64BinaryType(anyType):
    """xsd:base64Binary -- raw bytes marshalled as base64 text."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        """Require the raw content to be a (unicode) string."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _marshalData(self):
        # Base64-encode lazily and memoize the result in self._cache.
        if self._cache == None:
            self._cache = base64.encodestring(self._data)
        return self._cache
class base64Type(base64BinaryType):
    """SOAP-encoding alias for base64Binary."""
    _validURIs = (NS.ENC,)
class binaryType(anyType):
    """1999 XSD / SOAP-encoding binary with a selectable 'encoding'
    attribute ('base64' or 'hex')."""
    _validURIs = (NS.XSD, NS.ENC)
    def __init__(self, data, name = None, typed = 1, encoding = 'base64',
        attrs = None):
        # The wire encoding is carried as an XML attribute on the element.
        anyType.__init__(self, data, name, typed, attrs)
        self._setAttr('encoding', encoding)
    def _marshalData(self):
        # Encode per the current 'encoding' attribute; memoized in _cache.
        if self._cache == None:
            if self._getAttr((None, 'encoding')) == 'base64':
                self._cache = base64.encodestring(self._data)
            else:
                self._cache = encodeHexString(self._data)
        return self._cache
    def _checkValueSpace(self, data):
        # The raw content must be a (unicode) string.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _setAttr(self, attr, value):
        # Intercept 'encoding' to validate it and invalidate the cache;
        # all other attributes pass straight through to anyType.
        attr = self._fixAttr(attr)
        if attr[1] == 'encoding':
            if attr[0] != None or value not in ('base64', 'hex'):
                raise AttributeError, "invalid encoding"
            self._cache = None
        anyType._setAttr(self, attr, value)
class anyURIType(anyType):
    """xsd:anyURI -- a URI, percent-quoted when marshalled."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        """Require the URI to be a (unicode) string."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _marshalData(self):
        # Percent-quote lazily and memoize the result in self._cache.
        if self._cache == None:
            self._cache = urllib.quote(self._data)
        return self._cache
class uriType(anyURIType):
    """1999 XSD alias for anyURI."""
    _validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
    """2000 XSD alias for anyURI."""
    _validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
    """xsd:NOTATION -- abstract; only concrete subclasses may be built."""
    def __init__(self, data, name = None, typed = 1, attrs = None):
        # NOTATION is abstract in XML Schema, so refuse direct instantiation.
        if self.__class__ == NOTATIONType:
            raise Error("a NOTATION can't be instantiated directly")
        anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
    """xsd:ENTITIES -- a whitespace-separated list of names."""
    def _checkValueSpace(self, data):
        """Accept a lone string (wrapped in a 1-tuple) or a sequence of strings."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (StringType, UnicodeType):
            return (data,)
        # Every element of the sequence must itself be a string.
        if type(data) not in (ListType, TupleType) or \
           filter(lambda x: type(x) not in (StringType, UnicodeType), data):
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _marshalData(self):
        # Marshal as the space-joined list of names.
        return ' '.join(self._data)
class IDREFSType(ENTITIESType): pass   # xsd:IDREFS -- same list-of-names handling
class NMTOKENSType(ENTITIESType): pass   # xsd:NMTOKENS -- same list-of-names handling
class integerType(anyType):
    """xsd:integer -- any Python int or long, unbounded."""
    def _checkValueSpace(self, data):
        """Require an int/long; return it unchanged."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType):
            raise ValueError("invalid %s value" % self._type)
        return data
class nonPositiveIntegerType(anyType):
    """xsd:nonPositiveInteger -- an integer <= 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long that is not positive."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or data > 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class non_Positive_IntegerType(nonPositiveIntegerType):
    """1999 XSD spelling ('non-positive-integer') of nonPositiveInteger."""
    _validURIs = (NS.XSD,)
    def _typeName(self):
        # The hyphenated 1999 tag name cannot be a Python identifier.
        return 'non-positive-integer'
class negativeIntegerType(anyType):
    """xsd:negativeInteger -- an integer < 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require a strictly negative int/long."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or data >= 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class negative_IntegerType(negativeIntegerType):
    """1999 XSD spelling ('negative-integer') of negativeInteger."""
    _validURIs = (NS.XSD,)
    def _typeName(self):
        # The hyphenated 1999 tag name cannot be a Python identifier.
        return 'negative-integer'
class longType(anyType):
    """xsd:long -- 64-bit signed integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long within the signed 64-bit range."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or \
           not -9223372036854775808 <= data <= 9223372036854775807:
            raise ValueError("invalid %s value" % self._type)
        return data
class intType(anyType):
    """xsd:int -- 32-bit signed integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long within the signed 32-bit range."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or \
           not -2147483648 <= data <= 2147483647:
            raise ValueError("invalid %s value" % self._type)
        return data
class shortType(anyType):
    """xsd:short -- 16-bit signed integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long within the signed 16-bit range."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or \
           not -32768 <= data <= 32767:
            raise ValueError("invalid %s value" % self._type)
        return data
class byteType(anyType):
    """xsd:byte -- 8-bit signed integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long within the signed 8-bit range."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or \
           not -128 <= data <= 127:
            raise ValueError("invalid %s value" % self._type)
        return data
class nonNegativeIntegerType(anyType):
    """xsd:nonNegativeInteger -- an integer >= 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long that is not negative."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or data < 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class non_Negative_IntegerType(nonNegativeIntegerType):
    """1999 XSD spelling ('non-negative-integer') of nonNegativeInteger."""
    _validURIs = (NS.XSD,)
    def _typeName(self):
        # The hyphenated 1999 tag name cannot be a Python identifier.
        return 'non-negative-integer'
class unsignedLongType(anyType):
    """xsd:unsignedLong -- 64-bit unsigned integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long within the unsigned 64-bit range."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or \
           not 0 <= data <= 18446744073709551615:
            raise ValueError("invalid %s value" % self._type)
        return data
class unsignedIntType(anyType):
    """xsd:unsignedInt -- 32-bit unsigned integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long within the unsigned 32-bit range."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or \
           not 0 <= data <= 4294967295:
            raise ValueError("invalid %s value" % self._type)
        return data
class unsignedShortType(anyType):
    """xsd:unsignedShort -- 16-bit unsigned integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long within the unsigned 16-bit range."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or \
           not 0 <= data <= 65535:
            raise ValueError("invalid %s value" % self._type)
        return data
class unsignedByteType(anyType):
    """xsd:unsignedByte -- 8-bit unsigned integer."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require an int/long within the unsigned 8-bit range."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or \
           not 0 <= data <= 255:
            raise ValueError("invalid %s value" % self._type)
        return data
class positiveIntegerType(anyType):
    """xsd:positiveInteger -- an integer > 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        """Require a strictly positive int/long."""
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType) or data <= 0:
            raise ValueError("invalid %s value" % self._type)
        return data
class positive_IntegerType(positiveIntegerType):
    """1999 XSD spelling ('positive-integer') of positiveInteger."""
    _validURIs = (NS.XSD,)
    def _typeName(self):
        # The hyphenated 1999 tag name cannot be a Python identifier.
        return 'positive-integer'
# Now compound types
class compoundType(anyType):
    """Abstract base for SOAP compound values (structs, headers, bodies).

    Child elements become instance attributes; ``_keyord`` records arrival
    order so the value can also be viewed positionally or as a dict.
    """
    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == compoundType:
            raise Error("a compound can't be instantiated directly")
        anyType.__init__(self, data, name, typed, attrs)
        self._keyord = []
        if type(data) == DictType:
            self.__dict__.update(data)
    def _aslist(self, item=None):
        """Return all members in arrival order, or just the member at *item*."""
        if item is not None:
            return self.__dict__[self._keyord[item]]
        else:
            return map(lambda x: self.__dict__[x], self._keyord)
    def _asdict(self, item=None, encoding=Config.dict_encoding):
        """Return members as a dict keyed by (byte-encoded) member name."""
        if item is not None:
            if type(item) in (UnicodeType, StringType):
                item = item.encode(encoding)
            return self.__dict__[item]
        else:
            retval = {}
            def fun(x): retval[x.encode(encoding)] = self.__dict__[x]
            if hasattr(self, '_keyord'):
                map(fun, self._keyord)
            else:
                # No ordering information: fall back to public attributes.
                for name in dir(self):
                    if isPublic(name):
                        retval[name] = getattr(self, name)
            return retval
    def __getitem__(self, item):
        # Integer -> positional access via _keyord; otherwise attribute access.
        if type(item) == IntType:
            return self.__dict__[self._keyord[item]]
        else:
            return getattr(self, item)
    def __len__(self):
        return len(self._keyord)
    def __nonzero__(self):
        # A compound is always truthy, even when it has no members.
        return 1
    def _keys(self):
        """Return the names of all public (non-underscore) members."""
        return filter(lambda x: x[0] != '_', self.__dict__.keys())
    def _addItem(self, name, value, attrs = None):
        """Append member *name*; a repeated name collapses into a list."""
        if name in self._keyord:
            if type(self.__dict__[name]) != ListType:
                self.__dict__[name] = [self.__dict__[name]]
            self.__dict__[name].append(value)
        else:
            self.__dict__[name] = value
            self._keyord.append(name)
    def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
        """Set member *name* at order position *pos* (and list index *subpos*)."""
        if subpos == 0 and type(self.__dict__[name]) != ListType:
            self.__dict__[name] = value
        else:
            self.__dict__[name][subpos] = value
        # only add to key order list if it does not already
        # exist in list
        if not (name in self._keyord):
            # Bug fix: the original tested ``pos < len(x)`` where ``x`` is
            # undefined (NameError); the intent is to overwrite within the
            # existing key-order list when the slot exists.
            if pos < len(self._keyord):
                self._keyord[pos] = name
            else:
                self._keyord.append(name)
    def _getItemAsList(self, name, default = []):
        """Return member *name* always wrapped in a list; *default* if absent."""
        try:
            d = self.__dict__[name]
        except:
            return default
        if type(d) == ListType:
            return d
        return [d]
    def __str__(self):
        return anyType.__str__(self) + ": " + str(self._asdict())
    def __repr__(self):
        return self.__str__()
class structType(compoundType):
    """A SOAP struct: a compound whose members are accessed by name."""
    pass
class headerType(structType):
    """The SOAP-ENV:Header element; always serialized with the name 'Header'."""
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
    """The SOAP-ENV:Body element; always serialized with the name 'Body'."""
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
    """SOAP-encoded Array: a possibly multi-dimensional, possibly sparse
    sequence (SOAP 1.1 section 5.4.2)."""
    def __init__(self, data = None, name = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None):
        if data:
            if type(data) not in (ListType, TupleType):
                raise Error("Data must be a sequence")
        UserList.UserList.__init__(self, data)
        compoundType.__init__(self, data, name, 0, attrs)
        self._elemsname = elemsname or "item"
        if data == None:
            self._rank = rank
            # According to 5.4.2.2 in the SOAP spec, each element in a
            # sparse array must have a position. _posstate keeps track of
            # whether we've seen a position or not. It's possible values
            # are:
            # -1 No elements have been added, so the state is indeterminate
            # 0 An element without a position has been added, so no
            #   elements can have positions
            # 1 An element with a position has been added, so all elements
            #   must have positions
            self._posstate = -1
            self._full = 0
            if asize in ('', None):
                asize = '0'
            self._dims = map(lambda x: int(x), str(asize).split(','))
            self._dims.reverse()   # It's easier to work with this way
            self._poss = [0] * len(self._dims)   # This will end up
                                                 # reversed too
            for i in range(len(self._dims)):
                if self._dims[i] < 0 or \
                   self._dims[i] == 0 and len(self._dims) > 1:
                    raise TypeError("invalid Array dimensions")
                if offset > 0:
                    self._poss[i] = offset % self._dims[i]
                    offset = int(offset / self._dims[i])
                # Don't break out of the loop if offset is 0 so we test all the
                # dimensions for > 0.
            if offset:
                raise AttributeError("invalid Array offset")
            # Pre-build the nested backing lists, innermost dimension first.
            a = [None] * self._dims[0]
            for i in range(1, len(self._dims)):
                b = []
                for j in range(self._dims[i]):
                    b.append(copy.deepcopy(a))
                a = b
            self.data = a
    def _aslist(self, item=None):
        """Return the backing list, or the element at *item*."""
        if item is not None:
            return self.data[int(item)]
        else:
            return self.data
    def _asdict(self, item=None, encoding=Config.dict_encoding):
        """Return elements keyed by their (string-encoded) index."""
        if item is not None:
            if type(item) in (UnicodeType, StringType):
                item = item.encode(encoding)
            return self.data[int(item)]
        else:
            retval = {}
            def fun(x): retval[str(x).encode(encoding)] = self.data[x]
            map(fun, range(len(self.data)))
            return retval
    def __getitem__(self, item):
        # Numeric (or numeric-string) items index the list; anything else
        # falls through to attribute access.
        try:
            return self.data[int(item)]
        except ValueError:
            return getattr(self, item)
    def __len__(self):
        return len(self.data)
    def __nonzero__(self):
        # An Array is always truthy, even when empty.
        return 1
    def __str__(self):
        return anyType.__str__(self) + ": " + str(self._aslist())
    def _keys(self):
        return filter(lambda x: x[0] != '_', self.__dict__.keys())
    def _addItem(self, name, value, attrs):
        """Append an element, honoring a SOAP-ENC:position attribute."""
        if self._full:
            raise ValueError("Array is full")
        pos = attrs.get((NS.ENC, 'position'))
        if pos != None:
            # A positioned element: this must be (or become) a sparse array.
            if self._posstate == 0:
                raise AttributeError(
                    "all elements in a sparse Array must have a "
                    "position attribute")
            self._posstate = 1
            try:
                if pos[0] == '[' and pos[-1] == ']':
                    pos = map(lambda x: int(x), pos[1:-1].split(','))
                    pos.reverse()
                    if len(pos) == 1:
                        # Flat index: decompose into per-dimension positions.
                        pos = pos[0]
                        curpos = [0] * len(self._dims)
                        for i in range(len(self._dims)):
                            curpos[i] = pos % self._dims[i]
                            pos = int(pos / self._dims[i])
                            if pos == 0:
                                break
                        if pos:
                            raise Exception
                    elif len(pos) != len(self._dims):
                        raise Exception
                    else:
                        for i in range(len(self._dims)):
                            if pos[i] >= self._dims[i]:
                                raise Exception
                        curpos = pos
                else:
                    raise Exception
            except:
                raise AttributeError(
                    "invalid Array element position %s" % str(pos))
        else:
            if self._posstate == 1:
                raise AttributeError(
                    "only elements in a sparse Array may have a "
                    "position attribute")
            self._posstate = 0
            curpos = self._poss
        # Walk down to the innermost row, then store the value there.
        a = self.data
        for i in range(len(self._dims) - 1, 0, -1):
            a = a[curpos[i]]
        if curpos[0] >= len(a):
            # Bug fix: grow the row so index curpos[0] exists.  The original
            # added len(a) - curpos[0] + 1 elements, which is <= 1 whenever
            # growth is actually needed and could leave the row too short.
            a += [None] * (curpos[0] - len(a) + 1)
        a[curpos[0]] = value
        if pos == None:
            # Advance the implicit cursor, carrying into higher dimensions.
            self._poss[0] += 1
            for i in range(len(self._dims) - 1):
                if self._poss[i] < self._dims[i]:
                    break
                self._poss[i] = 0
                self._poss[i + 1] += 1
        if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
            #self._full = 1
            #FIXME: why is this occuring?
            pass
    def _placeItem(self, name, value, pos, subpos, attrs = None):
        """Place *value* at flat index *pos*, decomposed across dimensions."""
        curpos = [0] * len(self._dims)
        for i in range(len(self._dims)):
            if self._dims[i] == 0:
                curpos[0] = pos
                break
            curpos[i] = pos % self._dims[i]
            pos = int(pos / self._dims[i])
            if pos == 0:
                break
        if self._dims[i] != 0 and pos:
            raise Error("array index out of range")
        a = self.data
        for i in range(len(self._dims) - 1, 0, -1):
            a = a[curpos[i]]
        if curpos[0] >= len(a):
            # Bug fix: same off-by defect as in _addItem -- grow the row by
            # curpos[0] - len(a) + 1, not len(a) - curpos[0] + 1.
            a += [None] * (curpos[0] - len(a) + 1)
        a[curpos[0]] = value
class typedArrayType(arrayType):
    """An Array whose elements all share one declared type."""
    def __init__(self, data = None, name = None, typed = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None, complexType = 0):
        arrayType.__init__(self, data, name, attrs, offset, rank, asize,
                           elemsname)
        # Record the element type for the serializer; mark the array typed.
        self._typed = 1
        self._type = typed
        self._complexType = complexType
class faultType(structType, Error):
    """A SOAP Fault element; inherits Error so it can also be raised."""
    def __init__(self, faultcode = "", faultstring = "", detail = None):
        self.faultcode = faultcode
        self.faultstring = faultstring
        # detail is only set when present, so __repr__ can distinguish.
        if detail != None:
            self.detail = detail
        structType.__init__(self, None, 0)
    def _setDetail(self, detail = None):
        # Passing no detail removes any existing one.
        if detail != None:
            self.detail = detail
        else:
            try: del self.detail
            except AttributeError: pass
    def __repr__(self):
        if getattr(self, 'detail', None) != None:
            return "<Fault %s: %s: %s>" % (self.faultcode,
                                           self.faultstring,
                                           self.detail)
        else:
            return "<Fault %s: %s>" % (self.faultcode, self.faultstring)
    __str__ = __repr__
    def __call__(self):
        # NOTE(review): assumes self.detail is set; a fault built without a
        # detail raises AttributeError here -- confirm callers expect that.
        return (self.faultcode, self.faultstring, self.detail)
class SOAPException(Exception):
    """Generic SOAP fault raised when no more specific exception applies."""
    def __init__(self, code="", string="", detail=None):
        # Keep the parts individually and as a single descriptive tuple.
        self.code = code
        self.string = string
        self.detail = detail
        self.value = ("SOAPpy SOAP Exception", code, string, detail)
    def __str__(self):
        return repr(self.value)
class RequiredHeaderMismatch(Exception):
    """Raised when a mustUnderstand SOAP header could not be honored."""
    def __init__(self, value):
        # value: the fault detail reported by the server.
        self.value = value
    def __str__(self):
        return repr(self.value)
class MethodNotFound(Exception):
    """Raised when the server reports that the called method does not exist.

    The server's fault detail has the form "<method>: <explanation>"; it is
    split into ``value`` (the method) and ``detail`` (the explanation).
    """
    def __init__(self, value):
        # Split only on the first colon so explanations containing
        # colons do not make the unpacking fail.
        (val, detail) = value.split(":", 1)
        self.value = val
        self.detail = detail
    def __str__(self):
        # Bug fix: the original called repr(self.value, self.detail), which
        # is a TypeError (repr takes one argument); show both as a tuple.
        return repr((self.value, self.detail))
class AuthorizationFailed(Exception):
    """Raised when the server rejects the caller's credentials."""
    def __init__(self, value):
        # value: the fault detail reported by the server.
        self.value = value
    def __str__(self):
        return repr(self.value)
class MethodFailed(Exception):
    """Raised when the remote method ran but reported failure."""
    def __init__(self, value):
        # value: the fault detail reported by the server.
        self.value = value
    def __str__(self):
        return repr(self.value)
#######
# Convert complex SOAPpy objects to native python equivalents
#######
def simplify(object, level=0):
    """
    Convert the SOAPpy objects and their contents to simple python types.

    This function recursively converts the passed 'container' object,
    and all public subobjects. (Private subobjects have names that
    start with '_'.)

    Conversions:
    - faultType    --> raise python exception
    - arrayType    --> array
    - compoundType --> dictionary
    """
    # Depth guard against cyclic / pathologically nested structures.
    if level > 10:
        return object
    if isinstance(object, faultType):
        # Map the well-known fault strings onto specific exceptions.
        if object.faultstring == "Required Header Misunderstood":
            raise RequiredHeaderMismatch(object.detail)
        elif object.faultstring == "Method Not Found":
            raise MethodNotFound(object.detail)
        elif object.faultstring == "Authorization Failed":
            raise AuthorizationFailed(object.detail)
        elif object.faultstring == "Method Failed":
            raise MethodFailed(object.detail)
        else:
            se = SOAPException(object.faultcode, object.faultstring,
                               object.detail)
            raise se
    elif isinstance(object, arrayType):
        data = object._aslist()
        for k in range(len(data)):
            data[k] = simplify(data[k], level=level+1)
        return data
    elif isinstance(object, compoundType) or isinstance(object, structType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                data[k] = simplify(data[k], level=level+1)
        return data
    elif type(object) == DictType:
        for k in object.keys():
            if isPublic(k):
                # Bug fix: propagate the recursion depth; the original
                # restarted it at 0 here, defeating the level > 10 guard.
                object[k] = simplify(object[k], level=level+1)
        return object
    elif type(object) == list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth here as well.
            object[k] = simplify(object[k], level=level+1)
        return object
    else:
        return object
def simplify_contents(object, level=0):
    """
    Convert the contents of SOAPpy objects to simple python types.

    This function recursively converts the sub-objects contained in a
    'container' object to simple python types; the container itself is
    returned unchanged (except for faults, which are raised).

    Conversions:
    - faultType    --> raise python exception
    - arrayType    --> array
    - compoundType --> dictionary
    """
    # Depth guard against cyclic / pathologically nested structures.
    if level > 10: return object
    if isinstance(object, faultType):
        # Simplify the fault's public members, then raise the fault itself.
        for k in object._keys():
            if isPublic(k):
                setattr(object, k, simplify(object[k], level=level+1))
        raise object
    elif isinstance(object, arrayType):
        data = object._aslist()
        for k in range(len(data)):
            object[k] = simplify(data[k], level=level+1)
    elif isinstance(object, structType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                setattr(object, k, simplify(data[k], level=level+1))
    elif isinstance(object, compoundType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                object[k] = simplify(data[k], level=level+1)
    elif type(object) == DictType:
        for k in object.keys():
            if isPublic(k):
                # Bug fix: propagate the recursion depth; the original
                # restarted it at 0 here, defeating the level > 10 guard.
                object[k] = simplify(object[k], level=level+1)
    elif type(object) == list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth here as well.
            object[k] = simplify(object[k], level=level+1)
    return object
|
gpl-3.0
|
GoodRx/fwffr
|
fwffr.py
|
1
|
7288
|
#!/usr/bin/env python
"""
Utilities related to parsing files
"""
from collections import OrderedDict
import sys
PY3 = sys.version_info[0] == 3
__all__ = [
'FixedLengthError',
'FixedLengthUnknownRecordTypeError',
'FixedLengthSeparatorError',
'FixedLengthJustificationError',
'FixedLengthFieldParser',
]
if PY3:
    # Python 3: dict.iteritems() no longer exists; items() returns a view.
    def iteritems(d, **kw):
        return iter(d.items(**kw))
else:
    # Python 2: use the lazy iterator directly.
    def iteritems(d, **kw):
        return d.iteritems(**kw)
class FixedLengthError(ValueError):
    """ Base class for parsing errors """
    def __str__(self):
        # Subclasses are expected to set self.message in __init__.
        return self.message
class FixedLengthUnknownRecordTypeError(FixedLengthError):
    """Raised when a record's type indicator matches no configured type."""
    MESSAGE = "Unknown record type %r encountered"
    def __init__(self, record_type):
        # Format the message first, then initialize the ValueError base
        # with the offending record type as its argument.
        message = self.MESSAGE % (record_type,)
        super(FixedLengthUnknownRecordTypeError, self).__init__(record_type)
        self.message = message
class FixedLengthSeparatorError(FixedLengthError):
    """Raised when the expected field separator is missing before a field."""
    MESSAGE = "No field separator found before %r at %d"
    def __init__(self, field, pointer):
        # Format the message first, then initialize the ValueError base
        # with the field name and byte offset as its arguments.
        message = self.MESSAGE % (field, pointer)
        super(FixedLengthSeparatorError, self).__init__(field, pointer)
        self.message = message
class FixedLengthJustificationError(FixedLengthError):
    """Raised when a field's value is padded on the wrong side."""
    MESSAGE = "Field %r value %r is not justified correctly"
    def __init__(self, field, value):
        # Format the message first, then initialize the ValueError base
        # with the field name and offending value as its arguments.
        message = self.MESSAGE % (field, value)
        super(FixedLengthJustificationError, self).__init__(field, value)
        self.message = message
class FixedLengthFieldParser(object):
    """
    Utility to parse and read from fixed-length field files.
    For these classes, process_fixed_length_record should be passed a sequence
    of (field_name, field_length) tuples. The record will be parsed, and
    validation will be run to ensure that the file is not malformed.
    file_obj
        The file-like object to parse.
    fields
        Field specification. For files with homogeneous records, this should be
        a list of tuples in the form (field_name, length_of_field), or an
        OrderedDict with items in the same format. For files that combine
        different types of records in the same file, a dictionary can be passed
        whose keys are record type indicators, and whose values are in the
        previously described format for fields. For these files, the
        record_type_func parameter MUST be passed.
    record_type_func
        For files with multiple record types, this must be a function that
        accepts a line from the file and returns a key into the fields dict.
        For simple usage, a simple utility function
        FixedLengthFieldParser.generate_type_from_offset_func is provided,
        which generates a suitable function from a position and offset.
    override_justification_error_func
        Provide a hook to override justification errors for known issues
        Should return None or the corrected value for the field
    field_separator
        A string or None indicating a separator between the fixed-length
        fields. If provided, the existence of the separator between fields
        will be checked for, and an error will be raised if it is not found.
        Providing a field separator will disable the starting-whitespace check.
    right_justified
        A list of fields that are right-justified instead of left.
    skip_justified
        A list of fields that aren't justified.
    encoding
        The base encoding of the file. If set, all values will be decoded.
    skip_unknown_types
        For files with multiple record types, indicate whether an unknown type
        should result in a ValueError or silently pass.
    strip
        Whether leading/trailing whitespace is stripped from each parsed
        value (defaults to True).
    """

    def __init__(self, file_obj, fields, record_type_func=None,
                 override_justification_error_func=None, field_separator=None,
                 right_justified=(), skip_justified=(), encoding=None,
                 skip_unknown_types=True, strip=True):
        self.file_obj = file_obj
        self.fields = fields
        self.record_type_func = record_type_func
        # Default hook declines to override (always returns None), so a
        # justification problem raises unless the caller supplies a fixer.
        self.override_justification_error_func = (
            override_justification_error_func or (lambda f, v: None)
        )
        self.field_separator = field_separator
        self.right_justified = right_justified
        self.skip_justified = skip_justified
        self.encoding = encoding
        self.skip_unknown_types = skip_unknown_types
        # BUG FIX: this was previously hard-coded to ``True``, silently
        # ignoring the ``strip`` parameter.
        self.strip = strip

    def __iter__(self):
        """Yield one parsed record dict per line; skipped records are omitted."""
        for line in self.file_obj:
            result = self.process_fixed_length_record(line)
            if result is not None:
                yield result

    def process_fixed_length_record(self, record_line):
        """Parse one raw fixed-length line into a dict of field values.

        Returns None when the record type is unknown and
        ``skip_unknown_types`` is set; raises a FixedLengthError subclass
        on malformed input.
        """
        record = {}
        pointer = 0
        field_sep = self.field_separator or ''
        field_sep_len = len(field_sep)
        if self.record_type_func:
            record_type = self.record_type_func(record_line)
            if record_type not in self.fields:
                if self.skip_unknown_types:
                    return None
                else:
                    raise FixedLengthUnknownRecordTypeError(record_type)
            field_length_sequence = self.fields[record_type]
        else:
            field_length_sequence = self.fields
        if isinstance(field_length_sequence, OrderedDict):
            field_length_sequence = iteritems(field_length_sequence)
        for field, field_length in field_length_sequence:
            # Check that fields are separated correctly (only between
            # fields, never before the first one).
            if pointer and field_sep_len:
                if record_line[pointer:pointer + field_sep_len] != field_sep:
                    raise FixedLengthSeparatorError(field, pointer)
                pointer += field_sep_len
            value = record_line[pointer:pointer + field_length]
            # Without a separator, justification is the only structural
            # check available: a value starting (or, for right-justified
            # fields, ending) with whitespace signals misalignment.
            if not field_sep_len:
                if self._invalid_just(field, value):
                    override = self.override_justification_error_func(field, value)
                    if override is None:
                        raise FixedLengthJustificationError(field, value)
                    else:
                        value = override
            if self.encoding is not None:
                value = value.decode(self.encoding)
            record[field] = value.strip() if self.strip else value
            pointer += field_length
        return record

    def _invalid_just(self, field, value):
        """ Returns True if the value is not justified correctly """
        if field in self.skip_justified:
            return False
        if field in self.right_justified:
            # Right-justified: padding (if any) must be on the left.
            value = value.lstrip()[-1:]
        else:
            # Left-justified: padding (if any) must be on the right.
            value = value.rstrip()[:1]
        return value.isspace()

    @classmethod
    def generate_type_from_offset_func(cls, position, length):
        """ Returns a function suitable for the record_type_func parameter """
        def get_record_type(record_line):
            return record_line[position:position + length]
        return get_record_type
|
mit
|
gaddman/ansible
|
lib/ansible/modules/remote_management/ucs/ucs_vhba_template.py
|
64
|
11155
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: ucs_vhba_template
short_description: Configures vHBA templates on Cisco UCS Manager
description:
- Configures vHBA templates on Cisco UCS Manager.
- Examples can be used with the UCS Platform Emulator U(https://communities.cisco.com/ucspe).
extends_documentation_fragment: ucs
options:
state:
description:
- If C(present), will verify vHBA templates are present and will create if needed.
- If C(absent), will verify vHBA templates are absent and will delete if needed.
choices: [present, absent]
default: present
name:
description:
- The name of the virtual HBA template.
- This name can be between 1 and 16 alphanumeric characters.
- "You cannot use spaces or any special characters other than - (hyphen), \"_\" (underscore), : (colon), and . (period)."
- You cannot change this name after the template is created.
required: yes
description:
description:
- A user-defined description of the template.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
fabric:
description:
- The Fabric ID field.
- The name of the fabric interconnect that vHBAs created with this template are associated with.
choices: [A, B]
default: A
redundancy_type:
description:
- The Redundancy Type used for template pairing from the Primary or Secondary redundancy template.
- "primary — Creates configurations that can be shared with the Secondary template."
- Any other shared changes on the Primary template are automatically synchronized to the Secondary template.
- "secondary — All shared configurations are inherited from the Primary template."
- "none - Legacy vHBA template behavior. Select this option if you do not want to use redundancy."
choices: [none, primary, secondary]
default: none
vsan:
description:
- The VSAN to associate with vHBAs created from this template.
default: default
template_type:
description:
- The Template Type field.
- "This can be one of the following:"
- "initial-template — vHBAs created from this template are not updated if the template changes."
- "updating-template - vHBAs created from this template are updated if the template changes."
choices: [initial-template, updating-template]
default: initial-template
max_data:
description:
- The Max Data Field Size field.
- The maximum size of the Fibre Channel frame payload bytes that the vHBA supports.
- Enter an string between '256' and '2112'.
default: '2048'
wwpn_pool:
description:
- The WWPN pool that a vHBA created from this template uses to derive its WWPN address.
default: default
qos_policy:
description:
- The QoS policy that is associated with vHBAs created from this template.
pin_group:
description:
- The SAN pin group that is associated with vHBAs created from this template.
stats_policy:
description:
- The statistics collection policy that is associated with vHBAs created from this template.
default: default
org_dn:
description:
- Org dn (distinguished name)
default: org-root
requirements:
- ucsmsdk
author:
- David Soper (@dsoper2)
- CiscoUcs (@CiscoUcs)
version_added: '2.5'
'''
EXAMPLES = r'''
- name: Configure vHBA template
ucs_vhba_template:
hostname: 172.16.143.150
username: admin
password: password
name: vHBA-A
fabric: A
vsan: VSAN-A
wwpn_pool: WWPN-Pool-A
- name: Remote vHBA template
ucs_vhba_template:
hostname: 172.16.143.150
username: admin
password: password
name: vHBA-A
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
    """Ansible entry point: converge Cisco UCS vHBA templates.

    Builds the argument spec, connects to UCS Manager through UCSModule,
    then creates/updates (state=present) or removes (state=absent) each
    requested template and exits with the standard Ansible result dict.
    """
    argument_spec = ucs_argument_spec
    argument_spec.update(
        org_dn=dict(type='str', default='org-root'),
        name=dict(type='str'),
        descr=dict(type='str'),
        fabric=dict(type='str', default='A', choices=['A', 'B']),
        redundancy_type=dict(type='str', default='none', choices=['none', 'primary', 'secondary']),
        vsan=dict(type='str', default='default'),
        template_type=dict(type='str', default='initial-template', choices=['initial-template', 'updating-template']),
        max_data=dict(type='str', default='2048'),
        wwpn_pool=dict(type='str', default='default'),
        qos_policy=dict(type='str'),
        pin_group=dict(type='str'),
        stats_policy=dict(type='str', default='default'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        vhba_template_list=dict(type='list'),
    )
    # Note that use of vhba_template_list is an experimental feature which
    # allows multiple resource updates with a single UCSM connection.
    # Support for vhba_template_list may change or be removed once persistent
    # UCS connections are supported.
    # Either vhba_template_list or name is required (user can specify either a
    # list or a single resource).
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        required_one_of=[
            ['vhba_template_list', 'name']
        ],
        mutually_exclusive=[
            ['vhba_template_list', 'name']
        ],
    )
    ucs = UCSModule(module)
    err = False
    # NOTE(review): ucsmsdk imports are deferred until after UCSModule(module)
    # is constructed — presumably so SDK availability problems surface through
    # the module framework rather than at file load; confirm.
    from ucsmsdk.mometa.vnic.VnicSanConnTempl import VnicSanConnTempl
    from ucsmsdk.mometa.vnic.VnicFcIf import VnicFcIf
    changed = False
    try:
        # Only documented use is a single resource, but to also support experimental
        # feature allowing multiple updates all params are converted to a vhba_template_list below.
        if module.params['vhba_template_list']:
            # directly use the list (single resource and list are mutually exclusive)
            vhba_template_list = module.params['vhba_template_list']
        else:
            # single resource specified, create list from the current params
            vhba_template_list = [module.params]
        for vhba_template in vhba_template_list:
            mo_exists = False
            props_match = False
            # set default params. Done here to set values for lists which can't be done in the argument_spec
            if not vhba_template.get('descr'):
                vhba_template['descr'] = ''
            if not vhba_template.get('fabric'):
                vhba_template['fabric'] = 'A'
            if not vhba_template.get('redundancy_type'):
                vhba_template['redundancy_type'] = 'none'
            if not vhba_template.get('vsan'):
                vhba_template['vsan'] = 'default'
            if not vhba_template.get('template_type'):
                vhba_template['template_type'] = 'initial-template'
            if not vhba_template.get('max_data'):
                vhba_template['max_data'] = '2048'
            if not vhba_template.get('wwpn_pool'):
                vhba_template['wwpn_pool'] = 'default'
            if not vhba_template.get('qos_policy'):
                vhba_template['qos_policy'] = ''
            if not vhba_template.get('pin_group'):
                vhba_template['pin_group'] = ''
            if not vhba_template.get('stats_policy'):
                vhba_template['stats_policy'] = 'default'
            # dn is <org_dn>/san-conn-templ-<name>
            # NOTE(review): the top-level org_dn is used even for entries in
            # vhba_template_list — a per-item org_dn would be ignored; confirm
            # this is intended.
            dn = module.params['org_dn'] + '/san-conn-templ-' + vhba_template['name']
            mo = ucs.login_handle.query_dn(dn)
            if mo:
                mo_exists = True
                # check top-level mo props
                kwargs = dict(descr=vhba_template['descr'])
                kwargs['switch_id'] = vhba_template['fabric']
                kwargs['redundancy_pair_type'] = vhba_template['redundancy_type']
                kwargs['templ_type'] = vhba_template['template_type']
                kwargs['max_data_field_size'] = vhba_template['max_data']
                kwargs['ident_pool_name'] = vhba_template['wwpn_pool']
                kwargs['qos_policy_name'] = vhba_template['qos_policy']
                kwargs['pin_to_group_name'] = vhba_template['pin_group']
                kwargs['stats_policy_name'] = vhba_template['stats_policy']
                if (mo.check_prop_match(**kwargs)):
                    # top-level props match, check next level mo/props
                    child_dn = dn + '/if-default'
                    mo_1 = ucs.login_handle.query_dn(child_dn)
                    if mo_1:
                        kwargs = dict(name=vhba_template['vsan'])
                        if (mo_1.check_prop_match(**kwargs)):
                            props_match = True
            if module.params['state'] == 'absent':
                # mo must exist but all properties do not have to match
                if mo_exists:
                    if not module.check_mode:
                        ucs.login_handle.remove_mo(mo)
                        ucs.login_handle.commit()
                    # changed is reported even in check mode (predicted change)
                    changed = True
            else:
                if not props_match:
                    if not module.check_mode:
                        # create if mo does not already exist
                        mo = VnicSanConnTempl(
                            parent_mo_or_dn=module.params['org_dn'],
                            name=vhba_template['name'],
                            descr=vhba_template['descr'],
                            switch_id=vhba_template['fabric'],
                            redundancy_pair_type=vhba_template['redundancy_type'],
                            templ_type=vhba_template['template_type'],
                            max_data_field_size=vhba_template['max_data'],
                            ident_pool_name=vhba_template['wwpn_pool'],
                            qos_policy_name=vhba_template['qos_policy'],
                            pin_to_group_name=vhba_template['pin_group'],
                            stats_policy_name=vhba_template['stats_policy'],
                        )
                        # mo_1 is attached to mo via parent_mo_or_dn, so the
                        # single add_mo(mo, True) below persists both objects.
                        mo_1 = VnicFcIf(
                            parent_mo_or_dn=mo,
                            name=vhba_template['vsan'],
                        )
                        ucs.login_handle.add_mo(mo, True)
                        ucs.login_handle.commit()
                    changed = True
    except Exception as e:
        # Broad catch so any SDK/connection failure is reported via fail_json
        # instead of a traceback.
        err = True
        ucs.result['msg'] = "setup error: %s " % str(e)
    ucs.result['changed'] = changed
    if err:
        module.fail_json(**ucs.result)
    module.exit_json(**ucs.result)


if __name__ == '__main__':
    main()
|
gpl-3.0
|
alxgu/ansible
|
lib/ansible/modules/storage/netapp/netapp_e_ldap.py
|
31
|
15914
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_ldap
short_description: NetApp E-Series manage LDAP integration to use for authentication
description:
- Configure an E-Series system to allow authentication via an LDAP server
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
state:
description:
- Enable/disable LDAP support on the system. Disabling will clear out any existing defined domains.
choices:
- present
- absent
default: present
identifier:
description:
- This is a unique identifier for the configuration (for cases where there are multiple domains configured).
- If this is not specified, but I(state=present), we will utilize a default value of 'default'.
username:
description:
- This is the user account that will be used for querying the LDAP server.
- "Example: CN=MyBindAcct,OU=ServiceAccounts,DC=example,DC=com"
required: yes
aliases:
- bind_username
password:
description:
- This is the password for the bind user account.
required: yes
aliases:
- bind_password
attributes:
description:
- The user attributes that should be considered for the group to role mapping.
- Typically this is used with something like 'memberOf', and a user's access is tested against group
membership or lack thereof.
default: memberOf
server:
description:
- This is the LDAP server url.
- The connection string should be specified as using the ldap or ldaps protocol along with the port
information.
aliases:
- server_url
required: yes
name:
description:
- The domain name[s] that will be utilized when authenticating to identify which domain to utilize.
- Default to use the DNS name of the I(server).
- The only requirement is that the name[s] be resolvable.
- "Example: user@example.com"
required: no
search_base:
description:
- The search base is used to find group memberships of the user.
- "Example: ou=users,dc=example,dc=com"
required: yes
role_mappings:
description:
- This is where you specify which groups should have access to what permissions for the
storage-system.
- For example, all users in group A will be assigned all 4 available roles, which will allow access
to all the management functionality of the system (super-user). Those in group B only have the
storage.monitor role, which will allow only read-only access.
- This is specified as a mapping of regular expressions to a list of roles. See the examples.
- The roles that will be assigned to the group/groups matching the provided regex.
- storage.admin allows users full read/write access to storage objects and operations.
- storage.monitor allows users read-only access to storage objects and operations.
- support.admin allows users access to hardware, diagnostic information, the Major Event
Log, and other critical support-related functionality, but not the storage configuration.
- security.admin allows users access to authentication/authorization configuration, as well
as the audit log configuration, and certification management.
required: yes
user_attribute:
description:
- This is the attribute we will use to match the provided username when a user attempts to
authenticate.
default: sAMAccountName
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- This module allows you to define one or more LDAP domains identified uniquely by I(identifier) to use for
authentication. Authorization is determined by I(role_mappings), in that different groups of users may be given
different (or no), access to certain aspects of the system and API.
- The local user accounts will still be available if the LDAP server becomes unavailable/inaccessible.
- Generally, you'll need to get the details of your organization's LDAP server before you'll be able to configure
the system for using LDAP authentication; every implementation is likely to be very different.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher, or the Web Services Proxy
v3.0 and higher.
'''
EXAMPLES = '''
- name: Disable LDAP authentication
netapp_e_ldap:
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
ssid: "1"
state: absent
- name: Remove the 'default' LDAP domain configuration
netapp_e_ldap:
state: absent
identifier: default
- name: Define a new LDAP domain, utilizing defaults where possible
netapp_e_ldap:
state: present
bind_username: "CN=MyBindAccount,OU=ServiceAccounts,DC=example,DC=com"
bind_password: "mySecretPass"
server: "ldap://example.com:389"
search_base: 'OU=Users,DC=example,DC=com'
role_mappings:
".*dist-dev-storage.*":
- storage.admin
- security.admin
- support.admin
- storage.monitor
'''
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The ldap settings have been updated.
"""
import json
import logging
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
class Ldap(object):
    """Manage the LDAP authentication configuration of a NetApp E-Series array.

    Wraps the Web Services REST API (embedded or proxy) behind an
    AnsibleModule: builds the argument spec in ``__init__`` and converges
    the configuration when the instance is called.
    """

    # Message reported when the array already matches the requested state.
    NO_CHANGE_MSG = "No changes were necessary."

    def __init__(self):
        """Parse module arguments and capture connection/logging settings."""
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            state=dict(type='str', required=False, default='present',
                       choices=['present', 'absent']),
            identifier=dict(type='str', required=False, ),
            username=dict(type='str', required=False, aliases=['bind_username']),
            password=dict(type='str', required=False, aliases=['bind_password'], no_log=True),
            name=dict(type='list', required=False, ),
            server=dict(type='str', required=False, aliases=['server_url']),
            search_base=dict(type='str', required=False, ),
            role_mappings=dict(type='dict', required=False, ),
            user_attribute=dict(type='str', required=False, default='sAMAccountName'),
            attributes=dict(type='list', default=['memberOf'], required=False, ),
            log_path=dict(type='str', required=False),
        ))
        # Bind credentials and mapping details are only mandatory when
        # enabling LDAP (state=present).
        required_if = [
            ["state", "present", ["username", "password", "server", "search_base", "role_mappings", ]]
        ]
        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
        args = self.module.params
        # True means "configure LDAP", False means "clear it".
        self.ldap = args['state'] == 'present'
        self.identifier = args['identifier']
        self.username = args['username']
        self.password = args['password']
        self.names = args['name']
        self.server = args['server']
        self.search_base = args['search_base']
        self.role_mappings = args['role_mappings']
        self.user_attribute = args['user_attribute']
        self.attributes = args['attributes']
        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'],
                          timeout=60)
        self.check_mode = self.module.check_mode
        log_path = args['log_path']
        # logging setup: debug file logging only when log_path is provided
        self._logger = logging.getLogger(self.__class__.__name__)
        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
        if not self.url.endswith('/'):
            self.url += '/'
        # Both resolved lazily: embedded by is_embedded(), base_path by update().
        self.embedded = None
        self.base_path = None

    def make_configuration(self):
        """Build the LDAP domain dict expected by the REST API from the args.

        Defaults: identifier falls back to 'default'; names fall back to the
        server URL's hostname (port stripped).
        """
        if not self.identifier:
            self.identifier = 'default'
        if not self.names:
            parts = urlparse.urlparse(self.server)
            netloc = parts.netloc
            if ':' in netloc:
                netloc = netloc.split(':')[0]
            self.names = [netloc]
        # Flatten {regex: [roles]} into the API's list of role-map entries.
        roles = list()
        for regex in self.role_mappings:
            for role in self.role_mappings[regex]:
                roles.append(dict(groupRegex=regex,
                                  ignoreCase=True,
                                  name=role))
        domain = dict(id=self.identifier,
                      ldapUrl=self.server,
                      bindLookupUser=dict(user=self.username, password=self.password),
                      roleMapCollection=roles,
                      groupAttributes=self.attributes,
                      names=self.names,
                      searchBase=self.search_base,
                      userAttribute=self.user_attribute,
                      )
        return domain

    def is_embedded(self):
        """Determine whether or not we're using the embedded or proxy implementation of Web Services"""
        if self.embedded is None:
            url = self.url
            try:
                parts = urlparse.urlparse(url)
                parts = parts._replace(path='/devmgr/utils/')
                url = urlparse.urlunparse(parts)
                (rc, result) = request(url + 'about', **self.creds)
                self.embedded = not result['runningAsProxy']
            except Exception as err:
                self._logger.exception("Failed to retrieve the About information.")
                self.module.fail_json(msg="Failed to determine the Web Services implementation type!"
                                          " Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))
        return self.embedded

    def get_full_configuration(self):
        """Return the complete LDAP configuration (all domains) from the array."""
        try:
            (rc, result) = request(self.url + self.base_path, **self.creds)
            return result
        except Exception as err:
            self._logger.exception("Failed to retrieve the LDAP configuration.")
            self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def get_configuration(self, identifier):
        """Return the configuration for one domain, or None if it doesn't exist (404)."""
        try:
            (rc, result) = request(self.url + self.base_path + '%s' % (identifier), ignore_errors=True, **self.creds)
            if rc == 200:
                return result
            elif rc == 404:
                return None
            else:
                self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, result))
        except Exception as err:
            self._logger.exception("Failed to retrieve the LDAP configuration.")
            self.module.fail_json(msg="Failed to retrieve LDAP configuration! Array Id [%s]. Error [%s]."
                                      % (self.ssid, to_native(err)))

    def update_configuration(self):
        """Create or update the requested domain; returns (message, changed).

        NOTE(review): change detection is a plain dict comparison of the
        requested domain against the API's current representation — extra
        server-side fields would make this always report a change; confirm.
        """
        # Define a new domain based on the user input
        domain = self.make_configuration()
        # This is the current list of configurations
        current = self.get_configuration(self.identifier)
        update = current != domain
        msg = "No changes were necessary for [%s]." % self.identifier
        self._logger.info("Is updated: %s", update)
        if update and not self.check_mode:
            msg = "The configuration changes were made for [%s]." % self.identifier
            try:
                if current is None:
                    # New domain: POST to the addDomain endpoint.
                    api = self.base_path + 'addDomain'
                else:
                    # Existing domain: POST to its own id.
                    api = self.base_path + '%s' % (domain['id'])
                (rc, result) = request(self.url + api, method='POST', data=json.dumps(domain), **self.creds)
            except Exception as err:
                self._logger.exception("Failed to modify the LDAP configuration.")
                self.module.fail_json(msg="Failed to modify LDAP configuration! Array Id [%s]. Error [%s]."
                                          % (self.ssid, to_native(err)))
        return msg, update

    def clear_single_configuration(self, identifier=None):
        """Delete one LDAP domain (defaults to self.identifier); returns (message, changed)."""
        if identifier is None:
            identifier = self.identifier
        configuration = self.get_configuration(identifier)
        updated = False
        msg = self.NO_CHANGE_MSG
        if configuration:
            updated = True
            msg = "The LDAP domain configuration for [%s] was cleared." % identifier
            if not self.check_mode:
                try:
                    (rc, result) = request(self.url + self.base_path + '%s' % identifier, method='DELETE', **self.creds)
                except Exception as err:
                    self.module.fail_json(msg="Failed to remove LDAP configuration! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(err)))
        return msg, updated

    def clear_configuration(self):
        """Delete every configured LDAP domain; returns (message, changed)."""
        configuration = self.get_full_configuration()
        updated = False
        msg = self.NO_CHANGE_MSG
        if configuration['ldapDomains']:
            updated = True
            msg = "The LDAP configuration for all domains was cleared."
            if not self.check_mode:
                try:
                    (rc, result) = request(self.url + self.base_path, method='DELETE', ignore_errors=True, **self.creds)
                    # Older versions of NetApp E-Series restAPI does not possess an API to remove all existing configs
                    if rc == 405:
                        for config in configuration['ldapDomains']:
                            self.clear_single_configuration(config['id'])
                except Exception as err:
                    self.module.fail_json(msg="Failed to clear LDAP configuration! Array Id [%s]. Error [%s]."
                                              % (self.ssid, to_native(err)))
        return msg, updated

    def get_base_path(self):
        """Return the LDAP endpoint path, which differs between embedded and proxy."""
        embedded = self.is_embedded()
        if embedded:
            return 'storage-systems/%s/ldap/' % self.ssid
        else:
            return '/ldap/'

    def update(self):
        """Converge to the requested state and exit the module."""
        self.base_path = self.get_base_path()
        if self.ldap:
            msg, update = self.update_configuration()
        elif self.identifier:
            # state=absent with an identifier: clear only that domain.
            msg, update = self.clear_single_configuration()
        else:
            # state=absent without an identifier: clear everything.
            msg, update = self.clear_configuration()
        self.module.exit_json(msg=msg, changed=update, )

    def __call__(self, *args, **kwargs):
        """Allow the instance to be invoked directly (see main())."""
        self.update()
def main():
    """Instantiate the Ldap handler and run the convergence."""
    Ldap()()


if __name__ == '__main__':
    main()
|
gpl-3.0
|
inveniosoftware/invenio-collections
|
invenio_collections/ext.py
|
3
|
5519
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio module for organizing metadata into collections."""
from __future__ import absolute_import, print_function
import six
from invenio_records import signals
from sqlalchemy.event import contains, listen, remove
from werkzeug.utils import cached_property, import_string
from . import config
class _AppState(object):
    """State for storing collections."""

    def __init__(self, app, cache=None):
        """Initialize state.

        :param app: Flask application object.
        :param cache: Optional cache instance (or dotted import path) that
            overrides the ``COLLECTIONS_CACHE`` application config.
        """
        self.app = app
        self._cache = cache
        # Record signals are hooked up eagerly unless disabled by config.
        if self.app.config['COLLECTIONS_REGISTER_RECORD_SIGNALS']:
            self.register_signals()

    @cached_property
    def cache(self):
        """Return a cache instance.

        The explicit ``cache`` constructor argument wins over the
        ``COLLECTIONS_CACHE`` config value; a string is resolved via
        ``import_string``. Computed once (cached_property).
        """
        cache = self._cache or self.app.config.get('COLLECTIONS_CACHE')
        return import_string(cache) if isinstance(cache, six.string_types) \
            else cache

    @property
    def collections(self):
        """Get list of collections.

        Returns None when no cache backend is configured.
        """
        # if cache server is configured, load collection from there
        if self.cache:
            return self.cache.get(
                self.app.config['COLLECTIONS_CACHE_KEY'])

    @collections.setter
    def collections(self, values):
        """Set list of collections (no-op when no cache backend is configured)."""
        # if cache server is configured, save collection list
        if self.cache:
            self.cache.set(
                self.app.config['COLLECTIONS_CACHE_KEY'], values)

    def register_signals(self):
        """Register signals."""
        from .models import Collection
        from .receivers import CollectionUpdater
        if self.app.config['COLLECTIONS_USE_PERCOLATOR']:
            from .percolator import collection_inserted_percolator, \
                collection_removed_percolator, \
                collection_updated_percolator
            # Register collection signals to update percolators
            listen(Collection, 'after_insert',
                   collection_inserted_percolator)
            listen(Collection, 'after_update',
                   collection_updated_percolator)
            listen(Collection, 'after_delete',
                   collection_removed_percolator)
        # Register Record signals to update record['_collections']
        # weak=False keeps the updater alive for the app's lifetime.
        self.update_function = CollectionUpdater(app=self.app)
        signals.before_record_insert.connect(self.update_function,
                                             weak=False)
        signals.before_record_update.connect(self.update_function,
                                             weak=False)

    def unregister_signals(self):
        """Unregister signals."""
        # NOTE(review): percolator handlers are imported here unconditionally,
        # even if COLLECTIONS_USE_PERCOLATOR was False at registration time;
        # the contains() guard below keeps the removal itself safe.
        from .models import Collection
        from .percolator import collection_inserted_percolator, \
            collection_removed_percolator, collection_updated_percolator
        # Unregister Record signals
        if hasattr(self, 'update_function'):
            signals.before_record_insert.disconnect(self.update_function)
            signals.before_record_update.disconnect(self.update_function)
        # Unregister collection signals
        if contains(Collection, 'after_insert',
                    collection_inserted_percolator):
            remove(Collection, 'after_insert', collection_inserted_percolator)
            remove(Collection, 'after_update', collection_updated_percolator)
            remove(Collection, 'after_delete', collection_removed_percolator)
class InvenioCollections(object):
    """Invenio-Collections extension."""

    def __init__(self, app=None, **kwargs):
        """Extension initialization.

        :param app: Flask application object.
        :param cache: Cache system to store the result of the queries. None or
            not set to avoid the use of a cache system.
        """
        if app:
            self._state = self.init_app(app, **kwargs)

    def init_app(self, app, **kwargs):
        """Flask application initialization."""
        self.init_config(app)
        state = _AppState(app=app, cache=kwargs.get('cache'))
        app.extensions['invenio-collections'] = state
        return state

    def init_config(self, app):
        """Initialize configuration."""
        base_template = app.config.get('BASE_TEMPLATE',
                                       'invenio_collections/base.html')
        app.config.setdefault('COLLECTIONS_BASE_TEMPLATE', base_template)
        # Copy every COLLECTIONS_* default from the config module, without
        # overriding values the application already set.
        for key in dir(config):
            if not key.startswith('COLLECTIONS_'):
                continue
            app.config.setdefault(key, getattr(config, key))

    def __getattr__(self, name):
        """Proxy to state object."""
        return getattr(self._state, name, None)
|
gpl-2.0
|
mrfawy/ModernEnigma
|
test/testUtil.py
|
1
|
2635
|
import unittest
from Util import Util
class TestUtil(unittest.TestCase):
    """Unit tests for the static helpers exposed by :class:`Util`."""

    def testConvertTupleListToMap(self):
        pairs = [(0, 0), (0, 1), (1, 0), (1, 2), (2, 2)]
        wiring = Util.convertTupleListToMap(pairs)
        self.assertEqual(3, len(wiring))
        self.assertEqual(2, len(wiring[0]))
        self.assertEqual(1, len(wiring[2]))

    def testPadSequence(self):
        block = 4
        padded = Util.padSequence([1, 2], block)
        self.assertEqual(0, len(padded) % block)
        self.assertEqual(1, padded[0])

    def testPaddingBlksizeLargeerThanSeq(self):
        block = 16
        padded = Util.padSequence([1, 2], block)
        self.assertEqual(0, len(padded) % block)
        self.assertEqual(13, padded[0])

    def testUnpadSequence(self):
        unpadded = Util.unpadSequence([1, 2, 3, 4])
        self.assertEqual(2, len(unpadded))
        self.assertEqual([2, 3], unpadded)

    def testRemoveDuplicates(self):
        self.assertEqual([1, 2, 3, 4], Util.removeDuplicates([1, 2, 1, 3, 1, 3, 4]))

    def testEncodeStringIntoBytsList(self):
        encoded = Util.encodeStringIntoByteList("helLo World!!")
        self.assertEqual(
            [104, 101, 108, 76, 111, 32, 87, 111, 114, 108, 100, 33, 33],
            encoded)

    def testConvertByteListIntoHexString(self):
        octets = [104, 101, 108, 76, 111, 32, 87, 111, 114, 108, 100, 33, 33]
        self.assertEqual("68656C4C6F20576F726C642121",
                         Util.convertByteListIntoHexString(octets))

    def testConvertHexStringIntoByteList(self):
        expected = [104, 101, 108, 76, 111, 32, 87, 111, 114, 108, 100, 33, 33]
        self.assertEqual(expected,
                         Util.convertHexStringIntoByteList("68656C4C6F20576F726C642121"))

    def testDecodeListtoString(self):
        octets = [72, 101, 108, 108, 111, 32, 69, 110, 105, 103, 109, 97, 32, 33]
        self.assertEqual("Hello Enigma !", Util.decodeByteListIntoString(octets))

    def testHashing(self):
        digest = Util.hashString("My String")
        print(digest)  # kept from the original: echoes the digest to stdout
        self.assertIsNotNone(digest)

    def testWriteObjectToFileAsJosn(self):
        payload = [1, 2, 3]
        path = "testWriteTofile.txt"
        Util.writeObjectToFileAsJson(payload, path)
        self.assertEqual(payload, Util.readJsonFileIntoObject(path))

    def testDivideIntoChunks(self):
        expected = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
        self.assertEqual(expected,
                         Util.divideIntoChunks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 2))
|
mit
|
fti7/ansible-modules-core
|
cloud/rax.py
|
7
|
24940
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax
short_description: create / delete an instance in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud instance and optionally
waits for it to be 'running'.
version_added: "1.2"
options:
auto_increment:
description:
- Whether or not to increment a single number with the name of the
created servers. Only applicable when used with the I(group) attribute
or meta key.
default: yes
choices:
- "yes"
- "no"
version_added: 1.5
config_drive:
description:
- Attach read-only configuration drive to server as label config-2
default: no
choices:
- "yes"
- "no"
version_added: 1.7
count:
description:
- number of instances to launch
default: 1
version_added: 1.4
count_offset:
description:
- number count to start at
default: 1
version_added: 1.4
disk_config:
description:
- Disk partitioning strategy
choices:
- auto
- manual
version_added: '1.4'
default: auto
exact_count:
description:
- Explicitly ensure an exact count of instances, used with
state=active/present
default: no
choices:
- "yes"
- "no"
version_added: 1.4
extra_client_args:
description:
- A hash of key/value pairs to be used when creating the cloudservers
client. This is considered an advanced option, use it wisely and
with caution.
version_added: 1.6
extra_create_args:
description:
- A hash of key/value pairs to be used when creating a new server.
This is considered an advanced option, use it wisely and with caution.
version_added: 1.6
files:
description:
- Files to insert into the instance. remotefilename:localcontent
default: null
flavor:
description:
- flavor to use for the instance
default: null
group:
description:
- host group to assign to server, is also used for idempotent operations
to ensure a specific number of instances
version_added: 1.4
image:
description:
- image to use for the instance. Can be an C(id), C(human_id) or C(name)
default: null
instance_ids:
description:
- list of instance ids, currently only used when state='absent' to
remove instances
version_added: 1.4
key_name:
description:
- key pair to use on the instance
default: null
aliases:
- keypair
meta:
description:
- A hash of metadata to associate with the instance
default: null
name:
description:
- Name to give the instance
default: null
networks:
description:
- The network to attach to the instances. If specified, you must include
ALL networks including the public and private interfaces. Can be C(id)
or C(label).
default:
- public
- private
version_added: 1.4
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
user_data:
description:
- Data to be uploaded to the servers config drive. This option implies
I(config_drive). Can be a file path or a string
version_added: 1.7
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: Jesse Keating, Matt Martz
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Cloud Server
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax
credentials: ~/.raxpub
name: rax-test1
flavor: 5
image: b11d9567-e412-4255-96b9-bd63ab23bcfe
key_name: my_rackspace_key
files:
/root/test.txt: /home/localuser/test.txt
wait: yes
state: present
networks:
- private
- public
register: rax
- name: Build an exact count of cloud servers with incremented names
hosts: local
gather_facts: False
tasks:
- name: Server build requests
local_action:
module: rax
credentials: ~/.raxpub
name: test%03d.example.org
flavor: performance1-1
image: ubuntu-1204-lts-precise-pangolin
state: present
count: 10
count_offset: 10
exact_count: yes
group: test
wait: yes
register: rax
'''
# pyrax is the Rackspace cloud SDK and an optional dependency: record its
# availability here and let main() fail with a friendly message when absent.
try:
    import pyrax
    HAS_PYRAX = True
except ImportError:
    HAS_PYRAX = False
def create(module, names=[], flavor=None, image=None, meta={}, key_name=None,
files={}, wait=True, wait_timeout=300, disk_config=None,
group=None, nics=[], extra_create_args={}, user_data=None,
config_drive=False, existing=[]):
cs = pyrax.cloudservers
changed = False
if user_data:
config_drive = True
if user_data and os.path.isfile(user_data):
try:
f = open(user_data)
user_data = f.read()
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % user_data)
# Handle the file contents
for rpath in files.keys():
lpath = os.path.expanduser(files[rpath])
try:
fileobj = open(lpath, 'r')
files[rpath] = fileobj.read()
fileobj.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % lpath)
try:
servers = []
for name in names:
servers.append(cs.servers.create(name=name, image=image,
flavor=flavor, meta=meta,
key_name=key_name,
files=files, nics=nics,
disk_config=disk_config,
config_drive=config_drive,
userdata=user_data,
**extra_create_args))
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
for server in servers:
try:
server.get()
except:
server.status == 'ERROR'
if not filter(lambda s: s.status not in FINAL_STATUSES,
servers):
break
time.sleep(5)
success = []
error = []
timeout = []
for server in servers:
try:
server.get()
except:
server.status == 'ERROR'
instance = rax_to_dict(server, 'server')
if server.status == 'ACTIVE' or not wait:
success.append(instance)
elif server.status == 'ERROR':
error.append(instance)
elif wait:
timeout.append(instance)
untouched = [rax_to_dict(s, 'server') for s in existing]
instances = success + untouched
results = {
'changed': changed,
'action': 'create',
'instances': instances,
'success': success,
'error': error,
'timeout': timeout,
'instance_ids': {
'instances': [i['id'] for i in instances],
'success': [i['id'] for i in success],
'error': [i['id'] for i in error],
'timeout': [i['id'] for i in timeout]
}
}
if timeout:
results['msg'] = 'Timeout waiting for all servers to build'
elif error:
results['msg'] = 'Failed to build all servers'
if 'msg' in results:
module.fail_json(**results)
else:
module.exit_json(**results)
def delete(module, instance_ids=[], wait=True, wait_timeout=300, kept=[]):
cs = pyrax.cloudservers
changed = False
instances = {}
servers = []
for instance_id in instance_ids:
servers.append(cs.servers.get(instance_id))
for server in servers:
try:
server.delete()
except Exception, e:
module.fail_json(msg=e.message)
else:
changed = True
instance = rax_to_dict(server, 'server')
instances[instance['id']] = instance
# If requested, wait for server deletion
if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
for server in servers:
instance_id = server.id
try:
server.get()
except:
instances[instance_id]['status'] = 'DELETED'
instances[instance_id]['rax_status'] = 'DELETED'
if not filter(lambda s: s['status'] not in ('', 'DELETED',
'ERROR'),
instances.values()):
break
time.sleep(5)
timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'),
instances.values())
error = filter(lambda s: s['status'] in ('ERROR'),
instances.values())
success = filter(lambda s: s['status'] in ('', 'DELETED'),
instances.values())
instances = [rax_to_dict(s, 'server') for s in kept]
results = {
'changed': changed,
'action': 'delete',
'instances': instances,
'success': success,
'error': error,
'timeout': timeout,
'instance_ids': {
'instances': [i['id'] for i in instances],
'success': [i['id'] for i in success],
'error': [i['id'] for i in error],
'timeout': [i['id'] for i in timeout]
}
}
if timeout:
results['msg'] = 'Timeout waiting for all servers to delete'
elif error:
results['msg'] = 'Failed to delete all servers'
if 'msg' in results:
module.fail_json(**results)
else:
module.exit_json(**results)
def cloudservers(module, state=None, name=None, flavor=None, image=None,
                 meta={}, key_name=None, files={}, wait=True, wait_timeout=300,
                 disk_config=None, count=1, group=None, instance_ids=[],
                 exact_count=False, networks=[], count_offset=0,
                 auto_increment=False, extra_create_args={}, user_data=None,
                 config_drive=False):
    """Idempotently reconcile Cloud Servers to the requested state.

    Dispatches to create()/delete(), which exit the module via
    exit_json()/fail_json(); this function does not return normally.

    NOTE(review): the mutable default arguments (meta={}, files={}, ...)
    are shared across calls and `meta` is mutated below -- harmless for a
    run-once Ansible module process, but confirm before reusing this code.
    """
    cs = pyrax.cloudservers
    cnw = pyrax.cloud_networks
    if not cnw:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    servers = []

    # Add the group meta key
    if group and 'group' not in meta:
        meta['group'] = group
    elif 'group' in meta and group is None:
        group = meta['group']

    # Normalize and ensure all metadata values are strings
    for k, v in meta.items():
        if isinstance(v, list):
            meta[k] = ','.join(['%s' % i for i in v])
        elif isinstance(v, dict):
            meta[k] = json.dumps(v)
        elif not isinstance(v, basestring):
            meta[k] = '%s' % v

    # When using state=absent with group, the absent block won't match the
    # names properly. Use the exact_count functionality to decrease the count
    # to the desired level
    was_absent = False
    if group is not None and state == 'absent':
        exact_count = True
        state = 'present'
        was_absent = True

    if image:
        image = rax_find_image(module, pyrax, image)

    nics = []
    if networks:
        for network in networks:
            nics.extend(rax_find_network(module, pyrax, network))

    # act on the state
    if state == 'present':
        # name, flavor and image are mandatory when creating.
        for arg, value in dict(name=name, flavor=flavor,
                               image=image).iteritems():
            if not value:
                module.fail_json(msg='%s is required for the "rax" module' %
                                     arg)

        # Idempotent ensurance of a specific count of servers
        if exact_count is not False:
            # See if we can find servers that match our options
            if group is None:
                module.fail_json(msg='"group" must be provided when using '
                                     '"exact_count"')
            else:
                if auto_increment:
                    numbers = set()

                    # Probe whether `name` contains a %d/%s placeholder;
                    # append one if not (TypeError message starts 'not all').
                    try:
                        name % 0
                    except TypeError, e:
                        if e.message.startswith('not all'):
                            name = '%s%%d' % name
                        else:
                            module.fail_json(msg=e.message)

                    # Collect the numbers already used by group members.
                    pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                    for server in cs.servers.list():
                        if server.metadata.get('group') == group:
                            servers.append(server)
                        match = re.search(pattern, server.name)
                        if match:
                            number = int(match.group(1))
                            numbers.add(number)

                    number_range = xrange(count_offset, count_offset + count)
                    available_numbers = list(set(number_range)
                                             .difference(numbers))
                else:
                    for server in cs.servers.list():
                        if server.metadata.get('group') == group:
                            servers.append(server)

                # If state was absent but the count was changed,
                # assume we only wanted to remove that number of instances
                if was_absent:
                    diff = len(servers) - count
                    if diff < 0:
                        count = 0
                    else:
                        count = diff

                if len(servers) > count:
                    # Too many: delete the surplus, keep the first `count`.
                    state = 'absent'
                    kept = servers[:count]
                    del servers[:count]
                    instance_ids = []
                    for server in servers:
                        instance_ids.append(server.id)
                    delete(module, instance_ids=instance_ids, wait=wait,
                           wait_timeout=wait_timeout, kept=kept)
                elif len(servers) < count:
                    # Too few: compute the names of the servers to add.
                    if auto_increment:
                        names = []
                        name_slice = count - len(servers)
                        numbers_to_use = available_numbers[:name_slice]
                        for number in numbers_to_use:
                            names.append(name % number)
                    else:
                        names = [name] * (count - len(servers))
                else:
                    # Exact match already: report unchanged and exit.
                    instances = []
                    instance_ids = []
                    for server in servers:
                        instances.append(rax_to_dict(server, 'server'))
                        instance_ids.append(server.id)
                    module.exit_json(changed=False, action=None,
                                     instances=instances,
                                     success=[], error=[], timeout=[],
                                     instance_ids={'instances': instance_ids,
                                                   'success': [], 'error': [],
                                                   'timeout': []})
        else:
            if group is not None:
                if auto_increment:
                    numbers = set()

                    # Same placeholder probe as in the exact_count branch.
                    try:
                        name % 0
                    except TypeError, e:
                        if e.message.startswith('not all'):
                            name = '%s%%d' % name
                        else:
                            module.fail_json(msg=e.message)

                    pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                    for server in cs.servers.list():
                        if server.metadata.get('group') == group:
                            servers.append(server)
                        match = re.search(pattern, server.name)
                        if match:
                            number = int(match.group(1))
                            numbers.add(number)

                    number_range = xrange(count_offset,
                                          count_offset + count + len(numbers))
                    available_numbers = list(set(number_range)
                                             .difference(numbers))
                    names = []
                    numbers_to_use = available_numbers[:count]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    names = [name] * count
            else:
                # No group: match existing servers by exact name/image/flavor
                # and identical metadata.
                search_opts = {
                    'name': '^%s$' % name,
                    'image': image,
                    'flavor': flavor
                }
                servers = []
                for server in cs.servers.list(search_opts=search_opts):
                    if server.metadata != meta:
                        continue
                    servers.append(server)

                if len(servers) >= count:
                    instances = []
                    for server in servers:
                        instances.append(rax_to_dict(server, 'server'))

                    instance_ids = [i['id'] for i in instances]
                    module.exit_json(changed=False, action=None,
                                     instances=instances, success=[], error=[],
                                     timeout=[],
                                     instance_ids={'instances': instance_ids,
                                                   'success': [], 'error': [],
                                                   'timeout': []})

                names = [name] * (count - len(servers))

        create(module, names=names, flavor=flavor, image=image,
               meta=meta, key_name=key_name, files=files, wait=wait,
               wait_timeout=wait_timeout, disk_config=disk_config, group=group,
               nics=nics, extra_create_args=extra_create_args,
               user_data=user_data, config_drive=config_drive,
               existing=servers)

    elif state == 'absent':
        if instance_ids is None:
            # No explicit ids: find candidates by name/image/flavor + meta.
            for arg, value in dict(name=name, flavor=flavor,
                                   image=image).iteritems():
                if not value:
                    module.fail_json(msg='%s is required for the "rax" '
                                         'module' % arg)
            search_opts = {
                'name': '^%s$' % name,
                'image': image,
                'flavor': flavor
            }
            for server in cs.servers.list(search_opts=search_opts):
                if meta != server.metadata:
                    continue
                servers.append(server)

            instance_ids = []
            for server in servers:
                if len(instance_ids) < count:
                    instance_ids.append(server.id)
                else:
                    break

        if not instance_ids:
            # Nothing to delete: report unchanged.
            module.exit_json(changed=False, action=None, instances=[],
                             success=[], error=[], timeout=[],
                             instance_ids={'instances': [],
                                           'success': [], 'error': [],
                                           'timeout': []})

        delete(module, instance_ids=instance_ids, wait=wait,
               wait_timeout=wait_timeout)
def main():
    """Ansible entry point: parse module arguments, configure pyrax and
    dispatch to cloudservers()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            auto_increment=dict(default=True, type='bool'),
            config_drive=dict(default=False, type='bool'),
            count=dict(default=1, type='int'),
            count_offset=dict(default=1, type='int'),
            disk_config=dict(choices=['auto', 'manual']),
            exact_count=dict(default=False, type='bool'),
            extra_client_args=dict(type='dict', default={}),
            extra_create_args=dict(type='dict', default={}),
            files=dict(type='dict', default={}),
            flavor=dict(),
            group=dict(),
            image=dict(),
            instance_ids=dict(type='list'),
            key_name=dict(aliases=['keypair']),
            meta=dict(type='dict', default={}),
            name=dict(),
            networks=dict(type='list', default=['public', 'private']),
            service=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            user_data=dict(no_log=True),
            wait=dict(default=False, type='bool'),
            wait_timeout=dict(default=300),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    # 'service' is accepted only so we can emit a helpful deprecation error.
    service = module.params.get('service')
    if service is not None:
        module.fail_json(msg='The "service" attribute has been deprecated, '
                             'please remove "service: cloudservers" from your '
                             'playbook pertaining to the "rax" module')

    auto_increment = module.params.get('auto_increment')
    config_drive = module.params.get('config_drive')
    count = module.params.get('count')
    count_offset = module.params.get('count_offset')
    disk_config = module.params.get('disk_config')
    if disk_config:
        # pyrax expects the upper-case AUTO/MANUAL spelling.
        disk_config = disk_config.upper()
    exact_count = module.params.get('exact_count', False)
    extra_client_args = module.params.get('extra_client_args')
    extra_create_args = module.params.get('extra_create_args')
    files = module.params.get('files')
    flavor = module.params.get('flavor')
    group = module.params.get('group')
    image = module.params.get('image')
    instance_ids = module.params.get('instance_ids')
    key_name = module.params.get('key_name')
    meta = module.params.get('meta')
    name = module.params.get('name')
    networks = module.params.get('networks')
    state = module.params.get('state')
    user_data = module.params.get('user_data')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    setup_rax_module(module, pyrax)

    if extra_client_args:
        # Rebuild the client with caller-supplied kwargs (advanced option).
        pyrax.cloudservers = pyrax.connect_to_cloudservers(
            region=pyrax.cloudservers.client.region_name,
            **extra_client_args)
        client = pyrax.cloudservers.client
        if 'bypass_url' in extra_client_args:
            client.management_url = extra_client_args['bypass_url']

    if pyrax.cloudservers is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    cloudservers(module, state=state, name=name, flavor=flavor,
                 image=image, meta=meta, key_name=key_name, files=files,
                 wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
                 count=count, group=group, instance_ids=instance_ids,
                 exact_count=exact_count, networks=networks,
                 count_offset=count_offset, auto_increment=auto_increment,
                 extra_create_args=extra_create_args, user_data=user_data,
                 config_drive=config_drive)
# import module snippets
# (Wildcard imports are the established Ansible-module convention: they pull
# in AnsibleModule plus the rax_* helper functions used above at build time.)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *

# invoke the module
main()
|
gpl-3.0
|
amwelch/a10sdk-python
|
a10sdk/core/cm/cm_ut_list_entry_2.py
|
2
|
3168
|
from a10sdk.common.A10BaseClass import A10BaseClass
class EntryCfg(A10BaseClass):
    """Nested ``entry-cfg`` element of a list entry.

    Does not support CRUD operations directly; use the parent object.

    :param distance: number in 1-255.
    :param protocol: one of ``"any"``, ``"static"``, ``"dynamic"``.
    :param weight: number in 1-255.
    :param dst: IPv4 address.
    :param mask: IPv4 netmask.
    :param gateway: IPv4 address.
    :param DeviceProxy: device proxy for REST operations and session
        handling; see ``common/device_proxy.py``.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "entry-cfg"
        self.DeviceProxy = ""
        # Schema-backed fields all default to empty strings.
        self.distance = ""
        self.protocol = ""
        self.weight = ""
        self.dst = ""
        self.mask = ""
        self.gateway = ""
        # Any keyword argument overrides the matching attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class List(A10BaseClass):
    """Nested ``list`` container holding ``entry-cfg`` items.

    Does not support CRUD operations directly; use the parent object.

    :param entry_cfg: list of entry objects (unique items, at least one).
    :param DeviceProxy: device proxy for REST operations and session
        handling; see ``common/device_proxy.py``.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "list"
        self.DeviceProxy = ""
        self.entry_cfg = []
        # Any keyword argument overrides the matching attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class ListEntry2(A10BaseClass):
    """Unit-test CM list where entry fields are mandatory for one with construct.

    Supports CRUD operations (inherited from ``common/A10BaseClass``) and is
    the ``"PARENT"`` class for this module.

    URL for this object:
    ``https://<Hostname|Ip address>//axapi/v3/cm-ut/list-entry-2``

    :param DeviceProxy: device proxy for REST operations and session
        handling; see ``common/device_proxy.py``.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "list-entry-2"
        self.a10_url = "/axapi/v3/cm-ut/list-entry-2"
        self.DeviceProxy = ""
        self.A10WW_list = {}
        # Any keyword argument overrides the matching attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
|
apache-2.0
|
rikima/spark
|
python/pyspark/streaming/listener.py
|
75
|
2333
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__all__ = ["StreamingListener"]
class StreamingListener(object):
    """No-op base class for receiving streaming lifecycle callbacks.

    Subclass and override the hooks of interest; every default
    implementation does nothing and returns ``None``.
    """

    def __init__(self):
        pass

    def onStreamingStarted(self, streamingStarted):
        """Hook invoked once the streaming context has been started."""
        pass

    def onReceiverStarted(self, receiverStarted):
        """Hook invoked when a receiver has been started."""
        pass

    def onReceiverError(self, receiverError):
        """Hook invoked when a receiver has reported an error."""
        pass

    def onReceiverStopped(self, receiverStopped):
        """Hook invoked when a receiver has been stopped."""
        pass

    def onBatchSubmitted(self, batchSubmitted):
        """Hook invoked when a batch of jobs is submitted for processing."""
        pass

    def onBatchStarted(self, batchStarted):
        """Hook invoked when processing of a batch of jobs begins."""
        pass

    def onBatchCompleted(self, batchCompleted):
        """Hook invoked when processing of a batch of jobs finishes."""
        pass

    def onOutputOperationStarted(self, outputOperationStarted):
        """Hook invoked when one job of a batch starts processing."""
        pass

    def onOutputOperationCompleted(self, outputOperationCompleted):
        """Hook invoked when one job of a batch finishes processing."""
        pass

    class Java:
        # Py4J bridge: which Java interface this Python class implements.
        implements = ["org.apache.spark.streaming.api.java.PythonStreamingListener"]
|
apache-2.0
|
MSA-Argentina/relojito_project
|
relojito/app/api_views.py
|
1
|
1715
|
from datetime import datetime
from rest_framework import generics, viewsets
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from .models import Project, Task, TaskType
from .serializers import ProjectSerializer, TaskSerializer, TaskTypeSerializer
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD endpoint for the current user's own tasks."""

    serializer_class = TaskSerializer
    model = Task

    def pre_save(self, obj):
        # Stamp ownership server-side; clients cannot assign tasks to others.
        obj.owner = self.request.user

    def get_queryset(self):
        return Task.objects.filter(owner=self.request.user)

    def list(self, request, *args, **kwargs):
        # Only the 15 most recently created tasks are listed.
        recent = self.get_queryset().order_by('-created_at')[:15]
        filtered = self.filter_queryset(recent)
        page = self.paginate_queryset(filtered)
        if page is None:
            serializer = self.get_serializer(filtered, many=True)
        else:
            serializer = self.get_pagination_serializer(page)
        return Response(serializer.data)
class TaskDayView(generics.ListAPIView):
    """List the current user's tasks for one day (``date`` URL kwarg)."""

    serializer_class = TaskSerializer
    model = Task

    def get_queryset(self):
        raw_date = self.kwargs['date']
        try:
            day = datetime.strptime(raw_date, '%Y-%m-%d').date()
        except ValueError:
            raise ParseError("Invalid date")
        return Task.objects.filter(owner=self.request.user, date=day)
class ProjectViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint for the active projects the user collaborates on."""

    serializer_class = ProjectSerializer
    model = Project

    def get_queryset(self):
        current_user = self.request.user
        return Project.objects.filter(projectcollaborator__user=current_user,
                                      is_active=True)
class TaskTypeViewSet(viewsets.ReadOnlyModelViewSet):
    # Read-only endpoint exposing every TaskType (no per-user filtering).
    serializer_class = TaskTypeSerializer
    model = TaskType
|
mit
|
hugs/django
|
django/contrib/flatpages/admin.py
|
82
|
1072
|
from django import forms
from django.contrib import admin
from django.contrib.flatpages.models import FlatPage
from django.utils.translation import ugettext_lazy as _
class FlatpageForm(forms.ModelForm):
    # Validated URL field: only letters, digits, underscores, dashes and
    # slashes are accepted; the help text reminds admins to include leading
    # and trailing slashes.
    url = forms.RegexField(label=_("URL"), max_length=100, regex=r'^[-\w/]+$',
        help_text = _("Example: '/about/contact/'. Make sure to have leading"
                      " and trailing slashes."),
        error_message = _("This value must contain only letters, numbers,"
                          " underscores, dashes or slashes."))

    class Meta:
        # Bound to the FlatPage model; all model fields are exposed
        # (older-Django style with no explicit `fields` declaration).
        model = FlatPage
class FlatPageAdmin(admin.ModelAdmin):
    # Use the validating form above instead of the auto-generated one.
    form = FlatpageForm
    fieldsets = (
        (None, {'fields': ('url', 'title', 'content', 'sites')}),
        # Rarely-used options are grouped and collapsed by default.
        (_('Advanced options'), {'classes': ('collapse',), 'fields': ('enable_comments', 'registration_required', 'template_name')}),
    )
    list_display = ('url', 'title')
    list_filter = ('sites', 'enable_comments', 'registration_required')
    search_fields = ('url', 'title')
# Make FlatPage editable in the admin using the customized options above.
admin.site.register(FlatPage, FlatPageAdmin)
|
bsd-3-clause
|
torchingloom/edx-platform
|
lms/djangoapps/verify_student/ssencrypt.py
|
55
|
7001
|
"""
NOTE: Anytime a `key` is passed into a function here, we assume it's a raw byte
string. It should *not* be a string representation of a hex value. In other
words, passing the `str` value of
`"32fe72aaf2abb44de9e161131b5435c8d37cbdb6f5df242ae860b283115f2dae"` is bad.
You want to pass in the result of calling .decode('hex') on that, so this instead:
"'2\xfer\xaa\xf2\xab\xb4M\xe9\xe1a\x13\x1bT5\xc8\xd3|\xbd\xb6\xf5\xdf$*\xe8`\xb2\x83\x11_-\xae'"
The RSA functions take any key format that RSA.importKey() accepts, so...
An RSA public key can be in any of the following formats:
* X.509 subjectPublicKeyInfo DER SEQUENCE (binary or PEM encoding)
* PKCS#1 RSAPublicKey DER SEQUENCE (binary or PEM encoding)
* OpenSSH (textual public key only)
An RSA private key can be in any of the following formats:
* PKCS#1 RSAPrivateKey DER SEQUENCE (binary or PEM encoding)
* PKCS#8 PrivateKeyInfo DER SEQUENCE (binary or PEM encoding)
* OpenSSH (textual public key only)
In case of PEM encoding, the private key can be encrypted with DES or 3TDES
according to a certain pass phrase. Only OpenSSL-compatible pass phrases are
supported.
"""
from hashlib import md5, sha256
import base64
import binascii
import hmac
import logging
from Crypto import Random
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
log = logging.getLogger(__name__)
def encrypt_and_encode(data, key):
    """AES-encrypt `data` under `key`, then URL-safe base64 encode it."""
    ciphertext = aes_encrypt(data, key)
    return base64.urlsafe_b64encode(ciphertext)
def decode_and_decrypt(encoded_data, key):
    """Reverse of encrypt_and_encode(): base64-decode, then AES-decrypt."""
    ciphertext = base64.urlsafe_b64decode(encoded_data)
    return aes_decrypt(ciphertext, key)
def aes_encrypt(data, key):
    """Return `data` padded to the AES block size and encrypted under `key`."""
    padded = pad(data)
    return aes_cipher_from_key(key).encrypt(padded)
def aes_decrypt(encrypted_data, key):
    """Decrypt `encrypted_data` under `key` and strip the block padding."""
    padded_plaintext = aes_cipher_from_key(key).decrypt(encrypted_data)
    return unpad(padded_plaintext)
def aes_cipher_from_key(key):
    """Build the AES-CBC cipher Software Secure expects for `key`.

    The IV is derived deterministically from the key itself (see
    generate_aes_iv), so build a fresh cipher object per operation.
    """
    iv = generate_aes_iv(key)
    return AES.new(key, AES.MODE_CBC, iv)
def generate_aes_iv(key):
    """Derive the IV Software Secure expects: md5(key + md5(key).hexdigest()),
    truncated to one AES block.

    Note the IV is a deterministic function of the key.
    """
    inner_digest = md5(key).hexdigest()
    return md5(key + inner_digest).hexdigest()[:AES.block_size]
def random_aes_key():
    """Return 32 cryptographically random bytes (an AES-256 key)."""
    rng = Random.new()
    return rng.read(32)
def pad(data):
    """Pad `data` to a whole number of AES blocks: append N copies of the
    byte N (PKCS#7-style), where N is the number of bytes needed."""
    pad_len = AES.block_size - len(data) % AES.block_size
    return data + chr(pad_len) * pad_len
def unpad(padded_data):
    """Strip block padding: the final byte encodes how many bytes to drop."""
    pad_len = ord(padded_data[-1])
    return padded_data[:-pad_len]
def rsa_encrypt(data, rsa_pub_key_str):
    """Encrypt `data` with RSAES-OAEP under the public key given as a
    string in any format RSA.importKey() accepts."""
    pub_key = RSA.importKey(rsa_pub_key_str)
    return PKCS1_OAEP.new(pub_key).encrypt(data)
def rsa_decrypt(data, rsa_priv_key_str):
    """Decrypt RSAES-OAEP `data` with the private key given as a string
    in any format RSA.importKey() accepts."""
    priv_key = RSA.importKey(rsa_priv_key_str)
    return PKCS1_OAEP.new(priv_key).decrypt(data)
def has_valid_signature(method, headers_dict, body_dict, access_key, secret_key):
    """Validate the HMAC signature carried by a request or response.

    Recomputes the expected signature for the message and compares both
    the access key and signature from the ``Authorization`` header.
    Returns True when both match, False otherwise.
    """
    _, expected_signature, _ = generate_signed_message(
        method, headers_dict, body_dict, access_key, secret_key
    )

    # Header format: "SSI <access_key>:<signature>"
    auth_token, post_signature = headers_dict["Authorization"].split(":")
    _, post_access_key = auth_token.split()

    if post_access_key != access_key:
        log.error("Posted access key does not match ours")
        log.debug("Their access: %s; Our access: %s", post_access_key, access_key)
        return False

    if post_signature != expected_signature:
        log.error("Posted signature does not match expected")
        log.debug("Their sig: %s; Expected: %s", post_signature, expected_signature)
        return False

    return True
def generate_signed_message(method, headers_dict, body_dict, access_key, secret_key):
    """Return ``(message, signature, authorization_header)`` for the payload.

    The message is the canonical form produced by signing_format_message()
    with a trailing newline appended.
    """
    message = signing_format_message(method, headers_dict, body_dict)

    # hmac needs a byte string for it's starting key, can't be unicode.
    digest = hmac.new(secret_key.encode('utf-8'), message, sha256).digest()
    signature = binascii.b2a_base64(digest).rstrip('\n')
    authorization_header = "SSI {}:{}".format(access_key, signature)

    message += '\n'
    return message, signature, authorization_header
def signing_format_message(method, headers_dict, body_dict):
    """Build the canonical string for this message that feeds the signature:
    the method, a blank line, the canonical headers, then the canonical body."""
    canonical_headers = "{}\n\n{}".format(method, header_string(headers_dict))
    return canonical_headers + body_string(body_dict)
def header_string(headers_dict):
    """Canonical header block: Content-Type, Date and Content-MD5 values in
    that fixed order, each followed by a newline; absent headers are skipped
    entirely (trailing newlines are significant for signing)."""
    parts = []
    for field in ('Content-Type', 'Date', 'Content-MD5'):
        if field in headers_dict:
            parts.append(headers_dict[field] + "\n")
    return "".join(parts)
def body_string(body_dict, prefix=""):
    """
    Return a canonical string representation of the body of a JSON request or
    response. This canonical representation will be used as an input to the
    hashing used to generate a signature.

    Keys are emitted in sorted order as ``prefix + key:value`` lines.
    List elements become ``key.<index>:value``; nested dicts recurse with a
    ``key.<index>.`` or ``key:`` prefix.

    NOTE(review): leaf lines are UTF-8 encoded byte strings while recursive
    results are not re-encoded -- this relies on Python 2 str/unicode mixing
    in ``"".join`` and would need rework under Python 3.
    """
    body_list = []
    for key, value in sorted(body_dict.items()):
        if isinstance(value, (list, tuple)):
            # Arrays: index each element as key.<i>
            for i, arr in enumerate(value):
                if isinstance(arr, dict):
                    body_list.append(body_string(arr, u"{}.{}.".format(key, i)))
                else:
                    body_list.append(u"{}.{}:{}\n".format(key, i, arr).encode('utf-8'))
        elif isinstance(value, dict):
            # Nested objects: recurse with a "key:" prefix
            body_list.append(body_string(value, key + ":"))
        else:
            if value is None:
                value = "null"  # match JSON's rendering of null
            body_list.append(u"{}{}:{}\n".format(prefix, key, value).encode('utf-8'))
    return "".join(body_list)  # Note that trailing \n's are important
|
agpl-3.0
|
taozhijiang/linux
|
tools/perf/python/twatch.py
|
625
|
2726
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main(context_switch = 0, thread = -1):
    """
    Poll perf for task lifetime events (and, when requested, context-switch
    events) and print each sample as it arrives.

    :param context_switch: when 1, also record PERF_RECORD_SWITCH events
    :param thread: tid to monitor, or -1 for all threads
    """
    cpus = perf.cpu_map()
    threads = perf.thread_map(thread)
    evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
                       config = perf.COUNT_SW_DUMMY,
                       task = 1, comm = 1, mmap = 0, freq = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1, context_switch = context_switch,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    """What we want are just the PERF_RECORD_ lifetime events for threads,
    using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1
    (the default), makes perf reenable irq_vectors:local_timer_entry, when
    disabling nohz, not good for some use cases where all we want is to get
    threads comes and goes... So use (perf.TYPE_SOFTWARE, perf_COUNT_SW_DUMMY,
    freq=0) instead."""
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Event loop: block until samples are available, then drain each CPU's
    # ring buffer and print every record.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # Trailing comma keeps the event on the same output line (Py2 print).
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
if __name__ == '__main__':
    """
    To test the PERF_RECORD_SWITCH record, pick a pid and replace
    in the following line.
    Example output:
    cpu: 3, pid: 31463, tid: 31593 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31593, switch_out: 1 }
    cpu: 1, pid: 31463, tid: 31489 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31489, switch_out: 1 }
    cpu: 2, pid: 31463, tid: 31496 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31496, switch_out: 1 }
    cpu: 3, pid: 31463, tid: 31491 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31491, switch_out: 0 }
    It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT
    to figure out if this is a context switch in or out of the monitored threads.
    If bored, please add command line option parsing support for these options :-)
    """
    # main(context_switch = 1, thread = 31463)
    # Default: watch comm/task lifetime events for all threads.
    main()
|
gpl-2.0
|
t0in4/django
|
django/db/backends/postgresql/version.py
|
632
|
1517
|
"""
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL #.#.#
# EnterpriseDB #.#
# PostgreSQL #.# beta#
# PostgreSQL #.#beta#
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
def get_version(connection):
    """
    Return an integer encoding the server's major, minor and revision number,
    in the same format as libpq's PQServerVersion() / the ``server_version``
    connection attribute (available in newer psycopg2 versions).
    For example, 90304 for 9.3.4. The last two digits are 00 for plain
    releases (e.g., 90400 for 'PostgreSQL 9.4') and for beta/prereleases
    (e.g. 90100 for 'PostgreSQL 9.1beta2').
    ``server_version`` doesn't require executing a query, so prefer it and
    only fall back to a ``SELECT version()`` query when it's missing.
    """
    if not hasattr(connection, 'server_version'):
        with connection.cursor() as cursor:
            cursor.execute("SELECT version()")
            return _parse_version(cursor.fetchone()[0])
    return connection.server_version
|
bsd-3-clause
|
SivilTaram/edx-platform
|
common/lib/xmodule/xmodule/modulestore/split_mongo/split.py
|
12
|
147531
|
"""
Provides full versioning CRUD and representation for collections of xblocks (e.g., courses, modules, etc).
Representation:
* course_index: a dictionary:
** '_id': a unique id which cannot change,
** 'org': the org's id. Only used for searching not identity,
** 'course': the course's catalog number
** 'run': the course's run id,
** 'edited_by': user_id of user who created the original entry,
** 'edited_on': the datetime of the original creation,
** 'versions': versions_dict: {branch_id: structure_id, ...}
** 'search_targets': a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
* structure:
** '_id': an ObjectId (guid),
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
identifier enabling quick determination if 2 structures have any shared history,
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** BlockKey: key mapping to each BlockData:
*** BlockData: object containing the following attributes:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'defaults': Scope.settings default values copied from a template block (used e.g. when
blocks are copied from a library to a course)
**** 'edit_info': EditInfo object:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
***** 'edited_by': user_id for who changed this xblock last (will be edited_by value of
update_version structure)
***** 'update_version': the guid for the structure where this xblock got its current field
values. This may point to a structure not in this structure's history (e.g., to a draft
branch from which this version was published.)
***** 'previous_version': the guid for the structure which previously changed this xblock
(will be the previous value of update_version; so, may point to a structure not in this
structure's history.)
***** 'source_version': the guid for the structure was copied/published into this block
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
*** 'edited_on': datetime of the change causing this version
*** 'previous_version': the definition_id of the previous version of this definition
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
"""
import copy
import datetime
import hashlib
import logging
from contracts import contract, new_contract
from importlib import import_module
from mongodb_proxy import autoretry_read
from path import Path as path
from pytz import UTC
from bson.objectid import ObjectId
from xblock.core import XBlock
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.errortracker import null_error_tracker
from opaque_keys.edx.locator import (
BlockUsageLocator, DefinitionLocator, CourseLocator, LibraryLocator, VersionTree, LocalId,
)
from xmodule.modulestore.exceptions import InsufficientSpecificationError, VersionConflictError, DuplicateItemError, \
DuplicateCourseError
from xmodule.modulestore import (
inheritance, ModuleStoreWriteBase, ModuleStoreEnum,
BulkOpsRecord, BulkOperationsMixin, SortedAssetList, BlockData
)
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection, DuplicateKeyError
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.error_module import ErrorDescriptor
from collections import defaultdict
from types import NoneType
from xmodule.assetstore import AssetMetadata
log = logging.getLogger(__name__)
# ==============================================================================
#
# Known issue:
# Inheritance for cached kvs doesn't work on edits. Use case.
# 1) attribute foo is inheritable
# 2) g.children = [p], p.children = [a]
# 3) g.foo = 1 on load
# 4) if g.foo > 0, if p.foo > 0, if a.foo > 0 all eval True
# 5) p.foo = -1
# 6) g.foo > 0, p.foo <= 0 all eval True BUT
# 7) BUG: a.foo > 0 still evals True but should be False
# 8) reread and everything works right
# 9) p.del(foo), p.foo > 0 is True! works
# 10) BUG: a.foo < 0!
# Local fix wont' permanently work b/c xblock may cache a.foo...
#
# ==============================================================================
# When blacklists are this, all children should be excluded
EXCLUDE_ALL = '*'

# Register the contract types used by the @contract annotations in this module.
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
new_contract('XBlock', XBlock)
class SplitBulkWriteRecord(BulkOpsRecord):
    """
    Accumulates the index, structure, definition, and module changes made to
    one course during a bulk operation so they can be flushed to the database
    in a batch when the outermost bulk operation ends.
    """
    def __init__(self):
        super(SplitBulkWriteRecord, self).__init__()
        # Course index document as it was when the bulk operation began.
        self.initial_index = None
        # Working (possibly edited) copy of the course index.
        self.index = None
        # structure_id -> structure document cached/created during the bulk op
        self.structures = {}
        # ids of structures known to already exist in the database
        self.structures_in_db = set()
        # dict(version_guid, dict(BlockKey, module))
        self.modules = defaultdict(dict)
        # definition_id -> definition document cached/created during the bulk op
        self.definitions = {}
        # ids of definitions known to already exist in the database
        self.definitions_in_db = set()
        self.course_key = None

    # TODO: This needs to track which branches have actually been modified/versioned,
    # so that copying one branch to another doesn't update the original branch.
    @property
    def dirty_branches(self):
        """
        Return a list of which branch version ids differ from what was stored
        in the database at the beginning of this bulk operation.
        """
        # If no course index has been set, then no branches have changed
        if self.index is None:
            return []
        # If there was no index in the database to start with, then all branches
        # are dirty by definition
        if self.initial_index is None:
            return self.index.get('versions', {}).keys()
        # Return branches whose ids differ between self.index and self.initial_index
        return [
            branch
            for branch, _id
            in self.index.get('versions', {}).items()
            if self.initial_index.get('versions', {}).get(branch) != _id
        ]

    def structure_for_branch(self, branch):
        # Look up the cached structure the working index points at for this branch.
        return self.structures.get(self.index.get('versions', {}).get(branch))

    def set_structure_for_branch(self, branch, structure):
        # Point the working index's branch at the structure, then cache the structure.
        if self.index is not None:
            self.index.setdefault('versions', {})[branch] = structure['_id']
        self.structures[structure['_id']] = structure

    def __repr__(self):
        return u"SplitBulkWriteRecord<{!r}, {!r}, {!r}, {!r}, {!r}>".format(
            self._active_count,
            self.initial_index,
            self.index,
            self.structures,
            self.structures_in_db,
        )
class SplitBulkWriteMixin(BulkOperationsMixin):
    """
    This implements the :meth:`bulk_operations` modulestore semantics for the :class:`SplitMongoModuleStore`.
    In particular, it implements :meth:`_begin_bulk_operation` and
    :meth:`_end_bulk_operation` to provide the external interface, and then exposes a set of methods
    for interacting with course_indexes and structures that can be used by :class:`SplitMongoModuleStore`.
    Internally, this mixin records the set of all active bulk operations (keyed on the active course),
    and only writes those values to ``self.mongo_connection`` when :meth:`_end_bulk_operation` is called.
    If a bulk write operation isn't active, then the changes are immediately written to the underlying
    mongo_connection.
    """
    _bulk_ops_record_type = SplitBulkWriteRecord

    def _get_bulk_ops_record(self, course_key, ignore_case=False):
        """
        Return the :class:`.SplitBulkWriteRecord` for this course.
        """
        # handle split specific things and defer to super otherwise
        if course_key is None:
            # No course context: return a fresh, inert record.
            return self._bulk_ops_record_type()
        if not isinstance(course_key, (CourseLocator, LibraryLocator)):
            raise TypeError(u'{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
        # handle version_guid based retrieval locally
        if course_key.org is None or course_key.course is None or course_key.run is None:
            return self._active_bulk_ops.records[
                course_key.replace(org=None, course=None, run=None, branch=None)
            ]
        # handle ignore case and general use
        return super(SplitBulkWriteMixin, self)._get_bulk_ops_record(
            course_key.replace(branch=None, version_guid=None), ignore_case
        )

    def _clear_bulk_ops_record(self, course_key):
        """
        Clear the record for this course
        """
        if not isinstance(course_key, (CourseLocator, LibraryLocator)):
            raise TypeError('{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
        # Use the same key normalization as _get_bulk_ops_record so the
        # lookup hits the same records entry.
        if course_key.org and course_key.course and course_key.run:
            del self._active_bulk_ops.records[course_key.replace(branch=None, version_guid=None)]
        else:
            del self._active_bulk_ops.records[
                course_key.replace(org=None, course=None, run=None, branch=None)
            ]

    def _start_outermost_bulk_operation(self, bulk_write_record, course_key):
        """
        Begin a bulk write operation on course_key.
        """
        bulk_write_record.initial_index = self.db_connection.get_course_index(course_key)
        # Ensure that any edits to the index don't pollute the initial_index
        bulk_write_record.index = copy.deepcopy(bulk_write_record.initial_index)
        bulk_write_record.course_key = course_key

    def _end_outermost_bulk_operation(self, bulk_write_record, structure_key):
        """
        End the active bulk write operation on structure_key (course or library key).

        Flushes all new structures and definitions, then the course index, to
        the database. Returns True if anything was actually written.
        """
        dirty = False
        # If the content is dirty, then update the database
        for _id in bulk_write_record.structures.viewkeys() - bulk_write_record.structures_in_db:
            dirty = True
            try:
                self.db_connection.insert_structure(bulk_write_record.structures[_id], bulk_write_record.course_key)
            except DuplicateKeyError:
                # We may not have looked up this structure inside this bulk operation, and thus
                # didn't realize that it was already in the database. That's OK, the store is
                # append only, so if it's already been written, we can just keep going.
                log.debug("Attempted to insert duplicate structure %s", _id)
        for _id in bulk_write_record.definitions.viewkeys() - bulk_write_record.definitions_in_db:
            dirty = True
            try:
                self.db_connection.insert_definition(bulk_write_record.definitions[_id], bulk_write_record.course_key)
            except DuplicateKeyError:
                # We may not have looked up this definition inside this bulk operation, and thus
                # didn't realize that it was already in the database. That's OK, the store is
                # append only, so if it's already been written, we can just keep going.
                log.debug("Attempted to insert duplicate definition %s", _id)
        # Write the index last, after its structures/definitions are persisted.
        if bulk_write_record.index is not None and bulk_write_record.index != bulk_write_record.initial_index:
            dirty = True
            if bulk_write_record.initial_index is None:
                self.db_connection.insert_course_index(bulk_write_record.index, bulk_write_record.course_key)
            else:
                self.db_connection.update_course_index(
                    bulk_write_record.index,
                    from_index=bulk_write_record.initial_index,
                    course_context=bulk_write_record.course_key
                )
        return dirty

    def get_course_index(self, course_key, ignore_case=False):
        """
        Return the index for course_key.
        """
        if self._is_in_bulk_operation(course_key, ignore_case):
            return self._get_bulk_ops_record(course_key, ignore_case).index
        else:
            return self.db_connection.get_course_index(course_key, ignore_case)

    def delete_course_index(self, course_key):
        """
        Delete the course index from cache and the db
        """
        if self._is_in_bulk_operation(course_key, False):
            self._clear_bulk_ops_record(course_key)
        self.db_connection.delete_course_index(course_key)

    def insert_course_index(self, course_key, index_entry):
        """
        Create the index entry for course_key, deferring the database write
        while a bulk operation is active.
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            bulk_write_record.index = index_entry
        else:
            self.db_connection.insert_course_index(index_entry, course_key)

    def update_course_index(self, course_key, updated_index_entry):
        """
        Change the given course's index entry.
        Note, this operation can be dangerous and break running courses.
        Does not return anything useful.
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            bulk_write_record.index = updated_index_entry
        else:
            self.db_connection.update_course_index(updated_index_entry, course_key)

    def get_structure(self, course_key, version_guid):
        """
        Return the structure with the given version_guid, preferring any copy
        cached by an active bulk operation on course_key.
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            structure = bulk_write_record.structures.get(version_guid)
            # The structure hasn't been loaded from the db yet, so load it
            if structure is None:
                structure = self.db_connection.get_structure(version_guid, course_key)
                bulk_write_record.structures[version_guid] = structure
                if structure is not None:
                    bulk_write_record.structures_in_db.add(version_guid)
            return structure
        else:
            # cast string to ObjectId if necessary
            version_guid = course_key.as_object_id(version_guid)
            return self.db_connection.get_structure(version_guid, course_key)

    def update_structure(self, course_key, structure):
        """
        Update a course structure, respecting the current bulk operation status
        (no data will be written to the database if a bulk operation is active.)
        """
        self._clear_cache(structure['_id'])
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            bulk_write_record.structures[structure['_id']] = structure
        else:
            self.db_connection.insert_structure(structure, course_key)

    def get_cached_block(self, course_key, version_guid, block_id):
        """
        If there's an active bulk_operation, see if it's cached this module and just return it
        Don't do any extra work to get the ones which are not cached. Make the caller do the work & cache them.
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            return bulk_write_record.modules[version_guid].get(block_id, None)
        else:
            return None

    def cache_block(self, course_key, version_guid, block_key, block):
        """
        The counterpart to :method `get_cached_block` which caches a block.
        Returns nothing.
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            bulk_write_record.modules[version_guid][block_key] = block

    def decache_block(self, course_key, version_guid, block_key):
        """
        Write operations which don't write from blocks must remove the target blocks from the cache.
        Returns nothing.
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            try:
                del bulk_write_record.modules[version_guid][block_key]
            except KeyError:
                # not cached; nothing to do
                pass

    def get_definition(self, course_key, definition_guid):
        """
        Retrieve a single definition by id, respecting the active bulk operation
        on course_key.
        Args:
            course_key (:class:`.CourseKey`): The course being operated on
            definition_guid (str or ObjectID): The id of the definition to load
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            definition = bulk_write_record.definitions.get(definition_guid)
            # The definition hasn't been loaded from the db yet, so load it
            if definition is None:
                definition = self.db_connection.get_definition(definition_guid, course_key)
                bulk_write_record.definitions[definition_guid] = definition
                if definition is not None:
                    bulk_write_record.definitions_in_db.add(definition_guid)
            return definition
        else:
            # cast string to ObjectId if necessary
            definition_guid = course_key.as_object_id(definition_guid)
            return self.db_connection.get_definition(definition_guid, course_key)

    def get_definitions(self, course_key, ids):
        """
        Return all definitions that specified in ``ids``.
        If a definition with the same id is in both the cache and the database,
        the cached version will be preferred.
        Arguments:
            course_key (:class:`.CourseKey`): The course that these definitions are being loaded
                for (to respect bulk operations).
            ids (list): A list of definition ids
        """
        definitions = []
        ids = set(ids)
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            # Only query for the definitions that aren't already cached.
            for definition in bulk_write_record.definitions.values():
                definition_id = definition.get('_id')
                if definition_id in ids:
                    ids.remove(definition_id)
                    definitions.append(definition)
        if len(ids):
            # Query the db for the definitions.
            defs_from_db = self.db_connection.get_definitions(list(ids), course_key)
            # Add the retrieved definitions to the cache.
            bulk_write_record.definitions.update({d.get('_id'): d for d in defs_from_db})
            definitions.extend(defs_from_db)
        return definitions

    def update_definition(self, course_key, definition):
        """
        Update a definition, respecting the current bulk operation status
        (no data will be written to the database if a bulk operation is active.)
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            bulk_write_record.definitions[definition['_id']] = definition
        else:
            self.db_connection.insert_definition(definition, course_key)

    def version_structure(self, course_key, structure, user_id):
        """
        Copy the structure and update the history info (edited_by, edited_on, previous_version)
        """
        if course_key.branch is None:
            raise InsufficientSpecificationError(course_key)
        bulk_write_record = self._get_bulk_ops_record(course_key)
        # If we have an active bulk write, and it's already been edited, then just use that structure
        if bulk_write_record.active and course_key.branch in bulk_write_record.dirty_branches:
            return bulk_write_record.structure_for_branch(course_key.branch)
        # Otherwise, make a new structure
        new_structure = copy.deepcopy(structure)
        new_structure['_id'] = ObjectId()
        new_structure['previous_version'] = structure['_id']
        new_structure['edited_by'] = user_id
        new_structure['edited_on'] = datetime.datetime.now(UTC)
        new_structure['schema_version'] = self.SCHEMA_VERSION
        # If we're in a bulk write, update the structure used there, and mark it as dirty
        if bulk_write_record.active:
            bulk_write_record.set_structure_for_branch(course_key.branch, new_structure)
        return new_structure

    def version_block(self, block_data, user_id, update_version):
        """
        Update the block_data object based on it having been edited.
        """
        # No-op when this block was already versioned at update_version.
        if block_data.edit_info.update_version == update_version:
            return
        # Capture original-usage info before rewriting the edit metadata,
        # then re-assert it afterwards.
        original_usage = block_data.edit_info.original_usage
        original_usage_version = block_data.edit_info.original_usage_version
        block_data.edit_info.edited_on = datetime.datetime.now(UTC)
        block_data.edit_info.edited_by = user_id
        block_data.edit_info.previous_version = block_data.edit_info.update_version
        block_data.edit_info.update_version = update_version
        if original_usage:
            block_data.edit_info.original_usage = original_usage
            block_data.edit_info.original_usage_version = original_usage_version

    def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None):
        """
        Find the course_indexes which have the specified branch and search_targets. An optional org_target
        can be specified to apply an ORG filter to return only the courses that are part of
        that ORG.
        Returns:
            a Cursor if there are no changes in flight or a list if some have changed in current bulk op
        """
        indexes = self.db_connection.find_matching_course_indexes(branch, search_targets, org_target)

        def _replace_or_append_index(altered_index):
            """
            If the index is already in indexes, replace it. Otherwise, append it.
            """
            for index, existing in enumerate(indexes):
                if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):
                    indexes[index] = altered_index
                    return
            indexes.append(altered_index)

        # add any being built but not yet persisted or in the process of being updated
        for _, record in self._active_records:
            if branch and branch not in record.index.get('versions', {}):
                continue
            if search_targets:
                # Skip records that fail any of the requested search targets.
                if any(
                    'search_targets' not in record.index or
                    field not in record.index['search_targets'] or
                    record.index['search_targets'][field] != value
                    for field, value in search_targets.iteritems()
                ):
                    continue
            # if we've specified a filter by org,
            # make sure we've honored that filter when
            # integrating in-transit records
            if org_target:
                if record.index['org'] != org_target:
                    continue
            if not hasattr(indexes, 'append'):  # Just in time conversion to list from cursor
                indexes = list(indexes)
            _replace_or_append_index(record.index)
        return indexes

    def find_structures_by_id(self, ids):
        """
        Return all structures that specified in ``ids``.
        If a structure with the same id is in both the cache and the database,
        the cached version will be preferred.
        Arguments:
            ids (list): A list of structure ids
        """
        structures = []
        ids = set(ids)
        # Pull cached copies from any active bulk operations first.
        for _, record in self._active_records:
            for structure in record.structures.values():
                structure_id = structure.get('_id')
                if structure_id in ids:
                    ids.remove(structure_id)
                    structures.append(structure)
        # Fetch whatever remains from the database.
        structures.extend(self.db_connection.find_structures_by_id(list(ids)))
        return structures

    def find_structures_derived_from(self, ids):
        """
        Return all structures that were immediately derived from a structure listed in ``ids``.
        Arguments:
            ids (list): A list of structure ids
        """
        found_structure_ids = set()
        structures = []
        # Cached copies from active bulk operations win over database copies.
        for _, record in self._active_records:
            for structure in record.structures.values():
                if structure.get('previous_version') in ids:
                    structures.append(structure)
                    if '_id' in structure:
                        found_structure_ids.add(structure['_id'])
        structures.extend(
            structure
            for structure in self.db_connection.find_structures_derived_from(ids)
            if structure['_id'] not in found_structure_ids
        )
        return structures

    def find_ancestor_structures(self, original_version, block_key):
        """
        Find all structures that originated from ``original_version`` that contain ``block_key``.
        Any structure found in the cache will be preferred to a structure with the same id from the database.
        Arguments:
            original_version (str or ObjectID): The id of a structure
            block_key (BlockKey): The id of the block in question
        """
        found_structure_ids = set()
        structures = []
        for _, record in self._active_records:
            for structure in record.structures.values():
                if 'original_version' not in structure:
                    continue
                if structure['original_version'] != original_version:
                    continue
                if block_key not in structure.get('blocks', {}):
                    continue
                if 'update_version' not in structure['blocks'][block_key].get('edit_info', {}):
                    continue
                structures.append(structure)
                found_structure_ids.add(structure['_id'])
        structures.extend(
            structure
            for structure in self.db_connection.find_ancestor_structures(original_version, block_key)
            if structure['_id'] not in found_structure_ids
        )
        return structures
class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
"""
A Mongodb backed ModuleStore supporting versions, inheritance,
and sharing.
"""
SCHEMA_VERSION = 1
# a list of field names to store in course index search_targets. Note, this will
# only record one value per key. If branches disagree, the last one set wins.
# It won't recompute the value on operations such as update_course_index (e.g., to revert to a prev
# version) but those functions will have an optional arg for setting these.
SEARCH_TARGET_DICT = ['wiki_slug']
    def __init__(self, contentstore, doc_store_config, fs_root, render_template,
                 default_class=None,
                 error_tracker=null_error_tracker,
                 i18n_service=None, fs_service=None, user_service=None,
                 services=None, signal_handler=None, **kwargs):
        """
        :param doc_store_config: must have a host, db, and collection entries. Other common entries: port, tz_aware.
        :param fs_root: root path used for filesystem access by modules
        :param render_template: template-rendering callable made available to the runtime
        :param default_class: dotted path of the class to use when a block has no explicit type
        :param services: optional dict of runtime services; the i18n/fs/user keyword
            args and request_cache are merged into it when provided
        """
        super(SplitMongoModuleStore, self).__init__(contentstore, **kwargs)
        self.db_connection = MongoConnection(**doc_store_config)
        self.db = self.db_connection.database
        if default_class is not None:
            # Resolve the dotted path into the actual class object.
            module_path, __, class_name = default_class.rpartition('.')
            class_ = getattr(import_module(module_path), class_name)
            self.default_class = class_
        else:
            self.default_class = None
        self.fs_root = path(fs_root)
        self.error_tracker = error_tracker
        self.render_template = render_template
        self.services = services or {}
        if i18n_service is not None:
            self.services["i18n"] = i18n_service
        if fs_service is not None:
            self.services["fs"] = fs_service
        if user_service is not None:
            self.services["user"] = user_service
        if self.request_cache is not None:
            self.services["request_cache"] = self.request_cache
        self.signal_handler = signal_handler
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
self.db.connection.close()
def mongo_wire_version(self):
"""
Returns the wire version for mongo. Only used to unit tests which instrument the connection.
"""
return self.db.connection.max_wire_version
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
# drop the assets
super(SplitMongoModuleStore, self)._drop_database()
connection = self.db.connection
connection.drop_database(self.db.name)
connection.close()
    def cache_items(self, system, base_block_ids, course_key, depth=0, lazy=True):
        """
        Handles caching of items once inheritance and any other one time
        per course per fetch operations are done.
        Arguments:
            system: a CachingDescriptorSystem
            base_block_ids: list of BlockIds to fetch
            course_key: the destination course providing the context
            depth: how deep below these to prefetch
            lazy: whether to load definitions now or later
        """
        with self.bulk_operations(course_key, emit_signals=False):
            new_module_data = {}
            # Collect each requested block plus its descendants (to `depth`)
            # into one id -> block map.
            for block_id in base_block_ids:
                new_module_data = self.descendants(
                    system.course_entry.structure['blocks'],
                    block_id,
                    depth,
                    new_module_data
                )
            # This method supports lazy loading, where the descendent definitions aren't loaded
            # until they're actually needed.
            if not lazy:
                # Non-lazy loading: Load all descendants by id.
                descendent_definitions = self.get_definitions(
                    course_key,
                    [
                        block.definition
                        for block in new_module_data.itervalues()
                    ]
                )
                # Turn definitions into a map.
                definitions = {definition['_id']: definition
                               for definition in descendent_definitions}
                for block in new_module_data.itervalues():
                    if block.definition in definitions:
                        definition = definitions[block.definition]
                        # convert_fields gets done later in the runtime's xblock_from_json
                        block.fields.update(definition.get('fields'))
                        block.definition_loaded = True
            system.module_data.update(new_module_data)
            return system.module_data
@contract(course_entry=CourseEnvelope, block_keys="list(BlockKey)", depth="int | None")
def _load_items(self, course_entry, block_keys, depth=0, **kwargs):
"""
Load & cache the given blocks from the course. May return the blocks in any order.
Load the definitions into each block if lazy is in kwargs and is False;
otherwise, do not load the definitions - they'll be loaded later when needed.
"""
runtime = self._get_cache(course_entry.structure['_id'])
if runtime is None:
lazy = kwargs.pop('lazy', True)
runtime = self.create_runtime(course_entry, lazy)
self._add_cache(course_entry.structure['_id'], runtime)
self.cache_items(runtime, block_keys, course_entry.course_key, depth, lazy)
return [runtime.load_item(block_key, course_entry, **kwargs) for block_key in block_keys]
def _get_cache(self, course_version_guid):
"""
Find the descriptor cache for this course if it exists
:param course_version_guid:
"""
if self.request_cache is None:
return None
return self.request_cache.data.setdefault('course_cache', {}).get(course_version_guid)
def _add_cache(self, course_version_guid, system):
"""
Save this cache for subsequent access
:param course_version_guid:
:param system:
"""
if self.request_cache is not None:
self.request_cache.data.setdefault('course_cache', {})[course_version_guid] = system
return system
def _clear_cache(self, course_version_guid=None):
"""
Should only be used by testing or something which implements transactional boundary semantics.
:param course_version_guid: if provided, clear only this entry
"""
if self.request_cache is None:
return
if course_version_guid:
try:
del self.request_cache.data.setdefault('course_cache', {})[course_version_guid]
except KeyError:
pass
else:
self.request_cache.data['course_cache'] = {}
    def _lookup_course(self, course_key, head_validation=True):
        """
        Decode the locator into the right series of db access. Does not
        return the CourseDescriptor! It returns the actual db json from
        structures.

        Semantics: if course id and branch given, then it will get that branch. If
        also give a version_guid, it will see if the current head of that branch == that guid. If not
        it raises VersionConflictError (the version now differs from what it was when you got your
        reference) unless you specify head_validation = False, in which case it will return the
        revision (if specified) by the course_key.

        :param course_key: any subclass of CourseLocator
        :raises InsufficientSpecificationError: no branch when resolving by id, or
            neither a full id nor a version_guid was supplied
        :raises ItemNotFoundError: index or structure not found
        :raises VersionConflictError: head_validation on and the branch head moved
        :returns: a CourseEnvelope pairing a version-pinned course key with the structure json
        """
        # Without a version_guid there is nothing to bypass validation for, so
        # force the head-lookup path.
        if not course_key.version_guid:
            head_validation = True
        if head_validation and course_key.org and course_key.course and course_key.run:
            if course_key.branch is None:
                raise InsufficientSpecificationError(course_key)
            # use the course id
            index = self.get_course_index(course_key)
            if index is None:
                raise ItemNotFoundError(course_key)
            if course_key.branch not in index['versions']:
                raise ItemNotFoundError(course_key)
            # head of the requested branch
            version_guid = index['versions'][course_key.branch]
            if course_key.version_guid is not None and version_guid != course_key.version_guid:
                # This may be a bit too touchy but it's hard to infer intent
                raise VersionConflictError(course_key, version_guid)
        elif course_key.version_guid is None:
            raise InsufficientSpecificationError(course_key)
        else:
            # TODO should this raise an exception if branch was provided?
            version_guid = course_key.version_guid
        entry = self.get_structure(course_key, version_guid)
        if entry is None:
            raise ItemNotFoundError('Structure: {}'.format(version_guid))
        # b/c more than one course can use same structure, the 'org', 'course',
        # 'run', and 'branch' are not intrinsic to structure
        # and the one assoc'd w/ it by another fetch may not be the one relevant to this fetch; so,
        # add it in the envelope for the structure.
        return CourseEnvelope(course_key.replace(version_guid=version_guid), entry)
def _get_structures_for_branch(self, branch, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
"""
# if we pass in a 'org' parameter that means to
# only get the course which match the passed in
# ORG
matching_indexes = self.find_matching_course_indexes(
branch,
search_targets=None,
org_target=kwargs.get('org')
)
# collect ids and then query for those
version_guids = []
id_version_map = {}
for course_index in matching_indexes:
version_guid = course_index['versions'][branch]
version_guids.append(version_guid)
id_version_map[version_guid] = course_index
if not version_guids:
return
for entry in self.find_structures_by_id(version_guids):
yield entry, id_version_map[entry['_id']]
def _get_structures_for_branch_and_locator(self, branch, locator_factory, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
:param str branch: Branch to fetch structures from
:param type locator_factory: Factory to create locator from structure info and branch
"""
result = []
for entry, structure_info in self._get_structures_for_branch(branch, **kwargs):
locator = locator_factory(structure_info, branch)
envelope = CourseEnvelope(locator, entry)
root = entry['root']
structures_list = self._load_items(envelope, [root], depth=0, **kwargs)
if not isinstance(structures_list[0], ErrorDescriptor):
result.append(structures_list[0])
return result
def _create_course_locator(self, course_info, branch):
"""
Creates course locator using course_info dict and branch
"""
return CourseLocator(
org=course_info['org'],
course=course_info['course'],
run=course_info['run'],
branch=branch,
)
def _create_library_locator(self, library_info, branch):
"""
Creates library locator using library_info dict and branch
"""
return LibraryLocator(
org=library_info['org'],
library=library_info['course'],
branch=branch,
)
@autoretry_read()
def get_courses(self, branch, **kwargs):
"""
Returns a list of course descriptors matching any given qualifiers.
qualifiers should be a dict of keywords matching the db fields or any
legal query for mongo to use against the active_versions collection.
Note, this is to find the current head of the named branch type.
To get specific versions via guid use get_course.
:param branch: the branch for which to return courses.
"""
# get the blocks for each course index (s/b the root)
return self._get_structures_for_branch_and_locator(branch, self._create_course_locator, **kwargs)
def get_libraries(self, branch="library", **kwargs):
"""
Returns a list of "library" root blocks matching any given qualifiers.
TODO: better way of identifying library index entry vs. course index entry.
"""
return self._get_structures_for_branch_and_locator(branch, self._create_library_locator, **kwargs)
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
return CourseLocator(org, course, run)
def _get_structure(self, structure_id, depth, head_validation=True, **kwargs):
"""
Gets Course or Library by locator
"""
structure_entry = self._lookup_course(structure_id, head_validation=head_validation)
root = structure_entry.structure['root']
result = self._load_items(structure_entry, [root], depth, **kwargs)
return result[0]
def get_course(self, course_id, depth=0, **kwargs):
"""
Gets the course descriptor for the course identified by the locator
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_id)
return self._get_structure(course_id, depth, **kwargs)
def get_library(self, library_id, depth=0, head_validation=True, **kwargs):
"""
Gets the 'library' root block for the library identified by the locator
"""
if not isinstance(library_id, LibraryLocator):
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(library_id)
return self._get_structure(library_id, depth, head_validation=head_validation, **kwargs)
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
Does this course exist in this modulestore. This method does not verify that the branch &/or
version in the course_id exists. Use get_course_index_info to check that.
Returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
course_index = self.get_course_index(course_id, ignore_case)
return CourseLocator(course_index['org'], course_index['course'], course_index['run'], course_id.branch) if course_index else None
def has_library(self, library_id, ignore_case=False, **kwargs):
"""
Does this library exist in this modulestore. This method does not verify that the branch &/or
version in the library_id exists.
Returns the library_id of the course if it was found, else None.
"""
if not isinstance(library_id, LibraryLocator):
return None
index = self.get_course_index(library_id, ignore_case)
if index:
return LibraryLocator(index['org'], index['course'], library_id.branch)
return None
def has_item(self, usage_key):
"""
Returns True if usage_key exists in its course. Returns false if
the course or the block w/in the course do not exist for the given version.
raises InsufficientSpecificationError if the usage_key does not id a block
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
if usage_key.block_id is None:
raise InsufficientSpecificationError(usage_key)
try:
course_structure = self._lookup_course(usage_key.course_key).structure
except ItemNotFoundError:
# this error only occurs if the course does not exist
return False
return self._get_block_from_structure(course_structure, BlockKey.from_usage_key(usage_key)) is not None
@contract(returns='XBlock')
def get_item(self, usage_key, depth=0, **kwargs):
"""
depth (int): An argument that some module stores may use to prefetch
descendants of the queried modules for more efficient results later
in the request. The depth is counted in the number of
calls to get_children() to cache. None indicates to cache all
descendants.
raises InsufficientSpecificationError or ItemNotFoundError
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(usage_key)
with self.bulk_operations(usage_key.course_key):
course = self._lookup_course(usage_key.course_key)
items = self._load_items(course, [BlockKey.from_usage_key(usage_key)], depth, **kwargs)
if len(items) == 0:
raise ItemNotFoundError(usage_key)
elif len(items) > 1:
log.debug("Found more than one item for '{}'".format(usage_key))
return items[0]
    def get_items(self, course_locator, settings=None, content=None, qualifiers=None, **kwargs):
        """
        Returns:
            list of XModuleDescriptor instances for the matching items within the course with
            the given course_locator

        NOTE: don't use this to look for courses as the course_locator is required. Use get_courses.

        Args:
            course_locator (CourseLocator): the course identifier
            settings (dict): fields to look for which have settings scope. Follows same syntax
                and rules as qualifiers below
            content (dict): fields to look for which have content scope. Follows same syntax and
                rules as qualifiers below.
            qualifiers (dict): what to look for within the course.
                Common qualifiers are ``category`` or any field name. if the target field is a list,
                then it searches for the given value in the list not list equivalence.
                For substring matching pass a regex object.
                For split,
                you can search by ``edited_by``, ``edited_on`` providing a function testing limits.
        """
        if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
            # The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
            return []
        course = self._lookup_course(course_locator)
        items = []
        qualifiers = qualifiers.copy() if qualifiers else {}  # copy the qualifiers (destructively manipulated here)
        def _block_matches_all(block_data):
            """
            Check that the block matches all the criteria
            """
            # NOTE: closes over ``qualifiers`` and ``settings``; ``settings`` is
            # normalized to a dict below before this is ever called. Implicitly
            # returns None (falsy) when the cheap checks fail.
            # do the checks which don't require loading any additional data
            if ( # pylint: disable=bad-continuation
                self._block_matches(block_data, qualifiers) and
                self._block_matches(block_data.fields, settings)
            ):
                if content:
                    # content-scope checks need the definition fetched from the db
                    definition_block = self.get_definition(course_locator, block_data.definition)
                    return self._block_matches(definition_block['fields'], content)
                else:
                    return True
        if settings is None:
            settings = {}
        if 'name' in qualifiers:
            # odd case where we don't search just confirm
            block_name = qualifiers.pop('name')
            block_ids = []
            for block_id, block in course.structure['blocks'].iteritems():
                # BlockKey.id carries the block's name portion
                if block_name == block_id.id and _block_matches_all(block):
                    block_ids.append(block_id)
            return self._load_items(course, block_ids, **kwargs)
        if 'category' in qualifiers:
            qualifiers['block_type'] = qualifiers.pop('category')
        # don't expect caller to know that children are in fields
        if 'children' in qualifiers:
            settings['children'] = qualifiers.pop('children')
        for block_id, value in course.structure['blocks'].iteritems():
            if _block_matches_all(value):
                items.append(block_id)
        if len(items) > 0:
            return self._load_items(course, items, depth=0, **kwargs)
        else:
            return []
def get_parent_location(self, locator, **kwargs):
"""
Return the location (Locators w/ block_ids) for the parent of this location in this
course. Could use get_items(location, {'children': block_id}) but this is slightly faster.
NOTE: the locator must contain the block_id, and this code does not actually ensure block_id exists
:param locator: BlockUsageLocator restricting search scope
"""
if not isinstance(locator, BlockUsageLocator) or locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(locator)
course = self._lookup_course(locator.course_key)
parent_ids = self._get_parents_from_structure(BlockKey.from_usage_key(locator), course.structure)
if len(parent_ids) == 0:
return None
# find alphabetically least
parent_ids.sort(key=lambda parent: (parent.type, parent.id))
return BlockUsageLocator.make_relative(
locator,
block_type=parent_ids[0].type,
block_id=parent_ids[0].id,
)
def get_orphans(self, course_key, **kwargs):
"""
Return an array of all of the orphans in the course.
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
course = self._lookup_course(course_key)
items = set(course.structure['blocks'].keys())
items.remove(course.structure['root'])
blocks = course.structure['blocks']
for block_id, block_data in blocks.iteritems():
items.difference_update(BlockKey(*child) for child in block_data.fields.get('children', []))
if block_data.block_type in detached_categories:
items.discard(block_id)
return [
course_key.make_usage_key(block_type=block_id.type, block_id=block_id.id)
for block_id in items
]
def get_course_index_info(self, course_key):
"""
The index records the initial creation of the indexed course and tracks the current version
heads. This function is primarily for test verification but may serve some
more general purpose.
:param course_key: must have a org, course, and run set
:return {'org': string,
versions: {'draft': the head draft version id,
'published': the head published version id if any,
},
'edited_by': who created the course originally (named edited for consistency),
'edited_on': when the course was originally created
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
if not (course_key.course and course_key.run and course_key.org):
return None
index = self.get_course_index(course_key)
return index
# TODO figure out a way to make this info accessible from the course descriptor
def get_course_history_info(self, course_key):
"""
Because xblocks doesn't give a means to separate the course structure's meta information from
the course xblock's, this method will get that info for the structure as a whole.
:param course_key:
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
course = self._lookup_course(course_key).structure
return {
'original_version': course['original_version'],
'previous_version': course['previous_version'],
'edited_by': course['edited_by'],
'edited_on': course['edited_on']
}
def get_definition_history_info(self, definition_locator, course_context=None):
"""
Because xblocks doesn't give a means to separate the definition's meta information from
the usage xblock's, this method will get that info for the definition
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(definition_locator, DefinitionLocator) or definition_locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(definition_locator)
definition = self.db_connection.get_definition(definition_locator.definition_id, course_context)
if definition is None:
return None
return definition['edit_info']
def get_course_successors(self, course_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this course. Return as a VersionTree
Mostly makes sense when course_locator uses a version_guid, but because it finds all relevant
next versions, these do include those created for other courses.
:param course_locator:
"""
if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_locator)
if version_history_depth < 1:
return None
if course_locator.version_guid is None:
course = self._lookup_course(course_locator)
version_guid = course.structure['_id']
course_locator = course_locator.for_version(version_guid)
else:
version_guid = course_locator.version_guid
# TODO if depth is significant, it may make sense to get all that have the same original_version
# and reconstruct the subtree from version_guid
next_entries = self.find_structures_derived_from([version_guid])
# must only scan cursor's once
next_versions = [struct for struct in next_entries]
result = {version_guid: [CourseLocator(version_guid=struct['_id']) for struct in next_versions]}
depth = 1
while depth < version_history_depth and len(next_versions) > 0:
depth += 1
next_entries = self.find_structures_derived_from([struct['_id'] for struct in next_versions])
next_versions = [struct for struct in next_entries]
for course_structure in next_versions:
result.setdefault(course_structure['previous_version'], []).append(
CourseLocator(version_guid=struct['_id']))
return VersionTree(course_locator, result)
    def get_block_generations(self, block_locator):
        """
        Find the history of this block. Return as a VersionTree of each place the block changed (except
        deletion).

        The block's history tracks its explicit changes but not the changes in its children starting
        from when the block was created.

        :param block_locator: BlockUsageLocator for the block of interest
        :returns: a VersionTree rooted at the block's (relevant) creation version,
            or None if no creation version could be found
        """
        # course_agnostic means we don't care if the head and version don't align, trust the version
        course_struct = self._lookup_course(block_locator.course_key.course_agnostic()).structure
        block_key = BlockKey.from_usage_key(block_locator)
        all_versions_with_block = self.find_ancestor_structures(
            original_version=course_struct['original_version'],
            block_key=block_key
        )
        # find (all) root versions and build map {previous: {successors}..}
        possible_roots = []
        result = {}
        for version in all_versions_with_block:
            block_payload = self._get_block_from_structure(version, block_key)
            # only consider versions in which this block itself was changed
            if version['_id'] == block_payload.edit_info.update_version:
                if block_payload.edit_info.previous_version is None:
                    # this was when this block was created
                    possible_roots.append(block_payload.edit_info.update_version)
                else: # map previous to {update..}
                    result.setdefault(block_payload.edit_info.previous_version, set()).add(
                        block_payload.edit_info.update_version)
        # more than one possible_root means usage was added and deleted > 1x.
        if len(possible_roots) > 1:
            # find the history segment including block_locator's version
            element_to_find = self._get_block_from_structure(course_struct, block_key).edit_info.update_version
            if element_to_find in possible_roots:
                possible_roots = [element_to_find]
            for possibility in possible_roots:
                # keep only the root whose subtree (per result) reaches element_to_find
                if self._find_local_root(element_to_find, possibility, result):
                    possible_roots = [possibility]
                    break
        elif len(possible_roots) == 0:
            return None
        # convert the results value sets to locators
        for k, versions in result.iteritems():
            result[k] = [
                block_locator.for_version(version)
                for version in versions
            ]
        return VersionTree(
            block_locator.for_version(possible_roots[0]),
            result
        )
def get_definition_successors(self, definition_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this definition. Return as a VersionTree
"""
# TODO implement
pass
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator and version from
which the copy was inherited.
Returns usage_key, version if the data is available, otherwise returns (None, None)
"""
blocks = self._lookup_course(usage_key.course_key).structure['blocks']
block = blocks.get(BlockKey.from_usage_key(usage_key))
if block and block.edit_info.original_usage is not None:
usage_key = BlockUsageLocator.from_string(block.edit_info.original_usage)
return usage_key, block.edit_info.original_usage_version
return None, None
def create_definition_from_data(self, course_key, new_def_data, category, user_id):
"""
Pull the definition fields out of descriptor and save to the db as a new definition
w/o a predecessor and return the new id.
:param user_id: request.user object
"""
new_def_data = self._serialize_fields(category, new_def_data)
new_id = ObjectId()
document = {
'_id': new_id,
"block_type": category,
"fields": new_def_data,
"edit_info": {
"edited_by": user_id,
"edited_on": datetime.datetime.now(UTC),
"previous_version": None,
"original_version": new_id,
},
'schema_version': self.SCHEMA_VERSION,
}
self.update_definition(course_key, document)
definition_locator = DefinitionLocator(category, new_id)
return definition_locator
    def update_definition_from_data(self, course_key, definition_locator, new_def_data, user_id):
        """
        See if new_def_data differs from the persisted version. If so, update
        the persisted version and return the new id.

        :param user_id: request.user
        :returns: (definition_locator, changed) where changed is True iff a new
            definition version was persisted
        """
        def needs_saved():
            # True when any field was added/changed or removed relative to the
            # persisted definition. NOTE: closes over ``old_definition``, which
            # is bound below before this closure is called; implicitly returns
            # None (falsy) when nothing differs.
            for key, value in new_def_data.iteritems():
                if key not in old_definition['fields'] or value != old_definition['fields'][key]:
                    return True
            for key, value in old_definition.get('fields', {}).iteritems():
                if key not in new_def_data:
                    return True
        # if this looks in cache rather than fresh fetches, then it will probably not detect
        # actual change b/c the descriptor and cache probably point to the same objects
        old_definition = self.get_definition(course_key, definition_locator.definition_id)
        if old_definition is None:
            raise ItemNotFoundError(definition_locator)
        new_def_data = self._serialize_fields(old_definition['block_type'], new_def_data)
        if needs_saved():
            definition_locator = self._update_definition_from_data(course_key, old_definition, new_def_data, user_id)
            return definition_locator, True
        else:
            return definition_locator, False
def _update_definition_from_data(self, course_key, old_definition, new_def_data, user_id):
"""
Update the persisted version of the given definition and return the
locator of the new definition. Does not check if data differs from the
previous version.
"""
new_definition = copy.deepcopy(old_definition)
new_definition['_id'] = ObjectId()
new_definition['fields'] = new_def_data
new_definition['edit_info']['edited_by'] = user_id
new_definition['edit_info']['edited_on'] = datetime.datetime.now(UTC)
# previous version id
new_definition['edit_info']['previous_version'] = old_definition['_id']
new_definition['schema_version'] = self.SCHEMA_VERSION
self.update_definition(course_key, new_definition)
return DefinitionLocator(new_definition['block_type'], new_definition['_id'])
def _generate_block_key(self, course_blocks, category):
"""
Generate a somewhat readable block id unique w/in this course using the category
:param course_blocks: the current list of blocks.
:param category:
"""
# NOTE: a potential bug is that a block is deleted and another created which gets the old
# block's id. a possible fix is to cache the last serial in a dict in the structure
# {category: last_serial...}
# A potential confusion is if the name incorporates the parent's name, then if the child
# moves, its id won't change and will be confusing
serial = 1
while True:
potential_key = BlockKey(category, "{}{}".format(category, serial))
if potential_key not in course_blocks:
return potential_key
serial += 1
    @contract(returns='XBlock')
    def create_item(
        self, user_id, course_key, block_type, block_id=None,
        definition_locator=None, fields=None,
        force=False, **kwargs
    ):
        """
        Add a descriptor to persistence as an element
        of the course. Return the resulting post saved version with populated locators.

        :param course_key: If it has a version_guid and a course org + course + run + branch, this
        method ensures that the version is the head of the given course branch before making the change.

        raises InsufficientSpecificationError if there is no course locator.
        raises VersionConflictError if the version_guid of the course_or_parent_locator is not the head
        of the its course unless force is true.
        :param force: fork the structure and don't update the course draftVersion if the above
        :param continue_revision: for multistep transactions, continue revising the given version rather than creating
        a new version. Setting force to True conflicts with setting this to True and will cause a VersionConflictError

        :param definition_locator: should either be None to indicate this is a brand new definition or
        a pointer to the existing definition to which this block should point or from which this was derived
        or a LocalId to indicate that it's new.
        If fields does not contain any Scope.content, then definition_locator must have a value meaning that this
        block points
        to the existing definition. If fields contains Scope.content and definition_locator is not None, then
        the Scope.content fields are assumed to be a new payload for definition_locator.

        :param block_id: if provided, must not already exist in the structure. Provides the block id for the
        new item in this structure. Otherwise, one is computed using the category appended w/ a few digits.

        This method creates a new version of the course structure unless the course has a bulk_write operation
        active.
        It creates and inserts the new block, makes the block point
        to the definition which may be new or a new version of an existing or an existing.

        Rules for course locator:

        * If the course locator specifies a org and course and run and either it doesn't
          specify version_guid or the one it specifies == the current head of the branch,
          it progresses the course to point
          to the new head and sets the active version to point to the new head
        * If the locator has a org and course and run but its version_guid != current head, it raises VersionConflictError.

        NOTE: using a version_guid will end up creating a new version of the course. Your new item won't be in
        the course id'd by version_guid but instead in one w/ a new version_guid. Ensure in this case that you get
        the new version_guid from the locator in the returned object!
        """
        with self.bulk_operations(course_key):
            # split handles all the fields in one dict not separated by scope
            fields = fields or {}
            fields.update(kwargs.pop('metadata', {}) or {})
            definition_data = kwargs.pop('definition_data', {})
            if definition_data:
                if not isinstance(definition_data, dict):
                    definition_data = {'data': definition_data}  # backward compatibility to mongo's hack
                fields.update(definition_data)
            # find course_index entry if applicable and structures entry
            # (index_entry is None when addressing by raw version_guid only)
            index_entry = self._get_index_if_valid(course_key, force)
            structure = self._lookup_course(course_key).structure
            partitioned_fields = self.partition_fields_by_scope(block_type, fields)
            new_def_data = partitioned_fields.get(Scope.content, {})
            # persist the definition if persisted != passed
            if definition_locator is None or isinstance(definition_locator.definition_id, LocalId):
                # brand-new definition (or placeholder LocalId): create it
                definition_locator = self.create_definition_from_data(course_key, new_def_data, block_type, user_id)
            elif new_def_data:
                # content fields supplied for an existing definition: version it
                definition_locator, _ = self.update_definition_from_data(course_key, definition_locator, new_def_data, user_id)
            # copy the structure and modify the new one
            new_structure = self.version_structure(course_key, structure, user_id)
            new_id = new_structure['_id']
            # generate usage id
            if block_id is not None:
                block_key = BlockKey(block_type, block_id)
                if block_key in new_structure['blocks']:
                    raise DuplicateItemError(block_id, self, 'structures')
            else:
                block_key = self._generate_block_key(new_structure['blocks'], block_type)
            # settings-scope (and children) fields live on the block itself
            block_fields = partitioned_fields.get(Scope.settings, {})
            if Scope.children in partitioned_fields:
                block_fields.update(partitioned_fields[Scope.children])
            self._update_block_in_structure(new_structure, block_key, self._new_block(
                user_id,
                block_type,
                block_fields,
                definition_locator.definition_id,
                new_id,
            ))
            self.update_structure(course_key, new_structure)
            # update the index entry if appropriate
            if index_entry is not None:
                # see if any search targets changed
                if fields is not None:
                    self._update_search_targets(index_entry, fields)
                # move the branch head to the new structure version
                self._update_head(course_key, index_entry, course_key.branch, new_id)
                item_loc = BlockUsageLocator(
                    course_key.version_agnostic(),
                    block_type=block_type,
                    block_id=block_key.id,
                )
            else:
                # no index: address the new item by its explicit version guid
                item_loc = BlockUsageLocator(
                    CourseLocator(version_guid=new_id),
                    block_type=block_type,
                    block_id=block_key.id,
                )
            if isinstance(course_key, LibraryLocator):
                self._flag_library_updated_event(course_key)
            # reconstruct the new_item from the cache
            return self.get_item(item_loc)
    def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
        """
        Creates and saves a new xblock that as a child of the specified block

        Returns the newly created item.

        Args:
            user_id: ID of the user creating and saving the xmodule
            parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
                block that this item should be parented under
            block_type: The type of block to create
            block_id: a unique identifier for the new item. If not supplied,
                a new identifier will be generated
            fields (dict): A dictionary specifying initial values for some or all fields
                in the newly created block
        """
        with self.bulk_operations(parent_usage_key.course_key):
            xblock = self.create_item(
                user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields,
                **kwargs)
            # skip attach to parent if xblock has 'detached' tag
            if 'detached' in xblock._class_tags:  # pylint: disable=protected-access
                return xblock
            # don't version the structure as create_item handled that already.
            new_structure = self._lookup_course(xblock.location.course_key).structure
            # add new block as child and update parent's version
            block_id = BlockKey.from_usage_key(parent_usage_key)
            if block_id not in new_structure['blocks']:
                raise ItemNotFoundError(parent_usage_key)
            parent = new_structure['blocks'][block_id]
            # Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))
            # position=None appends; otherwise insert at the requested index.
            if kwargs.get('position') is None:
                parent.fields.setdefault('children', []).append(BlockKey.from_usage_key(xblock.location))
            else:
                parent.fields.setdefault('children', []).insert(
                    kwargs.get('position'),
                    BlockKey.from_usage_key(xblock.location)
                )
            if parent.edit_info.update_version != new_structure['_id']:
                # if the parent hadn't been previously changed in this bulk transaction, indicate that it's
                # part of the bulk transaction
                self.version_block(parent, user_id, new_structure['_id'])
            self.decache_block(parent_usage_key.course_key, new_structure['_id'], block_id)
            # db update
            self.update_structure(parent_usage_key.course_key, new_structure)
            # don't need to update the index b/c create_item did it for this version
            return xblock
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
    """
    See :meth: `.ModuleStoreWrite.clone_course` for documentation.

    In split this is cheap: aside from copying the assets, it merely creates a new
    course index entry that reuses the source course's existing structure versions.

    Raises:
        ItemNotFoundError: if no course index exists for source_course_id.
    """
    src_index = self.get_course_index_info(source_course_id)
    if src_index is None:
        raise ItemNotFoundError("Cannot find a course at {0}. Aborting".format(source_course_id))

    with self.bulk_operations(dest_course_id):
        cloned_course = self.create_course(
            dest_course_id.org, dest_course_id.course, dest_course_id.run,
            user_id,
            fields=fields,
            versions_dict=src_index['versions'],
            search_targets=src_index['search_targets'],
            skip_auto_publish=True,
            **kwargs
        )
        # Copy assets only after the destination course exists, in case creation fails.
        super(SplitMongoModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
        return cloned_course
# Default block_id used for the root block of a newly created course structure
# (see create_course / _create_courselike, where it seeds the root BlockKey).
DEFAULT_ROOT_COURSE_BLOCK_ID = 'course'
# Library counterpart of the above; presumably used where a library root is
# created by default (create_library passes the literal 'library' explicitly).
DEFAULT_ROOT_LIBRARY_BLOCK_ID = 'library'
def create_course(
    self, org, course, run, user_id, master_branch=None, fields=None,
    versions_dict=None, search_targets=None, root_category='course',
    root_block_id=None, **kwargs
):
    """
    Create a new entry in the active courses index pointing to a new or existing
    structure, and return the root block of the resulting course (whose location
    carries the course id).

    Arguments:
        org (str): the organization that owns the course.
        course (str): the course number.
        run (str): the particular run of the course (e.g. 2013_T1).
        user_id: id of the user creating the course.
        master_branch: the tag (key) naming the DRAFT version in the versions dict —
            a branch name, not an actual version guid. Required.
        fields (dict): initial field values for the root block. Scope.settings fields
            update the root block itself; Scope.content fields update (and may fork) its
            definition. If combined with a starting version from versions_dict, a
            successor version is generated with these values applied as overrides
            (update, not replace).
        versions_dict: mapping of branch tags (e.g. DRAFT, PUBLISHED) to structure
            guids. When provided, the new course reuses those versions unless field
            overrides force a successor; when omitted, a mostly empty structure with
            just the root block is created and fields get their defaults on load.
        search_targets: dict of search key/value pairs (e.g. wiki_slug) to record on
            the course index.
        root_category: block type of the root xblock.
        root_block_id: block id of the root xblock (defaults internally).
        kwargs: optional arguments understood by a subset of modulestores.

    Raises:
        DuplicateCourseError: if a course already exists for org + course + run.
    """
    # A branch name is mandatory here; there is deliberately no default.
    assert master_branch is not None
    # Uniqueness of org/course/run is enforced inside _create_courselike.
    course_locator = CourseLocator(org=org, course=course, run=run, branch=master_branch)
    return self._create_courselike(
        course_locator, user_id, master_branch, fields, versions_dict,
        search_targets, root_category, root_block_id, **kwargs
    )
def _create_courselike(
    self, locator, user_id, master_branch, fields=None,
    versions_dict=None, search_targets=None, root_category='course',
    root_block_id=None, **kwargs
):
    """
    Internal code for creating a course or library.

    Builds from the inside out — definition, then structure, then course index
    entry — and finally re-saves the root block so that default field values
    set in its __init__ (e.g. wiki_slug) are persisted.

    Args mirror create_course(); ``locator`` is a CourseLocator or LibraryLocator.

    Raises:
        DuplicateCourseError: if an index entry already exists for this locator.
    """
    index = self.get_course_index(locator)
    if index is not None:
        raise DuplicateCourseError(locator, index)

    # Split the caller-supplied fields by scope; children are stored on the block
    # alongside its settings, so fold Scope.children into the block fields.
    partitioned_fields = self.partition_fields_by_scope(root_category, fields)
    block_fields = partitioned_fields[Scope.settings]
    if Scope.children in partitioned_fields:
        block_fields.update(partitioned_fields[Scope.children])
    definition_fields = self._serialize_fields(root_category, partitioned_fields.get(Scope.content, {}))

    # build from inside out: definition, structure, index entry
    # if building a wholly new structure
    if versions_dict is None or master_branch not in versions_dict:
        # create new definition and structure
        definition_id = self.create_definition_from_data(locator, definition_fields, root_category, user_id).definition_id
        draft_structure = self._new_structure(
            user_id,
            BlockKey(
                root_category,
                root_block_id or SplitMongoModuleStore.DEFAULT_ROOT_COURSE_BLOCK_ID,
            ),
            block_fields,
            definition_id
        )
        new_id = draft_structure['_id']
        # Record the freshly created structure under the master branch tag.
        if versions_dict is None:
            versions_dict = {master_branch: new_id}
        else:
            versions_dict[master_branch] = new_id
    elif block_fields or definition_fields:  # pointing to existing course w/ some overrides
        # just get the draft_version structure
        draft_version = CourseLocator(version_guid=versions_dict[master_branch])
        draft_structure = self._lookup_course(draft_version).structure
        # Fork a successor version so the overrides don't mutate the original.
        draft_structure = self.version_structure(locator, draft_structure, user_id)
        new_id = draft_structure['_id']
        root_block = draft_structure['blocks'][draft_structure['root']]
        if block_fields is not None:
            root_block.fields.update(self._serialize_fields(root_category, block_fields))
        if definition_fields is not None:
            # Merge content overrides into the existing definition and fork it too.
            old_def = self.get_definition(locator, root_block.definition)
            new_fields = old_def['fields']
            new_fields.update(definition_fields)
            definition_id = self._update_definition_from_data(locator, old_def, new_fields, user_id).definition_id
            root_block.definition = definition_id
            root_block.edit_info.edited_on = datetime.datetime.now(UTC)
            root_block.edit_info.edited_by = user_id
            root_block.edit_info.previous_version = root_block.edit_info.update_version
            root_block.edit_info.update_version = new_id
        versions_dict[master_branch] = new_id
    else:  # Pointing to an existing course structure
        # No overrides: reuse the given structure version verbatim.
        new_id = versions_dict[master_branch]
        draft_version = CourseLocator(version_guid=new_id)
        draft_structure = self._lookup_course(draft_version).structure

    locator = locator.replace(version_guid=new_id)
    with self.bulk_operations(locator):
        self.update_structure(locator, draft_structure)
        index_entry = {
            '_id': ObjectId(),
            'org': locator.org,
            'course': locator.course,
            'run': locator.run,
            'edited_by': user_id,
            'edited_on': datetime.datetime.now(UTC),
            'versions': versions_dict,
            'schema_version': self.SCHEMA_VERSION,
            'search_targets': search_targets or {},
        }
        if fields is not None:
            self._update_search_targets(index_entry, fields)
        self.insert_course_index(locator, index_entry)

        # expensive hack to persist default field values set in __init__ method (e.g., wiki_slug)
        if isinstance(locator, LibraryLocator):
            course = self.get_library(locator, **kwargs)
        else:
            course = self.get_course(locator, **kwargs)
        return self.update_item(course, user_id, **kwargs)
def create_library(self, org, library, user_id, fields, **kwargs):
    """
    Create a new library. Arguments are similar to create_course().
    """
    kwargs["fields"] = fields
    # Fill in library-specific defaults unless the caller overrode them,
    # then delegate to the shared course/library creation path.
    master_branch = kwargs.setdefault("master_branch", ModuleStoreEnum.BranchName.library)
    kwargs.setdefault("root_category", "library")
    kwargs.setdefault("root_block_id", "library")
    locator = LibraryLocator(org=org, library=library, branch=master_branch)
    return self._create_courselike(locator, user_id, **kwargs)
def update_item(self, descriptor, user_id, allow_not_found=False, force=False, **kwargs):
    """
    Save the descriptor's fields. Does not descend the course dag to save children.

    Returns the new descriptor (with updated location), creating a new course
    version. If the descriptor's location has an org, course, and run, the course
    head pointer is moved. If its version_guid points at a non-head version and
    the item has since changed, raises VersionConflictError unless ``force`` is
    True, in which case the course is forked but the head pointer is left alone
    (the change will not be in the course head).

    The implementation tries to detect which changes, if any, actually need
    saving, and skips versioning the definition, structure, and course when
    nothing changed.

    Raises:
        ItemNotFoundError: if the location does not exist (and allow_not_found
            is False).
    """
    partitioned_fields = self.partition_xblock_fields_by_scope(descriptor)
    updated = self._update_item_from_fields(
        user_id, descriptor.location.course_key, BlockKey.from_usage_key(descriptor.location),
        partitioned_fields, descriptor.definition_locator, allow_not_found, force, **kwargs
    )
    # _update_item_from_fields returns None when nothing changed; in that case
    # the original descriptor is already current, so hand it back unchanged.
    return updated or descriptor
def _update_item_from_fields(
    self, user_id, course_key, block_key, partitioned_fields,
    definition_locator, allow_not_found, force, **kwargs
):
    """
    Broke out guts of update_item for short-circuited internal use only.

    Returns the re-fetched, updated item, or None when no change was detected
    (definition, settings, and children all identical to the stored block).
    """
    with self.bulk_operations(course_key):
        # A LocalId (or missing id) means the block was never persisted:
        # flatten the partitioned fields and create it instead of updating.
        if allow_not_found and isinstance(block_key.id, (LocalId, NoneType)):
            fields = {}
            for subfields in partitioned_fields.itervalues():
                fields.update(subfields)
            return self.create_item(
                user_id, course_key, block_key.type, fields=fields, force=force
            )

        original_structure = self._lookup_course(course_key).structure
        index_entry = self._get_index_if_valid(course_key, force)
        original_entry = self._get_block_from_structure(original_structure, block_key)
        if original_entry is None:
            if allow_not_found:
                # Block id was supplied but the block doesn't exist yet: create it.
                fields = {}
                for subfields in partitioned_fields.itervalues():
                    fields.update(subfields)
                return self.create_item(
                    user_id, course_key, block_key.type, block_id=block_key.id, fields=fields, force=force,
                )
            else:
                raise ItemNotFoundError(course_key.make_usage_key(block_key.type, block_key.id))

        is_updated = False
        # Definition (Scope.content) first: updating it may already mark the item dirty.
        definition_fields = partitioned_fields[Scope.content]
        if definition_locator is None:
            definition_locator = DefinitionLocator(original_entry.block_type, original_entry.definition)
        if definition_fields:
            definition_locator, is_updated = self.update_definition_from_data(
                course_key, definition_locator, definition_fields, user_id
            )

        # check metadata
        settings = partitioned_fields[Scope.settings]
        settings = self._serialize_fields(block_key.type, settings)
        if not is_updated:
            is_updated = self._compare_settings(settings, original_entry.fields)

        # check children
        if partitioned_fields.get(Scope.children, {}):  # purposely not 'is not None'
            serialized_children = [BlockKey.from_usage_key(child) for child in partitioned_fields[Scope.children]['children']]
            is_updated = is_updated or original_entry.fields.get('children', []) != serialized_children
            if is_updated:
                settings['children'] = serialized_children

        # if updated, rev the structure
        if is_updated:
            new_structure = self.version_structure(course_key, original_structure, user_id)
            block_data = self._get_block_from_structure(new_structure, block_key)
            block_data.definition = definition_locator.definition_id
            block_data.fields = settings
            new_id = new_structure['_id']

            # source_version records which revision a block was copied from. In this method, we're updating
            # the block, so it's no longer a direct copy, and we can remove the source_version reference.
            block_data.edit_info.source_version = None

            self.version_block(block_data, user_id, new_id)
            self.update_structure(course_key, new_structure)

            # update the index entry if appropriate
            if index_entry is not None:
                self._update_search_targets(index_entry, definition_fields)
                self._update_search_targets(index_entry, settings)
                # Rebuild the course key from the index entry so the returned
                # locator reflects the canonical org/course/run plus new version.
                if isinstance(course_key, LibraryLocator):
                    course_key = LibraryLocator(
                        org=index_entry['org'],
                        library=index_entry['course'],
                        branch=course_key.branch,
                        version_guid=new_id
                    )
                else:
                    course_key = CourseLocator(
                        org=index_entry['org'],
                        course=index_entry['course'],
                        run=index_entry['run'],
                        branch=course_key.branch,
                        version_guid=new_id
                    )
                self._update_head(course_key, index_entry, course_key.branch, new_id)
            elif isinstance(course_key, LibraryLocator):
                # No (valid) index entry: return a version-only locator.
                course_key = LibraryLocator(version_guid=new_id)
            else:
                course_key = CourseLocator(version_guid=new_id)

            if isinstance(course_key, LibraryLocator):
                self._flag_library_updated_event(course_key)

            # fetch and return the new item--fetching is unnecessary but a good qc step
            new_locator = course_key.make_usage_key(block_key.type, block_key.id)
            return self.get_item(new_locator, **kwargs)
        else:
            return None
# pylint: disable=unused-argument
def create_xblock(
    self, runtime, course_key, block_type, block_id=None, fields=None,
    definition_id=None, parent_xblock=None, **kwargs
):
    """
    This method instantiates the correct subclass of XModuleDescriptor based
    on the contents of json_data. It does not persist it and can create one which
    has no usage id.

    parent_xblock is used to compute inherited metadata as well as to append the new xblock.

    json_data:
    - 'block_type': the xmodule block_type
    - 'fields': a dict of locally set fields (not inherited) in json format not pythonic typed format!
    - 'definition': the object id of the existing definition
    """
    assert runtime is not None
    # Resolve the concrete XBlock class registered under this block_type.
    xblock_class = runtime.load_block_type(block_type)
    # Minimal serialized form: locally-set fields are applied afterwards via setattr.
    json_data = {
        'block_type': block_type,
        'fields': {},
    }
    if definition_id is not None:
        json_data['definition'] = definition_id
    if parent_xblock is None:
        # If no parent, then nothing to inherit.
        inherited_settings = {}
    else:
        inherited_settings = parent_xblock.xblock_kvs.inherited_settings.copy()
        if fields is not None:
            # Layer inheritable field values over the parent's inherited settings
            # so descendants would see the new values.
            for field_name in inheritance.InheritanceMixin.fields:
                if field_name in fields:
                    inherited_settings[field_name] = fields[field_name]
    # NOTE(review): inherited_settings is computed above but never passed to
    # xblock_from_json below — it appears unused in this method; confirm whether
    # that is intentional or a dropped argument.
    new_block = runtime.xblock_from_json(
        xblock_class,
        course_key,
        # A falsy block_id yields an anonymous (usage-id-less) block.
        BlockKey(block_type, block_id) if block_id else None,
        BlockData(**json_data),
        **kwargs
    )
    # Apply the caller's field values directly on the instantiated block.
    for field_name, value in (fields or {}).iteritems():
        setattr(new_block, field_name, value)

    if parent_xblock is not None:
        parent_xblock.children.append(new_block.scope_ids.usage_id)
        # decache pending children field settings
        parent_xblock.save()
    return new_block
def persist_xblock_dag(self, xblock, user_id, force=False):
    """
    Create or update the xblock and all of its children. The xblock's location
    must specify a course. If it doesn't specify a usage_id, it's presumed new
    and needing creation. Descends the children, doing the same for any that
    are xblocks; children that are plain block_ids just update the children
    pointer. All updates go into one course version (bulk update).

    Incoming objects get their location and definition_location info updated.

    :param xblock: the head of the dag
    :param user_id: who's doing the change
    :returns: the post-persisted version of the incoming xblock (its children
        will be ids, not objects).
    """
    # find course_index entry if applicable and structures entry
    course_key = xblock.location.course_key
    with self.bulk_operations(course_key):
        index_entry = self._get_index_if_valid(course_key, force)
        current_structure = self._lookup_course(course_key).structure
        updated_structure = self.version_structure(course_key, current_structure, user_id)
        version_id = updated_structure['_id']

        changed = self._persist_subdag(course_key, xblock, user_id, updated_structure['blocks'], version_id)
        if not changed:
            # Nothing in the dag differed from what's stored; hand back the input.
            return xblock

        self.update_structure(course_key, updated_structure)
        # update the index entry if appropriate
        if index_entry is not None:
            self._update_head(course_key, index_entry, xblock.location.branch, version_id)
        # fetch and return the new item--fetching is unnecessary but a good qc step
        return self.get_item(xblock.location.for_version(version_id))
def _persist_subdag(self, course_key, xblock, user_id, structure_blocks, new_id):
    """
    Persist ``xblock`` and, recursively, any children that only exist in memory
    (i.e. have LocalId usage ids) into ``structure_blocks``.

    Mutates ``structure_blocks`` in place and rewrites the xblock's
    ``definition_locator`` and ``scope_ids`` with the persisted identifiers.

    Returns True if anything (definition, settings, or children) actually changed.
    """
    # persist the definition if persisted != passed
    partitioned_fields = self.partition_xblock_fields_by_scope(xblock)
    new_def_data = self._serialize_fields(xblock.category, partitioned_fields[Scope.content])
    is_updated = False
    if xblock.definition_locator is None or isinstance(xblock.definition_locator.definition_id, LocalId):
        # Definition never persisted before: create it from scratch.
        xblock.definition_locator = self.create_definition_from_data(
            course_key, new_def_data, xblock.category, user_id
        )
        is_updated = True
    elif new_def_data:
        # Existing definition: only versions it if the content actually changed.
        xblock.definition_locator, is_updated = self.update_definition_from_data(
            course_key, xblock.definition_locator, new_def_data, user_id
        )

    if isinstance(xblock.scope_ids.usage_id.block_id, LocalId):
        # generate an id
        is_new = True
        is_updated = True
        block_id = getattr(xblock.scope_ids.usage_id.block_id, 'block_id', None)
        if block_id is None:
            block_key = self._generate_block_key(structure_blocks, xblock.scope_ids.block_type)
        else:
            block_key = BlockKey(xblock.scope_ids.block_type, block_id)
        # Rebind the xblock's usage id to the real (persisted) block id.
        new_usage_id = xblock.scope_ids.usage_id.replace(block_id=block_key.id)
        xblock.scope_ids = xblock.scope_ids._replace(usage_id=new_usage_id)  # pylint: disable=protected-access
    else:
        is_new = False
        block_key = BlockKey(xblock.scope_ids.block_type, xblock.scope_ids.usage_id.block_id)

    children = []
    if xblock.has_children:
        for child in xblock.children:
            if isinstance(child.block_id, LocalId):
                # Child is also new: persist it first so its real key can be recorded.
                child_block = xblock.system.get_block(child)
                is_updated = self._persist_subdag(course_key, child_block, user_id, structure_blocks, new_id) or is_updated
                children.append(BlockKey.from_usage_key(child_block.location))
            else:
                children.append(BlockKey.from_usage_key(child))
        # A change in child membership or order also counts as an update.
        is_updated = is_updated or structure_blocks[block_key].fields['children'] != children

    block_fields = partitioned_fields[Scope.settings]
    block_fields = self._serialize_fields(xblock.category, block_fields)
    if not is_new and not is_updated:
        # Only diff the settings when nothing else has already marked this dirty.
        is_updated = self._compare_settings(block_fields, structure_blocks[block_key].fields)
    if children:
        block_fields['children'] = children

    if is_updated:
        if is_new:
            block_info = self._new_block(
                user_id,
                xblock.category,
                block_fields,
                xblock.definition_locator.definition_id,
                new_id,
                raw=True
            )
        else:
            block_info = structure_blocks[block_key]
            block_info.fields = block_fields
            block_info.definition = xblock.definition_locator.definition_id
            self.version_block(block_info, user_id, new_id)
        structure_blocks[block_key] = block_info

    return is_updated
def _compare_settings(self, settings, original_fields):
"""
Return True if the settings are not == to the original fields
:param settings:
:param original_fields:
"""
original_keys = original_fields.keys()
if 'children' in original_keys:
original_keys.remove('children')
if len(settings) != len(original_keys):
return True
else:
new_keys = settings.keys()
for key in original_keys:
if key not in new_keys or original_fields[key] != settings[key]:
return True
def copy(self, user_id, source_course, destination_course, subtree_list=None, blacklist=None):
    """
    Copies each xblock in subtree_list and those blocks descendants excluding blacklist
    from source_course to destination_course.

    To delete a block in the destination_course, copy its parent and blacklist the other
    sibs to keep them from being copies. You can also just call delete_item on the destination.

    Ensures that each subtree occurs in the same place in destination as it does in source. If any
    of the source's subtree parents are missing from destination, it raises ItemNotFound([parent_ids]).
    To determine the same relative order vis-a-vis published siblings,
    publishing may involve changing the order of previously published siblings. For example,
    if publishing `[c, d]` and source parent has children `[a, b, c, d, e]` and destination parent
    currently has children `[e, b]`, there's no obviously correct resulting order; thus, publish will
    reorder destination to `[b, c, d, e]` to make it conform with the source.

    :param source_course: a CourseLocator (can be a version or course w/ branch)
    :param destination_course: a CourseLocator which must be an existing course but branch doesn't have
        to exist yet. (The course must exist b/c Locator doesn't have everything necessary to create it).
        Note, if the branch doesn't exist, then the source_course structure's root must be in subtree_list;
        otherwise, the publish will violate the parents must exist rule.
    :param subtree_list: a list of usage keys whose subtrees to publish.
    :param blacklist: a list of usage keys to not change in the destination: i.e., don't add
        if not there, don't update if there.

    Raises:
        ItemNotFoundError: if it cannot find the course. if the request is to publish a
            subtree but the ancestors up to and including the course root are not published.
    """
    # get the destination's index, and source and destination structures.
    with self.bulk_operations(source_course):
        source_structure = self._lookup_course(source_course).structure

        with self.bulk_operations(destination_course):
            index_entry = self.get_course_index(destination_course)
            if index_entry is None:
                # brand new course
                raise ItemNotFoundError(destination_course)
            if destination_course.branch not in index_entry['versions']:
                # must be copying the dag root if there's no current dag
                root_block_key = source_structure['root']
                if not any(root_block_key == BlockKey.from_usage_key(subtree) for subtree in subtree_list):
                    raise ItemNotFoundError(u'Must publish course root {}'.format(root_block_key))
                root_source = source_structure['blocks'][root_block_key]
                # create branch
                destination_structure = self._new_structure(
                    user_id, root_block_key,
                    # leave off the fields b/c the children must be filtered
                    definition_id=root_source.definition,
                )
            else:
                # Branch exists: fork a successor version of its current structure.
                destination_structure = self._lookup_course(destination_course).structure
                destination_structure = self.version_structure(destination_course, destination_structure, user_id)

            # Normalize the blacklist to BlockKeys unless it's the EXCLUDE_ALL sentinel.
            if blacklist != EXCLUDE_ALL:
                blacklist = [BlockKey.from_usage_key(shunned) for shunned in blacklist or []]
            # iterate over subtree list filtering out blacklist.
            orphans = set()
            destination_blocks = destination_structure['blocks']
            for subtree_root in subtree_list:
                if BlockKey.from_usage_key(subtree_root) != source_structure['root']:
                    # find the parents and put root in the right sequence
                    parents = self._get_parents_from_structure(BlockKey.from_usage_key(subtree_root), source_structure)
                    parent_found = False
                    for parent in parents:
                        # If a parent isn't found in the destination_blocks, it's possible it was renamed
                        # in the course export. Continue and only throw an exception if *no* parents are found.
                        if parent in destination_blocks:
                            parent_found = True
                            orphans.update(
                                self._sync_children(
                                    source_structure['blocks'][parent],
                                    destination_blocks[parent],
                                    BlockKey.from_usage_key(subtree_root)
                                )
                            )
                    if len(parents) and not parent_found:
                        raise ItemNotFoundError(parents)
                # update/create the subtree and its children in destination (skipping blacklist)
                orphans.update(
                    self._copy_subdag(
                        user_id, destination_structure['_id'],
                        BlockKey.from_usage_key(subtree_root),
                        source_structure['blocks'],
                        destination_blocks,
                        blacklist
                    )
                )
            # remove any remaining orphans
            for orphan in orphans:
                # orphans will include moved as well as deleted xblocks. Only delete the deleted ones.
                self._delete_if_true_orphan(orphan, destination_structure)

            # update the db
            self.update_structure(destination_course, destination_structure)
            self._update_head(destination_course, index_entry, destination_course.branch, destination_structure['_id'])
@contract(source_keys="list(BlockUsageLocator)", dest_usage=BlockUsageLocator)
def copy_from_template(self, source_keys, dest_usage, user_id, head_validation=True):
    """
    Flexible mechanism for inheriting content from an external course/library/etc.

    Will copy all of the XBlocks whose keys are passed as `source_course` so that they become
    children of the XBlock whose key is `dest_usage`. Any previously existing children of
    `dest_usage` that haven't been replaced/updated by this copy_from_template operation will
    be deleted.

    Unlike `copy()`, this does not care whether the resulting blocks are positioned similarly
    in their new course/library. However, the resulting blocks will be in the same relative
    order as `source_keys`.

    If any of the blocks specified already exist as children of the destination block, they
    will be updated rather than duplicated or replaced. If they have Scope.settings field values
    overriding inherited default values, those overrides will be preserved.

    IMPORTANT: This method does not preserve block_id - in other words, every block that is
    copied will be assigned a new block_id. This is because we assume that the same source block
    may be copied into one course in multiple places. However, it *is* guaranteed that every
    time this method is called for the same source block and dest_usage, the same resulting
    block id will be generated.

    :param source_keys: a list of BlockUsageLocators. Order is preserved.
    :param dest_usage: The BlockUsageLocator that will become the parent of an inherited copy
        of all the xblocks passed in `source_keys`.
    :param user_id: The user who will get credit for making this change.
    :returns: usage locators for all the new children of dest_usage.
    """
    # Preload the block structures for all source courses/libraries/etc.
    # so that we can access descendant information quickly
    source_structures = {}
    for key in source_keys:
        course_key = key.course_key
        if course_key.branch is None:
            raise ItemNotFoundError("branch is required for all source keys when using copy_from_template")
        if course_key not in source_structures:
            with self.bulk_operations(course_key):
                source_structures[course_key] = self._lookup_course(
                    course_key, head_validation=head_validation
                ).structure

    destination_course = dest_usage.course_key
    with self.bulk_operations(destination_course):
        index_entry = self.get_course_index(destination_course)
        if index_entry is None:
            raise ItemNotFoundError(destination_course)
        # Fork a successor version; remember the old version id, which becomes
        # the update_version of the destination block's edit_info below.
        dest_structure = self._lookup_course(destination_course).structure
        old_dest_structure_version = dest_structure['_id']
        dest_structure = self.version_structure(destination_course, dest_structure, user_id)

        # Set of all descendent block IDs of dest_usage that are to be replaced:
        block_key = BlockKey(dest_usage.block_type, dest_usage.block_id)
        orig_descendants = set(self.descendants(dest_structure['blocks'], block_key, depth=None, descendent_map={}))
        # The descendants() method used above adds the block itself, which we don't consider a descendant.
        orig_descendants.remove(block_key)
        new_descendants = self._copy_from_template(
            source_structures, source_keys, dest_structure, block_key, user_id, head_validation
        )

        # Update the edit info:
        dest_info = dest_structure['blocks'][block_key]

        # Update the edit_info:
        dest_info.edit_info.previous_version = dest_info.edit_info.update_version
        dest_info.edit_info.update_version = old_dest_structure_version
        dest_info.edit_info.edited_by = user_id
        dest_info.edit_info.edited_on = datetime.datetime.now(UTC)

        # Anything that was a descendant before but wasn't re-created by the
        # copy is stale and gets dropped from the new structure.
        orphans = orig_descendants - new_descendants
        for orphan in orphans:
            del dest_structure['blocks'][orphan]

        self.update_structure(destination_course, dest_structure)
        self._update_head(destination_course, index_entry, destination_course.branch, dest_structure['_id'])

    # Return usage locators for all the new children:
    return [
        destination_course.make_usage_key(*k)
        for k in dest_structure['blocks'][block_key].fields['children']
    ]
def _copy_from_template(
    self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation
):
    """
    Internal recursive implementation of copy_from_template()

    Copies each source block (and, recursively, its children) into
    ``dest_structure`` as a child of ``new_parent_block_key``, mutating
    ``dest_structure`` in place.

    Returns the new set of BlockKeys that are the new descendants of the block with key 'block_key'
    """
    new_blocks = set()
    new_children = list()  # ordered list of the new children of new_parent_block_key
    for usage_key in source_keys:
        src_course_key = usage_key.course_key
        # Strip the version so identical sources hash identically across versions.
        hashable_source_id = src_course_key.for_version(None)
        block_key = BlockKey(usage_key.block_type, usage_key.block_id)
        source_structure = source_structures[src_course_key]

        if block_key not in source_structure['blocks']:
            raise ItemNotFoundError(usage_key)
        source_block_info = source_structure['blocks'][block_key]

        # Compute a new block ID. This new block ID must be consistent when this
        # method is called with the same (source_key, dest_structure) pair
        unique_data = "{}:{}:{}".format(
            unicode(hashable_source_id).encode("utf-8"),
            block_key.id,
            new_parent_block_key.id,
        )
        new_block_id = hashlib.sha1(unique_data).hexdigest()[:20]
        new_block_key = BlockKey(block_key.type, new_block_id)

        # Now clone block_key to new_block_key:
        new_block_info = copy.deepcopy(source_block_info)
        # Note that new_block_info now points to the same definition ID entry as source_block_info did
        existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())
        # Inherit the Scope.settings values from 'fields' to 'defaults'
        new_block_info.defaults = new_block_info.fields

        # <workaround>
        # CAPA modules store their 'markdown' value (an alternate representation of their content)
        # in Scope.settings rather than Scope.content :-/
        # markdown is a field that really should not be overridable - it fundamentally changes the content.
        # capa modules also use a custom editor that always saves their markdown field to the metadata,
        # even if it hasn't changed, which breaks our override system.
        # So until capa modules are fixed, we special-case them and remove their markdown fields,
        # forcing the inherited version to use XML only.
        if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:
            del new_block_info.defaults['markdown']
        # </workaround>

        new_block_info.fields = existing_block_info.fields  # Preserve any existing overrides
        if 'children' in new_block_info.defaults:
            del new_block_info.defaults['children']  # Will be set later

        new_block_info.edit_info = existing_block_info.edit_info
        new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version
        new_block_info.edit_info.update_version = dest_structure['_id']
        # Note we do not set 'source_version' - it's only used for copying identical blocks
        # from draft to published as part of publishing workflow.
        # Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.
        new_block_info.edit_info.edited_by = user_id
        new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)
        # Record where this block came from (branch/version-agnostic usage key).
        new_block_info.edit_info.original_usage = unicode(usage_key.replace(branch=None, version_guid=None))
        new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version
        dest_structure['blocks'][new_block_key] = new_block_info

        children = source_block_info.fields.get('children')
        if children:
            # Recurse into the source block's children under the new parent key.
            children = [src_course_key.make_usage_key(child.type, child.id) for child in children]
            new_blocks |= self._copy_from_template(
                source_structures, children, dest_structure, new_block_key, user_id, head_validation
            )

        new_blocks.add(new_block_key)
        # And add new_block_key to the list of new_parent_block_key's new children:
        new_children.append(new_block_key)

    # Update the children of new_parent_block_key
    dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children
    return new_blocks
    def delete_item(self, usage_locator, user_id, force=False):
        """
        Delete the block or tree rooted at block (if delete_children) and any references w/in the course to the block
        from a new version of the course structure.

        returns CourseLocator for new version

        raises ItemNotFoundError if the location does not exist.
        raises ValueError if usage_locator points to the structure root

        Creates a new course version. If the descriptor's location has a org, a course, and a run, it moves the course head
        pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
        change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
        the course but leaves the head pointer where it is (this change will not be in the course head).
        """
        if not isinstance(usage_locator, BlockUsageLocator) or usage_locator.deprecated:
            # The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
            raise ItemNotFoundError(usage_locator)
        with self.bulk_operations(usage_locator.course_key):
            original_structure = self._lookup_course(usage_locator.course_key).structure
            block_key = BlockKey.from_usage_key(usage_locator)
            if original_structure['root'] == block_key:
                raise ValueError("Cannot delete the root of a course")
            if block_key not in original_structure['blocks']:
                raise ValueError("Cannot delete a block that does not exist")
            index_entry = self._get_index_if_valid(usage_locator.course_key, force)
            # Version the structure so the deletion lands in a new structure document.
            new_structure = self.version_structure(usage_locator.course_key, original_structure, user_id)
            new_blocks = new_structure['blocks']
            new_id = new_structure['_id']
            # Detach the block from every parent and stamp each parent with the new edit.
            parent_block_keys = self._get_parents_from_structure(block_key, original_structure)
            for parent_block_key in parent_block_keys:
                parent_block = new_blocks[parent_block_key]
                parent_block.fields['children'].remove(block_key)
                parent_block.edit_info.edited_on = datetime.datetime.now(UTC)
                parent_block.edit_info.edited_by = user_id
                parent_block.edit_info.previous_version = parent_block.edit_info.update_version
                parent_block.edit_info.update_version = new_id
                # remove the source_version reference
                parent_block.edit_info.source_version = None
                self.decache_block(usage_locator.course_key, new_id, parent_block_key)
            # Drop the block and all of its descendants from the new structure.
            self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)
            # update index if appropriate and structures
            self.update_structure(usage_locator.course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(usage_locator.course_key, index_entry, usage_locator.branch, new_id)
                result = usage_locator.course_key.for_version(new_id)
            else:
                # no head to move (e.g. version-only key): return a version-specific locator
                result = CourseLocator(version_guid=new_id)
            if isinstance(usage_locator.course_key, LibraryLocator):
                self._flag_library_updated_event(usage_locator.course_key)
            return result
@contract(block_key=BlockKey, blocks='dict(BlockKey: BlockData)')
def _remove_subtree(self, block_key, blocks):
"""
Remove the subtree rooted at block_key
"""
for child in blocks[block_key].fields.get('children', []):
self._remove_subtree(BlockKey(*child), blocks)
del blocks[block_key]
    def delete_course(self, course_key, user_id):
        """
        Remove the given course from the course index.

        Only removes the course from the index. The data remains. You can use create_course
        with a versions hash to restore the course; however, the edited_on and
        edited_by won't reflect the originals, of course.

        :param course_key: identifier of the course to remove from the index
        :param user_id: id of the user performing the delete (currently only logged)
        """
        # this is the only real delete in the system. should it do something else?
        log.info(u"deleting course from split-mongo: %s", course_key)
        self.delete_course_index(course_key)
        # We do NOT call the super class here since we need to keep the assets
        # in case the course is later restored.
        # super(SplitMongoModuleStore, self).delete_course(course_key, user_id)
        # Let listeners (e.g. search indexers) know the course is gone.
        self._emit_course_deleted_signal(course_key)
    @contract(block_map="dict(BlockKey: dict)", block_key=BlockKey)
    def inherit_settings(
        self, block_map, block_key, inherited_settings_map, inheriting_settings=None, inherited_from=None
    ):
        """
        Updates block_data with any inheritable setting set by an ancestor and recurses to children.

        :param block_map: mapping of BlockKey to block data for the structure being processed
        :param block_key: the block whose inherited values are being computed
        :param inherited_settings_map: accumulator mapping each BlockKey to the settings it inherits
        :param inheriting_settings: settings passed down from the immediate ancestor (None at the root)
        :param inherited_from: chain of ancestor child-references already visited, used to detect cycles
        """
        if block_key not in block_map:
            return
        block_data = block_map[block_key]
        if inheriting_settings is None:
            inheriting_settings = {}
        if inherited_from is None:
            inherited_from = []
        # the currently passed down values take precedence over any previously cached ones
        # NOTE: this should show the values which all fields would have if inherited: i.e.,
        # not set to the locally defined value but to value set by nearest ancestor who sets it
        inherited_settings_map.setdefault(block_key, {}).update(inheriting_settings)
        # update the inheriting w/ what should pass to children
        inheriting_settings = inherited_settings_map[block_key].copy()
        block_fields = block_data.fields
        for field_name in inheritance.InheritanceMixin.fields:
            if field_name in block_fields:
                # a locally-set inheritable field overrides the ancestor value for descendants
                inheriting_settings[field_name] = block_fields[field_name]
        for child in block_fields.get('children', []):
            try:
                if child in inherited_from:
                    raise Exception(u'Infinite loop detected when inheriting to {}, having already inherited from {}'.format(child, inherited_from))
                self.inherit_settings(
                    block_map,
                    BlockKey(*child),
                    inherited_settings_map,
                    inheriting_settings,
                    inherited_from + [child]
                )
            except KeyError:
                # here's where we need logic for looking up in other structures when we allow cross pointers
                # but it's also getting this during course creation if creating top down w/ children set or
                # migration where the old mongo published had pointers to privates
                pass
def descendants(self, block_map, block_id, depth, descendent_map):
"""
adds block and its descendants out to depth to descendent_map
Depth specifies the number of levels of descendants to return
(0 => this usage only, 1 => this usage and its children, etc...)
A depth of None returns all descendants
"""
if block_id not in block_map:
return descendent_map
if block_id not in descendent_map:
descendent_map[block_id] = block_map[block_id]
if depth is None or depth > 0:
depth = depth - 1 if depth is not None else None
for child in descendent_map[block_id].fields.get('children', []):
descendent_map = self.descendants(block_map, child, depth, descendent_map)
return descendent_map
    def get_modulestore_type(self, course_key=None):
        """
        Returns an enumeration-like type reflecting the type of this modulestore, per ModuleStoreEnum.Type.

        Args:
            course_key: just for signature compatibility
        """
        # constant for every course: this store is always the split store
        return ModuleStoreEnum.Type.split
def _find_course_assets(self, course_key):
"""
Split specific lookup
"""
try:
course_assets = self._lookup_course(course_key).structure.get('assets', {})
except (InsufficientSpecificationError, VersionConflictError) as err:
log.warning(u'Error finding assets for org "%s" course "%s" on asset '
u'request. Either version of course_key is None or invalid.',
course_key.org, course_key.course)
return {}
return course_assets
    def _update_course_assets(self, user_id, asset_key, update_function):
        """
        A wrapper for functions wanting to manipulate assets. Gets and versions the structure,
        passes the mutable array for either 'assets' or 'thumbnails' as well as the idx to the function for it to
        update, then persists the changed data back into the course.

        The update function can raise an exception if it doesn't want to actually do the commit. The
        surrounding method probably should catch that exception.

        :param user_id: id of the user performing the update
        :param asset_key: key identifying the asset (and its course) being manipulated
        :param update_function: callable(all_assets, asset_idx) -> updated asset list
        """
        with self.bulk_operations(asset_key.course_key):
            original_structure = self._lookup_course(asset_key.course_key).structure
            index_entry = self._get_index_if_valid(asset_key.course_key)
            # version the structure so the asset change lands in a new structure document
            new_structure = self.version_structure(asset_key.course_key, original_structure, user_id)
            course_assets = new_structure.setdefault('assets', {})
            asset_type = asset_key.asset_type
            all_assets = SortedAssetList(iterable=[])
            # Assets should be pre-sorted, so add them efficiently without sorting.
            # extend() will raise a ValueError if the passed-in list is not sorted.
            all_assets.extend(course_assets.setdefault(asset_type, []))
            asset_idx = all_assets.find(asset_key)
            # hand the sorted list and the located index to the caller's mutator
            all_assets_updated = update_function(all_assets, asset_idx)
            new_structure['assets'][asset_type] = all_assets_updated.as_list()
            # update index if appropriate and structures
            self.update_structure(asset_key.course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(asset_key.course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves a list of AssetMetadata to the modulestore. The list can be composed of multiple
asset types. This method is optimized for multiple inserts at once - it only re-saves the structure
at the end of all saves/updates.
"""
# Determine course key to use in bulk operation. Use the first asset assuming that
# all assets will be for the same course.
asset_key = asset_metadata_list[0].asset_id
course_key = asset_key.course_key
with self.bulk_operations(course_key):
original_structure = self._lookup_course(course_key).structure
index_entry = self._get_index_if_valid(course_key)
new_structure = self.version_structure(course_key, original_structure, user_id)
course_assets = new_structure.setdefault('assets', {})
assets_by_type = self._save_assets_by_type(
course_key, asset_metadata_list, course_assets, user_id, import_only
)
for asset_type, assets in assets_by_type.iteritems():
new_structure['assets'][asset_type] = assets.as_list()
# update index if appropriate and structures
self.update_structure(course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves or updates a single asset. Simply makes it a list and calls the list save above.
"""
return self.save_asset_metadata_list([asset_metadata, ], user_id, import_only)
    @contract(asset_key='AssetKey', attr_dict=dict)
    def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
        """
        Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.

        Arguments:
            asset_key (AssetKey): asset identifier
            attr_dict (dict): attribute: value pairs to set
            user_id: id of the user performing the update

        Raises:
            ItemNotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
        """
        def _internal_method(all_assets, asset_idx):
            """
            Update the found item
            """
            if asset_idx is None:
                raise ItemNotFoundError(asset_key)
            # Form an AssetMetadata.
            mdata = AssetMetadata(asset_key, asset_key.path)
            mdata.from_storable(all_assets[asset_idx])
            mdata.update(attr_dict)
            # Generate a Mongo doc from the metadata and update the course asset info.
            all_assets[asset_idx] = mdata.to_storable()
            return all_assets

        # _update_course_assets handles versioning + persistence of the changed list
        self._update_course_assets(user_id, asset_key, _internal_method)
@contract(asset_key='AssetKey')
def delete_asset_metadata(self, asset_key, user_id):
"""
Internal; deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
def _internal_method(all_asset_info, asset_idx):
"""
Remove the item if it was found
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
all_asset_info.pop(asset_idx)
return all_asset_info
try:
self._update_course_assets(user_id, asset_key, _internal_method)
return 1
except ItemNotFoundError:
return 0
    @contract(source_course_key='CourseKey', dest_course_key='CourseKey')
    def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
        """
        Copy all the course assets from source_course_key to dest_course_key.

        Arguments:
            source_course_key (CourseKey): identifier of course to copy from
            dest_course_key (CourseKey): identifier of course to copy to
            user_id: id of the user performing the copy (recorded as editor of the new version)
        """
        source_structure = self._lookup_course(source_course_key).structure
        with self.bulk_operations(dest_course_key):
            original_structure = self._lookup_course(dest_course_key).structure
            index_entry = self._get_index_if_valid(dest_course_key)
            new_structure = self.version_structure(dest_course_key, original_structure, user_id)
            # wholesale replacement: dest's previous assets/thumbnails are overwritten
            new_structure['assets'] = source_structure.get('assets', {})
            new_structure['thumbnails'] = source_structure.get('thumbnails', [])
            # update index if appropriate and structures
            self.update_structure(dest_course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])
    def fix_not_found(self, course_locator, user_id):
        """
        Only intended for rather low level methods to use. Goes through the children attrs of
        each block removing any whose block_id is not a member of the course.

        :param course_locator: the course to clean
        :param user_id: id of the user performing the clean-up (recorded as editor of the new version)
        """
        original_structure = self._lookup_course(course_locator).structure
        index_entry = self._get_index_if_valid(course_locator)
        new_structure = self.version_structure(course_locator, original_structure, user_id)
        for block in new_structure['blocks'].itervalues():
            if 'children' in block.fields:
                # drop dangling child pointers: children with no corresponding block
                block.fields['children'] = [
                    block_id for block_id in block.fields['children']
                    if block_id in new_structure['blocks']
                ]
        self.update_structure(course_locator, new_structure)
        if index_entry is not None:
            # update the index entry if appropriate
            self._update_head(course_locator, index_entry, course_locator.branch, new_structure['_id'])
    def convert_references_to_keys(self, course_key, xblock_class, jsonfields, blocks):
        """
        Convert the given serialized fields to the deserialized values by finding all references
        and converting them.

        :param course_key: the course into which the usage keys should be mapped
        :param xblock_class: class whose field definitions determine which values are references
        :param jsonfields: the serialized copy of the xblock's fields
        :param blocks: structure's block map (see robust_usage_key's docstring)
        """
        @contract(block_key="BlockUsageLocator | seq[2]")
        def robust_usage_key(block_key):
            """
            create a course_key relative usage key for the block_key. If the block_key is in blocks,
            use its correct category; otherwise, use 'unknown'.

            The purpose for this is that some operations add pointers as they build up the
            structure without worrying about order of creation. Because the category of the
            usage_key is for the most part inert, it's better to hack a value than to work
            out a dependency graph algorithm for those functions which may prereference blocks.
            """
            # if this was taken from cache, then its fields are already converted
            if isinstance(block_key, BlockUsageLocator):
                return block_key.map_into_course(course_key)
            elif not isinstance(block_key, BlockKey):
                block_key = BlockKey(*block_key)
            try:
                return course_key.make_usage_key(
                    block_key.type, block_key.id
                )
            except KeyError:
                # NOTE(review): this fallback looks unreachable as written (make_usage_key is not
                # a dict lookup) -- presumably a remnant of an older blocks[block_key] lookup; confirm.
                return course_key.make_usage_key('unknown', block_key.id)

        xblock_class = self.mixologist.mix(xblock_class)
        # Make a shallow copy, so that we aren't manipulating a cached field dictionary
        output_fields = dict(jsonfields)
        for field_name, value in output_fields.iteritems():
            if value:
                field = xblock_class.fields.get(field_name)
                if field is None:
                    continue
                elif isinstance(field, Reference):
                    output_fields[field_name] = robust_usage_key(value)
                elif isinstance(field, ReferenceList):
                    output_fields[field_name] = [robust_usage_key(ele) for ele in value]
                elif isinstance(field, ReferenceValueDict):
                    # mutates the dict values in place; the shallow copy above shares this dict
                    for key, subvalue in value.iteritems():
                        value[key] = robust_usage_key(subvalue)
        return output_fields
def _get_index_if_valid(self, course_key, force=False):
"""
If the course_key identifies a course and points to its draft (or plausibly its draft),
then return the index entry.
raises VersionConflictError if not the right version
:param course_key: a CourseLocator
:param force: if false, raises VersionConflictError if the current head of the course != the one identified
by course_key
"""
if course_key.org is None or course_key.course is None or course_key.run is None or course_key.branch is None:
return None
else:
index_entry = self.get_course_index(course_key)
is_head = (
course_key.version_guid is None or
index_entry['versions'][course_key.branch] == course_key.version_guid
)
if is_head or force:
return index_entry
else:
raise VersionConflictError(
course_key,
index_entry['versions'][course_key.branch]
)
def _find_local_root(self, element_to_find, possibility, tree):
if possibility not in tree:
return False
if element_to_find in tree[possibility]:
return True
for subtree in tree[possibility]:
if self._find_local_root(element_to_find, subtree, tree):
return True
return False
def _update_search_targets(self, index_entry, fields):
"""
Update the index entry if any of the given fields are in SEARCH_TARGET_DICT. (doesn't save
the changes, just changes them in the entry dict)
:param index_entry:
:param fields: a dictionary of fields and values usually only those explicitly set and already
ready for persisting (e.g., references converted to block_ids)
"""
for field_name, field_value in fields.iteritems():
if field_name in self.SEARCH_TARGET_DICT:
index_entry.setdefault('search_targets', {})[field_name] = field_value
    def _update_head(self, course_key, index_entry, branch, new_id):
        """
        Update the active index for the given course's branch to point to new_id

        :param course_key: the course whose index is being updated
        :param index_entry: the course index document (mutated in place, then persisted)
        :param branch: which branch of the course to repoint
        :param new_id: the ObjectId of the new head structure
        """
        if not isinstance(new_id, ObjectId):
            # guard against accidentally persisting a stringified version id
            raise TypeError('new_id must be an ObjectId, but is {!r}'.format(new_id))
        index_entry['versions'][branch] = new_id
        self.update_course_index(course_key, index_entry)
def partition_xblock_fields_by_scope(self, xblock):
"""
Return a dictionary of scopes mapped to this xblock's explicitly set fields w/o any conversions
"""
# explicitly_set_fields_by_scope converts to json; so, avoiding it
# the existing partition_fields_by_scope works on a dict not an xblock
result = defaultdict(dict)
for field in xblock.fields.itervalues():
if field.is_set_on(xblock):
result[field.scope][field.name] = field.read_from(xblock)
return result
    def _serialize_fields(self, category, fields):
        """
        Convert any references to their serialized form. Handle some references already being unicoded
        because the client passed them that way and nothing above this layer did the necessary deserialization.
        Remove any fields which split or its kvs computes or adds but does not want persisted.

        :param category: block type whose field definitions drive the conversion
        :param fields: a dict of fields (mutated in place and also returned)
        """
        assert isinstance(fields, dict)
        xblock_class = XBlock.load_class(category, self.default_class)
        xblock_class = self.mixologist.mix(xblock_class)

        def reference_block_id(reference):
            """
            Handle client possibly setting field to strings rather than keys to get the block_id
            """
            # perhaps replace by fixing the views or Field Reference*.from_json to return a Key
            if isinstance(reference, basestring):
                reference = BlockUsageLocator.from_string(reference)
            elif isinstance(reference, BlockKey):
                # already in storage form; nothing to convert
                return reference
            return BlockKey.from_usage_key(reference)

        for field_name, value in fields.iteritems():
            if value is not None:
                if isinstance(xblock_class.fields[field_name], Reference):
                    fields[field_name] = reference_block_id(value)
                elif isinstance(xblock_class.fields[field_name], ReferenceList):
                    fields[field_name] = [
                        reference_block_id(ele) for ele in value
                    ]
                elif isinstance(xblock_class.fields[field_name], ReferenceValueDict):
                    # convert the dict's values in place
                    for key, subvalue in value.iteritems():
                        value[key] = reference_block_id(subvalue)
                # should this recurse down dicts and lists just in case they contain datetime?
                elif not isinstance(value, datetime.datetime):  # don't convert datetimes!
                    fields[field_name] = xblock_class.fields[field_name].to_json(value)
        return fields
    def _new_structure(self, user_id, root_block_key, block_fields=None, definition_id=None):
        """
        Internal function: create a structure element with no previous version. Must provide the root id
        but not necessarily the info needed to create it (for the use case of publishing). If providing
        root_category, must also provide block_fields and definition_id

        :param user_id: recorded as the creator/editor of the structure
        :param root_block_key: BlockKey of the root block, or None for a structure with no blocks yet
        :param block_fields: settings/children scoped fields for the root block (defaults to {})
        :param definition_id: pointer to the root block's content-scoped fields
        """
        new_id = ObjectId()
        if root_block_key is not None:
            if block_fields is None:
                block_fields = {}
            blocks = {
                root_block_key: self._new_block(
                    user_id, root_block_key.type, block_fields, definition_id, new_id
                )
            }
        else:
            blocks = {}
        return {
            '_id': new_id,
            'root': root_block_key,
            # no ancestry: this is an original structure
            'previous_version': None,
            'original_version': new_id,
            'edited_by': user_id,
            'edited_on': datetime.datetime.now(UTC),
            'blocks': blocks,
            'schema_version': self.SCHEMA_VERSION,
        }
    @contract(block_key=BlockKey)
    def _get_parents_from_structure(self, block_key, structure):
        """
        Given a structure, find block_key's parent in that structure. Note returns
        the encoded format for parent

        :returns: list of the BlockKeys of every block whose 'children' contains block_key
        """
        # linear scan over every block in the structure
        return [
            parent_block_key
            for parent_block_key, value in structure['blocks'].iteritems()
            if block_key in value.fields.get('children', [])
        ]
def _sync_children(self, source_parent, destination_parent, new_child):
"""
Reorder destination's children to the same as source's and remove any no longer in source.
Return the removed ones as orphans (a set).
"""
destination_reordered = []
destination_children = set(destination_parent.fields['children'])
source_children = source_parent.fields['children']
orphans = destination_children - set(source_children)
for child in source_children:
if child == new_child or child in destination_children:
destination_reordered.append(child)
destination_parent.fields['children'] = destination_reordered
return orphans
    @contract(
        block_key=BlockKey,
        source_blocks="dict(BlockKey: *)",
        destination_blocks="dict(BlockKey: *)",
        blacklist="list(BlockKey) | str",
    )
    def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):
        """
        Update destination_blocks for the sub-dag rooted at block_key to be like the one in
        source_blocks excluding blacklist.

        Return any newly discovered orphans (as a set)

        :param user_id: recorded as the editor of any changed destination block
        :param destination_version: the id of the destination structure version being built
        :param blacklist: list of BlockKeys to skip, or the EXCLUDE_ALL sentinel
        """
        orphans = set()
        destination_block = destination_blocks.get(block_key)
        new_block = source_blocks[block_key]
        if destination_block:
            # reorder children to correspond to whatever order holds for source.
            # remove any which source no longer claims (put into orphans)
            # add any which are being copied
            source_children = new_block.fields.get('children', [])
            existing_children = destination_block.fields.get('children', [])
            destination_reordered = SparseList()
            for child in existing_children:
                try:
                    index = source_children.index(child)
                    # keep the child at its position in the source ordering
                    destination_reordered[index] = child
                except ValueError:
                    # source no longer claims this child
                    orphans.add(BlockKey(*child))
            if blacklist != EXCLUDE_ALL:
                for index, child in enumerate(source_children):
                    if child not in blacklist:
                        destination_reordered[index] = child
            # the history of the published leaps between publications and only points to
            # previously published versions.
            previous_version = destination_block.edit_info.update_version
            destination_block = copy.deepcopy(new_block)
            destination_block.fields['children'] = destination_reordered.compact_list()
            destination_block.edit_info.previous_version = previous_version
            destination_block.edit_info.update_version = destination_version
            destination_block.edit_info.edited_by = user_id
            destination_block.edit_info.edited_on = datetime.datetime.now(UTC)
        else:
            # block is new to the destination: create it there from the source block
            destination_block = self._new_block(
                user_id, new_block.block_type,
                self._filter_blacklist(copy.copy(new_block.fields), blacklist),
                new_block.definition,
                destination_version,
                raw=True,
                block_defaults=new_block.defaults
            )
            # Extend the block's new edit_info with any extra edit_info fields from the source (e.g. original_usage):
            for key, val in new_block.edit_info.to_storable().iteritems():
                if getattr(destination_block.edit_info, key) is None:
                    setattr(destination_block.edit_info, key, val)

        # If the block we are copying from was itself a copy, then just
        # reference the original source, rather than the copy.
        destination_block.edit_info.source_version = (
            new_block.edit_info.source_version or new_block.edit_info.update_version
        )

        if blacklist != EXCLUDE_ALL:
            # recurse into non-blacklisted children, accumulating their orphans
            for child in destination_block.fields.get('children', []):
                if child not in blacklist:
                    orphans.update(
                        self._copy_subdag(
                            user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist
                        )
                    )
        destination_blocks[block_key] = destination_block
        return orphans
    @contract(blacklist='list(BlockKey) | str')
    def _filter_blacklist(self, fields, blacklist):
        """
        Filter out blacklist from the children field in fields. Will construct a new list for children;
        so, no need to worry about copying the children field, but it will modify fields in place.

        :param blacklist: list of BlockKeys to exclude, or the EXCLUDE_ALL sentinel to drop all children
        """
        if blacklist == EXCLUDE_ALL:
            fields['children'] = []
        else:
            fields['children'] = [child for child in fields.get('children', []) if BlockKey(*child) not in blacklist]
        return fields
@contract(orphan=BlockKey)
def _delete_if_true_orphan(self, orphan, structure):
"""
Delete the orphan and any of its descendants which no longer have parents.
"""
if len(self._get_parents_from_structure(orphan, structure)) == 0:
for child in structure['blocks'][orphan].fields.get('children', []):
self._delete_if_true_orphan(BlockKey(*child), structure)
del structure['blocks'][orphan]
    @contract(returns=BlockData)
    def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False, block_defaults=None):
        """
        Create the core document structure for a block.

        :param user_id: recorded as the creator/editor in the block's edit_info
        :param category: the block type (e.g. 'problem', 'chapter')
        :param block_fields: the settings and children scoped fields as a dict or son
        :param definition_id: the pointer to the content scoped fields
        :param new_id: the structure's version id
        :param raw: true if this block already has all references serialized
        :param block_defaults: optional inherited default values stored alongside the fields
        """
        if not raw:
            block_fields = self._serialize_fields(category, block_fields)
        document = {
            'block_type': category,
            'definition': definition_id,
            'fields': block_fields,
            'edit_info': {
                'edited_on': datetime.datetime.now(UTC),
                'edited_by': user_id,
                'previous_version': None,
                'update_version': new_id
            }
        }
        if block_defaults:
            document['defaults'] = block_defaults
        return BlockData(**document)
    @contract(block_key=BlockKey, returns='BlockData | None')
    def _get_block_from_structure(self, structure, block_key):
        """
        Look block_key up in the structure's 'blocks' dict, returning None if absent.

        NOTE(review): despite the historical wording ("encodes the block key"), no
        encoding happens here -- the BlockKey is used directly as the dict key;
        presumably any json-safe encoding is applied at the persistence layer. TODO confirm.
        """
        return structure['blocks'].get(block_key)
    @contract(block_key=BlockKey, content=BlockData)
    def _update_block_in_structure(self, structure, block_key, content):
        """
        Store ``content`` under ``block_key`` in the structure's 'blocks' dict.

        NOTE(review): despite the historical wording ("encodes the block key"), no
        encoding happens here -- the BlockKey is used directly as the dict key. TODO confirm.
        """
        structure['blocks'][block_key] = content
    @autoretry_read()
    def find_courses_by_search_target(self, field_name, field_value):
        """
        Find all the courses which cached that they have the given field with the given value.

        Returns: list of branch-agnostic course_keys
        """
        entries = self.find_matching_course_indexes(
            search_targets={field_name: field_value}
        )
        return [
            CourseLocator(entry['org'], entry['course'], entry['run'])  # Branch agnostic
            for entry in entries
        ]
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
return self.find_courses_by_search_target('wiki_slug', wiki_slug)
    def heartbeat(self):
        """
        Check that the db is reachable.

        :returns: dict mapping this store's type to the db connection's heartbeat result
        """
        return {ModuleStoreEnum.Type.split: self.db_connection.heartbeat()}
    def create_runtime(self, course_entry, lazy):
        """
        Create the proper runtime for this course

        :param course_entry: the course cache entry the runtime will serve
        :param lazy: whether block definitions should be loaded lazily
        """
        return CachingDescriptorSystem(
            modulestore=self,
            course_entry=course_entry,
            module_data={},  # the runtime starts with an empty block cache
            lazy=lazy,
            default_class=self.default_class,
            error_tracker=self.error_tracker,
            render_template=self.render_template,
            mixins=self.xblock_mixins,
            select=self.xblock_select,
            disabled_xblock_types=self.disabled_xblock_types,
            services=self.services,
        )
    def ensure_indexes(self):
        """
        Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
        an exception if unable to.

        This method is intended for use by tests and administrative commands, and not
        to be run during server startup.
        """
        # delegate to the underlying db connection wrapper
        self.db_connection.ensure_indexes()
class SparseList(list):
    """
    A list that allows assignment at arbitrary indexes: the gap up to the
    target index is padded with None entries so the index always exists.
    """
    # taken from http://stackoverflow.com/questions/1857780/sparse-assignment-list-in-python
    def __setitem__(self, index, value):
        """
        Assign value at index, first growing the list with Nones if it is too short.
        """
        shortfall = index + 1 - len(self)
        if shortfall > 0:
            self.extend([None] * shortfall)
        super(SparseList, self).__setitem__(index, value)

    def compact_list(self):
        """
        Return a plain list containing only the non-None entries, in order.
        """
        return [entry for entry in self if entry is not None]
|
agpl-3.0
|
open-craft/opencraft
|
playbooks/collect_activity/stats.py
|
1
|
4868
|
#!/edx/app/edxapp/venvs/edxapp/bin/python
# pylint: skip-file
from __future__ import print_function
from argparse import ArgumentParser
from datetime import datetime, timedelta
import gzip
import os
import re
import sys
import django
from django.utils import timezone
from six.moves.configparser import ConfigParser
# This regex pattern is used to extract the IPv4 address from the beginning of each line in the Nginx access logs.
NGINX_ACCESS_PATTERN = re.compile(r'- - (?P<ipaddress>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
LOG_PATH = '/edx/var/log/nginx'
def valid_date(s):
    """
    Parse a YYYY-MM-DD string into a datetime.date.

    Raises ArgumentTypeError (so argparse can report a clean usage error)
    when the string is not a valid date.
    """
    try:
        return datetime.strptime(s, "%Y-%m-%d").date()
    except ValueError:
        # BUG FIX: the module only does `from argparse import ArgumentParser`,
        # so the original `argparse.ArgumentTypeError` raised a NameError
        # instead of the intended argparse error.
        from argparse import ArgumentTypeError
        msg = "Not a valid date: '{0}'.".format(s)
        raise ArgumentTypeError(msg)
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument(
        '--config-section',
        default=None,
        help='The header for the section in the output yml that the statistics will be assigned',
        required=True
    )
    parser.add_argument(
        '--out',
        default=None,
        help='Path to the output file of the new CSV. Leave blank to use stdout.'
    )
    parser.add_argument(
        '--skip-hit-statistics',
        default=False,
        action='store_true',
        help='Whether to skip the hit statistics'
    )
    parser.add_argument(
        '--start-date',
        default=None,
        type=valid_date,
        help='The first day on which statistics should be gathered. FORMAT: YYYY-MM-DD'
    )
    parser.add_argument(
        '--end-date',
        default=None,
        type=valid_date,
        help='The last day on which statistics should be gathered.'
    )
    args = parser.parse_args()

    # Set up the environment so that edX can be initialized as the LMS with the correct settings.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lms.envs.openstack')
    os.environ.setdefault('SERVICE_VARIANT', 'lms')
    django.setup()

    # These modules need to be imported here because they rely on Django being already initialized
    # (e.g., the settings need to have already loaded).
    from django.contrib.auth.models import User
    from xmodule.modulestore.django import modulestore

    ms = modulestore()

    start_date = args.start_date
    end_date = args.end_date
    if start_date is None or end_date is None:
        # If a time interval is not passed as arguments
        # get the active users for the last full month.
        beginning_of_this_month = datetime.now(tz=timezone.utc).replace(day=1)
        end_of_last_month = beginning_of_this_month - timedelta(days=1)
        beginning_of_last_month = end_of_last_month.replace(day=1)
        start_date = beginning_of_last_month
        end_date = end_of_last_month

    # Headline statistics straight from the database / modulestore.
    stats = {
        'users': User.objects.count(),
        'active_users': User.objects.filter(last_login__range=(start_date, end_date)).count(),
        'courses': len(ms.get_courses()),
    }

    if not args.skip_hit_statistics:
        # Produce the paths to all of the "access.log*" files in the Nginx log directory to be parsed.
        log_files = [
            f for f in os.listdir(LOG_PATH)
            if os.path.isfile(os.path.join(LOG_PATH, f)) and f.startswith('access.log')
        ]

        # Walk through all of the Nginx logs, storing the found remote host IP addresses in a set to enforce uniqueness.
        unique_hits = set()
        total_hits = 0
        for file in log_files:
            print('Parsing log file: {file}'.format(file=file), file=sys.stderr)
            file_path = os.path.join(LOG_PATH, file)
            # Make sure we use gzip to decompress any compressed log files.
            if file.endswith('.gz'):
                handle = gzip.open(file_path, 'rb')
            else:
                handle = open(file_path, 'r')
            # Run each access log line through a regex to extract the IPv4 addresses of remote hosts.
            for line in handle:
                match = re.match(NGINX_ACCESS_PATTERN, line)
                if match:
                    total_hits += 1
                    unique_hits.add(match.group('ipaddress'))
            handle.close()
        stats['unique_hits'] = len(unique_hits)
        stats['total_hits'] = total_hits

    # Build the ConfigParser data.
    config = ConfigParser()
    config.add_section(args.config_section)
    for key, value in stats.items():
        config.set(args.config_section, key, str(value))

    # Output the data in ConfigParser format to stdout and to a file.
    config.write(sys.stdout)
    if args.out:
        # BUG FIX: the format string previously had no '{filename}' placeholder,
        # so the output filename was never interpolated into the message.
        print('Writing to file passed via parameter: {filename}'.format(filename=args.out), file=sys.stderr)
        with open(args.out, 'w') as output_file:
            config.write(output_file)
|
agpl-3.0
|
florentx/OpenUpgrade
|
addons/sale/report/__init__.py
|
370
|
1086
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_report
import invoice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jkonecki/autorest
|
AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/Http/autoresthttpinfrastructuretestservice/operations/http_server_failure.py
|
5
|
7493
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpServerFailure(object):
    """HttpServerFailure operations.

    Exercises the autorest test-service endpoints that always answer with a
    5xx server-failure status, so each call is expected to raise an
    ``ErrorException``.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config

    def head501(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 501 status code - should be represented in the client as an
        error

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/http/failure/server/501'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        # NOTE: default changed from a mutable {} to None; a shared mutable
        # default dict would be reused (and potentially polluted) across calls.
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.head(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def get501(
            self, custom_headers=None, raw=False, **operation_config):
        """
        Return 501 status code - should be represented in the client as an
        error

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/http/failure/server/501'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def post505(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """
        Return 505 status code - should be represented in the client as an
        error

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/http/failure/server/505'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body; only serialize when a value was actually provided.
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def delete505(
            self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
        """
        Return 505 status code - should be represented in the client as an
        error

        :param boolean_value: Simple boolean value true
        :type boolean_value: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`Error <fixtures.acceptancetestshttp.models.Error>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/http/failure/server/505'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body; only serialize when a value was actually provided.
        if boolean_value is not None:
            body_content = self._serialize.body(boolean_value, 'bool')
        else:
            body_content = None

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, **operation_config)

        if response.status_code < 200 or response.status_code >= 300:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
|
mit
|
davygeek/vitess
|
test/topo_flavor/zk2.py
|
3
|
3694
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""zk2 specific configuration."""
import server
class Zk2TopoServer(server.TopoServer):
    """Implementation of TopoServer for zk2."""

    def __init__(self):
        # Ports are reserved lazily, on first use (see assign_ports()).
        self.ports_assigned = False

    def assign_ports(self):
        """Assign ports if not already assigned."""
        if self.ports_assigned:
            return
        from environment import reserve_ports  # pylint: disable=g-import-not-at-top
        import utils  # pylint: disable=g-import-not-at-top

        # Reserve 3 ports for the single ZooKeeper node; all three go into
        # the '1@host:p1:p2:p3' cfg string handed to zkctl below.
        self.zk_port_base = reserve_ports(3)
        self.hostname = utils.hostname
        self.zk_ports = ':'.join(str(self.zk_port_base + i) for i in range(3))
        # NOTE(review): the address uses the last (base + 2) port --
        # presumably the ZK client port; confirm against zkctl's cfg format.
        self.addr = 'localhost:%d' % (self.zk_port_base + 2)
        self.ports_assigned = True

    def setup(self):
        # Start a one-node ZooKeeper and register the three test cells.
        from environment import run, binary_args, vtlogroot  # pylint: disable=g-import-not-at-top,g-multiple-import
        import utils  # pylint: disable=g-import-not-at-top
        self.assign_ports()
        run(binary_args('zkctl') + [
            '-log_dir', vtlogroot,
            '-zk.cfg', '1@%s:%s' % (self.hostname, self.zk_ports),
            'init'])
        # Create the cell configurations using 'vtctl AddCellInfo'
        utils.run_vtctl_vtctl(['AddCellInfo',
                               '-root', '/test_nj',
                               '-server_address', self.addr,
                               'test_nj'])
        utils.run_vtctl_vtctl(['AddCellInfo',
                               '-root', '/test_ny',
                               '-server_address', self.addr,
                               'test_ny'])
        ca_addr = self.addr
        # Use UpdateCellInfo for this one, more coverage.
        utils.run_vtctl_vtctl(['UpdateCellInfo',
                               '-root', '/test_ca',
                               '-server_address', ca_addr,
                               'test_ca'])

    def teardown(self):
        from environment import run, binary_args, vtlogroot  # pylint: disable=g-import-not-at-top,g-multiple-import
        import utils  # pylint: disable=g-import-not-at-top
        self.assign_ports()
        # 'shutdown' preserves on-disk state for inspection when --keep-logs is
        # set; 'teardown' removes it. Errors are ignored on cleanup.
        run(binary_args('zkctl') + [
            '-log_dir', vtlogroot,
            '-zk.cfg', '1@%s:%s' % (self.hostname, self.zk_ports),
            'shutdown' if utils.options.keep_logs else 'teardown'],
            raise_on_error=False)

    def flags(self):
        # Command-line flags that point vt binaries at this topo server.
        return [
            '-topo_implementation', 'zk2',
            '-topo_global_server_address', self.addr,
            '-topo_global_root', '/global',
        ]

    def wipe(self):
        from environment import run, binary_args  # pylint: disable=g-import-not-at-top,g-multiple-import

        # Only delete keyspaces/ in the global topology service, to keep
        # the 'cells' directory. So we don't need to re-add the CellInfo records.
        run(binary_args('zk') + ['-server', self.addr, 'rm', '-rf',
                                 '/global/keyspaces'])
        run(binary_args('zk') + ['-server', self.addr, 'rm', '-rf', '/test_nj/*'])
        run(binary_args('zk') + ['-server', self.addr, 'rm', '-rf', '/test_ny/*'])
        run(binary_args('zk') + ['-server', self.addr, 'rm', '-rf', '/test_ca/*'])

    def update_addr(self, cell, keyspace, shard, tablet_index, port):
        # No per-tablet address registration is needed for zk2.
        pass
server.flavor_map['zk2'] = Zk2TopoServer()
|
apache-2.0
|
rotofly/odoo
|
addons/marketing_campaign_crm_demo/__openerp__.py
|
260
|
1623
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo module manifest dictionary (read by the addons loader).
{
    'name': 'Marketing Campaign - Demo',
    'version': '1.0',
    # Needs both the campaign engine and CRM, since the demo data creates leads.
    'depends': ['marketing_campaign',
                'crm',
                ],
    'author': 'OpenERP SA',
    'category': 'Marketing',
    'description': """
Demo data for the module marketing_campaign.
============================================
Creates demo data like leads, campaigns and segments for the module marketing_campaign.
""",
    'website': 'https://www.odoo.com/page/lead-automation',
    # No regular data files; everything ships as demo data only.
    'data': [],
    'demo': ['marketing_campaign_demo.xml'],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
damianam/easybuild-framework
|
easybuild/scripts/fix_broken_easyconfigs.py
|
3
|
5630
|
#!/usr/bin/env python
# Copyright 2015-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Script to fix easyconfigs that broke due to support for deprecated functionality being dropped in EasyBuild 2.0
:author: Kenneth Hoste (Ghent University)
"""
import os
import re
import sys
from vsc.utils import fancylogger
from vsc.utils.generaloption import SimpleOption
from easybuild.framework.easyconfig.easyconfig import get_easyblock_class
from easybuild.framework.easyconfig.parser import REPLACED_PARAMETERS, fetch_parameters_from_easyconfig
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import init_build_options
from easybuild.tools.filetools import find_easyconfigs, read_file, write_file
class FixBrokenEasyconfigsOption(SimpleOption):
    """Custom option parser for this script."""
    # Allow bare positional arguments (easyconfig paths) without requiring options.
    ALLOPTSMANDATORY = False
def fix_broken_easyconfig(ectxt, easyblock_class):
    """
    Fix provided easyconfig file, that may be broken due to non-backwards-compatible changes.

    :param ectxt: raw contents of easyconfig to fix
    :param easyblock_class: easyblock class, as derived from software name/specified easyblock
    :return: (possibly fixed) easyconfig contents
    """
    log.debug("Raw contents of potentially broken easyconfig file to fix: %s" % ectxt)

    subs = {
        # replace former 'magic' variable shared_lib_ext with SHLIB_EXT constant
        'shared_lib_ext': 'SHLIB_EXT',
    }
    # include replaced easyconfig parameters
    subs.update(REPLACED_PARAMETERS)

    # check whether any substitutions need to be made
    for old, new in subs.items():
        regex = re.compile(r'(\W)%s(\W)' % old)
        if regex.search(ectxt):
            tup = (regex.pattern, old, new)
            log.debug("Broken stuff detected using regex pattern '%s', replacing '%s' with '%s'" % tup)
            ectxt = regex.sub(r'\1%s\2' % new, ectxt)

    # check whether missing "easyblock = 'ConfigureMake'" needs to be inserted
    if easyblock_class is None:
        # prepend "easyblock = 'ConfigureMake'" to line containing "name =..."
        easyblock_spec = "easyblock = 'ConfigureMake'"
        log.debug("Inserting \"%s\", since no easyblock class was derived from easyconfig parameters" % easyblock_spec)
        # BUG FIX: re.M was previously passed as the 4th *positional* argument
        # of re.sub, which is 'count' (re.M == 8, i.e. "at most 8 replacements"),
        # not 'flags'; pass it explicitly via the 'flags' keyword instead.
        ectxt = re.sub(r'(\s*)(name\s*=)', r"\1%s\n\n\2" % easyblock_spec, ectxt, flags=re.M)

    return ectxt
def process_easyconfig_file(ec_file):
    """Process an easyconfig file: fix if it's broken, back it up before fixing it inline (if requested)."""
    ectxt = read_file(ec_file)
    # Derive the easyblock class from the 'name'/'easyblock' parameters;
    # default_fallback=False yields None when nothing can be derived.
    name, easyblock = fetch_parameters_from_easyconfig(ectxt, ['name', 'easyblock'])
    derived_easyblock_class = get_easyblock_class(easyblock, name=name, default_fallback=False)

    fixed_ectxt = fix_broken_easyconfig(ectxt, derived_easyblock_class)

    # Only rewrite (and optionally back up) when something actually changed.
    if ectxt != fixed_ectxt:
        if go.options.backup:
            try:
                # Find a non-existing backup name: <file>.bk, <file>.bk1, <file>.bk2, ...
                backup_ec_file = '%s.bk' % ec_file
                i = 1
                while os.path.exists(backup_ec_file):
                    backup_ec_file = '%s.bk%d' % (ec_file, i)
                    i += 1
                os.rename(ec_file, backup_ec_file)
                log.info("Backed up %s to %s" % (ec_file, backup_ec_file))
            except OSError, err:  # Python 2 except syntax; this script predates Python 3 support
                raise EasyBuildError("Failed to backup %s before rewriting it: %s", ec_file, err)

        write_file(ec_file, fixed_ectxt)
        log.debug("Contents of fixed easyconfig file: %s" % fixed_ectxt)
        log.info("%s: fixed" % ec_file)
    else:
        log.info("%s: nothing to fix" % ec_file)
# MAIN
try:
    init_build_options()

    options = {
        # (help text, type, action, default, short option)
        'backup': ("Backup up easyconfigs before modifying them", None, 'store_true', True, 'b'),
    }
    go = FixBrokenEasyconfigsOption(options)
    log = go.log

    # Log to stdout, warnings and above only.
    fancylogger.logToScreen(enable=True, stdout=True)
    fancylogger.setLogLevel('WARNING')

    # Sanity check: the generic ConfigureMake easyblock must be importable,
    # since fix_broken_easyconfig() may insert references to it.
    try:
        import easybuild.easyblocks.generic.configuremake
    except ImportError, err:  # Python 2 except syntax
        raise EasyBuildError("easyblocks are not available in Python search path: %s", err)

    for path in go.args:
        if not os.path.exists(path):
            raise EasyBuildError("Non-existing path %s specified", path)

    ec_files = [ec for p in go.args for ec in find_easyconfigs(p)]
    if not ec_files:
        raise EasyBuildError("No easyconfig files specified")

    log.info("Processing %d easyconfigs" % len(ec_files))
    for ec_file in ec_files:
        try:
            process_easyconfig_file(ec_file)
        except EasyBuildError, err:
            # Per-file failures are logged but don't abort the batch.
            log.warning("Ignoring issue when processing %s: %s", ec_file, err)

except EasyBuildError, err:
    sys.stderr.write("ERROR: %s\n" % err)
    sys.exit(1)
|
gpl-2.0
|
wendlers/edubot-snap
|
ext/requests/packages/urllib3/contrib/appengine.py
|
360
|
7937
|
from __future__ import absolute_import
import logging
import os
import warnings
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
    """Warning category for App Engine / URLFetch-specific limitations."""
    pass
class AppEnginePlatformError(HTTPError):
    """Raised when a request can't be satisfied within URLFetch's constraints."""
    pass
class AppEngineManager(RequestMethods):
    """
    Connection manager for Google App Engine sandbox applications.

    This manager uses the URLFetch service directly instead of using the
    emulated httplib, and is subject to URLFetch limitations as described in
    the App Engine documentation here:

        https://cloud.google.com/appengine/docs/python/urlfetch

    Notably it will raise an AppEnginePlatformError if:
        * URLFetch is not available.
        * If you attempt to use this on GAEv2 (Managed VMs), as full socket
          support is available.
        * If a request size is more than 10 megabytes.
        * If a response size is more than 32 megabytes.
        * If you use an unsupported request method such as OPTIONS.

    Beyond those cases, it will raise normal urllib3 errors.
    """

    def __init__(self, headers=None, retries=None, validate_certificate=True):
        if not urlfetch:
            raise AppEnginePlatformError(
                "URLFetch is not available in this environment.")

        if is_prod_appengine_mvms():
            raise AppEnginePlatformError(
                "Use normal urllib3.PoolManager instead of AppEngineManager"
                "on Managed VMs, as using URLFetch is not necessary in "
                "this environment.")

        warnings.warn(
            "urllib3 is using URLFetch on Google App Engine sandbox instead "
            "of sockets. To use sockets directly instead of URLFetch see "
            "https://urllib3.readthedocs.io/en/latest/contrib.html.",
            AppEnginePlatformWarning)

        RequestMethods.__init__(self, headers)
        self.validate_certificate = validate_certificate
        # Fall back to the library-wide default retry policy if none given.
        self.retries = retries or Retry.DEFAULT

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Return False to re-raise any potential exceptions
        return False

    def urlopen(self, method, url, body=None, headers=None,
                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
                **response_kw):

        retries = self._get_retries(retries, redirect)

        try:
            # Single URLFetch call; each urlfetch exception below is mapped
            # onto the closest urllib3 exception type.
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=(
                    redirect and
                    retries.redirect != 0 and
                    retries.total),
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)

        except urlfetch.InvalidURLError as e:
            # URLFetch signals an oversized request body via InvalidURLError.
            if 'too large' in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.", e)
            raise ProtocolError(e)

        except urlfetch.DownloadError as e:
            if 'Too many redirects' in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)

        except urlfetch.ResponseTooLargeError as e:
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports"
                "responses up to 32mb in size.", e)

        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)

        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e)

        http_response = self._urlfetch_response_to_http_response(
            response, **response_kw)

        # Check for redirect response
        if (http_response.get_redirect_location() and
                retries.raise_on_redirect and redirect):
            raise MaxRetryError(self, url, "too many redirects")

        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=http_response.status):
            retries = retries.increment(
                method, url, response=http_response, _pool=self)
            log.info("Forced retry: %s", url)
            retries.sleep()
            # Recurse with the decremented retry budget.
            return self.urlopen(
                method, url,
                body=body, headers=headers,
                retries=retries, redirect=redirect,
                timeout=timeout, **response_kw)

        return http_response

    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
        """Wrap a urlfetch response object in a urllib3 HTTPResponse."""
        if is_prod_appengine():
            # Production GAE handles deflate encoding automatically, but does
            # not remove the encoding header.
            content_encoding = urlfetch_resp.headers.get('content-encoding')

            if content_encoding == 'deflate':
                del urlfetch_resp.headers['content-encoding']

        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
        # We have a full response's content,
        # so let's make sure we don't report ourselves as chunked data.
        if transfer_encoding == 'chunked':
            encodings = transfer_encoding.split(",")
            encodings.remove('chunked')
            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)

        return HTTPResponse(
            # In order for decoding to work, we must present the content as
            # a file-like object.
            body=BytesIO(urlfetch_resp.content),
            headers=urlfetch_resp.headers,
            status=urlfetch_resp.status_code,
            **response_kw
        )

    def _get_absolute_timeout(self, timeout):
        """Collapse a urllib3 Timeout into the single deadline URLFetch supports."""
        if timeout is Timeout.DEFAULT_TIMEOUT:
            return 5  # 5s is the default timeout for URLFetch.
        if isinstance(timeout, Timeout):
            if timeout._read is not timeout._connect:
                warnings.warn(
                    "URLFetch does not support granular timeout settings, "
                    "reverting to total timeout.", AppEnginePlatformWarning)
            return timeout.total
        return timeout

    def _get_retries(self, retries, redirect):
        """Normalize the retries argument to a Retry object, warning on unsupported knobs."""
        if not isinstance(retries, Retry):
            retries = Retry.from_int(
                retries, redirect=redirect, default=self.retries)

        if retries.connect or retries.read or retries.redirect:
            warnings.warn(
                "URLFetch only supports total retries and does not "
                "recognize connect, read, or redirect retry parameters.",
                AppEnginePlatformWarning)

        return retries
def is_appengine():
    """Return True when running on any Google App Engine variant."""
    return any((
        is_local_appengine(),
        is_prod_appengine(),
        is_prod_appengine_mvms(),
    ))


def is_appengine_sandbox():
    """Return True for the classic (non-Managed-VM) App Engine sandbox."""
    if not is_appengine():
        return False
    return not is_prod_appengine_mvms()


def is_local_appengine():
    """Return True when running under the local App Engine dev server."""
    env = os.environ
    return 'APPENGINE_RUNTIME' in env and 'Development/' in env['SERVER_SOFTWARE']


def is_prod_appengine():
    """Return True when running on production App Engine (excluding Managed VMs)."""
    env = os.environ
    if 'APPENGINE_RUNTIME' not in env:
        return False
    return 'Google App Engine/' in env['SERVER_SOFTWARE'] and not is_prod_appengine_mvms()


def is_prod_appengine_mvms():
    """Return True when running on an App Engine Managed VM."""
    gae_vm = os.environ.get('GAE_VM', False)
    return gae_vm == 'true'
|
mit
|
cfei18/incubator-airflow
|
airflow/operators/check_operator.py
|
5
|
9941
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import zip
from builtins import str
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class CheckOperator(BaseOperator):
    """
    Performs checks against a db. The ``CheckOperator`` expects
    a sql query that will return a single row. Each value on that
    first row is evaluated using python ``bool`` casting. If any of the
    values return ``False`` the check is failed and errors out.

    Note that Python bool casting evals the following as ``False``:

    * ``False``
    * ``0``
    * Empty string (``""``)
    * Empty list (``[]``)
    * Empty dictionary or set (``{}``)

    Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count ``== 0``. You can craft much more complex query that could,
    for instance, check that the table has the same number of rows as
    the source table upstream, or that the count of today's partition is
    greater than yesterday's partition, or that a set of metrics are less
    than 3 standard deviation for the 7 day average.

    This operator can be used as a data quality check in your pipeline, and
    depending on where you put it in your DAG, you have the choice to
    stop the critical path, preventing from
    publishing dubious data, or on the side and receive email alerts
    without stopping the progress of the DAG.

    Note that this is an abstract class and get_db_hook
    needs to be defined. Whereas a get_db_hook is hook that gets a
    single record from an external source.

    :param sql: the sql to be executed. (templated)
    :type sql: string
    """

    # 'sql' is rendered with Jinja templating before execution.
    template_fields = ('sql',)
    template_ext = ('.hql', '.sql',)
    ui_color = '#fff7e6'

    @apply_defaults
    def __init__(
            self, sql,
            conn_id=None,
            *args, **kwargs):
        super(CheckOperator, self).__init__(*args, **kwargs)
        self.conn_id = conn_id
        self.sql = sql

    def execute(self, context=None):
        self.log.info('Executing SQL check: %s', self.sql)
        # Only the first row of the result set is inspected.
        records = self.get_db_hook().get_first(self.sql)
        self.log.info('Record: %s', records)
        if not records:
            raise AirflowException("The query returned None")
        elif not all([bool(r) for r in records]):
            # Any falsy column value fails the whole check.
            exceptstr = "Test failed.\nQuery:\n{q}\nResults:\n{r!s}"
            raise AirflowException(exceptstr.format(q=self.sql, r=records))
        self.log.info("Success.")

    def get_db_hook(self):
        # Resolve the hook from the configured connection id.
        return BaseHook.get_hook(conn_id=self.conn_id)
def _convert_to_float_if_possible(s):
"""
A small helper function to convert a string to a numeric value
if appropriate
:param s: the string to be converted
:type s: str
"""
try:
ret = float(s)
except (ValueError, TypeError):
ret = s
return ret
class ValueCheckOperator(BaseOperator):
    """
    Performs a simple value check using sql code.

    Note that this is an abstract class and get_db_hook
    needs to be defined. Whereas a get_db_hook is hook that gets a
    single record from an external source.

    :param sql: the sql to be executed. (templated)
    :type sql: string
    """

    # NOTE(review): presumably a leftover from when operators were persisted
    # polymorphically via SQLAlchemy -- confirm whether this is still used.
    __mapper_args__ = {
        'polymorphic_identity': 'ValueCheckOperator'
    }
    template_fields = ('sql', 'pass_value',)
    template_ext = ('.hql', '.sql',)
    ui_color = '#fff7e6'

    @apply_defaults
    def __init__(
            self, sql, pass_value, tolerance=None,
            conn_id=None,
            *args, **kwargs):
        super(ValueCheckOperator, self).__init__(*args, **kwargs)
        self.sql = sql
        self.conn_id = conn_id
        # Stored as a string so it can be templated; converted back at execute time.
        self.pass_value = str(pass_value)
        tol = _convert_to_float_if_possible(tolerance)
        # Only a numeric tolerance is honoured; anything else disables it.
        self.tol = tol if isinstance(tol, float) else None
        self.has_tolerance = self.tol is not None

    def execute(self, context=None):
        self.log.info('Executing SQL check: %s', self.sql)
        records = self.get_db_hook().get_first(self.sql)
        if not records:
            raise AirflowException("The query returned None")

        pass_value_conv = _convert_to_float_if_possible(self.pass_value)
        is_numeric_value_check = isinstance(pass_value_conv, float)

        tolerance_pct_str = None
        if (self.tol is not None):
            tolerance_pct_str = str(self.tol * 100) + '%'
        # Rendered via format(**locals()) below, hence the {self.sql}-style fields.
        except_temp = ("Test failed.\nPass value:{pass_value_conv}\n"
                       "Tolerance:{tolerance_pct_str}\n"
                       "Query:\n{self.sql}\nResults:\n{records!s}")
        if not is_numeric_value_check:
            # String comparison: each column must equal the pass value verbatim.
            tests = [str(r) == pass_value_conv for r in records]
        elif is_numeric_value_check:
            try:
                num_rec = [float(r) for r in records]
            except (ValueError, TypeError) as e:
                cvestr = "Converting a result to float failed.\n"
                raise AirflowException(cvestr + except_temp.format(**locals()))
            if self.has_tolerance:
                # Pass when each value falls within pass_value +/- tolerance.
                tests = [
                    pass_value_conv * (1 - self.tol) <=
                    r <= pass_value_conv * (1 + self.tol)
                    for r in num_rec]
            else:
                tests = [r == pass_value_conv for r in num_rec]

        if not all(tests):
            raise AirflowException(except_temp.format(**locals()))

    def get_db_hook(self):
        return BaseHook.get_hook(conn_id=self.conn_id)
class IntervalCheckOperator(BaseOperator):
    """
    Checks that the values of metrics given as SQL expressions are within
    a certain tolerance of the ones from days_back before.

    Note that this is an abstract class and get_db_hook
    needs to be defined. Whereas a get_db_hook is hook that gets a
    single record from an external source.

    :param table: the table name
    :type table: str
    :param days_back: number of days between ds and the ds we want to check
        against. Defaults to 7 days
    :type days_back: int
    :param metrics_threshold: a dictionary of ratios indexed by metrics
    :type metrics_threshold: dict
    """

    # NOTE(review): presumably a leftover from when operators were persisted
    # polymorphically via SQLAlchemy -- confirm whether this is still used.
    __mapper_args__ = {
        'polymorphic_identity': 'IntervalCheckOperator'
    }
    template_fields = ('sql1', 'sql2')
    template_ext = ('.hql', '.sql',)
    ui_color = '#fff7e6'

    @apply_defaults
    def __init__(
            self, table, metrics_thresholds,
            date_filter_column='ds', days_back=-7,
            conn_id=None,
            *args, **kwargs):
        super(IntervalCheckOperator, self).__init__(*args, **kwargs)
        self.table = table
        self.metrics_thresholds = metrics_thresholds
        # Sorted metric order keeps SELECT columns and result rows aligned.
        self.metrics_sorted = sorted(metrics_thresholds.keys())
        self.date_filter_column = date_filter_column
        # Always look backwards, regardless of the sign the caller passed.
        self.days_back = -abs(days_back)
        self.conn_id = conn_id
        sqlexp = ', '.join(self.metrics_sorted)
        sqlt = ("SELECT {sqlexp} FROM {table}"
                " WHERE {date_filter_column}=").format(**locals())
        # sql1 targets the current ds; sql2 targets ds + days_back (the reference).
        self.sql1 = sqlt + "'{{ ds }}'"
        self.sql2 = sqlt + "'{{ macros.ds_add(ds, " + str(self.days_back) + ") }}'"

    def execute(self, context=None):
        hook = self.get_db_hook()
        self.log.info('Executing SQL check: %s', self.sql2)
        row2 = hook.get_first(self.sql2)
        self.log.info('Executing SQL check: %s', self.sql1)
        row1 = hook.get_first(self.sql1)
        if not row2:
            raise AirflowException("The query {q} returned None".format(q=self.sql2))
        if not row1:
            raise AirflowException("The query {q} returned None".format(q=self.sql1))
        current = dict(zip(self.metrics_sorted, row1))
        reference = dict(zip(self.metrics_sorted, row2))
        ratios = {}
        test_results = {}
        rlog = "Ratio for {0}: {1} \n Ratio threshold : {2}"
        fstr = "'{k}' check failed. {r} is above {tr}"
        estr = "The following tests have failed:\n {0}"
        countstr = "The following {j} tests out of {n} failed:"
        for m in self.metrics_sorted:
            if current[m] == 0 or reference[m] == 0:
                ratio = None
            else:
                # Ratio is always >= 1: larger value over smaller value.
                ratio = float(max(current[m], reference[m])) / \
                    min(current[m], reference[m])
            self.log.info(rlog.format(m, ratio, self.metrics_thresholds[m]))
            ratios[m] = ratio
            # NOTE(review): when ratio is None (a zero metric), this comparison
            # raises TypeError on Python 3 (None < float is unsupported), while
            # on Python 2 it silently evaluates True -- confirm intended semantics.
            test_results[m] = ratio < self.metrics_thresholds[m]
        if not all(test_results.values()):
            failed_tests = [it[0] for it in test_results.items() if not it[1]]
            j = len(failed_tests)
            n = len(self.metrics_sorted)
            self.log.warning(countstr.format(**locals()))
            for k in failed_tests:
                self.log.warning(
                    fstr.format(k=k, r=ratios[k], tr=self.metrics_thresholds[k])
                )
            raise AirflowException(estr.format(", ".join(failed_tests)))
        self.log.info("All tests have passed")

    def get_db_hook(self):
        return BaseHook.get_hook(conn_id=self.conn_id)
|
apache-2.0
|
maxamillion/ansible
|
test/lib/ansible_test/_internal/ci/__init__.py
|
21
|
8224
|
"""Support code for CI environments."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import base64
import json
import os
import tempfile
from .. import types as t
from ..encoding import (
to_bytes,
to_text,
)
from ..io import (
read_text_file,
write_text_file,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..util import (
ABC,
ApplicationError,
display,
get_subclasses,
import_plugins,
raw_command,
)
class ChangeDetectionNotSupported(ApplicationError):
    """Exception for cases where change detection is not supported."""


class AuthContext:
    """Context information required for Ansible Core CI authentication."""
    def __init__(self):  # type: () -> None
        # Holds no state currently.
        pass
class CIProvider(ABC):
    """Base class for CI provider plugins."""
    # Providers are tried in ascending (priority, class name) order by
    # get_ci_provider(); lower priority values are preferred.
    priority = 500

    @staticmethod
    @abc.abstractmethod
    def is_supported():  # type: () -> bool
        """Return True if this provider is supported in the current running environment."""

    @property
    @abc.abstractmethod
    def code(self):  # type: () -> str
        """Return a unique code representing this provider."""

    @property
    @abc.abstractmethod
    def name(self):  # type: () -> str
        """Return descriptive name for this provider."""

    @abc.abstractmethod
    def generate_resource_prefix(self):  # type: () -> str
        """Return a resource prefix specific to this CI provider."""

    @abc.abstractmethod
    def get_base_branch(self):  # type: () -> str
        """Return the base branch or an empty string."""

    @abc.abstractmethod
    def detect_changes(self, args):  # type: (TestConfig) -> t.Optional[t.List[str]]
        """Initialize change detection."""

    @abc.abstractmethod
    def supports_core_ci_auth(self, context):  # type: (AuthContext) -> bool
        """Return True if Ansible Core CI is supported."""

    @abc.abstractmethod
    def prepare_core_ci_auth(self, context):  # type: (AuthContext) -> t.Dict[str, t.Any]
        """Return authentication details for Ansible Core CI."""

    @abc.abstractmethod
    def get_git_details(self, args):  # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
        """Return details about git in the current environment."""
def get_ci_provider():  # type: () -> CIProvider
    """Return a CI provider instance for the current environment.

    The detected provider is cached as a function attribute, so plugin import
    and environment probing happen at most once per process.

    :raises ApplicationError: if no registered CI provider supports the
        current environment.
    """
    try:
        # Fast path: detection already ran earlier in this process.
        return get_ci_provider.provider
    except AttributeError:
        pass

    provider = None

    import_plugins('ci')

    # Lower priority values win; ties are broken by class name for determinism.
    candidates = sorted(get_subclasses(CIProvider), key=lambda c: (c.priority, c.__name__))

    for candidate in candidates:
        if candidate.is_supported():
            provider = candidate()
            break

    if provider is None:
        # Previously this fell through to an opaque AttributeError on
        # ``provider.code`` below; fail with a clear, actionable error instead.
        raise ApplicationError('No CI provider supports the current environment.')

    if provider.code:
        display.info('Detected CI provider: %s' % provider.name)

    get_ci_provider.provider = provider

    return provider
class AuthHelper(ABC):
    """Public key based authentication helper for Ansible Core CI."""
    def sign_request(self, request):  # type: (t.Dict[str, t.Any]) -> None
        """Sign the given auth request and make the public key available."""
        # Serialize with sorted keys so the signed payload is deterministic.
        payload_bytes = to_bytes(json.dumps(request, sort_keys=True))
        signature_raw_bytes = self.sign_bytes(payload_bytes)
        signature = to_text(base64.b64encode(signature_raw_bytes))

        request.update(signature=signature)

    def initialize_private_key(self):  # type: () -> str
        """
        Initialize and publish a new key pair (if needed) and return the private key.
        The private key is cached across ansible-test invocations so it is only generated and published once per CI job.
        """
        path = os.path.expanduser('~/.ansible-core-ci-private.key')

        if os.path.exists(to_bytes(path)):
            private_key_pem = read_text_file(path)
        else:
            private_key_pem = self.generate_private_key()
            write_text_file(path, private_key_pem)
            # Security: the file holds a private key and must not be readable
            # by other users, regardless of the process umask at write time.
            os.chmod(path, 0o600)

        return private_key_pem

    @abc.abstractmethod
    def sign_bytes(self, payload_bytes):  # type: (bytes) -> bytes
        """Sign the given payload and return the signature, initializing a new key pair if required."""

    @abc.abstractmethod
    def publish_public_key(self, public_key_pem):  # type: (str) -> None
        """Publish the given public key."""

    @abc.abstractmethod
    def generate_private_key(self):  # type: () -> str
        """Generate a new key pair, publishing the public key and returning the private key."""
class CryptographyAuthHelper(AuthHelper, ABC):  # pylint: disable=abstract-method
    """Cryptography based public key based authentication helper for Ansible Core CI."""
    def sign_bytes(self, payload_bytes):  # type: (bytes) -> bytes
        """Sign the given payload and return the signature, initializing a new key pair if required."""
        # import cryptography here to avoid overhead and failures in environments which do not use/provide it
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import hashes
        from cryptography.hazmat.primitives.asymmetric import ec
        from cryptography.hazmat.primitives.serialization import load_pem_private_key

        pem = self.initialize_private_key()
        signing_key = load_pem_private_key(to_bytes(pem), None, default_backend())

        return signing_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256()))

    def generate_private_key(self):  # type: () -> str
        """Generate a new key pair, publishing the public key and returning the private key."""
        # import cryptography here to avoid overhead and failures in environments which do not use/provide it
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import serialization
        from cryptography.hazmat.primitives.asymmetric import ec

        signing_key = ec.generate_private_key(ec.SECP384R1(), default_backend())

        # noinspection PyUnresolvedReferences
        private_pem = to_text(signing_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption(),
        ))

        # noinspection PyTypeChecker
        public_pem = to_text(signing_key.public_key().public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo,
        ))

        self.publish_public_key(public_pem)

        return private_pem
class OpenSSLAuthHelper(AuthHelper, ABC):  # pylint: disable=abstract-method
    """OpenSSL based public key based authentication helper for Ansible Core CI."""
    def sign_bytes(self, payload_bytes):  # type: (bytes) -> bytes
        """Sign the given payload and return the signature, initializing a new key pair if required."""
        pem = self.initialize_private_key()

        # The openssl CLI operates on files, so stage the key, payload and
        # signature in temporary files that clean themselves up.
        with tempfile.NamedTemporaryFile() as key_file:
            key_file.write(to_bytes(pem))
            key_file.flush()

            with tempfile.NamedTemporaryFile() as payload_file:
                payload_file.write(payload_bytes)
                payload_file.flush()

                with tempfile.NamedTemporaryFile() as signature_file:
                    raw_command(['openssl', 'dgst', '-sha256', '-sign', key_file.name, '-out', signature_file.name, payload_file.name], capture=True)
                    return signature_file.read()

    def generate_private_key(self):  # type: () -> str
        """Generate a new key pair, publishing the public key and returning the private key."""
        pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0]
        pub = raw_command(['openssl', 'ec', '-pubout'], data=pem, capture=True)[0]

        self.publish_public_key(pub)

        return pem
|
gpl-3.0
|
jmr0/servo
|
tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/reference/support/generate-text-emphasis-line-height-tests.py
|
829
|
3431
|
#!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-line-height-001 ~ 004 except
001z. They test the line height expansion in different directions. This
script outputs a list of all tests it generated in the format of Mozilla
reftest.list to the stdout.
"""
from __future__ import unicode_literals
# Filename pattern for generated test files: index (zero padded) + variant letter.
TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
# Test page template; placeholders are filled per position/writing-mode/tag.
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''
# Filename pattern for the shared per-position reference file.
REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
# Reference page template: simulates the emphasis marks with ruby annotations.
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>●</rt>験<rt>●</rt>テ<rt>●</rt>ス<rt>●</rt>ト<rt>●</rt></ruby></div>
'''
# Inline style applied to the outer element (border + writing mode + emphasis position).
STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
         'writing-mode: {wm}; text-emphasis-position: {posval};'
# Inline style that actually enables the emphasis marks.
STYLE2 = 'text-emphasis: circle;'
TAGS = [
    # (tag, start, end)
    ('div', '<div style="{style1}{style2}">', '</div>'),
    ('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
    # pos, text-emphasis-position, ruby-position,
    # writing-modes, dir text
    ('top', 'over right', 'over',
     ['horizontal-tb'], 'below'),
    ('bottom', 'under right', 'under',
     ['horizontal-tb'], 'over'),
    ('right', 'over right', 'over',
     ['vertical-rl', 'vertical-lr'], 'to the left of'),
    ('left', 'over left', 'under',
     ['vertical-rl', 'vertical-lr'], 'to the right of'),
]
import string
def write_file(filename, content):
    """Write *content* to *filename*, encoded as UTF-8."""
    encoded = content.encode('UTF-8')
    handle = open(filename, 'wb')
    try:
        handle.write(encoded)
    finally:
        handle.close()
# Emit a Mozilla reftest.list manifest on stdout while writing test/ref files.
print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
    idx += 1
    # One shared reference file per position, rendered with the first writing mode.
    ref_file = REF_FILE.format(idx)
    content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
    write_file(ref_file, content)
    # Variant suffix letters: a, b, c, ... across writing modes and tags.
    suffix = iter(string.ascii_lowercase)
    for wm in wms:
        style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
        for (tag, start, end) in TAGS:
            test_file = TEST_FILE.format(idx, next(suffix))
            content = TEST_TEMPLATE.format(
                pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
                start=start.format(style1=style1, style2=STYLE2), end=end)
            write_file(test_file, content)
            # Each manifest line pairs a generated test with its reference.
            print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
|
mpl-2.0
|
skuda/client-python
|
kubernetes/client/models/v1_node_selector.py
|
1
|
3415
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NodeSelector(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, node_selector_terms=None):
        """
        V1NodeSelector - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # attribute name -> swagger type, consumed by to_dict().
        self.swagger_types = {
            'node_selector_terms': 'list[V1NodeSelectorTerm]'
        }

        # attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'node_selector_terms': 'nodeSelectorTerms'
        }

        self._node_selector_terms = node_selector_terms

    @property
    def node_selector_terms(self):
        """
        Gets the node_selector_terms of this V1NodeSelector.
        Required. A list of node selector terms. The terms are ORed.

        :return: The node_selector_terms of this V1NodeSelector.
        :rtype: list[V1NodeSelectorTerm]
        """
        return self._node_selector_terms

    @node_selector_terms.setter
    def node_selector_terms(self, node_selector_terms):
        """
        Sets the node_selector_terms of this V1NodeSelector.
        Required. A list of node selector terms. The terms are ORed.

        :param node_selector_terms: The node_selector_terms of this V1NodeSelector.
        :type: list[V1NodeSelectorTerm]
        """
        if node_selector_terms is None:
            raise ValueError("Invalid value for `node_selector_terms`, must not be `None`")

        self._node_selector_terms = node_selector_terms

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3, so six.iteritems is not needed here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Guard against unrelated types: the previous implementation accessed
        # other.__dict__ unconditionally and raised AttributeError for e.g. ints.
        if not isinstance(other, V1NodeSelector):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
apache-2.0
|
andreesg/bda.plone.payment
|
src/bda/plone/payment/__init__.py
|
1
|
2636
|
# -*- coding: utf-8 -*-
from bda.plone.payment.interfaces import IPayment
from bda.plone.payment.interfaces import IPaymentEvent
from bda.plone.payment.interfaces import IPaymentFailedEvent
from bda.plone.payment.interfaces import IPaymentSettings
from bda.plone.payment.interfaces import IPaymentSuccessEvent
from zope.component import adapter
from zope.component import getAdapter
from zope.component import getAdapters
from zope.event import notify
from zope.interface import implementer
from zope.interface import Interface
@implementer(IPaymentEvent)
class PaymentEvent(object):
    """Base event fired during payment processing, carrying the full context."""

    def __init__(self, context, request, payment, order_uid, data):
        # Content object the payment applies to.
        self.context = context
        # Current request.
        self.request = request
        # The IPayment adapter involved.
        self.payment = payment
        # Identifier of the order being paid (as passed by the caller).
        self.order_uid = order_uid
        # Payment-backend specific payload (as passed by the caller).
        self.data = data
@implementer(IPaymentSuccessEvent)
class PaymentSuccessEvent(PaymentEvent):
    # Fired by Payment.succeed() when a payment completed successfully.
    pass
@implementer(IPaymentFailedEvent)
class PaymentFailedEvent(PaymentEvent):
    # Fired by Payment.failed() when a payment did not complete.
    pass
class Payments(object):
    """Accessor for the ``IPayment`` adapters registered for a context."""

    def __init__(self, context):
        self.context = context

    def get(self, name):
        """Return the payment adapter registered under ``name``."""
        return getAdapter(self.context, IPayment, name=name)

    @property
    def payments(self):
        """List of all registered payment adapters."""
        adapters = getAdapters((self.context,), IPayment)
        return [_[1] for _ in adapters]

    @property
    def vocab(self):
        """List of ``(name, label)`` tuples for the available payments."""
        adapters = getAdapters((self.context,), IPayment)
        return [(_[0], _[1].label) for _ in adapters if _[1].available]

    @property
    def default(self):
        """Name of the default payment, falling back to the first registered one.

        Returns ``None`` when no payment adapters are registered at all.
        """
        # getAdapters() yields (name, adapter) pairs lazily. It must be
        # materialized here: the previous code exhausted the iterable in the
        # loop and then truth-tested and subscripted it (``adapters[0][0]``),
        # which fails on an iterator.
        adapters = list(getAdapters((self.context,), IPayment))
        for name, payment in adapters:
            if payment.default:
                return name
        if adapters:
            return adapters[0][0]
@implementer(IPayment)
@adapter(Interface)
class Payment(object):
    """Abstract base class for concrete payment adapters."""

    # Unique payment identifier; must be set by subclasses.
    pid = None
    # Human readable label; must be set by subclasses.
    label = None
    # Whether completion is deferred (assumption: e.g. external redirect) -- confirm in subclasses.
    deferred = False

    def __init__(self, context):
        self.context = context

    @property
    def available(self):
        """Whether this payment is enabled in the payment settings."""
        settings = IPaymentSettings(self.context)
        return self.pid in settings.available

    @property
    def default(self):
        """Whether this payment is configured as the default."""
        settings = IPaymentSettings(self.context)
        return self.pid == settings.default

    def succeed(self, request, order_uid, data=None):
        """Notify subscribers that the payment succeeded."""
        # ``data=None`` sentinel replaces the shared mutable default
        # ``data=dict()``; each call now gets its own dict.
        if data is None:
            data = {}
        evt = PaymentSuccessEvent(self.context, request, self, order_uid, data)
        notify(evt)

    def failed(self, request, order_uid, data=None):
        """Notify subscribers that the payment failed."""
        if data is None:
            data = {}
        evt = PaymentFailedEvent(self.context, request, self, order_uid, data)
        notify(evt)

    def init_url(self, uid):
        """Return the URL that initiates the payment process for order ``uid``."""
        raise NotImplementedError(u"Abstract ``Payment`` does not implement "
                                  u"``init_url``")
|
bsd-3-clause
|
jianran/spark
|
examples/src/main/python/ml/quantile_discretizer_example.py
|
123
|
1707
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import QuantileDiscretizer
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Start (or reuse) a Spark session for this example.
    spark = SparkSession\
        .builder\
        .appName("QuantileDiscretizerExample")\
        .getOrCreate()

    # $example on$
    data = [(0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2)]
    df = spark.createDataFrame(data, ["id", "hour"])
    # $example off$

    # Output of QuantileDiscretizer for such small datasets can depend on the number of
    # partitions. Here we force a single partition to ensure consistent results.
    # Note this is not necessary for normal use cases
    df = df.repartition(1)

    # $example on$
    # Bucket the continuous "hour" column into 3 quantile-based bins.
    discretizer = QuantileDiscretizer(numBuckets=3, inputCol="hour", outputCol="result")

    result = discretizer.fit(df).transform(df)
    result.show()
    # $example off$

    spark.stop()
|
apache-2.0
|
sandhujasmine/conda-build
|
conda_build/exceptions.py
|
8
|
1182
|
import textwrap
# Horizontal rule used to delimit error reports.
SEPARATOR = "-" * 70


def indent(s):
    """Dedent *s* and re-wrap it to the default textwrap width.

    Defined as a proper function (the original was a lambda assignment,
    which PEP 8 / E731 discourages).
    """
    return textwrap.fill(textwrap.dedent(s))
class CondaBuildException(Exception):
    """Root of the conda-build exception hierarchy."""


class YamlParsingError(CondaBuildException):
    """Raised when recipe YAML cannot be parsed."""


class UnableToParse(YamlParsingError):
    """YamlParsingError that wraps the original parser exception."""

    def __init__(self, original, *args, **kwargs):
        super(UnableToParse, self).__init__(*args, **kwargs)
        # Keep the underlying exception for reporting.
        self.original = original

    def error_msg(self):
        """Full separator-prefixed error report."""
        sections = [SEPARATOR, self.error_body(), self.indented_exception()]
        return "\n".join(sections)

    def error_body(self):
        """Headline describing what failed."""
        return "Unable to parse meta.yaml file\n"

    def indented_exception(self):
        """The original exception text with continuation lines marked by '--> '."""
        text = str(self.original).replace("\n", "\n--> ")
        return "Error Message:\n--> {}\n\n".format(text)
class UnableToParseMissingJinja2(UnableToParse):
    """UnableToParse variant that hints jinja2 is not installed."""

    def error_body(self):
        base = super(UnableToParseMissingJinja2, self).error_body()
        hint = indent("""\
It appears you are missing jinja2. Please install that
package, then attempt to build.
""")
        return "\n".join([base, hint])
|
bsd-3-clause
|
tmpgit/intellij-community
|
python/lib/Lib/socket.py
|
73
|
54596
|
"""
This is an updated socket module for use on JVMs > 1.5; it is derived from the old jython socket module.
It is documented, along with known issues and workarounds, on the jython wiki.
http://wiki.python.org/jython/NewSocketModule
"""
# Module-wide default timeout for new sockets; managed by the
# getdefaulttimeout/setdefaulttimeout functions listed in __all__.
_defaulttimeout = None
import errno
import jarray
import string
import struct
import sys
import threading
import time
import types
# Java.io classes
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
# Java.io exceptions
import java.io.InterruptedIOException
import java.io.IOException
# Java.lang classes
import java.lang.String
# Java.lang exceptions
import java.lang.Exception
# Java.net classes
import java.net.DatagramPacket
import java.net.InetAddress
import java.net.InetSocketAddress
import java.net.Socket
# Java.net exceptions
import java.net.BindException
import java.net.ConnectException
import java.net.NoRouteToHostException
import java.net.PortUnreachableException
import java.net.ProtocolException
import java.net.SocketException
import java.net.SocketTimeoutException
import java.net.UnknownHostException
# Java.nio classes
import java.nio.ByteBuffer
import java.nio.channels.DatagramChannel
import java.nio.channels.ServerSocketChannel
import java.nio.channels.SocketChannel
# Java.nio exceptions
import java.nio.channels.AlreadyConnectedException
import java.nio.channels.AsynchronousCloseException
import java.nio.channels.CancelledKeyException
import java.nio.channels.ClosedByInterruptException
import java.nio.channels.ClosedChannelException
import java.nio.channels.ClosedSelectorException
import java.nio.channels.ConnectionPendingException
import java.nio.channels.IllegalBlockingModeException
import java.nio.channels.IllegalSelectorException
import java.nio.channels.NoConnectionPendingException
import java.nio.channels.NonReadableChannelException
import java.nio.channels.NonWritableChannelException
import java.nio.channels.NotYetBoundException
import java.nio.channels.NotYetConnectedException
import java.nio.channels.UnresolvedAddressException
import java.nio.channels.UnsupportedAddressTypeException
# Javax.net.ssl classes
import javax.net.ssl.SSLSocketFactory
# Javax.net.ssl exceptions
javax.net.ssl.SSLException
javax.net.ssl.SSLHandshakeException
javax.net.ssl.SSLKeyException
javax.net.ssl.SSLPeerUnverifiedException
javax.net.ssl.SSLProtocolException
import org.python.core.io.DatagramSocketIO
import org.python.core.io.ServerSocketIO
import org.python.core.io.SocketIO
from org.python.core.Py import newString as asPyString
class error(Exception):
    """Base socket error, mirroring cpython's socket.error."""

class herror(error):
    """Host resolution related error."""

class gaierror(error):
    """Address information lookup error."""

class timeout(error):
    """Raised when a socket operation exceeds its timeout."""

class sslerror(error):
    """SSL related socket error."""

def _unmapped_exception(exc):
    """Wrap a Java exception that has no specific python mapping."""
    message = 'Unmapped exception: %s' % exc
    return error(-1, message)

def java_net_socketexception_handler(exc):
    """Translate a java.net.SocketException into the closest python error."""
    prefix = "Address family not supported by protocol family"
    if not exc.message.startswith(prefix):
        return _unmapped_exception(exc)
    return error(errno.EAFNOSUPPORT, 'Address family not supported by protocol family: See http://wiki.python.org/jython/NewSocketModule#IPV6addresssupport')

def would_block_error(exc=None):
    """Error reported for non-blocking operations that would block."""
    return error(errno.EWOULDBLOCK, 'The socket operation could not complete without blocking')
# Wildcard "circumstance" used as the second element of _exception_map keys.
ALL = None

# Maps (java exception class, circumstance) to a callable that builds the
# python-level equivalent exception. A value of None marks the exception as
# known but deliberately unmapped; _map_exception falls back to a generic
# error(-1, ...) for those.
_exception_map = {

    # (<javaexception>, <circumstance>) : callable that raises the python equivalent exception, or None to stub out as unmapped

    (java.io.IOException, ALL)            : lambda x: error(errno.ECONNRESET, 'Software caused connection abort'),
    (java.io.InterruptedIOException, ALL) : lambda x: timeout('timed out'),

    (java.net.BindException, ALL)            : lambda x: error(errno.EADDRINUSE, 'Address already in use'),
    (java.net.ConnectException, ALL)         : lambda x: error(errno.ECONNREFUSED, 'Connection refused'),
    (java.net.NoRouteToHostException, ALL)   : None,
    (java.net.PortUnreachableException, ALL) : None,
    (java.net.ProtocolException, ALL)        : None,
    (java.net.SocketException, ALL)          : java_net_socketexception_handler,
    (java.net.SocketTimeoutException, ALL)   : lambda x: timeout('timed out'),
    (java.net.UnknownHostException, ALL)     : lambda x: gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed'),

    (java.nio.channels.AlreadyConnectedException, ALL)       : lambda x: error(errno.EISCONN, 'Socket is already connected'),
    (java.nio.channels.AsynchronousCloseException, ALL)      : None,
    (java.nio.channels.CancelledKeyException, ALL)           : None,
    (java.nio.channels.ClosedByInterruptException, ALL)      : None,
    (java.nio.channels.ClosedChannelException, ALL)          : lambda x: error(errno.EPIPE, 'Socket closed'),
    (java.nio.channels.ClosedSelectorException, ALL)         : None,
    (java.nio.channels.ConnectionPendingException, ALL)      : None,
    (java.nio.channels.IllegalBlockingModeException, ALL)    : None,
    (java.nio.channels.IllegalSelectorException, ALL)        : None,
    (java.nio.channels.NoConnectionPendingException, ALL)    : None,
    (java.nio.channels.NonReadableChannelException, ALL)     : None,
    (java.nio.channels.NonWritableChannelException, ALL)     : None,
    (java.nio.channels.NotYetBoundException, ALL)            : None,
    (java.nio.channels.NotYetConnectedException, ALL)        : None,
    (java.nio.channels.UnresolvedAddressException, ALL)      : lambda x: gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed'),
    (java.nio.channels.UnsupportedAddressTypeException, ALL) : None,

    # These error codes are currently wrong: getting them correct is going to require
    # some investigation. Cpython 2.6 introduced extensive SSL support.
    (javax.net.ssl.SSLException, ALL)               : lambda x: sslerror(-1, 'SSL exception'),
    (javax.net.ssl.SSLHandshakeException, ALL)      : lambda x: sslerror(-1, 'SSL handshake exception'),
    (javax.net.ssl.SSLKeyException, ALL)            : lambda x: sslerror(-1, 'SSL key exception'),
    (javax.net.ssl.SSLPeerUnverifiedException, ALL) : lambda x: sslerror(-1, 'SSL peer unverified exception'),
    (javax.net.ssl.SSLProtocolException, ALL)       : lambda x: sslerror(-1, 'SSL protocol exception'),
}
def _map_exception(exc, circumstance=ALL):
    """Translate a Java exception into the matching python-level exception."""
    handler = _exception_map.get((exc.__class__, circumstance))
    if handler:
        mapped = handler(exc)
    else:
        mapped = error(-1, 'Unmapped exception: %s' % exc)
    # Preserve the original Java exception for callers that need the details.
    mapped.java_exception = exc
    return mapped
# Feature flags queried (and optionally toggled) through supports().
_feature_support_map = dict(
    ipv6=True,
    idna=False,
    tipc=False,
)

def supports(feature, *args):
    """Report whether *feature* is supported; with one extra argument, set it first."""
    if len(args) == 1:
        value, = args
        _feature_support_map[feature] = value
    return _feature_support_map.get(feature, False)
# I/O mode selectors used by _nio_impl.config().
MODE_BLOCKING = 'block'
MODE_NONBLOCKING = 'nonblock'
MODE_TIMEOUT = 'timeout'

_permitted_modes = (MODE_BLOCKING, MODE_NONBLOCKING, MODE_TIMEOUT)

# shutdown() direction constants.
SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2

# Address family constants.
AF_UNSPEC = 0
AF_INET = 2
AF_INET6 = 23

AI_PASSIVE=1
AI_CANONNAME=2

# For some reason, probably historical, SOCK_DGRAM and SOCK_STREAM are opposite values of what they are on cpython.
# I.E. The following is the way they are on cpython
# SOCK_STREAM = 1
# SOCK_DGRAM = 2
# At some point, we should probably switch them around, which *should* not affect anybody

SOCK_DGRAM = 1
SOCK_STREAM = 2
SOCK_RAW = 3 # not supported
SOCK_RDM = 4 # not supported
SOCK_SEQPACKET = 5 # not supported

SOL_SOCKET = 0xFFFF

IPPROTO_TCP = 6
IPPROTO_UDP = 17

# Socket options mapped onto java bean properties by the *_impl classes below.
SO_BROADCAST = 1
SO_KEEPALIVE = 2
SO_LINGER = 4
SO_OOBINLINE = 8
SO_RCVBUF = 16
SO_REUSEADDR = 32
SO_SNDBUF = 64
SO_TIMEOUT = 128

TCP_NODELAY = 256

INADDR_ANY = "0.0.0.0"
INADDR_BROADCAST = "255.255.255.255"

# Options with negative constants are not supported
# They are being added here so that code that refers to them
# will not break with an AttributeError

SO_ACCEPTCONN = -1
SO_DEBUG = -2
SO_DONTROUTE = -4
SO_ERROR = -8
SO_EXCLUSIVEADDRUSE = -16
SO_RCVLOWAT = -32
SO_RCVTIMEO = -64
SO_REUSEPORT = -128
SO_SNDLOWAT = -256
SO_SNDTIMEO = -512
SO_TYPE = -1024
SO_USELOOPBACK = -2048

# Public API of this module.
__all__ = ['AF_UNSPEC', 'AF_INET', 'AF_INET6', 'AI_PASSIVE', 'SOCK_DGRAM',
           'SOCK_RAW', 'SOCK_RDM', 'SOCK_SEQPACKET', 'SOCK_STREAM', 'SOL_SOCKET',
           'SO_BROADCAST', 'SO_ERROR', 'SO_KEEPALIVE', 'SO_LINGER', 'SO_OOBINLINE',
           'SO_RCVBUF', 'SO_REUSEADDR', 'SO_SNDBUF', 'SO_TIMEOUT', 'TCP_NODELAY',
           'INADDR_ANY', 'INADDR_BROADCAST', 'IPPROTO_TCP', 'IPPROTO_UDP',
           'SocketType', 'error', 'herror', 'gaierror', 'timeout',
           'getfqdn', 'gethostbyaddr', 'gethostbyname', 'gethostname',
           'socket', 'getaddrinfo', 'getdefaulttimeout', 'setdefaulttimeout',
           'has_ipv6', 'htons', 'htonl', 'ntohs', 'ntohl',
           'SHUT_RD', 'SHUT_WR', 'SHUT_RDWR',
           ]
def _constant_to_name(const_value):
    """Reverse-lookup the first socket-module name bound (by identity) to *const_value*."""
    sock_module = sys.modules['socket']
    for candidate in dir(sock_module):
        if getattr(sock_module, candidate) is const_value:
            return candidate
    return "Unknown"
class _nio_impl:
    """Shared base wrapping a paired java socket and java.nio channel."""
    timeout = None
    mode = MODE_BLOCKING

    def getpeername(self):
        # (remote host address string, remote port)
        return (self.jsocket.getInetAddress().getHostAddress(), self.jsocket.getPort() )

    def config(self, mode, timeout):
        """Configure the blocking mode and, for MODE_TIMEOUT, SO_TIMEOUT."""
        self.mode = mode
        if self.mode == MODE_BLOCKING:
            self.jchannel.configureBlocking(1)
        if self.mode == MODE_NONBLOCKING:
            self.jchannel.configureBlocking(0)
        if self.mode == MODE_TIMEOUT:
            # Timeout mode keeps the channel blocking and relies on SO_TIMEOUT
            # of the underlying java socket.
            self.jchannel.configureBlocking(1)
            self._timeout_millis = int(timeout*1000)
            self.jsocket.setSoTimeout(self._timeout_millis)

    def getsockopt(self, level, option):
        """Read a socket option via the mapped java getter; raise for unsupported options."""
        if (level, option) in self.options:
            result = getattr(self.jsocket, "get%s" % self.options[ (level, option) ])()
            if option == SO_LINGER:
                # cpython compatibility: SO_LINGER is returned as a packed
                # (onoff, linger seconds) struct; java reports -1 for "off".
                if result == -1:
                    enabled, linger_time = 0, 0
                else:
                    enabled, linger_time = 1, result
                return struct.pack('ii', enabled, linger_time)
            return result
        else:
            raise error(errno.ENOPROTOOPT, "Socket option '%s' (level '%s') not supported on socket(%s)" % (_constant_to_name(option), _constant_to_name(level), str(self.jsocket)))

    def setsockopt(self, level, option, value):
        """Set a socket option via the mapped java setter; raise for unsupported options."""
        if (level, option) in self.options:
            if option == SO_LINGER:
                # Unpack the cpython-style (onoff, linger seconds) struct.
                values = struct.unpack('ii', value)
                self.jsocket.setSoLinger(*values)
            else:
                getattr(self.jsocket, "set%s" % self.options[ (level, option) ])(value)
        else:
            raise error(errno.ENOPROTOOPT, "Socket option '%s' (level '%s') not supported on socket(%s)" % (_constant_to_name(option), _constant_to_name(level), str(self.jsocket)))

    def close(self):
        self.jsocket.close()

    def getchannel(self):
        return self.jchannel

    def fileno(self):
        # The org.python.core.io wrapper serves as the file descriptor object.
        return self.socketio
class _client_socket_impl(_nio_impl):
    """Client TCP socket backed by java.nio.channels.SocketChannel."""
    # (level, option) -> java bean property name on java.net.Socket.
    options = {
        (SOL_SOCKET,  SO_KEEPALIVE):   'KeepAlive',
        (SOL_SOCKET,  SO_LINGER):      'SoLinger',
        (SOL_SOCKET,  SO_OOBINLINE):   'OOBInline',
        (SOL_SOCKET,  SO_RCVBUF):      'ReceiveBufferSize',
        (SOL_SOCKET,  SO_REUSEADDR):   'ReuseAddress',
        (SOL_SOCKET,  SO_SNDBUF):      'SendBufferSize',
        (SOL_SOCKET,  SO_TIMEOUT):     'SoTimeout',
        (IPPROTO_TCP, TCP_NODELAY):    'TcpNoDelay',
    }

    def __init__(self, socket=None):
        # When wrapping an accepted java socket, reuse its existing channel;
        # otherwise open a fresh SocketChannel.
        if socket:
            self.jchannel = socket.getChannel()
        else:
            self.jchannel = java.nio.channels.SocketChannel.open()
        self.jsocket = self.jchannel.socket()
        self.socketio = org.python.core.io.SocketIO(self.jchannel, 'rw')

    def bind(self, jsockaddr, reuse_addr):
        self.jsocket.setReuseAddress(reuse_addr)
        self.jsocket.bind(jsockaddr)

    def connect(self, jsockaddr):
        if self.mode == MODE_TIMEOUT:
            # java.net connect accepts the millisecond timeout directly.
            self.jsocket.connect (jsockaddr, self._timeout_millis)
        else:
            self.jchannel.connect(jsockaddr)

    def finish_connect(self):
        return self.jchannel.finishConnect()

    def _do_read_net(self, buf):
        # Need two separate implementations because the java.nio APIs do not support timeouts
        return self.jsocket.getInputStream().read(buf)

    def _do_read_nio(self, buf):
        bytebuf = java.nio.ByteBuffer.wrap(buf)
        count = self.jchannel.read(bytebuf)
        return count

    def _do_write_net(self, buf):
        self.jsocket.getOutputStream().write(buf)
        return len(buf)

    def _do_write_nio(self, buf):
        bytebuf = java.nio.ByteBuffer.wrap(buf)
        count = self.jchannel.write(bytebuf)
        return count

    def read(self, buf):
        # Timeout mode uses the java.net stream API (honours SO_TIMEOUT);
        # blocking/non-blocking modes use the nio channel API.
        if self.mode == MODE_TIMEOUT:
            return self._do_read_net(buf)
        else:
            return self._do_read_nio(buf)

    def write(self, buf):
        if self.mode == MODE_TIMEOUT:
            return self._do_write_net(buf)
        else:
            return self._do_write_nio(buf)

    def shutdown(self, how):
        if how in (SHUT_RD, SHUT_RDWR):
            self.jsocket.shutdownInput()
        if how in (SHUT_WR, SHUT_RDWR):
            self.jsocket.shutdownOutput()
class _server_socket_impl(_nio_impl):
    """Listening TCP socket backed by java.nio.channels.ServerSocketChannel."""
    # (level, option) -> java bean property name on java.net.ServerSocket.
    options = {
        (SOL_SOCKET, SO_RCVBUF):    'ReceiveBufferSize',
        (SOL_SOCKET, SO_REUSEADDR): 'ReuseAddress',
        (SOL_SOCKET, SO_TIMEOUT):   'SoTimeout',
    }

    def __init__(self, jsockaddr, backlog, reuse_addr):
        self.jchannel = java.nio.channels.ServerSocketChannel.open()
        self.jsocket = self.jchannel.socket()
        self.jsocket.setReuseAddress(reuse_addr)
        self.jsocket.bind(jsockaddr, backlog)
        self.socketio = org.python.core.io.ServerSocketIO(self.jchannel, 'rw')

    def accept(self):
        """Accept one connection; returns a _client_socket_impl, or None when non-blocking and nothing is pending."""
        if self.mode in (MODE_BLOCKING, MODE_NONBLOCKING):
            new_cli_chan = self.jchannel.accept()
            if new_cli_chan is not None:
                return _client_socket_impl(new_cli_chan.socket())
            else:
                return None
        else:
            # In timeout mode now
            new_cli_sock = self.jsocket.accept()
            return _client_socket_impl(new_cli_sock)

    def shutdown(self, how):
        # This is no-op on java, for server sockets.
        # What the user wants to achieve is achieved by calling close() on
        # java/jython. But we can't call that here because that would then
        # later cause the user explicit close() call to fail
        pass
class _datagram_socket_impl(_nio_impl):
    """UDP socket implementation on top of java.nio.DatagramChannel.

    Send and receive each have two code paths: the java.net API (used
    in timeout mode, since only java.net honours SoTimeout) and the
    java.nio API (used in blocking and non-blocking modes).
    """

    # (level, optname) -> JavaBean property name on the java socket.
    options = {
        (SOL_SOCKET, SO_BROADCAST): 'Broadcast',
        (SOL_SOCKET, SO_RCVBUF): 'ReceiveBufferSize',
        (SOL_SOCKET, SO_REUSEADDR): 'ReuseAddress',
        (SOL_SOCKET, SO_SNDBUF): 'SendBufferSize',
        (SOL_SOCKET, SO_TIMEOUT): 'SoTimeout',
    }

    def __init__(self, jsockaddr=None, reuse_addr=0):
        # Bind immediately when a local address is supplied; otherwise
        # leave the channel unbound (e.g. a client-side sendto socket).
        self.jchannel = java.nio.channels.DatagramChannel.open()
        self.jsocket = self.jchannel.socket()
        if jsockaddr is not None:
            self.jsocket.setReuseAddress(reuse_addr)
            self.jsocket.bind(jsockaddr)
        self.socketio = org.python.core.io.DatagramSocketIO(self.jchannel, 'rw')

    def connect(self, jsockaddr):
        self.jchannel.connect(jsockaddr)

    def disconnect(self):
        """
        Disconnect the datagram socket.
        cpython appears not to have this operation
        """
        self.jchannel.disconnect()

    def shutdown(self, how):
        # This is no-op on java, for datagram sockets.
        # What the user wants to achieve is achieved by calling close() on
        # java/jython. But we can't call that here because that would then
        # later cause the user explicit close() call to fail
        pass

    def _do_send_net(self, byte_array, socket_address, flags):
        """Send via java.net DatagramPacket (timeout mode)."""
        # Need two separate implementations because the java.nio APIs do not support timeouts
        num_bytes = len(byte_array)
        if self.jsocket.isConnected() and socket_address is None:
            packet = java.net.DatagramPacket(byte_array, num_bytes)
        else:
            packet = java.net.DatagramPacket(byte_array, num_bytes, socket_address)
        self.jsocket.send(packet)
        return num_bytes

    def _do_send_nio(self, byte_array, socket_address, flags):
        """Send via java.nio (blocking / non-blocking modes)."""
        byte_buf = java.nio.ByteBuffer.wrap(byte_array)
        if self.jchannel.isConnected() and socket_address is None:
            bytes_sent = self.jchannel.write(byte_buf)
        else:
            bytes_sent = self.jchannel.send(byte_buf, socket_address)
        return bytes_sent

    def sendto(self, byte_array, jsockaddr, flags):
        if self.mode == MODE_TIMEOUT:
            return self._do_send_net(byte_array, jsockaddr, flags)
        else:
            return self._do_send_nio(byte_array, jsockaddr, flags)

    def send(self, byte_array, flags):
        # Send on a connected datagram socket (no explicit destination).
        if self.mode == MODE_TIMEOUT:
            return self._do_send_net(byte_array, None, flags)
        else:
            return self._do_send_nio(byte_array, None, flags)

    def _do_receive_net(self, return_source_address, num_bytes, flags):
        """Receive via java.net (timeout mode); optionally (data, (host, port))."""
        byte_array = jarray.zeros(num_bytes, 'b')
        packet = java.net.DatagramPacket(byte_array, num_bytes)
        self.jsocket.receive(packet)
        bytes_rcvd = packet.getLength()
        if bytes_rcvd < num_bytes:
            byte_array = byte_array[:bytes_rcvd]
        return_data = byte_array.tostring()
        if return_source_address:
            host = None
            if packet.getAddress():
                host = packet.getAddress().getHostAddress()
            port = packet.getPort()
            return return_data, (host, port)
        else:
            return return_data

    def _do_receive_nio(self, return_source_address, num_bytes, flags):
        """Receive via java.nio (blocking / non-blocking modes)."""
        byte_array = jarray.zeros(num_bytes, 'b')
        byte_buf = java.nio.ByteBuffer.wrap(byte_array)
        source_address = self.jchannel.receive(byte_buf)
        if source_address is None and not self.jchannel.isBlocking():
            # Nothing available on a non-blocking channel.
            raise would_block_error()
        byte_buf.flip() ; bytes_read = byte_buf.remaining()
        if bytes_read < num_bytes:
            byte_array = byte_array[:bytes_read]
        return_data = byte_array.tostring()
        if return_source_address:
            return return_data, (source_address.getAddress().getHostAddress(), source_address.getPort())
        else:
            return return_data

    def recvfrom(self, num_bytes, flags):
        if self.mode == MODE_TIMEOUT:
            return self._do_receive_net(1, num_bytes, flags)
        else:
            return self._do_receive_nio(1, num_bytes, flags)

    def recv(self, num_bytes, flags):
        if self.mode == MODE_TIMEOUT:
            return self._do_receive_net(0, num_bytes, flags)
        else:
            return self._do_receive_nio(0, num_bytes, flags)
# This module always advertises IPv6 capability.
has_ipv6 = True # IPV6 FTW!

# Name and address functions
def _gethostbyaddr(name):
    # Approximation of CPython's gethostbyaddr support: resolve the name
    # to all of its addresses, then collect the host-name and address
    # strings of every resolved java InetAddress.  At least the return
    # types match CPython's.
    resolved = java.net.InetAddress.getAllByName(gethostbyname(name))
    names = [asPyString(entry.getHostName()) for entry in resolved]
    addrs = [asPyString(entry.getHostAddress()) for entry in resolved]
    return (names, addrs)
def getfqdn(name=None):
    """
    Return a fully qualified domain name for name. If name is omitted or empty
    it is interpreted as the local host. To find the fully qualified name,
    the hostname returned by gethostbyaddr() is checked, then aliases for the
    host, if available. The first name which includes a period is selected.
    In case no fully qualified domain name is available, the hostname is
    returned unchanged.
    New in version 2.0.
    """
    if not name:
        name = gethostname()
    names, addrs = _gethostbyaddr(name)
    for a in names:
        # The first candidate containing a dot is taken to be the FQDN.
        if a.find(".") >= 0:
            return a
    return name
def gethostname():
    """Return the local host's name, via java.net.InetAddress."""
    try:
        return asPyString(java.net.InetAddress.getLocalHost().getHostName())
    except java.lang.Exception, jlx:
        raise _map_exception(jlx)
def gethostbyname(name):
    """Resolve name and return its primary IP address as a string."""
    try:
        return asPyString(java.net.InetAddress.getByName(name).getHostAddress())
    except java.lang.Exception, jlx:
        raise _map_exception(jlx)
def gethostbyaddr(name):
    """Return a (primary_hostname, name_list, address_list) triple."""
    names, addrs = _gethostbyaddr(name)
    return (names[0], names, addrs)
def getservbyname(service_name, protocol_name=None):
    """Return the port number for a service name.

    Returns None when the optional jnr.netdb library is not available
    (note: CPython raises an error instead).
    """
    try:
        from jnr.netdb import Service
    except ImportError:
        return None
    return Service.getServiceByName(service_name, protocol_name).getPort()
def getservbyport(port, protocol_name=None):
    """Return the service name for a port number.

    Returns None when the optional jnr.netdb library is not available
    (note: CPython raises an error instead).
    """
    try:
        from jnr.netdb import Service
    except ImportError:
        return None
    return Service.getServiceByPort(port, protocol_name).getName()
def getprotobyname(protocol_name=None):
    """Return the protocol number for a protocol name.

    Returns None when the optional jnr.netdb library is not available
    (note: CPython raises an error instead).
    """
    try:
        from jnr.netdb import Protocol
    except ImportError:
        return None
    return Protocol.getProtocolByName(protocol_name).getProto()
def _realsocket(family = AF_INET, type = SOCK_STREAM, protocol=0):
    """Factory for the low-level socket implementation objects.

    Only AF_INET/AF_INET6 with SOCK_STREAM (TCP) or SOCK_DGRAM (UDP) are
    supported; other combinations fail the assertions below.
    """
    assert family in (AF_INET, AF_INET6), "Only AF_INET and AF_INET6 sockets are currently supported on jython"
    assert type in (SOCK_DGRAM, SOCK_STREAM), "Only SOCK_STREAM and SOCK_DGRAM sockets are currently supported on jython"
    if type == SOCK_STREAM:
        if protocol != 0:
            assert protocol == IPPROTO_TCP, "Only IPPROTO_TCP supported on SOCK_STREAM sockets"
        return _tcpsocket()
    else:
        if protocol != 0:
            assert protocol == IPPROTO_UDP, "Only IPPROTO_UDP supported on SOCK_DGRAM sockets"
        return _udpsocket()
#
# Attempt to provide IDNA (RFC 3490) support.
#
# Try java.net.IDN, built into java 6
#
# Each entry is (module name, encoder function name, exception type the
# encoder raises for names it cannot encode).
idna_libraries = [
    ('java.net.IDN', 'toASCII', java.lang.IllegalArgumentException)
]
for idna_lib, idna_fn_name, exc in idna_libraries:
    try:
        m = __import__(idna_lib, globals(), locals(), [idna_fn_name])
        idna_fn = getattr(m, idna_fn_name)
        def _encode_idna(name):
            try:
                return idna_fn(name)
            except exc:
                raise UnicodeEncodeError(name)
        supports('idna', True)
        break
    except (AttributeError, ImportError), e:
        pass
else:
    # No IDNA library was importable: fall back to plain ASCII encoding.
    _encode_idna = lambda x: x.encode("ascii")
#
# Define data structures to support IPV4 and IPV6.
#
class _ip_address_t: pass
class _ipv4_address_t(_ip_address_t):
def __init__(self, sockaddr, port, jaddress):
self.sockaddr = sockaddr
self.port = port
self.jaddress = jaddress
def __getitem__(self, index):
if 0 == index:
return self.sockaddr
elif 1 == index:
return self.port
else:
raise IndexError()
def __len__(self):
return 2
def __str__(self):
return "('%s', %d)" % (self.sockaddr, self.port)
__repr__ = __str__
class _ipv6_address_t(_ip_address_t):
    """A 4-tuple-alike (host, port, flowinfo, scopeid) for AF_INET6.

    The flow-info slot is hard-coded to 0; the scope id is read from the
    stored java address object.
    """

    def __init__(self, sockaddr, port, jaddress):
        self.sockaddr = sockaddr
        self.port = port
        self.jaddress = jaddress

    def __getitem__(self, index):
        if index == 0:
            return self.sockaddr
        if index == 1:
            return self.port
        if index == 2:
            return 0
        if index == 3:
            return self.jaddress.scopeId
        raise IndexError()

    def __len__(self):
        return 4

    def __str__(self):
        return "('%s', %d, 0, %d)" % (self.sockaddr, self.port, self.jaddress.scopeId)

    __repr__ = __str__
def _get_jsockaddr(address_object, for_udp=False):
    """Convert a python address (2-tuple, 4-tuple, _ip_address_t or None)
    into a java.net.InetSocketAddress.

    For UDP an empty host means INADDR_ANY and "<broadcast>" means
    INADDR_BROADCAST; for TCP an empty host means the wildcard address.
    Raises TypeError for malformed address tuples.
    """
    if address_object is None:
        return java.net.InetSocketAddress(0) # Let the system pick an ephemeral port
    if isinstance(address_object, _ip_address_t):
        # Already resolved (e.g. by getaddrinfo): reuse the java address.
        return java.net.InetSocketAddress(address_object.jaddress, address_object[1])
    error_message = "Address must be a 2-tuple (ipv4: (host, port)) or a 4-tuple (ipv6: (host, port, flow, scope))"
    if not isinstance(address_object, tuple) or \
        len(address_object) not in [2,4] or \
        not isinstance(address_object[0], basestring) or \
        not isinstance(address_object[1], (int, long)):
        raise TypeError(error_message)
    if len(address_object) == 4 and not isinstance(address_object[3], (int, long)):
        raise TypeError(error_message)
    hostname, port = address_object[0].strip(), address_object[1]
    if for_udp:
        if hostname == "":
            hostname = INADDR_ANY
        elif hostname == "<broadcast>":
            hostname = INADDR_BROADCAST
    else:
        if hostname == "":
            hostname = None
    if hostname is None:
        # Wildcard bind: let java choose the local address.
        return java.net.InetSocketAddress(port)
    if isinstance(hostname, unicode):
        hostname = _encode_idna(hostname)
    if len(address_object) == 4:
        # There is no way to get a Inet6Address: Inet6Address.getByName() simply calls
        # InetAddress.getByName,() which also returns Inet4Address objects
        # If users want to use IPv6 address, scoped or not,
        # they should use getaddrinfo(family=AF_INET6)
        pass
    return java.net.InetSocketAddress(java.net.InetAddress.getByName(hostname), port)
# When true, getaddrinfo() returns only IPv4 addresses.
_ipv4_addresses_only = False
def _use_ipv4_addresses_only(value):
    """Globally restrict getaddrinfo() results to IPv4 addresses."""
    global _ipv4_addresses_only
    _ipv4_addresses_only = value
def getaddrinfo(host, port, family=AF_INET, socktype=None, proto=0, flags=None):
    """Resolve host/port into a list of
    (family, socktype, proto, canonname, sockaddr) tuples.

    Only AF_INET, AF_INET6 and AF_UNSPEC are supported; results can be
    globally restricted to IPv4 via _use_ipv4_addresses_only().
    """
    try:
        if not family in [AF_INET, AF_INET6, AF_UNSPEC]:
            raise gaierror(errno.EIO, 'ai_family not supported')
        filter_fns = []
        if _ipv4_addresses_only:
            filter_fns.append( lambda x: isinstance(x, java.net.Inet4Address) )
        else:
            filter_fns.append({
                AF_INET: lambda x: isinstance(x, java.net.Inet4Address),
                AF_INET6: lambda x: isinstance(x, java.net.Inet6Address),
                AF_UNSPEC: lambda x: isinstance(x, java.net.InetAddress),
            }[family])
        if host == "":
            host = java.net.InetAddress.getLocalHost().getHostName()
        if isinstance(host, unicode):
            host = _encode_idna(host)
        passive_mode = flags is not None and flags & AI_PASSIVE
        canonname_mode = flags is not None and flags & AI_CANONNAME
        results = []
        for a in java.net.InetAddress.getAllByName(host):
            if len([f for f in filter_fns if f(a)]):
                # NOTE: deliberately rebinds the `family` parameter to the
                # concrete family of this particular resolved address.
                family = {java.net.Inet4Address: AF_INET, java.net.Inet6Address: AF_INET6}[a.getClass()]
                if passive_mode and not canonname_mode:
                    canonname = ""
                else:
                    canonname = asPyString(a.getCanonicalHostName())
                if host is None and passive_mode and not canonname_mode:
                    sockaddr = INADDR_ANY
                else:
                    sockaddr = asPyString(a.getHostAddress())
                # TODO: Include flowinfo and scopeid in a 4-tuple for IPv6 addresses
                sock_tuple = {AF_INET : _ipv4_address_t, AF_INET6 : _ipv6_address_t}[family](sockaddr, port, a)
                results.append((family, socktype, proto, canonname, sock_tuple))
        return results
    except java.lang.Exception, jlx:
        raise _map_exception(jlx)
def getnameinfo(sock_addr, flags):
    """Not implemented on jython; always raises NotImplementedError."""
    raise NotImplementedError("getnameinfo not yet supported on jython.")
def getdefaulttimeout():
    """Return the module-wide default timeout (seconds as float, or None)."""
    return _defaulttimeout
def _calctimeoutvalue(value):
if value is None:
return None
try:
floatvalue = float(value)
except:
raise TypeError('Socket timeout value must be a number or None')
if floatvalue < 0.0:
raise ValueError("Socket timeout value cannot be negative")
if floatvalue < 0.000001:
return 0.0
return floatvalue
def setdefaulttimeout(timeout):
    """Set the module-wide default timeout for newly created sockets.

    The value is also mirrored onto _nonblocking_api_mixin.timeout; the
    try/finally keeps the two in sync even when validation raises.
    """
    global _defaulttimeout
    try:
        _defaulttimeout = _calctimeoutvalue(timeout)
    finally:
        _nonblocking_api_mixin.timeout = _defaulttimeout
def htons(x):
    """host-to-network short: identity, since the JVM is big-endian."""
    return x

def htonl(x):
    """host-to-network long: identity, since the JVM is big-endian."""
    return x

def ntohs(x):
    """network-to-host short: identity, since the JVM is big-endian."""
    return x

def ntohl(x):
    """network-to-host long: identity, since the JVM is big-endian."""
    return x
def inet_pton(family, ip_string):
    """Convert an IP address string to its packed binary string form.

    NOTE(review): delegates to InetAddress.getByName, which may perform
    a name lookup when ip_string is not a literal address — CPython's
    inet_pton never resolves names.  The family argument is not checked.
    """
    try:
        ia = java.net.InetAddress.getByName(ip_string)
        bytes = []
        for byte in ia.getAddress():
            # java bytes are signed; map them into the 0..255 range.
            if byte < 0:
                bytes.append(byte+256)
            else:
                bytes.append(byte)
        return "".join([chr(byte) for byte in bytes])
    except java.lang.Exception, jlx:
        raise _map_exception(jlx)
def inet_ntop(family, packed_ip):
    """Convert a packed binary IP address to its display string.

    The family argument is not checked; the packed length determines
    whether java produces an IPv4 or IPv6 representation.
    """
    try:
        jByteArray = jarray.array(packed_ip, 'b')
        ia = java.net.InetAddress.getByAddress(jByteArray)
        return ia.getHostAddress()
    except java.lang.Exception, jlx:
        raise _map_exception(jlx)
def inet_aton(ip_string):
    """IPv4-only convenience wrapper around inet_pton."""
    return inet_pton(AF_INET, ip_string)
def inet_ntoa(packed_ip):
    """IPv4-only convenience wrapper around inet_ntop."""
    return inet_ntop(AF_INET, packed_ip)
class _nonblocking_api_mixin:
    """Mixin managing blocking/non-blocking/timeout modes.

    Shared by _tcpsocket and _udpsocket.  Socket options set before the
    java implementation socket exists are buffered in pending_options
    and applied later by _config().
    """

    mode = MODE_BLOCKING
    # Number of _socketobject wrappers / file objects sharing this socket.
    reference_count = 0
    # NOTE: class-level attribute, so this lock is shared by all instances.
    close_lock = threading.Lock()

    def __init__(self):
        self.timeout = _defaulttimeout
        if self.timeout is not None:
            self.mode = MODE_TIMEOUT
        self.pending_options = {
            (SOL_SOCKET, SO_REUSEADDR): 0,
        }

    def gettimeout(self):
        """Return the timeout in seconds, or None when blocking."""
        return self.timeout

    def settimeout(self, timeout):
        """Set the timeout: None = blocking, ~0 = non-blocking."""
        self.timeout = _calctimeoutvalue(timeout)
        if self.timeout is None:
            self.mode = MODE_BLOCKING
        elif self.timeout < 0.000001:
            self.mode = MODE_NONBLOCKING
        else:
            self.mode = MODE_TIMEOUT
        self._config()

    def setblocking(self, flag):
        if flag:
            self.mode = MODE_BLOCKING
            self.timeout = None
        else:
            self.mode = MODE_NONBLOCKING
            self.timeout = 0.0
        self._config()

    def getblocking(self):
        return self.mode == MODE_BLOCKING

    def setsockopt(self, level, optname, value):
        # Buffer the option until the implementation socket exists.
        try:
            if self.sock_impl:
                self.sock_impl.setsockopt(level, optname, value)
            else:
                self.pending_options[ (level, optname) ] = value
        except java.lang.Exception, jlx:
            raise _map_exception(jlx)

    def getsockopt(self, level, optname):
        try:
            if self.sock_impl:
                return self.sock_impl.getsockopt(level, optname)
            else:
                return self.pending_options.get( (level, optname), None)
        except java.lang.Exception, jlx:
            raise _map_exception(jlx)

    def shutdown(self, how):
        assert how in (SHUT_RD, SHUT_WR, SHUT_RDWR)
        if not self.sock_impl:
            raise error(errno.ENOTCONN, "Transport endpoint is not connected")
        try:
            self.sock_impl.shutdown(how)
        except java.lang.Exception, jlx:
            raise _map_exception(jlx)

    def close(self):
        try:
            if self.sock_impl:
                self.sock_impl.close()
        except java.lang.Exception, jlx:
            raise _map_exception(jlx)

    def _config(self):
        """Propagate mode/timeout and buffered options to the impl socket."""
        assert self.mode in _permitted_modes
        if self.sock_impl:
            self.sock_impl.config(self.mode, self.timeout)
            for level, optname in self.pending_options.keys():
                # SO_REUSEADDR is applied when the implementation socket
                # is created (see bind/listen), not here.
                if optname != SO_REUSEADDR:
                    self.sock_impl.setsockopt(level, optname, self.pending_options[ (level, optname) ])

    def getchannel(self):
        """Return the underlying java.nio channel, or None if not created."""
        if not self.sock_impl:
            return None
        return self.sock_impl.getchannel()

    def fileno(self):
        if not self.sock_impl:
            return None
        return self.sock_impl.fileno()

    def _get_jsocket(self):
        return self.sock_impl.jsocket
class _tcpsocket(_nonblocking_api_mixin):
sock_impl = None
istream = None
ostream = None
local_addr = None
server = 0
def __init__(self):
_nonblocking_api_mixin.__init__(self)
def bind(self, addr):
assert not self.sock_impl
assert not self.local_addr
# Do the address format check
_get_jsockaddr(addr)
self.local_addr = addr
def listen(self, backlog):
"This signifies a server socket"
try:
assert not self.sock_impl
self.server = 1
self.sock_impl = _server_socket_impl(_get_jsockaddr(self.local_addr), backlog, self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
self._config()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def accept(self):
"This signifies a server socket"
try:
if not self.sock_impl:
self.listen()
assert self.server
new_sock = self.sock_impl.accept()
if not new_sock:
raise would_block_error()
cliconn = _tcpsocket()
cliconn.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ] = new_sock.jsocket.getReuseAddress()
cliconn.sock_impl = new_sock
cliconn._setup()
return cliconn, new_sock.getpeername()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _do_connect(self, addr):
try:
assert not self.sock_impl
self.sock_impl = _client_socket_impl()
if self.local_addr: # Has the socket been bound to a local address?
self.sock_impl.bind(_get_jsockaddr(self.local_addr), self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
self._config() # Configure timeouts, etc, now that the socket exists
self.sock_impl.connect(_get_jsockaddr(addr))
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def connect(self, addr):
"This signifies a client socket"
self._do_connect(addr)
self._setup()
def connect_ex(self, addr):
"This signifies a client socket"
if not self.sock_impl:
self._do_connect(addr)
if self.sock_impl.finish_connect():
self._setup()
if self.mode == MODE_NONBLOCKING:
return errno.EISCONN
return 0
return errno.EINPROGRESS
def _setup(self):
if self.mode != MODE_NONBLOCKING:
self.istream = self.sock_impl.jsocket.getInputStream()
self.ostream = self.sock_impl.jsocket.getOutputStream()
def recv(self, n):
try:
if not self.sock_impl: raise error(errno.ENOTCONN, 'Socket is not connected')
if self.sock_impl.jchannel.isConnectionPending():
self.sock_impl.jchannel.finishConnect()
data = jarray.zeros(n, 'b')
m = self.sock_impl.read(data)
if m == -1:#indicates EOF has been reached, so we just return the empty string
return ""
elif m <= 0:
if self.mode == MODE_NONBLOCKING:
raise would_block_error()
return ""
if m < n:
data = data[:m]
return data.tostring()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def recvfrom(self, n):
return self.recv(n), None
def send(self, s):
try:
if not self.sock_impl: raise error(errno.ENOTCONN, 'Socket is not connected')
if self.sock_impl.jchannel.isConnectionPending():
self.sock_impl.jchannel.finishConnect()
numwritten = self.sock_impl.write(s)
if numwritten == 0 and self.mode == MODE_NONBLOCKING:
raise would_block_error()
return numwritten
except java.lang.Exception, jlx:
raise _map_exception(jlx)
sendall = send
def getsockname(self):
try:
if not self.sock_impl:
host, port = self.local_addr or ("", 0)
host = java.net.InetAddress.getByName(host).getHostAddress()
else:
if self.server:
host = self.sock_impl.jsocket.getInetAddress().getHostAddress()
else:
host = self.sock_impl.jsocket.getLocalAddress().getHostAddress()
port = self.sock_impl.jsocket.getLocalPort()
return (host, port)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def getpeername(self):
try:
assert self.sock_impl
assert not self.server
host = self.sock_impl.jsocket.getInetAddress().getHostAddress()
port = self.sock_impl.jsocket.getPort()
return (host, port)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def close(self):
try:
if self.istream:
self.istream.close()
if self.ostream:
self.ostream.close()
if self.sock_impl:
self.sock_impl.close()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
class _udpsocket(_nonblocking_api_mixin):
sock_impl = None
connected = False
def __init__(self):
_nonblocking_api_mixin.__init__(self)
def bind(self, addr):
try:
assert not self.sock_impl
self.sock_impl = _datagram_socket_impl(_get_jsockaddr(addr, True), self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
self._config()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _do_connect(self, addr):
try:
assert not self.connected, "Datagram Socket is already connected"
if not self.sock_impl:
self.sock_impl = _datagram_socket_impl()
self._config()
self.sock_impl.connect(_get_jsockaddr(addr))
self.connected = True
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def connect(self, addr):
self._do_connect(addr)
def connect_ex(self, addr):
if not self.sock_impl:
self._do_connect(addr)
return 0
def sendto(self, data, p1, p2=None):
try:
if not p2:
flags, addr = 0, p1
else:
flags, addr = 0, p2
if not self.sock_impl:
self.sock_impl = _datagram_socket_impl()
self._config()
byte_array = java.lang.String(data).getBytes('iso-8859-1')
result = self.sock_impl.sendto(byte_array, _get_jsockaddr(addr, True), flags)
return result
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def send(self, data, flags=None):
if not self.connected: raise error(errno.ENOTCONN, "Socket is not connected")
byte_array = java.lang.String(data).getBytes('iso-8859-1')
return self.sock_impl.send(byte_array, flags)
def recvfrom(self, num_bytes, flags=None):
"""
There is some disagreement as to what the behaviour should be if
a recvfrom operation is requested on an unbound socket.
See the following links for more information
http://bugs.jython.org/issue1005
http://bugs.sun.com/view_bug.do?bug_id=6621689
"""
try:
# This is the old 2.1 behaviour
#assert self.sock_impl
# This is amak's preferred interpretation
#raise error(errno.ENOTCONN, "Recvfrom on unbound udp socket meaningless operation")
# And this is the option for cpython compatibility
if not self.sock_impl:
self.sock_impl = _datagram_socket_impl()
self._config()
return self.sock_impl.recvfrom(num_bytes, flags)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def recv(self, num_bytes, flags=None):
if not self.sock_impl: raise error(errno.ENOTCONN, "Socket is not connected")
try:
return self.sock_impl.recv(num_bytes, flags)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def getsockname(self):
try:
assert self.sock_impl
host = self.sock_impl.jsocket.getLocalAddress().getHostAddress()
port = self.sock_impl.jsocket.getLocalPort()
return (host, port)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def getpeername(self):
try:
assert self.sock
host = self.sock_impl.jsocket.getInetAddress().getHostAddress()
port = self.sock_impl.jsocket.getPort()
return (host, port)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def __del__(self):
self.close()
# Method names exposed on _socketobject via generated pass-through
# wrappers (see the exec loop in _socketobject).
_socketmethods = (
    'bind', 'connect', 'connect_ex', 'fileno', 'listen',
    'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
    'sendall', 'setblocking',
    'settimeout', 'gettimeout', 'shutdown', 'getchannel')
# All the method names that must be delegated to either the real socket
# object or the _closedsocket object.
_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
                     "send", "sendto")
class _closedsocket(object):
    """Stand-in installed after close(); every operation raises EBADF."""
    __slots__ = []
    def _dummy(*args):
        raise error(errno.EBADF, 'Bad file descriptor')
    # All _delegate_methods must also be initialized here.
    send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
    __getattr__ = _dummy
# Registry of open _socketobject instances so they can all be closed
# at interpreter shutdown via _closeActiveSockets().
_active_sockets = set()
def _closeActiveSockets():
    """Close every registered socket, reporting (not raising) failures."""
    for socket in _active_sockets.copy():
        try:
            socket.close()
        except error:
            msg = 'Problem closing socket: %s: %r' % (socket, sys.exc_info())
            print >> sys.stderr, msg
# High-level socket wrapper with reference counting.  Delegates the real
# work to a _realsocket (_tcpsocket/_udpsocket); the reference count lets
# dup() and makefile() share one underlying implementation socket.
class _socketobject(object):
    __doc__ = _realsocket.__doc__
    __slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)
    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
        if _sock is None:
            _sock = _realsocket(family, type, proto)
            _sock.reference_count += 1
        elif isinstance(_sock, _nonblocking_api_mixin):
            _sock.reference_count += 1
        self._sock = _sock
        # Bind data-transfer methods straight through to the
        # implementation object.
        for method in _delegate_methods:
            meth = getattr(_sock, method, None)
            if meth:
                setattr(self, method, meth)
        _active_sockets.add(self)
    def close(self):
        try:
            _active_sockets.remove(self)
        except KeyError:
            pass
        _sock = self._sock
        if isinstance(_sock, _nonblocking_api_mixin):
            _sock.close_lock.acquire()
            try:
                _sock.reference_count -=1
                if not _sock.reference_count:
                    _sock.close()
                # Swap in a stub that raises EBADF on any further use.
                self._sock = _closedsocket()
                dummy = self._sock._dummy
                for method in _delegate_methods:
                    setattr(self, method, dummy)
                self.send = self.recv = self.sendto = self.recvfrom = \
                    self._sock._dummy
            finally:
                _sock.close_lock.release()
    #close.__doc__ = _realsocket.close.__doc__
    def accept(self):
        sock, addr = self._sock.accept()
        return _socketobject(_sock=sock), addr
    #accept.__doc__ = _realsocket.accept.__doc__
    def dup(self):
        """dup() -> socket object
        Return a new socket object connected to the same system resource."""
        _sock = self._sock
        if not isinstance(_sock, _nonblocking_api_mixin):
            return _socketobject(_sock=_sock)
        _sock.close_lock.acquire()
        try:
            duped = _socketobject(_sock=_sock)
        finally:
            _sock.close_lock.release()
        return duped
    def makefile(self, mode='r', bufsize=-1):
        """makefile([mode[, bufsize]]) -> file object
        Return a regular file object corresponding to the socket. The mode
        and bufsize arguments are as for the built-in open() function."""
        _sock = self._sock
        if not isinstance(_sock, _nonblocking_api_mixin):
            return _fileobject(_sock, mode, bufsize)
        _sock.close_lock.acquire()
        try:
            fileobject = _fileobject(_sock, mode, bufsize)
        finally:
            _sock.close_lock.release()
        return fileobject
    family = property(lambda self: self._sock.family, doc="the socket family")
    type = property(lambda self: self._sock.type, doc="the socket type")
    proto = property(lambda self: self._sock.proto, doc="the socket protocol")
    # Generate simple pass-through wrappers for the remaining methods.
    _s = ("def %s(self, *args): return self._sock.%s(*args)\n\n"
          #"%s.__doc__ = _realsocket.%s.__doc__\n")
          )
    for _m in _socketmethods:
        #exec _s % (_m, _m, _m, _m)
        exec _s % (_m, _m)
    del _m, _s
socket = SocketType = _socketobject
class _fileobject(object):
    """Faux file object attached to a socket object.

    Buffering follows CPython's socket._fileobject: bufsize 0 means
    unbuffered (reads go a byte at a time), 1 means line buffering for
    writes, anything larger is the chunk size.
    """
    default_bufsize = 8192
    name = "<socket>"
    __slots__ = ["mode", "bufsize", "softspace",
                 # "closed" is a property, see below
                 "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf",
                 "_close"]
    def __init__(self, sock, mode='rb', bufsize=-1, close=False):
        self._sock = sock
        if isinstance(sock, _nonblocking_api_mixin):
            sock.reference_count += 1
        self.mode = mode # Not actually used in this version
        if bufsize < 0:
            bufsize = self.default_bufsize
        self.bufsize = bufsize
        self.softspace = False
        if bufsize == 0:
            # "Unbuffered": read one byte per recv().
            self._rbufsize = 1
        elif bufsize == 1:
            # Line buffering applies to writes only; reads use default.
            self._rbufsize = self.default_bufsize
        else:
            self._rbufsize = bufsize
        self._wbufsize = bufsize
        self._rbuf = "" # A string
        self._wbuf = [] # A list of strings
        self._close = close
    def _getclosed(self):
        return self._sock is None
    closed = property(_getclosed, doc="True if the file is closed")
    def close(self):
        # Flush pending writes, then drop (and possibly close) the socket.
        try:
            if self._sock:
                self.flush()
        finally:
            if self._sock:
                if isinstance(self._sock, _nonblocking_api_mixin):
                    self._sock.reference_count -= 1
                    if not self._sock.reference_count or self._close:
                        self._sock.close()
                elif self._close:
                    self._sock.close()
            self._sock = None
    def __del__(self):
        try:
            self.close()
        except:
            # close() may fail if __init__ didn't complete
            pass
    def flush(self):
        if self._wbuf:
            buffer = "".join(self._wbuf)
            self._wbuf = []
            self._sock.sendall(buffer)
    def fileno(self):
        return self._sock.fileno()
    def write(self, data):
        data = str(data) # XXX Should really reject non-string non-buffers
        if not data:
            return
        self._wbuf.append(data)
        # Flush when unbuffered, when line-buffered and a newline arrived,
        # or when the write buffer has grown past its limit.
        if (self._wbufsize == 0 or
            self._wbufsize == 1 and '\n' in data or
            self._get_wbuf_len() >= self._wbufsize):
            self.flush()
    def writelines(self, list):
        # XXX We could do better here for very long lists
        # XXX Should really reject non-string non-buffers
        self._wbuf.extend(filter(None, map(str, list)))
        if (self._wbufsize <= 1 or
            self._get_wbuf_len() >= self._wbufsize):
            self.flush()
    def _get_wbuf_len(self):
        # Total number of buffered, unflushed bytes.
        buf_len = 0
        for x in self._wbuf:
            buf_len += len(x)
        return buf_len
    def read(self, size=-1):
        data = self._rbuf
        if size < 0:
            # Read until EOF
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            if self._rbufsize <= 1:
                recv_size = self.default_bufsize
            else:
                recv_size = self._rbufsize
            while True:
                data = self._sock.recv(recv_size)
                if not data:
                    break
                buffers.append(data)
            return "".join(buffers)
        else:
            # Read until size bytes or EOF seen, whichever comes first
            buf_len = len(data)
            if buf_len >= size:
                # Satisfy entirely from the read buffer.
                self._rbuf = data[size:]
                return data[:size]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                left = size - buf_len
                recv_size = max(self._rbufsize, left)
                data = self._sock.recv(recv_size)
                if not data:
                    break
                buffers.append(data)
                n = len(data)
                if n >= left:
                    # Keep any excess for the next read.
                    self._rbuf = data[left:]
                    buffers[-1] = data[:left]
                    break
                buf_len += n
            return "".join(buffers)
    def readline(self, size=-1):
        data = self._rbuf
        if size < 0:
            # Read until \n or EOF, whichever comes first
            if self._rbufsize <= 1:
                # Speed up unbuffered case
                assert data == ""
                buffers = []
                recv = self._sock.recv
                while data != "\n":
                    data = recv(1)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            nl = data.find('\n')
            if nl >= 0:
                # A whole line is already buffered.
                nl += 1
                self._rbuf = data[nl:]
                return data[:nl]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                data = self._sock.recv(self._rbufsize)
                if not data:
                    break
                buffers.append(data)
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    buffers[-1] = data[:nl]
                    break
            return "".join(buffers)
        else:
            # Read until size bytes or \n or EOF seen, whichever comes first
            nl = data.find('\n', 0, size)
            if nl >= 0:
                nl += 1
                self._rbuf = data[nl:]
                return data[:nl]
            buf_len = len(data)
            if buf_len >= size:
                self._rbuf = data[size:]
                return data[:size]
            buffers = []
            if data:
                buffers.append(data)
            self._rbuf = ""
            while True:
                data = self._sock.recv(self._rbufsize)
                if not data:
                    break
                buffers.append(data)
                left = size - buf_len
                nl = data.find('\n', 0, left)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    buffers[-1] = data[:nl]
                    break
                n = len(data)
                if n >= left:
                    self._rbuf = data[left:]
                    buffers[-1] = data[:left]
                    break
                buf_len += n
            return "".join(buffers)
    def readlines(self, sizehint=0):
        # Stop once sizehint bytes have been accumulated (0 = no limit).
        total = 0
        list = []
        while True:
            line = self.readline()
            if not line:
                break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
    # Iterator protocols
    def __iter__(self):
        return self
    def next(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line
# Define the SSL support
class ssl:
    """Minimal SSL layer over an already-connected plain socket, using
    javax.net.ssl.  keyfile/certfile are accepted for CPython
    compatibility but not used here.
    """
    def __init__(self, plain_sock, keyfile=None, certfile=None):
        try:
            self.ssl_sock = self._make_ssl_socket(plain_sock)
            self._in_buf = java.io.BufferedInputStream(self.ssl_sock.getInputStream())
            self._out_buf = java.io.BufferedOutputStream(self.ssl_sock.getOutputStream())
        except java.lang.Exception, jlx:
            raise _map_exception(jlx)
    def _make_ssl_socket(self, plain_socket, auto_close=0):
        """Layer an SSLSocket over the existing java socket and handshake."""
        java_net_socket = plain_socket._get_jsocket()
        assert isinstance(java_net_socket, java.net.Socket)
        host = java_net_socket.getInetAddress().getHostAddress()
        port = java_net_socket.getPort()
        factory = javax.net.ssl.SSLSocketFactory.getDefault();
        ssl_socket = factory.createSocket(java_net_socket, host, port, auto_close)
        # Enable every cipher suite the JVM supports.
        ssl_socket.setEnabledCipherSuites(ssl_socket.getSupportedCipherSuites())
        ssl_socket.startHandshake()
        return ssl_socket
    def read(self, n=4096):
        """Read up to n bytes of decrypted data; "" on EOF."""
        try:
            data = jarray.zeros(n, 'b')
            m = self._in_buf.read(data, 0, n)
            if m <= 0:
                return ""
            if m < n:
                data = data[:m]
            return data.tostring()
        except java.lang.Exception, jlx:
            raise _map_exception(jlx)
    def write(self, s):
        """Write s through the SSL stream and return len(s)."""
        try:
            self._out_buf.write(s)
            self._out_buf.flush()
            return len(s)
        except java.lang.Exception, jlx:
            raise _map_exception(jlx)
    def _get_server_cert(self):
        try:
            return self.ssl_sock.getSession().getPeerCertificates()[0]
        except java.lang.Exception, jlx:
            raise _map_exception(jlx)
    def server(self):
        """Return the server certificate's subject DN as a string."""
        cert = self._get_server_cert()
        return cert.getSubjectDN().toString()
    def issuer(self):
        """Return the server certificate's issuer DN as a string."""
        cert = self._get_server_cert()
        return cert.getIssuerDN().toString()
_realssl = ssl
def ssl(sock, keyfile=None, certfile=None):
    """Wrap a socket in SSL.  Shadows the class of the same name above,
    which stays reachable as _realssl."""
    if hasattr(sock, "_sock"):
        # Unwrap a high-level _socketobject down to the implementation.
        sock = sock._sock
    return _realssl(sock, keyfile, certfile)
def test():
    """Ad-hoc smoke test: fetch / from a web server and print the reply.

    NOTE(review): connects to host "" on port 80, so this only works
    where such a server is reachable.
    """
    s = socket(AF_INET, SOCK_STREAM)
    s.connect(("", 80))
    s.send("GET / HTTP/1.0\r\n\r\n")
    while 1:
        data = s.recv(2000)
        print data
        if not data:
            break

if __name__ == '__main__':
    test()
|
apache-2.0
|
katfang/django-guardian
|
guardian/admin.py
|
37
|
15993
|
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from guardian.compat import url, patterns
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext, ugettext_lazy as _
from guardian.compat import get_user_model
from guardian.forms import UserObjectPermissionsForm
from guardian.forms import GroupObjectPermissionsForm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_perms_for_model
from guardian.models import Group
class AdminUserObjectPermissionsForm(UserObjectPermissionsForm):
    """
    Extends :form:`UserObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns a
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
    """
    def get_obj_perms_field_widget(self):
        # Second argument is the widget's is_stacked flag.
        return FilteredSelectMultiple(_("Permissions"), False)
class AdminGroupObjectPermissionsForm(GroupObjectPermissionsForm):
    """
    Extends :form:`GroupObjectPermissionsForm`. It only overrides the
    ``get_obj_perms_field_widget`` method so that it returns a
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
    """
    def get_obj_perms_field_widget(self):
        # Second argument is the widget's is_stacked flag.
        return FilteredSelectMultiple(_("Permissions"), False)
class GuardedModelAdmin(admin.ModelAdmin):
    """
    Extends ``django.contrib.admin.ModelAdmin`` class. Provides some extra
    views for object permissions management at admin panel. It also changes
    default ``change_form_template`` option to
    ``'admin/guardian/model/change_form.html'`` which is required for proper
    url (object permissions related) being shown at the model pages.

    **Extra options**

    ``GuardedModelAdmin.obj_perms_manage_template``
        *Default*: ``admin/guardian/model/obj_perms_manage.html``

    ``GuardedModelAdmin.obj_perms_manage_user_template``
        *Default*: ``admin/guardian/model/obj_perms_manage_user.html``

    ``GuardedModelAdmin.obj_perms_manage_group_template``
        *Default*: ``admin/guardian/model/obj_perms_manage_group.html``

    ``GuardedModelAdmin.user_can_access_owned_objects_only``
        *Default*: ``False``

        If this would be set to ``True``, ``request.user`` would be used to
        filter out objects he or she doesn't own (checking ``user`` field
        of used model - field name may be overridden by
        ``user_owned_objects_field`` option).

        .. note::
           Please remember that this will **NOT** affect superusers!
           Admins would still see all items.

    ``GuardedModelAdmin.user_can_access_owned_by_group_objects_only``
        *Default*: ``False``

        If this would be set to ``True``, ``request.user`` would be used to
        filter out objects her or his group doesn't own (checking if any group
        user belongs to is set as ``group`` field of the object; name of the
        field can be changed by overriding ``group_owned_objects_field``).

        .. note::
           Please remember that this will **NOT** affect superusers!
           Admins would still see all items.

    ``GuardedModelAdmin.group_owned_objects_field``
        *Default*: ``group``

    **Usage example**

    Just use :admin:`GuardedModelAdmin` instead of
    ``django.contrib.admin.ModelAdmin``.

    .. code-block:: python

        from django.contrib import admin
        from guardian.admin import GuardedModelAdmin
        from myapp.models import Author

        class AuthorAdmin(GuardedModelAdmin):
            pass

        admin.site.register(Author, AuthorAdmin)
    """
    change_form_template = \
        'admin/guardian/model/change_form.html'
    obj_perms_manage_template = \
        'admin/guardian/model/obj_perms_manage.html'
    obj_perms_manage_user_template = \
        'admin/guardian/model/obj_perms_manage_user.html'
    obj_perms_manage_group_template = \
        'admin/guardian/model/obj_perms_manage_group.html'
    # Ownership-based changelist filtering switches; see class docstring.
    user_can_access_owned_objects_only = False
    user_owned_objects_field = 'user'
    user_can_access_owned_by_group_objects_only = False
    group_owned_objects_field = 'group'

    def queryset(self, request):
        """
        Return the admin changelist queryset, optionally restricted to
        objects owned by ``request.user`` or by one of the user's groups.

        Superusers always get the unfiltered queryset.
        """
        qs = super(GuardedModelAdmin, self).queryset(request)
        if request.user.is_superuser:
            return qs
        if self.user_can_access_owned_objects_only:
            filters = {self.user_owned_objects_field: request.user}
            qs = qs.filter(**filters)
        if self.user_can_access_owned_by_group_objects_only:
            User = get_user_model()
            # Follow the reverse relation from the object's group field to
            # the users that belong to that group.
            user_rel_name = User.groups.field.related_query_name()
            qs_key = '%s__%s' % (self.group_owned_objects_field, user_rel_name)
            filters = {qs_key: request.user}
            qs = qs.filter(**filters)
        return qs

    def get_urls(self):
        """
        Extends standard admin model urls with the following:

        - ``.../permissions/`` under ``app_model_permissions`` url name (params: object_pk)
        - ``.../permissions/user-manage/<user_id>/`` under ``app_model_permissions_manage_user`` url name (params: object_pk, user_pk)
        - ``.../permissions/group-manage/<group_id>/`` under ``app_model_permissions_manage_group`` url name (params: object_pk, group_pk)

        .. note::
           ``...`` above are standard, instance detail url (i.e.
           ``/admin/flatpages/1/``)
        """
        urls = super(GuardedModelAdmin, self).get_urls()
        info = self.model._meta.app_label, self.model._meta.module_name
        myurls = patterns('',
            url(r'^(?P<object_pk>.+)/permissions/$',
                view=self.admin_site.admin_view(self.obj_perms_manage_view),
                name='%s_%s_permissions' % info),
            url(r'^(?P<object_pk>.+)/permissions/user-manage/(?P<user_id>\-?\d+)/$',
                view=self.admin_site.admin_view(
                    self.obj_perms_manage_user_view),
                name='%s_%s_permissions_manage_user' % info),
            url(r'^(?P<object_pk>.+)/permissions/group-manage/(?P<group_id>\-?\d+)/$',
                view=self.admin_site.admin_view(
                    self.obj_perms_manage_group_view),
                name='%s_%s_permissions_manage_group' % info),
        )
        # Custom urls must come first so the default admin detail url does
        # not swallow them.
        return myurls + urls

    def get_obj_perms_base_context(self, request, obj):
        """
        Returns context dictionary with common admin and object permissions
        related content.
        """
        context = {
            'adminform': {'model_admin': self},
            'media': self.media,
            'object': obj,
            'app_label': self.model._meta.app_label,
            'opts': self.model._meta,
            # Prefer the model's __unicode__ when present (Python 2 era
            # code); fall back to str() otherwise.
            'original': hasattr(obj, '__unicode__') and obj.__unicode__() or\
                str(obj),
            'has_change_permission': self.has_change_permission(request, obj),
            'model_perms': get_perms_for_model(obj),
            'title': _("Object permissions"),
        }
        return context

    def obj_perms_manage_view(self, request, object_pk):
        """
        Main object permissions view. Presents all users and groups with any
        object permissions for the current model *instance*. Users or groups
        without object permissions for related *instance* would **not** be
        shown. In order to add or manage user or group one should use links or
        forms presented within the page.
        """
        obj = get_object_or_404(self.queryset(request), pk=object_pk)
        users_perms = SortedDict(
            get_users_with_perms(obj, attach_perms=True,
                                 with_group_users=False))
        users_perms.keyOrder.sort(key=lambda user: user.username)
        groups_perms = SortedDict(
            get_groups_with_perms(obj, attach_perms=True))
        groups_perms.keyOrder.sort(key=lambda group: group.name)
        if request.method == 'POST' and 'submit_manage_user' in request.POST:
            user_form = UserManage(request.POST)
            group_form = GroupManage()
            info = (
                self.admin_site.name,
                self.model._meta.app_label,
                self.model._meta.module_name
            )
            if user_form.is_valid():
                user_id = user_form.cleaned_data['user'].id
                url = reverse(
                    '%s:%s_%s_permissions_manage_user' % info,
                    args=[obj.pk, user_id]
                )
                return redirect(url)
        elif request.method == 'POST' and 'submit_manage_group' in request.POST:
            user_form = UserManage()
            group_form = GroupManage(request.POST)
            info = (
                self.admin_site.name,
                self.model._meta.app_label,
                self.model._meta.module_name
            )
            if group_form.is_valid():
                group_id = group_form.cleaned_data['group'].id
                url = reverse(
                    '%s:%s_%s_permissions_manage_group' % info,
                    args=[obj.pk, group_id]
                )
                return redirect(url)
        else:
            user_form = UserManage()
            group_form = GroupManage()
        # Invalid form submissions fall through and re-render the page with
        # the bound (error-carrying) form.
        context = self.get_obj_perms_base_context(request, obj)
        context['users_perms'] = users_perms
        context['groups_perms'] = groups_perms
        context['user_form'] = user_form
        context['group_form'] = group_form
        return render_to_response(self.get_obj_perms_manage_template(),
            context, RequestContext(request, current_app=self.admin_site.name))

    def get_obj_perms_manage_template(self):
        """
        Returns main object permissions admin template. May be overridden if
        need to change it dynamically.

        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function would
           return ``"admin/guardian/grappelli/obj_perms_manage.html"``.
        """
        if 'grappelli' in settings.INSTALLED_APPS:
            return 'admin/guardian/contrib/grappelli/obj_perms_manage.html'
        return self.obj_perms_manage_template

    def obj_perms_manage_user_view(self, request, object_pk, user_id):
        """
        Manages selected users' permissions for current object.
        """
        user = get_object_or_404(get_user_model(), id=user_id)
        obj = get_object_or_404(self.queryset(request), pk=object_pk)
        form_class = self.get_obj_perms_manage_user_form()
        form = form_class(user, obj, request.POST or None)
        if request.method == 'POST' and form.is_valid():
            form.save_obj_perms()
            msg = ugettext("Permissions saved.")
            messages.success(request, msg)
            info = (
                self.admin_site.name,
                self.model._meta.app_label,
                self.model._meta.module_name
            )
            url = reverse(
                '%s:%s_%s_permissions_manage_user' % info,
                args=[obj.pk, user.id]
            )
            # Redirect after successful POST to avoid double submission.
            return redirect(url)
        context = self.get_obj_perms_base_context(request, obj)
        context['user_obj'] = user
        context['user_perms'] = get_perms(user, obj)
        context['form'] = form
        return render_to_response(self.get_obj_perms_manage_user_template(),
            context, RequestContext(request, current_app=self.admin_site.name))

    def get_obj_perms_manage_user_template(self):
        """
        Returns object permissions for user admin template. May be overridden
        if need to change it dynamically.

        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function would
           return ``"admin/guardian/grappelli/obj_perms_manage_user.html"``.
        """
        if 'grappelli' in settings.INSTALLED_APPS:
            return 'admin/guardian/contrib/grappelli/obj_perms_manage_user.html'
        return self.obj_perms_manage_user_template

    def get_obj_perms_manage_user_form(self):
        """
        Returns form class for user object permissions management. By default
        :form:`AdminUserObjectPermissionsForm` is returned.
        """
        return AdminUserObjectPermissionsForm

    def obj_perms_manage_group_view(self, request, object_pk, group_id):
        """
        Manages selected groups' permissions for current object.
        """
        group = get_object_or_404(Group, id=group_id)
        obj = get_object_or_404(self.queryset(request), pk=object_pk)
        form_class = self.get_obj_perms_manage_group_form()
        form = form_class(group, obj, request.POST or None)
        if request.method == 'POST' and form.is_valid():
            form.save_obj_perms()
            msg = ugettext("Permissions saved.")
            messages.success(request, msg)
            info = (
                self.admin_site.name,
                self.model._meta.app_label,
                self.model._meta.module_name
            )
            url = reverse(
                '%s:%s_%s_permissions_manage_group' % info,
                args=[obj.pk, group.id]
            )
            # Redirect after successful POST to avoid double submission.
            return redirect(url)
        context = self.get_obj_perms_base_context(request, obj)
        context['group_obj'] = group
        context['group_perms'] = get_perms(group, obj)
        context['form'] = form
        return render_to_response(self.get_obj_perms_manage_group_template(),
            context, RequestContext(request, current_app=self.admin_site.name))

    def get_obj_perms_manage_group_template(self):
        """
        Returns object permissions for group admin template. May be overridden
        if need to change it dynamically.

        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function would
           return ``"admin/guardian/grappelli/obj_perms_manage_group.html"``.
        """
        if 'grappelli' in settings.INSTALLED_APPS:
            return 'admin/guardian/contrib/grappelli/obj_perms_manage_group.html'
        return self.obj_perms_manage_group_template

    def get_obj_perms_manage_group_form(self):
        """
        Returns form class for group object permissions management. By default
        :form:`AdminGroupObjectPermissionsForm` is returned.
        """
        return AdminGroupObjectPermissionsForm
class UserManage(forms.Form):
    """
    Form used on the object permissions page to pick the user (by username)
    whose permissions should be managed.
    """
    user = forms.RegexField(label=_("Username"), max_length=30,
                            regex=r'^[\w.@+-]+$',
                            error_messages = {
                                'invalid': _("This value may contain only letters, numbers and "
                                             "@/./+/-/_ characters."),
                                'does_not_exist': _("This user does not exist")})

    def clean_user(self):
        """
        Returns ``User`` instance based on the given username.
        """
        # NOTE(review): looks up the literal ``username`` field; custom user
        # models with a different USERNAME_FIELD would not match — confirm.
        username = self.cleaned_data['user']
        try:
            user = get_user_model().objects.get(username=username)
            return user
        except get_user_model().DoesNotExist:
            raise forms.ValidationError(
                self.fields['user'].error_messages['does_not_exist'])
class GroupManage(forms.Form):
    """
    Form used on the object permissions page to pick the group (by name)
    whose permissions should be managed.
    """
    group = forms.CharField(max_length=80, error_messages={'does_not_exist':
                            _("This group does not exist")})

    def clean_group(self):
        """
        Returns ``Group`` instance based on the given group name.
        """
        name = self.cleaned_data['group']
        try:
            group = Group.objects.get(name=name)
            return group
        except Group.DoesNotExist:
            raise forms.ValidationError(
                self.fields['group'].error_messages['does_not_exist'])
|
bsd-2-clause
|
chris-chris/tensorflow
|
tensorflow/contrib/slim/python/slim/nets/resnet_v2_test.py
|
49
|
18547
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
    [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  dims = (batch_size, height, width, channels)
  if any(dim is None for dim in dims):
    return array_ops.placeholder(dtypes.float32, dims)
  # Additively-separable "mesh grid" over the spatial dimensions, replicated
  # across the batch and channel dimensions.
  mesh = np.reshape(np.arange(height), [height, 1]) + np.reshape(
      np.arange(width), [1, width])
  spatial = np.reshape(mesh, [1, height, width, 1])
  return math_ops.to_float(np.tile(spatial, [batch_size, 1, 1, channels]))
class ResnetUtilsTest(test.TestCase):
  """Tests for the resnet_utils helpers: subsample, conv2d_same and block
  stacking (dense vs. non-dense)."""

  def testSubsampleThreeByThree(self):
    x = array_ops.reshape(math_ops.to_float(math_ops.range(9)), [1, 3, 3, 1])
    x = resnet_utils.subsample(x, 2)
    expected = array_ops.reshape(
        constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])
    with self.test_session():
      self.assertAllClose(x.eval(), expected.eval())

  def testSubsampleFourByFour(self):
    x = array_ops.reshape(math_ops.to_float(math_ops.range(16)), [1, 4, 4, 1])
    x = resnet_utils.subsample(x, 2)
    expected = array_ops.reshape(
        constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])
    with self.test_session():
      self.assertAllClose(x.eval(), expected.eval())

  def testConv2DSameEven(self):
    n, n2 = 4, 2

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = array_ops.reshape(w, [3, 3, 1, 1])

    variable_scope.get_variable('Conv/weights', initializer=w)
    variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
    variable_scope.get_variable_scope().reuse_variables()

    y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = math_ops.to_float([[14, 28, 43, 26], [28, 48, 66, 37],
                                     [43, 66, 84, 46], [26, 37, 46, 22]])
    y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = math_ops.to_float([[14, 43], [43, 84]])
    y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])

    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected

    # For even input sizes, plain 'SAME' convolution at stride 2 differs
    # from conv2d_same (different implicit padding).
    y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = math_ops.to_float([[48, 37], [37, 22]])
    y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())

  def testConv2DSameOdd(self):
    n, n2 = 5, 3

    # Input image.
    x = create_test_input(1, n, n, 1)

    # Convolution kernel.
    w = create_test_input(1, 3, 3, 1)
    w = array_ops.reshape(w, [3, 3, 1, 1])

    variable_scope.get_variable('Conv/weights', initializer=w)
    variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
    variable_scope.get_variable_scope().reuse_variables()

    y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
    y1_expected = math_ops.to_float([[14, 28, 43, 58, 34],
                                     [28, 48, 66, 84, 46],
                                     [43, 66, 84, 102, 55],
                                     [58, 84, 102, 120, 64],
                                     [34, 46, 55, 64, 30]])
    y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])

    y2 = resnet_utils.subsample(y1, 2)
    y2_expected = math_ops.to_float([[14, 43, 34],
                                     [43, 84, 55],
                                     [34, 55, 30]])
    y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])

    y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
    y3_expected = y2_expected

    # For odd input sizes, plain 'SAME' convolution matches conv2d_same.
    y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
    y4_expected = y2_expected

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertAllClose(y1.eval(), y1_expected.eval())
      self.assertAllClose(y2.eval(), y2_expected.eval())
      self.assertAllClose(y3.eval(), y3_expected.eval())
      self.assertAllClose(y4.eval(), y4_expected.eval())

  def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
    """A plain ResNet without extra layers before or after the ResNet blocks."""
    with variable_scope.variable_scope(scope, values=[inputs]):
      with arg_scope([layers.conv2d], outputs_collections='end_points'):
        net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
        end_points = utils.convert_collection_to_dict('end_points')
    return net, end_points

  def testEndPointsV2(self):
    """Test the end points of a tiny v2 bottleneck network."""
    blocks = [
        resnet_v2.resnet_v2_block(
            'block1', base_depth=1, num_units=2, stride=2),
        resnet_v2.resnet_v2_block(
            'block2', base_depth=2, num_units=2, stride=1),
    ]
    inputs = create_test_input(2, 32, 16, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
    # Only the first unit of each block has a projection shortcut.
    expected = [
        'tiny/block1/unit_1/bottleneck_v2/shortcut',
        'tiny/block1/unit_1/bottleneck_v2/conv1',
        'tiny/block1/unit_1/bottleneck_v2/conv2',
        'tiny/block1/unit_1/bottleneck_v2/conv3',
        'tiny/block1/unit_2/bottleneck_v2/conv1',
        'tiny/block1/unit_2/bottleneck_v2/conv2',
        'tiny/block1/unit_2/bottleneck_v2/conv3',
        'tiny/block2/unit_1/bottleneck_v2/shortcut',
        'tiny/block2/unit_1/bottleneck_v2/conv1',
        'tiny/block2/unit_1/bottleneck_v2/conv2',
        'tiny/block2/unit_1/bottleneck_v2/conv3',
        'tiny/block2/unit_2/bottleneck_v2/conv1',
        'tiny/block2/unit_2/bottleneck_v2/conv2',
        'tiny/block2/unit_2/bottleneck_v2/conv3'
    ]
    self.assertItemsEqual(expected, end_points)

  def _stack_blocks_nondense(self, net, blocks):
    """A simplified ResNet Block stacker without output stride control."""
    for block in blocks:
      with variable_scope.variable_scope(block.scope, 'block', [net]):
        for i, unit in enumerate(block.args):
          with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
            net = block.unit_fn(net, rate=1, **unit)
    return net

  def testAtrousValuesBottleneck(self):
    """Verify the values of dense feature extraction by atrous convolution.

    Make sure that dense feature extraction by stack_blocks_dense() followed by
    subsampling gives identical results to feature extraction at the nominal
    network output stride using the simple self._stack_blocks_nondense() above.
    """
    block = resnet_v2.resnet_v2_block
    blocks = [
        block('block1', base_depth=1, num_units=2, stride=2),
        block('block2', base_depth=2, num_units=2, stride=2),
        block('block3', base_depth=4, num_units=2, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    nominal_stride = 8

    # Test both odd and even input dimensions.
    height = 30
    width = 31
    with arg_scope(resnet_utils.resnet_arg_scope()):
      with arg_scope([layers.batch_norm], is_training=False):
        for output_stride in [1, 2, 4, 8, None]:
          with ops.Graph().as_default():
            with self.test_session() as sess:
              random_seed.set_random_seed(0)
              inputs = create_test_input(1, height, width, 3)
              # Dense feature extraction followed by subsampling.
              output = resnet_utils.stack_blocks_dense(inputs, blocks,
                                                       output_stride)
              if output_stride is None:
                factor = 1
              else:
                factor = nominal_stride // output_stride
              output = resnet_utils.subsample(output, factor)
              # Make the two networks use the same weights.
              variable_scope.get_variable_scope().reuse_variables()
              # Feature extraction at the nominal network rate.
              expected = self._stack_blocks_nondense(inputs, blocks)
              sess.run(variables.global_variables_initializer())
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(test.TestCase):
  """Tests with complete small ResNet v2 networks."""

  def _resnet_small(self,
                    inputs,
                    num_classes=None,
                    is_training=None,
                    global_pool=True,
                    output_stride=None,
                    include_root_block=True,
                    reuse=None,
                    scope='resnet_v2_small'):
    """A shallow and thin ResNet v2 for faster tests."""
    block = resnet_v2.resnet_v2_block
    blocks = [
        block('block1', base_depth=1, num_units=3, stride=2),
        block('block2', base_depth=2, num_units=3, stride=2),
        block('block3', base_depth=4, num_units=3, stride=2),
        block('block4', base_depth=8, num_units=2, stride=1),
    ]
    return resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training,
                               global_pool, output_stride, include_root_block,
                               reuse, scope)

  def testClassificationEndPoints(self):
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      logits, end_points = self._resnet_small(
          inputs, num_classes, global_pool=global_pool, scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
    self.assertTrue('predictions' in end_points)
    self.assertListEqual(end_points['predictions'].get_shape().as_list(),
                         [2, 1, 1, num_classes])

  def testClassificationShapes(self):
    global_pool = True
    num_classes = 10
    inputs = create_test_input(2, 224, 224, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(
          inputs, num_classes, global_pool=global_pool, scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 28, 28, 4],
          'resnet/block2': [2, 14, 14, 8],
          'resnet/block3': [2, 7, 7, 16],
          'resnet/block4': [2, 7, 7, 32]
      }
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

  def testFullyConvolutionalEndpointShapes(self):
    global_pool = False
    num_classes = 10
    inputs = create_test_input(2, 321, 321, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(
          inputs, num_classes, global_pool=global_pool, scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 41, 41, 4],
          'resnet/block2': [2, 21, 21, 8],
          'resnet/block3': [2, 11, 11, 16],
          'resnet/block4': [2, 11, 11, 32]
      }
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

  def testRootlessFullyConvolutionalEndpointShapes(self):
    global_pool = False
    num_classes = 10
    inputs = create_test_input(2, 128, 128, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(
          inputs,
          num_classes,
          global_pool=global_pool,
          include_root_block=False,
          scope='resnet')
      endpoint_to_shape = {
          'resnet/block1': [2, 64, 64, 4],
          'resnet/block2': [2, 32, 32, 8],
          'resnet/block3': [2, 16, 16, 16],
          'resnet/block4': [2, 16, 16, 32]
      }
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

  def testAtrousFullyConvolutionalEndpointShapes(self):
    global_pool = False
    num_classes = 10
    output_stride = 8
    inputs = create_test_input(2, 321, 321, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      _, end_points = self._resnet_small(
          inputs,
          num_classes,
          global_pool=global_pool,
          output_stride=output_stride,
          scope='resnet')
      # With output_stride=8, blocks 2-4 keep the block1 spatial size.
      endpoint_to_shape = {
          'resnet/block1': [2, 41, 41, 4],
          'resnet/block2': [2, 41, 41, 8],
          'resnet/block3': [2, 41, 41, 16],
          'resnet/block4': [2, 41, 41, 32]
      }
      for endpoint in endpoint_to_shape:
        shape = endpoint_to_shape[endpoint]
        self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)

  def testAtrousFullyConvolutionalValues(self):
    """Verify dense feature extraction with atrous convolution."""
    nominal_stride = 32
    for output_stride in [4, 8, 16, 32, None]:
      with arg_scope(resnet_utils.resnet_arg_scope()):
        with ops.Graph().as_default():
          with self.test_session() as sess:
            random_seed.set_random_seed(0)
            inputs = create_test_input(2, 81, 81, 3)
            # Dense feature extraction followed by subsampling.
            output, _ = self._resnet_small(
                inputs,
                None,
                is_training=False,
                global_pool=False,
                output_stride=output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            variable_scope.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected, _ = self._resnet_small(
                inputs, None, is_training=False, global_pool=False)
            sess.run(variables.global_variables_initializer())
            self.assertAllClose(
                output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)

  def testUnknownBatchSize(self):
    batch = 2
    height, width = 65, 65
    global_pool = True
    num_classes = 10
    inputs = create_test_input(None, height, width, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      logits, _ = self._resnet_small(
          inputs, num_classes, global_pool=global_pool, scope='resnet')
    self.assertTrue(logits.op.name.startswith('resnet/logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, 1, 1, num_classes])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 1, 1, num_classes))

  def testFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    height, width = 65, 65
    global_pool = False
    inputs = create_test_input(batch, None, None, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
    self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 3, 3, 32))

  def testAtrousFullyConvolutionalUnknownHeightWidth(self):
    batch = 2
    height, width = 65, 65
    global_pool = False
    output_stride = 8
    inputs = create_test_input(batch, None, None, 3)
    with arg_scope(resnet_utils.resnet_arg_scope()):
      output, _ = self._resnet_small(
          inputs, None, global_pool=global_pool, output_stride=output_stride)
    self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
    images = create_test_input(batch, height, width, 3)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(output, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch, 9, 9, 32))
# Run the test suite when executed as a script.
if __name__ == '__main__':
  test.main()
|
apache-2.0
|
hubo1016/vlcp
|
doc/source/ext/gendoc.py
|
1
|
4701
|
'''
Created on 2017/1/13
:author: hubo
'''
from configlist import list_config
from listmodules import list_modules, list_proxy
import jinja2
import os
import os.path
import shutil
from pkgutil import walk_packages
from vlcp.event import Event
def _merge_all(func):
def _func():
result = func('vlcp')
result.update(func('vlcp_docker'))
return result
return _func
# (data function, template file, generated include file, docnames to mark
# outdated) tuples driving generate_doc() below.
generate_list = [(_merge_all(list_config),
                  'allconfigurations.rst.tmpl',
                  'allconfigurations.inc',
                  ['configurations']),
                 (_merge_all(list_proxy),
                  'allproxyconfigs.rst.tmpl',
                  'allproxyconfigs.inc',
                  ['configurations']),
                 (_merge_all(list_modules),
                  'allmodulelist.rst.tmpl',
                  'allmodulelist.inc',
                  ['modulelist']),
                 (_merge_all(list_proxy),
                  'allproxymodules.rst.tmpl',
                  'allproxymodules.inc',
                  ['modulelist'])
                 ]
def generate_doc(app, env, added, changed, removed):
    """Sphinx ``env-get-outdated`` handler: render every template listed in
    ``generate_list`` into ``gensrc/`` and report which documents depend on
    the regenerated include files.

    :param app: the Sphinx application object.
    :param env: the build environment (``env.srcdir`` is the doc source dir).
    :param added, changed, removed: unused, required by the event signature.
    :return: list of docnames that should be re-read.
    """
    updated = set()
    if not os.path.isdir(os.path.join(env.srcdir, 'gensrc')):
        os.makedirs(os.path.join(env.srcdir, 'gensrc'))
    for func, source, target, update_list in generate_list:
        app.info('Generating %r ...' % (target,))
        config_dict = func()
        with open(os.path.join(env.srcdir, source), 'rb') as f:
            text = f.read().decode('utf-8')
        template = jinja2.Template(text)
        result = template.render(data_input=config_dict)
        # Fix: write UTF-8 bytes explicitly. The templates are read and
        # decoded as UTF-8, but the rendered unicode was previously written
        # through a text-mode file using the platform default encoding,
        # which breaks on non-ASCII content (and on Python 2 entirely).
        with open(os.path.join(env.srcdir, 'gensrc', target), 'wb') as f:
            f.write(result.encode('utf-8'))
        updated.update(update_list)
    return list(updated)
exclude_list = []
def generate_references(app):
    """Sphinx ``builder-inited`` handler: generate per-module/per-package API
    reference pages for ``vlcp`` and ``vlcp_docker`` under ``gensrc/ref``.

    :return: list of docnames that were (re)generated plus ``'reference'``.
    """
    # Pick the git branch used for "view source" links; ReadTheDocs exposes
    # the active version via an environment variable.
    branch = 'master'
    if 'READTHEDOCS_VERSION' in os.environ:
        branch = os.environ['READTHEDOCS_VERSION']
        if branch == 'latest':
            branch = 'master'
    with open(os.path.join(app.srcdir, 'ref_package.rst.tmpl'), 'rb') as f:
        text = f.read().decode('utf-8')
    package_template = jinja2.Template(text)
    with open(os.path.join(app.srcdir, 'ref_module.rst.tmpl'), 'rb') as f:
        text = f.read().decode('utf-8')
    module_template = jinja2.Template(text)
    # Start from a clean tree; ignore errors if it does not exist yet.
    shutil.rmtree(os.path.join(app.srcdir, 'gensrc/ref'), True)
    def _build_package(root_package, githubproj):
        """Render a .rst page for every module in *root_package*, yielding
        each generated (extension-less) doc path."""
        pkg = __import__(root_package, fromlist=['_'])
        for _, module, is_pkg in walk_packages(pkg.__path__, root_package + '.'):
            if any(module.startswith(e) for e in exclude_list):
                continue
            app.info('Generating reference for ' + module + '...')
            if is_pkg:
                # Packages get an __init__ page rendered from the package
                # template.
                package_path = 'gensrc/ref/' + module.replace('.', '/')
                module_path = os.path.join(package_path, '__init__')
                result = package_template.render(package_name = module,
                                                 #package_path = '/' + package_path,
                                                 package_path = '.',
                                                 githubproject = githubproj,
                                                 branch = branch)
            else:
                module_path = 'gensrc/ref/' + module.replace('.', '/')
                package_path = os.path.dirname(module_path)
                result = module_template.render(module_name = module,
                                                githubproject = githubproj,
                                                branch = branch)
            if not os.path.isdir(os.path.join(app.srcdir, package_path)):
                app.info('Creating directory ' + os.path.join(app.srcdir, package_path))
                os.makedirs(os.path.join(app.srcdir, package_path))
            with open(os.path.join(app.srcdir, module_path + '.rst'), 'w') as f:
                app.info('Writing ' + os.path.join(app.srcdir, module_path + '.rst'))
                f.write(result)
            yield module_path
    return ['reference'] + list(_build_package('vlcp', 'hubo1016/vlcp')) + list(_build_package('vlcp_docker', 'hubo1016/vlcp-docker-plugin'))
def skip_members(app, what, name, obj, skip, options):
    """``autodoc-skip-member`` handler.

    Skips ``__weakref__`` entries, force-includes module-level ``Event``
    subclasses, and otherwise keeps autodoc's own decision.
    """
    if not skip and name == '__weakref__':
        return True
    elif what == 'module' and isinstance(obj, type) and issubclass(obj, Event):
        return False
    else:
        return skip
def setup(app):
    """Sphinx extension entry point: register the doc-generation handlers."""
    handlers = (
        ('env-get-outdated', generate_doc),
        ('builder-inited', generate_references),
        ('autodoc-skip-member', skip_members),
    )
    for event_name, handler in handlers:
        app.connect(event_name, handler)
    return {'version': '0.1'}
|
apache-2.0
|
ByteInternet/libcloud
|
libcloud/test/test_types.py
|
68
|
3681
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.common.types import LazyList
class TestLazyList(unittest.TestCase):
    """Unit tests for LazyList's lazily-paginated iteration, length,
    membership, indexing and repr behaviour."""

    def setUp(self):
        # BUG FIX: the original referenced the bound method without calling
        # it ("super(...).setUp"), so TestCase.setUp never actually ran.
        super(TestLazyList, self).setUp()
        self._get_more_counter = 0

    def tearDown(self):
        # BUG FIX: same missing-call defect as setUp.
        super(TestLazyList, self).tearDown()

    def test_init(self):
        data = [1, 2, 3, 4, 5]
        ll = LazyList(get_more=self._get_more_exhausted)
        ll_list = list(ll)
        self.assertEqual(ll_list, data)

    def test_iterator(self):
        data = [1, 2, 3, 4, 5]
        ll = LazyList(get_more=self._get_more_exhausted)
        for i, d in enumerate(ll):
            self.assertEqual(d, data[i])

    def test_empty_list(self):
        ll = LazyList(get_more=self._get_more_empty)
        self.assertEqual(list(ll), [])
        self.assertEqual(len(ll), 0)
        self.assertTrue(10 not in ll)

    def test_iterator_not_exhausted(self):
        # The provider hands the data over in two batches of five.
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        ll = LazyList(get_more=self._get_more_not_exhausted)
        number_of_iterations = 0
        for i, d in enumerate(ll):
            self.assertEqual(d, data[i])
            number_of_iterations += 1
        self.assertEqual(number_of_iterations, 10)

    def test_len(self):
        # CLEANUP: the original instantiated the list twice in a row;
        # the first instance was discarded unused.
        ll = LazyList(get_more=self._get_more_not_exhausted)
        self.assertEqual(len(ll), 10)

    def test_contains(self):
        ll = LazyList(get_more=self._get_more_not_exhausted)
        self.assertTrue(40 not in ll)
        self.assertTrue(1 in ll)
        self.assertTrue(5 in ll)
        self.assertTrue(10 in ll)

    def test_indexing(self):
        ll = LazyList(get_more=self._get_more_not_exhausted)
        self.assertEqual(ll[0], 1)
        self.assertEqual(ll[9], 10)
        self.assertEqual(ll[-1], 10)
        try:
            ll[11]
        except IndexError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_repr(self):
        ll1 = LazyList(get_more=self._get_more_empty)
        ll2 = LazyList(get_more=self._get_more_exhausted)
        ll3 = LazyList(get_more=self._get_more_not_exhausted)
        self.assertEqual(repr(ll1), '[]')
        self.assertEqual(repr(ll2), '[1, 2, 3, 4, 5]')
        self.assertEqual(repr(ll3), '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]')

    def _get_more_empty(self, last_key, value_dict):
        # Provider that is immediately exhausted with no data.
        return [], None, True

    def _get_more_exhausted(self, last_key, value_dict):
        # Provider that returns everything in a single batch.
        data = [1, 2, 3, 4, 5]
        return data, 5, True

    def _get_more_not_exhausted(self, last_key, value_dict):
        # Provider that needs two calls to hand over all ten items.
        self._get_more_counter += 1
        if not last_key:
            data, last_key, exhausted = [1, 2, 3, 4, 5], 5, False
        else:
            data, last_key, exhausted = [6, 7, 8, 9, 10], 10, True
        return data, last_key, exhausted
# Allow running this test module directly; the process exit status
# reflects the unittest result.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
apache-2.0
|
saydulk/horizon
|
openstack_dashboard/dashboards/project/volumes/volumes/tables.py
|
17
|
19481
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse
from django.http import HttpResponse # noqa
from django.template import defaultfilters as filters
from django.utils import html
from django.utils.http import urlencode
from django.utils import safestring
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import policy
DELETABLE_STATES = ("available", "error", "error_extending")
class VolumePolicyTargetMixin(policy.PolicyTargetMixin):
    """Map a volume's tenant attribute onto the policy "project_id" target."""
    policy_target_attrs = (("project_id", 'os-vol-tenant-attr:tenant_id'),)
class LaunchVolume(tables.LinkAction):
    """Row action that boots a new instance from a bootable volume."""
    name = "launch_volume"
    verbose_name = _("Launch as Instance")
    url = "horizon:project:instances:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
    policy_rules = (("compute", "compute:create"),)

    def get_link_url(self, datum):
        # Pre-select this volume as the boot source on the launch form.
        volume_ref = "%s:vol" % self.table.get_object_id(datum)
        query = urlencode({"source_type": "volume_id",
                           "source_id": volume_ref})
        return "?".join([reverse(self.url), query])

    def allowed(self, request, volume=None):
        # Only bootable volumes in the "available" state can be launched.
        return (getattr(volume, 'bootable', '') == 'true'
                and volume.status == "available")
class DeleteVolume(VolumePolicyTargetMixin, tables.DeleteAction):
    """Batch/row action that schedules deletion of one or more volumes."""

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Volume",
            u"Delete Volumes",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Scheduled deletion of Volume",
            u"Scheduled deletion of Volumes",
            count
        )

    policy_rules = (("volume", "volume:delete"),)

    def delete(self, request, obj_id):
        cinder.volume_delete(request, obj_id)

    def allowed(self, request, volume=None):
        # Deletable only in a safe state and when no snapshot depends on it.
        if volume:
            return (volume.status in DELETABLE_STATES and
                    not getattr(volume, 'has_snapshot', False))
        return True
class CreateVolume(tables.LinkAction):
    """Table action linking to the create-volume form; quota-aware."""
    name = "create"
    verbose_name = _("Create Volume")
    url = "horizon:project:volumes:volumes:create"
    classes = ("ajax-modal", "btn-create")
    icon = "plus"
    policy_rules = (("volume", "volume:create"),)
    ajax = True

    def __init__(self, attrs=None, **kwargs):
        kwargs['preempt'] = True
        super(CreateVolume, self).__init__(attrs, **kwargs)

    def allowed(self, request, volume=None):
        """Enable or grey out the button based on remaining tenant quota."""
        limits = api.cinder.tenant_absolute_limits(request)
        gb_left = (limits.get('maxTotalVolumeGigabytes', float("inf"))
                   - limits.get('totalGigabytesUsed', 0))
        count_left = (limits.get('maxTotalVolumes', float("inf"))
                      - limits.get('totalVolumesUsed', 0))

        if gb_left <= 0 or count_left <= 0:
            # Quota exhausted: disable the button and annotate its label.
            if "disabled" not in self.classes:
                self.classes = list(self.classes) + ['disabled']
                self.verbose_name = string_concat(self.verbose_name, ' ',
                                                  _("(Quota exceeded)"))
        else:
            # Quota available again: restore the plain label and state.
            self.verbose_name = _("Create Volume")
            self.classes = [c for c in self.classes if c != "disabled"]
        return True

    def single(self, table, request, object_id=None):
        # AJAX refresh: recompute enabled/disabled state, re-render button.
        self.allowed(request, None)
        return HttpResponse(self.render())
class ExtendVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the volume-extend (resize) form."""
    name = "extend"
    verbose_name = _("Extend Volume")
    url = "horizon:project:volumes:volumes:extend"
    classes = ("ajax-modal", "btn-extend")
    policy_rules = (("volume", "volume:extend"),)

    def allowed(self, request, volume=None):
        # Only unattached ("available") volumes can be extended.
        return volume.status == "available"
class EditAttachments(tables.LinkAction):
    """Row action that opens the attachment-management dialog."""
    name = "attachments"
    verbose_name = _("Manage Attachments")
    url = "horizon:project:volumes:volumes:attach"
    classes = ("ajax-modal",)
    icon = "pencil"

    def allowed(self, request, volume=None):
        if not volume:
            return False
        # Either the attach or the detach compute policy is sufficient.
        target = {"project_id": getattr(volume,
                                        "os-vol-tenant-attr:tenant_id",
                                        None)}
        can_attach = policy.check((("compute", "compute:attach_volume"),),
                                  request, target)
        can_detach = policy.check((("compute", "compute:detach_volume"),),
                                  request, target)
        if can_attach or can_detach:
            return volume.status in ("available", "in-use")
        return False
class CreateSnapshot(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the create-snapshot form; quota-aware."""
    name = "snapshots"
    verbose_name = _("Create Snapshot")
    url = "horizon:project:volumes:volumes:create_snapshot"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("volume", "volume:create_snapshot"),)

    def allowed(self, request, volume=None):
        try:
            limits = api.cinder.tenant_absolute_limits(request)
        except Exception:
            exceptions.handle(request, _('Unable to retrieve tenant limits.'))
            limits = {}

        # Remaining snapshot quota; missing limits mean "unlimited".
        snapshots_available = (limits.get('maxTotalSnapshots', float("inf"))
                               - limits.get('totalSnapshotsUsed', 0))

        if snapshots_available <= 0 and "disabled" not in self.classes:
            self.classes = [c for c in self.classes] + ['disabled']
            self.verbose_name = string_concat(self.verbose_name, ' ',
                                              _("(Quota exceeded)"))
        return volume.status in ("available", "in-use")
class CreateTransfer(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the volume-transfer creation form."""
    name = "create_transfer"
    verbose_name = _("Create Transfer")
    url = "horizon:project:volumes:volumes:create_transfer"
    classes = ("ajax-modal",)
    policy_rules = (("volume", "volume:create_transfer"),)

    def allowed(self, request, volume=None):
        # Only detached volumes can be offered for transfer.
        return volume.status == "available"
class CreateBackup(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the backup-creation form."""
    name = "backups"
    verbose_name = _("Create Backup")
    url = "horizon:project:volumes:volumes:create_backup"
    classes = ("ajax-modal",)
    policy_rules = (("volume", "backup:create"),)

    def allowed(self, request, volume=None):
        # Requires the cinder-backup service to be deployed.
        return (cinder.volume_backup_supported(request) and
                volume.status == "available")
class UploadToImage(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the upload-volume-to-image form."""
    name = "upload_to_image"
    verbose_name = _("Upload to Image")
    url = "horizon:project:volumes:volumes:upload_to_image"
    classes = ("ajax-modal",)
    icon = "cloud-upload"
    policy_rules = (("volume", "volume:upload_to_image"),)

    def allowed(self, request, volume=None):
        # The user also needs access to the image service.
        has_image_service_perm = \
            request.user.has_perm('openstack.services.image')
        return (volume.status in ("available", "in-use") and
                has_image_service_perm)
class EditVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the edit (rename/re-describe) form."""
    name = "edit"
    verbose_name = _("Edit Volume")
    url = "horizon:project:volumes:volumes:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "volume:update"),)

    def allowed(self, request, volume=None):
        return volume.status in ("available", "in-use")
class RetypeVolume(VolumePolicyTargetMixin, tables.LinkAction):
    """Row action linking to the change-volume-type form."""
    name = "retype"
    verbose_name = _("Change Volume Type")
    url = "horizon:project:volumes:volumes:retype"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("volume", "volume:retype"),)

    def allowed(self, request, volume=None):
        return volume.status in ("available", "in-use")
class AcceptTransfer(tables.LinkAction):
    """Table-level action linking to the accept-volume-transfer form."""
    name = "accept_transfer"
    verbose_name = _("Accept Transfer")
    url = "horizon:project:volumes:volumes:accept_transfer"
    classes = ("ajax-modal",)
    icon = "exchange"
    policy_rules = (("volume", "volume:accept_transfer"),)
    ajax = True

    def single(self, table, request, object_id=None):
        # Always enabled; simply re-render the button on AJAX refreshes.
        return HttpResponse(self.render())
class DeleteTransfer(VolumePolicyTargetMixin, tables.Action):
    """Cancel a pending transfer of a volume awaiting acceptance."""
    # This class inherits from tables.Action instead of the more obvious
    # tables.DeleteAction due to the confirmation message.  When the delete
    # is successful, DeleteAction automatically appends the name of the
    # volume to the message, e.g. "Deleted volume transfer 'volume'".  But
    # we are deleting the volume *transfer*, whose name is different.
    name = "delete_transfer"
    verbose_name = _("Cancel Transfer")
    policy_rules = (("volume", "volume:delete_transfer"),)
    classes = ('btn-danger',)
    help_text = _("This action cannot be undone.")

    def allowed(self, request, volume):
        # Cancelable only while the transfer is still pending.
        return (volume.status == "awaiting-transfer" and
                getattr(volume, 'transfer', None))

    def single(self, table, request, volume_id):
        volume = table.get_object_by_id(volume_id)
        try:
            cinder.transfer_delete(request, volume.transfer.id)
            if volume.transfer.name:
                msg = _('Successfully deleted volume transfer "%s"'
                        ) % volume.transfer.name
            else:
                msg = _("Successfully deleted volume transfer")
            messages.success(request, msg)
        except Exception:
            exceptions.handle(request, _("Unable to delete volume transfer."))
class UpdateRow(tables.Row):
    """AJAX row refresher: re-fetches a single volume for status polling."""
    ajax = True

    def get_data(self, request, volume_id):
        volume = cinder.volume_get(request, volume_id)
        return volume
def get_size(volume):
    """Format a volume's size (a number of GB) for table display."""
    return _("%sGB") % volume.size
def get_attachment_name(request, attachment):
    """Render a link to the instance an attachment belongs to.

    Falls back to plain escaped text when the instance-detail URL
    cannot be reversed.
    """
    server_id = attachment.get("server_id", None)
    if "instance" in attachment and attachment['instance']:
        name = attachment["instance"].name
    else:
        try:
            server = api.nova.server_get(request, server_id)
            name = server.name
        except Exception:
            # NOTE(review): `name` stays None here; html.escape(None) below
            # would raise a TypeError — presumably exceptions.handle()
            # redirects before that happens.  TODO: confirm.
            name = None
            exceptions.handle(request, _("Unable to retrieve "
                                         "attachment information."))
    try:
        url = reverse("horizon:project:instances:detail", args=(server_id,))
        instance = '<a href="%s">%s</a>' % (url, html.escape(name))
    except NoReverseMatch:
        instance = html.escape(name)
    return instance
class AttachmentColumn(tables.Column):
    """Customized column that renders a volume's attachments as
    "Attached to <instance> on <device>" links.
    """
    def get_raw_data(self, volume):
        request = self.table.request
        link = _('Attached to %(instance)s on %(dev)s')
        attachments = []
        # Filter out "empty" attachments which the client returns...
        for attachment in [att for att in volume.attachments if att]:
            # When a volume is attached it may return the server_id
            # without the server name...
            instance = get_attachment_name(request, attachment)
            vals = {"instance": instance,
                    "dev": html.escape(attachment.get("device", ""))}
            attachments.append(link % vals)
        # Safe to mark: instance names/devices were escaped above.
        return safestring.mark_safe(", ".join(attachments))
def get_volume_type(volume):
    """Return the volume's type, or None when cinder reports the
    literal string "None"."""
    vtype = volume.volume_type
    return None if vtype == "None" else vtype
def get_encrypted_value(volume):
    """Render a volume's encryption flag: "-" when unset, else Yes/No."""
    encrypted = getattr(volume, 'encrypted', None)
    if encrypted is None:
        return _("-")
    if encrypted is False:
        # Identity check on purpose: only the literal False means "No".
        return _("No")
    return _("Yes")
class VolumesTableBase(tables.DataTable):
    """Shared column and status definitions for project volume tables."""

    # (status value, is_complete) pairs used for row status polling:
    # True = steady state, False = error state, None = still transitioning.
    STATUS_CHOICES = (
        ("in-use", True),
        ("available", True),
        ("creating", None),
        ("error", False),
        ("error_extending", False),
    )
    # Translatable display names for each raw cinder status value.
    STATUS_DISPLAY_CHOICES = (
        ("available", pgettext_lazy("Current status of a Volume",
                                    u"Available")),
        ("in-use", pgettext_lazy("Current status of a Volume", u"In-use")),
        ("error", pgettext_lazy("Current status of a Volume", u"Error")),
        ("creating", pgettext_lazy("Current status of a Volume",
                                   u"Creating")),
        ("error_extending", pgettext_lazy("Current status of a Volume",
                                          u"Error Extending")),
        ("extending", pgettext_lazy("Current status of a Volume",
                                    u"Extending")),
        ("attaching", pgettext_lazy("Current status of a Volume",
                                    u"Attaching")),
        ("detaching", pgettext_lazy("Current status of a Volume",
                                    u"Detaching")),
        ("deleting", pgettext_lazy("Current status of a Volume",
                                   u"Deleting")),
        ("error_deleting", pgettext_lazy("Current status of a Volume",
                                         u"Error deleting")),
        ("backing-up", pgettext_lazy("Current status of a Volume",
                                     u"Backing Up")),
        ("restoring-backup", pgettext_lazy("Current status of a Volume",
                                           u"Restoring Backup")),
        ("error_restoring", pgettext_lazy("Current status of a Volume",
                                          u"Error Restoring")),
    )
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:volumes:volumes:detail")
    description = tables.Column("description",
                                verbose_name=_("Description"),
                                truncate=40)
    size = tables.Column(get_size,
                         verbose_name=_("Size"),
                         attrs={'data-type': 'size'})
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)

    def get_object_display(self, obj):
        # Volumes are referred to by name in user-facing messages.
        return obj.name
class VolumesFilterAction(tables.FilterAction):

    def filter(self, table, volumes, filter_string):
        """Naive case-insensitive search on the volume name."""
        needle = filter_string.lower()
        matches = []
        for volume in volumes:
            if needle in volume.name.lower():
                matches.append(volume)
        return matches
class VolumesTable(VolumesTableBase):
    """Main project "Volumes" table with all row and table actions."""
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:volumes:volumes:detail")
    volume_type = tables.Column(get_volume_type,
                                verbose_name=_("Type"))
    attachments = AttachmentColumn("attachments",
                                   verbose_name=_("Attached To"))
    availability_zone = tables.Column("availability_zone",
                                      verbose_name=_("Availability Zone"))
    bootable = tables.Column('is_bootable',
                             verbose_name=_("Bootable"),
                             filters=(filters.yesno, filters.capfirst))
    encryption = tables.Column(get_encrypted_value,
                               verbose_name=_("Encrypted"),
                               link="horizon:project:volumes:"
                                    "volumes:encryption_detail")

    class Meta(object):
        name = "volumes"
        verbose_name = _("Volumes")
        # "status" drives the AJAX polling via UpdateRow.
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (CreateVolume, AcceptTransfer, DeleteVolume,
                         VolumesFilterAction)
        row_actions = (EditVolume, ExtendVolume, LaunchVolume, EditAttachments,
                       CreateSnapshot, CreateBackup, RetypeVolume,
                       UploadToImage, CreateTransfer, DeleteTransfer,
                       DeleteVolume)
class DetachVolume(tables.BatchAction):
    """Batch action that detaches a volume from its instance via nova."""
    name = "detach"
    classes = ('btn-danger', 'btn-detach')
    policy_rules = (("compute", "compute:detach_volume"),)
    help_text = _("The data will remain in the volume and another instance"
                  " will be able to access the data if you attach"
                  " this volume to it.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Detach Volume",
            u"Detach Volumes",
            count
        )

    # This action is asynchronous.
    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Detaching Volume",
            u"Detaching Volumes",
            count
        )

    def action(self, request, obj_id):
        attachment = self.table.get_object_by_id(obj_id)
        api.nova.instance_volume_detach(request,
                                        attachment.get('server_id', None),
                                        obj_id)

    def get_success_url(self, request):
        return reverse('horizon:project:volumes:index')
class AttachedInstanceColumn(tables.Column):
    """Customized column class that does complex processing on the attachments
    for a volume instance.
    """
    def get_raw_data(self, attachment):
        request = self.table.request
        # Safe to mark: get_attachment_name escapes the instance name.
        return safestring.mark_safe(get_attachment_name(request, attachment))
class AttachmentsTable(tables.DataTable):
    """Table of a volume's attachments with a detach batch action."""
    instance = AttachedInstanceColumn(get_attachment_name,
                                      verbose_name=_("Instance"))
    device = tables.Column("device",
                           verbose_name=_("Device"))

    def get_object_id(self, obj):
        return obj['id']

    def get_object_display(self, attachment):
        instance_name = get_attachment_name(self.request, attachment)
        params = {"volume_name": attachment['volume_name'],
                  "instance_name": html.strip_tags(instance_name)}
        return _("Volume %(volume_name)s on instance %(instance_name)s") % params

    def get_object_by_id(self, obj_id):
        # Linear scan is fine: a volume has very few attachments.
        for candidate in self.data:
            if self.get_object_id(candidate) == obj_id:
                return candidate
        raise ValueError('No match found for the id "%s".' % obj_id)

    class Meta(object):
        name = "attachments"
        verbose_name = _("Attachments")
        table_actions = (DetachVolume,)
        row_actions = (DetachVolume,)
|
apache-2.0
|
sklnet/beyonwiz-enigma2
|
lib/python/Screens/WizardLanguage.py
|
6
|
1151
|
from Screens.Wizard import Wizard
from Components.Label import Label
from Components.Language import language
from os import system
class WizardLanguage(Wizard):
    """Wizard variant with an on-screen label showing the active language,
    cyclable with the red button.  (Python 2 / enigma2 code.)"""

    def __init__(self, session, showSteps = True, showStepSlider = True, showList = True, showConfig = True):
        Wizard.__init__(self, session, showSteps, showStepSlider, showList, showConfig)
        # Blank the video plane while the wizard is shown.
        system("showiframe /usr/share/enigma2/black.mvi")
        self["languagetext"] = Label()
        self.updateLanguageDescription()

    def red(self):
        # Red button handler: cycle to the next installed language.
        self.resetCounter()
        self.languageSelect()

    def languageSelect(self):
        print "languageSelect"
        # Advance the active language index, wrapping at the end of the list.
        newlanguage = language.getActiveLanguageIndex() + 1
        if newlanguage >= len(language.getLanguageList()):
            newlanguage = 0
        language.activateLanguageIndex(newlanguage)
        self.updateTexts()

    def updateLanguageDescription(self):
        print language.getLanguageList()[language.getActiveLanguageIndex()]
        # Show the (translated) native name of the active language.
        self["languagetext"].setText(self.getTranslation(language.getLanguageList()[language.getActiveLanguageIndex()][1][0]))

    def updateTexts(self):
        print "updateTexts"
        # Re-render all wizard texts in the newly selected language.
        self.updateText(firstset = True)
        self.updateValues()
        self.updateLanguageDescription()
|
gpl-2.0
|
Simran-B/arangodb
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/encodings/euc_jisx0213.py
|
816
|
1051
|
#
# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
# C-level codec implementation plus the multibyte codec machinery.
import _codecs_jp, codecs
import _multibytecodec as mbc

codec = _codecs_jp.getcodec('euc_jisx0213')

class Codec(codecs.Codec):
    # Stateless encode/decode delegate directly to the C codec.
    encode = codec.encode
    decode = codec.decode

class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec

def getregentry():
    # Registry hook: returns the CodecInfo consumed by codecs.register().
    return codecs.CodecInfo(
        name='euc_jisx0213',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
apache-2.0
|
alanbowman/home-assistant
|
homeassistant/components/switch/__init__.py
|
4
|
4104
|
"""
homeassistant.components.switch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Component to interface with various switches that can be controlled remotely.
"""
from datetime import timedelta
import logging
import os
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.const import (
STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID)
from homeassistant.components import group, discovery, wink, isy994, verisure
DOMAIN = 'switch'
DEPENDENCIES = []

# Seconds between polls of switch platforms.
SCAN_INTERVAL = 30

GROUP_NAME_ALL_SWITCHES = 'all switches'
ENTITY_ID_ALL_SWITCHES = group.ENTITY_ID_FORMAT.format('all_switches')

ENTITY_ID_FORMAT = DOMAIN + '.{}'

# State attribute keys exposed by SwitchDevice.state_attributes.
ATTR_TODAY_MWH = "today_mwh"
ATTR_CURRENT_POWER_MWH = "current_power_mwh"
ATTR_SENSOR_STATE = "sensor_state"

MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)

# Maps discovered services to their platforms
DISCOVERY_PLATFORMS = {
    discovery.SERVICE_WEMO: 'wemo',
    wink.DISCOVER_SWITCHES: 'wink',
    isy994.DISCOVER_SWITCHES: 'isy994',
    verisure.DISCOVER_SWITCHES: 'verisure'
}

# Maps SwitchDevice property names to their state attribute keys.
PROP_TO_ATTR = {
    'current_power_mwh': ATTR_CURRENT_POWER_MWH,
    'today_power_mw': ATTR_TODAY_MWH,
    'sensor_state': ATTR_SENSOR_STATE
}

_LOGGER = logging.getLogger(__name__)
def is_on(hass, entity_id=None):
    """Return True if the targeted switch is on; with no entity_id,
    checks the all-switches group."""
    target_id = entity_id or ENTITY_ID_ALL_SWITCHES
    return hass.states.is_state(target_id, STATE_ON)
def turn_on(hass, entity_id=None):
    """Turn on the specified switch, or every switch when no id is given."""
    if entity_id:
        service_data = {ATTR_ENTITY_ID: entity_id}
    else:
        service_data = None
    hass.services.call(DOMAIN, SERVICE_TURN_ON, service_data)
def turn_off(hass, entity_id=None):
    """Turn off the specified switch, or every switch when no id is given."""
    if entity_id:
        service_data = {ATTR_ENTITY_ID: entity_id}
    else:
        service_data = None
    hass.services.call(DOMAIN, SERVICE_TURN_OFF, service_data)
def setup(hass, config):
    """ Track states and offer events for switches. """
    component = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL, DISCOVERY_PLATFORMS,
        GROUP_NAME_ALL_SWITCHES)
    component.setup(config)

    def handle_switch_service(service):
        """ Handles calls to the switch services. """
        target_switches = component.extract_from_service(service)
        for switch in target_switches:
            if service.service == SERVICE_TURN_ON:
                switch.turn_on()
            else:
                switch.turn_off()
            # Push the new state immediately for polled switches.
            if switch.should_poll:
                switch.update_ha_state(True)

    # Service descriptions come from the YAML file next to this module.
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))
    hass.services.register(DOMAIN, SERVICE_TURN_OFF, handle_switch_service,
                           descriptions.get(SERVICE_TURN_OFF))
    hass.services.register(DOMAIN, SERVICE_TURN_ON, handle_switch_service,
                           descriptions.get(SERVICE_TURN_ON))
    return True
class SwitchDevice(ToggleEntity):
    """ Represents a switch within Home Assistant. """
    # pylint: disable=no-self-use

    @property
    def current_power_mwh(self):
        """ Current power usage in mwh. """
        return None

    @property
    def today_power_mw(self):
        """ Today total power usage in mw. """
        return None

    @property
    def is_standby(self):
        """ Is the device in standby. """
        return None

    @property
    def sensor_state(self):
        """ Is the sensor on or off. """
        return None

    @property
    def device_state_attributes(self):
        """ Returns device specific state attributes. """
        return None

    @property
    def state_attributes(self):
        """ Returns optional state attributes. """
        data = {}
        # Only expose attributes whose property returns a truthy value.
        for prop, attr in PROP_TO_ATTR.items():
            value = getattr(self, prop)
            if value:
                data[attr] = value
        # Platform-specific attributes are merged on top, if any.
        device_attr = self.device_state_attributes
        if device_attr is not None:
            data.update(device_attr)
        return data
|
mit
|
kaste/waterf
|
tests/enqueue_test.py
|
1
|
5368
|
import pytest
from _helper import consume, tick, count_tasks
# Aliases for commonly used pytest markers.
notimplemented = xfail = pytest.mark.xfail
beforeEach = pytest.mark.usefixtures

import datetime

from waterf import queue, task, snake

# Module-level scratch list recording which task bodies ran, in order.
messages = []

@pytest.fixture
def clear_messages():
    # Empty the shared `messages` list so a test starts from a clean slate.
    while messages:
        messages.pop()

def P(message='P'):
    # Task body: record that it ran (used to assert execution order).
    messages.append(message)

def noop(): pass
class TestQueueing:
    """Tests for waterf's enqueue deduplication (ids, names, subtasks)."""
    # NOTE(review): structural indentation was lost in this dump; the
    # nesting below is reconstructed — verify against the upstream file.

    class TestNoProtection:
        # With use_id=False the same task may be enqueued multiple times.

        def testDisableProtection2(self, taskqueue):
            task(noop).enqueue(use_id=False)
            task(noop).enqueue(use_id=False)
            assert count_tasks(taskqueue) == 2

        def testDisableProtection3(self, taskqueue):
            task(noop, _use_id=False).enqueue()
            task(noop, _use_id=False).enqueue()
            assert count_tasks(taskqueue) == 2

    class TestProtectByName:
        # Named tasks are deduplicated by their explicit name.

        def testPreventNamedTaskToBeScheduledTwice(self, taskqueue):
            task(noop).enqueue(name='A')
            task(noop).enqueue(name='A')
            assert count_tasks(taskqueue) == 1

        def testPreventNamedTaskToBeScheduledTwice2(self, taskqueue):
            task(noop, _name='A').enqueue()
            task(noop, _name='A').enqueue()
            assert count_tasks(taskqueue) == 1

    class TestProtectById:
        # Unnamed tasks are deduplicated by a generated (or given) id.

        def testSilentlyDisallowDoubleExecution(self, taskqueue, ndb):
            task(noop).enqueue()
            task(noop).enqueue()
            assert count_tasks(taskqueue) == 1

        def testProtectedByGivenId(self, taskqueue, ndb):
            task(noop).enqueue(use_id='A')
            task(noop).enqueue(use_id='A')
            assert count_tasks(taskqueue) == 1

        def testProtectedByGivenId2(self, taskqueue, ndb):
            task(noop, _use_id='A').enqueue()
            task(noop, _use_id='A').enqueue()
            assert count_tasks(taskqueue) == 1

        def testIdCanBeReusedImmediately(self, taskqueue, ndb):
            task(noop).enqueue()
            assert count_tasks(taskqueue) == 1
            consume(taskqueue)
            task(noop).enqueue()
            assert count_tasks(taskqueue) == 1

        def testIdIsReleasedAfterXSeconds(self, taskqueue, ndb):
            task(noop).enqueue(release_after=1)
            assert count_tasks(taskqueue) == 1
            tick(taskqueue)
            assert count_tasks(taskqueue) == 1  # the cleanup handler
            # The cleanup task must be scheduled release_after seconds out.
            cleanup_handler = taskqueue.get_filtered_tasks()[0]
            now = datetime.datetime.now(tz=queue.taskqueue.taskqueue._UTC)
            now = now.replace(microsecond=0)
            assert cleanup_handler.eta == now + datetime.timedelta(seconds=1)
            task(noop).enqueue()
            assert count_tasks(taskqueue) == 1
            tick(taskqueue)
            assert count_tasks(taskqueue) == 0  # ensure
            task(noop).enqueue()
            assert count_tasks(taskqueue) == 1

        def testEnsureMultipleTaskGetCleanedIfReleaseAfterIsIused(
                self, taskqueue, fastndb):
            # (sic: "IsIused" — typo kept so the collected test id stays
            # stable for external tooling.)
            queue.inorder(
                task(P, 'ONE')
            ).enqueue(release_after=1)
            queue.inorder(
                task(P, 'TWO')
            ).enqueue(release_after=1)
            consume(taskqueue)
            # All semaphores (dedup locks) must be cleaned up afterwards.
            semaphores = queue.Lock.model.query().fetch()
            assert len(semaphores) == 0
            # 1/0

        def testGeneratedIdHandlesParameters(self, taskqueue, ndb):
            # Different positional args must yield different generated ids.
            task(P, 'ONE').enqueue()
            task(P, 'TWO').enqueue()
            assert count_tasks(taskqueue) == 2

        def testGeneratedIdHandlesParameters2(self, taskqueue, ndb):
            # Different keyword args must yield different generated ids.
            task(P, message='ONE').enqueue()
            task(P, message='TWO').enqueue()
            assert count_tasks(taskqueue) == 2

    class TestSubtasks:
        # Subtask enqueueing inherits the parent's dedup protection.

        def testParentUsesId1(self, taskqueue):
            main = task(noop)
            sub = task(P)
            main.enqueue_subtask(sub)
            main.enqueue_subtask(sub)
            assert count_tasks(taskqueue) == 1

        def testParentUsesId2(self, taskqueue):
            main = task(noop, _use_id='A')
            sub = task(P)
            main.enqueue_subtask(sub)
            main.enqueue_subtask(sub)
            assert count_tasks(taskqueue) == 1

        def testParentUsesName1(self, taskqueue):
            main = task(noop, _name='A')
            sub = task(P)
            main.enqueue_subtask(sub)
            main.enqueue_subtask(sub)
            assert count_tasks(taskqueue) == 1

        def testParentUsesName2(self, taskqueue):
            # NOTE(review): identical setup to testParentUsesId1 — the
            # "_name" variant seems intended here.  TODO: confirm upstream.
            main = task(noop)
            sub = task(P)
            main.enqueue_subtask(sub)
            main.enqueue_subtask(sub)
            assert count_tasks(taskqueue) == 1

    @beforeEach("clear_messages")
    def testA(self, taskqueue, ndb):
        # Two differently-id'd chains run independently, in order.
        A = queue.inorder(
            task(P)
        ).enqueue(use_id='A')
        B = queue.inorder(
            task(P)
        ).enqueue(use_id='B')
        assert count_tasks(taskqueue) == 2
        tick(taskqueue)
        assert count_tasks(taskqueue) == 2
        tick(taskqueue)
        assert count_tasks(taskqueue) == 0
        assert 'P P'.split() == messages
        # 1/0
|
bsd-2-clause
|
andela-earinde/bellatrix-py
|
app/js/lib/lib/modules/test/test_fileio.py
|
8
|
16500
|
# Adapted from test_file.py by Daniel Stutzbach
from __future__ import unicode_literals
import sys
import os
import errno
import unittest
from array import array
from weakref import proxy
from functools import wraps
from UserList import UserList
from test.test_support import TESTFN, check_warnings, run_unittest, make_bad_fd
from test.test_support import py3k_bytes as bytes, cpython_only
from test.test_support import gc_collect
from test.script_helper import run_python
from _io import FileIO as _FileIO
class AutoFileTests(unittest.TestCase):
    """FileIO tests that operate on an automatically created test file."""
    # file tests for which a test file is automatically set up

    def setUp(self):
        self.f = _FileIO(TESTFN, 'w')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(bytes(range(10)))
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        gc_collect()
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testSeekTell(self):
        self.f.write(bytes(range(20)))
        self.assertEqual(self.f.tell(), 20)
        self.f.seek(0)
        self.assertEqual(self.f.tell(), 0)
        self.f.seek(10)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(5, 1)
        self.assertEqual(self.f.tell(), 15)
        self.f.seek(-5, 1)
        self.assertEqual(self.f.tell(), 10)
        self.f.seek(-5, 2)
        self.assertEqual(self.f.tell(), 15)

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        self.assertEqual(f.mode, "wb")
        self.assertEqual(f.closed, False)
        # verify the attributes are readonly
        for attr in 'mode', 'closed':
            self.assertRaises((AttributeError, TypeError),
                              setattr, f, attr, 'oops')

    def testReadinto(self):
        # verify readinto
        self.f.write(b"\x01\x02")
        self.f.close()
        a = array(b'b', b'x'*10)
        self.f = _FileIO(TESTFN, 'r')
        n = self.f.readinto(a)
        self.assertEqual(array(b'b', [1, 2]), a[:n])

    def testWritelinesList(self):
        l = [b'123', b'456']
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesUserList(self):
        # Same as above but with a sequence that is not a real list.
        l = UserList([b'123', b'456'])
        self.f.writelines(l)
        self.f.close()
        self.f = _FileIO(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'123456')

    def testWritelinesError(self):
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])
        self.assertRaises(TypeError, self.f.writelines, None)

    def test_none_args(self):
        # read/readline/readlines accept None meaning "no size limit".
        self.f.write(b"hi\nbye\nabc")
        self.f.close()
        self.f = _FileIO(TESTFN, 'r')
        self.assertEqual(self.f.read(None), b"hi\nbye\nabc")
        self.f.seek(0)
        self.assertEqual(self.f.readline(None), b"hi\n")
        self.assertEqual(self.f.readlines(None), [b"bye\n", b"abc"])

    def testRepr(self):
        self.assertEqual(repr(self.f), "<_io.FileIO name=%r mode='%s'>"
                         % (self.f.name, self.f.mode))
        del self.f.name
        self.assertEqual(repr(self.f), "<_io.FileIO fd=%r mode='%s'>"
                         % (self.f.fileno(), self.f.mode))
        self.f.close()
        self.assertEqual(repr(self.f), "<_io.FileIO [closed]>")

    def testErrors(self):
        f = self.f
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        #self.assertEqual(f.name, TESTFN)
        self.assertRaises(ValueError, f.read, 10)  # Open for reading
        f.close()
        self.assertTrue(f.closed)
        f = _FileIO(TESTFN, 'r')
        self.assertRaises(TypeError, f.readinto, "")
        self.assertTrue(not f.closed)
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        methods = ['fileno', 'isatty', 'read',
                   'tell', 'truncate', 'seekable',
                   'readable', 'writable']
        if sys.platform.startswith('atheos'):
            methods.remove('truncate')

        self.f.close()
        self.assertTrue(self.f.closed)

        for methodname in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method)

        # methods with one argument
        self.assertRaises(ValueError, self.f.readinto, 0)
        self.assertRaises(ValueError, self.f.write, 0)
        self.assertRaises(ValueError, self.f.seek, 0)

    def testOpendir(self):
        # Issue 3703: opening a directory should fill the errno
        # Windows always returns "[Errno 13]: Permission denied
        # Unix calls dircheck() and returns "[Errno 21]: Is a directory"
        try:
            _FileIO('.', 'r')
        except IOError as e:
            self.assertNotEqual(e.errno, 0)
            self.assertEqual(e.filename, ".")
        else:
            self.fail("Should have raised IOError")

    @unittest.skipIf(os.name == 'nt', "test only works on a POSIX-like system")
    def testOpenDirFD(self):
        fd = os.open('.', os.O_RDONLY)
        with self.assertRaises(IOError) as cm:
            _FileIO(fd, 'r')
        os.close(fd)
        self.assertEqual(cm.exception.errno, errno.EISDIR)

    #A set of functions testing that we get expected behaviour if someone has
    #manually closed the internal file descriptor.  First, a decorator:
    def ClosedFD(func):
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper

    def ClosedFDRaises(func):
        @wraps(func)
        def wrapper(self):
            #forcibly close the fd before invoking the problem function
            f = self.f
            os.close(f.fileno())
            try:
                func(self, f)
            except IOError as e:
                self.assertEqual(e.errno, errno.EBADF)
            else:
                self.fail("Should have raised IOError")
            finally:
                try:
                    self.f.close()
                except IOError:
                    pass
        return wrapper

    @ClosedFDRaises
    def testErrnoOnClose(self, f):
        f.close()

    @ClosedFDRaises
    def testErrnoOnClosedWrite(self, f):
        f.write('a')

    @ClosedFDRaises
    def testErrnoOnClosedSeek(self, f):
        f.seek(0)

    @ClosedFDRaises
    def testErrnoOnClosedTell(self, f):
        f.tell()

    @ClosedFDRaises
    def testErrnoOnClosedTruncate(self, f):
        f.truncate(0)

    @ClosedFD
    def testErrnoOnClosedSeekable(self, f):
        f.seekable()

    @ClosedFD
    def testErrnoOnClosedReadable(self, f):
        f.readable()

    @ClosedFD
    def testErrnoOnClosedWritable(self, f):
        f.writable()

    @ClosedFD
    def testErrnoOnClosedFileno(self, f):
        f.fileno()

    @ClosedFD
    def testErrnoOnClosedIsatty(self, f):
        self.assertEqual(f.isatty(), False)

    def ReopenForRead(self):
        # Helper: reopen TESTFN for reading, then close its raw fd so the
        # subsequent operation hits EBADF.
        try:
            self.f.close()
        except IOError:
            pass
        self.f = _FileIO(TESTFN, 'r')
        os.close(self.f.fileno())
        return self.f

    @ClosedFDRaises
    def testErrnoOnClosedRead(self, f):
        f = self.ReopenForRead()
        f.read(1)

    @ClosedFDRaises
    def testErrnoOnClosedReadall(self, f):
        f = self.ReopenForRead()
        f.readall()

    @ClosedFDRaises
    def testErrnoOnClosedReadinto(self, f):
        f = self.ReopenForRead()
        a = array(b'b', b'x'*10)
        f.readinto(a)
class OtherFileTests(unittest.TestCase):
    """FileIO tests that manage their own file objects (no shared setUp)."""

    def testAbles(self):
        # readable()/writable()/seekable() must reflect the open mode.
        try:
            f = _FileIO(TESTFN, "w")
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "r")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            f.close()

            f = _FileIO(TESTFN, "a+")
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.assertEqual(f.isatty(), False)
            f.close()
        finally:
            os.unlink(TESTFN)

    @unittest.skipIf(sys.platform == 'win32', 'no ttys on Windows')
    def testAblesOnTTY(self):
        try:
            f = _FileIO("/dev/tty", "a")
        except EnvironmentError:
            # When run in a cron job there just aren't any
            # ttys, so skip the test.  This also handles other
            # OS'es that don't support /dev/tty.
            self.skipTest('need /dev/tty')
        else:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            if sys.platform != "darwin" and \
               'bsd' not in sys.platform and \
               not sys.platform.startswith('sunos'):
                # Somehow /dev/tty appears seekable on some BSDs
                self.assertEqual(f.seekable(), False)
            self.assertEqual(f.isatty(), True)
            f.close()

    def testInvalidModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+", "rw", "rt"):
            try:
                f = _FileIO(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testModeStrings(self):
        # test that the mode attribute is correct for various mode strings
        # given as init args
        try:
            for modes in [('w', 'wb'), ('wb', 'wb'), ('wb+', 'rb+'),
                          ('w+b', 'rb+'), ('a', 'ab'), ('ab', 'ab'),
                          ('ab+', 'ab+'), ('a+b', 'ab+'), ('r', 'rb'),
                          ('rb', 'rb'), ('rb+', 'rb+'), ('r+b', 'rb+')]:
                # read modes are last so that TESTFN will exist first
                with _FileIO(TESTFN, modes[0]) as f:
                    self.assertEqual(f.mode, modes[1])
        finally:
            if os.path.exists(TESTFN):
                os.unlink(TESTFN)

    def testUnicodeOpen(self):
        # verify repr works for unicode too
        f = _FileIO(str(TESTFN), "w")
        f.close()
        os.unlink(TESTFN)

    def testBytesOpen(self):
        # Opening a bytes filename
        try:
            fn = TESTFN.encode("ascii")
        except UnicodeEncodeError:
            self.skipTest('could not encode %r to ascii' % TESTFN)
        f = _FileIO(fn, "w")
        try:
            f.write(b"abc")
            f.close()
            with open(TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"abc")
        finally:
            os.unlink(TESTFN)

    def testInvalidFd(self):
        # Negative and stale fds must be rejected at construction time.
        self.assertRaises(ValueError, _FileIO, -10)
        self.assertRaises(OSError, _FileIO, make_bad_fd())
        if sys.platform == 'win32':
            import msvcrt
            self.assertRaises(IOError, msvcrt.get_osfhandle, make_bad_fd())

    @cpython_only
    def testInvalidFd_overflow(self):
        # Issue 15989: fds outside the C int range must raise, not wrap.
        import _testcapi
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MAX + 1)
        self.assertRaises(TypeError, _FileIO, _testcapi.INT_MIN - 1)

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = _FileIO(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testTruncate(self):
        f = _FileIO(TESTFN, 'w')
        f.write(bytes(bytearray(range(10))))
        self.assertEqual(f.tell(), 10)
        # truncate() changes the size but must NOT move the file position.
        f.truncate(5)
        self.assertEqual(f.tell(), 10)
        self.assertEqual(f.seek(0, os.SEEK_END), 5)
        f.truncate(15)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.seek(0, os.SEEK_END), 15)
        f.close()

    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = _FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()

            f = _FileIO(TESTFN, 'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            # Argument-less truncate() cuts at the current position (5).
            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        try:
            bug801631()
        finally:
            os.unlink(TESTFN)

    def testAppend(self):
        # 'ab' mode must append to, not overwrite, existing content.
        try:
            f = open(TESTFN, 'wb')
            f.write(b'spam')
            f.close()
            f = open(TESTFN, 'ab')
            f.write(b'eggs')
            f.close()
            f = open(TESTFN, 'rb')
            d = f.read()
            f.close()
            self.assertEqual(d, b'spameggs')
        finally:
            try:
                os.unlink(TESTFN)
            except:
                pass

    def testInvalidInit(self):
        self.assertRaises(TypeError, _FileIO, "1", 0, 0)

    def testWarnings(self):
        # Bad constructor arguments must raise without emitting warnings.
        with check_warnings(quiet=True) as w:
            self.assertEqual(w.warnings, [])
            self.assertRaises(TypeError, _FileIO, [])
            self.assertEqual(w.warnings, [])
            self.assertRaises(ValueError, _FileIO, "/some/invalid/name", "rt")
            self.assertEqual(w.warnings, [])

    def test_surrogates(self):
        # Issue #8438: try to open a filename containing surrogates.
        # It should either fail because the file doesn't exist or the filename
        # can't be represented using the filesystem encoding, but not because
        # of a LookupError for the error handler "surrogateescape".
        filename = u'\udc80.txt'
        try:
            with _FileIO(filename):
                pass
        except (UnicodeEncodeError, IOError):
            pass
        # Spawn a separate Python process with a different "file system
        # default encoding", to exercise this further.
        env = dict(os.environ)
        env[b'LC_CTYPE'] = b'C'
        _, out = run_python('-c', 'import _io; _io.FileIO(%r)' % filename, env=env)
        if ('UnicodeEncodeError' not in out and not
            ( ('IOError: [Errno 2] No such file or directory' in out) or
              ('IOError: [Errno 22] Invalid argument' in out) ) ):
            self.fail('Bad output: %r' % out)

    def testUnclosedFDOnException(self):
        # If __init__ dies after the fd was adopted, the fd must still be
        # released rather than leaked or left half-owned.
        class MyException(Exception): pass
        class MyFileIO(_FileIO):
            def __setattr__(self, name, value):
                if name == "name":
                    raise MyException("blocked setting name")
                return super(MyFileIO, self).__setattr__(name, value)
        fd = os.open(__file__, os.O_RDONLY)
        self.assertRaises(MyException, MyFileIO, fd)
        os.close(fd)  # should not raise OSError(EBADF)
def test_main():
    """Run both test classes, then make sure TESTFN is gone."""
    # Historically, these tests have been sloppy about removing TESTFN.
    # So get rid of it no matter what.
    try:
        run_unittest(AutoFileTests, OtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)

if __name__ == '__main__':
    test_main()
|
mit
|
bixbydev/Bixby
|
google/dist/gdata-2.0.18/tests/gdata_tests/spreadsheets/live_client_test.py
|
23
|
10326
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import gdata.spreadsheets.client
import gdata.gauth
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
import gdata.test_config as conf
# Register the config/command-line option naming the spreadsheet used by
# the live tests below.
conf.options.register_option(conf.SPREADSHEET_ID_OPTION)
class SpreadsheetsClientTest(unittest.TestCase):
    """Live (network) tests for gdata.spreadsheets.client.SpreadsheetsClient.

    Every test is a no-op unless the test configuration enables live runs
    ('runlive' == 'true'); requests may also be replayed from recordings
    via conf.configure_cache.

    Fix: the deprecated unittest aliases assert_/assertEquals (removed in
    Python 3.12) are replaced by assertTrue/assertEqual and the richer
    assertIsInstance/assertIsNone/assertIsNotNone, consistent with the
    assertEqual calls already used in this class.
    """

    def setUp(self):
        # Only build a real client when live tests are enabled.
        self.client = None
        if conf.options.get_value('runlive') == 'true':
            self.client = gdata.spreadsheets.client.SpreadsheetsClient()
            conf.configure_client(self.client, 'SpreadsheetsClientTest', 'wise')

    def tearDown(self):
        conf.close_client(self.client)

    def test_create_update_delete_worksheet(self):
        """Add a worksheet, verify the count went up, delete it again."""
        if not conf.options.get_value('runlive') == 'true':
            return
        # Either load the recording or prepare to make a live request.
        conf.configure_cache(self.client, 'test_create_update_delete_worksheet')

        spreadsheet_id = conf.options.get_value('spreadsheetid')
        original_worksheets = self.client.get_worksheets(spreadsheet_id)
        self.assertIsInstance(original_worksheets,
                              gdata.spreadsheets.data.WorksheetsFeed)
        worksheet_count = int(original_worksheets.total_results.text)

        # Add a new worksheet to the spreadsheet.
        created = self.client.add_worksheet(
            spreadsheet_id, 'a test worksheet', 4, 8)
        self.assertIsInstance(created,
                              gdata.spreadsheets.data.WorksheetEntry)
        self.assertEqual(created.title.text, 'a test worksheet')
        self.assertEqual(created.row_count.text, '4')
        self.assertEqual(created.col_count.text, '8')

        # There should now be one more worksheet in this spreadsheet.
        updated_worksheets = self.client.get_worksheets(spreadsheet_id)
        new_worksheet_count = int(updated_worksheets.total_results.text)
        self.assertEqual(worksheet_count + 1, new_worksheet_count)

        # Delete our test worksheet.
        self.client.delete(created)

        # We should be back to the original number of worksheets.
        updated_worksheets = self.client.get_worksheets(spreadsheet_id)
        new_worksheet_count = int(updated_worksheets.total_results.text)
        self.assertEqual(worksheet_count, new_worksheet_count)

    def test_create_update_delete_table_and_records(self):
        """Create a table in a scratch worksheet, add records, clean up."""
        if not conf.options.get_value('runlive') == 'true':
            return
        # Either load the recording or prepare to make a live request.
        conf.configure_cache(
            self.client, 'test_create_update_delete_table_and_records')

        spreadsheet_id = conf.options.get_value('spreadsheetid')
        tables = self.client.get_tables(spreadsheet_id)
        test_worksheet = self.client.add_worksheet(
            spreadsheet_id, 'worksheet x', rows=30, cols=3)
        self.assertIsInstance(tables, gdata.spreadsheets.data.TablesFeed)
        initial_count = tables.total_results.text

        created_table = self.client.add_table(
            spreadsheet_id, 'Test Table', 'This table is for testing',
            'worksheet x', header_row=5, num_rows=10, start_row=8,
            insertion_mode=None,
            column_headers={'B': 'Food', 'C': 'Drink', 'A': 'Price'})

        # Re-get the list of tables and make sure there are more now.
        updated_tables = self.client.get_tables(spreadsheet_id)
        self.assertEqual(int(initial_count) + 1,
                         int(updated_tables.total_results.text))

        # Get the records in our new table to make sure it has the correct
        # number of records.
        table_num = int(created_table.get_table_id())
        starting_records = self.client.get_records(spreadsheet_id, table_num)
        self.assertEqual(starting_records.total_results.text, '10')
        # A freshly created table starts with empty fields.
        self.assertIsNone(starting_records.entry[0].field[0].text)
        self.assertIsNone(starting_records.entry[0].field[1].text)
        self.assertIsNone(starting_records.entry[1].field[0].text)
        self.assertIsNone(starting_records.entry[1].field[1].text)

        record1 = self.client.add_record(
            spreadsheet_id, table_num,
            {'Food': 'Cheese', 'Drink': 'Soda', 'Price': '2.99'}, 'icky')
        self.client.add_record(spreadsheet_id, table_num,
                               {'Food': 'Eggs', 'Drink': 'Milk'})
        self.client.add_record(spreadsheet_id, table_num,
                               {'Food': 'Spinach', 'Drink': 'Water'})

        # New records land after the 10 empty starter rows (indices 10-12).
        updated_records = self.client.get_records(spreadsheet_id, table_num)
        self.assertEqual(updated_records.entry[10].value_for_name('Price'), '2.99')
        self.assertEqual(updated_records.entry[10].value_for_index('A'), '2.99')
        self.assertEqual(updated_records.entry[10].value_for_name('Drink'),
                         'Soda')
        self.assertIsNone(updated_records.entry[11].value_for_name('Price'))
        self.assertEqual(updated_records.entry[11].value_for_name('Drink'),
                         'Milk')
        self.assertEqual(updated_records.entry[12].value_for_name('Drink'),
                         'Water')
        self.assertIsNone(updated_records.entry[1].value_for_index('A'))
        self.assertIsNone(updated_records.entry[2].value_for_index('B'))
        self.assertIsNone(updated_records.entry[3].value_for_index('C'))

        # Cleanup the table.
        self.client.delete(created_table)
        # Delete the test worksheet in which the table was placed.
        self.client.delete(test_worksheet)

        # Make sure we are back to the original count.
        updated_tables = self.client.get_tables(spreadsheet_id)
        self.assertEqual(int(initial_count),
                         int(updated_tables.total_results.text))

    def test_get_and_update_cell(self):
        """Round-trip a single cell value through the cells feed."""
        if not conf.options.get_value('runlive') == 'true':
            return
        # Either load the recording or prepare to make a live request.
        conf.configure_cache(self.client, 'test_get_and_update_cell')

        spreadsheet_id = conf.options.get_value('spreadsheetid')
        test_worksheet = self.client.add_worksheet(
            spreadsheet_id, 'worksheet x', rows=30, cols=3)

        # Get a cell and set its value.
        cell_entry = self.client.get_cell(
            spreadsheet_id, test_worksheet.get_worksheet_id(), 1, 1)
        cell_entry.cell.input_value = 'a test'
        result = self.client.update(cell_entry)
        self.assertEqual(cell_entry.cell.input_value, result.cell.input_value)

        # Verify that the value was set.
        cells = self.client.get_cells(
            spreadsheet_id, test_worksheet.get_worksheet_id())
        self.assertEqual(len(cells.entry), 1)
        self.assertEqual(cells.entry[0].cell.input_value, 'a test')

        # Delete the test worksheet.
        self.client.delete(test_worksheet, force=True)

    def set_cell(self, spreadsheet_id, worksheet_id, row, column, value):
        """Helper: fetch one cell and overwrite its input value."""
        cell_entry = self.client.get_cell(
            spreadsheet_id, worksheet_id, row, column)
        self.assertIsNotNone(cell_entry)
        cell_entry.cell.input_value = value
        self.assertIsNotNone(self.client.update(cell_entry))

    def test_batch_set_cells(self):
        """Set a literal and a formula via one batch request."""
        if not conf.options.get_value('runlive') == 'true':
            return
        # Either load the recording or prepare to make a live request.
        conf.configure_cache(self.client, 'test_get_and_update_cell')

        spreadsheet_id = conf.options.get_value('spreadsheetid')
        test_worksheet = self.client.add_worksheet(
            spreadsheet_id, 'worksheet x', rows=30, cols=3)

        # Set a couple of cells in a batch request.
        feed = gdata.spreadsheets.data.build_batch_cells_update(
            spreadsheet_id, test_worksheet.get_worksheet_id())
        feed.add_set_cell(1, 1, '5')
        feed.add_set_cell(1, 2, '=A1+2')
        result = self.client.batch(feed, force=True)
        self.assertEqual(result.entry[0].cell.text, '5')
        # The formula must have been evaluated server-side.
        self.assertEqual(result.entry[1].cell.text, '7')

        # Delete the test worksheet.
        self.client.delete(test_worksheet, force=True)

    def test_crud_on_list_feed(self):
        """Create, update and delete a row through the list feed."""
        if not conf.options.get_value('runlive') == 'true':
            return
        # Either load the recording or prepare to make a live request.
        conf.configure_cache(self.client, 'test_crud_on_list_feed')

        spreadsheet_id = conf.options.get_value('spreadsheetid')
        test_worksheet = self.client.add_worksheet(
            spreadsheet_id, 'worksheet x', rows=30, cols=3)
        worksheet_id = test_worksheet.get_worksheet_id()

        # Create the first column to provide row headings.
        self.set_cell(spreadsheet_id, worksheet_id, 1, 1, 'cola')
        self.set_cell(spreadsheet_id, worksheet_id, 1, 2, 'colb')
        self.set_cell(spreadsheet_id, worksheet_id, 1, 3, 'colc')

        # Add a row to the spreadsheet.
        entry = gdata.spreadsheets.data.ListEntry()
        entry.from_dict({'cola': 'alpha', 'colb': 'beta', 'colc': 'gamma'})
        added = self.client.add_list_entry(entry, spreadsheet_id, worksheet_id)
        self.assertIsInstance(added, gdata.spreadsheets.data.ListEntry)
        self.assertEqual(added.get_value('cola'), 'alpha')

        # Update the row.
        added.from_dict({'cola': '1', 'colb': '2', 'colc': '3'})
        updated = self.client.update(added)
        self.assertIsInstance(updated, gdata.spreadsheets.data.ListEntry)
        self.assertEqual(updated.get_value('cola'), '1')

        # Check the number of rows.
        rows = self.client.get_list_feed(spreadsheet_id, worksheet_id)
        self.assertEqual(len(rows.entry), 1)

        # Remove the row.
        self.client.delete(updated)

        # Check that it was removed.
        rows = self.client.get_list_feed(spreadsheet_id, worksheet_id)
        self.assertEqual(len(rows.entry), 0)

        # Delete the test worksheet.
        self.client.delete(test_worksheet, force=True)
def suite():
    """Build the unittest suite for this module."""
    return conf.build_suite([SpreadsheetsClientTest])

if __name__ == '__main__':
    unittest.TextTestRunner().run(suite())
|
gpl-3.0
|
SNoiraud/gramps
|
gramps/plugins/db/dbapi/test/db_test.py
|
8
|
33651
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2016 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import unittest
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.db import DbTxn
from gramps.gen.db.utils import make_database
from gramps.gen.lib import (Person, Family, Event, Place, Repository, Source,
Citation, Media, Note, Tag, Researcher, Surname)
#-------------------------------------------------------------------------
#
# DbRandomTest class
#
#-------------------------------------------------------------------------
class DbRandomTest(unittest.TestCase):
'''
Tests with random objects.
'''
@classmethod
def setUpClass(cls):
    # One in-memory SQLite database shared by every test in the class.
    cls.db = make_database("sqlite")
    cls.db.load(":memory:")
def setUp(self):
    """Populate the database with ten objects of each primary type."""
    # Per-type handle lists let tearDown delete exactly what we created;
    # gids mirror them for the gramps-id tests (Tag has no gramps id).
    self.handles = {'Person': [], 'Family': [], 'Event': [], 'Place': [],
                    'Repository': [], 'Source': [], 'Citation': [],
                    'Media': [], 'Note': [], 'Tag': []}
    self.gids = {'Person': [], 'Family': [], 'Event': [], 'Place': [],
                 'Repository': [], 'Source': [], 'Citation': [],
                 'Media': [], 'Note': []}
    with DbTxn('Add test objects', self.db) as trans:
        for i in range(10):
            self.__add_object(Person, self.db.add_person, trans)
            self.__add_object(Family, self.db.add_family, trans)
            self.__add_object(Event, self.db.add_event, trans)
            self.__add_object(Place, self.db.add_place, trans)
            self.__add_object(Repository, self.db.add_repository, trans)
            self.__add_object(Source, self.db.add_source, trans)
            self.__add_object(Citation, self.db.add_citation, trans)
            self.__add_object(Media, self.db.add_media, trans)
            self.__add_object(Note, self.db.add_note, trans)
            self.__add_object(Tag, self.db.add_tag, trans)
def tearDown(self):
    """Delete every object created in setUp, in one transaction."""
    # Same removal order as before: all Persons, then Families, etc.
    removers = [
        ('Person', self.db.remove_person),
        ('Family', self.db.remove_family),
        ('Event', self.db.remove_event),
        ('Place', self.db.remove_place),
        ('Repository', self.db.remove_repository),
        ('Source', self.db.remove_source),
        ('Citation', self.db.remove_citation),
        ('Media', self.db.remove_media),
        ('Note', self.db.remove_note),
        ('Tag', self.db.remove_tag),
    ]
    with DbTxn('Remove test objects', self.db) as trans:
        for obj_type, remove in removers:
            for handle in self.handles[obj_type]:
                remove(handle, trans)
def __add_object(self, obj_class, add_func, trans):
    """Create an empty obj_class instance, add it with add_func and
    record its handle (and gramps id, except for Tag)."""
    obj = obj_class()
    if obj_class == Tag:
        # Tags need a unique name and carry no gramps id.
        obj.name = 'Tag%s' % len(self.handles['Tag'])
    handle = add_func(obj, trans)
    self.handles[obj_class.__name__].append(handle)
    if obj_class != Tag:
        self.gids[obj_class.__name__].append(obj.gramps_id)
################################################################
#
# Test get_number_of_* methods
#
################################################################
# Each count reported by the database must match what setUp inserted.

def test_number_of_people(self):
    self.assertEqual(self.db.get_number_of_people(),
                     len(self.handles['Person']))

def test_number_of_families(self):
    self.assertEqual(self.db.get_number_of_families(),
                     len(self.handles['Family']))

def test_number_of_events(self):
    self.assertEqual(self.db.get_number_of_events(),
                     len(self.handles['Event']))

def test_number_of_places(self):
    self.assertEqual(self.db.get_number_of_places(),
                     len(self.handles['Place']))

def test_number_of_repositories(self):
    self.assertEqual(self.db.get_number_of_repositories(),
                     len(self.handles['Repository']))

def test_number_of_sources(self):
    self.assertEqual(self.db.get_number_of_sources(),
                     len(self.handles['Source']))

def test_number_of_citations(self):
    self.assertEqual(self.db.get_number_of_citations(),
                     len(self.handles['Citation']))

def test_number_of_media(self):
    self.assertEqual(self.db.get_number_of_media(),
                     len(self.handles['Media']))

def test_number_of_notes(self):
    self.assertEqual(self.db.get_number_of_notes(),
                     len(self.handles['Note']))

def test_number_of_tags(self):
    self.assertEqual(self.db.get_number_of_tags(),
                     len(self.handles['Tag']))
################################################################
#
# Test get_*_handles methods
#
################################################################
def __get_handles_test(self, obj_type, handles_func, number_func,
                       sort_handles=False):
    """Shared check: handles_func returns exactly as many handles as
    number_func reports, and every one was added in setUp."""
    if sort_handles:
        handles = handles_func(sort_handles=True)
    else:
        handles = handles_func()
    self.assertEqual(len(handles), number_func())
    for handle in handles:
        self.assertIn(handle, self.handles[obj_type])

def test_get_person_handles(self):
    self.__get_handles_test('Person',
                            self.db.get_person_handles,
                            self.db.get_number_of_people)

def test_get_family_handles(self):
    self.__get_handles_test('Family',
                            self.db.get_family_handles,
                            self.db.get_number_of_families)

def test_get_event_handles(self):
    self.__get_handles_test('Event',
                            self.db.get_event_handles,
                            self.db.get_number_of_events)

def test_get_place_handles(self):
    self.__get_handles_test('Place',
                            self.db.get_place_handles,
                            self.db.get_number_of_places)

def test_get_repository_handles(self):
    self.__get_handles_test('Repository',
                            self.db.get_repository_handles,
                            self.db.get_number_of_repositories)

def test_get_source_handles(self):
    self.__get_handles_test('Source',
                            self.db.get_source_handles,
                            self.db.get_number_of_sources)

def test_get_citation_handles(self):
    self.__get_handles_test('Citation',
                            self.db.get_citation_handles,
                            self.db.get_number_of_citations)

def test_get_media_handles(self):
    self.__get_handles_test('Media',
                            self.db.get_media_handles,
                            self.db.get_number_of_media)

def test_get_note_handles(self):
    self.__get_handles_test('Note',
                            self.db.get_note_handles,
                            self.db.get_number_of_notes)

def test_get_tag_handles(self):
    self.__get_handles_test('Tag',
                            self.db.get_tag_handles,
                            self.db.get_number_of_tags)

# The sort_handles variants only exist for the types whose get_*_handles
# accept a sort_handles keyword.

def test_get_person_handles_sort(self):
    self.__get_handles_test('Person',
                            self.db.get_person_handles,
                            self.db.get_number_of_people,
                            sort_handles=True)

def test_get_family_handles_sort(self):
    self.__get_handles_test('Family',
                            self.db.get_family_handles,
                            self.db.get_number_of_families,
                            sort_handles=True)

def test_get_place_handles_sort(self):
    self.__get_handles_test('Place',
                            self.db.get_place_handles,
                            self.db.get_number_of_places,
                            sort_handles=True)

def test_get_source_handles_sort(self):
    self.__get_handles_test('Source',
                            self.db.get_source_handles,
                            self.db.get_number_of_sources,
                            sort_handles=True)

def test_get_citation_handles_sort(self):
    self.__get_handles_test('Citation',
                            self.db.get_citation_handles,
                            self.db.get_number_of_citations,
                            sort_handles=True)

def test_get_media_handles_sort(self):
    self.__get_handles_test('Media',
                            self.db.get_media_handles,
                            self.db.get_number_of_media,
                            sort_handles=True)

def test_get_tag_handles_sort(self):
    self.__get_handles_test('Tag',
                            self.db.get_tag_handles,
                            self.db.get_number_of_tags,
                            sort_handles=True)
################################################################
#
# Test get_*_gramps_ids methods
#
################################################################
def __get_gids_test(self, obj_type, gids_func, number_func):
    """Shared check: gids_func returns exactly the gramps ids recorded
    in setUp for obj_type (Tag excluded — it has no gramps id)."""
    gids = gids_func()
    self.assertEqual(len(gids), number_func())
    for gid in gids:
        self.assertIn(gid, self.gids[obj_type])

def test_get_person_gids(self):
    self.__get_gids_test('Person',
                         self.db.get_person_gramps_ids,
                         self.db.get_number_of_people)

def test_get_family_gids(self):
    self.__get_gids_test('Family',
                         self.db.get_family_gramps_ids,
                         self.db.get_number_of_families)

def test_get_event_gids(self):
    self.__get_gids_test('Event',
                         self.db.get_event_gramps_ids,
                         self.db.get_number_of_events)

def test_get_place_gids(self):
    self.__get_gids_test('Place',
                         self.db.get_place_gramps_ids,
                         self.db.get_number_of_places)

def test_get_repository_gids(self):
    self.__get_gids_test('Repository',
                         self.db.get_repository_gramps_ids,
                         self.db.get_number_of_repositories)

def test_get_source_gids(self):
    self.__get_gids_test('Source',
                         self.db.get_source_gramps_ids,
                         self.db.get_number_of_sources)

def test_get_citation_gids(self):
    self.__get_gids_test('Citation',
                         self.db.get_citation_gramps_ids,
                         self.db.get_number_of_citations)

def test_get_media_gids(self):
    self.__get_gids_test('Media',
                         self.db.get_media_gramps_ids,
                         self.db.get_number_of_media)

def test_get_note_gids(self):
    self.__get_gids_test('Note',
                         self.db.get_note_gramps_ids,
                         self.db.get_number_of_notes)
################################################################
#
# Test get_*_from_handle methods
#
################################################################
def __get_from_handle_test(self, obj_class, handles_func, get_func):
    """Shared check: get_func round-trips each known handle into an
    obj_class instance carrying that handle."""
    for handle in handles_func():
        # NOTE: the local is named 'person' but holds whatever obj_class is.
        person = get_func(handle)
        self.assertIsInstance(person, obj_class)
        self.assertEqual(person.handle, handle)

def test_get_person_from_handle(self):
    self.__get_from_handle_test(Person,
                                self.db.get_person_handles,
                                self.db.get_person_from_handle)

def test_get_family_from_handle(self):
    self.__get_from_handle_test(Family,
                                self.db.get_family_handles,
                                self.db.get_family_from_handle)

def test_get_event_from_handle(self):
    self.__get_from_handle_test(Event,
                                self.db.get_event_handles,
                                self.db.get_event_from_handle)

def test_get_place_from_handle(self):
    self.__get_from_handle_test(Place,
                                self.db.get_place_handles,
                                self.db.get_place_from_handle)

def test_get_repository_from_handle(self):
    self.__get_from_handle_test(Repository,
                                self.db.get_repository_handles,
                                self.db.get_repository_from_handle)

def test_get_source_from_handle(self):
    self.__get_from_handle_test(Source,
                                self.db.get_source_handles,
                                self.db.get_source_from_handle)

def test_get_citation_from_handle(self):
    self.__get_from_handle_test(Citation,
                                self.db.get_citation_handles,
                                self.db.get_citation_from_handle)

def test_get_media_from_handle(self):
    self.__get_from_handle_test(Media,
                                self.db.get_media_handles,
                                self.db.get_media_from_handle)

def test_get_note_from_handle(self):
    self.__get_from_handle_test(Note,
                                self.db.get_note_handles,
                                self.db.get_note_from_handle)

def test_get_tag_from_handle(self):
    self.__get_from_handle_test(Tag,
                                self.db.get_tag_handles,
                                self.db.get_tag_from_handle)
################################################################
#
# Test get_*_from_gramps_id methods
#
################################################################
def __get_from_gid_test(self, obj_class, gids_func, get_func):
    """Shared check: get_func round-trips each known gramps id into an
    obj_class instance carrying that id."""
    for gid in gids_func():
        # NOTE: the local is named 'person' but holds whatever obj_class is.
        person = get_func(gid)
        self.assertIsInstance(person, obj_class)
        self.assertEqual(person.gramps_id, gid)

def test_get_person_from_gid(self):
    self.__get_from_gid_test(Person,
                             self.db.get_person_gramps_ids,
                             self.db.get_person_from_gramps_id)

def test_get_family_from_gid(self):
    self.__get_from_gid_test(Family,
                             self.db.get_family_gramps_ids,
                             self.db.get_family_from_gramps_id)

def test_get_event_from_gid(self):
    self.__get_from_gid_test(Event,
                             self.db.get_event_gramps_ids,
                             self.db.get_event_from_gramps_id)

def test_get_place_from_gid(self):
    self.__get_from_gid_test(Place,
                             self.db.get_place_gramps_ids,
                             self.db.get_place_from_gramps_id)

def test_get_repository_from_gid(self):
    self.__get_from_gid_test(Repository,
                             self.db.get_repository_gramps_ids,
                             self.db.get_repository_from_gramps_id)

def test_get_source_from_gid(self):
    self.__get_from_gid_test(Source,
                             self.db.get_source_gramps_ids,
                             self.db.get_source_from_gramps_id)

def test_get_citation_from_gid(self):
    self.__get_from_gid_test(Citation,
                             self.db.get_citation_gramps_ids,
                             self.db.get_citation_from_gramps_id)

def test_get_media_from_gid(self):
    self.__get_from_gid_test(Media,
                             self.db.get_media_gramps_ids,
                             self.db.get_media_from_gramps_id)

def test_get_note_from_gid(self):
    self.__get_from_gid_test(Note,
                             self.db.get_note_gramps_ids,
                             self.db.get_note_from_gramps_id)

def test_get_tag_from_name(self):
    # Tags are looked up by name, not gramps id.
    tag = self.db.get_tag_from_name('Tag0')
    self.assertEqual(tag.handle, self.handles['Tag'][0])
################################################################
#
# Test has_*_handle methods
#
################################################################
# has_*_handle must be true for every handle created in setUp.

def test_has_person_handle(self):
    for handle in self.handles['Person']:
        self.assertTrue(self.db.has_person_handle(handle))

def test_has_family_handle(self):
    for handle in self.handles['Family']:
        self.assertTrue(self.db.has_family_handle(handle))

def test_has_event_handle(self):
    for handle in self.handles['Event']:
        self.assertTrue(self.db.has_event_handle(handle))

def test_has_place_handle(self):
    for handle in self.handles['Place']:
        self.assertTrue(self.db.has_place_handle(handle))

def test_has_repository_handle(self):
    for handle in self.handles['Repository']:
        self.assertTrue(self.db.has_repository_handle(handle))

def test_has_source_handle(self):
    for handle in self.handles['Source']:
        self.assertTrue(self.db.has_source_handle(handle))

def test_has_citation_handle(self):
    for handle in self.handles['Citation']:
        self.assertTrue(self.db.has_citation_handle(handle))

def test_has_media_handle(self):
    for handle in self.handles['Media']:
        self.assertTrue(self.db.has_media_handle(handle))

def test_has_note_handle(self):
    for handle in self.handles['Note']:
        self.assertTrue(self.db.has_note_handle(handle))

def test_has_tag_handle(self):
    for handle in self.handles['Tag']:
        self.assertTrue(self.db.has_tag_handle(handle))
################################################################
#
# Test has_*_gramps_id methods
#
################################################################
# has_*_gramps_id must be true for every gramps id recorded in setUp
# (Tag is excluded — it has no gramps id).

def test_has_person_gramps_id(self):
    for gramps_id in self.gids['Person']:
        self.assertTrue(self.db.has_person_gramps_id(gramps_id))

def test_has_family_gramps_id(self):
    for gramps_id in self.gids['Family']:
        self.assertTrue(self.db.has_family_gramps_id(gramps_id))

def test_has_event_gramps_id(self):
    for gramps_id in self.gids['Event']:
        self.assertTrue(self.db.has_event_gramps_id(gramps_id))

def test_has_place_gramps_id(self):
    for gramps_id in self.gids['Place']:
        self.assertTrue(self.db.has_place_gramps_id(gramps_id))

def test_has_repository_gramps_id(self):
    for gramps_id in self.gids['Repository']:
        self.assertTrue(self.db.has_repository_gramps_id(gramps_id))

def test_has_source_gramps_id(self):
    for gramps_id in self.gids['Source']:
        self.assertTrue(self.db.has_source_gramps_id(gramps_id))

def test_has_citation_gramps_id(self):
    for gramps_id in self.gids['Citation']:
        self.assertTrue(self.db.has_citation_gramps_id(gramps_id))

def test_has_media_gramps_id(self):
    for gramps_id in self.gids['Media']:
        self.assertTrue(self.db.has_media_gramps_id(gramps_id))

def test_has_note_gramps_id(self):
    for gramps_id in self.gids['Note']:
        self.assertTrue(self.db.has_note_gramps_id(gramps_id))
################################################################
#
# Test get_*_cursor methods
#
################################################################
def __get_cursor_test(self, cursor_func, raw_func):
    """Shared check: every (handle, data) pair yielded by the cursor
    matches the raw data fetched directly for that handle."""
    with cursor_func() as cursor:
        for handle, data1 in cursor:
            data2 = raw_func(handle)
            self.assertEqual(data1, data2)

def test_get_person_cursor(self):
    self.__get_cursor_test(self.db.get_person_cursor,
                           self.db.get_raw_person_data)

def test_get_family_cursor(self):
    self.__get_cursor_test(self.db.get_family_cursor,
                           self.db.get_raw_family_data)

def test_get_event_cursor(self):
    self.__get_cursor_test(self.db.get_event_cursor,
                           self.db.get_raw_event_data)

def test_get_place_cursor(self):
    self.__get_cursor_test(self.db.get_place_cursor,
                           self.db.get_raw_place_data)

def test_get_repository_cursor(self):
    self.__get_cursor_test(self.db.get_repository_cursor,
                           self.db.get_raw_repository_data)

def test_get_source_cursor(self):
    self.__get_cursor_test(self.db.get_source_cursor,
                           self.db.get_raw_source_data)

def test_get_citation_cursor(self):
    self.__get_cursor_test(self.db.get_citation_cursor,
                           self.db.get_raw_citation_data)

def test_get_media_cursor(self):
    self.__get_cursor_test(self.db.get_media_cursor,
                           self.db.get_raw_media_data)

def test_get_note_cursor(self):
    self.__get_cursor_test(self.db.get_note_cursor,
                           self.db.get_raw_note_data)

def test_get_tag_cursor(self):
    self.__get_cursor_test(self.db.get_tag_cursor,
                           self.db.get_raw_tag_data)
################################################################
#
# Test iter_*_handles methods
#
################################################################
def __iter_handles_test(self, obj_type, iter_func):
    """Every handle produced by iter_func must be one of the known
    handles recorded for the given object type."""
    known_handles = self.handles[obj_type]
    for obj_handle in iter_func():
        self.assertIn(obj_handle, known_handles)
# Each test below checks one iter_*_handles() iterator against the known
# handles of the corresponding object type via the shared helper.
def test_iter_person_handles(self):
    self.__iter_handles_test('Person',
                             self.db.iter_person_handles)
def test_iter_family_handles(self):
    self.__iter_handles_test('Family',
                             self.db.iter_family_handles)
def test_iter_event_handles(self):
    self.__iter_handles_test('Event',
                             self.db.iter_event_handles)
def test_iter_place_handles(self):
    self.__iter_handles_test('Place',
                             self.db.iter_place_handles)
def test_iter_repository_handles(self):
    self.__iter_handles_test('Repository',
                             self.db.iter_repository_handles)
def test_iter_source_handles(self):
    self.__iter_handles_test('Source',
                             self.db.iter_source_handles)
def test_iter_citation_handles(self):
    self.__iter_handles_test('Citation',
                             self.db.iter_citation_handles)
def test_iter_media_handles(self):
    self.__iter_handles_test('Media',
                             self.db.iter_media_handles)
def test_iter_note_handles(self):
    self.__iter_handles_test('Note',
                             self.db.iter_note_handles)
def test_iter_tag_handles(self):
    self.__iter_handles_test('Tag',
                             self.db.iter_tag_handles)
################################################################
#
# Test iter_* methods
#
################################################################
def __iter_objects_test(self, obj_class, iter_func):
    """Every object yielded by iter_func must be an instance of obj_class."""
    for item in iter_func():
        self.assertIsInstance(item, obj_class)
# Each test below checks that one iter_*() iterator yields only instances
# of the expected primary-object class, via the shared helper.
def test_iter_people(self):
    self.__iter_objects_test(Person,
                             self.db.iter_people)
def test_iter_families(self):
    self.__iter_objects_test(Family,
                             self.db.iter_families)
def test_iter_events(self):
    self.__iter_objects_test(Event,
                             self.db.iter_events)
def test_iter_places(self):
    self.__iter_objects_test(Place,
                             self.db.iter_places)
def test_iter_repositories(self):
    self.__iter_objects_test(Repository,
                             self.db.iter_repositories)
def test_iter_sources(self):
    self.__iter_objects_test(Source,
                             self.db.iter_sources)
def test_iter_citations(self):
    self.__iter_objects_test(Citation,
                             self.db.iter_citations)
def test_iter_media(self):
    self.__iter_objects_test(Media,
                             self.db.iter_media)
def test_iter_notes(self):
    self.__iter_objects_test(Note,
                             self.db.iter_notes)
def test_iter_tags(self):
    self.__iter_objects_test(Tag,
                             self.db.iter_tags)
################################################################
#
# Test default and initial person methods
#
################################################################
# With no default person set, both getters return None, but
# find_initial_person() still produces some Person from the tree.
def test_no_default_handle(self):
    self.db.set_default_person_handle(None)
    handle = self.db.get_default_handle()
    self.assertIsNone(handle)
    person = self.db.get_default_person()
    self.assertIsNone(person)
    person = self.db.find_initial_person()
    self.assertIsInstance(person, Person)
# Once a default person is set, all three accessors agree on it.
def test_default_handle(self):
    default_handle = self.handles['Person'][0]
    self.db.set_default_person_handle(default_handle)
    handle = self.db.get_default_handle()
    self.assertEqual(handle, default_handle)
    person = self.db.get_default_person()
    self.assertEqual(person.handle, default_handle)
    person = self.db.find_initial_person()
    self.assertEqual(person.handle, default_handle)
################################################################
#
# Test get_total method
#
################################################################
def test_get_total(self):
total = sum([len(self.handles[obj_type])
for obj_type in self.handles.keys()])
self.assertEqual(self.db.get_total(), total)
#-------------------------------------------------------------------------
#
# DbEmptyTest class
#
#-------------------------------------------------------------------------
class DbEmptyTest(unittest.TestCase):
    '''
    Tests with an empty database.
    '''
    @classmethod
    def setUpClass(cls):
        # One in-memory SQLite database shared by all tests in this class;
        # no objects are ever added to it.
        cls.db = make_database("sqlite")
        cls.db.load(":memory:")
    ################################################################
    #
    # Test metadata methods
    #
    ################################################################
    def test_metadata(self):
        # Round-trip a key/value pair through the metadata store.
        self.db._set_metadata('test-key', 'test-value')
        value = self.db._get_metadata('test-key')
        self.assertEqual(value, 'test-value')
    def test_metadata_missing(self):
        # A missing key with no explicit default yields an empty list.
        value = self.db._get_metadata('missing-key')
        self.assertEqual(value, [])
    def test_metadata_default(self):
        # An explicit default is returned verbatim for a missing key.
        value = self.db._get_metadata('missing-key', default='default-value')
        self.assertEqual(value, 'default-value')
    ################################################################
    #
    # Test default and initial person methods
    #
    ################################################################
    def test_no_default_handle(self):
        # On an empty database even find_initial_person() has nothing to
        # fall back to, so all three accessors return None.
        handle = self.db.get_default_handle()
        self.assertIsNone(handle)
        person = self.db.get_default_person()
        self.assertIsNone(person)
        person = self.db.find_initial_person()
        self.assertIsNone(person)
    ################################################################
    #
    # Test researcher methods
    #
    ################################################################
    def test_researcher(self):
        # Populate every Researcher field, store it, and verify the
        # retrieved copy serializes identically.
        res1 = Researcher()
        res1.street = 'street'
        res1.locality = 'locality'
        res1.city = 'city'
        res1.county = 'county'
        res1.state = 'state'
        res1.country = 'country'
        res1.postal = 'postal'
        res1.phone = 'phone'
        res1.name = 'name'
        res1.addr = 'addr'
        res1.email = 'email'
        self.db.set_researcher(res1)
        res2 = self.db.get_researcher()
        self.assertEqual(res1.serialize(), res2.serialize())
    ################################################################
    #
    # Test name group mapping
    #
    ################################################################
    def test_name_group_mapping(self):
        # A stored mapping is visible via the key/lookup APIs; keys that
        # were never mapped report False.
        self.db.set_name_group_mapping('Clark', 'Clarke')
        self.assertTrue(self.db.has_name_group_key('Clark'))
        self.assertFalse(self.db.has_name_group_key('Baker'))
        for key in self.db.get_name_group_keys():
            self.assertTrue(self.db.has_name_group_key(key))
        mapping = self.db.get_name_group_mapping('Clark')
        self.assertEqual(mapping, 'Clarke')
    ################################################################
    #
    # Test get_total method
    #
    ################################################################
    def test_get_total(self):
        # An empty database holds zero objects in total.
        self.assertEqual(self.db.get_total(), 0)
#-------------------------------------------------------------------------
#
# DbPersonTest class
#
#-------------------------------------------------------------------------
class DbPersonTest(unittest.TestCase):
    '''
    Tests with some sample people.
    '''
    @classmethod
    def setUpClass(cls):
        # Fresh in-memory SQLite database shared by the tests; people are
        # added per-test in setUp and removed in tearDown.
        cls.db = make_database("sqlite")
        cls.db.load(":memory:")
    def __add_person(self, gender, first_name, surname, trans):
        # Helper: build a minimal Person with one primary surname and add
        # it to the database inside the supplied transaction.
        person = Person()
        person.gender = gender
        name = person.primary_name
        name.first_name = first_name
        surname1 = Surname()
        surname1.surname = surname
        name.set_surname_list([surname1])
        self.all_surnames.append(surname)
        self.db.add_person(person, trans)
    def setUp(self):
        self.all_surnames = []
        with DbTxn('Add test objects', self.db) as trans:
            self.__add_person(Person.MALE, 'John', 'Allen', trans)
            self.__add_person(Person.MALE, 'John', 'Baker', trans)
            self.__add_person(Person.MALE, 'John', 'Clark', trans)
            self.__add_person(Person.FEMALE, 'John', 'Davis', trans)
            self.__add_person(Person.UNKNOWN, 'John', 'Evans', trans)
            self.__add_person(Person.FEMALE, 'Mary', 'Allen', trans)
            self.__add_person(Person.FEMALE, 'Mary', 'Baker', trans)
            self.__add_person(Person.FEMALE, 'Mary', 'Clark', trans)
            self.__add_person(Person.MALE, 'Mary', 'Davis', trans)
            self.__add_person(Person.FEMALE, 'Mary', 'Evans', trans)
    def tearDown(self):
        # Remove every person added by setUp so the shared database is
        # clean for the next test.
        with DbTxn('Remove test objects', self.db) as trans:
            for handle in self.db.get_person_handles():
                self.db.remove_person(handle, trans)
    ################################################################
    #
    # Test surname list
    #
    ################################################################
    def test_surname_list(self):
        # Every surname reported by the database must be one that setUp added.
        surname_list = self.db.get_surname_list()
        for surname in surname_list:
            self.assertIn(surname, self.all_surnames)
    ################################################################
    #
    # Test gender stats
    #
    ################################################################
    def test_gender_stats(self):
        # The stat tuples are (male, female, unknown) counts per given
        # name, matching the fixture: John = 3 male, 1 female, 1 unknown;
        # Mary = 1 male, 4 female, 0 unknown.
        stats = self.db.genderStats
        self.assertEqual(stats.name_stats('John'), (3, 1, 1))
        self.assertEqual(stats.name_stats('Mary'), (1, 4, 0))
        # Stats must survive a save/reload round trip.
        self.db.save_gender_stats(stats)
        saved = self.db.get_gender_stats()
        self.assertEqual(saved['John'], (3, 1, 1))
        self.assertEqual(saved['Mary'], (1, 4, 0))
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
|
gpl-2.0
|
atruberg/django-custom
|
tests/mail/tests.py
|
48
|
36821
|
# coding: utf-8
from __future__ import unicode_literals
import asyncore
from email.mime.text import MIMEText
import os
import shutil
import smtpd
import sys
import tempfile
import threading
from django.core import mail
from django.core.mail import (EmailMessage, mail_admins, mail_managers,
EmailMultiAlternatives, send_mail, send_mass_mail)
from django.core.mail.backends import console, dummy, locmem, filebased, smtp
from django.core.mail.message import BadHeaderError
from django.test import SimpleTestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text, force_bytes
from django.utils.six import PY3, StringIO, binary_type
from django.utils.translation import ugettext_lazy
if PY3:
from email.utils import parseaddr
from email import message_from_bytes, message_from_binary_file
else:
from email.Utils import parseaddr
from email import (message_from_string as message_from_bytes,
message_from_file as message_from_binary_file)
class HeadersCheckMixin(object):
    def assertMessageHasHeaders(self, message, headers):
        """
        Check that :param message: has all :param headers: headers.
        :param message: can be an instance of an email.Message subclass or a
        string with the contents of an email message.
        :param headers: should be a set of (header-name, header-value) tuples.
        """
        # Parse raw bytes into a Message object first so .items() works
        # uniformly on both accepted input forms.
        if isinstance(message, binary_type):
            message = message_from_bytes(message)
        msg_headers = set(message.items())
        self.assertTrue(headers.issubset(msg_headers), msg='Message is missing '
            'the following headers: %s' % (headers - msg_headers),)
class MailTests(HeadersCheckMixin, SimpleTestCase):
    """
    Non-backend specific tests.
    """
    def test_ascii(self):
        # Plain-ASCII message: headers and payload pass through unchanged.
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
        message = email.message()
        self.assertEqual(message['Subject'], 'Subject')
        self.assertEqual(message.get_payload(), 'Content')
        self.assertEqual(message['From'], 'from@example.com')
        self.assertEqual(message['To'], 'to@example.com')
    def test_multiple_recipients(self):
        # Multiple "to" addresses are joined with ", " in the To header.
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'])
        message = email.message()
        self.assertEqual(message['Subject'], 'Subject')
        self.assertEqual(message.get_payload(), 'Content')
        self.assertEqual(message['From'], 'from@example.com')
        self.assertEqual(message['To'], 'to@example.com, other@example.com')
    def test_cc(self):
        """Regression test for #7722"""
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com')
        self.assertEqual(email.recipients(), ['to@example.com', 'cc@example.com'])
        # Test multiple CC with multiple To
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'])
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com'])
        # Testing with Bcc: bcc addresses appear in recipients() but get
        # no visible header.
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com', 'other@example.com'], cc=['cc@example.com', 'cc.other@example.com'], bcc=['bcc@example.com'])
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])
    def test_recipients_as_tuple(self):
        # Tuples are accepted anywhere lists are for to/cc/bcc.
        email = EmailMessage('Subject', 'Content', 'from@example.com', ('to@example.com', 'other@example.com'), cc=('cc@example.com', 'cc.other@example.com'), bcc=('bcc@example.com',))
        message = email.message()
        self.assertEqual(message['Cc'], 'cc@example.com, cc.other@example.com')
        self.assertEqual(email.recipients(), ['to@example.com', 'other@example.com', 'cc@example.com', 'cc.other@example.com', 'bcc@example.com'])
    def test_header_injection(self):
        # A newline embedded in the subject must raise BadHeaderError,
        # for both plain and lazily-translated strings.
        email = EmailMessage('Subject\nInjection Test', 'Content', 'from@example.com', ['to@example.com'])
        self.assertRaises(BadHeaderError, email.message)
        email = EmailMessage(ugettext_lazy('Subject\nInjection Test'), 'Content', 'from@example.com', ['to@example.com'])
        self.assertRaises(BadHeaderError, email.message)
    def test_space_continuation(self):
        """
        Test for space continuation character in long (ascii) subject headers (#7747)
        """
        email = EmailMessage('Long subject lines that get wrapped should contain a space continuation character to get expected behavior in Outlook and Thunderbird', 'Content', 'from@example.com', ['to@example.com'])
        message = email.message()
        # Note that in Python 3, maximum line length has increased from 76 to 78
        self.assertEqual(message['Subject'].encode(), b'Long subject lines that get wrapped should contain a space continuation\n character to get expected behavior in Outlook and Thunderbird')
    def test_message_header_overrides(self):
        """
        Specifying dates or message-ids in the extra headers overrides the
        default values (#9233)
        """
        headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        email = EmailMessage('subject', 'content', 'from@example.com', ['to@example.com'], headers=headers)
        self.assertEqual(sorted(email.message().items()), [
            ('Content-Transfer-Encoding', '7bit'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('From', 'from@example.com'),
            ('MIME-Version', '1.0'),
            ('Message-ID', 'foo'),
            ('Subject', 'subject'),
            ('To', 'to@example.com'),
            ('date', 'Fri, 09 Nov 2001 01:08:47 -0000'),
        ])
    def test_from_header(self):
        """
        Make sure we can manually set the From header (#9214)
        """
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        message = email.message()
        self.assertEqual(message['From'], 'from@example.com')
    def test_to_header(self):
        """
        Make sure we can manually set the To header (#17444)
        """
        email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                             ['list-subscriber@example.com', 'list-subscriber2@example.com'],
                             headers={'To': 'mailing-list@example.com'})
        message = email.message()
        self.assertEqual(message['To'], 'mailing-list@example.com')
        self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
        # If we don't set the To header manually, it should default to the `to` argument to the constructor
        email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                             ['list-subscriber@example.com', 'list-subscriber2@example.com'])
        message = email.message()
        self.assertEqual(message['To'], 'list-subscriber@example.com, list-subscriber2@example.com')
        self.assertEqual(email.to, ['list-subscriber@example.com', 'list-subscriber2@example.com'])
    def test_multiple_message_call(self):
        """
        Regression for #13259 - Make sure that headers are not changed when
        calling EmailMessage.message()
        """
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        message = email.message()
        self.assertEqual(message['From'], 'from@example.com')
        message = email.message()
        self.assertEqual(message['From'], 'from@example.com')
    def test_unicode_address_header(self):
        """
        Regression for #11144 - When a to/from/cc header contains unicode,
        make sure the email addresses are parsed correctly (especially with
        regards to commas)
        """
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Firstname Sürname" <to@example.com>', 'other@example.com'])
        self.assertEqual(email.message()['To'], '=?utf-8?q?Firstname_S=C3=BCrname?= <to@example.com>, other@example.com')
        email = EmailMessage('Subject', 'Content', 'from@example.com', ['"Sürname, Firstname" <to@example.com>', 'other@example.com'])
        self.assertEqual(email.message()['To'], '=?utf-8?q?S=C3=BCrname=2C_Firstname?= <to@example.com>, other@example.com')
    def test_unicode_headers(self):
        # Non-ASCII header values are RFC 2047 encoded-words in the output.
        email = EmailMessage("Gżegżółka", "Content", "from@example.com", ["to@example.com"],
                             headers={"Sender": '"Firstname Sürname" <sender@example.com>',
                                      "Comments": 'My Sürname is non-ASCII'})
        message = email.message()
        self.assertEqual(message['Subject'], '=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=')
        self.assertEqual(message['Sender'], '=?utf-8?q?Firstname_S=C3=BCrname?= <sender@example.com>')
        self.assertEqual(message['Comments'], '=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?=')
    def test_safe_mime_multipart(self):
        """
        Make sure headers can be set with a different encoding than utf-8 in
        SafeMIMEMultipart as well
        """
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        subject, from_email, to = 'hello', 'from@example.com', '"Sürname, Firstname" <to@example.com>'
        text_content = 'This is an important message.'
        html_content = '<p>This is an <strong>important</strong> message.</p>'
        msg = EmailMultiAlternatives('Message from Firstname Sürname', text_content, from_email, [to], headers=headers)
        msg.attach_alternative(html_content, "text/html")
        msg.encoding = 'iso-8859-1'
        self.assertEqual(msg.message()['To'], '=?iso-8859-1?q?S=FCrname=2C_Firstname?= <to@example.com>')
        self.assertEqual(msg.message()['Subject'], '=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=')
    def test_encoding(self):
        """
        Regression for #12791 - Encode body correctly with other encodings
        than utf-8
        """
        email = EmailMessage('Subject', 'Firstname Sürname is a great guy.', 'from@example.com', ['other@example.com'])
        email.encoding = 'iso-8859-1'
        message = email.message()
        self.assertTrue(message.as_string().startswith('Content-Type: text/plain; charset="iso-8859-1"\nMIME-Version: 1.0\nContent-Transfer-Encoding: quoted-printable\nSubject: Subject\nFrom: from@example.com\nTo: other@example.com'))
        self.assertEqual(message.get_payload(), 'Firstname S=FCrname is a great guy.')
        # Make sure MIME attachments also works correctly with other encodings than utf-8
        text_content = 'Firstname Sürname is a great guy.'
        html_content = '<p>Firstname Sürname is a <strong>great</strong> guy.</p>'
        msg = EmailMultiAlternatives('Subject', text_content, 'from@example.com', ['to@example.com'])
        msg.encoding = 'iso-8859-1'
        msg.attach_alternative(html_content, "text/html")
        # Both alternative parts must carry the requested charset and be
        # quoted-printable encoded.
        payload0 = msg.message().get_payload(0)
        self.assertMessageHasHeaders(payload0, set((
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable'))))
        self.assertTrue(payload0.as_bytes().endswith(b'\n\nFirstname S=FCrname is a great guy.'))
        payload1 = msg.message().get_payload(1)
        self.assertMessageHasHeaders(payload1, set((
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/html; charset="iso-8859-1"'),
            ('Content-Transfer-Encoding', 'quoted-printable'))))
        self.assertTrue(payload1.as_bytes().endswith(b'\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>'))
    def test_attachments(self):
        """Regression test for #9367"""
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
        text_content = 'This is an important message.'
        html_content = '<p>This is an <strong>important</strong> message.</p>'
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to], headers=headers)
        msg.attach_alternative(html_content, "text/html")
        msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
        msg_bytes = msg.message().as_bytes()
        message = message_from_bytes(msg_bytes)
        # Alternatives + attachment => multipart/mixed with the
        # multipart/alternative part first and the attachment second.
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_content_type(), 'multipart/mixed')
        self.assertEqual(message.get_default_type(), 'text/plain')
        payload = message.get_payload()
        self.assertEqual(payload[0].get_content_type(), 'multipart/alternative')
        self.assertEqual(payload[1].get_content_type(), 'application/pdf')
    def test_non_ascii_attachment_filename(self):
        """Regression test for #14964"""
        headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
        subject, from_email, to = 'hello', 'from@example.com', 'to@example.com'
        content = 'This is the message.'
        msg = EmailMessage(subject, content, from_email, [to], headers=headers)
        # Unicode in file name
        msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
        msg_bytes = msg.message().as_bytes()
        message = message_from_bytes(msg_bytes)
        payload = message.get_payload()
        self.assertEqual(payload[1].get_filename(), 'une pièce jointe.pdf')
    def test_dummy_backend(self):
        """
        Make sure that dummy backends returns correct number of sent messages
        """
        connection = dummy.EmailBackend()
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        self.assertEqual(connection.send_messages([email, email, email]), 3)
    def test_arbitrary_keyword(self):
        """
        Make sure that get_connection() accepts arbitrary keyword that might be
        used with custom backends.
        """
        c = mail.get_connection(fail_silently=True, foo='bar')
        self.assertTrue(c.fail_silently)
    def test_custom_backend(self):
        """Test custom backend defined in this suite."""
        conn = mail.get_connection('mail.custombackend.EmailBackend')
        self.assertTrue(hasattr(conn, 'test_outbox'))
        email = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        conn.send_messages([email])
        self.assertEqual(len(conn.test_outbox), 1)
    def test_backend_arg(self):
        """Test backend argument of mail.get_connection()"""
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.smtp.EmailBackend'), smtp.EmailBackend)
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.locmem.EmailBackend'), locmem.EmailBackend)
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.dummy.EmailBackend'), dummy.EmailBackend)
        self.assertIsInstance(mail.get_connection('django.core.mail.backends.console.EmailBackend'), console.EmailBackend)
        # The filebased backend needs a real directory; clean it up even
        # if the assertion fails.
        tmp_dir = tempfile.mkdtemp()
        try:
            self.assertIsInstance(mail.get_connection('django.core.mail.backends.filebased.EmailBackend', file_path=tmp_dir), filebased.EmailBackend)
        finally:
            shutil.rmtree(tmp_dir)
        self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
    @override_settings(
        EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend',
        ADMINS=[('nobody', 'nobody@example.com')],
        MANAGERS=[('nobody', 'nobody@example.com')])
    def test_connection_arg(self):
        """Test connection argument to send_mail(), et. al."""
        mail.outbox = []
        # Send using non-default connection: mail.outbox (the locmem
        # default) must stay empty while the custom backend's outbox fills.
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'], connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, 'Subject')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        send_mass_mail([
            ('Subject1', 'Content1', 'from1@example.com', ['to1@example.com']),
            ('Subject2', 'Content2', 'from2@example.com', ['to2@example.com']),
        ], connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 2)
        self.assertEqual(connection.test_outbox[0].subject, 'Subject1')
        self.assertEqual(connection.test_outbox[1].subject, 'Subject2')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        mail_admins('Admin message', 'Content', connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, '[Django] Admin message')
        connection = mail.get_connection('mail.custombackend.EmailBackend')
        mail_managers('Manager message', 'Content', connection=connection)
        self.assertEqual(mail.outbox, [])
        self.assertEqual(len(connection.test_outbox), 1)
        self.assertEqual(connection.test_outbox[0].subject, '[Django] Manager message')
    def test_dont_mangle_from_in_body(self):
        # Regression for #13433 - Make sure that EmailMessage doesn't mangle
        # 'From ' in message body.
        email = EmailMessage('Subject', 'From the future', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        self.assertFalse(b'>From the future' in email.message().as_bytes())
    def test_dont_base64_encode(self):
        # Ticket #3472
        # Shouldn't use Base64 encoding at all
        msg = EmailMessage('Subject', 'UTF-8 encoded body', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        self.assertFalse(b'Content-Transfer-Encoding: base64' in msg.message().as_bytes())
        # Ticket #11212
        # Shouldn't use quoted printable, should detect it can represent content with 7 bit data
        msg = EmailMessage('Subject', 'Body with only ASCII characters.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        s = msg.message().as_bytes()
        self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
        self.assertTrue(b'Content-Transfer-Encoding: 7bit' in s)
        # Shouldn't use quoted printable, should detect it can represent content with 8 bit data
        msg = EmailMessage('Subject', 'Body with latin characters: àáä.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        s = msg.message().as_bytes()
        self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
        self.assertTrue(b'Content-Transfer-Encoding: 8bit' in s)
        msg = EmailMessage('Subject', 'Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        s = msg.message().as_bytes()
        self.assertFalse(b'Content-Transfer-Encoding: quoted-printable' in s)
        self.assertTrue(b'Content-Transfer-Encoding: 8bit' in s)
class BaseEmailBackendTests(object):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(first[:len(second)], second, "First string doesn't start with the second.")
def get_mailbox_content(self):
raise NotImplementedError
def flush_mailbox(self):
raise NotImplementedError
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(len(mailbox), 1,
"Expected exactly one message, got %d.\n%r" % (len(mailbox), [
m.as_string() for m in mailbox]))
return mailbox[0]
def test_send(self):
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "from@example.com")
self.assertEqual(message.get_all("to"), ["to@example.com"])
def test_send_unicode(self):
email = EmailMessage('Chère maman', 'Je t\'aime très fort', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], '=?utf-8?q?Ch=C3=A8re_maman?=')
self.assertEqual(force_text(message.get_payload(decode=True)), 'Je t\'aime très fort')
def test_send_many(self):
email1 = EmailMessage('Subject', 'Content1', 'from@example.com', ['to@example.com'])
email2 = EmailMessage('Subject', 'Content2', 'from@example.com', ['to@example.com'])
num_sent = mail.get_connection().send_messages([email1, email2])
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), "Content1")
self.assertEqual(messages[1].get_payload(), "Content2")
def test_send_verbose_name(self):
email = EmailMessage("Subject", "Content", '"Firstname Sürname" <from@example.com>',
["to@example.com"])
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <from@example.com>")
@override_settings(MANAGERS=[('nobody', 'nobody@example.com')])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(ADMINS=[('nobody', 'nobody@example.com')])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins """
mail_admins('Subject', 'Content', html_message='HTML Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.assertEqual(message.get_all('to'), ['nobody@example.com'])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), 'Content')
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_payload(), 'HTML Content')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
@override_settings(
ADMINS=[('nobody', 'nobody+admin@example.com')],
MANAGERS=[('nobody', 'nobody+manager@example.com')])
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazy translated subject = bad output
Regression for #13494
"""
mail_managers(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
self.flush_mailbox()
mail_admins(ugettext_lazy('Subject'), 'Content')
message = self.get_the_message()
self.assertEqual(message.get('subject'), '[Django] Subject')
@override_settings(ADMINS=(), MANAGERS=())
def test_empty_admins(self):
"""
Test that mail_admins/mail_managers doesn't connect to the mail server
if there are no recipients (#9383)
"""
mail_admins('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
mail_managers('hi', 'there')
self.assertEqual(self.get_mailbox_content(), [])
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage('Subject', 'Content', 'from@example.com', ['to@example.com'], cc=['cc@example.com'])
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertStartsWith(message.as_string(), 'MIME-Version: 1.0\nContent-Type: text/plain; charset="utf-8"\nContent-Transfer-Encoding: 7bit\nSubject: Subject\nFrom: from@example.com\nTo: to@example.com\nCc: cc@example.com\nDate: ')
def test_idn_send(self):
    """
    Non-ASCII (IDN) addresses are converted to punycode on send (#14301).
    """
    self.assertTrue(send_mail('Subject', 'Content', 'from@öäü.com', ['to@öäü.com']))
    msg = self.get_the_message()
    self.assertEqual(msg.get('subject'), 'Subject')
    self.assertEqual(msg.get('from'), 'from@xn--4ca9at.com')
    self.assertEqual(msg.get('to'), 'to@xn--4ca9at.com')
    self.flush_mailbox()
    # The same conversion must apply when sending through EmailMessage,
    # including the Cc header.
    m = EmailMessage('Subject', 'Content', 'from@öäü.com',
                     ['to@öäü.com'], cc=['cc@öäü.com'])
    m.send()
    msg = self.get_the_message()
    for header, expected in (('subject', 'Subject'),
                             ('from', 'from@xn--4ca9at.com'),
                             ('to', 'to@xn--4ca9at.com'),
                             ('cc', 'cc@xn--4ca9at.com')):
        self.assertEqual(msg.get(header), expected)
def test_recipient_without_domain(self):
    """
    Bare local-part addresses without a domain are accepted (#15042).
    """
    self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
    msg = self.get_the_message()
    for header, expected in (('subject', 'Subject'),
                             ('from', 'tester'),
                             ('to', 'django')):
        self.assertEqual(msg.get(header), expected)
def test_close_connection(self):
    """
    A connection can be closed even when it was never explicitly opened.
    """
    connection = mail.get_connection(username='', password='')
    try:
        connection.close()
    except Exception as e:
        self.fail("close() unexpectedly raised an exception: %s" % e)
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
    email_backend = 'django.core.mail.backends.locmem.EmailBackend'

    def get_mailbox_content(self):
        # Every sent message lands in the shared module-level outbox.
        return [stored.message() for stored in mail.outbox]

    def flush_mailbox(self):
        mail.outbox = []

    def tearDown(self):
        super(LocmemBackendTests, self).tearDown()
        mail.outbox = []

    def test_locmem_shared_messages(self):
        """All locmem backend instances share one module-level outbox."""
        email = EmailMessage('Subject', 'Content', 'bounce@example.com',
                             ['to@example.com'], headers={'From': 'from@example.com'})
        for backend in (locmem.EmailBackend(), locmem.EmailBackend()):
            backend.send_messages([email])
        self.assertEqual(len(mail.outbox), 2)

    def test_validate_multiline_headers(self):
        # Ticket #18861 - header values containing newlines must be rejected
        # even by the in-memory backend.
        with self.assertRaises(BadHeaderError):
            send_mail('Subject\nMultiline', 'Content', 'from@example.com', ['to@example.com'])
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
    # Exercises the file-based backend, which writes each connection's
    # traffic to its own file under EMAIL_FILE_PATH.
    email_backend = 'django.core.mail.backends.filebased.EmailBackend'

    def setUp(self):
        """Point EMAIL_FILE_PATH at a fresh temporary directory."""
        super(FileBackendTests, self).setUp()
        self.tmp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
        self._settings_override.enable()

    def tearDown(self):
        # Undo the settings override before the base-class teardown runs.
        self._settings_override.disable()
        super(FileBackendTests, self).tearDown()

    def flush_mailbox(self):
        # Delete every session file written so far.
        for filename in os.listdir(self.tmp_dir):
            os.unlink(os.path.join(self.tmp_dir, filename))

    def get_mailbox_content(self):
        """Parse all messages from all session files in the temp dir."""
        messages = []
        for filename in os.listdir(self.tmp_dir):
            with open(os.path.join(self.tmp_dir, filename), 'rb') as fp:
                # Messages within one session file are separated by a line
                # of 79 dashes.
                session = fp.read().split(force_bytes('\n' + ('-' * 79) + '\n', encoding='ascii'))
                messages.extend(message_from_bytes(m) for m in session if m)
        return messages

    def test_file_sessions(self):
        """Make sure opening a connection creates a new file"""
        msg = EmailMessage('Subject', 'Content', 'bounce@example.com', ['to@example.com'], headers={'From': 'from@example.com'})
        connection = mail.get_connection()
        connection.send_messages([msg])
        # One send on a fresh connection -> exactly one session file.
        self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
        with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), 'rb') as fp:
            message = message_from_binary_file(fp)
        self.assertEqual(message.get_content_type(), 'text/plain')
        self.assertEqual(message.get('subject'), 'Subject')
        self.assertEqual(message.get('from'), 'from@example.com')
        self.assertEqual(message.get('to'), 'to@example.com')
        # A second connection gets its own file...
        connection2 = mail.get_connection()
        connection2.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
        # ...while re-using the first connection does not create a new one.
        connection.send_messages([msg])
        self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
        # An explicitly opened connection keeps a single file across
        # multiple send() calls.
        msg.connection = mail.get_connection()
        self.assertTrue(connection.open())
        msg.send()
        self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
        msg.send()
        self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
        connection.close()
class ConsoleBackendTests(HeadersCheckMixin, BaseEmailBackendTests, SimpleTestCase):
    email_backend = 'django.core.mail.backends.console.EmailBackend'

    def setUp(self):
        super(ConsoleBackendTests, self).setUp()
        # Capture everything the console backend prints to stdout.
        self.__stdout = sys.stdout
        self.stream = sys.stdout = StringIO()

    def tearDown(self):
        del self.stream
        sys.stdout = self.__stdout
        del self.__stdout
        super(ConsoleBackendTests, self).tearDown()

    def flush_mailbox(self):
        self.stream = sys.stdout = StringIO()

    def get_mailbox_content(self):
        # Messages are printed separated by a line of 79 dashes.
        separator = str('\n' + ('-' * 79) + '\n')
        raw_messages = self.stream.getvalue().split(separator)
        return [message_from_bytes(force_bytes(raw)) for raw in raw_messages if raw]

    def test_console_stream_kwarg(self):
        """
        The console backend can be pointed at an arbitrary stream.
        """
        stream = StringIO()
        connection = mail.get_connection(
            'django.core.mail.backends.console.EmailBackend', stream=stream)
        send_mail('Subject', 'Content', 'from@example.com', ['to@example.com'],
                  connection=connection)
        message = force_bytes(stream.getvalue().split('\n' + ('-' * 79) + '\n')[0])
        self.assertMessageHasHeaders(message, set((
            ('MIME-Version', '1.0'),
            ('Content-Type', 'text/plain; charset="utf-8"'),
            ('Content-Transfer-Encoding', '7bit'),
            ('Subject', 'Subject'),
            ('From', 'from@example.com'),
            ('To', 'to@example.com'))))
        self.assertIn(b'\nDate: ', message)
class FakeSMTPServer(smtpd.SMTPServer, threading.Thread):
    """
    Asyncore SMTP server wrapped into a thread. Based on DummyFTPServer from:
    http://svn.python.org/view/python/branches/py3k/Lib/test/test_ftplib.py?revision=86061&view=markup
    """

    def __init__(self, *args, **kwargs):
        threading.Thread.__init__(self)
        smtpd.SMTPServer.__init__(self, *args, **kwargs)
        # Received messages accumulate in _sink; guarded by sink_lock because
        # the asyncore loop runs in this thread while tests read from another.
        self._sink = []
        self.active = False
        self.active_lock = threading.Lock()
        self.sink_lock = threading.Lock()

    def process_message(self, peer, mailfrom, rcpttos, data):
        """Parse one incoming message and store it in the sink."""
        if PY3:
            # message_from_bytes needs bytes; under Python 3 smtpd hands us str.
            data = data.encode('utf-8')
        m = message_from_bytes(data)
        maddr = parseaddr(m.get('from'))[1]
        # Reject the message when the envelope sender does not match the
        # From: header address.
        if mailfrom != maddr:
            return "553 '%s' != '%s'" % (mailfrom, maddr)
        with self.sink_lock:
            self._sink.append(m)

    def get_sink(self):
        # Return a snapshot copy so callers can iterate without the lock held.
        with self.sink_lock:
            return self._sink[:]

    def flush_sink(self):
        with self.sink_lock:
            self._sink[:] = []

    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until run() signals that the loop is active.
        self.__flag.wait()

    def run(self):
        self.active = True
        self.__flag.set()
        # Pump asyncore until stop() clears the flag or no sockets remain.
        while self.active and asyncore.socket_map:
            with self.active_lock:
                asyncore.loop(timeout=0.1, count=1)
        asyncore.close_all()

    def stop(self):
        if self.active:
            self.active = False
            self.join()
class SMTPBackendTests(BaseEmailBackendTests, SimpleTestCase):
    email_backend = 'django.core.mail.backends.smtp.EmailBackend'

    @classmethod
    def setUpClass(cls):
        # Start one fake SMTP server (on an OS-assigned port) for the whole
        # class and point EMAIL_HOST/EMAIL_PORT at it.
        cls.server = FakeSMTPServer(('127.0.0.1', 0), None)
        cls._settings_override = override_settings(
            EMAIL_HOST="127.0.0.1",
            EMAIL_PORT=cls.server.socket.getsockname()[1])
        cls._settings_override.enable()
        cls.server.start()

    @classmethod
    def tearDownClass(cls):
        cls._settings_override.disable()
        cls.server.stop()

    def setUp(self):
        super(SMTPBackendTests, self).setUp()
        self.server.flush_sink()

    def tearDown(self):
        self.server.flush_sink()
        super(SMTPBackendTests, self).tearDown()

    def flush_mailbox(self):
        self.server.flush_sink()

    def get_mailbox_content(self):
        return self.server.get_sink()

    @override_settings(EMAIL_HOST_USER="not empty username",
                       EMAIL_HOST_PASSWORD="not empty password")
    def test_email_authentication_use_settings(self):
        # Without explicit credentials the backend picks up the settings.
        backend = smtp.EmailBackend()
        self.assertEqual(backend.username, 'not empty username')
        self.assertEqual(backend.password, 'not empty password')

    @override_settings(EMAIL_HOST_USER="not empty username",
                       EMAIL_HOST_PASSWORD="not empty password")
    def test_email_authentication_override_settings(self):
        # Explicit constructor credentials win over the settings.
        backend = smtp.EmailBackend(username='username', password='password')
        self.assertEqual(backend.username, 'username')
        self.assertEqual(backend.password, 'password')

    @override_settings(EMAIL_HOST_USER="not empty username",
                       EMAIL_HOST_PASSWORD="not empty password")
    def test_email_disabled_authentication(self):
        # Passing empty strings disables authentication entirely.
        backend = smtp.EmailBackend(username='', password='')
        self.assertEqual(backend.username, '')
        self.assertEqual(backend.password, '')

    def test_server_stopped(self):
        """
        Test that closing the backend while the SMTP server is stopped doesn't
        raise an exception.
        """
        backend = smtp.EmailBackend(username='', password='')
        backend.open()
        self.server.stop()
        try:
            backend.close()
        except Exception as e:
            self.fail("close() unexpectedly raised an exception: %s" % e)
|
bsd-3-clause
|
jmservera/connectthedots
|
Devices/GatewayConnectedDevices/BLEMoisture/SensorAgent.py
|
12
|
4093
|
'''
Copyright (c) Microsoft Open Technologies, Inc. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Code to read a moisture sensor attached to a RedBear BLE Nano, then augment and format as JSON to send via socket connection to a gateway.
Example of sending hydrology data to Microsoft Azure and analyzing with Azure Stream Analytics or Azure Machine Learning.
'''
import sys
import socket
import time
import datetime
import re
from BLEMoistureSensor import BLEMoistureSensor
Debug = False  # when True, print the JSON locally instead of sending to the gateway
Org = "Your organization";
Disp = "Sensor display name" # will be the label for the curve on the chart
GUID = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" # ensures all the data from this sensor appears on the same chart. You can use the Tools/Create GUID in Visual Studio to create.
# The last 6 bytes will be replaced with the mac address of the BLE module that is transmitting the moisture data.
Locn = "Sensor location";
Vendor = 0xfffe # Vendor ID for our custom device
Product = 0xfffe # Product ID for our custom device
# Local gateway socket endpoint that forwards readings upstream
HOST = '127.0.0.1'
PORT = 5002
CONNECT_RETRY_INTERVAL = 2  # seconds between socket connect attempts
EXCEPTION_THRESHOLD = 3  # unused in this file -- presumably an error-retry limit; TODO confirm
SEND_INTERVAL = 5  # unused in this file -- presumably seconds between sends; TODO confirm
s = None  # gateway socket; created in main() unless Debug is True
def processSensorData( macAddress, value ) :
    """Format one moisture reading as JSON and forward it to the gateway.

    macAddress -- MAC of the reporting BLE board; spliced over the last
                  section of GUID (assumes 12 chars, no separators -- TODO confirm)
    value -- moisture reading, already rendered as a string
    """
    global s
    timeStr = datetime.datetime.utcnow().isoformat()
    # replace last group of digits with mac address of BLE sensor board
    # (GUID[:24] keeps everything up to the final hyphen-delimited group)
    deviceID = GUID
    deviceID = deviceID[:24] + macAddress
    # JSON is assembled by hand via string concatenation; `value` is embedded
    # unquoted, so it must already be a valid JSON number string.
    JSONString = "{"
    JSONString += "\"value\":" + value
    JSONString += ",\"guid\":\"" + deviceID
    JSONString += "\",\"organization\":\"" + Org
    JSONString += "\",\"displayname\":\"" + Disp
    JSONString += "\",\"unitofmeasure\":\"" + "vol/vol"
    JSONString += "\",\"measurename\":\"" + "WaterContent"
    JSONString += "\",\"location\":\"" + Locn
    JSONString += "\",\"timecreated\":\"" + timeStr + "\""
    JSONString += "}"
    if Debug == True:
        print "JSONString=", JSONString
    if s != None :
        s.send("<" + JSONString + ">"); # sends to gateway over socket interface
def main() :
    """Connect to the gateway socket and stream BLE moisture readings forever."""
    try:
        global s
        # setup moisture sensor
        moistureSensor = BLEMoistureSensor()
        moistureSensor.setSensorDataAvailableEvent(processSensorData)
        # setup server socket (skipped entirely in Debug mode; readings are
        # then only printed by processSensorData)
        if Debug == False :
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            print("Socket created.")
            # Retry until the gateway accepts the connection.
            while True:
                try:
                    s.connect((HOST, PORT));
                    break;
                except socket.error as msg:
                    print("Socket connection failed. Error Code : " + str(msg[0]) + " Message " + msg[1])
                    time.sleep(CONNECT_RETRY_INTERVAL)
            print ("Socket connection succeeded.")
        # this will listen forever for advertising events and call processSensorData() when data arrives
        moistureSensor.Listen();
    except KeyboardInterrupt:
        print("Continuous polling stopped")

if __name__ == '__main__':
    main()
|
mit
|
minhphung171093/OpenERP_V8
|
openerp/addons/event/report/report_event_registration.py
|
310
|
4079
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
from openerp import tools
class report_event_registration(models.Model):
    """Events Analysis"""
    _name = "report.event.registration"
    _order = 'event_date desc'
    _auto = False  # backed by the SQL view created in init(), not a real table

    # All fields are populated by the view defined in init() below.
    event_date = fields.Datetime('Event Date', readonly=True)
    event_id = fields.Many2one('event.event', 'Event', required=True)
    draft_state = fields.Integer(' # No of Draft Registrations')
    confirm_state = fields.Integer(' # No of Confirmed Registrations')
    seats_max = fields.Integer('Max Seats')
    nbevent = fields.Integer('Number of Events')
    nbregistration = fields.Integer('Number of Registrations')
    event_type = fields.Many2one('event.type', 'Event Type')
    registration_state = fields.Selection([('draft', 'Draft'), ('confirm', 'Confirmed'), ('done', 'Attended'), ('cancel', 'Cancelled')], 'Registration State', readonly=True, required=True)
    event_state = fields.Selection([('draft', 'Draft'), ('confirm', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Event State', readonly=True, required=True)
    user_id = fields.Many2one('res.users', 'Event Responsible', readonly=True)
    user_id_registration = fields.Many2one('res.users', 'Register', readonly=True)
    name_registration = fields.Char('Participant / Contact Name', readonly=True)
    company_id = fields.Many2one('res.company', 'Company', readonly=True)

    def init(self, cr):
        """Initialize the sql view for the event registration """
        tools.drop_view_if_exists(cr, 'report_event_registration')

        # TOFIX this request won't select events that have no registration
        # NOTE(review): the synthetic id combines event and registration ids;
        # the GROUP BY includes r.id, so each row is one (event, registration)
        # pair and the aggregates operate per pair.
        cr.execute(""" CREATE VIEW report_event_registration AS (
            SELECT
                e.id::varchar || '/' || coalesce(r.id::varchar,'') AS id,
                e.id AS event_id,
                e.user_id AS user_id,
                r.user_id AS user_id_registration,
                r.name AS name_registration,
                e.company_id AS company_id,
                e.date_begin AS event_date,
                count(r.id) AS nbevent,
                sum(r.nb_register) AS nbregistration,
                CASE WHEN r.state IN ('draft') THEN r.nb_register ELSE 0 END AS draft_state,
                CASE WHEN r.state IN ('open','done') THEN r.nb_register ELSE 0 END AS confirm_state,
                e.type AS event_type,
                e.seats_max AS seats_max,
                e.state AS event_state,
                r.state AS registration_state
            FROM
                event_event e
                LEFT JOIN event_registration r ON (e.id=r.event_id)
            GROUP BY
                event_id,
                user_id_registration,
                r.id,
                registration_state,
                r.nb_register,
                event_type,
                e.id,
                e.date_begin,
                e.user_id,
                event_state,
                e.company_id,
                e.seats_max,
                name_registration
        )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
Jgarcia-IAS/ReporsitorioVacioOdoo
|
openerp/addons/product_extended/wizard/__init__.py
|
374
|
1078
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard_price
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
MontyThibault/Force-Sensor-Integration
|
MayaIntegration/SixAxis.py
|
1
|
2895
|
from ctypes import *
import maya.cmds as cmds
import Calibration as C
import unittest
class SixAxis(object):
    """Maps a single six-axis force/torque sensor to a Maya transform."""

    def __init__(self, device, channels, name, load = True):
        """Create the sensor mapping and its backing Maya transform node.

        device: an instance of PAIO.AIODevice()
        channels: the six channel indices for the sensor, in the order
            [forceX, forceY, forceZ, torqueX, torqueY, torqueZ]
        name: suffix for the Maya node name; falls back to hash(self)
        load: forwarded to the calibration matrix (presumably whether to
            load a saved calibration -- confirm in Calibration module)

        Raises ValueError if `channels` does not contain exactly six entries.
        """
        # BUG FIX: the original used `assert("Error: ...")`, which asserts a
        # non-empty string and therefore never fires. Validate for real,
        # before any Maya node is created.
        if len(channels) != 6:
            raise ValueError("Error: must give six channels in SixAxis.")

        self.device = device
        self.channels = channels
        # Raw readings land here; one c_float per channel.
        self.measurements = (c_float * 6)()

        if name:
            self.name = 'SixAxis_%s' % name
        else:
            self.name = 'SixAxis_%s' % hash(self)

        self.transform = cmds.createNode('transform', name = self.name)
        self.calibration = C.SixAxisCalibrationMatrix(name, load)

        print('Created new SixAxis %s' % self.name)

    def __del__(self):
        cmds.delete(self.transform)

    @property
    def forces(self):
        """Calibrated force readings (channels 1 - 3)."""
        return self.calibration.process(self.measurements)[:3]

    @property
    def torques(self):
        """Calibrated torque readings (channels 4 - 6)."""
        return self.calibration.process(self.measurements)[3:]

    def updateMeasurements(self):
        """ Update sensor measurements. Wrap this in an executeDeferred(). """
        ptr = cast(self.measurements, c_voidp).value
        for (i, channel) in enumerate(self.channels):
            # Write each reading directly into the measurements array
            # (4 bytes per c_float slot).
            slot = cast(ptr + (4 * i), POINTER(c_float))
            self.device.AioSingleAiEx(c_short(channel), slot)

        # This will feed the data into the calibration object so that the user
        # can set calibrations without accessing the data first.
        self.forces
        self.torques

    def updateTransform(self):
        """ Update Maya transform object. Wrap this in an executeDeferred(). """
        cmds.xform(self.transform, t = self.forces, ro = self.torques)
class Tests(object):
    """Self-tests for SixAxis using a stub acquisition device."""

    class FauxDevice(object):
        """Stand-in for PAIO.AIODevice returning deterministic-ish readings."""

        # BUG FIX: all three methods were missing `self`, so Init() and
        # AioSetAiRangeAll() raised TypeError when called on an instance, and
        # AioSingleAiEx only worked because the bound instance was silently
        # absorbed by the stray `deviceID` parameter.
        def Init(self):
            pass

        def AioSetAiRangeAll(self, ai_range):
            pass

        def AioSingleAiEx(self, c_channel, slot):
            # Channel number + random fraction makes each channel's reading
            # distinguishable while staying within a known range.
            from random import random
            slot.contents = c_float(random() + c_channel.value)

    def setUp(self):
        self.device = self.FauxDevice()
        self.channels = [6, 7, 8, 9, 10, 11]

    def test_create_and_process_six_axis(self):
        rock = SixAxis(self.device, self.channels, "test", False)
        rock.updateMeasurements()
        assert rock.forces == rock.measurements[:3]
|
mit
|
akaszynski/vtkInterface
|
examples/00-load/create-geometric-objects.py
|
1
|
1356
|
"""
.. _ref_geometric_example:
Geometric Objects
~~~~~~~~~~~~~~~~~
The "Hello, world!" of VTK
"""
import pyvista as pv
###############################################################################
# This runs through several of the available geomoetric objects available in
# VTK which PyVista provides simple convenience methods for generating.
#
# Let's run through creating a few geometric objects!
cyl = pv.Cylinder()
arrow = pv.Arrow()
sphere = pv.Sphere()
plane = pv.Plane()
line = pv.Line()
box = pv.Box()
cone = pv.Cone()
poly = pv.Polygon()
disc = pv.Disc()
###############################################################################
# Now let's plot them all in one window
p = pv.Plotter(shape=(3, 3))
# Top row
p.subplot(0, 0)
p.add_mesh(cyl, color="tan", show_edges=True)
p.subplot(0, 1)
p.add_mesh(arrow, color="tan", show_edges=True)
p.subplot(0, 2)
p.add_mesh(sphere, color="tan", show_edges=True)
# Middle row
p.subplot(1, 0)
p.add_mesh(plane, color="tan", show_edges=True)
p.subplot(1, 1)
p.add_mesh(line, color="tan", line_width=3)
p.subplot(1, 2)
p.add_mesh(box, color="tan", show_edges=True)
# Bottom row
p.subplot(2, 0)
p.add_mesh(cone, color="tan", show_edges=True)
p.subplot(2, 1)
p.add_mesh(poly, color="tan", show_edges=True)
p.subplot(2, 2)
p.add_mesh(disc, color="tan", show_edges=True)
# Render all of them
p.show()
|
mit
|
akornatskyy/sample-blog-api
|
src/config.py
|
1
|
4285
|
"""
"""
import logging
import os
import sys
from datetime import timedelta
try: # pragma: nocover
from ConfigParser import ConfigParser
config = ConfigParser()
except ImportError: # pragma: nocover
from configparser import ConfigParser
config = ConfigParser(strict=False)
from wheezy.caching.logging import OnePassHandler
from wheezy.caching.patterns import Cached
from wheezy.core.collections import defaultdict
from wheezy.html.ext.template import WhitespaceExtension
from wheezy.html.ext.template import WidgetExtension
from wheezy.html.utils import format_value
from wheezy.html.utils import html_escape
from wheezy.security.crypto import Ticket
from wheezy.security.crypto.comp import aes128
from wheezy.security.crypto.comp import ripemd160
from wheezy.security.crypto.comp import sha1
from wheezy.security.crypto.comp import sha256
from wheezy.template.engine import Engine
from wheezy.template.ext.core import CoreExtension
from wheezy.template.loader import FileLoader
from wheezy.template.loader import PreprocessLoader
from wheezy.web.templates import WheezyTemplate
from public import __version__
from tracing import ERROR_REPORT_FORMAT
from tracing import error_report_extra_provider
# Load configuration; the CONFIG env var selects the ini file.
config.read(os.getenv('CONFIG', 'etc/development.ini'))

# Cache backend: in-process memory or a pooled memcached client.
mode = config.get('runtime', 'cache')
if mode == 'memory':
    from wheezy.caching import MemoryCache
    cache = MemoryCache()
elif mode == 'memcached':
    from wheezy.core.pooling import EagerPool
    from wheezy.caching.pylibmc import MemcachedClient
    from wheezy.caching.pylibmc import client_factory
    pool = EagerPool(
        lambda: client_factory(config.get('memcached', 'servers').split(';')),
        size=config.getint('memcached', 'pool-size'))
    cache = MemcachedClient(pool)
else:
    raise NotImplementedError(mode)
cached = Cached(cache, time=timedelta(hours=4))

options = {}

# HTTPCacheMiddleware
options.update({
    'http_cache': cache
})

# HTTPErrorMiddleware
# Unhandled errors go to stderr or are mailed; either way they pass through
# OnePassHandler, which uses the cache to emit each report at most once per
# 12-hour window.
mode = config.get('runtime', 'unhandled')
if mode == 'stderr':
    handler = logging.StreamHandler(sys.stderr)
elif mode == 'mail':
    from logging.handlers import SMTPHandler
    handler = SMTPHandler(
        mailhost=config.get('mail', 'host'),
        fromaddr=config.get('error_report', 'from-addr'),
        toaddrs=config.get('error_report', 'to-addrs').split(';'),
        subject=config.get('error_report', 'subject'))
else:
    raise NotImplementedError(mode)
handler.setFormatter(logging.Formatter(ERROR_REPORT_FORMAT))
handler = OnePassHandler(handler, cache, timedelta(hours=12))
handler.setLevel(logging.ERROR)
unhandled_logger = logging.getLogger('unhandled')
unhandled_logger.setLevel(logging.ERROR)
unhandled_logger.addHandler(handler)
options.update({
    'http_errors': defaultdict(lambda: 'http500', {
        # HTTP status code: route name
        400: 'http400',
        403: 'http403',
        404: 'http404',
        405: 'http405',
        500: 'http500'
    }),
    'http_errors_logger': unhandled_logger,
    'http_errors_extra_provider': error_report_extra_provider
})

# Template Engine
# Two-stage setup: the first engine preprocesses '#'-prefixed directives
# (baking in __version__); the second renders the preprocessed output with
# widget and whitespace support.
searchpath = ['content/templates']
engine = Engine(
    loader=FileLoader(searchpath),
    extensions=[
        CoreExtension(token_start='#')
    ]
)
engine.global_vars.update({
    '__version__': __version__
})
engine = Engine(
    loader=PreprocessLoader(engine),
    extensions=[
        CoreExtension(),
        WidgetExtension(),
        WhitespaceExtension(),
    ])
engine.global_vars.update({
    'format_value': format_value,
    'h': html_escape
})
options.update({
    'render_template': WheezyTemplate(engine)
})

# Security
options.update({
    'ticket': Ticket(
        max_age=config.getint('crypto', 'ticket-max-age'),
        salt=config.get('crypto', 'ticket-salt'),
        cypher=aes128,
        # first available digest wins (ripemd160 may be None on some builds)
        digestmod=ripemd160 or sha256 or sha1,
        options={
            'CRYPTO_ENCRYPTION_KEY': config.get('crypto', 'encryption-key'),
            'CRYPTO_VALIDATION_KEY': config.get('crypto', 'validation-key')
        }),
    'AUTH_COOKIE': '_a',
    'AUTH_COOKIE_DOMAIN': config.get('crypto', 'auth-cookie-domain'),
    'AUTH_COOKIE_PATH': '',
    'AUTH_COOKIE_SECURE': config.getboolean('crypto', 'auth-cookie-secure'),
    'XSRF_NAME': '_x',
    'RESUBMISSION_NAME': '_c'
})
|
mit
|
ahelsing/geni-ch
|
tools/guard_utils.py
|
1
|
29623
|
#----------------------------------------------------------------------
# Copyright (c) 2011-2015 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# A series of utilities for constructing
# ABACGuards and SubjectInvocationChecks
import amsoil.core.pluginmanager as pm
from sqlalchemy import *
from sqlalchemy.orm import aliased
import threading
from geni_utils import *
from cert_utils import *
from geni_constants import *
from chapi.Memoize import memoize
from chapi.Exceptions import *
import MA_constants as MA
from datetime import datetime, timedelta
from dbutils import add_filters
from chapi_log import *
import json
# context support
_context = threading.local()

# Set up caching of constant relationships of different sorts

# Return the thread-local cache registered under name k, creating both the
# per-thread cache table and the named sub-cache on first use.
def cache_get(k):
    caches = getattr(_context, 'cache', None)
    if caches is None:
        caches = _context.cache = dict()
    if k not in caches:
        caches[k] = dict()
    return caches[k]

# Drop every cache for the current thread (no-op when none was created)
def cache_clear():
    try:
        del _context.cache
    except AttributeError:
        pass
# Manage caches that timeout
# Return the entry (keys: 'timestamp' and 'value') for urn unless it is
# missing or older than `lifetime` seconds, in which case return None.
def timed_cache_lookup(cache, urn, lifetime):
    entry = cache.get(urn)
    if entry is None:
        return None
    oldest_valid = datetime.utcnow() - timedelta(seconds=lifetime)
    if entry['timestamp'] > oldest_valid:
        return entry
    return None
# Register value with timestamp
def timed_cache_register(cache, urn, value):
    """Store value under urn, stamped with the current UTC time."""
    cache[urn] = {'value': value, 'timestamp': datetime.utcnow()}
# Some helper methods
@memoize
# Get the project name from a slice URN
def lookup_project_name_for_slice(slice_urn):
    """Return the project name embedded in the slice URN's authority field."""
    # Slice URN form: urn:publicid:IDN+<authority>:<project>+slice+<name>
    authority_field = slice_urn.split("+")[1]
    return authority_field.split(":")[1]
# Return a string based on a URN but with all punctuation (+:-.)
# replaced with _
def flatten_urn(urn):
    # Non-string inputs (including None) are simply stringified.
    # (Short-circuit order matters: `unicode` only exists on Python 2.)
    if urn is None or not (isinstance(urn, str) or
                           isinstance(urn, unicode)):
        return str(urn)
    flattened = urn
    for punct in (':', '+', '-', '.'):
        flattened = flattened.replace(punct, '_')
    return flattened
# Return all names of projects for which a user (by urn) is a member
def lookup_project_names_for_user(user_urn, session):
    """Return names of all non-expired projects user_urn belongs to (cached)."""
    db = pm.getService('chdbengine')
    cache = cache_get('project_names_for_user')
    if user_urn in cache:
        return cache[user_urn]
    # Join project -> project_member -> member 'urn' attribute; multiple
    # criteria in one filter() are ANDed, same as chained filters.
    q = session.query(db.PROJECT_TABLE, db.MEMBER_ATTRIBUTE_TABLE,
                      db.PROJECT_MEMBER_TABLE).filter(
        db.PROJECT_TABLE.c.expired == 'f',
        db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn',
        db.PROJECT_TABLE.c.project_id == db.PROJECT_MEMBER_TABLE.c.project_id,
        db.MEMBER_ATTRIBUTE_TABLE.c.member_id ==
            db.PROJECT_MEMBER_TABLE.c.member_id,
        db.MEMBER_ATTRIBUTE_TABLE.c.value == user_urn)
    project_names = [row.project_name for row in q.all()]
    cache[user_urn] = project_names
    return project_names
# Check that a list of UID's are all in the cache,
# otherwise raise ArgumentException
def validate_uid_list(uids, cache, label):
    """Map each uid through cache to its urn; raise if any uid is unknown."""
    good_urns = [cache[uid] for uid in uids if uid in cache]
    bad_uids = [uid for uid in uids if uid not in cache]
    if bad_uids:
        raise CHAPIv1ArgumentError("Unknown %s uids [%s] " % (label, bad_uids))
    return good_urns
# Look at a list of URN's of a given type and determine that they are all valid
def ensure_valid_urns(urn_type, urns, session, authority):
    """Check that every urn of the given type exists; raise otherwise.

    urn_type: one of 'PROJECT_URN', 'SLICE_URN', 'MEMBER_URN', 'SLIVER_URN';
        any other value is silently accepted (no validation performed).
    Raises CHAPIv1ArgumentError listing the unknown urns.
    Project/slice/member lookups are remembered in thread-local caches
    (entries never expire); sliver lookups always hit the database.
    """
    # chapi_info("ENSURE_VALID_URNS", "%s %s" % (urn_type, urns))
    if not isinstance(urns, list): urns = [urns]
    db = pm.getService('chdbengine')
    if urn_type == 'PROJECT_URN':
        cache = cache_get('project_urns')
        not_found_urns = [urn for urn in urns if urn not in cache]
        if len(not_found_urns) == 0:
            # chapi_debug('UTILS', "No cache misses for project URNs")
            rows = []
        else:
            # Projects are looked up by name: the 4th '+'-separated field of
            # the urn.
            not_found_names = [not_found_urn.split('+')[3] \
                                   for not_found_urn in not_found_urns]
            q = session.query(db.PROJECT_TABLE.c.project_name)
            q = q.filter(db.PROJECT_TABLE.c.project_name.in_(not_found_names))
            rows = q.all()
        for row in rows:
            # Rebuild the urn from the found name so the cache key matches
            # the caller's urn form.
            project_name = row.project_name
            project_urn = to_project_urn(authority, project_name)
            cache[project_urn] = True
        bad_urns = [urn for urn in not_found_urns if urn not in cache]
        if len(bad_urns) > 0:
            raise CHAPIv1ArgumentError('Unknown project urns: [%s]' % bad_urns)
    elif urn_type == 'SLICE_URN':
        cache = cache_get('slice_urns')
        not_found_urns = [urn for urn in urns if urn not in cache]
        if len(not_found_urns) == 0:
            # chapi_debug('UTILS', "No cache misses for slice URNs")
            rows = []
        else:
            q = session.query(db.SLICE_TABLE.c.slice_urn)
            q = q.filter(db.SLICE_TABLE.c.slice_urn.in_(not_found_urns))
            rows = q.all()
        for row in rows:
            cache[row.slice_urn] = True
        bad_urns = [urn for urn in not_found_urns if urn not in cache]
        if len(bad_urns) > 0:
            raise CHAPIv1ArgumentError('Unknown slice urns: [%s]' % bad_urns)
    elif urn_type == 'MEMBER_URN':
        cache = cache_get('member_urns')
        not_found_urns = [urn for urn in urns if urn not in cache]
        if len(not_found_urns) == 0:
            # chapi_debug('UTILS', "No cache misses for member URNs")
            rows = []
        else:
            # Member urns are stored as the 'urn' attribute in the member
            # attribute table.
            q = session.query(db.MEMBER_ATTRIBUTE_TABLE.c.value)
            q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn')
            q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.value.in_(not_found_urns))
            rows = q.all()
        for row in rows:
            cache[row.value] = True
        bad_urns = [urn for urn in not_found_urns if urn not in cache]
        if len(bad_urns) > 0:
            raise CHAPIv1ArgumentError('Unknown member urns: [%s]' % bad_urns)
    elif urn_type == 'SLIVER_URN':
        # Slivers are not cached; always validated against the database.
        q = session.query(db.SLIVER_INFO_TABLE.c.sliver_urn)
        q = q.filter(db.SLIVER_INFO_TABLE.c.sliver_urn.in_(urns))
        rows = q.all()
        found_urns = [row.sliver_urn for row in rows]
        bad_urns = [urn for urn in urns if urn not in found_urns]
        if len(bad_urns) > 0:
            raise CHAPIv1ArgumentError('Unknown sliver urns: [%s]' % bad_urns)
    else:
        pass
# Take a uid or list of uids, make sure they're all in the cache
# and return a urn or list of urns
def convert_slice_uid_to_urn(slice_uid, session):
    """Map a slice UID (or list of UIDs) to the corresponding URN(s).

    Results are memoized in the 'slice_uid_to_urn' cache; only UIDs not
    already cached are fetched from the slice table.  A single unknown
    UID raises CHAPIv1ArgumentError; list inputs are checked through
    validate_uid_list.
    """
    db = pm.getService('chdbengine')
    single = not isinstance(slice_uid, list)
    uids = [slice_uid] if single else slice_uid
    if not uids:
        return []
    cache = cache_get('slice_uid_to_urn')
    misses = [uid for uid in uids if uid not in cache]
    if misses:
        q = session.query(db.SLICE_TABLE.c.slice_urn,
                          db.SLICE_TABLE.c.slice_id)
        q = q.filter(db.SLICE_TABLE.c.slice_id.in_(misses))
        for row in q.all():
            cache[row.slice_id] = row.slice_urn
    if single:
        if slice_uid in cache:
            return cache[slice_uid]
        raise CHAPIv1ArgumentError('Unknown slice uid: %s' % slice_uid)
    return validate_uid_list(uids, cache, 'slice')
# Take a uid or list of uids, make sure they're all in the cache
# and return a urn or list of urns
def convert_project_uid_to_urn(project_uid, session):
    """Map a project UID (or list of UIDs) to project URN(s).

    Uses the 'project_uid_to_urn' cache and queries the project table
    only for cache misses; URNs are rebuilt from the project name under
    this SA's authority.  A single unknown UID raises
    CHAPIv1ArgumentError; list inputs go through validate_uid_list.
    """
    db = pm.getService('chdbengine')
    sa = pm.getService('sav1handler')
    authority = sa.getDelegate().authority
    single = not isinstance(project_uid, list)
    uids = [project_uid] if single else project_uid
    if not uids:
        return []
    cache = cache_get('project_uid_to_urn')
    misses = [uid for uid in uids if uid not in cache]
    if misses:
        q = session.query(db.PROJECT_TABLE.c.project_name,
                          db.PROJECT_TABLE.c.project_id)
        q = q.filter(db.PROJECT_TABLE.c.project_id.in_(misses))
        for row in q.all():
            cache[row.project_id] = to_project_urn(authority, row.project_name)
    if single:
        if project_uid in cache:
            return cache[project_uid]
        raise CHAPIv1ArgumentError("Unknown project uid: %s " % project_uid)
    return validate_uid_list(uids, cache, 'project')
# Take a project urn or list of urns, make sure they're all in the cache
# and return a uid or list of uid
def convert_project_urn_to_uid(project_urn, session):
    """Map a project URN (or list of URNs) to project UID(s).

    Results are memoized in the 'project_urn_to_uid' cache; only URNs
    not already cached are looked up (by project name) in the project
    table.  A single unknown URN raises CHAPIv1ArgumentError; list
    inputs are validated via validate_uid_list.
    """
    db = pm.getService('chdbengine')
    sa = pm.getService('sav1handler')
    authority = sa.getDelegate().authority
    project_urns = project_urn
    if not isinstance(project_urn, list): project_urns = [project_urn]
    if len(project_urns) == 0:
        return []
    cache = cache_get('project_urn_to_uid')
    uncached_urns = [id for id in project_urns if id not in cache]
    if len(uncached_urns) > 0:
        uncached_names = [from_project_urn(urn) for urn in uncached_urns]
        q = session.query(db.PROJECT_TABLE.c.project_name,
                          db.PROJECT_TABLE.c.project_id)
        q = q.filter(db.PROJECT_TABLE.c.project_name.in_(uncached_names))
        rows = q.all()
        for row in rows:
            # BUG FIX: use a loop-local name.  The original rebound the
            # 'project_urn' parameter here, so a *list* argument with any
            # cache miss fell into the single-URN branch below and
            # returned one (possibly wrong) uid instead of a validated
            # list of uids.
            row_urn = to_project_urn(authority, row.project_name)
            cache[row_urn] = row.project_id
    if not isinstance(project_urn, list):
        if project_urn in cache:
            return cache[project_urn]
        else:
            raise CHAPIv1ArgumentError("Unknown project urn: %s " % \
                                           project_urn)
    else:
        return validate_uid_list(project_urns, cache, 'project')
# Convert a project URN to project name
def convert_project_urn_to_name(urn, session):
    """Extract the project name from a project URN.

    Thin wrapper around from_project_urn(); 'session' is unused but kept
    for signature parity with the other convert_* helpers.
    """
    return from_project_urn(urn)
# Convert a project name to project urn
def convert_project_name_to_urn(name, session):
    """Build the full project URN for 'name' under this SA's authority.

    'session' is unused but kept for signature parity with the other
    convert_* helpers.
    """
    authority = pm.getService('sav1handler').getDelegate().authority
    return to_project_urn(authority, name)
# Take a uid or list of uids, make sure they're all in the cache
# and return a urn or list of urns
def convert_member_uid_to_urn(member_uid, session):
    """Map a member UID (or list of UIDs) to member URN(s) via the
    'urn' member attribute, memoized in the 'member_uid_to_urn' cache.
    A single unknown UID raises CHAPIv1ArgumentError.
    """
    db = pm.getService('chdbengine')
    single = not isinstance(member_uid, list)
    uids = [member_uid] if single else member_uid
    cache = cache_get('member_uid_to_urn')
    misses = [uid for uid in uids if uid not in cache]
    if misses:
        q = session.query(db.MEMBER_ATTRIBUTE_TABLE.c.value,
                          db.MEMBER_ATTRIBUTE_TABLE.c.member_id)
        q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.member_id.in_(misses))
        q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn')
        for row in q.all():
            cache[row.member_id] = row.value
    if single:
        if member_uid in cache:
            return cache[member_uid]
        raise CHAPIv1ArgumentError('Unknown member uid: %s ' % member_uid)
    return validate_uid_list(uids, cache, 'member')
# Take a uid or list of uids, make sure they're all in the cache
# and return an email or list of emails
def convert_member_uid_to_email(member_uid, session):
    """Map a member UID (or list of UIDs) to email address(es) via the
    'email_address' member attribute, memoized in 'member_uid_to_email'.
    A single unknown UID raises CHAPIv1ArgumentError.
    """
    db = pm.getService('chdbengine')
    single = not isinstance(member_uid, list)
    uids = [member_uid] if single else member_uid
    cache = cache_get('member_uid_to_email')
    misses = [uid for uid in uids if uid not in cache]
    if misses:
        q = session.query(db.MEMBER_ATTRIBUTE_TABLE.c.value,
                          db.MEMBER_ATTRIBUTE_TABLE.c.member_id)
        q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.member_id.in_(misses))
        q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'email_address')
        for row in q.all():
            cache[row.member_id] = row.value
    if single:
        if member_uid in cache:
            return cache[member_uid]
        raise CHAPIv1ArgumentError('Unknown member uid: %s' % member_uid)
    return validate_uid_list(uids, cache, 'member')
# Take an email or list of emails, make sure they're all in the cache
# and return a uid or list of uids
def convert_member_email_to_uid(member_email, session):
    """Map an email (or list of emails) to member UID(s).

    Matching is case-insensitive; keys in the 'member_email_to_uid'
    cache are lower-cased.  Unlike the other convert_* helpers this
    never raises on unknown addresses -- it returns the uids of the
    emails it could resolve, so callers can use it for bulk lookups or
    to test whether an address is known.
    """
    db = pm.getService('chdbengine')
    emails = member_email if isinstance(member_email, list) else [member_email]
    cache = cache_get('member_email_to_uid')
    misses = [em.lower() for em in emails if em.lower() not in cache]
    if misses:
        q = session.query(db.MEMBER_ATTRIBUTE_TABLE.c.value,
                          db.MEMBER_ATTRIBUTE_TABLE.c.member_id)
        q = q.filter(func.lower(db.MEMBER_ATTRIBUTE_TABLE.c.value).in_(misses))
        q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'email_address')
        for row in q.all():
            cache[row.value.lower()] = row.member_id
    return [cache[em.lower()] for em in emails if em.lower() in cache]
# Take an EPPN or list of EPPNs, make sure they're all in the cache
# and return a uid or list of uids
def convert_member_eppn_to_uid(member_eppn, session):
    """Map an EPPN (or list of EPPNs) to member UID(s).

    Matching is case-insensitive: the DB comparison lower-cases the
    stored value and the 'member_eppn_to_uid' cache is keyed by
    lower-cased EPPN.  A single unknown EPPN yields an empty list
    (not an error).
    """
    db = pm.getService('chdbengine')
    member_eppns = member_eppn
    if not isinstance(member_eppn, list): member_eppns = [member_eppn]
    cache = cache_get('member_eppn_to_uid')
    uncached_eppns = [me.lower() for me in member_eppns if me.lower() \
                          not in cache]
    if len(uncached_eppns) > 0:
        q = session.query(db.MEMBER_ATTRIBUTE_TABLE.c.value, \
                              db.MEMBER_ATTRIBUTE_TABLE.c.member_id)
        q = q.filter(func.lower(db.MEMBER_ATTRIBUTE_TABLE.c.value).in_(uncached_eppns))
        q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'eppn')
        rows = q.all()
        for row in rows:
            cache[row.value.lower()] = row.member_id
    if not isinstance(member_eppn, list):
        # BUG FIX: the cache is keyed by *lower-cased* EPPN, so the
        # lookup must be lower-cased too; previously a mixed-case
        # argument was never found even after a successful DB fetch.
        key = member_eppn.lower()
        if key in cache:
            return cache[key]
        else:
            # Return an empty list if we can't find the eppn.
            return list()
    else:
        # Validate against the lower-cased keys actually stored in the
        # cache (the original passed raw-case EPPNs here).
        return validate_uid_list([me.lower() for me in member_eppns],
                                 cache, 'member_eppn_to_uid')
def lookup_slice_urn_for_sliver_urn(sliver_urn, session):
    """Return the slice URN owning the given sliver URN, or None when
    the sliver is unknown or not uniquely recorded."""
    db = pm.getService('chdbengine')
    q = session.query(db.SLIVER_INFO_TABLE.c.slice_urn)
    q = q.filter(db.SLIVER_INFO_TABLE.c.sliver_urn == sliver_urn)
    rows = q.all()
    if len(rows) != 1:
        return None
    return rows[0].slice_urn
# How long do we keep cache entries for operator privileges
OPERATOR_CACHE_LIFETIME_SECS = 60  # seconds
# How long do we keep cache entries for PI privileges
PI_CACHE_LIFETIME_SECS = 60  # seconds
# Lookup whether given user (by urn) has 'operator'
# as an attribute in ma_member_attribute
def lookup_operator_privilege(user_urn, session):
    """Return True if the member with the given URN carries the
    'OPERATOR' attribute in the member attribute table.

    Results are held in the 'operator_privilege' timed cache for
    OPERATOR_CACHE_LIFETIME_SECS.
    """
    db = pm.getService('chdbengine')
    cache = cache_get('operator_privilege')
    hit = timed_cache_lookup(cache, user_urn, OPERATOR_CACHE_LIFETIME_SECS)
    if hit:
        return hit['value']
    # Self-join of the member attribute table: urn_attrs resolves the
    # URN to a member_id; op_attrs checks that member for an OPERATOR row.
    urn_attrs = alias(db.MEMBER_ATTRIBUTE_TABLE)
    op_attrs = alias(db.MEMBER_ATTRIBUTE_TABLE)
    q = session.query(op_attrs.c.value)
    q = q.filter(urn_attrs.c.member_id == op_attrs.c.member_id)
    q = q.filter(urn_attrs.c.name == 'urn')
    q = q.filter(urn_attrs.c.value == user_urn)
    q = q.filter(op_attrs.c.name == 'OPERATOR')
    is_operator = len(q.all()) > 0
    timed_cache_register(cache, user_urn, is_operator)
    return is_operator
# Is given user an authority?
def lookup_authority_privilege(user_urn, session):
    """Return True if the URN names an authority (contains the
    '+authority+' URN segment).  'session' is unused but kept for
    signature parity with the other lookup_* helpers."""
    return "+authority+" in user_urn
# Lookup whether given user (by urn) has 'project_lead'
# as an attribute in ma_member_attribute
def lookup_pi_privilege(user_urn, session):
    """Return True if the member with the given URN carries the
    'PROJECT_LEAD' attribute in the member attribute table.

    Results are held in the 'pi_privilege' timed cache for
    PI_CACHE_LIFETIME_SECS.
    """
    db = pm.getService('chdbengine')
    cache = cache_get('pi_privilege')
    hit = timed_cache_lookup(cache, user_urn, PI_CACHE_LIFETIME_SECS)
    if hit:
        return hit['value']
    # Self-join: urn_attrs maps the URN to a member_id; lead_attrs
    # checks that member for a PROJECT_LEAD row.
    urn_attrs = alias(db.MEMBER_ATTRIBUTE_TABLE)
    lead_attrs = alias(db.MEMBER_ATTRIBUTE_TABLE)
    q = session.query(lead_attrs.c.value)
    q = q.filter(urn_attrs.c.member_id == lead_attrs.c.member_id)
    q = q.filter(urn_attrs.c.name == 'urn')
    q = q.filter(urn_attrs.c.value == user_urn)
    q = q.filter(lead_attrs.c.name == 'PROJECT_LEAD')
    is_project_lead = len(q.all()) > 0
    timed_cache_register(cache, user_urn, is_project_lead)
    return is_project_lead
# Get role of member on each of list of projects
def get_project_role_for_member(caller_urn, project_urns, session):
    """Return rows giving caller_urn's role in each of the given
    projects.  Each row carries (role, project_name, member-attribute
    columns); projects the caller does not belong to produce no row.
    """
    db = pm.getService('chdbengine')
    if not isinstance(project_urns, list):
        project_urns = [project_urns]
    if not project_urns:
        return []
    # Projects are joined by name, which is extracted from each URN.
    names = [get_name_from_urn(urn) for urn in project_urns]
    query = session.query(db.PROJECT_MEMBER_TABLE.c.role,
                          db.PROJECT_TABLE.c.project_name,
                          db.MEMBER_ATTRIBUTE_TABLE)
    query = query.filter(db.PROJECT_MEMBER_TABLE.c.project_id ==
                         db.PROJECT_TABLE.c.project_id)
    query = query.filter(db.PROJECT_TABLE.c.project_name.in_(names))
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn')
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.value == caller_urn)
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.member_id ==
                         db.PROJECT_MEMBER_TABLE.c.member_id)
    return query.all()
# Get role of member on each of list of slices
def get_slice_role_for_member(caller_urn, slice_urns, session):
    """Return rows giving caller_urn's role in each of the given
    slices.  Each row carries (role, slice_urn, member-attribute
    columns); slices the caller does not belong to produce no row.
    """
    db = pm.getService('chdbengine')
    if not isinstance(slice_urns, list):
        slice_urns = [slice_urns]
    if not slice_urns:
        return []
    query = session.query(db.SLICE_MEMBER_TABLE.c.role,
                          db.SLICE_TABLE.c.slice_urn,
                          db.MEMBER_ATTRIBUTE_TABLE)
    query = query.filter(db.SLICE_MEMBER_TABLE.c.slice_id ==
                         db.SLICE_TABLE.c.slice_id)
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.member_id ==
                         db.SLICE_MEMBER_TABLE.c.member_id)
    query = query.filter(db.SLICE_TABLE.c.slice_urn.in_(slice_urns))
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn')
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.value == caller_urn)
    return query.all()
# Support for parsing CHAPI policies from JSON files
# Take a JSON file and return a dictionary of
# method => {"policy" : ..., "assertions" : ... }
def parse_method_policies(filename):
policies = {}
try:
data = open(filename).read()
raw_policies = json.loads(data)
# chapi_info("PMP", "DATA = %s" % data)
# chapi_info("PMP", "RP = %s" % raw_policies)
# Replace names of functions with functions"
for method_name, method_attrs in raw_policies.items():
if method_name == "__DOC__" or \
isinstance(method_attrs, basestring):
continue
# chapi_info("PMP", "MN = %s MA = %s" % (method_name, method_attrs))
assertions = None
extractor = None
policy_statements = []
for attr_name, attr_values in method_attrs.items():
# chapi_info("PMP", "AN = %s AV = %s" % (attr_name, attr_values))
if attr_name == 'assertions':
assertions = attr_values
elif attr_name == 'policies':
raw_policy_statements = attr_values
policy_statements = \
[rps.replace("$METHOD", method_name.upper()) \
for rps in raw_policy_statements]
policies[method_name] = {"policies" : policy_statements,
"assertions" : assertions}
except Exception, e:
chapi_info("Error", "%s" % e)
raise Exception("Error parsing policy file: %s" % filename)
return policies
# The convention of these methods is to return the list of subjects that
# Satisfy the criteria
# e.g. shares_project(member1_urn, member2_urns) returns the
# subset of member2_urns that share a project with member1_urn
# Return those members of member2_urns that share membership
# in a project with member1_urn
def shares_project(member1_urn, member2_urns, session, project_uid = None):
    """Return the subset of member2_urns that share at least one
    project membership with member1_urn (optionally restricted to
    project_uid)."""
    if not member2_urns:
        return []
    db = pm.getService("chdbengine")
    # Two project-member aliases tied to the same project, each
    # resolved to a URN through a member-attribute alias.
    memb1 = aliased(db.PROJECT_MEMBER_TABLE)
    memb2 = aliased(db.PROJECT_MEMBER_TABLE)
    attr1 = aliased(db.MEMBER_ATTRIBUTE_TABLE)
    attr2 = aliased(db.MEMBER_ATTRIBUTE_TABLE)
    q = session.query(memb1.c.project_id,
                      attr1.c.value.label('member1'),
                      attr2.c.value.label('member2'))
    if project_uid is not None:
        q = q.filter(memb1.c.project_id == project_uid)
    q = q.filter(memb1.c.project_id == memb2.c.project_id)
    q = q.filter(memb1.c.member_id == attr1.c.member_id)
    q = q.filter(memb2.c.member_id == attr2.c.member_id)
    q = q.filter(attr1.c.name == 'urn')
    q = q.filter(attr2.c.name == 'urn')
    q = q.filter(attr1.c.value == member1_urn)
    q = q.filter(attr2.c.value.in_(member2_urns))
    return [row.member2 for row in q.all()]
# Return those members of member2_urns who share a slice with member1_urn
def shares_slice(member1_urn, member2_urns, session, slice_uid = None):
    """Return the subset of member2_urns that share at least one
    non-expired slice membership with member1_urn (optionally
    restricted to slice_uid)."""
    if not member2_urns:
        return []
    db = pm.getService("chdbengine")
    slices = aliased(db.SLICE_TABLE)
    memb1 = aliased(db.SLICE_MEMBER_TABLE)
    memb2 = aliased(db.SLICE_MEMBER_TABLE)
    attr1 = aliased(db.MEMBER_ATTRIBUTE_TABLE)
    attr2 = aliased(db.MEMBER_ATTRIBUTE_TABLE)
    q = session.query(slices.c.expired, memb1.c.slice_id, memb2.c.slice_id,
                      attr1.c.value.label('member1'),
                      attr2.c.value.label('member2'))
    if slice_uid is not None:
        q = q.filter(memb1.c.slice_id == slice_uid)
    q = q.filter(slices.c.slice_id == memb1.c.slice_id)
    q = q.filter(slices.c.expired == False)  # only live slices count
    q = q.filter(memb1.c.slice_id == memb2.c.slice_id)
    q = q.filter(memb1.c.member_id == attr1.c.member_id)
    q = q.filter(memb2.c.member_id == attr2.c.member_id)
    q = q.filter(attr1.c.name == 'urn')
    q = q.filter(attr2.c.name == 'urn')
    q = q.filter(attr1.c.value == member1_urn)
    q = q.filter(attr2.c.value.in_(member2_urns))
    return [row.member2 for row in q.all()]
# Return those members of member_urns that have a given role on some object
def has_role_on_some_project(member_urns, role, session):
    """Return the subset of member_urns that hold 'role' on at least
    one project."""
    if not member_urns:
        return []
    db = pm.getService("chdbengine")
    query = session.query(db.PROJECT_MEMBER_TABLE.c.member_id,
                          db.PROJECT_MEMBER_TABLE.c.role,
                          db.MEMBER_ATTRIBUTE_TABLE.c.value)
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn')
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.value.in_(member_urns))
    query = query.filter(db.MEMBER_ATTRIBUTE_TABLE.c.member_id ==
                         db.PROJECT_MEMBER_TABLE.c.member_id)
    query = query.filter(db.PROJECT_MEMBER_TABLE.c.role == role)
    return [row.value for row in query.all()]
# Return the list of members who have a pending request from or
# to someone in list of other members
# That is, if subject_is_lead
# Return those members of lead_urns who have a request pending
# from one or more of the requestor_urns
# Otherwise if not subject_is_lead
# Return those members of requestor_urns who have a request pending
# to one or more of the lead_urns
def has_pending_request_on_project_lead_by(lead_urns, requestor_urns,
                                           subject_is_lead,
                                           session):
    """Return members with a pending project join request between the
    two groups.

    If subject_is_lead, returns those of lead_urns (project lead/admin)
    who have a request pending from someone in requestor_urns;
    otherwise returns those of requestor_urns with a request pending to
    someone in lead_urns.
    """
    if lead_urns is None or len(lead_urns) == 0 or requestor_urns is None or len(requestor_urns) == 0:
        return []
    db = pm.getService("chdbengine")
    # pm1 joins the request's project to its leads/admins; ma1/ma2 map
    # the lead and requestor member ids to URNs.  (An unused second
    # PROJECT_MEMBER_TABLE alias and an unused 'subjects' local were
    # removed from the original.)
    pm1 = aliased(db.PROJECT_MEMBER_TABLE)
    ma1 = aliased(db.MEMBER_ATTRIBUTE_TABLE)
    ma2 = aliased(db.MEMBER_ATTRIBUTE_TABLE)
    q = session.query(db.PROJECT_REQUEST_TABLE.c.status,
                      ma1.c.value.label('lead_urn'),
                      ma2.c.value.label('requestor_urn'))
    q = q.filter(pm1.c.member_id == ma1.c.member_id)
    q = q.filter(db.PROJECT_REQUEST_TABLE.c.requestor == ma2.c.member_id)
    q = q.filter(ma1.c.name == 'urn')
    q = q.filter(ma2.c.name == 'urn')
    q = q.filter(ma1.c.value.in_(lead_urns))
    q = q.filter(ma2.c.value.in_(requestor_urns))
    q = q.filter(db.PROJECT_REQUEST_TABLE.c.context_id == pm1.c.project_id)
    q = q.filter(pm1.c.role.in_([LEAD_ATTRIBUTE, ADMIN_ATTRIBUTE]))
    q = q.filter(db.PROJECT_REQUEST_TABLE.c.status == PENDING_STATUS)
    rows = q.all()
    members = []
    for row in rows:
        members.append(row.lead_urn if subject_is_lead else row.requestor_urn)
    return members
# Return the requestor URN of the request ID, or None if none exists
def get_project_request_requestor_urn(request_id, session):
    """Return the URN of the member who filed the given project
    request, or None if no matching request/urn row exists."""
    db = pm.getService("chdbengine")
    q = session.query(db.PROJECT_REQUEST_TABLE.c.requestor,
                      db.MEMBER_ATTRIBUTE_TABLE.c.value)
    q = q.filter(db.PROJECT_REQUEST_TABLE.c.id == request_id)
    q = q.filter(db.PROJECT_REQUEST_TABLE.c.requestor ==
                 db.MEMBER_ATTRIBUTE_TABLE.c.member_id)
    q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn')
    rows = q.all()
    if not rows:
        return None
    return rows[0].value
# Return the project URN of the request ID, or None if none exists
def get_project_request_project_urn(request_id, session):
    """Return the URN of the project targeted by the given request,
    or None if the request does not exist."""
    db = pm.getService("chdbengine")
    q = session.query(db.PROJECT_REQUEST_TABLE.c.context_id)
    q = q.filter(db.PROJECT_REQUEST_TABLE.c.id == request_id)
    rows = q.all()
    if not rows:
        return None
    # context_id of a project request is the project uid.
    return convert_project_uid_to_urn(rows[0].context_id, session)
# Return the URN of the owner of an SSH key
def get_key_owner_urn(key_id, session):
    """Return the URN of the member owning the given SSH key, or None
    if the key is unknown."""
    db = pm.getService("chdbengine")
    q = session.query(db.MEMBER_ATTRIBUTE_TABLE.c.value)
    q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.name == 'urn')
    q = q.filter(db.MEMBER_ATTRIBUTE_TABLE.c.member_id ==
                 db.SSH_KEY_TABLE.c.member_id)
    q = q.filter(db.SSH_KEY_TABLE.c.id == key_id)
    rows = q.all()
    return rows[0].value if rows else None
|
mit
|
elky/django
|
django/middleware/http.py
|
39
|
1636
|
from django.utils.cache import (
cc_delim_re, get_conditional_response, set_response_etag,
)
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import parse_http_date_safe
class ConditionalGetMiddleware(MiddlewareMixin):
    """
    Handle conditional GET operations. If the response has an ETag or
    Last-Modified header and the request has If-None-Match or If-Modified-Since,
    replace the response with HttpNotModified. Add an ETag header if needed.
    """
    def process_response(self, request, response):
        # Conditional handling only applies to GET: a 412 would come
        # too late for unsafe methods, and a HEAD response body is
        # always empty, so an accurate ETag cannot be computed.
        if request.method != 'GET':
            return response
        if not response.has_header('ETag') and self.needs_etag(response):
            set_response_etag(response)
        etag = response.get('ETag')
        last_modified = response.get('Last-Modified')
        if last_modified:
            last_modified = parse_http_date_safe(last_modified)
        if not (etag or last_modified):
            return response
        return get_conditional_response(
            request,
            etag=etag,
            last_modified=last_modified,
            response=response,
        )
    def needs_etag(self, response):
        """Return True if an ETag header should be added to response."""
        tokens = cc_delim_re.split(response.get('Cache-Control', ''))
        return not any(token.lower() == 'no-store' for token in tokens)
|
bsd-3-clause
|
pandeyop/tempest
|
tempest/api/image/v2/test_images.py
|
6
|
10418
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from six import moves
from tempest_lib.common.utils import data_utils
from tempest_lib import decorators
from tempest.api.image import base
from tempest import test
class BasicOperationsImagesTest(base.BaseV2ImageTest):
    """
    Here we test the basic operations of images
    """
    @decorators.skip_because(bug="1452987")
    @test.attr(type='smoke')
    @test.idempotent_id('139b765e-7f3d-4b3d-8b37-3ca3876ee318')
    def test_register_upload_get_image_file(self):
        """
        Here we test these functionalities - Register image,
        upload the image file, get image and get image file api's
        """
        # Fixed ramdisk id used to verify the property round-trips.
        uuid = '00000000-1111-2222-3333-444455556666'
        image_name = data_utils.rand_name('image')
        body = self.create_image(name=image_name,
                                 container_format='bare',
                                 disk_format='raw',
                                 visibility='private',
                                 ramdisk_id=uuid)
        self.assertIn('id', body)
        image_id = body.get('id')
        self.assertIn('name', body)
        self.assertEqual(image_name, body['name'])
        self.assertIn('visibility', body)
        self.assertEqual('private', body['visibility'])
        self.assertIn('status', body)
        self.assertEqual('queued', body['status'])
        # Now try uploading an image file
        file_content = data_utils.random_bytes()
        image_file = moves.cStringIO(file_content)
        self.client.store_image_file(image_id, image_file)
        # Now try to get image details
        body = self.client.show_image(image_id)
        self.assertEqual(image_id, body['id'])
        self.assertEqual(image_name, body['name'])
        self.assertEqual(uuid, body['ramdisk_id'])
        self.assertIn('size', body)
        # NOTE(review): asserts exactly 1024 -- presumably the default
        # size of data_utils.random_bytes(); confirm against tempest_lib.
        self.assertEqual(1024, body.get('size'))
        # Now try get image file
        body = self.client.load_image_file(image_id)
        self.assertEqual(file_content, body.data)
    @test.attr(type='smoke')
    @test.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
    def test_delete_image(self):
        # Deletes an image by image_id
        # Create image
        image_name = data_utils.rand_name('image')
        body = self.client.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='raw',
                                        visibility='private')
        image_id = body['id']
        # Delete Image
        self.client.delete_image(image_id)
        self.client.wait_for_resource_deletion(image_id)
        # Verifying deletion
        images = self.client.list_images()
        images_id = [item['id'] for item in images]
        self.assertNotIn(image_id, images_id)
    @decorators.skip_because(bug="1452987")
    @test.attr(type='smoke')
    @test.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
    def test_update_image(self):
        # Updates an image by image_id
        # Create image
        image_name = data_utils.rand_name('image')
        body = self.client.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='iso',
                                        visibility='private')
        self.addCleanup(self.client.delete_image, body['id'])
        self.assertEqual('queued', body['status'])
        image_id = body['id']
        # Now try uploading an image file
        image_file = moves.cStringIO(data_utils.random_bytes())
        self.client.store_image_file(image_id, image_file)
        # Update Image
        new_image_name = data_utils.rand_name('new-image')
        # JSON-patch style update: replace the /name attribute.
        body = self.client.update_image(image_id, [
            dict(replace='/name', value=new_image_name)])
        # Verifying updating
        body = self.client.show_image(image_id)
        self.assertEqual(image_id, body['id'])
        self.assertEqual(new_image_name, body['name'])
class ListImagesTest(base.BaseV2ImageTest):
    """
    Here we test the listing of image information
    """
    @classmethod
    def resource_setup(cls):
        super(ListImagesTest, cls).resource_setup()
        # We add a few images here to test the listing functionality of
        # the images API
        cls._create_standard_image('bare', 'raw')
        cls._create_standard_image('bare', 'raw')
        cls._create_standard_image('ami', 'raw')
        # Add some more for listing
        cls._create_standard_image('ami', 'ami')
        cls._create_standard_image('ari', 'ari')
        cls._create_standard_image('aki', 'aki')
    @classmethod
    def _create_standard_image(cls, container_format, disk_format):
        """
        Create a new standard image and return the ID of the newly-registered
        image. Note that the size of the new image is a random number between
        1024 and 4096
        """
        size = random.randint(1024, 4096)
        image_file = moves.cStringIO(data_utils.random_bytes(size))
        name = data_utils.rand_name('image')
        body = cls.create_image(name=name,
                                container_format=container_format,
                                disk_format=disk_format,
                                visibility='private')
        image_id = body['id']
        # NOTE(review): the upload is commented out, so image_file is
        # unused and images are registered without stored data.
        # cls.client.store_image(image_id, data=image_file)
        return image_id
    def _list_by_param_value_and_assert(self, params):
        """
        Perform list action with given params and validates result.
        """
        images_list = self.client.list_images(params=params)
        # Validating params of fetched images
        for image in images_list:
            for key in params:
                msg = "Failed to list images by %s" % key
                self.assertEqual(params[key], image[key], msg)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
    def test_index_no_params(self):
        # Simple test to see all fixture images returned
        images_list = self.client.list_images()
        # NOTE(review): map() returns a list on Python 2; on Python 3 it
        # would be a one-shot iterator that repeated assertIn exhausts.
        image_list = map(lambda x: x['id'], images_list)
        for image in self.created_images:
            self.assertIn(image, image_list)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
    def test_list_images_param_container_format(self):
        # Test to get all images with container_format='bare'
        params = {"container_format": "bare"}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
    def test_list_images_param_disk_format(self):
        # Test to get all images with disk_format = raw
        params = {"disk_format": "raw"}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
    def test_list_images_param_visibility(self):
        # Test to get all images with visibility = private
        params = {"visibility": "private"}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
    def test_list_images_param_size(self):
        # Test to get all images by size
        image_id = self.created_images[1]
        # Get image metadata
        image = self.client.show_image(image_id)
        params = {"size": image['size']}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
    def test_list_images_param_min_max_size(self):
        # Test to get all images with size between 2000 to 3000
        image_id = self.created_images[1]
        # Get image metadata
        image = self.client.show_image(image_id)
        size = image['size']
        # Window of +/- 500 bytes around the sample image's size.
        params = {"size_min": size - 500, "size_max": size + 500}
        images_list = self.client.list_images(params=params)
        image_size_list = map(lambda x: x['size'], images_list)
        for image_size in image_size_list:
            self.assertTrue(image_size >= params['size_min'] and
                            image_size <= params['size_max'],
                            "Failed to get images by size_min and size_max")
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
    def test_list_images_param_status(self):
        # Test to get all active images
        params = {"status": "active"}
        self._list_by_param_value_and_assert(params)
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
    def test_list_images_param_limit(self):
        # Test to get images by limit
        params = {"limit": 2}
        images_list = self.client.list_images(params=params)
        self.assertEqual(len(images_list), params['limit'],
                         "Failed to get images by limit")
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
    def test_get_image_schema(self):
        # Test to get image schema
        schema = "image"
        body = self.client.show_schema(schema)
        self.assertEqual("image", body['name'])
    @decorators.skip_because(bug="1452987")
    @test.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
    def test_get_images_schema(self):
        # Test to get images schema
        schema = "images"
        body = self.client.show_schema(schema)
        self.assertEqual("images", body['name'])
|
apache-2.0
|
pchauncey/ansible
|
lib/ansible/constants.py
|
11
|
4431
|
# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os # used to set lang and for backwards compat get_config
from ast import literal_eval
from jinja2 import Template
from string import ascii_letters, digits
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean, BOOLEANS_TRUE
from ansible.module_utils.six import string_types
from ansible.config.manager import ConfigManager, ensure_type, get_ini_config_value
def _deprecated(msg):
''' display is not guaranteed here, nor it being the full class, but try anyways, fallback to sys.stderr.write '''
try:
from __main__ import display
display.deprecated(msg, version='2.8')
except:
import sys
sys.stderr.write('[DEPRECATED] %s, to be removed in 2.8' % msg)
def mk_boolean(value):
    ''' moved to module_utils'''
    # Deprecated shim: warn once, then delegate to the relocated
    # implementation (non-strict mode preserves the old lenient parsing).
    _deprecated('ansible.constants.mk_boolean() is deprecated. Use ansible.module_utils.parsing.convert_bool.boolean() instead')
    return boolean(value, strict=False)
def get_config(parser, section, key, env_var, default_value, value_type=None, expand_relative_paths=False):
    ''' Kept for backwards compatibility, but deprecated.

    Resolution order mirrors the old behaviour: environment variable,
    then ini file value, then the supplied default; the result is
    coerced via ensure_type.
    '''
    _deprecated('ansible.constants.get_config() is deprecated. There is new config API, see porting docs.')
    value = None
    # small reconstruction of the old code env/ini/default
    value = os.environ.get(env_var, None)
    if value is None:
        try:
            value = get_ini_config_value(parser, {'key': key, 'section': section})
        except Exception:
            # Narrowed from a bare 'except:'; a missing or unreadable ini
            # value simply falls through to the default.
            pass
    if value is None:
        value = default_value
    value = ensure_type(value, value_type)
    return value
def set_constant(name, value, export=vars()):
    ''' sets constants and returns resolved options dict '''
    # 'export' defaults to this module's namespace dict, captured once
    # at function-definition time, so each call publishes 'name' as a
    # module-level constant.  (Intentional use of a mutable default.)
    export[name] = value
### CONSTANTS ### yes, actual ones
# Privilege-escalation methods Ansible knows how to drive, and the
# prompt strings used to detect failed / missing passwords for each.
BECOME_METHODS = ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun']
BECOME_ERROR_STRINGS = {
    'sudo': 'Sorry, try again.',
    'su': 'Authentication failure',
    'pbrun': '',
    'pfexec': '',
    'doas': 'Permission denied',
    'dzdo': '',
    'ksu': 'Password incorrect',
    'pmrun': 'You are not permitted to run this command'
}  # FIXME: deal with i18n
BECOME_MISSING_STRINGS = {
    'sudo': 'sorry, a password is required to run sudo',
    'su': '',
    'pbrun': '',
    'pfexec': '',
    'doas': 'Authorization required',
    'dzdo': '',
    'ksu': 'No password given',
    'pmrun': ''
}  # FIXME: deal with i18n
# File extensions never considered when searching for modules/plugins.
BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
BOOL_TRUE = BOOLEANS_TRUE
CONTROLER_LANG = os.getenv('LANG', 'en_US.UTF-8')
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict')  # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES")  # ignore during module search
INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
MODULE_REQUIRE_ARGS = ('command', 'win_command', 'shell', 'win_shell', 'raw', 'script')
MODULE_NO_JSON = ('command', 'win_command', 'shell', 'win_shell', 'raw')
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python')
TREE_DIR = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
### POPULATE SETTINGS FROM CONFIG ###
config = ConfigManager()
# Generate constants from config
for setting in config.data.get_settings():
    value = setting.value
    # Default values written as '{{ ... }}' templates are rendered
    # against the constants defined so far, then literal_eval'd back
    # into Python data where possible.
    if setting.origin == 'default' and \
        isinstance(setting.value, string_types) and \
        (setting.value.startswith('{{') and setting.value.endswith('}}')):
        try:
            t = Template(setting.value)
            value = t.render(vars())
            try:
                value = literal_eval(value)
            except ValueError:
                pass  # not a python data structure
        except:
            pass  # not templatable
    value = ensure_type(value, setting.name)
    set_constant(setting.name, value)
|
gpl-3.0
|
sposs/DIRAC
|
WorkloadManagementSystem/JobWrapper/WatchdogFactory.py
|
7
|
2208
|
########################################################################
# $HeadURL$
# File : WatchdogFactory.py
# Author : Stuart Paterson
########################################################################
""" The Watchdog Factory instantiates a given Watchdog based on a quick
determination of the local operating system.
"""
from DIRAC import S_OK, S_ERROR, gLogger
__RCSID__ = "$Id$"
import re,sys,types,platform
class WatchdogFactory:
    """Instantiates the Watchdog subclass matching the local operating system."""

    #############################################################################
    def __init__(self):
        """Standard constructor."""
        # platform.uname()[0] is the OS name ('Linux', 'Darwin', 'Windows', ...)
        self.version = platform.uname()
        self.log = gLogger

    #############################################################################
    def getWatchdog(self, pid, thread, spObject, jobcputime, memoryLimit):
        """Return S_OK(watchdog instance) for the local OS (Linux by default),
        or S_ERROR if the corresponding Watchdog module cannot be loaded or
        instantiated.
        """
        if re.search('Darwin', self.version[0]):
            localOS = 'Mac'
        elif re.search('Windows', self.version[0]):
            localOS = 'Windows'
        else:
            localOS = 'Linux'
        self.log.info('WatchdogFactory will create Watchdog%s instance' % (localOS))

        subClassName = "Watchdog%s" % (localOS)
        try:
            ceSubClass = __import__('DIRAC.WorkloadManagementSystem.JobWrapper.%s' % subClassName,
                                    globals(), locals(), [subClassName])
        except Exception as x:
            msg = 'WatchdogFactory could not import DIRAC.WorkloadManagementSystem.JobWrapper.%s' % (subClassName)
            self.log.error(msg, x)
            return S_ERROR(msg)

        try:
            # Look the class up by name instead of eval()'ing a constructed
            # string; same effect, no dynamic code evaluation.
            watchdogClass = getattr(ceSubClass, subClassName)
            watchdogInstance = watchdogClass(pid, thread, spObject, jobcputime, memoryLimit)
        except Exception as x:
            msg = 'WatchdogFactory could not instantiate %s()' % (subClassName)
            self.log.error(msg, x)
            return S_ERROR(msg)

        return S_OK(watchdogInstance)
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
gpl-3.0
|
RadiumBot/Radium_tomato
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events that have no dedicated handler.
unhandled = autodict()

def trace_begin():
    """Called by perf once before any events are processed."""
    print "trace_begin"
    pass
def trace_end():
    """Called by perf after the last event; report any unhandled events."""
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    """Handler for irq:softirq_entry events; prints the symbolic vector name."""
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    """Handler for kmem:kmalloc events; prints allocation details with the
    gfp flags rendered symbolically via flag_str()."""
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    """Count events that have no dedicated handler, keyed by event name."""
    try:
        unhandled[event_name] += 1
    except TypeError:
        # First occurrence: `unhandled` is an autodict, so a missing key
        # presumably yields a nested dict and += raises TypeError — seed the
        # counter instead. (TODO confirm autodict semantics in Core.)
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    """Print the fields common to every event (trailing comma: no newline)."""
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    """Print common_* trace fields that are not passed as handler arguments."""
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    """Print a table of event names that had no handler, with their counts."""
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
bigdatauniversity/edx-platform
|
common/test/acceptance/tests/studio/test_studio_bad_data.py
|
173
|
3970
|
from base_studio_test import ContainerBase
from ...fixtures.course import XBlockFixtureDesc
from ...pages.studio.utils import verify_ordering
class BadComponentTest(ContainerBase):
    """
    Tests that components with bad content do not break the Unit page.
    """
    # Abstract base: concrete subclasses supply the problematic HTML via
    # get_bad_html_content(), so this class itself is excluded from discovery.
    __test__ = False

    def get_bad_html_content(self):
        """
        Return the "bad" HTML content that has been problematic for Studio.
        """
        # Overridden by concrete subclasses.
        pass

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with a unit and a HTML component with bad data and a properly constructed problem.
        """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('html', 'Unit HTML', data=self.get_bad_html_content()),
                        XBlockFixtureDesc('problem', 'Unit Problem', data='<problem></problem>')
                    )
                )
            )
        )

    def test_html_comp_visible(self):
        """
        Tests that bad HTML data within an HTML component doesn't prevent Studio from
        displaying the components on the unit page.
        """
        unit = self.go_to_unit_page()
        # Both components should render, in their authored order.
        verify_ordering(self, unit, [{"": ["Unit HTML", "Unit Problem"]}])
class CopiedFromLmsBadContentTest(BadComponentTest):
    """
    Tests that components with HTML copied from the LMS (LmsRuntime) do not break the Unit page.
    """
    __test__ = True

    def get_bad_html_content(self):
        """
        Return the "bad" HTML content that has been problematic for Studio.
        """
        # HTML pasted from a rendered LMS page, including LmsRuntime wrapper
        # markup that Studio must not attempt to initialize as an XBlock.
        return """
        <div class="xblock xblock-student_view xmodule_display xmodule_HtmlModule xblock-initialized"
        data-runtime-class="LmsRuntime" data-init="XBlockToXModuleShim" data-block-type="html"
        data-runtime-version="1" data-type="HTMLModule" data-course-id="GeorgetownX/HUMW-421-01"
        data-request-token="thisIsNotARealRequestToken"
        data-usage-id="i4x:;_;_GeorgetownX;_HUMW-421-01;_html;_3010cbbecaa1484da6cf8ba01362346a">
        <p>Copied from LMS HTML component</p></div>
        """
class CopiedFromStudioBadContentTest(BadComponentTest):
    """
    Tests that components with HTML copied from the Studio (containing "ui-sortable" class) do not break the Unit page.
    """
    __test__ = True

    def get_bad_html_content(self):
        """
        Return the "bad" HTML content that has been problematic for Studio.
        """
        # HTML pasted from Studio itself: the "ui-sortable" wrapper markup
        # has historically confused the unit page's drag-and-drop handling.
        return """
        <ol class="components ui-sortable">
        <li class="component" data-locator="i4x://Wellesley_College/100/html/6390f1fd3fe640d49580b8415fe1330b"
        data-course-key="Wellesley_College/100/2014_Summer">
        <div class="xblock xblock-student_view xmodule_display xmodule_HtmlModule xblock-initialized"
        data-runtime-class="PreviewRuntime" data-init="XBlockToXModuleShim" data-runtime-version="1"
        data-request-token="thisIsNotARealRequestToken"
        data-usage-id="i4x://Wellesley_College/100/html/6390f1fd3fe640d49580b8415fe1330b"
        data-type="HTMLModule" data-block-type="html">
        <h2>VOICE COMPARISON </h2>
        <p>You can access the experimental <strong >Voice Comparison</strong> tool at the link below.</p>
        </div>
        </li>
        </ol>
        """
class JSErrorBadContentTest(BadComponentTest):
    """
    Tests that components that throw JS errors do not break the Unit page.
    """
    __test__ = True

    def get_bad_html_content(self):
        """
        Return the "bad" HTML content that has been problematic for Studio.
        """
        # Referencing an undefined global raises a ReferenceError at render time.
        return "<script>var doesNotExist = BadGlobal.foo;</script>"
|
agpl-3.0
|
PaoloCifariello/Node-Raspberry-GMusic
|
src/py_modules/url_getter.py
|
1
|
1438
|
import pygame, StringIO, sys, socket, os
from gmusicapi import Webclient, exceptions
class UrlGetter:
    """Serves Google Music stream URLs over a UNIX domain socket.

    Clients connect, send a song identifier, and receive the first stream
    URL returned by the Google Music web API for that song.
    """

    def __init__(self, sck_path, user, passwd):
        """Initialise pygame audio, log into Google Music and start serving.

        sck_path -- filesystem path for the UNIX socket
        user, passwd -- Google account credentials
        """
        pygame.init()
        pygame.mixer.init()
        self.sck_path = sck_path
        self.webapi = Webclient()
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # Remove a stale socket file left over from a previous run, if any.
        try:
            os.remove(self.sck_path)
        except OSError:
            pass
        # GMusic login.
        # NOTE(review): a failed login is only logged and serving continues
        # anyway — verify this best-effort behavior is intentional.
        try:
            self.webapi.login(user, passwd)
        except:
            sys.stderr.write('Problem with authentication on Google server\n')
        self.init_socket()

    def init_socket(self):
        """Bind the UNIX socket and serve client connections forever."""
        self.socket.bind(self.sck_path)
        self.socket.listen(3)
        while 1:
            conn, addr = self.socket.accept()
            self.manage_connection(conn)

    def manage_connection(self, conn):
        """Read a song id from the connection and reply with its stream URL."""
        # Song ids are assumed to fit in 50 bytes — TODO confirm.
        data = conn.recv(50)
        if data:
            try:
                stream_url = self.webapi.get_stream_urls(data)
            except exceptions.CallFailure:
                # Unknown/invalid song id: drop the connection silently.
                conn.close()
                return
            conn.send(stream_url[0])
            print('url_getter.py: Ottenuta URL -> ' + stream_url[0])
            conn.close()
if __name__ == "__main__":
    # argv: socket path, Google username, password.
    url_getter = UrlGetter(sys.argv[1], sys.argv[2], sys.argv[3])
|
mit
|
Pointedstick/ReplicatorG
|
skein_engines/skeinforge-40/fabmetheus_utilities/miscellaneous/fabricate/example.py
|
23
|
3740
|
#!/usr/bin/python
try:
    import serial
except:
    # pySerial is required to talk to the machine over the serial port.
    print('You do not have pySerial installed, which is needed to control the serial port.')
    print('Information on pySerial is at:\nhttp://pyserial.wiki.sourceforge.net/pySerial')

import reprap, time, sys

# Command-line exerciser for a RepRap machine: sys.argv[1] selects an action
# (stop/reset/pos/goto/...), further arguments supply its parameters.

#reprap.snap.printOutgoingPackets = True
#reprap.snap.printIncomingPackets = True
#reprap.snap.printFailedPackets = True
#reprap.printDebug = True

#work surface approx x 2523, y 2743

#reprap.serial = serial.Serial(0, 19200, timeout = reprap.snap.messageTimeout)
reprap.serial = serial.Serial(0, 19200, timeout = 60)

reprap.cartesian.x.active = True  # these devices are present in network
reprap.cartesian.y.active = True
reprap.cartesian.z.active = True
reprap.extruder.active = True

reprap.cartesian.x.setNotify()
reprap.cartesian.y.setNotify()
reprap.cartesian.z.setNotify()

# Soft travel limits (the y limit is deliberately reduced from 2743).
reprap.cartesian.x.limit = 2523
#reprap.cartesian.y.limit = 2743
reprap.cartesian.y.limit = 2000

def printPos():
    """Query the machine and print the current cartesian position."""
    x, y, z = reprap.cartesian.getPos()
    print "Location [" + str(x) + ", " + str(y) + ", " + str(z) + "]"

print "================================================================"

########### control of cartesian frame as a whole #########

#stop all steppers
if sys.argv[1] == "stop":
    reprap.cartesian.stop()

#goto 0,0
if sys.argv[1] == "reset":
    reprap.cartesian.homeReset( 200, True )
    #time.sleep(2)
    printPos()

#print current positon
if sys.argv[1] == "pos":
    printPos()

#goto a specific location
if sys.argv[1] == "goto":
    reprap.cartesian.seek( ( int(sys.argv[2]), int(sys.argv[3]), 0 ), 200, False)
    printPos()

#goto a specific location (use sync)
if sys.argv[1] == "gotos":
    reprap.cartesian.syncSeek( ( int(sys.argv[2]), int(sys.argv[3]), 0 ), 200, False)
    printPos()

if sys.argv[1] == "power":
    reprap.cartesian.setPower( int( sys.argv[2] ) )  # This is a value from 0 to 63 (6 bits)

#test routine
if sys.argv[1] == "go":  #stepper test
    reprap.cartesian.seek( (1000, 1000, 0), 200, True )
    time.sleep(2)
    reprap.cartesian.seek( (500, 1000, 0), 200, True )
    time.sleep(2)
    reprap.cartesian.seek( (500, 500, 0), 200, True )
    time.sleep(2)
    reprap.cartesian.seek( (10, 10, 0), 200, True )

#free motors (switch off all coils)
if sys.argv[1] == "free":
    reprap.axies.free(reprap.axisX)
    reprap.axies.free(reprap.axisY)

############## control of individual steppers #############

#spin stepper
if sys.argv[1] == "run":  # run axis
    if sys.argv[2] == "x":
        reprap.cartesian.x.forward( int(sys.argv[3]) )
    elif sys.argv[2] == "y":
        reprap.cartesian.y.forward( int(sys.argv[3]) )

#spin stepper in reverse
if sys.argv[1] == "runb":  #runb axis
    if sys.argv[2] == "x":
        reprap.axies.backward( reprap.axisX, int(sys.argv[3]) )
    elif sys.argv[2] == "y":
        reprap.axies.backward( reprap.axisY, int(sys.argv[3]) )

if sys.argv[1] == "step":
    if sys.argv[2] == "x":
        reprap.cartesian.x.forward1()
    elif sys.argv[2] == "y":
        reprap.cartesian.y.forward1()

################# control of extruder #####################

# NOTE: the following elifs chain off the "step" test above; since the
# argv[1] values are mutually exclusive the behavior is unaffected.
#test extrder motor
elif sys.argv[1] == "motor":
    nn = 0
    while 1:
        # Toggle the motor speed between 0 and 150 once per second.
        if nn > 0:
            nn = 0
        else:
            nn = 150
        reprap.extruder.setMotor(reprap.CMD_REVERSE, nn)
        time.sleep(1)

elif sys.argv[1] == "getinfo":
    mtype = reprap.extruder.getModuleType()
    version = reprap.extruder.getVersion()
    print "module", mtype, "version", version

elif sys.argv[1] == "heat":
    reprap.extruder.setHeat(255, 255, 255, 255)
    #setHeat(self, lowHeat, highHeat, tempTarget, tempMax

elif sys.argv[1] == "temp":
    print "Temp is ", reprap.extruder.getTemp()

elif sys.argv[1] == "setref":
    reprap.extruder.setVoltateReference( int(sys.argv[2]) )

############### scan network for devices ###################

#scan snap network
elif sys.argv[1] == "scan":
    reprap.scanNetwork()
|
gpl-2.0
|
Reinis/openbabel
|
test/testfastsearch.py
|
2
|
1907
|
"""Test OpenBabel executables from Python
Note: Python bindings not used
On Windows or Linux, you can run these tests at the commandline
in the build folder with:
"C:\Program Files\CMake 2.6\bin\ctest.exe" -C CTestTestfile.cmake
-R pytest -VV
You could also "chdir" into build/test and run the test file directly:
python ../../../test/testfastsearch.py
In both cases, the test file is run directly from the source folder,
and so you can quickly develop the tests and try them out.
"""
import unittest
from testbabel import run_exec, BaseTest
class TestSym(BaseTest):
    """A series of tests relating to fastsearch functionality"""

    def setUp(self):
        # All tests shell out to the babel executable.
        self.canFindExecutable("babel")

    def testSingleHit(self):
        """PR#2955101 - Difficulty reading from a fastsearch index"""
        # Ten sample molecules, written out as ten.smi and indexed into ten.fs.
        smiles = """C12(C(N(C(=O)C)c3c2cccc3)=O)Nc2c(ccc(c2N1)OCCCC)OCCCC
n1c([nH]c(cc1c1ccccc1)=O)c1ccc(cc1)Br
n1c(nc2c(c1N(C)C)cccc2)c1c(O)cccc1
C1(/[CH]2[CH]3\C(=C4/CC(C)(C)NC(C4)(C)C)C=C[CH]3[CH]1C=C2)=C1/CC(C)(C)NC(C1)(C)C
n1c(c2ccc(C(=O)O)cc2)ccc(c1)CCCCC
N1(C(CN(CC1=O)C(=O)C1CCCCC1)=O)CCc1ccccc1
S(N1[CH](c2ccccc2C=C1)C#N)(c1ccc(cc1)C)(=O)=O
c12c(c(OC)c3c(c1OC)occ3)ccc(o2)=O
c12c(O[CH](C1=O)C(C)C)cc1c(c2)ccc(=O)o1
c12[C]3([C@H]4([N@@](CCc1c1ccccc1[nH]2)C[C@H](C=C4CC)C3))C(=O)OC"""
        outputfile = open("ten.smi", "w")
        outputfile.write(smiles)
        outputfile.close()
        # Build the fastsearch index from the SMILES file.
        output, error = run_exec("babel ten.smi ten.fs")
        self.canFindFile("ten.fs")
        self.assertConverted(error, 10)
        # Substructure search against the index should return exactly one hit.
        query = "Nc2nc(c1ccccc1)nc3ccccc23"
        output, error = run_exec("babel ten.fs -ifs -s %s -osmi" % query)
        self.assertConverted(error, 1)
        # Similarity search at a 0.5 threshold should also return one hit.
        output, error = run_exec("babel ten.fs -ifs -s %s -at 0.5 -aa -osmi" % query)
        self.assertConverted(error, 1)
if __name__ == "__main__":
    # Run the fastsearch tests when executed directly.
    unittest.main()
|
gpl-2.0
|
naresh21/synergetics-edx-platform
|
lms/djangoapps/certificates/migrations/0003_data__default_modes.py
|
54
|
1388
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Converted from the original South migration 0002_default_rate_limit_config.py
from django.db import migrations, models
from django.conf import settings
from django.core.files import File
def forwards(apps, schema_editor):
    """Add default modes.

    Creates one BadgeImageConfiguration per standard mode ('honor',
    'verified', 'professional'), attaching the matching default badge image.
    Runs only against the default database and only when no configurations
    exist yet.
    """
    BadgeImageConfiguration = apps.get_model("certificates", "BadgeImageConfiguration")
    db_alias = schema_editor.connection.alias
    # This will need to be changed if badges/certificates get moved out of the default db for some reason.
    if db_alias != 'default':
        return
    objects = BadgeImageConfiguration.objects.using(db_alias)
    if not objects.exists():
        for mode in ['honor', 'verified', 'professional']:
            conf = objects.create(mode=mode)
            file_name = '{0}{1}'.format(mode, '.png')
            # Open the image in binary mode (it is a PNG) and close the handle
            # promptly instead of leaking it via a bare open() call.
            with open(settings.PROJECT_ROOT / 'static' / 'images' / 'default-badges' / file_name, 'rb') as image_file:
                conf.icon.save(
                    'badges/{}'.format(file_name),
                    File(image_file)
                )
            conf.save()
def backwards(apps, schema_editor):
    """Do nothing, assumptions too dangerous."""
    # Deliberate no-op: removing badge configurations on rollback could
    # discard data that administrators have since modified.
    pass
class Migration(migrations.Migration):
    # Must run after the certificate HTML-view configuration data migration.
    dependencies = [
        ('certificates', '0002_data__certificatehtmlviewconfiguration_data'),
    ]

    operations = [
        migrations.RunPython(forwards, backwards)
    ]
|
agpl-3.0
|
slipperyhank/pyphi
|
pyphi/models/fmt.py
|
1
|
12680
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# models/fmt.py
"""
Helper functions for formatting pretty representations of PyPhi models.
"""
from itertools import chain

from .. import config, utils

# TODO: will these print correctly on all terminals?
SMALL_PHI = "\u03C6"  # Greek small letter phi
BIG_PHI = "\u03D5"    # Greek phi symbol

# repr verbosity levels (see config.REPR_VERBOSITY)
LOW = 0
MEDIUM = 1
HIGH = 2
def make_repr(self, attrs):
    """Construct a repr string.

    If `config.REPR_VERBOSITY` is ``1`` or ``2``, this function calls the
    object's __str__ method. Although this breaks the convention that __repr__
    should return a string which can reconstruct the object, readable reprs are
    invaluable since the Python interpreter calls `repr` to represent all
    objects in the shell. Since PyPhi is often used in the interpreter we want
    to have meaningful and useful representations.

    Args:
        self (obj): The object in question
        attrs (Iterable[str]): Attributes to include in the repr

    Returns:
        str: the ``repr``esentation of the object
    """
    # TODO: change this to a closure so we can do
    # __repr__ = make_repr(attrs) ???

    if config.REPR_VERBOSITY in (MEDIUM, HIGH):
        return self.__str__()
    # Compare with `==`, not `is`: identity comparison of ints only works
    # because of CPython's small-integer caching and is not guaranteed.
    elif config.REPR_VERBOSITY == LOW:
        return "{}({})".format(
            self.__class__.__name__,
            ", ".join(attr + '=' + repr(getattr(self, attr)) for attr in attrs))

    raise ValueError("Invalid `REPR_VERBOSITY` value of {}. Must be one of "
                     "[0, 1, 2]".format(config.REPR_VERBOSITY))
def indent(lines, amount=2, chr=' '):
    """Indent a string.

    Prepends whitespace to every line in the passed string. (Lines are
    separated by newline characters.)

    Args:
        lines (str): The string to indent.

    Keyword Args:
        amount (int): The number of columns to indent by.
        chr (char): The character to to use as the indentation.

    Returns:
        str: The indented string.

    Example:
        >>> print(indent("line1\\nline2", chr="*"))
        **line1
        **line2
    """
    prefix = chr * amount
    return "\n".join(prefix + line for line in str(lines).split("\n"))
def box(text):
    """Wrap a chunk of text in a box.

    Example:
        >>> print(box('line1\\nline2'))
        ---------
        | line1 |
        | line2 |
        ---------
    """
    rows = text.split("\n")
    inner_width = max(len(row) for row in rows)
    border = "-" * (inner_width + 4)
    body = "\n".join("| {0:<{1}} |".format(row, inner_width) for row in rows)
    return "{0}\n{1}\n{2}".format(border, body, border)
def side_by_side(left, right):
    """Put two boxes next to each other.

    Assumes that all lines in the boxes are the same width.

    Example:
        >>> left = "A \\nC "
        >>> right = "B\\nD"
        >>> print(side_by_side(left, right))
        A B
        C D
        <BLANKLINE>
    """
    lhs = left.split("\n")
    rhs = right.split("\n")

    # Pad the shorter column with blank lines of matching width.
    if len(lhs) < len(rhs):
        lhs.extend([" " * len(lhs[0])] * (len(rhs) - len(lhs)))
    elif len(rhs) < len(lhs):
        rhs.extend([" " * len(rhs[0])] * (len(lhs) - len(rhs)))

    return "".join(a + b + "\n" for a, b in zip(lhs, rhs))
def header(header, text, over_char=None, under_char=None, center=True):
    """Center a header over a block of text.

    The width of the text is the width of the longest line of the text.
    """
    width = max(len(line) for line in text.split("\n"))

    # Align the header to the text width (centered or left-justified).
    aligned = (header.center(width) if center else header.ljust(width)) + "\n"

    # Optional rule below the header.
    if under_char:
        aligned += under_char * width + "\n"

    # Optional rule above the header.
    if over_char:
        aligned = over_char * width + "\n" + aligned

    return aligned + text
def labels(indices, subsystem=None):
    """Get the labels for a tuple of mechanism indices."""
    if subsystem is not None:
        return subsystem.indices2labels(indices)
    # Without a subsystem, fall back to the indices' string forms.
    return tuple(str(index) for index in indices)
def fmt_mechanism(indices, subsystem=None):
    """Format a mechanism or purview."""
    # A trailing comma distinguishes 0- and 1-element tuples, mirroring
    # Python's own tuple repr.
    trailing = ',' if len(indices) < 2 else ''
    return '({0}{1})'.format(', '.join(labels(indices, subsystem)), trailing)
def fmt_part(part, subsystem=None):
    """Format a |Part|.

    The returned string looks like::

        0,1
        ---
        []
    """
    def fmt_nodes(nodes):
        # An empty node set is rendered as '[]'.
        return ','.join(labels(nodes, subsystem)) if nodes else '[]'

    mechanism = fmt_nodes(part.mechanism)
    purview = fmt_nodes(part.purview)
    width = max(len(mechanism), len(purview))

    # Mechanism over purview, separated by a dashed rule, all centered.
    return "{0:^{w}}\n{1}\n{2:^{w}}".format(
        mechanism, '-' * width, purview, w=width)
def fmt_bipartition(partition, subsystem=None):
    """Format a |Bipartition|.

    The returned string looks like::

        0,1    []
        --- X ---
         2     0,1

    Args:
        partition (Bipartition): The partition in question.

    Returns:
        str: A human-readable string representation of the partition.
    """
    if not partition:
        return ""

    # Each part renders as three lines: numerator / divider / denominator.
    parts = [fmt_part(part, subsystem).split("\n") for part in partition]

    # Three-line ' X ' separator placed between adjacent parts.
    times = (" ",
             " X ",
             " ")
    breaks = ("\n", "\n", "")  # No newline at the end of string
    between = [times] * (len(parts) - 1) + [breaks]

    # Alternate [part, break, part, ..., end]
    elements = chain.from_iterable(zip(parts, between))

    # Transform vertical stacks into horizontal lines
    return "".join(chain.from_iterable(zip(*elements)))
def fmt_constellation(c, title=None):
    """Format a constellation."""
    # An empty constellation renders as an empty tuple.
    if not c:
        return "()\n"

    title = "Constellation" if title is None else title
    body = "".join(indent(concept) + "\n" for concept in c)
    plural = "" if len(c) == 1 else "s"
    heading = "{} ({} concept{})".format(title, len(c), plural)

    return "\n" + header(heading, body, "*", "*")
def fmt_concept(concept):
    """Format a |Concept|."""

    def fmt_cause_or_effect(x):
        # An absent cause/effect renders as an empty block.
        if not x:
            return ""
        return box(indent(fmt_mip(x.mip, verbose=False), amount=1))

    cause = header("Cause", fmt_cause_or_effect(concept.cause))
    effect = header("Effect", fmt_cause_or_effect(concept.effect))
    ce = side_by_side(cause, effect)

    mechanism = fmt_mechanism(concept.mechanism, concept.subsystem)
    title = "Concept: Mechanism = {}, {} = {}".format(
        mechanism, SMALL_PHI, concept.phi)

    # Only center headers for high-verbosity output.
    # Compare with `==`, not `is`: int identity is an implementation detail.
    center = config.REPR_VERBOSITY == HIGH

    return header(title, ce, "=", "=", center=center)
def fmt_mip(mip, verbose=True):
    """Format a |Mip|.

    Args:
        mip (Mip): The MIP to format.

    Keyword Args:
        verbose (bool): Whether to include the mechanism and direction lines.

    Returns:
        str: A human-readable representation (empty string for Falsy MIPs).
    """
    if mip is False or mip is None:  # mips can be Falsy
        return ""

    if verbose:
        mechanism = "Mechanism: {}\n".format(
            fmt_mechanism(mip.mechanism, mip.subsystem))
        direction = "\nDirection: {}\n".format(mip.direction)
    else:
        mechanism = ""
        direction = ""

    # Partition and repertoires are shown only at the highest verbosity.
    # Compare with `==`, not `is`: int identity is an implementation detail.
    if config.REPR_VERBOSITY == HIGH:
        partition = "\nPartition:\n{}".format(
            indent(fmt_bipartition(mip.partition, mip.subsystem)))
        unpartitioned_repertoire = "\nUnpartitioned Repertoire:\n{}".format(
            indent(fmt_repertoire(mip.unpartitioned_repertoire)))
        partitioned_repertoire = "\nPartitioned Repertoire:\n{}".format(
            indent(fmt_repertoire(mip.partitioned_repertoire)))
    else:
        partition = ""
        unpartitioned_repertoire = ""
        partitioned_repertoire = ""

    return (
        "{SMALL_PHI} = {phi}\n"
        "{mechanism}"
        "Purview = {purview}"
        "{partition}"
        "{direction}"
        "{unpartitioned_repertoire}"
        "{partitioned_repertoire}").format(
            SMALL_PHI=SMALL_PHI,
            mechanism=mechanism,
            purview=fmt_mechanism(mip.purview, mip.subsystem),
            direction=direction,
            phi=mip.phi,
            partition=partition,
            unpartitioned_repertoire=unpartitioned_repertoire,
            partitioned_repertoire=partitioned_repertoire)
# TODO: print the two repertoires side-by-side?
def fmt_cut(cut, subsystem=None):
    """Format a |Cut|."""
    # Cut indices cannot be converted to labels for macro systems since macro
    # systems are cut at the micro label. Avoid this error by using micro
    # indices directly in the representation.
    # TODO: somehow handle this with inheritance instead of a conditional?
    # Local import — presumably to avoid a circular dependency with ..macro;
    # confirm before moving to module level.
    from ..macro import MacroSubsystem
    if isinstance(subsystem, MacroSubsystem):
        severed = str(cut.severed)
        intact = str(cut.intact)
    else:
        severed = fmt_mechanism(cut.severed, subsystem)
        intact = fmt_mechanism(cut.intact, subsystem)

    return "Cut {severed} --//--> {intact}".format(severed=severed,
                                                   intact=intact)
def fmt_big_mip(big_mip):
    """Format a |BigMip|."""
    unpartitioned = fmt_constellation(
        big_mip.unpartitioned_constellation, "Unpartitioned Constellation")
    partitioned = fmt_constellation(
        big_mip.partitioned_constellation, "Partitioned Constellation")

    # Phi line, subsystem, cut, then both constellations.
    return "{0} = {1}\n{2}\n{3}\n{4}{5}".format(
        BIG_PHI,
        big_mip.phi,
        big_mip.subsystem,
        fmt_cut(big_mip.cut, big_mip.subsystem),
        unpartitioned,
        partitioned)
def fmt_repertoire(r):
    """Format a repertoire."""
    # TODO: will this get unwieldy with large repertoires?
    if r is None:
        return ""

    # Drop singleton dimensions; r is presumably a numpy array — confirm.
    r = r.squeeze()

    lines = []

    # Header: "S      P(S)"
    space = " " * 4
    lines.append("{S:^{s_width}}{space}P({S})".format(
        S="S", s_width=r.ndim, space=space))

    # Lines: "001     .25" — one row per state of the repertoire.
    for state in utils.all_states(r.ndim):
        state_str = "".join(str(i) for i in state)
        lines.append("{0}{1}{2:g}".format(state_str, space, r[state]))

    return box("\n".join(lines))
def fmt_ac_mip(acmip, verbose=True):
    """Helper function to format a nice Mip string"""
    if acmip is False or acmip is None:  # mips can be Falsy
        return ""

    # Mechanism and direction lines only appear in verbose mode.
    mechanism = "mechanism: {}\t".format(acmip.mechanism) if verbose else ""
    direction = "direction: {}\n".format(acmip.direction) if verbose else ""
    return (
        "{alpha}\t"
        "{mechanism}"
        "purview: {acmip.purview}\t"
        "{direction}"
        "partition:\n{partition}\n"
        "probability:\t{probability}\t"
        "partitioned_probability:\t{partitioned_probability}\n").format(
            # alpha is rounded to four decimal places for display.
            alpha="{0:.4f}".format(round(acmip.alpha, 4)),
            mechanism=mechanism,
            direction=direction,
            acmip=acmip,
            partition=indent(fmt_bipartition(acmip.partition)),
            probability=indent(acmip.probability),
            partitioned_probability=indent(acmip.partitioned_probability))
def fmt_ac_big_mip(ac_big_mip):
    """Format a AcBigMip"""
    return (
        "{alpha}\n"
        "direction: {ac_big_mip.direction}\n"
        "context: {ac_big_mip.context}\n"
        "past_state: {ac_big_mip.before_state}\n"
        "current_state: {ac_big_mip.after_state}\n"
        "cut: {ac_big_mip.cut}\n"
        "{unpartitioned_account}"
        "{partitioned_account}".format(
            # alpha is rounded to four decimal places for display.
            alpha="{0:.4f}".format(round(ac_big_mip.alpha, 4)),
            ac_big_mip=ac_big_mip,
            unpartitioned_account=fmt_account(
                ac_big_mip.unpartitioned_account, "Unpartitioned Account"),
            partitioned_account=fmt_account(
                ac_big_mip.partitioned_account, "Partitioned Account")))
def fmt_account(account, title=None):
    """Format an Account or a DirectedAccount"""
    if title is None:
        title = account.__class__.__name__  # `Account` or `DirectedAccount`

    plural = "" if len(account) == 1 else "s"
    heading = "{} ({} coefficient{})".format(title, len(account), plural)
    body = "\n".join(str(entry) for entry in account)

    return "\n" + header(heading, body, under_char="*")
def fmt_actual_cut(cut):
    """Format an ActualCut"""
    # Both severed cause->effect connections, joined with '&&'.
    template = ("{cut.cause_part1} --//--> {cut.effect_part2} && "
                "{cut.cause_part2} --//--> {cut.effect_part1}")
    return template.format(cut=cut)
|
gpl-3.0
|
musically-ut/statsmodels
|
statsmodels/graphics/tests/test_dotplot.py
|
26
|
15330
|
import numpy as np
from statsmodels.graphics.dotplots import dot_plot
import pandas as pd
from numpy.testing import dec
# If true, the output is written to a multi-page pdf file.
pdf_output = False

# matplotlib is optional; the tests below are skipped when it is unavailable.
try:
    import matplotlib.pyplot as plt
    import matplotlib
    have_matplotlib = True
except ImportError:
    have_matplotlib = False
def close_or_save(pdf, fig):
    """Append the figure to the multi-page PDF when enabled, else close it."""
    if pdf_output:
        pdf.savefig(fig)
        return
    plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_all():
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_dotplot.pdf")
else:
pdf = None
# Basic dotplot with points only
plt.clf()
points = range(20)
ax = plt.axes()
fig = dot_plot(points, ax=ax)
ax.set_title("Basic horizontal dotplot")
close_or_save(pdf, fig)
# Basic vertical dotplot
plt.clf()
points = range(20)
ax = plt.axes()
fig = dot_plot(points, ax=ax, horizontal=False)
ax.set_title("Basic vertical dotplot")
close_or_save(pdf, fig)
# Tall and skinny
plt.figure(figsize=(4,12))
ax = plt.axes()
vals = np.arange(40)
fig = dot_plot(points, ax=ax)
ax.set_title("Tall and skinny dotplot")
ax.set_xlabel("x axis label")
close_or_save(pdf, fig)
# Short and wide
plt.figure(figsize=(12,4))
ax = plt.axes()
vals = np.arange(40)
fig = dot_plot(points, ax=ax, horizontal=False)
ax.set_title("Short and wide dotplot")
ax.set_ylabel("y axis label")
close_or_save(pdf, fig)
# Tall and skinny striped dotplot
plt.figure(figsize=(4,12))
ax = plt.axes()
points = np.arange(40)
fig = dot_plot(points, ax=ax, striped=True)
ax.set_title("Tall and skinny striped dotplot")
ax.set_xlim(-10, 50)
close_or_save(pdf, fig)
# Short and wide striped
plt.figure(figsize=(12,4))
ax = plt.axes()
points = np.arange(40)
fig = dot_plot(points, ax=ax, striped=True, horizontal=False)
ax.set_title("Short and wide striped dotplot")
ax.set_ylim(-10, 50)
close_or_save(pdf, fig)
# Basic dotplot with few points
plt.figure()
ax = plt.axes()
points = np.arange(4)
fig = dot_plot(points, ax=ax)
ax.set_title("Basic horizontal dotplot with few lines")
close_or_save(pdf, fig)
# Basic dotplot with few points
plt.figure()
ax = plt.axes()
points = np.arange(4)
fig = dot_plot(points, ax=ax, horizontal=False)
ax.set_title("Basic vertical dotplot with few lines")
close_or_save(pdf, fig)
# Manually set the x axis limits
plt.figure()
ax = plt.axes()
points = np.arange(20)
fig = dot_plot(points, ax=ax)
ax.set_xlim(-10, 30)
ax.set_title("Dotplot with adjusted horizontal range")
close_or_save(pdf, fig)
# Left row labels
plt.clf()
ax = plt.axes()
lines = ["ABCDEFGH"[np.random.randint(0, 8)] for k in range(20)]
points = np.random.normal(size=20)
fig = dot_plot(points, lines=lines, ax=ax)
ax.set_title("Dotplot with user-supplied labels in the left margin")
close_or_save(pdf, fig)
# Left and right row labels
plt.clf()
ax = plt.axes()
points = np.random.normal(size=20)
lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
for k in range(20)]
fig = dot_plot(points, lines=lines, ax=ax, split_names="::")
ax.set_title("Dotplot with user-supplied labels in both margins")
close_or_save(pdf, fig)
# Both sides row labels
plt.clf()
ax = plt.axes([0.1, 0.1, 0.88, 0.8])
points = np.random.normal(size=20)
lines = ["ABCDEFGH"[np.random.randint(0, 8)] + "::" + str(k+1)
for k in range(20)]
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
horizontal=False)
txt = ax.set_title("Vertical dotplot with user-supplied labels in both margins")
txt.set_position((0.5, 1.06))
close_or_save(pdf, fig)
# Custom colors and symbols
plt.clf()
ax = plt.axes([0.1, 0.07, 0.78, 0.85])
points = np.random.normal(size=20)
lines = np.kron(range(5), np.ones(4)).astype(np.int32)
styles = np.kron(np.ones(5), range(4)).astype(np.int32)
#marker_props = {k: {"color": "rgbc"[k], "marker": "osvp"[k],
# "ms": 7, "alpha": 0.6} for k in range(4)}
# python 2.6 compat, can be removed later
marker_props = dict((k, {"color": "rgbc"[k], "marker": "osvp"[k],
"ms": 7, "alpha": 0.6}) for k in range(4))
fig = dot_plot(points, lines=lines, styles=styles, ax=ax,
marker_props=marker_props)
ax.set_title("Dotplot with custom colors and symbols")
close_or_save(pdf, fig)
# Basic dotplot with symmetric intervals
plt.clf()
ax = plt.axes()
points = range(20)
fig = dot_plot(points, intervals=np.ones(20), ax=ax)
ax.set_title("Dotplot with symmetric intervals")
close_or_save(pdf, fig)
# Basic dotplot with symmetric intervals, pandas inputs.
plt.clf()
ax = plt.axes()
points = pd.Series(range(20))
intervals = pd.Series(np.ones(20))
fig = dot_plot(points, intervals=intervals, ax=ax)
ax.set_title("Dotplot with symmetric intervals (Pandas inputs)")
close_or_save(pdf, fig)
# Basic dotplot with nonsymmetric intervals
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for i in range(20)]
fig = dot_plot(points, intervals=intervals, ax=ax)
ax.set_title("Dotplot with nonsymmetric intervals")
close_or_save(pdf, fig)
# Vertical dotplot with nonsymmetric intervals
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for i in range(20)]
fig = dot_plot(points, intervals=intervals, ax=ax, horizontal=False)
ax.set_title("Vertical dotplot with nonsymmetric intervals")
close_or_save(pdf, fig)
# Dotplot with nonsymmetric intervals, adjust line properties
plt.clf()
ax = plt.axes()
points = np.arange(20)
intervals = [(1, 3) for x in range(20)]
line_props = {0: {"color": "lightgrey",
"solid_capstyle": "round"}}
fig = dot_plot(points, intervals=intervals, line_props=line_props, ax=ax)
ax.set_title("Dotplot with custom line properties")
close_or_save(pdf, fig)
# Dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with two points per line")
close_or_save(pdf, fig)
# Dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
fig = dot_plot(points, intervals=intervals, lines=lines,
styles=styles, ax=ax, stacked=True,
styles_order=["Dog", "Cat"])
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with two points per line (reverse order)")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Vertical dotplot with two points per line")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
styles_order = ["Dog", "Cat"]
fig = dot_plot(points, intervals=intervals, lines=lines,
styles=styles, ax=ax, stacked=True,
horizontal=False, styles_order=styles_order)
handles, labels = ax.get_legend_handles_labels()
lh = dict(zip(labels, handles))
handles = [lh[l] for l in styles_order]
leg = plt.figlegend(handles, styles_order, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Vertical dotplot with two points per line (reverse order)")
close_or_save(pdf, fig)
# Vertical dotplot with two points per line and a legend
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, striped=True, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
plt.ylim(-20, 20)
ax.set_title("Vertical dotplot with two points per line")
close_or_save(pdf, fig)
# Dotplot with color-matched points and intervals
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
marker_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
line_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, marker_props=marker_props,
line_props=line_props)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with color-matched points and intervals")
close_or_save(pdf, fig)
# Dotplot with color-matched points and intervals
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = np.kron(range(20), (1,1))
intervals = [(1,3) for k in range(40)]
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
marker_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
line_props = {"Cat": {"color": "orange"},
"Dog": {"color": "purple"}}
fig = dot_plot(points, intervals=intervals, lines=lines, styles=styles,
ax=ax, stacked=True, marker_props=marker_props,
line_props=line_props, horizontal=False)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with color-matched points and intervals")
close_or_save(pdf, fig)
# Dotplot with sections
plt.clf()
ax = plt.axes()
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax)
ax.set_title("Dotplot with sections")
close_or_save(pdf, fig)
# Vertical dotplot with sections
plt.clf()
ax = plt.axes([0.1,0.1,0.9,0.75])
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles,
sections=sections, ax=ax, horizontal=False)
txt = ax.set_title("Vertical dotplot with sections")
txt.set_position((0.5, 1.08))
close_or_save(pdf, fig)
# Reorder sections
plt.clf()
ax = plt.axes()
points = range(30)
lines = np.kron(range(15), (1,1)).astype(np.int32)
styles = np.kron(np.ones(15), (0,1)).astype(np.int32)
sections = np.kron((0,1,2), np.ones(10)).astype(np.int32)
sections = [["Axx", "Byy", "Czz"][k] for k in sections]
fig = dot_plot(points, lines=lines, styles=styles, sections=sections, ax=ax,
section_order=["Byy", "Axx", "Czz"])
ax.set_title("Dotplot with sections in specified order")
close_or_save(pdf, fig)
# Reorder the lines.
plt.figure()
ax = plt.axes()
points = np.arange(4)
lines = ["A", "B", "C", "D"]
line_order = ["B", "C", "A", "D"]
fig = dot_plot(points, lines=lines, line_order=line_order, ax=ax)
ax.set_title("Dotplot with reordered lines")
close_or_save(pdf, fig)
# Format labels
plt.clf()
points = range(20)
lines = ["%d::%d" % (i, 100+i) for i in range(20)]
fmt_left = lambda x : "lft_" + x
fmt_right = lambda x : "rgt_" + x
ax = plt.axes()
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
fmt_left_name=fmt_left, fmt_right_name=fmt_right)
ax.set_title("Horizontal dotplot with name formatting")
close_or_save(pdf, fig)
# Right names only
plt.clf()
points = range(20)
lines = ["%d::%d" % (i, 100+i) for i in range(20)]
ax = plt.axes()
fig = dot_plot(points, lines=lines, ax=ax, split_names="::",
show_names="right")
ax.set_title("Show right names only")
close_or_save(pdf, fig)
# Dotplot with different numbers of points per line
plt.clf()
ax = plt.axes([0.1, 0.1, 0.75, 0.8])
points = 5*np.random.normal(size=40)
lines = []
ii = 0
while len(lines) < 40:
for k in range(np.random.randint(1, 4)):
lines.append(ii)
ii += 1
styles = np.kron(np.ones(20), (0,1)).astype(np.int32)
styles = [["Cat", "Dog"][i] for i in styles]
fig = dot_plot(points, lines=lines, styles=styles,
ax=ax, stacked=True)
handles, labels = ax.get_legend_handles_labels()
leg = plt.figlegend(handles, labels, "center right", numpoints=1,
handletextpad=0.0001)
leg.draw_frame(False)
ax.set_title("Dotplot with different numbers of points per line")
close_or_save(pdf, fig)
if pdf_output:
pdf.close()
|
bsd-3-clause
|
jonathanmarvens/jeeves
|
test/gallery/location/confidentiality/Location.py
|
3
|
2705
|
'''
Location example for Jeeves with confidentiality policies.
'''
from abc import ABCMeta, abstractmethod
from macropy.case_classes import macros, enum
import JeevesLib
from sourcetrans.macro_module import macros, jeeves
class InternalError(Exception):
    """Raised when a location query receives an unsupported location type."""

    def __init__(self, message):
        # Forward to Exception so that str(err) and err.args carry the
        # message; the original initializer left them empty.
        super(InternalError, self).__init__(message)
        self.message = message
# Definitions of locations.
@jeeves
class Location:
    """Abstract base class of the location hierarchy (GPS, City, Country).

    Decorated with @jeeves so the Jeeves macro transform is applied to
    its methods (see sourcetrans.macro_module).
    """
    # Python 2-style metaclass declaration: makes the class abstract so
    # the methods below must be implemented by concrete subclasses.
    # NOTE(review): has no effect under Python 3 -- confirm target version.
    __metaclass__ = ABCMeta

    @abstractmethod
    def isIn(self, loc):
        """Return whether this location lies within *loc*."""
        return False

    @abstractmethod
    def isNear(self, loc, radius=50.0):
        """Return whether this location is within *radius* of *loc*."""
        return False
class GPS(Location):
    """A location given by exact latitude/longitude coordinates.

    *city* optionally names the City containing the coordinates and is
    used to answer City/Country containment queries.
    """
    # NOTE(review): isNear() is declared abstract on Location but never
    # overridden here -- confirm the @jeeves transform makes that legal.

    def __init__(self, latitude, longitude, city=None):
        self.latitude = latitude
        self.longitude = longitude
        self.city = city

    def __eq__(self, other):
        return (isinstance(other, GPS) and (self.latitude == other.latitude)
            and (self.longitude == other.longitude))

    def isIn(self, loc):
        """Return whether this point is *loc* (GPS), in *loc* (City), or
        in *loc* (Country); raise InternalError for any other type."""
        if isinstance(loc, GPS):
            return self == loc
        elif isinstance(loc, City):
            return self.city == loc
        elif isinstance(loc, Country):
            # Guard the default city=None, which previously raised
            # AttributeError here instead of answering the query.
            return self.city is not None and self.city.country == loc
        else:
            raise InternalError("A location must be a GPS, City, or Country.")
class City(Location):
    """A location identified by a city name and its country."""

    def __init__(self, city, country):
        self.city = city
        self.country = country

    def __eq__(self, other):
        if not isinstance(other, City):
            return False
        return self.city == other.city and self.country == other.country

    def isIn(self, loc):
        """Return whether this city lies within *loc*; a GPS point never
        contains a city. Raises InternalError for unknown types."""
        if isinstance(loc, GPS):
            return False
        if isinstance(loc, City):
            return self == loc
        if isinstance(loc, Country):
            return self.country == loc
        raise InternalError("A location must be a GPS, City, or Country.")
class Country(Location):
    """A location identified by a country name alone."""

    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        if not isinstance(other, Country):
            return False
        return self.name == other.name

    def isIn(self, loc):
        """A country is only ever contained in an equal country; GPS and
        City locations never contain one. Raises InternalError otherwise."""
        if isinstance(loc, Country):
            return self == loc
        if isinstance(loc, (GPS, City)):
            return False
        raise InternalError("A location must be a GPS, City, or Country.")
# Users have a user ID and a location.
@jeeves
class User:
    """A network user with an id, a location, and a friend list."""

    def __init__(self, userId, location, friends=()):
        self.userId = userId
        self.location = location
        # Copy into a fresh list per instance. The default is now an
        # immutable tuple to avoid the mutable-default-argument pitfall;
        # list(()) == list([]) so behavior is unchanged.
        self.friends = list(friends)

    def addFriend(self, friend):
        self.friends.append(friend)

    def isFriends(self, other):
        # Membership test routed through JeevesLib (presumably
        # policy-aware; confirm against JeevesLib.jhas semantics).
        return JeevesLib.jhas(self.friends, other)

    def prettyPrint(self):
        return str(self.userId)
class LocationNetwork:
    """A collection of User objects supporting aggregate location queries."""

    def __init__(self, users=None):
        # The original default (users=[]) was stored without copying, so
        # every instance built without an argument shared one list and
        # mutations leaked between them. A None sentinel fixes that while
        # preserving the aliasing behavior for explicitly passed lists.
        self.users = users if users is not None else []

    @jeeves
    def countUsersInLocation(self, loc):
        """Return the number of users whose location is in *loc*."""
        count = 0  # renamed from 'sum' to stop shadowing the builtin
        for user in self.users:
            if user.location.isIn(loc):
                count += 1
        return count
|
mit
|
bakins/kubernetes
|
vendor/github.com/ugorji/go/codec/test.py
|
1138
|
3876
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Build the golden-file test corpus.

    Returns every primitive test value, followed by the full primitive
    list nested as one element, followed by a few composite (dict/list)
    values. The values themselves must not change: the serialized golden
    files are compared byte-for-byte by the Go test suite.
    """
    primitives = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464646464.0,
        False,
        True,
        None,
        u"someday",
        u"",
        u"bytestring",
        1328176922000002000,
        -2206187877999998000,
        270,
        -2013855847999995777,
        #-6795364578871345152,
    ]
    composites = [
        {"true": True,
         "false": False},
        {"true": "True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0, {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": "1234567890"},
        {True: "true", 8: False, "false": 0},
    ]
    data = list(primitives)
    data.append(primitives)  # nested copy of the whole primitive list
    data.extend(composites)
    return data
def build_test_data(destdir):
    """Serialize each test value into *destdir*.

    For value i, writes '<i>.msgpack.golden' and '<i>.cbor.golden'.
    """
    for i, value in enumerate(get_test_data_list()):
        # 'with' guarantees the files are closed even if dumps()/write()
        # raises; the original open/close pairs leaked handles on error.
        with open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') as f:
            f.write(msgpack.dumps(value))
        with open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') as f:
            f.write(cbor.dumps(value))
def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server on localhost:*port*.

    If *stopTimeSec* is positive, the (blocking) server is stopped
    automatically after that many seconds.
    """
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return "1:%s 2:%s 3:%s" % (msg1, msg2, msg3)

        def EchoStruct(self, msg):
            return "%s" % msg

    server = msgpackrpc.Server(EchoHandler())
    server.listen(msgpackrpc.Address('localhost', port))
    if stopTimeSec > 0:
        # Schedule shutdown so server.start() does not block forever.
        threading.Timer(stopTimeSec, server.stop).start()
    server.start()
def doRpcClientToPythonSvc(port):
    """Call the Python echo service on localhost:*port*, printing replies."""
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    # Single-argument print() is equivalent under Python 2's print
    # statement and makes this module parseable by Python 3 as well.
    print(client.call("Echo123", "A1", "B2", "C3"))
    print(client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}))
def doRpcClientToGoSvc(port):
    """Call the Go echo service on localhost:*port*, printing replies."""
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    # Single-argument print() is equivalent under Python 2's print
    # statement and makes this module parseable by Python 3 as well.
    print(client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]))
    print(client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"}))
def doMain(args):
    """Dispatch on the first CLI argument; print usage on any mismatch.

    testdata <dir>                       -- write golden files
    rpc-server <port> <stopTimeSec>      -- run the echo server
    rpc-client-python-service <port>     -- exercise the Python service
    rpc-client-go-service <port>         -- exercise the Go service
    """
    cmd = args[0] if args else None
    nargs = len(args)
    if cmd == "testdata" and nargs == 2:
        build_test_data(args[1])
    elif cmd == "rpc-server" and nargs == 3:
        doRpcServer(int(args[1]), int(args[2]))
    elif cmd == "rpc-client-python-service" and nargs == 2:
        doRpcClientToPythonSvc(int(args[1]))
    elif cmd == "rpc-client-go-service" and nargs == 2:
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py "
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
# Script entry point: forward the command-line args (sans program name).
if __name__ == "__main__":
    doMain(sys.argv[1:])
|
apache-2.0
|
xkmato/tracpro
|
tracpro/baseline/tests/test_charts.py
|
2
|
5235
|
from __future__ import unicode_literals
import datetime
from dateutil.relativedelta import relativedelta
import pytz
from tracpro.test import factories
from tracpro.test.cases import TracProTest
from ..charts import chart_baseline
from ..forms import BaselineTermFilterForm
from ..models import BaselineTerm
class TestChartBaseline(TracProTest):
    """Tests for chart_baseline().

    Fixture: one baseline poll (2 pollruns) and one follow-up poll
    (3 pollruns) answered by three contacts, with deterministic values
    (baseline: 10*i*j, follow up: 7*i*j for contact i, pollrun j).
    """

    def setUp(self):
        super(TestChartBaseline, self).setUp()
        self.org = factories.Org()
        self.contact1 = factories.Contact(org=self.org, name="Apple")
        self.contact2 = factories.Contact(org=self.org, name="Blueberry")
        self.contact3 = factories.Contact(org=self.org, name="Cherry")
        self.start_date = datetime.datetime(2015, 1, 1, 8, tzinfo=pytz.utc)
        # Baseline poll with two universal pollruns on consecutive days.
        self.baseline = factories.Question(poll__org=self.org)
        self.baseline_pollrun1 = factories.UniversalPollRun(
            poll=self.baseline.poll,
            conducted_on=self.start_date)
        self.baseline_pollrun2 = factories.UniversalPollRun(
            poll=self.baseline.poll,
            conducted_on=self.start_date + relativedelta(days=1))
        # Follow-up poll with three universal pollruns on the next days.
        self.follow_up = factories.Question(poll__org=self.org)
        self.follow_up_pollrun1 = factories.UniversalPollRun(
            poll=self.follow_up.poll,
            conducted_on=self.start_date + relativedelta(days=1))
        self.follow_up_pollrun2 = factories.UniversalPollRun(
            poll=self.follow_up.poll,
            conducted_on=self.start_date + relativedelta(days=2))
        self.follow_up_pollrun3 = factories.UniversalPollRun(
            poll=self.follow_up.poll,
            conducted_on=self.start_date + relativedelta(days=3))
        self.baseline_term = BaselineTerm.objects.create(
            org=self.org,
            name="Test Baseline Term",
            start_date=self.start_date,
            end_date=self.start_date + relativedelta(days=3),
            baseline_poll=self.baseline.poll,
            baseline_question=self.baseline,
            follow_up_poll=self.follow_up.poll,
            follow_up_question=self.follow_up,
            y_axis_title="# cats")
        # Create an answer for each contact for every pollrun of both polls.
        contacts = [self.contact1, self.contact2, self.contact3]
        for i, contact in enumerate(contacts, 1):
            for j, pollrun in enumerate(self.baseline.poll.pollruns.all(), 1):
                factories.Answer(
                    response__contact=contact,
                    response__pollrun=pollrun,
                    question=self.baseline,
                    value=10 * i * j,
                    submitted_on=self.start_date + relativedelta(days=j - 1))
            for j, pollrun in enumerate(self.follow_up.poll.pollruns.all(), 1):
                factories.Answer(
                    response__contact=contact,
                    response__pollrun=pollrun,
                    question=self.follow_up,
                    value=7 * i * j,
                    submitted_on=self.start_date + relativedelta(days=j))
        # Empty filter form for testing.
        self.filter_form = BaselineTermFilterForm(
            org=self.org, baseline_term=self.baseline_term, data_regions=None)

    def get_data(self, region, include_subregions, assert_empty=False):
        """Run chart_baseline and transform its output for easy testing.

        Returns (series, categories, summary_data). When *assert_empty*
        is set, asserts that no data was produced and returns a
        (None, None, None) placeholder instead.
        """
        self.filter_form.full_clean()
        chart_data, summary_table = chart_baseline(
            baseline_term=self.baseline_term,
            filter_form=self.filter_form,
            region=region,
            include_subregions=include_subregions)
        if assert_empty:
            self.assertIsNone(chart_data)
            self.assertIsNone(summary_table)
            # Fixed: the original fell through here and crashed with
            # AttributeError on chart_data.keys() when chart_data is None.
            return None, None, None
        self.assertEqual(set(chart_data.keys()), set(['series', 'categories']))
        series = {s['name']: s['data'] for s in chart_data['series']}
        categories = chart_data['categories']
        summary_data = dict(summary_table)
        return series, categories, summary_data

    def test_chart_data__universal(self):
        series, categories, summary_data = self.get_data(region=None, include_subregions=True)
        # There should be 3 dates to match each follow-up pollrun.
        self.assertEqual(categories, [
            "2015-01-02",
            "2015-01-03",
            "2015-01-04",
        ])
        self.assertEqual(summary_data['Dates'], 'January 01, 2015 - January 04, 2015')
        self.assertEqual(summary_data['Baseline response rate (%)'], 100)
        self.assertEqual(summary_data['Baseline value (# cats)'], 60.0)
        self.assertEqual(summary_data['Follow up mean (# cats)'], 84.0)
        self.assertEqual(summary_data['Follow up standard deviation'], 34.3)
        self.assertEqual(summary_data['Follow up response rate (%)'], 100)
        # Baseline should be sum of first answers per contact.
        # Series length should match that of follow-up.
        # 10 + 20 + 30 = 60
        self.assertEqual(series['Baseline'], [60.0, 60.0, 60.0])
        # Follow-up should be sum of contact answers per pollrun.
        self.assertEqual(series['Follow up'], [
            {'y': 42.0},  # 7 + 14 + 21
            {'y': 84.0},  # 14 + 28 + 42
            {'y': 126.0},  # 21 + 42 + 63
        ])
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.